Diffstat (limited to 'drivers') — each row gives the file mode, the file path, and the total number of lines changed
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c40
-rw-r--r--drivers/accel/ivpu/ivpu_drv.h3
-rw-r--r--drivers/accel/ivpu/ivpu_hw.h6
-rw-r--r--drivers/accel/ivpu/ivpu_hw_37xx.c11
-rw-r--r--drivers/accel/ivpu/ivpu_hw_40xx.c6
-rw-r--r--drivers/accel/ivpu/ivpu_ipc.c8
-rw-r--r--drivers/accel/ivpu/ivpu_mmu.c8
-rw-r--r--drivers/accel/ivpu/ivpu_pm.c14
-rw-r--r--drivers/accessibility/speakup/devsynth.c149
-rw-r--r--drivers/accessibility/speakup/main.c2
-rw-r--r--drivers/accessibility/speakup/synth.c4
-rw-r--r--drivers/acpi/Kconfig3
-rw-r--r--drivers/acpi/acpica/dbnames.c8
-rw-r--r--drivers/acpi/apei/Kconfig13
-rw-r--r--drivers/acpi/apei/Makefile2
-rw-r--r--drivers/acpi/apei/apei-internal.h18
-rw-r--r--drivers/acpi/apei/einj-core.c (renamed from drivers/acpi/apei/einj.c)122
-rw-r--r--drivers/acpi/apei/einj-cxl.c113
-rw-r--r--drivers/acpi/dock.c2
-rw-r--r--drivers/acpi/numa/hmat.c83
-rw-r--r--drivers/acpi/numa/srat.c11
-rw-r--r--drivers/acpi/riscv/Makefile4
-rw-r--r--drivers/acpi/riscv/cppc.c157
-rw-r--r--drivers/acpi/riscv/cpuidle.c81
-rw-r--r--drivers/acpi/scan.c5
-rw-r--r--drivers/acpi/sleep.c36
-rw-r--r--drivers/acpi/tables.c2
-rw-r--r--drivers/acpi/thermal.c22
-rw-r--r--drivers/android/binder.c8
-rw-r--r--drivers/android/binder_alloc.c2
-rw-r--r--drivers/ata/ahci.c92
-rw-r--r--drivers/ata/ahci_st.c1
-rw-r--r--drivers/ata/libata-core.c2
-rw-r--r--drivers/ata/libata-eh.c5
-rw-r--r--drivers/ata/libata-scsi.c16
-rw-r--r--drivers/ata/pata_macio.c7
-rw-r--r--drivers/ata/sata_gemini.c5
-rw-r--r--drivers/ata/sata_mv.c63
-rw-r--r--drivers/ata/sata_sx4.c6
-rw-r--r--drivers/base/cacheinfo.c50
-rw-r--r--drivers/base/component.c4
-rw-r--r--drivers/base/core.c98
-rw-r--r--drivers/base/cpu.c8
-rw-r--r--drivers/base/dd.c32
-rw-r--r--drivers/base/firmware_loader/main.c16
-rw-r--r--drivers/base/memory.c23
-rw-r--r--drivers/base/node.c7
-rw-r--r--drivers/base/platform-msi.c6
-rw-r--r--drivers/base/property.c67
-rw-r--r--drivers/base/regmap/regcache-maple.c6
-rw-r--r--drivers/base/swnode.c13
-rw-r--r--drivers/block/floppy.c1
-rw-r--r--drivers/block/null_blk/main.c4
-rw-r--r--drivers/block/sunvdc.c2
-rw-r--r--drivers/block/zram/zcomp.c5
-rw-r--r--drivers/block/zram/zcomp.h1
-rw-r--r--drivers/block/zram/zram_drv.c2
-rw-r--r--drivers/bluetooth/btmtkuart.c4
-rw-r--r--drivers/bluetooth/btnxpuart.c4
-rw-r--r--drivers/bluetooth/btqca.c8
-rw-r--r--drivers/bluetooth/hci_qca.c19
-rw-r--r--drivers/bluetooth/hci_serdev.c4
-rw-r--r--drivers/bus/brcmstb_gisb.c15
-rw-r--r--drivers/bus/bt1-apb.c1
-rw-r--r--drivers/bus/mhi/common.h38
-rw-r--r--drivers/bus/mhi/ep/main.c7
-rw-r--r--drivers/bus/mhi/host/boot.c11
-rw-r--r--drivers/bus/mhi/host/init.c91
-rw-r--r--drivers/bus/mhi/host/internal.h56
-rw-r--r--drivers/bus/mhi/host/main.c19
-rw-r--r--drivers/bus/mhi/host/pci_generic.c2
-rw-r--r--drivers/bus/mhi/host/pm.c27
-rw-r--r--drivers/bus/mhi/host/trace.h282
-rw-r--r--drivers/bus/mips_cdmm.c2
-rw-r--r--drivers/bus/ts-nbus.c81
-rw-r--r--drivers/cache/sifive_ccache.c72
-rw-r--r--drivers/cdx/Makefile4
-rw-r--r--drivers/cdx/cdx.c20
-rw-r--r--drivers/cdx/cdx.h12
-rw-r--r--drivers/cdx/cdx_msi.c192
-rw-r--r--drivers/cdx/controller/Kconfig1
-rw-r--r--drivers/cdx/controller/cdx_controller.c25
-rw-r--r--drivers/cdx/controller/mc_cdx_pcol.h64
-rw-r--r--drivers/cdx/controller/mcdi_functions.c33
-rw-r--r--drivers/cdx/controller/mcdi_functions.h33
-rw-r--r--drivers/char/hpet.c1
-rw-r--r--drivers/char/hw_random/hisi-rng.c6
-rw-r--r--drivers/char/hw_random/n2-drv.c2
-rw-r--r--drivers/char/random.c10
-rw-r--r--drivers/char/tpm/st33zp24/i2c.c2
-rw-r--r--drivers/char/tpm/st33zp24/spi.c2
-rw-r--r--drivers/char/tpm/st33zp24/st33zp24.c2
-rw-r--r--drivers/char/tpm/tpm-interface.c2
-rw-r--r--drivers/char/tpm/tpm_atmel.c2
-rw-r--r--drivers/char/tpm/tpm_i2c_nuvoton.c2
-rw-r--r--drivers/char/tpm/tpm_nsc.c2
-rw-r--r--drivers/char/tpm/tpm_tis.c2
-rw-r--r--drivers/char/tpm/tpm_tis_core.c2
-rw-r--r--drivers/char/tpm/tpm_vtpm_proxy.c2
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c6
-rw-r--r--drivers/char/xillybus/xillybus_of.c6
-rw-r--r--drivers/clk/clk-ast2600.c7
-rw-r--r--drivers/clk/clk-cdce925.c1
-rw-r--r--drivers/clk/clk-devres.c40
-rw-r--r--drivers/clk/clk-fixed-factor.c103
-rw-r--r--drivers/clk/clk-fractional-divider.c14
-rw-r--r--drivers/clk/clk.c195
-rw-r--r--drivers/clk/clkdev.c2
-rw-r--r--drivers/clk/hisilicon/clk-hi3519.c2
-rw-r--r--drivers/clk/hisilicon/clk-hi3559a.c4
-rw-r--r--drivers/clk/imx/clk-composite-8m.c16
-rw-r--r--drivers/clk/imx/clk-imx8-acm.c6
-rw-r--r--drivers/clk/imx/clk-imx8mp-audiomix.c11
-rw-r--r--drivers/clk/imx/clk-scu.c22
-rw-r--r--drivers/clk/keystone/sci-clk.c12
-rw-r--r--drivers/clk/mediatek/clk-mt7622-apmixedsys.c1
-rw-r--r--drivers/clk/mediatek/clk-mt7981-topckgen.c5
-rw-r--r--drivers/clk/mediatek/clk-mt7988-infracfg.c25
-rw-r--r--drivers/clk/mediatek/clk-mt8135-apmixedsys.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8173-apmixedsys.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8183.c2
-rw-r--r--drivers/clk/mediatek/clk-mtk.c15
-rw-r--r--drivers/clk/meson/axg.c2
-rw-r--r--drivers/clk/microchip/clk-mpfs.c154
-rw-r--r--drivers/clk/qcom/Kconfig45
-rw-r--r--drivers/clk/qcom/Makefile5
-rw-r--r--drivers/clk/qcom/camcc-sc7180.c12
-rw-r--r--drivers/clk/qcom/camcc-sc7280.c12
-rw-r--r--drivers/clk/qcom/camcc-sc8280xp.c27
-rw-r--r--drivers/clk/qcom/camcc-sdm845.c12
-rw-r--r--drivers/clk/qcom/camcc-sm6350.c12
-rw-r--r--drivers/clk/qcom/camcc-sm8550.c10
-rw-r--r--drivers/clk/qcom/camcc-x1e80100.c2487
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c16
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h4
-rw-r--r--drivers/clk/qcom/clk-branch.h6
-rw-r--r--drivers/clk/qcom/dispcc-qcm2290.c16
-rw-r--r--drivers/clk/qcom/dispcc-sc7180.c12
-rw-r--r--drivers/clk/qcom/dispcc-sc7280.c19
-rw-r--r--drivers/clk/qcom/dispcc-sc8280xp.c16
-rw-r--r--drivers/clk/qcom/dispcc-sdm845.c14
-rw-r--r--drivers/clk/qcom/dispcc-sm6115.c4
-rw-r--r--drivers/clk/qcom/dispcc-sm6125.c12
-rw-r--r--drivers/clk/qcom/dispcc-sm6350.c12
-rw-r--r--drivers/clk/qcom/dispcc-sm6375.c12
-rw-r--r--drivers/clk/qcom/dispcc-sm8250.c134
-rw-r--r--drivers/clk/qcom/dispcc-sm8450.c19
-rw-r--r--drivers/clk/qcom/dispcc-sm8550.c19
-rw-r--r--drivers/clk/qcom/dispcc-sm8650.c16
-rw-r--r--drivers/clk/qcom/dispcc-x1e80100.c1718
-rw-r--r--drivers/clk/qcom/gcc-ipq5018.c9
-rw-r--r--drivers/clk/qcom/gcc-ipq6018.c19
-rw-r--r--drivers/clk/qcom/gcc-ipq8074.c2
-rw-r--r--drivers/clk/qcom/gcc-ipq9574.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8953.c4
-rw-r--r--drivers/clk/qcom/gcc-sa8775p.c29
-rw-r--r--drivers/clk/qcom/gcc-sc7180.c22
-rw-r--r--drivers/clk/qcom/gcc-sc7280.c20
-rw-r--r--drivers/clk/qcom/gcc-sc8180x.c62
-rw-r--r--drivers/clk/qcom/gcc-sc8280xp.c29
-rw-r--r--drivers/clk/qcom/gcc-sdm845.c1
-rw-r--r--drivers/clk/qcom/gcc-sdx55.c12
-rw-r--r--drivers/clk/qcom/gcc-sdx65.c13
-rw-r--r--drivers/clk/qcom/gcc-sdx75.c10
-rw-r--r--drivers/clk/qcom/gcc-sm4450.c32
-rw-r--r--drivers/clk/qcom/gcc-sm6375.c11
-rw-r--r--drivers/clk/qcom/gcc-sm7150.c25
-rw-r--r--drivers/clk/qcom/gcc-sm8150.c352
-rw-r--r--drivers/clk/qcom/gcc-sm8250.c23
-rw-r--r--drivers/clk/qcom/gcc-sm8350.c24
-rw-r--r--drivers/clk/qcom/gcc-sm8450.c25
-rw-r--r--drivers/clk/qcom/gcc-sm8550.c25
-rw-r--r--drivers/clk/qcom/gcc-sm8650.c20
-rw-r--r--drivers/clk/qcom/gcc-x1e80100.c16
-rw-r--r--drivers/clk/qcom/gdsc.c12
-rw-r--r--drivers/clk/qcom/gpucc-sa8775p.c12
-rw-r--r--drivers/clk/qcom/gpucc-sc7180.c12
-rw-r--r--drivers/clk/qcom/gpucc-sc7280.c21
-rw-r--r--drivers/clk/qcom/gpucc-sc8280xp.c10
-rw-r--r--drivers/clk/qcom/gpucc-sdm845.c12
-rw-r--r--drivers/clk/qcom/gpucc-sm8150.c12
-rw-r--r--drivers/clk/qcom/gpucc-sm8250.c12
-rw-r--r--drivers/clk/qcom/gpucc-sm8350.c12
-rw-r--r--drivers/clk/qcom/gpucc-sm8550.c22
-rw-r--r--drivers/clk/qcom/gpucc-x1e80100.c656
-rw-r--r--drivers/clk/qcom/lpasscorecc-sc7180.c7
-rw-r--r--drivers/clk/qcom/mmcc-apq8084.c2
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c2
-rw-r--r--drivers/clk/qcom/mss-sc7180.c140
-rw-r--r--drivers/clk/qcom/reset.c27
-rw-r--r--drivers/clk/qcom/reset.h2
-rw-r--r--drivers/clk/qcom/tcsrcc-x1e80100.c285
-rw-r--r--drivers/clk/qcom/videocc-sc7180.c12
-rw-r--r--drivers/clk/qcom/videocc-sc7280.c12
-rw-r--r--drivers/clk/qcom/videocc-sdm845.c12
-rw-r--r--drivers/clk/qcom/videocc-sm8150.c14
-rw-r--r--drivers/clk/qcom/videocc-sm8250.c22
-rw-r--r--drivers/clk/qcom/videocc-sm8350.c14
-rw-r--r--drivers/clk/qcom/videocc-sm8450.c29
-rw-r--r--drivers/clk/qcom/videocc-sm8550.c29
-rw-r--r--drivers/clk/renesas/Kconfig5
-rw-r--r--drivers/clk/renesas/Makefile1
-rw-r--r--drivers/clk/renesas/clk-mstp.c16
-rw-r--r--drivers/clk/renesas/r8a779f0-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a779g0-cpg-mssr.c13
-rw-r--r--drivers/clk/renesas/r8a779h0-cpg-mssr.c256
-rw-r--r--drivers/clk/renesas/r9a07g043-cpg.c37
-rw-r--r--drivers/clk/renesas/r9a07g044-cpg.c6
-rw-r--r--drivers/clk/renesas/r9a08g045-cpg.c3
-rw-r--r--drivers/clk/renesas/rcar-gen4-cpg.c10
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c117
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h1
-rw-r--r--drivers/clk/rockchip/clk-rk3399.c6
-rw-r--r--drivers/clk/rockchip/clk-rk3568.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3588.c50
-rw-r--r--drivers/clk/samsung/clk-cpu.c556
-rw-r--r--drivers/clk/samsung/clk-cpu.h53
-rw-r--r--drivers/clk/samsung/clk-exynos3250.c2
-rw-r--r--drivers/clk/samsung/clk-exynos4.c9
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c5
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c16
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c10
-rw-r--r--drivers/clk/samsung/clk-exynos850.c43
-rw-r--r--drivers/clk/samsung/clk-gs101.c940
-rw-r--r--drivers/clk/samsung/clk.h5
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7110-isp.c6
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7110-vout.c6
-rw-r--r--drivers/clk/sunxi/clk-a20-gmac.c21
-rw-r--r--drivers/clk/sunxi/clk-sun9i-cpus.c7
-rw-r--r--drivers/clk/sunxi/clk-usb.c9
-rw-r--r--drivers/clk/ti/apll.c11
-rw-r--r--drivers/clk/ti/clk.c71
-rw-r--r--drivers/clk/ti/clock.h1
-rw-r--r--drivers/clk/ti/divider.c5
-rw-r--r--drivers/clk/ti/dpll3xxx.c4
-rw-r--r--drivers/clk/ti/gate.c9
-rw-r--r--drivers/clk/ti/interface.c4
-rw-r--r--drivers/clk/ti/mux.c6
-rw-r--r--drivers/clk/x86/clk-pmc-atom.c13
-rw-r--r--drivers/clk/xilinx/clk-xlnx-clock-wizard.c2
-rw-r--r--drivers/clk/zynq/clkc.c8
-rw-r--r--drivers/clocksource/arm_global_timer.c35
-rw-r--r--drivers/clocksource/hyperv_timer.c26
-rw-r--r--drivers/clocksource/timer-clint.c2
-rw-r--r--drivers/clocksource/timer-imx-gpt.c3
-rw-r--r--drivers/clocksource/timer-imx-sysctr.c117
-rw-r--r--drivers/clocksource/timer-riscv.c5
-rw-r--r--drivers/clocksource/timer-stm32.c4
-rw-r--r--drivers/clocksource/timer-ti-32k.c2
-rw-r--r--drivers/comedi/drivers/das08.c1
-rw-r--r--drivers/comedi/drivers/vmk80xx.c35
-rw-r--r--drivers/cpufreq/Kconfig29
-rw-r--r--drivers/cpufreq/Kconfig.arm26
-rw-r--r--drivers/cpufreq/cpufreq-dt.c2
-rw-r--r--drivers/cpufreq/cpufreq.c18
-rw-r--r--drivers/cpufreq/freq_table.c2
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c20
-rw-r--r--drivers/cpuidle/cpuidle-riscv-sbi.c49
-rw-r--r--drivers/cpuidle/cpuidle.c2
-rw-r--r--drivers/crypto/Kconfig14
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c2
-rw-r--r--drivers/crypto/ccp/platform-access.c11
-rw-r--r--drivers/crypto/ccp/psp-dev.c11
-rw-r--r--drivers/crypto/ccp/sev-dev.c2
-rw-r--r--drivers/crypto/hisilicon/debugfs.c58
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c2
-rw-r--r--drivers/crypto/hisilicon/qm.c184
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c33
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c7
-rw-r--r--drivers/crypto/hisilicon/zip/zip_crypto.c1
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c2
-rw-r--r--drivers/crypto/intel/iaa/iaa_crypto.h25
-rw-r--r--drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c1
-rw-r--r--drivers/crypto/intel/iaa/iaa_crypto_main.c132
-rw-r--r--drivers/crypto/intel/iaa/iaa_crypto_stats.c30
-rw-r--r--drivers/crypto/intel/iaa/iaa_crypto_stats.h8
-rw-r--r--drivers/crypto/intel/qat/Kconfig14
-rw-r--r--drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c64
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c64
-rw-r--r--drivers/crypto/intel/qat/qat_common/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_accel_devices.h3
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_aer.c138
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_clock.c3
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_common_drv.h10
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c4
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c59
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c6
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_heartbeat.c20
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_heartbeat.h21
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c53
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c76
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c25
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_init.c12
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_isr.c11
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h7
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c64
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h21
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c8
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c6
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_rl.c20
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_sriov.c38
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_sysfs.c37
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_vf_isr.c2
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_comp_algs.c9
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_crypto.c4
-rw-r--r--drivers/crypto/n2_core.c2
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto.c5
-rw-r--r--drivers/crypto/virtio/virtio_crypto_akcipher_algs.c12
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c2
-rw-r--r--drivers/crypto/vmx/.gitignore3
-rw-r--r--drivers/crypto/vmx/Kconfig14
-rw-r--r--drivers/crypto/vmx/Makefile23
-rw-r--r--drivers/crypto/vmx/aes.c134
-rw-r--r--drivers/crypto/vmx/aes_cbc.c133
-rw-r--r--drivers/crypto/vmx/aes_ctr.c149
-rw-r--r--drivers/crypto/vmx/aes_xts.c162
-rw-r--r--drivers/crypto/vmx/aesp8-ppc.h30
-rw-r--r--drivers/crypto/vmx/aesp8-ppc.pl3889
-rw-r--r--drivers/crypto/vmx/ghash.c185
-rw-r--r--drivers/crypto/vmx/ghashp8-ppc.pl243
-rw-r--r--drivers/crypto/vmx/ppc-xlate.pl231
-rw-r--r--drivers/crypto/vmx/vmx.c77
-rw-r--r--drivers/crypto/xilinx/zynqmp-aes-gcm.c3
-rw-r--r--drivers/cxl/Kconfig13
-rw-r--r--drivers/cxl/acpi.c11
-rw-r--r--drivers/cxl/core/cdat.c212
-rw-r--r--drivers/cxl/core/core.h4
-rw-r--r--drivers/cxl/core/mbox.c5
-rw-r--r--drivers/cxl/core/pci.c99
-rw-r--r--drivers/cxl/core/port.c148
-rw-r--r--drivers/cxl/core/region.c169
-rw-r--r--drivers/cxl/core/regs.c5
-rw-r--r--drivers/cxl/core/trace.h14
-rw-r--r--drivers/cxl/cxl.h17
-rw-r--r--drivers/cxl/cxlmem.h2
-rw-r--r--drivers/cxl/cxlpci.h24
-rw-r--r--drivers/dax/bus.c295
-rw-r--r--drivers/dax/super.c14
-rw-r--r--drivers/dio/dio-driver.c2
-rw-r--r--drivers/dma-buf/st-dma-fence-chain.c6
-rw-r--r--drivers/dma/Kconfig14
-rw-r--r--drivers/dma/amba-pl08x.c2
-rw-r--r--drivers/dma/bestcomm/sram.c5
-rw-r--r--drivers/dma/fsl-edma-common.c101
-rw-r--r--drivers/dma/fsl-edma-common.h161
-rw-r--r--drivers/dma/fsl-edma-main.c19
-rw-r--r--drivers/dma/idxd/bus.c2
-rw-r--r--drivers/dma/idxd/cdev.c4
-rw-r--r--drivers/dma/idxd/idxd.h14
-rw-r--r--drivers/dma/idxd/sysfs.c10
-rw-r--r--drivers/dma/mcf-edma-main.c2
-rw-r--r--drivers/dma/of-dma.c2
-rw-r--r--drivers/dma/pl330.c1
-rw-r--r--drivers/dma/ti/k3-psil-j721s2.c73
-rw-r--r--drivers/dma/ti/k3-udma-glue.c298
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c6
-rw-r--r--drivers/dpll/Kconfig2
-rw-r--r--drivers/firewire/core-device.c6
-rw-r--r--drivers/firewire/ohci.c6
-rw-r--r--drivers/firmware/arm_ffa/driver.c2
-rw-r--r--drivers/firmware/arm_scmi/perf.c3
-rw-r--r--drivers/firmware/arm_scmi/powercap.c2
-rw-r--r--drivers/firmware/arm_scmi/raw_mode.c7
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c7
-rw-r--r--drivers/firmware/efi/cper.c4
-rw-r--r--drivers/firmware/efi/earlycon.c2
-rw-r--r--drivers/firmware/efi/efi.c2
-rw-r--r--drivers/firmware/efi/libstub/Makefile2
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c2
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c8
-rw-r--r--drivers/firmware/efi/sysfb_efi.c2
-rw-r--r--drivers/firmware/qemu_fw_cfg.c14
-rw-r--r--drivers/firmware/xilinx/zynqmp.c25
-rw-r--r--drivers/fpga/dfl.c2
-rw-r--r--drivers/fpga/fpga-bridge.c8
-rw-r--r--drivers/gnss/serial.c2
-rw-r--r--drivers/gnss/sirf.c2
-rw-r--r--drivers/gpio/Kconfig12
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/gpio-crystalcove.c2
-rw-r--r--drivers/gpio/gpio-lpc32xx.c1
-rw-r--r--drivers/gpio/gpio-nomadik.c730
-rw-r--r--drivers/gpio/gpio-wcove.c2
-rw-r--r--drivers/gpio/gpiolib-cdev.c58
-rw-r--r--drivers/gpio/gpiolib.c35
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v7_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c17
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c62
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c8
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c57
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c77
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_state.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c103
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c71
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h8
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c3
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c13
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h2
-rw-r--r--drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h13
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c27
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h33
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h55
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h46
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h10
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c28
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c19
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c31
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c26
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c12
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c18
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c52
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c368
-rw-r--r--drivers/gpu/drm/ast/ast_dp.c3
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c16
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c2
-rw-r--r--drivers/gpu/drm/display/drm_dp_dual_mode_helper.c4
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c7
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c3
-rw-r--r--drivers/gpu/drm/drm_panel.c17
-rw-r--r--drivers/gpu/drm/drm_prime.c7
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c4
-rw-r--r--drivers/gpu/drm/i915/Makefile7
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c2
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c46
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c42
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_trace.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c29
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c89
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c14
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c3
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c39
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c31
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c4
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c2
-rw-r--r--drivers/gpu/drm/i915/i915_hwmon.c37
-rw-r--r--drivers/gpu/drm/i915/i915_memcpy.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c50
-rw-r--r--drivers/gpu/drm/imx/ipuv3/parallel-display.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h34
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c8
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c6
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c6
-rw-r--r--drivers/gpu/drm/msm/msm_kms.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c7
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672e.c2
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c13
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c50
-rw-r--r--drivers/gpu/drm/radeon/pptable.h10
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop2_reg.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c38
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c35
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c32
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_prime.c15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c44
-rw-r--r--drivers/gpu/drm/xe/Makefile4
-rw-r--r--drivers/gpu/drm/xe/display/intel_fb_bo.c8
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c5
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h2
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c59
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h19
-rw-r--r--drivers/gpu/drm/xe/xe_device.c11
-rw-r--r--drivers/gpu/drm/xe/xe_device.h4
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h3
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c120
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c2
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h5
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c7
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h7
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c2
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c4
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c25
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c8
-rw-r--r--drivers/gpu/drm/xe/xe_preempt_fence.c2
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c25
-rw-r--r--drivers/gpu/drm/xe/xe_query.c2
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c11
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.c10
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h2
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c163
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h8
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h15
-rw-r--r--drivers/gpu/drm/xe/xe_vram_freq.c4
-rw-r--r--drivers/gpu/host1x/bus.c8
-rw-r--r--drivers/greybus/bundle.c2
-rw-r--r--drivers/greybus/control.c2
-rw-r--r--drivers/greybus/core.c32
-rw-r--r--drivers/greybus/es2.c8
-rw-r--r--drivers/greybus/gb-beagleplay.c6
-rw-r--r--drivers/greybus/hd.c18
-rw-r--r--drivers/greybus/interface.c11
-rw-r--r--drivers/greybus/module.c2
-rw-r--r--drivers/greybus/svc.c2
-rw-r--r--drivers/hsi/clients/ssi_protocol.c3
-rw-r--r--drivers/hsi/hsi_core.c2
-rw-r--r--drivers/hv/Kconfig1
-rw-r--r--drivers/hv/channel.c29
-rw-r--r--drivers/hv/connection.c29
-rw-r--r--drivers/hv/hv.c36
-rw-r--r--drivers/hv/hv_common.c99
-rw-r--r--drivers/hv/vmbus_drv.c99
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c3
-rw-r--r--drivers/hwmon/ultra45_env.c2
-rw-r--r--drivers/hwspinlock/omap_hwspinlock.c57
-rw-r--r--drivers/hwtracing/coresight/Makefile20
-rw-r--r--drivers/hwtracing/coresight/coresight-cfg-afdo.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c496
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-core.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c31
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-core.c27
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c38
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h1
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h9
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c32
-rw-r--r--drivers/hwtracing/coresight/coresight-sysfs.c391
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c46
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c33
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.c145
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.h6
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.c457
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.h114
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c16
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.c24
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.h2
-rw-r--r--drivers/hwtracing/ptt/hisi_ptt.c6
-rw-r--r--drivers/i2c/busses/Kconfig2
-rw-r--r--drivers/i2c/busses/i2c-cadence.c33
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h23
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c424
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c2
-rw-r--r--drivers/i2c/busses/i2c-hisi.c13
-rw-r--r--drivers/i2c/busses/i2c-i801.c223
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c27
-rw-r--r--drivers/i2c/busses/i2c-imx.c62
-rw-r--r--drivers/i2c/busses/i2c-mpc.c16
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c740
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c3
-rw-r--r--drivers/i2c/busses/i2c-pxa.c2
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c27
-rw-r--r--drivers/i2c/busses/i2c-sprd.c6
-rw-r--r--drivers/i2c/i2c-core-base.c4
-rw-r--r--drivers/i2c/i2c-smbus.c19
-rw-r--r--drivers/i2c/muxes/i2c-mux-mlxcpld.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c89
-rw-r--r--drivers/i3c/internals.h2
-rw-r--r--drivers/i3c/master.c2
-rw-r--r--drivers/i3c/master/dw-i3c-master.c4
-rw-r--r--drivers/iio/Kconfig9
-rw-r--r--drivers/iio/Makefile1
-rw-r--r--drivers/iio/accel/Kconfig8
-rw-r--r--drivers/iio/accel/Makefile1
-rw-r--r--drivers/iio/accel/adxl367.c297
-rw-r--r--drivers/iio/accel/adxl372_spi.c2
-rw-r--r--drivers/iio/accel/bma180.c2
-rw-r--r--drivers/iio/accel/bmc150-accel-i2c.c15
-rw-r--r--drivers/iio/accel/bmc150-accel-spi.c3
-rw-r--r--drivers/iio/accel/bmi088-accel-i2c.c70
-rw-r--r--drivers/iio/accel/da280.c66
-rw-r--r--drivers/iio/accel/kxcjk-1013.c120
-rw-r--r--drivers/iio/accel/kxsd9-spi.c2
-rw-r--r--drivers/iio/accel/mma9551.c4
-rw-r--r--drivers/iio/accel/mma9553.c4
-rw-r--r--drivers/iio/accel/mxc4005.c5
-rw-r--r--drivers/iio/accel/mxc6255.c4
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/accel/stk8ba50.c4
-rw-r--r--drivers/iio/adc/Kconfig26
-rw-r--r--drivers/iio/adc/Makefile2
-rw-r--r--drivers/iio/adc/ad4130.c131
-rw-r--r--drivers/iio/adc/ad7091r-base.c25
-rw-r--r--drivers/iio/adc/ad9467.c267
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c7
-rw-r--r--drivers/iio/adc/adi-axi-adc.c385
-rw-r--r--drivers/iio/adc/max1363.c171
-rw-r--r--drivers/iio/adc/mcp320x.c29
-rw-r--r--drivers/iio/adc/pac1934.c1636
-rw-r--r--drivers/iio/adc/qcom-pm8xxx-xoadc.c1
-rw-r--r--drivers/iio/adc/rockchip_saradc.c17
-rw-r--r--drivers/iio/adc/rtq6056.c275
-rw-r--r--drivers/iio/adc/ti-adc108s102.c4
-rw-r--r--drivers/iio/adc/ti-ads1015.c2
-rw-r--r--drivers/iio/adc/ti-ads1298.c771
-rw-r--r--drivers/iio/adc/ti-ads8688.c2
-rw-r--r--drivers/iio/amplifiers/hmc425a.c274
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c11
-rw-r--r--drivers/iio/chemical/pms7003.c4
-rw-r--r--drivers/iio/chemical/scd30_serial.c4
-rw-r--r--drivers/iio/chemical/sps30_serial.c4
-rw-r--r--drivers/iio/common/inv_sensors/inv_sensors_timestamp.c2
-rw-r--r--drivers/iio/dac/mcp4821.c2
-rw-r--r--drivers/iio/dummy/iio_dummy_evgen.c2
-rw-r--r--drivers/iio/dummy/iio_simple_dummy.c185
-rw-r--r--drivers/iio/frequency/Kconfig10
-rw-r--r--drivers/iio/frequency/Makefile1
-rw-r--r--drivers/iio/frequency/admfm2000.c282
-rw-r--r--drivers/iio/gyro/bmg160_i2c.c4
-rw-r--r--drivers/iio/health/afe4403.c65
-rw-r--r--drivers/iio/health/afe4404.c65
-rw-r--r--drivers/iio/humidity/hdc3020.c445
-rw-r--r--drivers/iio/humidity/hts221_i2c.c4
-rw-r--r--drivers/iio/imu/adis16475.c8
-rw-r--r--drivers/iio/imu/adis16480.c9
-rw-r--r--drivers/iio/imu/bmi160/bmi160_i2c.c9
-rw-r--r--drivers/iio/imu/bmi323/bmi323_core.c78
-rw-r--r--drivers/iio/imu/bmi323/bmi323_i2c.c21
-rw-r--r--drivers/iio/imu/bno055/bno055_ser_core.c4
-rw-r--r--drivers/iio/imu/fxos8700_i2c.c3
-rw-r--r--drivers/iio/imu/fxos8700_spi.c3
-rw-r--r--drivers/iio/imu/kmx61.c2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig31
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c28
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c33
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c5
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c5
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h5
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c21
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c6
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c4
-rw-r--r--drivers/iio/industrialio-backend.c418
-rw-r--r--drivers/iio/industrialio-core.c6
-rw-r--r--drivers/iio/industrialio-gts-helper.c15
-rw-r--r--drivers/iio/light/Kconfig5
-rw-r--r--drivers/iio/light/al3010.c2
-rw-r--r--drivers/iio/light/al3320a.c1
-rw-r--r--drivers/iio/light/as73211.c142
-rw-r--r--drivers/iio/light/hid-sensor-als.c122
-rw-r--r--drivers/iio/light/jsa1212.c4
-rw-r--r--drivers/iio/light/ltr501.c3
-rw-r--r--drivers/iio/light/max44000.c6
-rw-r--r--drivers/iio/light/rpr0521.c4
-rw-r--r--drivers/iio/light/stk3310.c4
-rw-r--r--drivers/iio/light/us5182d.c4
-rw-r--r--drivers/iio/light/vcnl4000.c36
-rw-r--r--drivers/iio/light/vl6180.c1
-rw-r--r--drivers/iio/magnetometer/Kconfig12
-rw-r--r--drivers/iio/magnetometer/Makefile1
-rw-r--r--drivers/iio/magnetometer/af8133j.c528
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_i2c.c3
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_spi.c3
-rw-r--r--drivers/iio/magnetometer/mmc35240.c4
-rw-r--r--drivers/iio/potentiometer/max5487.c4
-rw-r--r--drivers/iio/pressure/Kconfig16
-rw-r--r--drivers/iio/pressure/Makefile2
-rw-r--r--drivers/iio/pressure/hp206c.c6
-rw-r--r--drivers/iio/pressure/hsc030pa.c49
-rw-r--r--drivers/iio/pressure/hsc030pa.h7
-rw-r--r--drivers/iio/pressure/hsc030pa_i2c.c9
-rw-r--r--drivers/iio/pressure/hsc030pa_spi.c7
-rw-r--r--drivers/iio/pressure/mprls0025pa.c313
-rw-r--r--drivers/iio/pressure/mprls0025pa.h102
-rw-r--r--drivers/iio/pressure/mprls0025pa_i2c.c100
-rw-r--r--drivers/iio/pressure/mprls0025pa_spi.c92
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c5
-rw-r--r--drivers/iio/proximity/isl29501.c3
-rw-r--r--drivers/iio/proximity/sx9310.c114
-rw-r--r--drivers/iio/proximity/sx9324.c178
-rw-r--r--drivers/iio/proximity/sx9360.c115
-rw-r--r--drivers/iio/temperature/ltc2983.c28
-rw-r--r--drivers/iio/temperature/tmp117.c9
-rw-r--r--drivers/iio/test/Kconfig14
-rw-r--r--drivers/iio/test/Makefile1
-rw-r--r--drivers/iio/test/iio-test-gts.c513
-rw-r--r--drivers/infiniband/core/cm.c23
-rw-r--r--drivers/infiniband/core/device.c37
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c16
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c78
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h2
-rw-r--r--drivers/infiniband/hw/efa/efa.h1
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c32
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c25
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.h3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c11
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h35
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c95
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h56
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c154
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h9
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c339
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c60
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c23
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c3
-rw-r--r--drivers/infiniband/hw/mana/cq.c29
-rw-r--r--drivers/infiniband/hw/mana/main.c82
-rw-r--r--drivers/infiniband/hw/mana/mana_ib.h27
-rw-r--r--drivers/infiniband/hw/mana/mr.c17
-rw-r--r--drivers/infiniband/hw/mana/qp.c94
-rw-r--r--drivers/infiniband/hw/mana/wq.c4
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c3
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h1
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c8
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c18
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mw.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c8
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c12
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c218
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c3
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c2
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c3
-rw-r--r--drivers/input/gameport/gameport.c4
-rw-r--r--drivers/input/input-leds.c8
-rw-r--r--drivers/input/input.c16
-rw-r--r--drivers/input/joystick/xpad.c14
-rw-r--r--drivers/input/keyboard/amikbd.c6
-rw-r--r--drivers/input/keyboard/bcm-keypad.c2
-rw-r--r--drivers/input/keyboard/matrix_keypad.c170
-rw-r--r--drivers/input/misc/88pm80x_onkey.c14
-rw-r--r--drivers/input/misc/iqs7222.c112
-rw-r--r--drivers/input/mouse/Kconfig12
-rw-r--r--drivers/input/mouse/Makefile1
-rw-r--r--drivers/input/mouse/navpoint.c350
-rw-r--r--drivers/input/rmi4/rmi_bus.c2
-rw-r--r--drivers/input/rmi4/rmi_bus.h2
-rw-r--r--drivers/input/rmi4/rmi_driver.c6
-rw-r--r--drivers/input/serio/serio.c2
-rw-r--r--drivers/input/serio/xilinx_ps2.c3
-rw-r--r--drivers/input/touchscreen/Kconfig31
-rw-r--r--drivers/input/touchscreen/Makefile3
-rw-r--r--drivers/input/touchscreen/goodix_berlin.h24
-rw-r--r--drivers/input/touchscreen/goodix_berlin_core.c755
-rw-r--r--drivers/input/touchscreen/goodix_berlin_i2c.c75
-rw-r--r--drivers/input/touchscreen/goodix_berlin_spi.c178
-rw-r--r--drivers/input/touchscreen/imagis.c118
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c1
-rw-r--r--drivers/interconnect/core.c12
-rw-r--r--drivers/interconnect/qcom/Kconfig18
-rw-r--r--drivers/interconnect/qcom/Makefile4
-rw-r--r--drivers/interconnect/qcom/icc-common.c3
-rw-r--r--drivers/interconnect/qcom/icc-common.h3
-rw-r--r--drivers/interconnect/qcom/msm8909.c1329
-rw-r--r--drivers/interconnect/qcom/sa8775p.c56
-rw-r--r--drivers/interconnect/qcom/sm6115.c12
-rw-r--r--drivers/interconnect/qcom/sm7150.c1754
-rw-r--r--drivers/interconnect/qcom/sm7150.h140
-rw-r--r--drivers/interconnect/qcom/sm8250.c2
-rw-r--r--drivers/interconnect/qcom/sm8550.c574
-rw-r--r--drivers/interconnect/qcom/sm8550.h284
-rw-r--r--drivers/interconnect/qcom/x1e80100.c353
-rw-r--r--drivers/interconnect/samsung/exynos.c2
-rw-r--r--drivers/iommu/amd/init.c25
-rw-r--r--drivers/iommu/amd/iommu.c11
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c38
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h2
-rw-r--r--drivers/iommu/dma-iommu.c9
-rw-r--r--drivers/iommu/intel/Kconfig2
-rw-r--r--drivers/iommu/intel/iommu.c11
-rw-r--r--drivers/iommu/intel/perfmon.c2
-rw-r--r--drivers/iommu/intel/svm.c2
-rw-r--r--drivers/iommu/iommu.c11
-rw-r--r--drivers/iommu/iommufd/Kconfig1
-rw-r--r--drivers/iommu/mtk_iommu.c1
-rw-r--r--drivers/iommu/mtk_iommu_v1.c1
-rw-r--r--drivers/ipack/ipack.c2
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c8
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c72
-rw-r--r--drivers/irqchip/irq-riscv-intc.c13
-rw-r--r--drivers/isdn/mISDN/socket.c10
-rw-r--r--drivers/leds/Kconfig14
-rw-r--r--drivers/leds/Makefile3
-rw-r--r--drivers/leds/flash/Kconfig7
-rw-r--r--drivers/leds/flash/leds-ktd2692.c116
-rw-r--r--drivers/leds/flash/leds-lm3601x.c3
-rw-r--r--drivers/leds/flash/leds-sgm3140.c3
-rw-r--r--drivers/leds/led-class.c6
-rw-r--r--drivers/leds/led-triggers.c38
-rw-r--r--drivers/leds/leds-aw200xx.c2
-rw-r--r--drivers/leds/leds-aw2013.c1
-rw-r--r--drivers/leds/leds-expresswire.c72
-rw-r--r--drivers/leds/leds-mlxcpld.c2
-rw-r--r--drivers/leds/leds-mlxreg.c1
-rw-r--r--drivers/leds/leds-pca963x.c28
-rw-r--r--drivers/leds/leds-spi-byte.c11
-rw-r--r--drivers/leds/leds-sunfire.c2
-rw-r--r--drivers/leds/leds.h1
-rw-r--r--drivers/leds/rgb/Kconfig12
-rw-r--r--drivers/leds/rgb/Makefile1
-rw-r--r--drivers/leds/rgb/leds-group-multicolor.c8
-rw-r--r--drivers/leds/rgb/leds-ncp5623.c271
-rw-r--r--drivers/leds/rgb/leds-qcom-lpg.c366
-rw-r--r--drivers/leds/trigger/ledtrig-audio.c2
-rw-r--r--drivers/leds/trigger/ledtrig-default-on.c1
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c102
-rw-r--r--drivers/leds/trigger/ledtrig-panic.c23
-rw-r--r--drivers/macintosh/adb.c10
-rw-r--r--drivers/macintosh/macio_asic.c2
-rw-r--r--drivers/macintosh/rack-meter.c4
-rw-r--r--drivers/macintosh/therm_windtunnel.c6
-rw-r--r--drivers/macintosh/windfarm_pm112.c6
-rw-r--r--drivers/macintosh/windfarm_pm121.c5
-rw-r--r--drivers/macintosh/windfarm_pm72.c7
-rw-r--r--drivers/macintosh/windfarm_pm81.c8
-rw-r--r--drivers/macintosh/windfarm_pm91.c8
-rw-r--r--drivers/macintosh/windfarm_rm31.c7
-rw-r--r--drivers/mcb/mcb-core.c4
-rw-r--r--drivers/md/Kconfig1
-rw-r--r--drivers/md/bcache/sysfs.c8
-rw-r--r--drivers/md/dm-integrity.c20
-rw-r--r--drivers/md/dm-snap.c4
-rw-r--r--drivers/md/dm-vdo/murmurhash3.c33
-rw-r--r--drivers/md/dm.c17
-rw-r--r--drivers/md/raid1.c2
-rw-r--r--drivers/media/cec/core/cec-adap.c14
-rw-r--r--drivers/media/cec/core/cec-core.c2
-rw-r--r--drivers/media/cec/platform/cros-ec/cros-ec-cec.c2
-rw-r--r--drivers/media/common/siano/smscoreapi.c2
-rw-r--r--drivers/media/common/siano/smsdvb-main.c2
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c52
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c25
-rw-r--r--drivers/media/dvb-core/dvbdev.c5
-rw-r--r--drivers/media/dvb-frontends/bcm3510.c3
-rw-r--r--drivers/media/dvb-frontends/bcm3510_priv.h6
-rw-r--r--drivers/media/dvb-frontends/cx24110.c4
-rw-r--r--drivers/media/dvb-frontends/cx24110.h8
-rw-r--r--drivers/media/dvb-frontends/cx24117.c2
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c6
-rw-r--r--drivers/media/dvb-frontends/stv0367.c34
-rw-r--r--drivers/media/dvb-frontends/stv6110x_priv.h8
-rw-r--r--drivers/media/dvb-frontends/tda8083.h8
-rw-r--r--drivers/media/dvb-frontends/zl10036.c2
-rw-r--r--drivers/media/dvb-frontends/zl10036.h2
-rw-r--r--drivers/media/i2c/Kconfig2
-rw-r--r--drivers/media/i2c/adv7180.c4
-rw-r--r--drivers/media/i2c/adv7343.c2
-rw-r--r--drivers/media/i2c/adv748x/adv748x.h1
-rw-r--r--drivers/media/i2c/adv7604.c4
-rw-r--r--drivers/media/i2c/alvium-csi2.c101
-rw-r--r--drivers/media/i2c/alvium-csi2.h5
-rw-r--r--drivers/media/i2c/ar0521.c6
-rw-r--r--drivers/media/i2c/ccs/ccs-quirk.h8
-rw-r--r--drivers/media/i2c/dw9714.c2
-rw-r--r--drivers/media/i2c/imx214.c2
-rw-r--r--drivers/media/i2c/imx274.c2
-rw-r--r--drivers/media/i2c/imx290.c16
-rw-r--r--drivers/media/i2c/imx319.c53
-rw-r--r--drivers/media/i2c/imx334.c41
-rw-r--r--drivers/media/i2c/imx335.c251
-rw-r--r--drivers/media/i2c/imx355.c53
-rw-r--r--drivers/media/i2c/imx415.c672
-rw-r--r--drivers/media/i2c/isl7998x.c2
-rw-r--r--drivers/media/i2c/max2175.c2
-rw-r--r--drivers/media/i2c/msp3400-driver.c22
-rw-r--r--drivers/media/i2c/msp3400-driver.h2
-rw-r--r--drivers/media/i2c/mt9p031.c2
-rw-r--r--drivers/media/i2c/mt9v032.c4
-rw-r--r--drivers/media/i2c/ov08x40.c1307
-rw-r--r--drivers/media/i2c/ov2659.c2
-rw-r--r--drivers/media/i2c/ov5645.c7
-rw-r--r--drivers/media/i2c/ov5647.c2
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c2
-rw-r--r--drivers/media/i2c/s5k5baf.c2
-rw-r--r--drivers/media/i2c/st-vgxy61.c392
-rw-r--r--drivers/media/i2c/tc358743.c2
-rw-r--r--drivers/media/i2c/tc358746.c4
-rw-r--r--drivers/media/i2c/tda1997x.c2
-rw-r--r--drivers/media/i2c/tvp514x.c2
-rw-r--r--drivers/media/i2c/tvp5150.c2
-rw-r--r--drivers/media/i2c/tvp7002.c2
-rw-r--r--drivers/media/mc/mc-devnode.c3
-rw-r--r--drivers/media/mc/mc-entity.c93
-rw-r--r--drivers/media/pci/bt8xx/bttv-gpio.c2
-rw-r--r--drivers/media/pci/bt8xx/bttvp.h2
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c8
-rw-r--r--drivers/media/pci/dt3155/dt3155.h1
-rw-r--r--drivers/media/pci/intel/ipu-bridge.c26
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c53
-rw-r--r--drivers/media/pci/intel/ivsc/mei_csi.c87
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c9
-rw-r--r--drivers/media/pci/ttpci/budget-av.c8
-rw-r--r--drivers/media/platform/amphion/vdec.c4
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c4
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c19
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-hw.c2
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c2
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu.c6
-rw-r--r--drivers/media/platform/intel/pxa_camera.c2
-rw-r--r--drivers/media/platform/marvell/Kconfig2
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h1
-rw-r--r--drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c2
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_cfg_data.c729
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_reg_aal.h25
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_reg_color.h31
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_reg_fg.h23
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_reg_hdr.h31
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_reg_merge.h25
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_reg_ovl.h25
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_reg_pad.h21
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_reg_rdma.h24
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_reg_rsz.h2
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_reg_tdshp.h34
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_reg_wrot.h8
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_sm_mt8195.h283
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h4
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cfg.h2
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c440
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h1
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c895
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h93
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c142
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h50
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c15
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c18
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.h1
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c3
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c18
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.h1
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c14
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c1
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c11
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.h1
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.h1
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h2
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c2
-rw-r--r--drivers/media/platform/mediatek/vpu/mtk_vpu.c2
-rw-r--r--drivers/media/platform/mediatek/vpu/mtk_vpu.h2
-rw-r--r--drivers/media/platform/nuvoton/npcm-video.c6
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c16
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h1
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c6
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c4
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c8
-rw-r--r--drivers/media/platform/qcom/venus/core.h1
-rw-r--r--drivers/media/platform/renesas/Kconfig16
-rw-r--r--drivers/media/platform/renesas/Makefile1
-rw-r--r--drivers/media/platform/renesas/rcar-csi2.c (renamed from drivers/media/platform/renesas/rcar-vin/rcar-csi2.c)0
-rw-r--r--drivers/media/platform/renesas/rcar-isp.c1
-rw-r--r--drivers/media/platform/renesas/rcar-vin/Kconfig16
-rw-r--r--drivers/media/platform/renesas/rcar-vin/Makefile1
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h3
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c37
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c18
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c83
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c216
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.h35
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c71
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c131
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h36
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c19
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-capture.c52
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-core.c23
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-core.h23
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-lite-reg.c13
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-lite-reg.h12
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-lite.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-lite.h3
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-m2m.c23
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-reg.c38
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-reg.h10
-rw-r--r--drivers/media/platform/samsung/exynos4-is/mipi-csis.c3
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c76
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd.c8
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd.h2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v5.c6
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v5.h2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c8
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.h2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h15
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c26
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c20
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.h3
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c12
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.h3
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.c7
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.c28
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.h2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c36
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.h2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.c51
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.h8
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmi.c4
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c6
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c69
-rw-r--r--drivers/media/platform/ti/davinci/vpif.c3
-rw-r--r--drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c35
-rw-r--r--drivers/media/platform/verisilicon/Kconfig14
-rw-r--r--drivers/media/platform/verisilicon/Makefile3
-rw-r--r--drivers/media/platform/verisilicon/hantro.h1
-rw-r--r--drivers/media/platform/verisilicon/hantro_drv.c4
-rw-r--r--drivers/media/platform/verisilicon/hantro_g1_h264_dec.c2
-rw-r--r--drivers/media/platform/verisilicon/hantro_hw.h2
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu2_hw_h264_dec.c2
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu981_regs.h2
-rw-r--r--drivers/media/platform/verisilicon/stm32mp25_vpu_hw.c186
-rw-r--r--drivers/media/platform/xilinx/Kconfig4
-rw-r--r--drivers/media/test-drivers/vicodec/codec-fwht.c2
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_bridge.c26
-rw-r--r--drivers/media/test-drivers/visl/visl-core.c15
-rw-r--r--drivers/media/test-drivers/visl/visl-dec.c301
-rw-r--r--drivers/media/test-drivers/visl/visl.h1
-rw-r--r--drivers/media/tuners/tda18271-fe.c1
-rw-r--r--drivers/media/tuners/xc4000.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h2
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c4
-rw-r--r--drivers/media/usb/go7007/go7007-driver.c8
-rw-r--r--drivers/media/usb/go7007/go7007-usb.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-context.c10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-dvb.c12
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c11
-rw-r--r--drivers/media/usb/s2255/s2255drv.c7
-rw-r--r--drivers/media/usb/siano/smsusb.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-cci.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c47
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-api.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c23
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-mc.c23
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c10
-rw-r--r--drivers/memory/tegra/mc.c2
-rw-r--r--drivers/memory/tegra/tegra124-emc.c2
-rw-r--r--drivers/memory/tegra/tegra124.c2
-rw-r--r--drivers/memory/tegra/tegra186-emc.c2
-rw-r--r--drivers/memory/tegra/tegra20-emc.c2
-rw-r--r--drivers/memory/tegra/tegra20.c2
-rw-r--r--drivers/memory/tegra/tegra30-emc.c2
-rw-r--r--drivers/memory/tegra/tegra30.c2
-rw-r--r--drivers/message/fusion/mptfc.c4
-rw-r--r--drivers/mfd/Kconfig1
-rw-r--r--drivers/mfd/ac100.c2
-rw-r--r--drivers/mfd/altera-sysmgr.c4
-rw-r--r--drivers/mfd/as3711.c2
-rw-r--r--drivers/mfd/as3722.c2
-rw-r--r--drivers/mfd/axp20x.c4
-rw-r--r--drivers/mfd/bcm590xx.c4
-rw-r--r--drivers/mfd/bd9571mwv.c4
-rw-r--r--drivers/mfd/cros_ec_dev.c18
-rw-r--r--drivers/mfd/cs42l43-i2c.c15
-rw-r--r--drivers/mfd/cs42l43-sdw.c15
-rw-r--r--drivers/mfd/cs42l43.c124
-rw-r--r--drivers/mfd/cs42l43.h10
-rw-r--r--drivers/mfd/da9052-core.c2
-rw-r--r--drivers/mfd/da9055-core.c2
-rw-r--r--drivers/mfd/da9062-core.c4
-rw-r--r--drivers/mfd/da9063-i2c.c2
-rw-r--r--drivers/mfd/da9150-core.c2
-rw-r--r--drivers/mfd/intel-lpss-pci.c28
-rw-r--r--drivers/mfd/intel-lpss.c9
-rw-r--r--drivers/mfd/intel-lpss.h14
-rw-r--r--drivers/mfd/kempld-core.c37
-rw-r--r--drivers/mfd/khadas-mcu.c2
-rw-r--r--drivers/mfd/lochnagar-i2c.c4
-rw-r--r--drivers/mfd/lpc_ich.c3
-rw-r--r--drivers/mfd/mc13xxx-core.c22
-rw-r--r--drivers/mfd/mcp-core.c2
-rw-r--r--drivers/mfd/mfd-core.c2
-rw-r--r--drivers/mfd/mt6397-core.c3
-rw-r--r--drivers/mfd/omap-usb-host.c2
-rw-r--r--drivers/mfd/rave-sp.c6
-rw-r--r--drivers/mfd/rc5t583.c2
-rw-r--r--drivers/mfd/rk8xx-core.c2
-rw-r--r--drivers/mfd/rk8xx-spi.c2
-rw-r--r--drivers/mfd/rn5t618.c2
-rw-r--r--drivers/mfd/rohm-bd71828.c4
-rw-r--r--drivers/mfd/rohm-bd718x7.c2
-rw-r--r--drivers/mfd/rohm-bd9576.c2
-rw-r--r--drivers/mfd/rsmu_i2c.c2
-rw-r--r--drivers/mfd/si476x-prop.c2
-rw-r--r--drivers/mfd/stmfx.c2
-rw-r--r--drivers/mfd/stpmic1.c2
-rw-r--r--drivers/mfd/syscon.c4
-rw-r--r--drivers/mfd/twl-core.c28
-rw-r--r--drivers/mfd/twl4030-power.c3
-rw-r--r--drivers/mfd/wm5102-tables.c2
-rw-r--r--drivers/mfd/wm5110-tables.c2
-rw-r--r--drivers/mfd/wm831x-auxadc.c43
-rw-r--r--drivers/mfd/wm8350-regmap.c2
-rw-r--r--drivers/mfd/wm8400-core.c2
-rw-r--r--drivers/mfd/wm97xx-core.c6
-rw-r--r--drivers/misc/atmel-ssc.c6
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c2
-rw-r--r--drivers/misc/cxl/of.c5
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c1
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c6
-rw-r--r--drivers/misc/fastrpc.c6
-rw-r--r--drivers/misc/hi6421v600-irq.c1
-rw-r--r--drivers/misc/hisi_hikey_usb.c7
-rw-r--r--drivers/misc/hpilo.c8
-rw-r--r--drivers/misc/lkdtm/bugs.c2
-rw-r--r--drivers/misc/mei/gsc-me.c22
-rw-r--r--drivers/misc/mei/hdcp/Kconfig2
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c14
-rw-r--r--drivers/misc/mei/pci-me.c40
-rw-r--r--drivers/misc/mei/pci-txe.c40
-rw-r--r--drivers/misc/mei/platform-vsc.c23
-rw-r--r--drivers/misc/mei/pxp/Kconfig2
-rw-r--r--drivers/misc/mei/pxp/mei_pxp.c14
-rw-r--r--drivers/misc/mei/vsc-tp.c113
-rw-r--r--drivers/misc/mei/vsc-tp.h3
-rw-r--r--drivers/misc/open-dice.c5
-rw-r--r--drivers/misc/sgi-gru/grufault.c2
-rw-r--r--drivers/misc/sram.c6
-rw-r--r--drivers/misc/ti-st/st_kim.c5
-rw-r--r--drivers/misc/tifm_core.c2
-rw-r--r--drivers/misc/vcpu_stall_detector.c6
-rw-r--r--drivers/misc/xilinx_sdfec.c5
-rw-r--r--drivers/misc/xilinx_tmr_inject.c5
-rw-r--r--drivers/mmc/core/block.c4
-rw-r--r--drivers/mmc/host/omap.c48
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c28
-rw-r--r--drivers/mmc/host/sdhci-omap.c3
-rw-r--r--drivers/most/core.c2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c4
-rw-r--r--drivers/mtd/devices/block2mtd.c2
-rw-r--r--drivers/mtd/maps/Kconfig7
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c265
-rw-r--r--drivers/mtd/maps/physmap-core.c2
-rw-r--r--drivers/mtd/maps/sun_uflash.c2
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/Makefile2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c99
-rw-r--r--drivers/mtd/nand/raw/brcmnand/bcmbca_nand.c126
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c148
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.h2
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c3
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c5
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c2
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c2
-rw-r--r--drivers/mtd/nand/raw/nand_base.c88
-rw-r--r--drivers/mtd/nand/raw/nand_bbt.c1
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c1
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c83
-rw-r--r--drivers/mtd/nand/spi/esmt.c9
-rw-r--r--drivers/mtd/nand/spi/winbond.c12
-rw-r--r--drivers/mtd/spi-nor/core.c187
-rw-r--r--drivers/mtd/spi-nor/core.h30
-rw-r--r--drivers/mtd/spi-nor/debugfs.c26
-rw-r--r--drivers/mtd/spi-nor/sfdp.c47
-rw-r--r--drivers/mtd/ssfdc.c7
-rw-r--r--drivers/mtd/ubi/Kconfig13
-rw-r--r--drivers/mtd/ubi/Makefile1
-rw-r--r--drivers/mtd/ubi/block.c136
-rw-r--r--drivers/mtd/ubi/build.c154
-rw-r--r--drivers/mtd/ubi/eba.c7
-rw-r--r--drivers/mtd/ubi/fastmap.c7
-rw-r--r--drivers/mtd/ubi/kapi.c56
-rw-r--r--drivers/mtd/ubi/nvmem.c191
-rw-r--r--drivers/mtd/ubi/ubi.h3
-rw-r--r--drivers/mtd/ubi/vmt.c75
-rw-r--r--drivers/mtd/ubi/vtbl.c6
-rw-r--r--drivers/net/can/kvaser_pciefd.c4
-rw-r--r--drivers/net/dsa/mt7530.c271
-rw-r--r--drivers/net/dsa/mt7530.h32
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c6
-rw-r--r--drivers/net/dsa/sja1105/sja1105_mdio.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c35
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_xdp.c4
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.c13
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.h2
-rw-r--r--drivers/net/ethernet/amd/pds_core/dev.c3
-rw-r--r--drivers/net/ethernet/amd/pds_core/main.c1
-rw-r--r--drivers/net/ethernet/apple/bmac.c4
-rw-r--r--drivers/net/ethernet/apple/mace.c4
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c43
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c6
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c25
-rw-r--r--drivers/net/ethernet/broadcom/cnic.h1
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c16
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h8
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c38
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c18
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c182
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c13
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c82
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c45
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c34
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c18
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c43
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c31
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c22
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c121
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c71
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.c1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c7
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c18
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/selq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c31
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h3
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c16
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c11
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c11
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c18
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c61
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.h6
-rw-r--r--drivers/net/ethernet/realtek/r8169_leds.c35
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c49
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c92
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c47
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c56
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c29
-rw-r--r--drivers/net/ethernet/sun/cassini.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c18
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c8
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/geneve.c4
-rw-r--r--drivers/net/hyperv/netvsc.c7
-rw-r--r--drivers/net/phy/micrel.c31
-rw-r--r--drivers/net/phy/phy_device.c4
-rw-r--r--drivers/net/phy/qcom/at803x.c4
-rw-r--r--drivers/net/ppp/pptp.c2
-rw-r--r--drivers/net/tun.c18
-rw-r--r--drivers/net/usb/ax88179_178a.c6
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/veth.c18
-rw-r--r--drivers/net/virtio_net.c177
-rw-r--r--drivers/net/vmxnet3/vmxnet3_xdp.c6
-rw-r--r--drivers/net/wan/fsl_qmc_hdlc.c2
-rw-r--r--drivers/net/wireguard/device.c11
-rw-r--r--drivers/net/wireguard/netlink.c10
-rw-r--r--drivers/net/wireguard/receive.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rfi.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_cldma.c4
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c9
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pcie_mac.c8
-rw-r--r--drivers/net/xen-netfront.c1
-rw-r--r--drivers/nfc/pn533/uart.c4
-rw-r--r--drivers/nfc/s3fwrn5/uart.c4
-rw-r--r--drivers/ntb/core.c8
-rw-r--r--drivers/nvdimm/Kconfig2
-rw-r--r--drivers/nvdimm/bus.c2
-rw-r--r--drivers/nvdimm/pmem.c23
-rw-r--r--drivers/nvme/host/apple.c6
-rw-r--r--drivers/nvme/host/core.c52
-rw-r--r--drivers/nvme/host/fabrics.h7
-rw-r--r--drivers/nvme/host/fc.c4
-rw-r--r--drivers/nvme/host/nvme.h12
-rw-r--r--drivers/nvme/host/pci.c3
-rw-r--r--drivers/nvme/host/pr.c3
-rw-r--r--drivers/nvme/host/sysfs.c3
-rw-r--r--drivers/nvme/host/tcp.c21
-rw-r--r--drivers/nvme/host/trace.c105
-rw-r--r--drivers/nvme/host/zns.c33
-rw-r--r--drivers/nvme/target/configfs.c47
-rw-r--r--drivers/nvme/target/core.c7
-rw-r--r--drivers/nvme/target/fc.c17
-rw-r--r--drivers/nvme/target/rdma.c8
-rw-r--r--drivers/nvme/target/tcp.c1
-rw-r--r--drivers/nvme/target/trace.c98
-rw-r--r--drivers/nvmem/core.c5
-rw-r--r--drivers/nvmem/layouts.c2
-rw-r--r--drivers/nvmem/meson-efuse.c25
-rw-r--r--drivers/nvmem/mtk-efuse.c20
-rw-r--r--drivers/nvmem/zynqmp_nvmem.c215
-rw-r--r--drivers/of/.kunitconfig3
-rw-r--r--drivers/of/Kconfig14
-rw-r--r--drivers/of/Makefile4
-rw-r--r--drivers/of/base.c130
-rw-r--r--drivers/of/dynamic.c12
-rw-r--r--drivers/of/empty_root.dts6
-rw-r--r--drivers/of/fdt.c187
-rw-r--r--drivers/of/kexec.c2
-rw-r--r--drivers/of/module.c8
-rw-r--r--drivers/of/of_private.h5
-rw-r--r--drivers/of/of_reserved_mem.c125
-rw-r--r--drivers/of/of_test.c57
-rw-r--r--drivers/of/platform.c5
-rw-r--r--drivers/of/property.c29
-rw-r--r--drivers/of/unittest.c27
-rw-r--r--drivers/parisc/led.c6
-rw-r--r--drivers/parport/parport_amiga.c5
-rw-r--r--drivers/parport/parport_sunbpp.c6
-rw-r--r--drivers/pci/Kconfig5
-rw-r--r--drivers/pci/Makefile7
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c14
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h6
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c630
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c12
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c5
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c21
-rw-r--r--drivers/pci/controller/dwc/pcie-keembay.c8
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c42
-rw-r--r--drivers/pci/controller/dwc/pcie-rcar-gen4.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier-ep.c15
-rw-r--r--drivers/pci/controller/pci-hyperv.c3
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c2
-rw-r--r--drivers/pci/controller/pcie-rcar-ep.c14
-rw-r--r--drivers/pci/devres.c448
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-mhi.c21
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-ntb.c6
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c21
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c25
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c25
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c20
-rw-r--r--drivers/pci/iomap.c177
-rw-r--r--drivers/pci/irq.c204
-rw-r--r--drivers/pci/mmap.c29
-rw-r--r--drivers/pci/p2pdma.c2
-rw-r--r--drivers/pci/pci-driver.c23
-rw-r--r--drivers/pci/pci-sysfs.c167
-rw-r--r--drivers/pci/pci.c496
-rw-r--r--drivers/pci/pci.h55
-rw-r--r--drivers/pci/pcie/Makefile2
-rw-r--r--drivers/pci/pcie/aer.c20
-rw-r--r--drivers/pci/pcie/aspm.c268
-rw-r--r--drivers/pci/pcie/dpc.c76
-rw-r--r--drivers/pci/pcie/err.c20
-rw-r--r--drivers/pci/pcie/portdrv.h2
-rw-r--r--drivers/pci/probe.c66
-rw-r--r--drivers/pci/quirks.c3
-rw-r--r--drivers/pci/setup-irq.c64
-rw-r--r--drivers/pci/switch/switchtec.c4
-rw-r--r--drivers/pcmcia/cs.c2
-rw-r--r--drivers/pcmcia/cs_internal.h4
-rw-r--r--drivers/pcmcia/ds.c2
-rw-r--r--drivers/perf/Kconfig24
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/alibaba_uncore_drw_pmu.c6
-rw-r--r--drivers/perf/amlogic/meson_g12_ddr_pmu.c6
-rw-r--r--drivers/perf/arm-cci.c8
-rw-r--r--drivers/perf/arm-ccn.c6
-rw-r--r--drivers/perf/arm-cmn.c14
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.c159
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.h1
-rw-r--r--drivers/perf/arm_cspmu/nvidia_cspmu.c6
-rw-r--r--drivers/perf/arm_dmc620_pmu.c6
-rw-r--r--drivers/perf/arm_dsu_pmu.c6
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c6
-rw-r--r--drivers/perf/arm_spe_pmu.c5
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c5
-rw-r--r--drivers/perf/fsl_imx9_ddr_perf.c6
-rw-r--r--drivers/perf/hisilicon/hisi_pcie_pmu.c102
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c5
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c5
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c5
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c5
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pa_pmu.c5
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c5
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_uc_pmu.c42
-rw-r--r--drivers/perf/marvell_cn10k_ddr_pmu.c5
-rw-r--r--drivers/perf/marvell_cn10k_tad_pmu.c6
-rw-r--r--drivers/perf/qcom_l2_pmu.c5
-rw-r--r--drivers/perf/riscv_pmu.c4
-rw-r--r--drivers/perf/riscv_pmu_sbi.c37
-rw-r--r--drivers/perf/starfive_starlink_pmu.c642
-rw-r--r--drivers/perf/thunderx2_pmu.c5
-rw-r--r--drivers/perf/xgene_pmu.c6
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c2
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm-sr-pcie.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm-sr-usb.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm63xx-usbh.c2
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c2
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c720
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c2
-rw-r--r--drivers/phy/freescale/phy-fsl-lynx-28g.c2
-rw-r--r--drivers/phy/hisilicon/phy-histb-combphy.c2
-rw-r--r--drivers/phy/intel/phy-intel-lgm-combo.c2
-rw-r--r--drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c2
-rw-r--r--drivers/phy/marvell/phy-armada375-usb2.c2
-rw-r--r--drivers/phy/marvell/phy-armada38x-comphy.c9
-rw-r--r--drivers/phy/marvell/phy-berlin-sata.c2
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-comphy.c2
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c2
-rw-r--r--drivers/phy/mediatek/Kconfig12
-rw-r--r--drivers/phy/mediatek/Makefile2
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-csi-0-5-rx-reg.h62
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-csi-0-5.c294
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c2
-rw-r--r--drivers/phy/mediatek/phy-mtk-xsphy.c2
-rw-r--r--drivers/phy/microchip/lan966x_serdes.c2
-rw-r--r--drivers/phy/microchip/sparx5_serdes.c2
-rw-r--r--drivers/phy/mscc/phy-ocelot-serdes.c2
-rw-r--r--drivers/phy/phy-core.c55
-rw-r--r--drivers/phy/phy-xgene.c2
-rw-r--r--drivers/phy/qualcomm/Makefile2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-edp.c3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c111
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-common.h59
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-dp-com-v3.h18
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v3.h21
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v4.h19
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h13
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h13
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-dp-phy.h62
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c70
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c288
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6_20.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-sgmii.h20
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6_20.h1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h8
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_20.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c305
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c76
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c422
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usbc.c1149
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h111
-rw-r--r--drivers/phy/qualcomm/phy-qcom-sgmii-eth.c441
-rw-r--r--drivers/phy/ralink/phy-mt7621-pci.c2
-rw-r--r--drivers/phy/realtek/Kconfig32
-rw-r--r--drivers/phy/realtek/Makefile3
-rw-r--r--drivers/phy/realtek/phy-rtk-usb2.c1312
-rw-r--r--drivers/phy/realtek/phy-rtk-usb3.c748
-rw-r--r--drivers/phy/renesas/phy-rcar-gen2.c2
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c2
-rw-r--r--drivers/phy/renesas/r8a779f0-ether-serdes.c2
-rw-r--r--drivers/phy/rockchip/Kconfig8
-rw-r--r--drivers/phy/rockchip/Makefile1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-naneng-combphy.c2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-pcie.c2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c1028
-rw-r--r--drivers/phy/samsung/phy-exynos-mipi-video.c2
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c2
-rw-r--r--drivers/phy/samsung/phy-samsung-usb2.c2
-rw-r--r--drivers/phy/socionext/phy-uniphier-usb2.c2
-rw-r--r--drivers/phy/st/phy-miphy28lp.c2
-rw-r--r--drivers/phy/st/phy-spear1310-miphy.c2
-rw-r--r--drivers/phy/st/phy-spear1340-miphy.c2
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c2
-rw-r--r--drivers/phy/tegra/xusb.c15
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c2
-rw-r--r--drivers/phy/ti/phy-da8xx-usb.c2
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c26
-rw-r--r--drivers/phy/ti/phy-tusb1210.c57
-rw-r--r--drivers/phy/xilinx/phy-zynqmp.c2
-rw-r--r--drivers/pinctrl/Kconfig18
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/aspeed/Makefile2
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-cs42l43.c18
-rw-r--r--drivers/pinctrl/core.c4
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7981.c24
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7986.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8186.c1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8192.c1
-rw-r--r--drivers/pinctrl/nomadik/Kconfig8
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c3
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c3
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c955
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.h180
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-wpcm450.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c2
-rw-r--r--drivers/pinctrl/pinctrl-aw9523.c1119
-rw-r--r--drivers/pinctrl/pinctrl-da9062.c7
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c15
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c1
-rw-r--r--drivers/pinctrl/pinctrl-st.c3
-rw-r--r--drivers/pinctrl/pinctrl-zynqmp.c8
-rw-r--r--drivers/pinctrl/pinmux.c6
-rw-r--r--drivers/pinctrl/qcom/Kconfig2
-rw-r--r--drivers/pinctrl/renesas/Kconfig7
-rw-r--r--drivers/pinctrl/renesas/Makefile1
-rw-r--r--drivers/pinctrl/renesas/core.c14
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779g0.c14
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779h0.c3967
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c790
-rw-r--r--drivers/pinctrl/renesas/sh_pfc.h1
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra-xusb.c2
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c19
-rw-r--r--drivers/platform/chrome/cros_ec_uart.c32
-rw-r--r--drivers/platform/goldfish/Kconfig1
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.c14
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c267
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c14
-rw-r--r--drivers/platform/surface/aggregator/core.c4
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c7
-rw-r--r--drivers/platform/x86/Kconfig7
-rw-r--r--drivers/platform/x86/acer-wmi.c26
-rw-r--r--drivers/platform/x86/amd/Kconfig2
-rw-r--r--drivers/platform/x86/amd/hsmp.c584
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c9
-rw-r--r--drivers/platform/x86/amd/pmf/Makefile2
-rw-r--r--drivers/platform/x86/amd/pmf/acpi.c143
-rw-r--r--drivers/platform/x86/amd/pmf/core.c16
-rw-r--r--drivers/platform/x86/amd/pmf/pmf-quirks.c51
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h96
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c145
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c23
-rw-r--r--drivers/platform/x86/asus-wmi.c84
-rw-r--r--drivers/platform/x86/compal-laptop.c2
-rw-r--r--drivers/platform/x86/dell/Kconfig3
-rw-r--r--drivers/platform/x86/dell/dell-laptop.c2
-rw-r--r--drivers/platform/x86/dell/dell-wmi-ddv.c1
-rw-r--r--drivers/platform/x86/dell/dell-wmi-privacy.c5
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/sysman.c2
-rw-r--r--drivers/platform/x86/firmware_attributes_class.c4
-rw-r--r--drivers/platform/x86/firmware_attributes_class.h2
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c117
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/bioscfg.c2
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c71
-rw-r--r--drivers/platform/x86/huawei-wmi.c1
-rw-r--r--drivers/platform/x86/ibm_rtl.c2
-rw-r--r--drivers/platform/x86/ideapad-laptop.c4
-rw-r--r--drivers/platform/x86/intel/hid.c9
-rw-r--r--drivers/platform/x86/intel/ifs/load.c2
-rw-r--r--drivers/platform/x86/intel/ifs/runtest.c101
-rw-r--r--drivers/platform/x86/intel/oaktrail.c2
-rw-r--r--drivers/platform/x86/intel/pmc/arl.c2
-rw-r--r--drivers/platform/x86/intel/pmc/core.c47
-rw-r--r--drivers/platform/x86/intel/pmc/core.h7
-rw-r--r--drivers/platform/x86/intel/pmc/lnl.c40
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c1
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c4
-rw-r--r--drivers/platform/x86/intel/tpmi.c9
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c4
-rw-r--r--drivers/platform/x86/intel/vbtn.c11
-rw-r--r--drivers/platform/x86/intel/vsec.c5
-rw-r--r--drivers/platform/x86/intel/wmi/sbl-fw-update.c1
-rw-r--r--drivers/platform/x86/intel/wmi/thunderbolt.c1
-rw-r--r--drivers/platform/x86/intel_scu_ipcutil.c2
-rw-r--r--drivers/platform/x86/intel_scu_pcidrv.c1
-rw-r--r--drivers/platform/x86/intel_scu_wdt.c1
-rw-r--r--drivers/platform/x86/lg-laptop.c2
-rw-r--r--drivers/platform/x86/mlx-platform.c2
-rw-r--r--drivers/platform/x86/pmc_atom.c79
-rw-r--r--drivers/platform/x86/serial-multi-instantiate.c28
-rw-r--r--drivers/platform/x86/silicom-platform.c7
-rw-r--r--drivers/platform/x86/think-lmi.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c253
-rw-r--r--drivers/platform/x86/toshiba_acpi.c4
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c9
-rw-r--r--drivers/platform/x86/wmi-bmof.c1
-rw-r--r--drivers/platform/x86/wmi.c226
-rw-r--r--drivers/power/reset/as3722-poweroff.c30
-rw-r--r--drivers/power/reset/atc260x-poweroff.c55
-rw-r--r--drivers/power/reset/axxia-reset.c16
-rw-r--r--drivers/power/reset/brcm-kona-reset.c11
-rw-r--r--drivers/power/reset/gemini-poweroff.c16
-rw-r--r--drivers/power/reset/msm-poweroff.c21
-rw-r--r--drivers/power/reset/mt6323-poweroff.c26
-rw-r--r--drivers/power/reset/regulator-poweroff.c36
-rw-r--r--drivers/power/reset/restart-poweroff.c25
-rw-r--r--drivers/power/reset/rmobile-reset.c38
-rw-r--r--drivers/power/reset/syscon-poweroff.c66
-rw-r--r--drivers/power/reset/tps65086-restart.c58
-rw-r--r--drivers/power/reset/xgene-reboot.c25
-rw-r--r--drivers/power/supply/ab8500_btemp.c3
-rw-r--r--drivers/power/supply/ab8500_chargalg.c3
-rw-r--r--drivers/power/supply/ab8500_charger.c3
-rw-r--r--drivers/power/supply/ab8500_fg.c3
-rw-r--r--drivers/power/supply/apm_power.c3
-rw-r--r--drivers/power/supply/axp20x_usb_power.c147
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c18
-rw-r--r--drivers/power/supply/bq2415x_charger.c10
-rw-r--r--drivers/power/supply/bq27xxx_battery.c56
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c46
-rw-r--r--drivers/power/supply/da9030_battery.c6
-rw-r--r--drivers/power/supply/da9052-battery.c4
-rw-r--r--drivers/power/supply/da9150-charger.c72
-rw-r--r--drivers/power/supply/ds2760_battery.c4
-rw-r--r--drivers/power/supply/goldfish_battery.c24
-rw-r--r--drivers/power/supply/lp8727_charger.c35
-rw-r--r--drivers/power/supply/lp8788-charger.c21
-rw-r--r--drivers/power/supply/max14577_charger.c8
-rw-r--r--drivers/power/supply/max77693_charger.c10
-rw-r--r--drivers/power/supply/max8925_power.c37
-rw-r--r--drivers/power/supply/mm8013.c13
-rw-r--r--drivers/power/supply/pcf50633-charger.c23
-rw-r--r--drivers/power/supply/power_supply.h6
-rw-r--r--drivers/power/supply/power_supply_core.c65
-rw-r--r--drivers/power/supply/power_supply_sysfs.c40
-rw-r--r--drivers/power/supply/rt5033_battery.c14
-rw-r--r--drivers/power/supply/rx51_battery.c57
-rw-r--r--drivers/power/supply/tps65090-charger.c18
-rw-r--r--drivers/power/supply/twl4030_madc_battery.c59
-rw-r--r--drivers/power/supply/wm831x_backup.c13
-rw-r--r--drivers/power/supply/wm831x_power.c24
-rw-r--r--drivers/power/supply/wm8350_power.c30
-rw-r--r--drivers/powercap/intel_rapl_msr.c5
-rw-r--r--drivers/pps/generators/Makefile4
-rw-r--r--drivers/pwm/core.c2
-rw-r--r--drivers/pwm/pwm-dwc-core.c1
-rw-r--r--drivers/pwm/pwm-dwc.c88
-rw-r--r--drivers/pwm/pwm-dwc.h6
-rw-r--r--drivers/pwm/pwm-img.c4
-rw-r--r--drivers/ras/amd/fmpm.c57
-rw-r--r--drivers/ras/debugfs.h4
-rw-r--r--drivers/regulator/Kconfig2
-rw-r--r--drivers/regulator/core.c11
-rw-r--r--drivers/regulator/tps65132-regulator.c7
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c11
-rw-r--r--drivers/remoteproc/imx_rproc.c16
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c14
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c28
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c326
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c24
-rw-r--r--drivers/remoteproc/qcom_wcnss.c17
-rw-r--r--drivers/remoteproc/remoteproc_core.c29
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c6
-rw-r--r--drivers/remoteproc/st_remoteproc.c15
-rw-r--r--drivers/remoteproc/stm32_rproc.c10
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c156
-rw-r--r--drivers/reset/Kconfig12
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/core.c224
-rw-r--r--drivers/reset/reset-gpio.c119
-rw-r--r--drivers/reset/reset-simple.c2
-rw-r--r--drivers/rpmsg/rpmsg_char.c12
-rw-r--r--drivers/rpmsg/rpmsg_core.c2
-rw-r--r--drivers/rpmsg/rpmsg_ctrl.c12
-rw-r--r--drivers/rtc/Kconfig3
-rw-r--r--drivers/rtc/class.c21
-rw-r--r--drivers/rtc/interface.c2
-rw-r--r--drivers/rtc/rtc-ds1511.c340
-rw-r--r--drivers/rtc/rtc-m41t80.c5
-rw-r--r--drivers/rtc/rtc-max31335.c2
-rw-r--r--drivers/rtc/rtc-nct3018y.c6
-rw-r--r--drivers/rtc/rtc-pcf8523.c25
-rw-r--r--drivers/s390/block/dasd.c4
-rw-r--r--drivers/s390/block/dasd_3990_erp.c14
-rw-r--r--drivers/s390/block/dasd_alias.c6
-rw-r--r--drivers/s390/block/dasd_eckd.c118
-rw-r--r--drivers/s390/block/dasd_eer.c2
-rw-r--r--drivers/s390/block/dasd_fba.c32
-rw-r--r--drivers/s390/block/dcssblk.c13
-rw-r--r--drivers/s390/block/scm_blk.c6
-rw-r--r--drivers/s390/char/con3215.c4
-rw-r--r--drivers/s390/char/fs3270.c14
-rw-r--r--drivers/s390/char/raw3270.c42
-rw-r--r--drivers/s390/char/raw3270.h2
-rw-r--r--drivers/s390/char/sclp_cmd.c44
-rw-r--r--drivers/s390/char/tape.h12
-rw-r--r--drivers/s390/char/tape_class.c17
-rw-r--r--drivers/s390/char/vmlogrdr.c18
-rw-r--r--drivers/s390/char/vmur.c22
-rw-r--r--drivers/s390/cio/ccwgroup.c4
-rw-r--r--drivers/s390/cio/chsc.c12
-rw-r--r--drivers/s390/cio/chsc.h6
-rw-r--r--drivers/s390/cio/cio.c4
-rw-r--r--drivers/s390/cio/css.c25
-rw-r--r--drivers/s390/cio/device.c13
-rw-r--r--drivers/s390/cio/device_fsm.c13
-rw-r--r--drivers/s390/cio/device_id.c2
-rw-r--r--drivers/s390/cio/device_ops.c13
-rw-r--r--drivers/s390/cio/device_pgid.c8
-rw-r--r--drivers/s390/cio/device_status.c2
-rw-r--r--drivers/s390/cio/eadm_sch.c4
-rw-r--r--drivers/s390/cio/fcx.c22
-rw-r--r--drivers/s390/cio/orb.h9
-rw-r--r--drivers/s390/cio/qdio_main.c40
-rw-r--r--drivers/s390/cio/qdio_setup.c10
-rw-r--r--drivers/s390/cio/qdio_thinint.c6
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c82
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c2
-rw-r--r--drivers/s390/crypto/zcrypt_api.c39
-rw-r--r--drivers/s390/net/ctcm_fsms.c4
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/ctcm_mpc.c20
-rw-r--r--drivers/s390/net/fsm.c2
-rw-r--r--drivers/s390/net/ism_drv.c37
-rw-r--r--drivers/s390/net/lcs.c12
-rw-r--r--drivers/s390/net/qeth_core_main.c62
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c4
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h6
-rw-r--r--drivers/s390/virtio/virtio_ccw.c170
-rw-r--r--drivers/sbus/char/bbc_i2c.c9
-rw-r--r--drivers/sbus/char/bbc_i2c.h3
-rw-r--r--drivers/sbus/char/display7seg.c6
-rw-r--r--drivers/sbus/char/envctrl.c6
-rw-r--r--drivers/sbus/char/flash.c6
-rw-r--r--drivers/sbus/char/openprom.c2
-rw-r--r--drivers/sbus/char/uctrl.c5
-rw-r--r--drivers/scsi/3w-9xxx.c44
-rw-r--r--drivers/scsi/3w-sas.c36
-rw-r--r--drivers/scsi/3w-xxxx.c44
-rw-r--r--drivers/scsi/53c700.c2
-rw-r--r--drivers/scsi/Kconfig14
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/aacraid/aachba.c6
-rw-r--r--drivers/scsi/bfa/bfa.h30
-rw-r--r--drivers/scsi/bfa/bfa_core.c4
-rw-r--r--drivers/scsi/bfa/bfa_cs.h21
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c51
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h66
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h312
-rw-r--r--drivers/scsi/bfa/bfa_fcs_fcpim.c23
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c112
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c34
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c85
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h84
-rw-r--r--drivers/scsi/bfa/bfa_svc.c72
-rw-r--r--drivers/scsi/bfa/bfa_svc.h115
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c11
-rw-r--r--drivers/scsi/bfa/bfad_drv.h31
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c2
-rw-r--r--drivers/scsi/ch.c47
-rw-r--r--drivers/scsi/csiostor/csio_defs.h18
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c8
-rw-r--r--drivers/scsi/csiostor/csio_lnode.h13
-rw-r--r--drivers/scsi/cxlflash/main.c17
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c49
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c84
-rw-r--r--drivers/scsi/esp_scsi.c2
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c4
-rw-r--r--drivers/scsi/fnic/fnic_attrs.c7
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c4
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c28
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c18
-rw-r--r--drivers/scsi/hosts.c9
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c22
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c24
-rw-r--r--drivers/scsi/isci/init.c2
-rw-r--r--drivers/scsi/jazz_esp.c2
-rw-r--r--drivers/scsi/libfc/fc_encode.h14
-rw-r--r--drivers/scsi/libsas/sas_expander.c53
-rw-r--r--drivers/scsi/lpfc/lpfc.h96
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c111
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c48
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c158
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c26
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c491
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c383
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c150
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c40
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c103
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c33
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c155
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h30
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c79
-rw-r--r--drivers/scsi/mac53c94.c5
-rw-r--r--drivers/scsi/megaraid.c2
-rw-r--r--drivers/scsi/mesh.c7
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c2
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c12
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c99
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h8
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c54
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h10
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c1
-rw-r--r--drivers/scsi/myrb.c20
-rw-r--r--drivers/scsi/myrs.c24
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c6
-rw-r--r--drivers/scsi/pmcraid.c20
-rw-r--r--drivers/scsi/qla1280.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c128
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c68
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qlogicpti.c2
-rw-r--r--drivers/scsi/scsi.c2
-rw-r--r--drivers/scsi/scsi_debug.c297
-rw-r--r--drivers/scsi/scsi_devinfo.c6
-rw-r--r--drivers/scsi/scsi_lib.c131
-rw-r--r--drivers/scsi/scsi_lib_test.c330
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_proto_test.c56
-rw-r--r--drivers/scsi/scsi_scan.c143
-rw-r--r--drivers/scsi/scsi_sysfs.c16
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c4
-rw-r--r--drivers/scsi/scsi_transport_spi.c35
-rw-r--r--drivers/scsi/sd.c356
-rw-r--r--drivers/scsi/sd.h3
-rw-r--r--drivers/scsi/ses.c66
-rw-r--r--drivers/scsi/sg.c38
-rw-r--r--drivers/scsi/sr.c38
-rw-r--r--drivers/scsi/st.c4
-rw-r--r--drivers/scsi/sun3x_esp.c2
-rw-r--r--drivers/scsi/sun_esp.c2
-rw-r--r--drivers/sh/intc/core.c2
-rw-r--r--drivers/sh/intc/internals.h2
-rw-r--r--drivers/siox/siox-bus-gpio.c62
-rw-r--r--drivers/siox/siox-core.c52
-rw-r--r--drivers/siox/siox.h4
-rw-r--r--drivers/slimbus/core.c6
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c8
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c2
-rw-r--r--drivers/soc/fsl/qbman/bman_ccsr.c27
-rw-r--r--drivers/soc/fsl/qbman/dpaa_sys.c12
-rw-r--r--drivers/soc/fsl/qbman/dpaa_sys.h4
-rw-r--r--drivers/soc/fsl/qbman/qman.c25
-rw-r--r--drivers/soc/fsl/qbman/qman_ccsr.c73
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c22
-rw-r--r--drivers/soundwire/Makefile2
-rw-r--r--drivers/soundwire/amd_init.c235
-rw-r--r--drivers/soundwire/amd_init.h13
-rw-r--r--drivers/soundwire/amd_manager.c47
-rw-r--r--drivers/soundwire/amd_manager.h16
-rw-r--r--drivers/soundwire/bus_type.c2
-rw-r--r--drivers/soundwire/dmi-quirks.c8
-rw-r--r--drivers/soundwire/intel_auxdevice.c2
-rw-r--r--drivers/soundwire/master.c2
-rw-r--r--drivers/soundwire/slave.c2
-rw-r--r--drivers/soundwire/stream.c6
-rw-r--r--drivers/spi/spi-cs42l43.c2
-rw-r--r--drivers/spi/spi-fsl-lpspi.c22
-rw-r--r--drivers/spi/spi-imx.c4
-rw-r--r--drivers/spi/spi-lm70llp.c4
-rw-r--r--drivers/spi/spi-mem.c2
-rw-r--r--drivers/spi/spi-mt65xx.c22
-rw-r--r--drivers/spi/spi-pci1xxxx.c2
-rw-r--r--drivers/spi/spi-s3c64xx.c5
-rw-r--r--drivers/spi/spi.c24
-rw-r--r--drivers/staging/Kconfig4
-rw-r--r--drivers/staging/Makefile2
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c7
-rw-r--r--drivers/staging/board/Kconfig12
-rw-r--r--drivers/staging/board/Makefile4
-rw-r--r--drivers/staging/board/TODO2
-rw-r--r--drivers/staging/board/armadillo800eva.c88
-rw-r--r--drivers/staging/board/board.c204
-rw-r--r--drivers/staging/board/board.h46
-rw-r--r--drivers/staging/board/kzm9d.c26
-rw-r--r--drivers/staging/emxx_udc/Kconfig11
-rw-r--r--drivers/staging/emxx_udc/Makefile2
-rw-r--r--drivers/staging/emxx_udc/TODO6
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c3223
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.h554
-rw-r--r--drivers/staging/fbtft/fbtft-core.c2
-rw-r--r--drivers/staging/fieldbus/anybuss/arcx-anybus.c6
-rw-r--r--drivers/staging/fieldbus/anybuss/host.c2
-rw-r--r--drivers/staging/fieldbus/dev_core.c8
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c2
-rw-r--r--drivers/staging/greybus/Kconfig2
-rw-r--r--drivers/staging/greybus/audio_apbridgea.h1
-rw-r--r--drivers/staging/greybus/audio_manager.c8
-rw-r--r--drivers/staging/greybus/audio_topology.c3
-rw-r--r--drivers/staging/greybus/authentication.c6
-rw-r--r--drivers/staging/greybus/bootrom.c8
-rw-r--r--drivers/staging/greybus/fw-download.c15
-rw-r--r--drivers/staging/greybus/fw-management.c20
-rw-r--r--drivers/staging/greybus/gbphy.c8
-rw-r--r--drivers/staging/greybus/greybus_authentication.h6
-rw-r--r--drivers/staging/greybus/greybus_firmware.h8
-rw-r--r--drivers/staging/greybus/light.c29
-rw-r--r--drivers/staging/greybus/loopback.c6
-rw-r--r--drivers/staging/greybus/raw.c6
-rw-r--r--drivers/staging/greybus/vibrator.c6
-rw-r--r--drivers/staging/media/atomisp/TODO10
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.h5
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c141
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_drvfs.c144
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_drvfs.h5
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_internal.h1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c260
-rw-r--r--drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h2
-rw-r--r--drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c6
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_acc_types.h4
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_control.h29
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_firmware.h6
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_irq.h6
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h4
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c4
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_global.h2
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_public.h2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/src/queue.c22
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c6
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c35
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_defs.h2
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.c4
-rw-r--r--drivers/staging/media/imx/imx-media-csc-scaler.c1
-rw-r--r--drivers/staging/media/imx/imx-media-fim.c2
-rw-r--r--drivers/staging/media/ipu3/include/uapi/intel-ipu3.h3
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c16
-rw-r--r--drivers/staging/media/meson/vdec/vdec.h1
-rw-r--r--drivers/staging/media/starfive/camss/stf-capture.c8
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h265.c10
-rw-r--r--drivers/staging/nvec/TODO7
-rw-r--r--drivers/staging/nvec/nvec.c7
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c2
-rw-r--r--drivers/staging/octeon/octeon-stubs.h2
-rw-r--r--drivers/staging/pi433/pi433_if.c1
-rw-r--r--drivers/staging/pi433/rf69.c4
-rw-r--r--drivers/staging/pi433/rf69.h4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c103
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c64
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c37
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c4
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c70
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HT.h6
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c44
-rw-r--r--drivers/staging/rtl8192e/rtl819x_Qos.h2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c30
-rw-r--r--drivers/staging/rtl8192e/rtllib.h98
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c50
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c278
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c8
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c18
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c9
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c3
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c14
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c3
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c61
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c5
-rw-r--r--drivers/staging/vme_user/vme.c2
-rw-r--r--drivers/staging/vme_user/vme.h2
-rw-r--r--drivers/staging/vme_user/vme_tsi148.h6
-rw-r--r--drivers/staging/vt6655/card.c74
-rw-r--r--drivers/staging/vt6655/rxtx.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c3
-rw-r--r--drivers/target/loopback/tcm_loop.c2
-rw-r--r--drivers/target/target_core_configfs.c12
-rw-r--r--drivers/tc/tc-driver.c2
-rw-r--r--drivers/thermal/Makefile2
-rw-r--r--drivers/thermal/devfreq_cooling.c2
-rw-r--r--drivers/thermal/gov_power_allocator.c14
-rw-r--r--drivers/thermal/mediatek/auxadc_thermal.c3
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c4
-rw-r--r--drivers/thermal/qoriq_thermal.c12
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c4
-rw-r--r--drivers/thermal/st/st_thermal.h18
-rw-r--r--drivers/thermal/st/st_thermal_memmap.c2
-rw-r--r--drivers/thermal/sun8i_thermal.c139
-rw-r--r--drivers/thermal/thermal_debugfs.c1
-rw-r--r--drivers/thermal/thermal_of.c14
-rw-r--r--drivers/thermal/thermal_trip.c19
-rw-r--r--drivers/thunderbolt/Makefile1
-rw-r--r--drivers/thunderbolt/ctl.c19
-rw-r--r--drivers/thunderbolt/ctl.h4
-rw-r--r--drivers/thunderbolt/domain.c19
-rw-r--r--drivers/thunderbolt/icm.c2
-rw-r--r--drivers/thunderbolt/lc.c45
-rw-r--r--drivers/thunderbolt/nhi.c11
-rw-r--r--drivers/thunderbolt/nvm.c4
-rw-r--r--drivers/thunderbolt/path.c13
-rw-r--r--drivers/thunderbolt/quirks.c14
-rw-r--r--drivers/thunderbolt/retimer.c2
-rw-r--r--drivers/thunderbolt/switch.c188
-rw-r--r--drivers/thunderbolt/tb.c900
-rw-r--r--drivers/thunderbolt/tb.h32
-rw-r--r--drivers/thunderbolt/tb_regs.h6
-rw-r--r--drivers/thunderbolt/trace.h188
-rw-r--r--drivers/thunderbolt/tunnel.c96
-rw-r--r--drivers/thunderbolt/tunnel.h6
-rw-r--r--drivers/thunderbolt/usb4.c56
-rw-r--r--drivers/thunderbolt/usb4_port.c2
-rw-r--r--drivers/thunderbolt/xdomain.c16
-rw-r--r--drivers/tty/Kconfig7
-rw-r--r--drivers/tty/amiserial.c6
-rw-r--r--drivers/tty/goldfish.c5
-rw-r--r--drivers/tty/hvc/hvc_iucv.c6
-rw-r--r--drivers/tty/mips_ejtag_fdc.c2
-rw-r--r--drivers/tty/serdev/core.c2
-rw-r--r--drivers/tty/serdev/serdev-ttyport.c10
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c50
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c94
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c73
-rw-r--r--drivers/tty/serial/8250/8250_dw.c127
-rw-r--r--drivers/tty/serial/8250/8250_exar.c52
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c20
-rw-r--r--drivers/tty/serial/8250/8250_lpc18xx.c20
-rw-r--r--drivers/tty/serial/8250/8250_of.c143
-rw-r--r--drivers/tty/serial/8250/8250_omap.c29
-rw-r--r--drivers/tty/serial/8250/8250_pci.c6
-rw-r--r--drivers/tty/serial/8250/8250_pci1xxxx.c175
-rw-r--r--drivers/tty/serial/8250/8250_port.c50
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c22
-rw-r--r--drivers/tty/serial/8250/8250_tegra.c26
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c17
-rw-r--r--drivers/tty/serial/8250/Kconfig1
-rw-r--r--drivers/tty/serial/amba-pl011.c24
-rw-r--r--drivers/tty/serial/ar933x_uart.c18
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c24
-rw-r--r--drivers/tty/serial/fsl_linflexuart.c1
-rw-r--r--drivers/tty/serial/jsm/jsm_cls.c1
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c19
-rw-r--r--drivers/tty/serial/max310x.c329
-rw-r--r--drivers/tty/serial/mcf.c27
-rw-r--r--drivers/tty/serial/meson_uart.c22
-rw-r--r--drivers/tty/serial/msm_serial.c33
-rw-r--r--drivers/tty/serial/mxs-auart.c8
-rw-r--r--drivers/tty/serial/omap-serial.c16
-rw-r--r--drivers/tty/serial/owl-uart.c30
-rw-r--r--drivers/tty/serial/pch_uart.c70
-rw-r--r--drivers/tty/serial/pmac_zilog.c30
-rw-r--r--drivers/tty/serial/pxa.c17
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c27
-rw-r--r--drivers/tty/serial/rda-uart.c28
-rw-r--r--drivers/tty/serial/samsung_tty.c270
-rw-r--r--drivers/tty/serial/serial_base.h4
-rw-r--r--drivers/tty/serial/serial_base_bus.c2
-rw-r--r--drivers/tty/serial/serial_core.c35
-rw-r--r--drivers/tty/serial/serial_port.c179
-rw-r--r--drivers/tty/serial/serial_txx9.c3
-rw-r--r--drivers/tty/serial/sh-sci.c245
-rw-r--r--drivers/tty/serial/sifive.c17
-rw-r--r--drivers/tty/serial/st-asc.c40
-rw-r--r--drivers/tty/serial/stm32-usart.c236
-rw-r--r--drivers/tty/serial/stm32-usart.h38
-rw-r--r--drivers/tty/serial/sunplus-uart.c18
-rw-r--r--drivers/tty/serial/xilinx_uartps.c236
-rw-r--r--drivers/tty/tty_buffer.c1
-rw-r--r--drivers/tty/vt/Makefile4
-rw-r--r--drivers/tty/vt/selection.c43
-rw-r--r--drivers/tty/vt/vt.c1531
-rw-r--r--drivers/tty/vt/vt_ioctl.c6
-rw-r--r--drivers/ufs/core/ufs-mcq.c14
-rw-r--r--drivers/ufs/core/ufs-sysfs.c49
-rw-r--r--drivers/ufs/core/ufshcd.c99
-rw-r--r--drivers/ufs/host/ufs-mediatek.c90
-rw-r--r--drivers/ufs/host/ufs-mediatek.h7
-rw-r--r--drivers/ufs/host/ufs-qcom.c42
-rw-r--r--drivers/uio/uio.c47
-rw-r--r--drivers/uio/uio_dmem_genirq.c22
-rw-r--r--drivers/uio/uio_hv_generic.c12
-rw-r--r--drivers/uio/uio_pruss.c6
-rw-r--r--drivers/usb/cdns3/drd.c2
-rw-r--r--drivers/usb/core/Kconfig27
-rw-r--r--drivers/usb/core/driver.c8
-rw-r--r--drivers/usb/core/endpoint.c2
-rw-r--r--drivers/usb/core/hcd.c20
-rw-r--r--drivers/usb/core/hub.c52
-rw-r--r--drivers/usb/core/hub.h2
-rw-r--r--drivers/usb/core/message.c7
-rw-r--r--drivers/usb/core/of.c71
-rw-r--r--drivers/usb/core/phy.c120
-rw-r--r--drivers/usb/core/phy.h3
-rw-r--r--drivers/usb/core/port.c46
-rw-r--r--drivers/usb/core/sysfs.c119
-rw-r--r--drivers/usb/core/usb-acpi.c46
-rw-r--r--drivers/usb/core/usb.c2
-rw-r--r--drivers/usb/core/usb.h8
-rw-r--r--drivers/usb/dwc2/core.h14
-rw-r--r--drivers/usb/dwc2/core_intr.c72
-rw-r--r--drivers/usb/dwc2/gadget.c10
-rw-r--r--drivers/usb/dwc2/hcd.c49
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c19
-rw-r--r--drivers/usb/dwc2/hw.h2
-rw-r--r--drivers/usb/dwc2/platform.c2
-rw-r--r--drivers/usb/dwc3/Kconfig2
-rw-r--r--drivers/usb/dwc3/core.c2
-rw-r--r--drivers/usb/dwc3/core.h4
-rw-r--r--drivers/usb/dwc3/dwc3-am62.c42
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c4
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c276
-rw-r--r--drivers/usb/dwc3/ep0.c4
-rw-r--r--drivers/usb/dwc3/gadget.c101
-rw-r--r--drivers/usb/dwc3/gadget.h1
-rw-r--r--drivers/usb/dwc3/host.c61
-rw-r--r--drivers/usb/gadget/Kconfig1
-rw-r--r--drivers/usb/gadget/function/f_fs.c536
-rw-r--r--drivers/usb/gadget/function/f_ncm.c4
-rw-r--r--drivers/usb/gadget/function/u_ether.c2
-rw-r--r--drivers/usb/gadget/function/uvc_video.c115
-rw-r--r--drivers/usb/gadget/udc/core.c11
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c132
-rw-r--r--drivers/usb/gadget/udc/fsl_usb2_udc.h47
-rw-r--r--drivers/usb/gadget/udc/net2272.c2
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c1
-rw-r--r--drivers/usb/gadget/udc/snps_udc_plat.c1
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c39
-rw-r--r--drivers/usb/host/ehci-orion.c18
-rw-r--r--drivers/usb/host/ohci-pxa27x.c1
-rw-r--r--drivers/usb/host/sl811-hcd.c2
-rw-r--r--drivers/usb/host/xhci-caps.h85
-rw-r--r--drivers/usb/host/xhci-dbgcap.c13
-rw-r--r--drivers/usb/host/xhci-dbgcap.h2
-rw-r--r--drivers/usb/host/xhci-hub.c69
-rw-r--r--drivers/usb/host/xhci-mem.c95
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c14
-rw-r--r--drivers/usb/host/xhci-pci.c15
-rw-r--r--drivers/usb/host/xhci-port.h176
-rw-r--r--drivers/usb/host/xhci-ring.c234
-rw-r--r--drivers/usb/host/xhci-trace.h12
-rw-r--r--drivers/usb/host/xhci.c56
-rw-r--r--drivers/usb/host/xhci.h272
-rw-r--r--drivers/usb/image/mdc800.c1
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c10
-rw-r--r--drivers/usb/misc/onboard_usb_hub.h7
-rw-r--r--drivers/usb/misc/usb-ljca.c22
-rw-r--r--drivers/usb/mtu3/mtu3_host.c30
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/phy/phy-generic.c48
-rw-r--r--drivers/usb/phy/phy.c2
-rw-r--r--drivers/usb/roles/class.c43
-rw-r--r--drivers/usb/serial/cp210x.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/keyspan.c1
-rw-r--r--drivers/usb/serial/option.c46
-rw-r--r--drivers/usb/serial/oti6858.c1
-rw-r--r--drivers/usb/storage/freecom.c1
-rw-r--r--drivers/usb/storage/sddr55.c4
-rw-r--r--drivers/usb/storage/uas.c28
-rw-r--r--drivers/usb/typec/altmodes/displayport.c165
-rw-r--r--drivers/usb/typec/bus.c102
-rw-r--r--drivers/usb/typec/class.c68
-rw-r--r--drivers/usb/typec/class.h7
-rw-r--r--drivers/usb/typec/mux.c2
-rw-r--r--drivers/usb/typec/mux/Kconfig10
-rw-r--r--drivers/usb/typec/mux/Makefile1
-rw-r--r--drivers/usb/typec/mux/it5205.c294
-rw-r--r--drivers/usb/typec/pd.c30
-rw-r--r--drivers/usb/typec/retimer.c2
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c2
-rw-r--r--drivers/usb/typec/tcpm/qcom/Makefile3
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c254
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.h27
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c159
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h94
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c80
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c290
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h172
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c27
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim.h1
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim_core.c38
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c1062
-rw-r--r--drivers/usb/typec/tcpm/wcove.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c360
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h112
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c75
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c92
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c15
-rw-r--r--drivers/vdpa/alibaba/eni_vdpa.c8
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c11
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h2
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c15
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c13
-rw-r--r--drivers/vdpa/pds/aux_drv.c2
-rw-r--r--drivers/vdpa/pds/vdpa_dev.c20
-rw-r--r--drivers/vdpa/pds/vdpa_dev.h1
-rw-r--r--drivers/vdpa/vdpa.c214
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c15
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c27
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.h8
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c34
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c8
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c7
-rw-r--r--drivers/vfio/mdev/mdev_driver.c2
-rw-r--r--drivers/vfio/mdev/mdev_private.h2
-rw-r--r--drivers/vfio/pci/Kconfig2
-rw-r--r--drivers/vfio/pci/Makefile2
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c48
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h6
-rw-r--r--drivers/vfio/pci/mlx5/cmd.c157
-rw-r--r--drivers/vfio/pci/mlx5/cmd.h11
-rw-r--r--drivers/vfio/pci/mlx5/main.c148
-rw-r--r--drivers/vfio/pci/nvgrace-gpu/Kconfig10
-rw-r--r--drivers/vfio/pci/nvgrace-gpu/Makefile3
-rw-r--r--drivers/vfio/pci/nvgrace-gpu/main.c888
-rw-r--r--drivers/vfio/pci/pds/dirty.c6
-rw-r--r--drivers/vfio/pci/pds/lm.c13
-rw-r--r--drivers/vfio/pci/pds/lm.h1
-rw-r--r--drivers/vfio/pci/pds/pci_drv.c27
-rw-r--r--drivers/vfio/pci/pds/vfio_dev.c45
-rw-r--r--drivers/vfio/pci/pds/vfio_dev.h8
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c42
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c20
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c176
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c16
-rw-r--r--drivers/vfio/pci/virtio/main.c72
-rw-r--r--drivers/vfio/platform/vfio_amba.c6
-rw-r--r--drivers/vfio/platform/vfio_platform.c5
-rw-r--r--drivers/vfio/platform/vfio_platform_irq.c105
-rw-r--r--drivers/vfio/vfio.h2
-rw-r--r--drivers/vfio/vfio_iommu_type1.c12
-rw-r--r--drivers/vfio/vfio_main.c4
-rw-r--r--drivers/vfio/virqfd.c21
-rw-r--r--drivers/vhost/net.c3
-rw-r--r--drivers/vhost/vdpa.c14
-rw-r--r--drivers/vhost/vhost.c26
-rw-r--r--drivers/video/backlight/Kconfig7
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/as3711_bl.c6
-rw-r--r--drivers/video/backlight/bd6107.c9
-rw-r--r--drivers/video/backlight/da9052_bl.c1
-rw-r--r--drivers/video/backlight/gpio_backlight.c10
-rw-r--r--drivers/video/backlight/hx8357.c67
-rw-r--r--drivers/video/backlight/ktd2801-backlight.c128
-rw-r--r--drivers/video/backlight/ktz8866.c6
-rw-r--r--drivers/video/backlight/l4f00242t03.c34
-rw-r--r--drivers/video/backlight/lm3630a_bl.c42
-rw-r--r--drivers/video/backlight/lm3639_bl.c1
-rw-r--r--drivers/video/backlight/lp8788_bl.c1
-rw-r--r--drivers/video/backlight/mp3309c.c93
-rw-r--r--drivers/video/backlight/pandora_bl.c4
-rw-r--r--drivers/video/console/dummycon.c38
-rw-r--r--drivers/video/console/mdacon.c43
-rw-r--r--drivers/video/console/newport_con.c69
-rw-r--r--drivers/video/console/sticon.c79
-rw-r--r--drivers/video/console/vgacon.c152
-rw-r--r--drivers/video/fbdev/Kconfig5
-rw-r--r--drivers/video/fbdev/arkfb.c15
-rw-r--r--drivers/video/fbdev/core/bitblit.c13
-rw-r--r--drivers/video/fbdev/core/fbcon.c139
-rw-r--r--drivers/video/fbdev/core/fbcon.h4
-rw-r--r--drivers/video/fbdev/core/fbcon_ccw.c13
-rw-r--r--drivers/video/fbdev/core/fbcon_cw.c13
-rw-r--r--drivers/video/fbdev/core/fbcon_ud.c13
-rw-r--r--drivers/video/fbdev/core/fbmem.c12
-rw-r--r--drivers/video/fbdev/core/fbmon.c7
-rw-r--r--drivers/video/fbdev/core/svgalib.c15
-rw-r--r--drivers/video/fbdev/core/tileblit.c4
-rw-r--r--drivers/video/fbdev/hgafb.c2
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c18
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c13
-rw-r--r--drivers/video/fbdev/s3fb.c15
-rw-r--r--drivers/video/fbdev/tgafb.c2
-rw-r--r--drivers/video/fbdev/uvesafb.c2
-rw-r--r--drivers/video/fbdev/vga16fb.c6
-rw-r--r--drivers/video/fbdev/via/accel.c4
-rw-r--r--drivers/video/fbdev/vt8623fb.c15
-rw-r--r--drivers/video/sticore.c2
-rw-r--r--drivers/virt/vmgenid.c2
-rw-r--r--drivers/virtio/virtio.c12
-rw-r--r--drivers/virtio/virtio_ring.c6
-rw-r--r--drivers/virtio/virtio_vdpa.c5
-rw-r--r--drivers/w1/masters/Kconfig10
-rw-r--r--drivers/w1/masters/Makefile1
-rw-r--r--drivers/w1/masters/mxc_w1.c6
-rw-r--r--drivers/w1/masters/omap_hdq.c6
-rw-r--r--drivers/w1/masters/sgi_w1.c6
-rw-r--r--drivers/w1/masters/w1-gpio.c6
-rw-r--r--drivers/w1/masters/w1-uart.c415
-rw-r--r--drivers/w1/w1.c2
-rw-r--r--drivers/watchdog/Kconfig11
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/cros_ec_wdt.c204
-rw-r--r--drivers/watchdog/hpwdt.c25
-rw-r--r--drivers/watchdog/intel-mid_wdt.c11
-rw-r--r--drivers/watchdog/it87_wdt.c4
-rw-r--r--drivers/watchdog/qcom-wdt.c7
-rw-r--r--drivers/watchdog/sp805_wdt.c8
-rw-r--r--drivers/watchdog/starfive-wdt.c14
-rw-r--r--drivers/watchdog/stm32_iwdg.c3
-rw-r--r--drivers/watchdog/watchdog_core.c17
-rw-r--r--drivers/xen/balloon.c2
-rw-r--r--drivers/xen/events/events_base.c22
-rw-r--r--drivers/xen/evtchn.c6
-rw-r--r--drivers/xen/grant-dma-iommu.c6
2507 files changed, 79078 insertions, 41465 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 37fd6ce3bd7f5..3bf5cab4b4519 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -135,7 +135,7 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-y += mmc/
obj-y += ufs/
obj-$(CONFIG_MEMSTICK) += memstick/
-obj-$(CONFIG_NEW_LEDS) += leds/
+obj-y += leds/
obj-$(CONFIG_INFINIBAND) += infiniband/
obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 39f6d1b98fd6a..51d3f1a55d024 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/firmware.h>
@@ -131,22 +131,6 @@ static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param
return 0;
}
-static int ivpu_get_core_clock_rate(struct ivpu_device *vdev, u64 *clk_rate)
-{
- int ret;
-
- ret = ivpu_rpm_get_if_active(vdev);
- if (ret < 0)
- return ret;
-
- *clk_rate = ret ? ivpu_hw_reg_pll_freq_get(vdev) : 0;
-
- if (ret)
- ivpu_rpm_put(vdev);
-
- return 0;
-}
-
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
@@ -170,7 +154,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = vdev->platform;
break;
case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
- ret = ivpu_get_core_clock_rate(vdev, &args->value);
+ args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
break;
case DRM_IVPU_PARAM_NUM_CONTEXTS:
args->value = ivpu_get_context_count(vdev);
@@ -387,12 +371,15 @@ int ivpu_shutdown(struct ivpu_device *vdev)
{
int ret;
- ivpu_prepare_for_reset(vdev);
+ /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
+ pci_save_state(to_pci_dev(vdev->drm.dev));
ret = ivpu_hw_power_down(vdev);
if (ret)
ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+
return ret;
}
@@ -530,7 +517,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
atomic64_set(&vdev->unique_id_counter, 0);
- xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
+ xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
@@ -560,11 +547,11 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
/* Power up early so the rest of init code can access VPU registers */
ret = ivpu_hw_power_up(vdev);
if (ret)
- goto err_power_down;
+ goto err_shutdown;
ret = ivpu_mmu_global_context_init(vdev);
if (ret)
- goto err_power_down;
+ goto err_shutdown;
ret = ivpu_mmu_init(vdev);
if (ret)
@@ -601,10 +588,8 @@ err_mmu_rctx_fini:
ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
ivpu_mmu_global_context_fini(vdev);
-err_power_down:
- ivpu_hw_power_down(vdev);
- if (IVPU_WA(d3hot_after_power_off))
- pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+err_shutdown:
+ ivpu_shutdown(vdev);
err_xa_destroy:
xa_destroy(&vdev->db_xa);
xa_destroy(&vdev->submitted_jobs_xa);
@@ -628,9 +613,8 @@ static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
ivpu_pm_disable(vdev);
+ ivpu_prepare_for_reset(vdev);
ivpu_shutdown(vdev);
- if (IVPU_WA(d3hot_after_power_off))
- pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
ivpu_jobs_abort_all(vdev);
ivpu_job_done_consumer_fini(vdev);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 7be0500d9bb89..bb4374d0eaecc 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_DRV_H__
@@ -90,7 +90,6 @@
struct ivpu_wa_table {
bool punit_disabled;
bool clear_runtime_mem;
- bool d3hot_after_power_off;
bool interrupt_clear_with_0;
bool disable_clock_relinquish;
bool disable_d0i3_msg;
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index b2909168a0a69..094c659d2800b 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -21,6 +21,7 @@ struct ivpu_hw_ops {
u32 (*profiling_freq_get)(struct ivpu_device *vdev);
void (*profiling_freq_drive)(struct ivpu_device *vdev, bool enable);
u32 (*reg_pll_freq_get)(struct ivpu_device *vdev);
+ u32 (*ratio_to_freq)(struct ivpu_device *vdev, u32 ratio);
u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev);
u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev);
u32 (*reg_telemetry_enable_get)(struct ivpu_device *vdev);
@@ -130,6 +131,11 @@ static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev)
return vdev->hw->ops->reg_pll_freq_get(vdev);
};
+static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+{
+ return vdev->hw->ops->ratio_to_freq(vdev, ratio);
+}
+
static inline u32 ivpu_hw_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
return vdev->hw->ops->reg_telemetry_offset_get(vdev);
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index 9a0c9498baba2..bd25e2d9fb0f4 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include "ivpu_drv.h"
@@ -75,7 +75,6 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
{
vdev->wa.punit_disabled = false;
vdev->wa.clear_runtime_mem = false;
- vdev->wa.d3hot_after_power_off = true;
REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK);
if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) {
@@ -86,7 +85,6 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
IVPU_PRINT_WA(punit_disabled);
IVPU_PRINT_WA(clear_runtime_mem);
- IVPU_PRINT_WA(d3hot_after_power_off);
IVPU_PRINT_WA(interrupt_clear_with_0);
}
@@ -805,12 +803,12 @@ static void ivpu_hw_37xx_profiling_freq_drive(struct ivpu_device *vdev, bool ena
/* Profiling freq - is a debug feature. Unavailable on VPU 37XX. */
}
-static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
+static u32 ivpu_hw_37xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
u32 cpu_clock;
- if ((config & 0xff) == PLL_RATIO_4_3)
+ if ((vdev->hw->config & 0xff) == PLL_RATIO_4_3)
cpu_clock = pll_clock * 2 / 4;
else
cpu_clock = pll_clock * 2 / 5;
@@ -829,7 +827,7 @@ static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
if (!ivpu_is_silicon(vdev))
return PLL_SIMULATION_FREQ;
- return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
+ return ivpu_hw_37xx_ratio_to_freq(vdev, pll_curr_ratio);
}
static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
@@ -1052,6 +1050,7 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
.profiling_freq_get = ivpu_hw_37xx_profiling_freq_get,
.profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive,
.reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
+ .ratio_to_freq = ivpu_hw_37xx_ratio_to_freq,
.reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
index e4eddbf5d11c2..b0b88d4c89264 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -980,6 +980,11 @@ static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
return PLL_RATIO_TO_FREQ(pll_curr_ratio);
}
+static u32 ivpu_hw_40xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+{
+ return PLL_RATIO_TO_FREQ(ratio);
+}
+
static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
@@ -1230,6 +1235,7 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
.profiling_freq_get = ivpu_hw_40xx_profiling_freq_get,
.profiling_freq_drive = ivpu_hw_40xx_profiling_freq_drive,
.reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
+ .ratio_to_freq = ivpu_hw_40xx_ratio_to_freq,
.reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get,
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 04ac4b9840fbe..56ff067f63e29 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/genalloc.h>
@@ -501,7 +501,11 @@ int ivpu_ipc_init(struct ivpu_device *vdev)
spin_lock_init(&ipc->cons_lock);
INIT_LIST_HEAD(&ipc->cons_list);
INIT_LIST_HEAD(&ipc->cb_msg_list);
- drmm_mutex_init(&vdev->drm, &ipc->lock);
+ ret = drmm_mutex_init(&vdev->drm, &ipc->lock);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret);
+ goto err_free_rx;
+ }
ivpu_ipc_reset(vdev);
return 0;
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index 91bd640655ab3..2e46b322c4505 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -278,7 +278,7 @@ static const char *ivpu_mmu_event_to_str(u32 cmd)
case IVPU_MMU_EVT_F_VMS_FETCH:
return "Fetch of VMS caused external abort";
default:
- return "Unknown CMDQ command";
+ return "Unknown event";
}
}
@@ -286,15 +286,15 @@ static const char *ivpu_mmu_cmdq_err_to_str(u32 err)
{
switch (err) {
case IVPU_MMU_CERROR_NONE:
- return "No CMDQ Error";
+ return "No error";
case IVPU_MMU_CERROR_ILL:
return "Illegal command";
case IVPU_MMU_CERROR_ABT:
- return "External abort on CMDQ read";
+ return "External abort on command queue read";
case IVPU_MMU_CERROR_ATC_INV_SYNC:
return "Sync failed to complete ATS invalidation";
default:
- return "Unknown CMDQ Error";
+ return "Unknown error";
}
}
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 7cce1c928a7f4..4f5ea466731ff 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/highmem.h>
@@ -58,14 +58,11 @@ static int ivpu_suspend(struct ivpu_device *vdev)
{
int ret;
- /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
- pci_save_state(to_pci_dev(vdev->drm.dev));
+ ivpu_prepare_for_reset(vdev);
ret = ivpu_shutdown(vdev);
if (ret)
- ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
-
- pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+ ivpu_err(vdev, "Failed to shutdown NPU: %d\n", ret);
return ret;
}
@@ -74,10 +71,10 @@ static int ivpu_resume(struct ivpu_device *vdev)
{
int ret;
- pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
+retry:
pci_restore_state(to_pci_dev(vdev->drm.dev));
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
-retry:
ret = ivpu_hw_power_up(vdev);
if (ret) {
ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
@@ -100,6 +97,7 @@ err_mmu_disable:
ivpu_mmu_disable(vdev);
err_power_down:
ivpu_hw_power_down(vdev);
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
if (!ivpu_fw_is_cold_boot(vdev)) {
ivpu_pm_prepare_cold_boot(vdev);
diff --git a/drivers/accessibility/speakup/devsynth.c b/drivers/accessibility/speakup/devsynth.c
index d305716635855..cb7e1114e8ebe 100644
--- a/drivers/accessibility/speakup/devsynth.c
+++ b/drivers/accessibility/speakup/devsynth.c
@@ -7,9 +7,10 @@
#include "speakup.h"
#include "spk_priv.h"
-static int misc_registered;
+static int synth_registered, synthu_registered;
static int dev_opened;
+/* Latin1 version */
static ssize_t speakup_file_write(struct file *fp, const char __user *buffer,
size_t nbytes, loff_t *ppos)
{
@@ -34,6 +35,98 @@ static ssize_t speakup_file_write(struct file *fp, const char __user *buffer,
return (ssize_t)nbytes;
}
+/* UTF-8 version */
+static ssize_t speakup_file_writeu(struct file *fp, const char __user *buffer,
+ size_t nbytes, loff_t *ppos)
+{
+ size_t count = nbytes, want;
+ const char __user *ptr = buffer;
+ size_t bytes;
+ unsigned long flags;
+ unsigned char buf[256];
+ u16 ubuf[256];
+ size_t in, in2, out;
+
+ if (!synth)
+ return -ENODEV;
+
+ want = 1;
+ while (count >= want) {
+ /* Copy some UTF-8 piece from userland */
+ bytes = min(count, sizeof(buf));
+ if (copy_from_user(buf, ptr, bytes))
+ return -EFAULT;
+
+ /* Convert to u16 */
+ for (in = 0, out = 0; in < bytes; in++) {
+ unsigned char c = buf[in];
+ int nbytes = 8 - fls(c ^ 0xff);
+ u32 value;
+
+ switch (nbytes) {
+ case 8: /* 0xff */
+ case 7: /* 0xfe */
+ case 1: /* 0x80 */
+ /* Invalid, drop */
+ goto drop;
+
+ case 0:
+ /* ASCII, copy */
+ ubuf[out++] = c;
+ continue;
+
+ default:
+ /* 2..6-byte UTF-8 */
+
+ if (bytes - in < nbytes) {
+ /* We don't have it all yet, stop here
+ * and wait for the rest
+ */
+ bytes = in;
+ want = nbytes;
+ continue;
+ }
+
+ /* First byte */
+ value = c & ((1u << (7 - nbytes)) - 1);
+
+ /* Other bytes */
+ for (in2 = 2; in2 <= nbytes; in2++) {
+ c = buf[in + 1];
+ if ((c & 0xc0) != 0x80) {
+ /* Invalid, drop the head */
+ want = 1;
+ goto drop;
+ }
+ value = (value << 6) | (c & 0x3f);
+ in++;
+ }
+
+ if (value < 0x10000)
+ ubuf[out++] = value;
+ want = 1;
+ break;
+ }
+drop:
+ /* empty statement */;
+ }
+
+ count -= bytes;
+ ptr += bytes;
+
+ /* And speak this up */
+ if (out) {
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
+ for (in = 0; in < out; in++)
+ synth_buffer_add(ubuf[in]);
+ synth_start();
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ }
+ }
+
+ return (ssize_t)(nbytes - count);
+}
+
static ssize_t speakup_file_read(struct file *fp, char __user *buf,
size_t nbytes, loff_t *ppos)
{
@@ -62,31 +155,57 @@ static const struct file_operations synth_fops = {
.release = speakup_file_release,
};
+static const struct file_operations synthu_fops = {
+ .read = speakup_file_read,
+ .write = speakup_file_writeu,
+ .open = speakup_file_open,
+ .release = speakup_file_release,
+};
+
static struct miscdevice synth_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "synth",
.fops = &synth_fops,
};
+static struct miscdevice synthu_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "synthu",
+ .fops = &synthu_fops,
+};
+
void speakup_register_devsynth(void)
{
- if (misc_registered != 0)
- return;
-/* zero it so if register fails, deregister will not ref invalid ptrs */
- if (misc_register(&synth_device)) {
- pr_warn("Couldn't initialize miscdevice /dev/synth.\n");
- } else {
- pr_info("initialized device: /dev/synth, node (MAJOR %d, MINOR %d)\n",
- MISC_MAJOR, synth_device.minor);
- misc_registered = 1;
+ if (!synth_registered) {
+ if (misc_register(&synth_device)) {
+ pr_warn("Couldn't initialize miscdevice /dev/synth.\n");
+ } else {
+ pr_info("initialized device: /dev/synth, node (MAJOR %d, MINOR %d)\n",
+ MISC_MAJOR, synth_device.minor);
+ synth_registered = 1;
+ }
+ }
+ if (!synthu_registered) {
+ if (misc_register(&synthu_device)) {
+ pr_warn("Couldn't initialize miscdevice /dev/synthu.\n");
+ } else {
+ pr_info("initialized device: /dev/synthu, node (MAJOR %d, MINOR %d)\n",
+ MISC_MAJOR, synthu_device.minor);
+ synthu_registered = 1;
+ }
}
}
void speakup_unregister_devsynth(void)
{
- if (!misc_registered)
- return;
- pr_info("speakup: unregistering synth device /dev/synth\n");
- misc_deregister(&synth_device);
- misc_registered = 0;
+ if (synth_registered) {
+ pr_info("speakup: unregistering synth device /dev/synth\n");
+ misc_deregister(&synth_device);
+ synth_registered = 0;
+ }
+ if (synthu_registered) {
+ pr_info("speakup: unregistering synth device /dev/synthu\n");
+ misc_deregister(&synthu_device);
+ synthu_registered = 0;
+ }
}
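
Aside (not part of the diff): the new /dev/synthu write path above sizes each UTF-8 sequence from the number of leading 1 bits in its first byte (8 - fls(c ^ 0xff)), keeps the remaining payload bits of that byte, and then folds each continuation byte in six bits at a time. A minimal user-space sketch of the same decoding scheme, with a made-up file name and sample bytes, assuming a C99 compiler:

	/* utf8_sketch.c - illustration only; build with: cc -std=c99 -o utf8_sketch utf8_sketch.c */
	#include <stdio.h>

	/* Length of the UTF-8 sequence started by 'c', taken from its leading 1 bits:
	 * 0 for plain ASCII, 2..6 for a valid lead byte, 1/7/8 for invalid bytes
	 * (which the driver simply drops). */
	static int utf8_seq_len(unsigned char c)
	{
		int n = 0;

		while (n < 8 && (c & (0x80 >> n)))
			n++;
		return n;
	}

	int main(void)
	{
		unsigned char buf[] = { 0xc3, 0xa9 };	/* U+00E9 encoded as two bytes */
		int len = utf8_seq_len(buf[0]);
		unsigned int value;

		if (len == 1 || len > 6)
			return 1;	/* invalid lead byte, nothing to decode */

		value = buf[0] & ((1u << (7 - len)) - 1);	/* payload bits of the lead byte */
		for (int i = 1; i < len; i++)
			value = (value << 6) | (buf[i] & 0x3f);	/* six payload bits per continuation byte */

		printf("sequence length %d, code point U+%04X\n", len, value);	/* 2, U+00E9 */
		return 0;
	}
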
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index 1fbc9b921c4fc..736c2eb8c0f37 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -574,7 +574,7 @@ static u_long get_word(struct vc_data *vc)
}
attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
buf[cnt++] = attr_ch;
- while (tmpx < vc->vc_cols - 1) {
+ while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
tmp_pos += 2;
tmpx++;
ch = get_char(vc, (u_short *)tmp_pos, &temp);
diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c
index eea2a2fa4f015..45f9061031338 100644
--- a/drivers/accessibility/speakup/synth.c
+++ b/drivers/accessibility/speakup/synth.c
@@ -208,8 +208,10 @@ void spk_do_flush(void)
wake_up_process(speakup_task);
}
-void synth_write(const char *buf, size_t count)
+void synth_write(const char *_buf, size_t count)
{
+ const unsigned char *buf = (const unsigned char *) _buf;
+
while (count--)
synth_buffer_add(*buf++);
synth_start();
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index c645bb453f3b7..ff1689bb3124d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -286,7 +286,7 @@ config ACPI_CPPC_LIB
config ACPI_PROCESSOR
tristate "Processor"
- depends on X86 || ARM64 || LOONGARCH
+ depends on X86 || ARM64 || LOONGARCH || RISCV
select ACPI_PROCESSOR_IDLE
select ACPI_CPU_FREQ_PSS if X86 || LOONGARCH
select THERMAL
@@ -460,7 +460,6 @@ config ACPI_BGRT
config ACPI_REDUCED_HARDWARE_ONLY
bool "Hardware-reduced ACPI support only" if EXPERT
- def_bool n
help
This config item changes the way the ACPI code is built. When this
option is selected, the kernel will use a specialized version of
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index b91155ea9c343..c9131259f717b 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -550,8 +550,12 @@ acpi_db_walk_for_fields(acpi_handle obj_handle,
ACPI_FREE(buffer.pointer);
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
- acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
-
+ status = acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("Could Not evaluate object %p\n",
+ obj_handle);
+ return (AE_OK);
+ }
/*
* Since this is a field unit, surround the output in braces
*/
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 6b18f8bc7be35..3cfe7e7475f2f 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -60,6 +60,19 @@ config ACPI_APEI_EINJ
mainly used for debugging and testing the other parts of
APEI and some other RAS features.
+config ACPI_APEI_EINJ_CXL
+ bool "CXL Error INJection Support"
+ default ACPI_APEI_EINJ
+ depends on ACPI_APEI_EINJ
+ depends on CXL_BUS && CXL_BUS <= ACPI_APEI_EINJ
+ help
+ Support for CXL protocol Error INJection through debugfs/cxl.
+ Availability and which errors are supported is dependent on
+ the host platform. Look to ACPI v6.5 section 18.6.4 and kernel
+ EINJ documentation for more information.
+
+ If unsure say 'n'
+
config ACPI_APEI_ERST_DEBUG
tristate "APEI Error Record Serialization Table (ERST) Debug Support"
depends on ACPI_APEI
diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
index 4dfac2128737c..2c474e6477e12 100644
--- a/drivers/acpi/apei/Makefile
+++ b/drivers/acpi/apei/Makefile
@@ -2,6 +2,8 @@
obj-$(CONFIG_ACPI_APEI) += apei.o
obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
+einj-y := einj-core.o
+einj-$(CONFIG_ACPI_APEI_EINJ_CXL) += einj-cxl.o
obj-$(CONFIG_ACPI_APEI_ERST_DEBUG) += erst-dbg.o
apei-y := apei-base.o hest.o erst.o bert.o
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index 67c2c3b959e15..cd2766c69d78d 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -130,4 +130,22 @@ static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus)
}
int apei_osc_setup(void);
+
+int einj_get_available_error_type(u32 *type);
+int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
+ u64 param4);
+int einj_cxl_rch_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
+ u64 param3, u64 param4);
+bool einj_is_cxl_error_type(u64 type);
+int einj_validate_error_type(u64 type);
+
+#ifndef ACPI_EINJ_CXL_CACHE_CORRECTABLE
+#define ACPI_EINJ_CXL_CACHE_CORRECTABLE BIT(12)
+#define ACPI_EINJ_CXL_CACHE_UNCORRECTABLE BIT(13)
+#define ACPI_EINJ_CXL_CACHE_FATAL BIT(14)
+#define ACPI_EINJ_CXL_MEM_CORRECTABLE BIT(15)
+#define ACPI_EINJ_CXL_MEM_UNCORRECTABLE BIT(16)
+#define ACPI_EINJ_CXL_MEM_FATAL BIT(17)
+#endif
+
#endif
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj-core.c
index 89fb9331c611e..01faca3a238a3 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -21,6 +21,7 @@
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/mm.h>
+#include <linux/platform_device.h>
#include <asm/unaligned.h>
#include "apei-internal.h"
@@ -36,6 +37,12 @@
#define MEM_ERROR_MASK (ACPI_EINJ_MEMORY_CORRECTABLE | \
ACPI_EINJ_MEMORY_UNCORRECTABLE | \
ACPI_EINJ_MEMORY_FATAL)
+#define CXL_ERROR_MASK (ACPI_EINJ_CXL_CACHE_CORRECTABLE | \
+ ACPI_EINJ_CXL_CACHE_UNCORRECTABLE | \
+ ACPI_EINJ_CXL_CACHE_FATAL | \
+ ACPI_EINJ_CXL_MEM_CORRECTABLE | \
+ ACPI_EINJ_CXL_MEM_UNCORRECTABLE | \
+ ACPI_EINJ_CXL_MEM_FATAL)
/*
* ACPI version 5 provides a SET_ERROR_TYPE_WITH_ADDRESS action.
@@ -137,6 +144,11 @@ static struct apei_exec_ins_type einj_ins_type[] = {
*/
static DEFINE_MUTEX(einj_mutex);
+/*
+ * Exported APIs use this flag to exit early if einj_probe() failed.
+ */
+bool einj_initialized __ro_after_init;
+
static void *einj_param;
static void einj_exec_ctx_init(struct apei_exec_context *ctx)
@@ -160,7 +172,7 @@ static int __einj_get_available_error_type(u32 *type)
}
/* Get error injection capabilities of the platform */
-static int einj_get_available_error_type(u32 *type)
+int einj_get_available_error_type(u32 *type)
{
int rc;
@@ -530,8 +542,8 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
}
/* Inject the specified hardware error */
-static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
- u64 param3, u64 param4)
+int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
+ u64 param4)
{
int rc;
u64 base_addr, size;
@@ -554,8 +566,17 @@ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
if (type & ACPI5_VENDOR_BIT) {
if (vendor_flags != SETWA_FLAGS_MEM)
goto inject;
- } else if (!(type & MEM_ERROR_MASK) && !(flags & SETWA_FLAGS_MEM))
+ } else if (!(type & MEM_ERROR_MASK) && !(flags & SETWA_FLAGS_MEM)) {
goto inject;
+ }
+
+ /*
+ * Injections targeting a CXL 1.0/1.1 port have to be injected
+ * via the einj_cxl_rch_error_inject() path as that does the proper
+ * validation of the given RCRB base (MMIO) address.
+ */
+ if (einj_is_cxl_error_type(type) && (flags & SETWA_FLAGS_MEM))
+ return -EINVAL;
/*
* Disallow crazy address masks that give BIOS leeway to pick
@@ -587,6 +608,21 @@ inject:
return rc;
}
+int einj_cxl_rch_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
+ u64 param3, u64 param4)
+{
+ int rc;
+
+ if (!(einj_is_cxl_error_type(type) && (flags & SETWA_FLAGS_MEM)))
+ return -EINVAL;
+
+ mutex_lock(&einj_mutex);
+ rc = __einj_error_inject(type, flags, param1, param2, param3, param4);
+ mutex_unlock(&einj_mutex);
+
+ return rc;
+}
+
static u32 error_type;
static u32 error_flags;
static u64 error_param1;
@@ -607,12 +643,6 @@ static struct { u32 mask; const char *str; } const einj_error_type_string[] = {
{ BIT(9), "Platform Correctable" },
{ BIT(10), "Platform Uncorrectable non-fatal" },
{ BIT(11), "Platform Uncorrectable fatal"},
- { BIT(12), "CXL.cache Protocol Correctable" },
- { BIT(13), "CXL.cache Protocol Uncorrectable non-fatal" },
- { BIT(14), "CXL.cache Protocol Uncorrectable fatal" },
- { BIT(15), "CXL.mem Protocol Correctable" },
- { BIT(16), "CXL.mem Protocol Uncorrectable non-fatal" },
- { BIT(17), "CXL.mem Protocol Uncorrectable fatal" },
{ BIT(31), "Vendor Defined Error Types" },
};
@@ -641,22 +671,26 @@ static int error_type_get(void *data, u64 *val)
return 0;
}
-static int error_type_set(void *data, u64 val)
+bool einj_is_cxl_error_type(u64 type)
{
+ return (type & CXL_ERROR_MASK) && (!(type & ACPI5_VENDOR_BIT));
+}
+
+int einj_validate_error_type(u64 type)
+{
+ u32 tval, vendor, available_error_type = 0;
int rc;
- u32 available_error_type = 0;
- u32 tval, vendor;
/* Only low 32 bits for error type are valid */
- if (val & GENMASK_ULL(63, 32))
+ if (type & GENMASK_ULL(63, 32))
return -EINVAL;
/*
* Vendor defined types have 0x80000000 bit set, and
* are not enumerated by ACPI_EINJ_GET_ERROR_TYPE
*/
- vendor = val & ACPI5_VENDOR_BIT;
- tval = val & 0x7fffffff;
+ vendor = type & ACPI5_VENDOR_BIT;
+ tval = type & GENMASK(30, 0);
/* Only one error type can be specified */
if (tval & (tval - 1))
@@ -665,9 +699,21 @@ static int error_type_set(void *data, u64 val)
rc = einj_get_available_error_type(&available_error_type);
if (rc)
return rc;
- if (!(val & available_error_type))
+ if (!(type & available_error_type))
return -EINVAL;
}
+
+ return 0;
+}
+
+static int error_type_set(void *data, u64 val)
+{
+ int rc;
+
+ rc = einj_validate_error_type(val);
+ if (rc)
+ return rc;
+
error_type = val;
return 0;
@@ -703,21 +749,21 @@ static int einj_check_table(struct acpi_table_einj *einj_tab)
return 0;
}
-static int __init einj_init(void)
+static int __init einj_probe(struct platform_device *pdev)
{
int rc;
acpi_status status;
struct apei_exec_context ctx;
if (acpi_disabled) {
- pr_info("ACPI disabled.\n");
+ pr_debug("ACPI disabled.\n");
return -ENODEV;
}
status = acpi_get_table(ACPI_SIG_EINJ, 0,
(struct acpi_table_header **)&einj_tab);
if (status == AE_NOT_FOUND) {
- pr_warn("EINJ table not found.\n");
+ pr_debug("EINJ table not found.\n");
return -ENODEV;
} else if (ACPI_FAILURE(status)) {
pr_err("Failed to get EINJ table: %s\n",
@@ -805,7 +851,7 @@ err_put_table:
return rc;
}
-static void __exit einj_exit(void)
+static void einj_remove(struct platform_device *pdev)
{
struct apei_exec_context ctx;
@@ -826,6 +872,40 @@ static void __exit einj_exit(void)
acpi_put_table((struct acpi_table_header *)einj_tab);
}
+static struct platform_device *einj_dev;
+static struct platform_driver einj_driver = {
+ .remove_new = einj_remove,
+ .driver = {
+ .name = "acpi-einj",
+ },
+};
+
+static int __init einj_init(void)
+{
+ struct platform_device_info einj_dev_info = {
+ .name = "acpi-einj",
+ .id = -1,
+ };
+ int rc;
+
+ einj_dev = platform_device_register_full(&einj_dev_info);
+ if (IS_ERR(einj_dev))
+ return PTR_ERR(einj_dev);
+
+ rc = platform_driver_probe(&einj_driver, einj_probe);
+ einj_initialized = rc == 0;
+
+ return 0;
+}
+
+static void __exit einj_exit(void)
+{
+ if (einj_initialized)
+ platform_driver_unregister(&einj_driver);
+
+ platform_device_del(einj_dev);
+}
+
module_init(einj_init);
module_exit(einj_exit);
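
Aside (not part of the diff): the single-type rule enforced by error_type_set() and now shared through einj_validate_error_type() relies on the usual power-of-two test, tval & (tval - 1). For example, selecting only BIT(4) passes because 0x10 & 0x0f == 0, while a combined selection such as 0x18 is rejected because 0x18 & 0x17 == 0x10 is non-zero; only one error type may be armed per injection.
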
diff --git a/drivers/acpi/apei/einj-cxl.c b/drivers/acpi/apei/einj-cxl.c
new file mode 100644
index 0000000000000..8b8be0c90709f
--- /dev/null
+++ b/drivers/acpi/apei/einj-cxl.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CXL Error INJection support. Used by CXL core to inject
+ * protocol errors into CXL ports.
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Ben Cheatham <benjamin.cheatham@amd.com>
+ */
+#include <linux/einj-cxl.h>
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+
+#include "apei-internal.h"
+
+/* Defined in einj-core.c */
+extern bool einj_initialized;
+
+static struct { u32 mask; const char *str; } const einj_cxl_error_type_string[] = {
+ { ACPI_EINJ_CXL_CACHE_CORRECTABLE, "CXL.cache Protocol Correctable" },
+ { ACPI_EINJ_CXL_CACHE_UNCORRECTABLE, "CXL.cache Protocol Uncorrectable non-fatal" },
+ { ACPI_EINJ_CXL_CACHE_FATAL, "CXL.cache Protocol Uncorrectable fatal" },
+ { ACPI_EINJ_CXL_MEM_CORRECTABLE, "CXL.mem Protocol Correctable" },
+ { ACPI_EINJ_CXL_MEM_UNCORRECTABLE, "CXL.mem Protocol Uncorrectable non-fatal" },
+ { ACPI_EINJ_CXL_MEM_FATAL, "CXL.mem Protocol Uncorrectable fatal" },
+};
+
+int einj_cxl_available_error_type_show(struct seq_file *m, void *v)
+{
+ int cxl_err, rc;
+ u32 available_error_type = 0;
+
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ return rc;
+
+ for (int pos = 0; pos < ARRAY_SIZE(einj_cxl_error_type_string); pos++) {
+ cxl_err = ACPI_EINJ_CXL_CACHE_CORRECTABLE << pos;
+
+ if (available_error_type & cxl_err)
+ seq_printf(m, "0x%08x\t%s\n",
+ einj_cxl_error_type_string[pos].mask,
+ einj_cxl_error_type_string[pos].str);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(einj_cxl_available_error_type_show, CXL);
+
+static int cxl_dport_get_sbdf(struct pci_dev *dport_dev, u64 *sbdf)
+{
+ struct pci_bus *pbus;
+ struct pci_host_bridge *bridge;
+ u64 seg = 0, bus;
+
+ pbus = dport_dev->bus;
+ bridge = pci_find_host_bridge(pbus);
+
+ if (!bridge)
+ return -ENODEV;
+
+ if (bridge->domain_nr != PCI_DOMAIN_NR_NOT_SET)
+ seg = bridge->domain_nr;
+
+ bus = pbus->number;
+ *sbdf = (seg << 24) | (bus << 16) | dport_dev->devfn;
+
+ return 0;
+}
+
+int einj_cxl_inject_rch_error(u64 rcrb, u64 type)
+{
+ int rc;
+
+ /* Only CXL error types can be specified */
+ if (!einj_is_cxl_error_type(type))
+ return -EINVAL;
+
+ rc = einj_validate_error_type(type);
+ if (rc)
+ return rc;
+
+ return einj_cxl_rch_error_inject(type, 0x2, rcrb, GENMASK_ULL(63, 0),
+ 0, 0);
+}
+EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_rch_error, CXL);
+
+int einj_cxl_inject_error(struct pci_dev *dport, u64 type)
+{
+ u64 param4 = 0;
+ int rc;
+
+ /* Only CXL error types can be specified */
+ if (!einj_is_cxl_error_type(type))
+ return -EINVAL;
+
+ rc = einj_validate_error_type(type);
+ if (rc)
+ return rc;
+
+ rc = cxl_dport_get_sbdf(dport, &param4);
+ if (rc)
+ return rc;
+
+ return einj_error_inject(type, 0x4, 0, 0, 0, param4);
+}
+EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_error, CXL);
+
+bool einj_cxl_is_initialized(void)
+{
+ return einj_initialized;
+}
+EXPORT_SYMBOL_NS_GPL(einj_cxl_is_initialized, CXL);
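
Aside (not part of the diff): cxl_dport_get_sbdf() above packs the target port's location as (segment << 24) | (bus << 16) | devfn, and einj_cxl_inject_error() hands that value to the firmware as injection parameter 4 with flags 0x4. A worked example with made-up PCI numbers:

	/* sbdf_sketch.c - illustration of the segment/bus/devfn packing used above */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* hypothetical downstream port: segment 0, bus 0x3a, device 2, function 1 */
		uint64_t seg = 0, bus = 0x3a;
		unsigned int devfn = (2 << 3) | 1;	/* PCI_DEVFN(2, 1) == 0x11 */
		uint64_t sbdf = (seg << 24) | (bus << 16) | devfn;

		printf("param4 = 0x%llx\n", (unsigned long long)sbdf);	/* 0x3a0011 */
		return 0;
	}
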
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index a89bdbe001844..a7c00ef78086c 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -380,6 +380,8 @@ static int dock_in_progress(struct dock_station *ds)
/**
* handle_eject_request - handle an undock request checking for error conditions
+ * @ds: The dock station to undock.
+ * @event: The ACPI event number associated with the undock request.
*
* Check to make sure the dock device is still present, then undock and
* hotremove all the devices that may need removing.
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index d6b85f0f6082f..2c8ccc91ebe6d 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -59,9 +59,8 @@ struct target_cache {
};
enum {
- NODE_ACCESS_CLASS_0 = 0,
- NODE_ACCESS_CLASS_1,
- NODE_ACCESS_CLASS_GENPORT_SINK,
+ NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL = ACCESS_COORDINATE_MAX,
+ NODE_ACCESS_CLASS_GENPORT_SINK_CPU,
NODE_ACCESS_CLASS_MAX,
};
@@ -75,6 +74,7 @@ struct memory_target {
struct node_cache_attrs cache_attrs;
u8 gen_port_device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE];
bool registered;
+ bool ext_updated; /* externally updated */
};
struct memory_initiator {
@@ -127,7 +127,8 @@ static struct memory_target *acpi_find_genport_target(u32 uid)
/**
* acpi_get_genport_coordinates - Retrieve the access coordinates for a generic port
* @uid: ACPI unique id
- * @coord: The access coordinates written back out for the generic port
+ * @coord: The access coordinates written back out for the generic port.
+ * Expect 2 levels array.
*
* Return: 0 on success. Errno on failure.
*
@@ -143,7 +144,10 @@ int acpi_get_genport_coordinates(u32 uid,
if (!target)
return -ENOENT;
- *coord = target->coord[NODE_ACCESS_CLASS_GENPORT_SINK];
+ coord[ACCESS_COORDINATE_LOCAL] =
+ target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL];
+ coord[ACCESS_COORDINATE_CPU] =
+ target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_CPU];
return 0;
}
@@ -325,6 +329,35 @@ static void hmat_update_target_access(struct memory_target *target,
}
}
+int hmat_update_target_coordinates(int nid, struct access_coordinate *coord,
+ enum access_coordinate_class access)
+{
+ struct memory_target *target;
+ int pxm;
+
+ if (nid == NUMA_NO_NODE)
+ return -EINVAL;
+
+ pxm = node_to_pxm(nid);
+ guard(mutex)(&target_lock);
+ target = find_mem_target(pxm);
+ if (!target)
+ return -ENODEV;
+
+ hmat_update_target_access(target, ACPI_HMAT_READ_LATENCY,
+ coord->read_latency, access);
+ hmat_update_target_access(target, ACPI_HMAT_WRITE_LATENCY,
+ coord->write_latency, access);
+ hmat_update_target_access(target, ACPI_HMAT_READ_BANDWIDTH,
+ coord->read_bandwidth, access);
+ hmat_update_target_access(target, ACPI_HMAT_WRITE_BANDWIDTH,
+ coord->write_bandwidth, access);
+ target->ext_updated = true;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hmat_update_target_coordinates);
+
static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
{
struct memory_locality *loc;
@@ -374,11 +407,11 @@ static __init void hmat_update_target(unsigned int tgt_pxm, unsigned int init_px
if (target && target->processor_pxm == init_pxm) {
hmat_update_target_access(target, type, value,
- NODE_ACCESS_CLASS_0);
+ ACCESS_COORDINATE_LOCAL);
/* If the node has a CPU, update access 1 */
if (node_state(pxm_to_node(init_pxm), N_CPU))
hmat_update_target_access(target, type, value,
- NODE_ACCESS_CLASS_1);
+ ACCESS_COORDINATE_CPU);
}
}
@@ -696,8 +729,13 @@ static void hmat_update_target_attrs(struct memory_target *target,
u32 best = 0;
int i;
+ /* Don't update if an external agent has changed the data. */
+ if (target->ext_updated)
+ return;
+
/* Don't update for generic port if there's no device handle */
- if (access == NODE_ACCESS_CLASS_GENPORT_SINK &&
+ if ((access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL ||
+ access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
!(*(u16 *)target->gen_port_device_handle))
return;
@@ -709,7 +747,8 @@ static void hmat_update_target_attrs(struct memory_target *target,
*/
if (target->processor_pxm != PXM_INVAL) {
cpu_nid = pxm_to_node(target->processor_pxm);
- if (access == 0 || node_state(cpu_nid, N_CPU)) {
+ if (access == ACCESS_COORDINATE_LOCAL ||
+ node_state(cpu_nid, N_CPU)) {
set_bit(target->processor_pxm, p_nodes);
return;
}
@@ -737,7 +776,9 @@ static void hmat_update_target_attrs(struct memory_target *target,
list_for_each_entry(initiator, &initiators, node) {
u32 value;
- if (access == 1 && !initiator->has_cpu) {
+ if ((access == ACCESS_COORDINATE_CPU ||
+ access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
+ !initiator->has_cpu) {
clear_bit(initiator->processor_pxm, p_nodes);
continue;
}
@@ -770,20 +811,24 @@ static void __hmat_register_target_initiators(struct memory_target *target,
}
}
-static void hmat_register_generic_target_initiators(struct memory_target *target)
+static void hmat_update_generic_target(struct memory_target *target)
{
static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
- __hmat_register_target_initiators(target, p_nodes,
- NODE_ACCESS_CLASS_GENPORT_SINK);
+ hmat_update_target_attrs(target, p_nodes,
+ NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL);
+ hmat_update_target_attrs(target, p_nodes,
+ NODE_ACCESS_CLASS_GENPORT_SINK_CPU);
}
static void hmat_register_target_initiators(struct memory_target *target)
{
static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
- __hmat_register_target_initiators(target, p_nodes, 0);
- __hmat_register_target_initiators(target, p_nodes, 1);
+ __hmat_register_target_initiators(target, p_nodes,
+ ACCESS_COORDINATE_LOCAL);
+ __hmat_register_target_initiators(target, p_nodes,
+ ACCESS_COORDINATE_CPU);
}
static void hmat_register_target_cache(struct memory_target *target)
@@ -835,7 +880,7 @@ static void hmat_register_target(struct memory_target *target)
*/
mutex_lock(&target_lock);
if (*(u16 *)target->gen_port_device_handle) {
- hmat_register_generic_target_initiators(target);
+ hmat_update_generic_target(target);
target->registered = true;
}
mutex_unlock(&target_lock);
@@ -854,8 +899,8 @@ static void hmat_register_target(struct memory_target *target)
if (!target->registered) {
hmat_register_target_initiators(target);
hmat_register_target_cache(target);
- hmat_register_target_perf(target, NODE_ACCESS_CLASS_0);
- hmat_register_target_perf(target, NODE_ACCESS_CLASS_1);
+ hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
+ hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
target->registered = true;
}
mutex_unlock(&target_lock);
@@ -927,7 +972,7 @@ static int hmat_calculate_adistance(struct notifier_block *self,
return NOTIFY_OK;
mutex_lock(&target_lock);
- hmat_update_target_attrs(target, p_nodes, 1);
+ hmat_update_target_attrs(target, p_nodes, ACCESS_COORDINATE_CPU);
mutex_unlock(&target_lock);
perf = &target->coord[1];
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 0214518fc582f..e45e64993c504 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -29,6 +29,8 @@ static int node_to_pxm_map[MAX_NUMNODES]
unsigned char acpi_srat_revision __initdata;
static int acpi_numa __initdata;
+static int last_real_pxm;
+
void __init disable_srat(void)
{
acpi_numa = -1;
@@ -536,6 +538,7 @@ int __init acpi_numa_init(void)
if (node_to_pxm_map[i] > fake_pxm)
fake_pxm = node_to_pxm_map[i];
}
+ last_real_pxm = fake_pxm;
fake_pxm++;
acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, acpi_parse_cfmws,
&fake_pxm);
@@ -547,6 +550,14 @@ int __init acpi_numa_init(void)
return 0;
}
+bool acpi_node_backed_by_real_pxm(int nid)
+{
+ int pxm = node_to_pxm(nid);
+
+ return pxm <= last_real_pxm;
+}
+EXPORT_SYMBOL_GPL(acpi_node_backed_by_real_pxm);
+
static int acpi_get_pxm(acpi_handle h)
{
unsigned long long pxm;
diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile
index 8b3b126e0b940..86b0925f612d9 100644
--- a/drivers/acpi/riscv/Makefile
+++ b/drivers/acpi/riscv/Makefile
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += rhct.o
+obj-y += rhct.o
+obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o
+obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o
diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c
new file mode 100644
index 0000000000000..4cdff387deff6
--- /dev/null
+++ b/drivers/acpi/riscv/cppc.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implement CPPC FFH helper routines for RISC-V.
+ *
+ * Copyright (C) 2024 Ventana Micro Systems Inc.
+ */
+
+#include <acpi/cppc_acpi.h>
+#include <asm/csr.h>
+#include <asm/sbi.h>
+
+#define SBI_EXT_CPPC 0x43505043
+
+/* CPPC interfaces defined in SBI spec */
+#define SBI_CPPC_PROBE 0x0
+#define SBI_CPPC_READ 0x1
+#define SBI_CPPC_READ_HI 0x2
+#define SBI_CPPC_WRITE 0x3
+
+/* RISC-V FFH definitions from RISC-V FFH spec */
+#define FFH_CPPC_TYPE(r) (((r) & GENMASK_ULL(63, 60)) >> 60)
+#define FFH_CPPC_SBI_REG(r) ((r) & GENMASK(31, 0))
+#define FFH_CPPC_CSR_NUM(r) ((r) & GENMASK(11, 0))
+
+#define FFH_CPPC_SBI 0x1
+#define FFH_CPPC_CSR 0x2
+
+struct sbi_cppc_data {
+ u64 val;
+ u32 reg;
+ struct sbiret ret;
+};
+
+static bool cppc_ext_present;
+
+static int __init sbi_cppc_init(void)
+{
+ if (sbi_spec_version >= sbi_mk_version(2, 0) &&
+ sbi_probe_extension(SBI_EXT_CPPC) > 0) {
+ pr_info("SBI CPPC extension detected\n");
+ cppc_ext_present = true;
+ } else {
+ pr_info("SBI CPPC extension NOT detected!!\n");
+ cppc_ext_present = false;
+ }
+
+ return 0;
+}
+device_initcall(sbi_cppc_init);
+
+static void sbi_cppc_read(void *read_data)
+{
+ struct sbi_cppc_data *data = (struct sbi_cppc_data *)read_data;
+
+ data->ret = sbi_ecall(SBI_EXT_CPPC, SBI_CPPC_READ,
+ data->reg, 0, 0, 0, 0, 0);
+}
+
+static void sbi_cppc_write(void *write_data)
+{
+ struct sbi_cppc_data *data = (struct sbi_cppc_data *)write_data;
+
+ data->ret = sbi_ecall(SBI_EXT_CPPC, SBI_CPPC_WRITE,
+ data->reg, data->val, 0, 0, 0, 0);
+}
+
+static void cppc_ffh_csr_read(void *read_data)
+{
+ struct sbi_cppc_data *data = (struct sbi_cppc_data *)read_data;
+
+ switch (data->reg) {
+ /* Support only TIME CSR for now */
+ case CSR_TIME:
+ data->ret.value = csr_read(CSR_TIME);
+ data->ret.error = 0;
+ break;
+ default:
+ data->ret.error = -EINVAL;
+ break;
+ }
+}
+
+static void cppc_ffh_csr_write(void *write_data)
+{
+ struct sbi_cppc_data *data = (struct sbi_cppc_data *)write_data;
+
+ data->ret.error = -EINVAL;
+}
+
+/*
+ * Refer to drivers/acpi/cppc_acpi.c for the description of the functions
+ * below.
+ */
+bool cpc_ffh_supported(void)
+{
+ return true;
+}
+
+int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
+{
+ struct sbi_cppc_data data;
+
+ if (WARN_ON_ONCE(irqs_disabled()))
+ return -EPERM;
+
+ if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_SBI) {
+ if (!cppc_ext_present)
+ return -EINVAL;
+
+ data.reg = FFH_CPPC_SBI_REG(reg->address);
+
+ smp_call_function_single(cpu, sbi_cppc_read, &data, 1);
+
+ *val = data.ret.value;
+
+ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ } else if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_CSR) {
+ data.reg = FFH_CPPC_CSR_NUM(reg->address);
+
+ smp_call_function_single(cpu, cppc_ffh_csr_read, &data, 1);
+
+ *val = data.ret.value;
+
+ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ }
+
+ return -EINVAL;
+}
+
+int cpc_write_ffh(int cpu, struct cpc_reg *reg, u64 val)
+{
+ struct sbi_cppc_data data;
+
+ if (WARN_ON_ONCE(irqs_disabled()))
+ return -EPERM;
+
+ if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_SBI) {
+ if (!cppc_ext_present)
+ return -EINVAL;
+
+ data.reg = FFH_CPPC_SBI_REG(reg->address);
+ data.val = val;
+
+ smp_call_function_single(cpu, sbi_cppc_write, &data, 1);
+
+ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ } else if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_CSR) {
+ data.reg = FFH_CPPC_CSR_NUM(reg->address);
+ data.val = val;
+
+ smp_call_function_single(cpu, cppc_ffh_csr_write, &data, 1);
+
+ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ }
+
+ return -EINVAL;
+}
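
Aside (not part of the diff): the RISC-V CPPC helpers above route a _CPC register either to the SBI CPPC extension or to a CSR based on bits 63:60 of the FFH address, with the register id in the low 32 bits (SBI case) or the CSR number in the low 12 bits (CSR case). A small sketch decoding a hypothetical address the same way; the register id 0x6 is made up for illustration:

	/* ffh_sketch.c - decode a hypothetical RISC-V CPPC FFH register address */
	#include <stdint.h>
	#include <stdio.h>

	#define FFH_TYPE(r)	(((r) >> 60) & 0xf)		/* bits 63:60: 0x1 = SBI, 0x2 = CSR */
	#define FFH_SBI_REG(r)	((uint32_t)((r) & 0xffffffffu))	/* bits 31:0 */
	#define FFH_CSR_NUM(r)	((uint32_t)((r) & 0xfffu))	/* bits 11:0 */

	int main(void)
	{
		uint64_t addr = (0x1ULL << 60) | 0x6;	/* hypothetical _CPC entry */

		if (FFH_TYPE(addr) == 0x1)
			printf("SBI CPPC register %u\n", FFH_SBI_REG(addr));
		else if (FFH_TYPE(addr) == 0x2)
			printf("CSR number 0x%x\n", FFH_CSR_NUM(addr));
		return 0;
	}
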
diff --git a/drivers/acpi/riscv/cpuidle.c b/drivers/acpi/riscv/cpuidle.c
new file mode 100644
index 0000000000000..624f9bbdb58c4
--- /dev/null
+++ b/drivers/acpi/riscv/cpuidle.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024, Ventana Micro Systems Inc
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ *
+ */
+
+#include <linux/acpi.h>
+#include <acpi/processor.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpuidle.h>
+#include <linux/suspend.h>
+#include <asm/cpuidle.h>
+#include <asm/sbi.h>
+#include <asm/suspend.h>
+
+#define RISCV_FFH_LPI_TYPE_MASK GENMASK_ULL(63, 60)
+#define RISCV_FFH_LPI_RSVD_MASK GENMASK_ULL(59, 32)
+
+#define RISCV_FFH_LPI_TYPE_SBI BIT_ULL(60)
+
+static int acpi_cpu_init_idle(unsigned int cpu)
+{
+ int i;
+ struct acpi_lpi_state *lpi;
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+
+ if (unlikely(!pr || !pr->flags.has_lpi))
+ return -EINVAL;
+
+ if (!riscv_sbi_hsm_is_supported())
+ return -ENODEV;
+
+ if (pr->power.count <= 1)
+ return -ENODEV;
+
+ for (i = 1; i < pr->power.count; i++) {
+ u32 state;
+
+ lpi = &pr->power.lpi_states[i];
+
+ /*
+ * Validate Entry Method as per FFH spec.
+ * bits[63:60] should be 0x1
+ * bits[59:32] should be 0x0
+ * bits[31:0] represent a SBI power_state
+ */
+ if (((lpi->address & RISCV_FFH_LPI_TYPE_MASK) != RISCV_FFH_LPI_TYPE_SBI) ||
+ (lpi->address & RISCV_FFH_LPI_RSVD_MASK)) {
+ pr_warn("Invalid LPI entry method %#llx\n", lpi->address);
+ return -EINVAL;
+ }
+
+ state = lpi->address;
+ if (!riscv_sbi_suspend_state_is_valid(state)) {
+ pr_warn("Invalid SBI power state %#x\n", state);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int acpi_processor_ffh_lpi_probe(unsigned int cpu)
+{
+ return acpi_cpu_init_idle(cpu);
+}
+
+int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
+{
+ u32 state = lpi->address;
+
+ if (state & SBI_HSM_SUSP_NON_RET_BIT)
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend,
+ lpi->index,
+ state);
+ else
+ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend,
+ lpi->index,
+ state);
+}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 3b722e4c0f062..d1464324de951 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1760,7 +1760,9 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{"BSG1160", },
{"BSG2150", },
{"CSC3551", },
+ {"CSC3554", },
{"CSC3556", },
+ {"CSC3557", },
{"INT33FE", },
{"INT3515", },
/* Non-conforming _HID for Cirrus Logic already released */
@@ -1841,7 +1843,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
if (dep->honor_dep)
adev->flags.honor_deps = 1;
- adev->dep_unmet++;
+ if (!dep->met)
+ adev->dep_unmet++;
}
}
}
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 808484d112097..889f1c1a1fa92 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -385,18 +385,6 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
},
},
- /*
- * ASUS B1400CEAE hangs on resume from suspend (see
- * https://bugzilla.kernel.org/show_bug.cgi?id=215742).
- */
- {
- .callback = init_default_s3,
- .ident = "ASUS B1400CEAE",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
- },
- },
{},
};
@@ -514,6 +502,7 @@ static void acpi_pm_finish(void)
/**
* acpi_pm_start - Start system PM transition.
+ * @acpi_state: The target ACPI power state to transition to.
*/
static void acpi_pm_start(u32 acpi_state)
{
@@ -552,8 +541,9 @@ static u32 acpi_suspend_states[] = {
};
/**
- * acpi_suspend_begin - Set the target system sleep state to the state
- * associated with given @pm_state, if supported.
+ * acpi_suspend_begin - Set the target system sleep state to the state
+ * associated with given @pm_state, if supported.
+ * @pm_state: The target system power management state.
*/
static int acpi_suspend_begin(suspend_state_t pm_state)
{
@@ -683,10 +673,11 @@ static const struct platform_suspend_ops acpi_suspend_ops = {
};
/**
- * acpi_suspend_begin_old - Set the target system sleep state to the
- * state associated with given @pm_state, if supported, and
- * execute the _PTS control method. This function is used if the
- * pre-ACPI 2.0 suspend ordering has been requested.
+ * acpi_suspend_begin_old - Set the target system sleep state to the
+ * state associated with given @pm_state, if supported, and
+ * execute the _PTS control method. This function is used if the
+ * pre-ACPI 2.0 suspend ordering has been requested.
+ * @pm_state: The target suspend state for the system.
*/
static int acpi_suspend_begin_old(suspend_state_t pm_state)
{
@@ -979,10 +970,11 @@ static const struct platform_hibernation_ops acpi_hibernation_ops = {
};
/**
- * acpi_hibernation_begin_old - Set the target system sleep state to
- * ACPI_STATE_S4 and execute the _PTS control method. This
- * function is used if the pre-ACPI 2.0 suspend ordering has been
- * requested.
+ * acpi_hibernation_begin_old - Set the target system sleep state to
+ * ACPI_STATE_S4 and execute the _PTS control method. This
+ * function is used if the pre-ACPI 2.0 suspend ordering has been
+ * requested.
+ * @stage: The power management event message.
*/
static int acpi_hibernation_begin_old(pm_message_t stage)
{
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index b07f7d091d133..b976e5fc3fbcd 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -253,7 +253,7 @@ int __init_or_acpilib acpi_table_parse_entries_array(
count = acpi_parse_entries_array(id, table_size,
(union fw_table_header *)table_header,
- proc, proc_num, max_entries);
+ 0, proc, proc_num, max_entries);
acpi_put_table(table_header);
return count;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 302dce0b2b504..d67881b50bca2 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -662,14 +662,15 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz,
{
int result;
- tz->thermal_zone = thermal_zone_device_register_with_trips("acpitz",
- trip_table,
- trip_count,
- tz,
- &acpi_thermal_zone_ops,
- NULL,
- passive_delay,
- tz->polling_frequency * 100);
+ if (trip_count)
+ tz->thermal_zone = thermal_zone_device_register_with_trips(
+ "acpitz", trip_table, trip_count, tz,
+ &acpi_thermal_zone_ops, NULL, passive_delay,
+ tz->polling_frequency * 100);
+ else
+ tz->thermal_zone = thermal_tripless_zone_device_register(
+ "acpitz", tz, &acpi_thermal_zone_ops, NULL);
+
if (IS_ERR(tz->thermal_zone))
return PTR_ERR(tz->thermal_zone);
@@ -901,11 +902,8 @@ static int acpi_thermal_add(struct acpi_device *device)
trip++;
}
- if (trip == trip_table) {
+ if (trip == trip_table)
pr_warn(FW_BUG "No valid trip points!\n");
- result = -ENODEV;
- goto free_memory;
- }
result = acpi_thermal_register_thermal_zone(tz, trip_table,
trip - trip_table,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index eca24f41556df..dd6923d37931f 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1708,8 +1708,10 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
- if (offset > buffer->data_size || read_size < sizeof(*hdr))
+ if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+ !IS_ALIGNED(offset, sizeof(u32)))
return 0;
+
if (u) {
if (copy_from_user(object, u + offset, read_size))
return 0;
@@ -6086,9 +6088,7 @@ static void print_binder_node_nilocked(struct seq_file *m,
struct binder_work *w;
int count;
- count = 0;
- hlist_for_each_entry(ref, &node->refs, node_entry)
- count++;
+ count = hlist_count_nodes(&node->refs);
seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
node->debug_id, (u64)node->ptr, (u64)node->cookie,
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index e0e4dc38b6920..2e1f261ec5c89 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -925,7 +925,6 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- unsigned long page_addr;
bool on_lru;
if (!alloc->pages[i].page_ptr)
@@ -933,7 +932,6 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
on_lru = list_lru_del_obj(&binder_freelist,
&alloc->pages[i].lru);
- page_addr = alloc->buffer + i * PAGE_SIZE;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%s: %d: page %d %s\n",
__func__, alloc->pid, i,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 78570684ff68f..6548f10e61d9c 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -666,22 +666,90 @@ static int mobile_lpm_policy = -1;
module_param(mobile_lpm_policy, int, 0644);
MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
-static void ahci_pci_save_initial_config(struct pci_dev *pdev,
- struct ahci_host_priv *hpriv)
+static char *ahci_mask_port_map;
+module_param_named(mask_port_map, ahci_mask_port_map, charp, 0444);
+MODULE_PARM_DESC(mask_port_map,
+ "32-bits port map masks to ignore controllers ports. "
+ "Valid values are: "
+ "\"<mask>\" to apply the same mask to all AHCI controller "
+ "devices, and \"<pci_dev>=<mask>,<pci_dev>=<mask>,...\" to "
+ "specify different masks for the controllers specified, "
+ "where <pci_dev> is the PCI ID of an AHCI controller in the "
+ "form \"domain:bus:dev.func\"");
+
+static void ahci_apply_port_map_mask(struct device *dev,
+ struct ahci_host_priv *hpriv, char *mask_s)
{
- if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA) {
- switch (pdev->device) {
- case 0x1166:
- dev_info(&pdev->dev, "ASM1166 has only six ports\n");
- hpriv->saved_port_map = 0x3f;
+ unsigned int mask;
+
+ if (kstrtouint(mask_s, 0, &mask)) {
+ dev_err(dev, "Invalid port map mask\n");
+ return;
+ }
+
+ hpriv->mask_port_map = mask;
+}
+
+static void ahci_get_port_map_mask(struct device *dev,
+ struct ahci_host_priv *hpriv)
+{
+ char *param, *end, *str, *mask_s;
+ char *name;
+
+ if (!strlen(ahci_mask_port_map))
+ return;
+
+ str = kstrdup(ahci_mask_port_map, GFP_KERNEL);
+ if (!str)
+ return;
+
+ /* Handle single mask case */
+ if (!strchr(str, '=')) {
+ ahci_apply_port_map_mask(dev, hpriv, str);
+ goto free;
+ }
+
+ /*
+ * Mask list case: parse the parameter to apply the mask only if
+ * the device name matches.
+ */
+ param = str;
+ end = param + strlen(param);
+ while (param && param < end && *param) {
+ name = param;
+ param = strchr(name, '=');
+ if (!param)
break;
- case 0x1064:
- dev_info(&pdev->dev, "ASM1064 has only four ports\n");
- hpriv->saved_port_map = 0xf;
+
+ *param = '\0';
+ param++;
+ if (param >= end)
break;
+
+ if (strcmp(dev_name(dev), name) != 0) {
+ param = strchr(param, ',');
+ if (param)
+ param++;
+ continue;
+ }
+
+ mask_s = param;
+ param = strchr(mask_s, ',');
+ if (param) {
+ *param = '\0';
+ param++;
}
+
+ ahci_apply_port_map_mask(dev, hpriv, mask_s);
}
+free:
+ kfree(str);
+}
+
+static void ahci_pci_save_initial_config(struct pci_dev *pdev,
+ struct ahci_host_priv *hpriv)
+{
if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
dev_info(&pdev->dev, "JMB361 has only one port\n");
hpriv->saved_port_map = 1;
@@ -701,6 +769,10 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
"Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
}
+ /* Handle port map masks passed as module parameter. */
+ if (ahci_mask_port_map)
+ ahci_get_port_map_mask(&pdev->dev, hpriv);
+
ahci_save_initial_config(&pdev->dev, hpriv);
}
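
A rough userspace sketch of splitting a "<pci_dev>=<mask>,<pci_dev>=<mask>" string of the shape accepted above; strtoul stands in for the kernel's kstrtouint, and the PCI names and mask values are invented for illustration, not taken from the driver:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        /* Hypothetical parameter value; real PCI IDs depend on the system. */
        char buf[] = "0000:00:17.0=0x1,0000:01:00.0=0x3";
        char *param = buf;

        while (param && *param) {
                char *name = param;
                char *eq = strchr(name, '=');

                if (!eq)
                        break;
                *eq = '\0';

                char *mask_s = eq + 1;
                char *comma = strchr(mask_s, ',');

                if (comma)
                        *comma = '\0';

                printf("device %s -> mask 0x%lx\n",
                       name, strtoul(mask_s, NULL, 0));
                param = comma ? comma + 1 : NULL;
        }
        return 0;
}

On a booted system this would correspond to something like ahci.mask_port_map=0000:00:17.0=0x1 on the kernel command line (values illustrative).
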
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index d4a626f87963b..79a8b0aa37bf3 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -30,7 +30,6 @@
#define ST_AHCI_OOBR_CIMAX_SHIFT 0
struct st_ahci_drv_data {
- struct platform_device *ahci;
struct reset_control *pwr;
struct reset_control *sw_rst;
struct reset_control *pwr_rst;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index be3412cdb22e7..c449d60d9bb96 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2539,7 +2539,7 @@ static void ata_dev_config_cdl(struct ata_device *dev)
bool cdl_enabled;
u64 val;
- if (ata_id_major_version(dev->id) < 12)
+ if (ata_id_major_version(dev->id) < 11)
goto not_supported;
if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE) ||
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index b0d6e69c4a5b2..214b935c2ced7 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -712,8 +712,10 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
ehc->saved_ncq_enabled |= 1 << devno;
/* If we are resuming, wake up the device */
- if (ap->pflags & ATA_PFLAG_RESUMING)
+ if (ap->pflags & ATA_PFLAG_RESUMING) {
+ dev->flags |= ATA_DFLAG_RESUMING;
ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE;
+ }
}
}
@@ -3169,6 +3171,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
return 0;
err:
+ dev->flags &= ~ATA_DFLAG_RESUMING;
*r_failed_dev = dev;
return rc;
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 0a0f483124c3a..e954976891a9f 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -4730,6 +4730,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
struct ata_link *link;
struct ata_device *dev;
unsigned long flags;
+ bool do_resume;
int ret = 0;
mutex_lock(&ap->scsi_scan_mutex);
@@ -4744,25 +4745,34 @@ void ata_scsi_dev_rescan(struct work_struct *work)
* bail out.
*/
if (ap->pflags & ATA_PFLAG_SUSPENDED)
- goto unlock;
+ goto unlock_ap;
if (!sdev)
continue;
if (scsi_device_get(sdev))
continue;
+ do_resume = dev->flags & ATA_DFLAG_RESUMING;
+
spin_unlock_irqrestore(ap->lock, flags);
+ if (do_resume) {
+ ret = scsi_resume_device(sdev);
+ if (ret == -EWOULDBLOCK)
+ goto unlock_scan;
+ dev->flags &= ~ATA_DFLAG_RESUMING;
+ }
ret = scsi_rescan_device(sdev);
scsi_device_put(sdev);
spin_lock_irqsave(ap->lock, flags);
if (ret)
- goto unlock;
+ goto unlock_ap;
}
}
-unlock:
+unlock_ap:
spin_unlock_irqrestore(ap->lock, flags);
+unlock_scan:
mutex_unlock(&ap->scsi_scan_mutex);
/* Reschedule with a delay if scsi_rescan_device() returned an error */
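
The rework above drops ap->lock before the sleeping scsi_resume_device()/scsi_rescan_device() calls and adds a second unwind label for the case where only the outer mutex is still held. A minimal sketch of that unlock-around-a-blocking-call shape, with pthread mutexes standing in for the kernel primitives and invented helper names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER; /* ~scsi_scan_mutex */
static pthread_mutex_t ap_lock = PTHREAD_MUTEX_INITIALIZER;   /* ~ap->lock */

static int blocking_call(void) { return 0; }     /* placeholder for a sleeping call */

static int rescan(void)
{
        int ret;

        pthread_mutex_lock(&scan_lock);
        pthread_mutex_lock(&ap_lock);

        /* Cannot sleep while holding ap_lock: drop it around the blocking call. */
        pthread_mutex_unlock(&ap_lock);
        ret = blocking_call();
        if (ret)
                goto unlock_scan;                 /* ap_lock already released */
        pthread_mutex_lock(&ap_lock);

        pthread_mutex_unlock(&ap_lock);           /* the unlock_ap equivalent */
unlock_scan:
        pthread_mutex_unlock(&scan_lock);
        return ret;
}

int main(void)
{
        printf("rescan -> %d\n", rescan());
        return 0;
}
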
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 17f6ccee53c7c..88b2e9817f49d 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -1188,7 +1188,7 @@ static int pata_macio_attach(struct macio_dev *mdev,
return rc;
}
-static int pata_macio_detach(struct macio_dev *mdev)
+static void pata_macio_detach(struct macio_dev *mdev)
{
struct ata_host *host = macio_get_drvdata(mdev);
struct pata_macio_priv *priv = host->private_data;
@@ -1203,8 +1203,6 @@ static int pata_macio_detach(struct macio_dev *mdev)
ata_host_detach(host);
unlock_media_bay(priv->mdev->media_bay);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1373,9 +1371,6 @@ static struct pci_driver pata_macio_pci_driver = {
.suspend = pata_macio_pci_suspend,
.resume = pata_macio_pci_resume,
#endif
- .driver = {
- .owner = THIS_MODULE,
- },
};
MODULE_DEVICE_TABLE(pci, pata_macio_pci_match);
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index 400b22ee99c33..4c270999ba3cc 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -200,7 +200,10 @@ int gemini_sata_start_bridge(struct sata_gemini *sg, unsigned int bridge)
pclk = sg->sata0_pclk;
else
pclk = sg->sata1_pclk;
- clk_enable(pclk);
+ ret = clk_enable(pclk);
+ if (ret)
+ return ret;
+
msleep(10);
/* Do not keep clocking a bridge that is not online */
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index e82786c63fbd7..9bec0aee92e04 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -787,37 +787,6 @@ static const struct ata_port_info mv_port_info[] = {
},
};
-static const struct pci_device_id mv_pci_tbl[] = {
- { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
- { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
- { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
- { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
- /* RocketRAID 1720/174x have different identifiers */
- { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
- { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
- { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
-
- { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
- { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
- { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
- { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
- { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
-
- { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
-
- /* Adaptec 1430SA */
- { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
-
- /* Marvell 7042 support */
- { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
-
- /* Highpoint RocketRAID PCIe series */
- { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
- { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
-
- { } /* terminate list */
-};
-
static const struct mv_hw_ops mv5xxx_ops = {
.phy_errata = mv5_phy_errata,
.enable_leds = mv5_enable_leds,
@@ -4303,6 +4272,36 @@ static int mv_pci_init_one(struct pci_dev *pdev,
static int mv_pci_device_resume(struct pci_dev *pdev);
#endif
+static const struct pci_device_id mv_pci_tbl[] = {
+ { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
+ { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
+ { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
+ { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
+ /* RocketRAID 1720/174x have different identifiers */
+ { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
+ { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
+ { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
+
+ { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
+ { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
+ { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
+ { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
+ { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
+
+ { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
+
+ /* Adaptec 1430SA */
+ { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
+
+ /* Marvell 7042 support */
+ { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
+
+ /* Highpoint RocketRAID PCIe series */
+ { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
+ { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
+
+ { } /* terminate list */
+};
static struct pci_driver mv_pci_driver = {
.name = DRV_NAME,
@@ -4315,6 +4314,7 @@ static struct pci_driver mv_pci_driver = {
#endif
};
+MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
/**
* mv_print_info - Dump key info to kernel log for perusal.
@@ -4487,7 +4487,6 @@ static void __exit mv_exit(void)
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index b51d7a9d0d90c..a482741eb181f 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -957,8 +957,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
offset -= (idx * window_size);
idx++;
- dist = ((long) (window_size - (offset + size))) >= 0 ? size :
- (long) (window_size - offset);
+ dist = min(size, window_size - offset);
memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
psource += dist;
@@ -1005,8 +1004,7 @@ static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
readl(mmio + PDC_DIMM_WINDOW_CTLR);
offset -= (idx * window_size);
idx++;
- dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
- (long) (window_size - offset);
+ dist = min(size, window_size - offset);
memcpy_toio(dimm_mmio + offset / 4, psource, dist);
writel(0x01, mmio + PDC_GENERAL_CTLR);
readl(mmio + PDC_GENERAL_CTLR);
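
The ternary removed above clamps a copy length to what remains in the DIMM window; min(size, window_size - offset) expresses the same clamp. A tiny check with made-up sizes (the real window size is hardware-defined):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        /* Illustrative numbers only. */
        unsigned long window_size = 0x100000;    /* 1 MiB window */
        unsigned long offset = 0xff000;
        unsigned long size = 0x2000;             /* request crosses the window end */

        unsigned long old_style = ((long)(window_size - (offset + size)) >= 0) ?
                                  size : window_size - offset;
        unsigned long new_style = MIN(size, window_size - offset);

        printf("old=%#lx new=%#lx\n", old_style, new_style); /* both 0x1000 */
        return 0;
}
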
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index f1e79263fe61e..23b8cba4a2a3b 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -898,6 +898,37 @@ err:
return rc;
}
+static unsigned int cpu_map_shared_cache(bool online, unsigned int cpu,
+ cpumask_t **map)
+{
+ struct cacheinfo *llc, *sib_llc;
+ unsigned int sibling;
+
+ if (!last_level_cache_is_valid(cpu))
+ return 0;
+
+ llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
+
+ if (llc->type != CACHE_TYPE_DATA && llc->type != CACHE_TYPE_UNIFIED)
+ return 0;
+
+ if (online) {
+ *map = &llc->shared_cpu_map;
+ return cpumask_weight(*map);
+ }
+
+ /* shared_cpu_map of offlined CPU will be cleared, so use sibling map */
+ for_each_cpu(sibling, &llc->shared_cpu_map) {
+ if (sibling == cpu || !last_level_cache_is_valid(sibling))
+ continue;
+ sib_llc = per_cpu_cacheinfo_idx(sibling, cache_leaves(sibling) - 1);
+ *map = &sib_llc->shared_cpu_map;
+ return cpumask_weight(*map);
+ }
+
+ return 0;
+}
+
/*
* Calculate the size of the per-CPU data cache slice. This can be
* used to estimate the size of the data cache slice that can be used
@@ -929,28 +960,31 @@ static void update_per_cpu_data_slice_size_cpu(unsigned int cpu)
ci->per_cpu_data_slice_size = llc->size / nr_shared;
}
-static void update_per_cpu_data_slice_size(bool cpu_online, unsigned int cpu)
+static void update_per_cpu_data_slice_size(bool cpu_online, unsigned int cpu,
+ cpumask_t *cpu_map)
{
unsigned int icpu;
- for_each_online_cpu(icpu) {
+ for_each_cpu(icpu, cpu_map) {
if (!cpu_online && icpu == cpu)
continue;
update_per_cpu_data_slice_size_cpu(icpu);
+ setup_pcp_cacheinfo(icpu);
}
}
static int cacheinfo_cpu_online(unsigned int cpu)
{
int rc = detect_cache_attributes(cpu);
+ cpumask_t *cpu_map;
if (rc)
return rc;
rc = cache_add_dev(cpu);
if (rc)
goto err;
- update_per_cpu_data_slice_size(true, cpu);
- setup_pcp_cacheinfo();
+ if (cpu_map_shared_cache(true, cpu, &cpu_map))
+ update_per_cpu_data_slice_size(true, cpu, cpu_map);
return 0;
err:
free_cache_attributes(cpu);
@@ -959,12 +993,16 @@ err:
static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
+ cpumask_t *cpu_map;
+ unsigned int nr_shared;
+
+ nr_shared = cpu_map_shared_cache(false, cpu, &cpu_map);
if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
cpu_cache_sysfs_exit(cpu);
free_cache_attributes(cpu);
- update_per_cpu_data_slice_size(false, cpu);
- setup_pcp_cacheinfo();
+ if (nr_shared > 1)
+ update_per_cpu_data_slice_size(false, cpu, cpu_map);
return 0;
}
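
update_per_cpu_data_slice_size_cpu() divides the last-level cache size by the number of CPUs sharing it (llc->size / nr_shared in the context above); the change only walks that shared set instead of every online CPU. With made-up numbers:

#include <stdio.h>

int main(void)
{
        /* Hypothetical LLC: 16 MiB shared by 8 CPUs. */
        unsigned int llc_size = 16u * 1024 * 1024;
        unsigned int nr_shared = 8;

        printf("per-CPU data slice: %u KiB\n",
               llc_size / nr_shared / 1024);      /* 2048 KiB */
        return 0;
}
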
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 7dbf14a1d9157..741497324d78a 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -751,7 +751,7 @@ static int __component_add(struct device *dev, const struct component_ops *ops,
* component_bind_all(). See also &struct component_ops.
*
* @subcomponent must be nonzero and is used to differentiate between multiple
- * components registerd on the same device @dev. These components are match
+ * components registered on the same device @dev. These components are matched
* using component_match_add_typed().
*
* The component needs to be unregistered at driver unload/disconnect by
@@ -781,7 +781,7 @@ EXPORT_SYMBOL_GPL(component_add_typed);
* The component needs to be unregistered at driver unload/disconnect by
* calling component_del().
*
- * See also component_add_typed() for a variant that allows multipled different
+ * See also component_add_typed() for a variant that allows multiple different
* components on the same device.
*/
int component_add(struct device *dev, const struct component_ops *ops)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 9828da9b933cb..5f4e03336e68e 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -44,6 +44,7 @@ static bool fw_devlink_is_permissive(void);
static void __fw_devlink_link_to_consumers(struct device *dev);
static bool fw_devlink_drv_reg_done;
static bool fw_devlink_best_effort;
+static struct workqueue_struct *device_link_wq;
/**
* __fwnode_link_add - Create a link between two fwnode_handles.
@@ -92,12 +93,13 @@ static int __fwnode_link_add(struct fwnode_handle *con,
return 0;
}
-int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
+int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup,
+ u8 flags)
{
int ret;
mutex_lock(&fwnode_link_lock);
- ret = __fwnode_link_add(con, sup, 0);
+ ret = __fwnode_link_add(con, sup, flags);
mutex_unlock(&fwnode_link_lock);
return ret;
}
@@ -532,12 +534,26 @@ static void devlink_dev_release(struct device *dev)
/*
* It may take a while to complete this work because of the SRCU
* synchronization in device_link_release_fn() and if the consumer or
- * supplier devices get deleted when it runs, so put it into the "long"
- * workqueue.
+ * supplier devices get deleted when it runs, so put it into the
+ * dedicated workqueue.
*/
- queue_work(system_long_wq, &link->rm_work);
+ queue_work(device_link_wq, &link->rm_work);
}
+/**
+ * device_link_wait_removal - Wait for ongoing devlink removal jobs to terminate
+ */
+void device_link_wait_removal(void)
+{
+ /*
+ * devlink removal jobs are queued in the dedicated work queue.
+ * To be sure that all removal jobs are terminated, ensure that any
+ * scheduled work has run to completion.
+ */
+ flush_workqueue(device_link_wq);
+}
+EXPORT_SYMBOL_GPL(device_link_wait_removal);
+
static struct class devlink_class = {
.name = "devlink",
.dev_groups = devlink_groups,
@@ -1011,7 +1027,8 @@ static struct fwnode_handle *fwnode_links_check_suppliers(
return NULL;
list_for_each_entry(link, &fwnode->suppliers, c_hook)
- if (!(link->flags & FWLINK_FLAG_CYCLE))
+ if (!(link->flags &
+ (FWLINK_FLAG_CYCLE | FWLINK_FLAG_IGNORE)))
return link->supplier;
return NULL;
@@ -1871,6 +1888,7 @@ static void fw_devlink_unblock_consumers(struct device *dev)
device_links_write_unlock();
}
+#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev)
static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
{
@@ -1902,6 +1920,63 @@ static bool fwnode_ancestor_init_without_drv(struct fwnode_handle *fwnode)
}
/**
+ * fwnode_is_ancestor_of - Test if @ancestor is ancestor of @child
+ * @ancestor: Firmware which is tested for being an ancestor
+ * @child: Firmware which is tested for being the child
+ *
+ * A node is considered an ancestor of itself too.
+ *
+ * Return: true if @ancestor is an ancestor of @child. Otherwise, returns false.
+ */
+static bool fwnode_is_ancestor_of(const struct fwnode_handle *ancestor,
+ const struct fwnode_handle *child)
+{
+ struct fwnode_handle *parent;
+
+ if (IS_ERR_OR_NULL(ancestor))
+ return false;
+
+ if (child == ancestor)
+ return true;
+
+ fwnode_for_each_parent_node(child, parent) {
+ if (parent == ancestor) {
+ fwnode_handle_put(parent);
+ return true;
+ }
+ }
+ return false;
+}
+
+/**
+ * fwnode_get_next_parent_dev - Find device of closest ancestor fwnode
+ * @fwnode: firmware node
+ *
+ * Given a firmware node (@fwnode), this function finds its closest ancestor
+ * firmware node that has a corresponding struct device and returns that struct
+ * device.
+ *
+ * The caller is responsible for calling put_device() on the returned device
+ * pointer.
+ *
+ * Return: a pointer to the device of the @fwnode's closest ancestor.
+ */
+static struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent;
+ struct device *dev;
+
+ fwnode_for_each_parent_node(fwnode, parent) {
+ dev = get_dev_from_fwnode(parent);
+ if (dev) {
+ fwnode_handle_put(parent);
+ return dev;
+ }
+ }
+ return NULL;
+}
+
+/**
* __fw_devlink_relax_cycles - Relax and mark dependency cycles.
* @con: Potential consumer device.
* @sup_handle: Potential supplier's fwnode.
@@ -1962,6 +2037,9 @@ static bool __fw_devlink_relax_cycles(struct device *con,
}
list_for_each_entry(link, &sup_handle->suppliers, c_hook) {
+ if (link->flags & FWLINK_FLAG_IGNORE)
+ continue;
+
if (__fw_devlink_relax_cycles(con, link->supplier)) {
__fwnode_link_cycle(link);
ret = true;
@@ -2040,6 +2118,9 @@ static int fw_devlink_create_devlink(struct device *con,
int ret = 0;
u32 flags;
+ if (link->flags & FWLINK_FLAG_IGNORE)
+ return 0;
+
if (con->fwnode == link->consumer)
flags = fw_devlink_get_flags(link->flags);
else
@@ -4098,9 +4179,14 @@ int __init devices_init(void)
sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
if (!sysfs_dev_char_kobj)
goto char_kobj_err;
+ device_link_wq = alloc_workqueue("device_link_wq", 0, 0);
+ if (!device_link_wq)
+ goto wq_err;
return 0;
+ wq_err:
+ kobject_put(sysfs_dev_char_kobj);
char_kobj_err:
kobject_put(sysfs_dev_block_kobj);
block_kobj_err:
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 0b33e81f9c9b6..56fba44ba391a 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -144,7 +144,7 @@ static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#endif /* CONFIG_HOTPLUG_CPU */
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_DUMP
#include <linux/kexec.h>
static ssize_t crash_notes_show(struct device *dev,
@@ -189,14 +189,14 @@ static const struct attribute_group crash_note_cpu_attr_group = {
#endif
static const struct attribute_group *common_cpu_attr_groups[] = {
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_DUMP
&crash_note_cpu_attr_group,
#endif
NULL
};
static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_DUMP
&crash_note_cpu_attr_group,
#endif
NULL
@@ -366,7 +366,7 @@ static int cpu_uevent(const struct device *dev, struct kobj_uevent_env *env)
}
#endif
-struct bus_type cpu_subsys = {
+const struct bus_type cpu_subsys = {
.name = "cpu",
.dev_name = "cpu",
.match = cpu_subsys_match,
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 85152537dbf12..83d352394fdf4 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -313,7 +313,7 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
mutex_lock(&deferred_probe_mutex);
list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
- dev_info(p->device, "deferred probe pending: %s", p->deferred_probe_reason ?: "(reason unknown)\n");
+ dev_warn(p->device, "deferred probe pending: %s", p->deferred_probe_reason ?: "(reason unknown)\n");
mutex_unlock(&deferred_probe_mutex);
fw_devlink_probing_done();
@@ -397,13 +397,12 @@ bool device_is_bound(struct device *dev)
static void driver_bound(struct device *dev)
{
if (device_is_bound(dev)) {
- pr_warn("%s: device %s already bound\n",
- __func__, kobject_name(&dev->kobj));
+ dev_warn(dev, "%s: device already bound\n", __func__);
return;
}
- pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
- __func__, dev_name(dev));
+ dev_dbg(dev, "driver: '%s': %s: bound to device\n", dev->driver->name,
+ __func__);
klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
device_links_driver_bound(dev);
@@ -587,13 +586,13 @@ static int call_driver_probe(struct device *dev, struct device_driver *drv)
break;
case -ENODEV:
case -ENXIO:
- pr_debug("%s: probe of %s rejects match %d\n",
- drv->name, dev_name(dev), ret);
+ dev_dbg(dev, "probe with driver %s rejects match %d\n",
+ drv->name, ret);
break;
default:
/* driver matched but the probe failed */
- pr_warn("%s: probe of %s failed with error %d\n",
- drv->name, dev_name(dev), ret);
+ dev_err(dev, "probe with driver %s failed with error %d\n",
+ drv->name, ret);
break;
}
@@ -620,8 +619,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
if (link_ret == -EPROBE_DEFER)
return link_ret;
- pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
- drv->bus->name, __func__, drv->name, dev_name(dev));
+ dev_dbg(dev, "bus: '%s': %s: probing driver %s with device\n",
+ drv->bus->name, __func__, drv->name);
if (!list_empty(&dev->devres_head)) {
dev_crit(dev, "Resources present before probing\n");
ret = -EBUSY;
@@ -644,8 +643,7 @@ re_probe:
ret = driver_sysfs_add(dev);
if (ret) {
- pr_err("%s: driver_sysfs_add(%s) failed\n",
- __func__, dev_name(dev));
+ dev_err(dev, "%s: driver_sysfs_add failed\n", __func__);
goto sysfs_failed;
}
@@ -706,8 +704,8 @@ re_probe:
dev->pm_domain->sync(dev);
driver_bound(dev);
- pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
- drv->bus->name, __func__, dev_name(dev), drv->name);
+ dev_dbg(dev, "bus: '%s': %s: bound device to driver %s\n",
+ drv->bus->name, __func__, drv->name);
goto done;
dev_sysfs_state_synced_failed:
@@ -786,8 +784,8 @@ static int __driver_probe_device(struct device_driver *drv, struct device *dev)
return -EBUSY;
dev->can_match = true;
- pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
- drv->bus->name, __func__, dev_name(dev), drv->name);
+ dev_dbg(dev, "bus: '%s': %s: matched device with driver %s\n",
+ drv->bus->name, __func__, drv->name);
pm_runtime_get_suppliers(dev);
if (dev->parent)
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index ea28102d421eb..da8ca01d011c3 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -551,12 +551,16 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
file_size_ptr,
READING_FIRMWARE);
if (rc < 0) {
- if (rc != -ENOENT)
- dev_warn(device, "loading %s failed with error %d\n",
- path, rc);
- else
- dev_dbg(device, "loading %s failed for no such file or directory.\n",
- path);
+ if (!(fw_priv->opt_flags & FW_OPT_NO_WARN)) {
+ if (rc != -ENOENT)
+ dev_warn(device,
+ "loading %s failed with error %d\n",
+ path, rc);
+ else
+ dev_dbg(device,
+ "loading %s failed for no such file or directory.\n",
+ path);
+ }
continue;
}
size = rc;
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 14f964a7719bd..c0436f46cfb70 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -188,6 +188,7 @@ static int memory_block_online(struct memory_block *mem)
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_vmemmap_pages = 0;
+ struct memory_notify arg;
struct zone *zone;
int ret;
@@ -207,9 +208,19 @@ static int memory_block_online(struct memory_block *mem)
if (mem->altmap)
nr_vmemmap_pages = mem->altmap->free;
+ arg.altmap_start_pfn = start_pfn;
+ arg.altmap_nr_pages = nr_vmemmap_pages;
+ arg.start_pfn = start_pfn + nr_vmemmap_pages;
+ arg.nr_pages = nr_pages - nr_vmemmap_pages;
mem_hotplug_begin();
+ ret = memory_notify(MEM_PREPARE_ONLINE, &arg);
+ ret = notifier_to_errno(ret);
+ if (ret)
+ goto out_notifier;
+
if (nr_vmemmap_pages) {
- ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
+ ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages,
+ zone, mem->altmap->inaccessible);
if (ret)
goto out;
}
@@ -231,7 +242,11 @@ static int memory_block_online(struct memory_block *mem)
nr_vmemmap_pages);
mem->zone = zone;
+ mem_hotplug_done();
+ return ret;
out:
+ memory_notify(MEM_FINISH_OFFLINE, &arg);
+out_notifier:
mem_hotplug_done();
return ret;
}
@@ -244,6 +259,7 @@ static int memory_block_offline(struct memory_block *mem)
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_vmemmap_pages = 0;
+ struct memory_notify arg;
int ret;
if (!mem->zone)
@@ -275,6 +291,11 @@ static int memory_block_offline(struct memory_block *mem)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
mem->zone = NULL;
+ arg.altmap_start_pfn = start_pfn;
+ arg.altmap_nr_pages = nr_vmemmap_pages;
+ arg.start_pfn = start_pfn + nr_vmemmap_pages;
+ arg.nr_pages = nr_pages - nr_vmemmap_pages;
+ memory_notify(MEM_FINISH_OFFLINE, &arg);
out:
mem_hotplug_done();
return ret;
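
The memory_notify argument above splits a memory block into the vmemmap (altmap) head and the usable remainder: the notifier sees the pages that will actually be onlined or offlined. A small numeric illustration with hypothetical values:

#include <stdio.h>

int main(void)
{
        /* Hypothetical 128 MiB block with 4 KiB pages: 32768 PFNs. */
        unsigned long start_pfn = 0x80000;
        unsigned long nr_pages = 32768;
        unsigned long nr_vmemmap_pages = 512;     /* pages consumed by the memmap */

        printf("altmap: pfn %#lx, %lu pages\n", start_pfn, nr_vmemmap_pages);
        printf("online: pfn %#lx, %lu pages\n",
               start_pfn + nr_vmemmap_pages, nr_pages - nr_vmemmap_pages);
        return 0;
}
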
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 1c05640461dd1..eb72580288e62 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -126,7 +126,7 @@ static void node_access_release(struct device *dev)
}
static struct node_access_nodes *node_init_node_access(struct node *node,
- unsigned int access)
+ enum access_coordinate_class access)
{
struct node_access_nodes *access_node;
struct device *dev;
@@ -191,7 +191,7 @@ static struct attribute *access_attrs[] = {
* @access: The access class the for the given attributes
*/
void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
- unsigned int access)
+ enum access_coordinate_class access)
{
struct node_access_nodes *c;
struct node *node;
@@ -215,6 +215,7 @@ void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
}
}
}
+EXPORT_SYMBOL_GPL(node_set_perf_attrs);
/**
* struct node_cache_info - Internal tracking for memory node caches
@@ -689,7 +690,7 @@ int register_cpu_under_node(unsigned int cpu, unsigned int nid)
*/
int register_memory_node_under_compute_node(unsigned int mem_nid,
unsigned int cpu_nid,
- unsigned int access)
+ enum access_coordinate_class access)
{
struct node *init_node, *targ_node;
struct node_access_nodes *initiator, *target;
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 0d01890160f3f..11f5fdf65b9ef 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -174,8 +174,8 @@ static int platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
if (!datap)
return -ENOMEM;
- datap->devid = ida_simple_get(&platform_msi_devid_ida,
- 0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
+ datap->devid = ida_alloc_max(&platform_msi_devid_ida,
+ (1 << DEV_ID_SHIFT) - 1, GFP_KERNEL);
if (datap->devid < 0) {
err = datap->devid;
kfree(datap);
@@ -193,7 +193,7 @@ static void platform_msi_free_priv_data(struct device *dev)
struct platform_msi_priv_data *data = dev->msi.data->platform_data;
dev->msi.data->platform_data = NULL;
- ida_simple_remove(&platform_msi_devid_ida, data->devid);
+ ida_free(&platform_msi_devid_ida, data->devid);
kfree(data);
}
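
ida_simple_get() takes an exclusive upper bound while ida_alloc_max() takes an inclusive maximum, which is why 1 << DEV_ID_SHIFT becomes (1 << DEV_ID_SHIFT) - 1 above. A quick bound check; the shift value here is assumed for illustration, not read from the driver:

#include <stdio.h>

int main(void)
{
        unsigned int dev_id_shift = 21;           /* assumed value */
        unsigned int exclusive_end = 1u << dev_id_shift;
        unsigned int inclusive_max = exclusive_end - 1;

        printf("ids allowed: 0..%u (end %u is excluded)\n",
               inclusive_max, exclusive_end);
        return 0;
}
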
diff --git a/drivers/base/property.c b/drivers/base/property.c
index a1b01ab420528..7324a704a9a11 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -7,15 +7,16 @@
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/export.h>
-#include <linux/kernel.h>
+#include <linux/kconfig.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_graph.h>
-#include <linux/of_irq.h>
#include <linux/property.h>
#include <linux/phy.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
struct fwnode_handle *__dev_fwnode(struct device *dev)
{
@@ -700,34 +701,6 @@ struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
/**
- * fwnode_get_next_parent_dev - Find device of closest ancestor fwnode
- * @fwnode: firmware node
- *
- * Given a firmware node (@fwnode), this function finds its closest ancestor
- * firmware node that has a corresponding struct device and returns that struct
- * device.
- *
- * The caller is responsible for calling put_device() on the returned device
- * pointer.
- *
- * Return: a pointer to the device of the @fwnode's closest ancestor.
- */
-struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwnode)
-{
- struct fwnode_handle *parent;
- struct device *dev;
-
- fwnode_for_each_parent_node(fwnode, parent) {
- dev = get_dev_from_fwnode(parent);
- if (dev) {
- fwnode_handle_put(parent);
- return dev;
- }
- }
- return NULL;
-}
-
-/**
* fwnode_count_parents - Return the number of parents a node has
* @fwnode: The node the parents of which are to be counted
*
@@ -774,34 +747,6 @@ struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_get_nth_parent);
/**
- * fwnode_is_ancestor_of - Test if @ancestor is ancestor of @child
- * @ancestor: Firmware which is tested for being an ancestor
- * @child: Firmware which is tested for being the child
- *
- * A node is considered an ancestor of itself too.
- *
- * Return: true if @ancestor is an ancestor of @child. Otherwise, returns false.
- */
-bool fwnode_is_ancestor_of(const struct fwnode_handle *ancestor, const struct fwnode_handle *child)
-{
- struct fwnode_handle *parent;
-
- if (IS_ERR_OR_NULL(ancestor))
- return false;
-
- if (child == ancestor)
- return true;
-
- fwnode_for_each_parent_node(child, parent) {
- if (parent == ancestor) {
- fwnode_handle_put(parent);
- return true;
- }
- }
- return false;
-}
-
-/**
* fwnode_get_next_child_node - Return the next child node handle for a node
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the node's child nodes or a %NULL handle.
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index 41edd6a430eb4..55999a50ccc0b 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -112,7 +112,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
unsigned long *entry, *lower, *upper;
unsigned long lower_index, lower_last;
unsigned long upper_index, upper_last;
- int ret;
+ int ret = 0;
lower = NULL;
upper = NULL;
@@ -145,7 +145,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
upper_index = max + 1;
upper_last = mas.last;
- upper = kmemdup(&entry[max + 1],
+ upper = kmemdup(&entry[max - mas.index + 1],
((mas.last - max) *
sizeof(unsigned long)),
map->alloc_flags);
@@ -244,7 +244,7 @@ static int regcache_maple_sync(struct regmap *map, unsigned int min,
unsigned long lmin = min;
unsigned long lmax = max;
unsigned int r, v, sync_start;
- int ret;
+ int ret = 0;
bool sync_needed = false;
map->cache_bypass = true;
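
The kmemdup() fix above accounts for maple-tree spans that do not start at register 0: within a span beginning at mas.index, the cached value for register r lives at entry[r - mas.index], so the surviving upper part starts at entry[max - mas.index + 1]. A small array demonstration with invented registers and values:

#include <stdio.h>

int main(void)
{
        /* Invented span: registers 10..19 cached in one array. */
        unsigned long entry[10];
        unsigned long mas_index = 10, mas_last = 19;
        unsigned long max = 14;                   /* drop registers up to 14 */

        for (unsigned long r = mas_index; r <= mas_last; r++)
                entry[r - mas_index] = 100 + r;   /* fake cached values */

        /* The kept upper part starts at register max + 1. */
        unsigned long *upper = &entry[max - mas_index + 1];

        printf("first kept register %lu has value %lu\n", max + 1, upper[0]);
        return 0;
}
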
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 36512fb75a201..eb6eb25b343ba 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -6,10 +6,21 @@
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
*/
+#include <linux/container_of.h>
#include <linux/device.h>
-#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/kstrtox.h>
+#include <linux/list.h>
#include <linux/property.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
#include "base.h"
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 1b399ec8c07d1..25c9d85667f1a 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2787,7 +2787,6 @@ do_request:
pending = set_next_request();
spin_unlock_irq(&floppy_lock);
if (!pending) {
- do_floppy = NULL;
unlock_fdc();
return;
}
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 71c39bcd872c7..ed33cf7192d21 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1965,10 +1965,10 @@ static int null_add_dev(struct nullb_device *dev)
out_ida_free:
ida_free(&nullb_indexes, nullb->index);
-out_cleanup_zone:
- null_free_zoned_dev(dev);
out_cleanup_disk:
put_disk(nullb->disk);
+out_cleanup_zone:
+ null_free_zoned_dev(dev);
out_cleanup_tags:
if (nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
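
Goto-based unwind runs labels top to bottom from wherever the failure jumped in, so the label order above encodes the teardown order. A minimal sketch of the pattern with invented setup steps:

#include <stdio.h>

static int setup_a(void) { return 0; }
static int setup_b(void) { return -1; }           /* pretend this step fails */

static int bring_up(void)
{
        int ret;

        ret = setup_a();
        if (ret)
                goto out;
        ret = setup_b();
        if (ret)
                goto undo_a;

        return 0;

undo_a:
        printf("undo a\n");                       /* teardown in reverse order */
out:
        return ret;
}

int main(void)
{
        printf("bring_up -> %d\n", bring_up());
        return 0;
}
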
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index c99dd6698977e..5286cb8e0824d 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -28,7 +28,7 @@
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 55af4efd79835..8237b08c49d86 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/crypto.h>
+#include <linux/vmalloc.h>
#include "zcomp.h"
@@ -37,7 +38,7 @@ static void zcomp_strm_free(struct zcomp_strm *zstrm)
{
if (!IS_ERR_OR_NULL(zstrm->tfm))
crypto_free_comp(zstrm->tfm);
- free_pages((unsigned long)zstrm->buffer, 1);
+ vfree(zstrm->buffer);
zstrm->tfm = NULL;
zstrm->buffer = NULL;
}
@@ -53,7 +54,7 @@ static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
* allocate 2 pages. 1 for compressed data, plus 1 extra for the
* case when compressed size is larger than the original one
*/
- zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+ zstrm->buffer = vzalloc(2 * PAGE_SIZE);
if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) {
zcomp_strm_free(zstrm);
return -ENOMEM;
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index cdefdef93da8c..e9fe63da0e9b1 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -39,5 +39,4 @@ int zcomp_compress(struct zcomp_strm *zstrm,
int zcomp_decompress(struct zcomp_strm *zstrm,
const void *src, unsigned int src_len, void *dst);
-bool zcomp_set_max_streams(struct zcomp *comp, int num_strm);
#endif /* _ZCOMP_H_ */
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index da7a20fa6152a..f0639df6cd184 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1337,7 +1337,7 @@ static int zram_read_from_zspool(struct zram *zram, struct page *page,
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
dst = kmap_local_page(page);
- memcpy(dst, src, PAGE_SIZE);
+ copy_page(dst, src);
kunmap_local(dst);
ret = 0;
} else {
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index 3c84fcbda01aa..e6bc4a73c9fc3 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -383,8 +383,8 @@ static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
}
}
-static ssize_t btmtkuart_receive_buf(struct serdev_device *serdev,
- const u8 *data, size_t count)
+static size_t btmtkuart_receive_buf(struct serdev_device *serdev,
+ const u8 *data, size_t count)
{
struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 0b93c2ff29e49..9d0c7e278114b 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1285,8 +1285,8 @@ static const struct h4_recv_pkt nxp_recv_pkts[] = {
{ NXP_RECV_FW_REQ_V3, .recv = nxp_recv_fw_req_v3 },
};
-static ssize_t btnxpuart_receive_buf(struct serdev_device *serdev,
- const u8 *data, size_t count)
+static size_t btnxpuart_receive_buf(struct serdev_device *serdev,
+ const u8 *data, size_t count)
{
struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev);
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index b40b32fa7f1c3..19cfc342fc7bb 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -826,11 +826,15 @@ EXPORT_SYMBOL_GPL(qca_uart_setup);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
+ bdaddr_t bdaddr_swapped;
struct sk_buff *skb;
int err;
- skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr,
- HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+ baswap(&bdaddr_swapped, bdaddr);
+
+ skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6,
+ &bdaddr_swapped, HCI_EV_VENDOR,
+ HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "QCA Change address cmd failed (%d)", err);
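
qca_set_bdaddr() now sends a byte-swapped copy of the address; baswap() reverses the six bytes of a Bluetooth address. An illustrative userspace reversal with a made-up address (the helper name swap6 is not the kernel's):

#include <stdio.h>

static void swap6(unsigned char *dst, const unsigned char *src)
{
        for (int i = 0; i < 6; i++)
                dst[i] = src[5 - i];
}

int main(void)
{
        /* Made-up address, most-significant byte first. */
        const unsigned char bdaddr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        unsigned char swapped[6];

        swap6(swapped, bdaddr);
        for (int i = 0; i < 6; i++)
                printf("%02x%s", swapped[i], i == 5 ? "\n" : ":");
        return 0;
}
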
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 8a60ad7acd705..ecbc52eaf1010 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -7,7 +7,6 @@
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Acknowledgements:
* This file is based on hci_ll.c, which was...
@@ -226,6 +225,7 @@ struct qca_serdev {
struct qca_power *bt_power;
u32 init_speed;
u32 oper_speed;
+ bool bdaddr_property_broken;
const char *firmware_name;
};
@@ -1843,6 +1843,7 @@ static int qca_setup(struct hci_uart *hu)
const char *firmware_name = qca_get_firmware_name(hu);
int ret;
struct qca_btsoc_version ver;
+ struct qca_serdev *qcadev;
const char *soc_name;
ret = qca_check_speeds(hu);
@@ -1904,16 +1905,11 @@ retry:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
- /* Set BDA quirk bit for reading BDA value from fwnode property
- * only if that property exist in DT.
- */
- if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) {
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
- bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later");
- } else {
- bt_dev_dbg(hdev, "local-bd-address` is not present in the devicetree so not setting quirk bit for BDA");
- }
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ if (qcadev->bdaddr_property_broken)
+ set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
hci_set_aosp_capable(hdev);
@@ -2295,6 +2291,9 @@ static int qca_serdev_probe(struct serdev_device *serdev)
if (!qcadev->oper_speed)
BT_DBG("UART will pick default operating speed");
+ qcadev->bdaddr_property_broken = device_property_read_bool(&serdev->dev,
+ "qcom,local-bd-address-broken");
+
if (data)
qcadev->btsoc_type = data->soc_type;
else
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 214fff876eae5..85c0d9b68f5f7 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -271,8 +271,8 @@ static void hci_uart_write_wakeup(struct serdev_device *serdev)
*
* Return: number of processed bytes
*/
-static ssize_t hci_uart_receive_buf(struct serdev_device *serdev,
- const u8 *data, size_t count)
+static size_t hci_uart_receive_buf(struct serdev_device *serdev,
+ const u8 *data, size_t count)
{
struct hci_uart *hu = serdev_device_get_drvdata(serdev);
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index b6dfe4340da2c..65ae758f31943 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -96,6 +96,20 @@ static const int gisb_offsets_bcm7400[] = {
[ARB_ERR_CAP_MASTER] = 0x0d8,
};
+static const int gisb_offsets_bcm74165[] = {
+ [ARB_TIMER] = 0x008,
+ [ARB_BP_CAP_CLR] = 0x044,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x048,
+ [ARB_BP_CAP_STATUS] = 0x058,
+ [ARB_BP_CAP_MASTER] = 0x05c,
+ [ARB_ERR_CAP_CLR] = 0x038,
+ [ARB_ERR_CAP_HI_ADDR] = -1,
+ [ARB_ERR_CAP_ADDR] = 0x020,
+ [ARB_ERR_CAP_STATUS] = 0x030,
+ [ARB_ERR_CAP_MASTER] = 0x034,
+};
+
static const int gisb_offsets_bcm7435[] = {
[ARB_TIMER] = 0x00c,
[ARB_BP_CAP_CLR] = 0x014,
@@ -393,6 +407,7 @@ static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
{ .compatible = "brcm,bcm7400-gisb-arb", .data = gisb_offsets_bcm7400 },
{ .compatible = "brcm,bcm7278-gisb-arb", .data = gisb_offsets_bcm7278 },
{ .compatible = "brcm,bcm7038-gisb-arb", .data = gisb_offsets_bcm7038 },
+ { .compatible = "brcm,bcm74165-gisb-arb", .data = gisb_offsets_bcm74165 },
{ },
};
diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c
index e97c1d1c7578b..595fb22b73e06 100644
--- a/drivers/bus/bt1-apb.c
+++ b/drivers/bus/bt1-apb.c
@@ -22,7 +22,6 @@
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/time64.h>
-#include <linux/clk.h>
#include <linux/sysfs.h>
#define APB_EHB_ISR 0x00
diff --git a/drivers/bus/mhi/common.h b/drivers/bus/mhi/common.h
index f794b9c8049e2..dda340aaed95a 100644
--- a/drivers/bus/mhi/common.h
+++ b/drivers/bus/mhi/common.h
@@ -297,30 +297,30 @@ struct mhi_ring_element {
__le32 dword[2];
};
+#define MHI_STATE_LIST \
+ mhi_state(RESET, "RESET") \
+ mhi_state(READY, "READY") \
+ mhi_state(M0, "M0") \
+ mhi_state(M1, "M1") \
+ mhi_state(M2, "M2") \
+ mhi_state(M3, "M3") \
+ mhi_state(M3_FAST, "M3_FAST") \
+ mhi_state(BHI, "BHI") \
+ mhi_state_end(SYS_ERR, "SYS ERROR")
+
+#undef mhi_state
+#undef mhi_state_end
+
+#define mhi_state(a, b) case MHI_STATE_##a: return b;
+#define mhi_state_end(a, b) case MHI_STATE_##a: return b;
+
static inline const char *mhi_state_str(enum mhi_state state)
{
switch (state) {
- case MHI_STATE_RESET:
- return "RESET";
- case MHI_STATE_READY:
- return "READY";
- case MHI_STATE_M0:
- return "M0";
- case MHI_STATE_M1:
- return "M1";
- case MHI_STATE_M2:
- return "M2";
- case MHI_STATE_M3:
- return "M3";
- case MHI_STATE_M3_FAST:
- return "M3 FAST";
- case MHI_STATE_BHI:
- return "BHI";
- case MHI_STATE_SYS_ERR:
- return "SYS ERROR";
+ MHI_STATE_LIST
default:
return "Unknown state";
}
-};
+}
#endif /* _MHI_COMMON_H */
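
MHI_STATE_LIST above is an X-macro: one list is expanded once into the switch cases here and again into the enum/string tables used elsewhere. A self-contained sketch of the technique with a made-up list:

#include <stdio.h>

/* Single source of truth for the (enum, string) pairs. */
#define COLOR_LIST \
        color(RED, "red") \
        color_end(BLUE, "blue")

#define color(a, b) COLOR_##a,
#define color_end(a, b) COLOR_##a
enum color { COLOR_LIST };
#undef color
#undef color_end

#define color(a, b) case COLOR_##a: return b;
#define color_end(a, b) case COLOR_##a: return b;
static const char *color_str(enum color c)
{
        switch (c) {
        COLOR_LIST
        default:
                return "unknown";
        }
}
#undef color
#undef color_end

int main(void)
{
        printf("%s %s\n", color_str(COLOR_RED), color_str(COLOR_BLUE));
        return 0;
}
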
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 65fc1d738bec2..f8f674adf1d40 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -1149,8 +1149,9 @@ int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
mhi_ep_mmio_mask_interrupts(mhi_cntrl);
mhi_ep_mmio_init(mhi_cntrl);
- mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)),
- GFP_KERNEL);
+ mhi_cntrl->mhi_event = kcalloc(mhi_cntrl->event_rings,
+ sizeof(*mhi_cntrl->mhi_event),
+ GFP_KERNEL);
if (!mhi_cntrl->mhi_event)
return -ENOMEM;
@@ -1496,7 +1497,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
sizeof(struct mhi_ep_ring_item), 0,
0, NULL);
- if (!mhi_cntrl->ev_ring_el_cache) {
+ if (!mhi_cntrl->ring_item_cache) {
ret = -ENOMEM;
goto err_destroy_tre_buf_cache;
}
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
index edc0ec5a09339..dedd29ca8db35 100644
--- a/drivers/bus/mhi/host/boot.c
+++ b/drivers/bus/mhi/host/boot.c
@@ -395,7 +395,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
void *buf;
dma_addr_t dma_addr;
size_t size, fw_sz;
- int i, ret;
+ int ret;
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
dev_err(dev, "Device MHI is not in valid state\n");
@@ -408,15 +408,6 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
if (ret)
dev_err(dev, "Could not capture serial number via BHI\n");
- for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) {
- ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i),
- &mhi_cntrl->oem_pk_hash[i]);
- if (ret) {
- dev_err(dev, "Could not capture OEM PK HASH via BHI\n");
- break;
- }
- }
-
/* wait for ready on pass through or any other execution environment */
if (!MHI_FW_LOAD_CAPABLE(mhi_cntrl->ee))
goto fw_load_ready_state;
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index 65ceac1837f9a..44f934981de82 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -20,50 +20,49 @@
#include <linux/wait.h>
#include "internal.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
static DEFINE_IDA(mhi_controller_ida);
+#undef mhi_ee
+#undef mhi_ee_end
+
+#define mhi_ee(a, b) [MHI_EE_##a] = b,
+#define mhi_ee_end(a, b) [MHI_EE_##a] = b,
+
const char * const mhi_ee_str[MHI_EE_MAX] = {
- [MHI_EE_PBL] = "PRIMARY BOOTLOADER",
- [MHI_EE_SBL] = "SECONDARY BOOTLOADER",
- [MHI_EE_AMSS] = "MISSION MODE",
- [MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE",
- [MHI_EE_WFW] = "WLAN FIRMWARE",
- [MHI_EE_PTHRU] = "PASS THROUGH",
- [MHI_EE_EDL] = "EMERGENCY DOWNLOAD",
- [MHI_EE_FP] = "FLASH PROGRAMMER",
- [MHI_EE_DISABLE_TRANSITION] = "DISABLE",
- [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
+ MHI_EE_LIST
};
+#undef dev_st_trans
+#undef dev_st_trans_end
+
+#define dev_st_trans(a, b) [DEV_ST_TRANSITION_##a] = b,
+#define dev_st_trans_end(a, b) [DEV_ST_TRANSITION_##a] = b,
+
const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
- [DEV_ST_TRANSITION_PBL] = "PBL",
- [DEV_ST_TRANSITION_READY] = "READY",
- [DEV_ST_TRANSITION_SBL] = "SBL",
- [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
- [DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER",
- [DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR",
- [DEV_ST_TRANSITION_DISABLE] = "DISABLE",
+ DEV_ST_TRANSITION_LIST
};
+#undef ch_state_type
+#undef ch_state_type_end
+
+#define ch_state_type(a, b) [MHI_CH_STATE_TYPE_##a] = b,
+#define ch_state_type_end(a, b) [MHI_CH_STATE_TYPE_##a] = b,
+
const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
- [MHI_CH_STATE_TYPE_RESET] = "RESET",
- [MHI_CH_STATE_TYPE_STOP] = "STOP",
- [MHI_CH_STATE_TYPE_START] = "START",
+ MHI_CH_STATE_TYPE_LIST
};
+#undef mhi_pm_state
+#undef mhi_pm_state_end
+
+#define mhi_pm_state(a, b) [MHI_PM_STATE_##a] = b,
+#define mhi_pm_state_end(a, b) [MHI_PM_STATE_##a] = b,
+
static const char * const mhi_pm_state_str[] = {
- [MHI_PM_STATE_DISABLE] = "DISABLE",
- [MHI_PM_STATE_POR] = "POWER ON RESET",
- [MHI_PM_STATE_M0] = "M0",
- [MHI_PM_STATE_M2] = "M2",
- [MHI_PM_STATE_M3_ENTER] = "M?->M3",
- [MHI_PM_STATE_M3] = "M3",
- [MHI_PM_STATE_M3_EXIT] = "M3->M0",
- [MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
- [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
- [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
- [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
- [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
+ MHI_PM_STATE_LIST
};
const char *to_mhi_pm_state_str(u32 state)
@@ -97,11 +96,19 @@ static ssize_t oem_pk_hash_show(struct device *dev,
{
struct mhi_device *mhi_dev = to_mhi_device(dev);
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
- int i, cnt = 0;
+ u32 hash_segment[MHI_MAX_OEM_PK_HASH_SEGMENTS];
+ int i, cnt = 0, ret;
- for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
- cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n",
- i, mhi_cntrl->oem_pk_hash[i]);
+ for (i = 0; i < MHI_MAX_OEM_PK_HASH_SEGMENTS; i++) {
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i), &hash_segment[i]);
+ if (ret) {
+ dev_err(dev, "Could not capture OEM PK HASH\n");
+ return ret;
+ }
+ }
+
+ for (i = 0; i < MHI_MAX_OEM_PK_HASH_SEGMENTS; i++)
+ cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n", i, hash_segment[i]);
return cnt;
}
@@ -907,7 +914,6 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan;
struct mhi_cmd *mhi_cmd;
struct mhi_device *mhi_dev;
- u32 soc_info;
int ret, i;
if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
@@ -982,17 +988,6 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
}
- /* Read the MHI device info */
- ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
- SOC_HW_VERSION_OFFS, &soc_info);
- if (ret)
- goto err_destroy_wq;
-
- mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info);
- mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info);
- mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info);
- mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info);
-
mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
if (mhi_cntrl->index < 0) {
ret = mhi_cntrl->index;
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index 30ac415a3000f..5fe49311b8eb4 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -15,12 +15,6 @@ extern struct bus_type mhi_bus_type;
#define MHI_SOC_RESET_REQ_OFFSET 0xb0
#define MHI_SOC_RESET_REQ BIT(0)
-#define SOC_HW_VERSION_OFFS 0x224
-#define SOC_HW_VERSION_FAM_NUM_BMSK GENMASK(31, 28)
-#define SOC_HW_VERSION_DEV_NUM_BMSK GENMASK(27, 16)
-#define SOC_HW_VERSION_MAJOR_VER_BMSK GENMASK(15, 8)
-#define SOC_HW_VERSION_MINOR_VER_BMSK GENMASK(7, 0)
-
struct mhi_ctxt {
struct mhi_event_ctxt *er_ctxt;
struct mhi_chan_ctxt *chan_ctxt;
@@ -42,6 +36,11 @@ enum mhi_ch_state_type {
MHI_CH_STATE_TYPE_MAX,
};
+#define MHI_CH_STATE_TYPE_LIST \
+ ch_state_type(RESET, "RESET") \
+ ch_state_type(STOP, "STOP") \
+ ch_state_type_end(START, "START")
+
extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX];
#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \
"INVALID_STATE" : \
@@ -50,6 +49,18 @@ extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX];
#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \
mode != MHI_DB_BRST_ENABLE)
+#define MHI_EE_LIST \
+ mhi_ee(PBL, "PRIMARY BOOTLOADER") \
+ mhi_ee(SBL, "SECONDARY BOOTLOADER") \
+ mhi_ee(AMSS, "MISSION MODE") \
+ mhi_ee(RDDM, "RAMDUMP DOWNLOAD MODE")\
+ mhi_ee(WFW, "WLAN FIRMWARE") \
+ mhi_ee(PTHRU, "PASS THROUGH") \
+ mhi_ee(EDL, "EMERGENCY DOWNLOAD") \
+ mhi_ee(FP, "FLASH PROGRAMMER") \
+ mhi_ee(DISABLE_TRANSITION, "DISABLE") \
+ mhi_ee_end(NOT_SUPPORTED, "NOT SUPPORTED")
+
extern const char * const mhi_ee_str[MHI_EE_MAX];
#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
"INVALID_EE" : mhi_ee_str[ee])
@@ -72,6 +83,15 @@ enum dev_st_transition {
DEV_ST_TRANSITION_MAX,
};
+#define DEV_ST_TRANSITION_LIST \
+ dev_st_trans(PBL, "PBL") \
+ dev_st_trans(READY, "READY") \
+ dev_st_trans(SBL, "SBL") \
+ dev_st_trans(MISSION_MODE, "MISSION MODE") \
+ dev_st_trans(FP, "FLASH PROGRAMMER") \
+ dev_st_trans(SYS_ERR, "SYS ERROR") \
+ dev_st_trans_end(DISABLE, "DISABLE")
+
extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX];
#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \
"INVALID_STATE" : dev_state_tran_str[state])
@@ -88,11 +108,27 @@ enum mhi_pm_state {
MHI_PM_STATE_FW_DL_ERR,
MHI_PM_STATE_SYS_ERR_DETECT,
MHI_PM_STATE_SYS_ERR_PROCESS,
+ MHI_PM_STATE_SYS_ERR_FAIL,
MHI_PM_STATE_SHUTDOWN_PROCESS,
MHI_PM_STATE_LD_ERR_FATAL_DETECT,
MHI_PM_STATE_MAX
};
+#define MHI_PM_STATE_LIST \
+ mhi_pm_state(DISABLE, "DISABLE") \
+ mhi_pm_state(POR, "POWER ON RESET") \
+ mhi_pm_state(M0, "M0") \
+ mhi_pm_state(M2, "M2") \
+ mhi_pm_state(M3_ENTER, "M?->M3") \
+ mhi_pm_state(M3, "M3") \
+ mhi_pm_state(M3_EXIT, "M3->M0") \
+ mhi_pm_state(FW_DL_ERR, "Firmware Download Error") \
+ mhi_pm_state(SYS_ERR_DETECT, "SYS ERROR Detect") \
+ mhi_pm_state(SYS_ERR_PROCESS, "SYS ERROR Process") \
+ mhi_pm_state(SYS_ERR_FAIL, "SYS ERROR Failure") \
+ mhi_pm_state(SHUTDOWN_PROCESS, "SHUTDOWN Process") \
+ mhi_pm_state_end(LD_ERR_FATAL_DETECT, "Linkdown or Error Fatal Detect")
+
#define MHI_PM_DISABLE BIT(0)
#define MHI_PM_POR BIT(1)
#define MHI_PM_M0 BIT(2)
@@ -104,14 +140,16 @@ enum mhi_pm_state {
#define MHI_PM_FW_DL_ERR BIT(7)
#define MHI_PM_SYS_ERR_DETECT BIT(8)
#define MHI_PM_SYS_ERR_PROCESS BIT(9)
-#define MHI_PM_SHUTDOWN_PROCESS BIT(10)
+#define MHI_PM_SYS_ERR_FAIL BIT(10)
+#define MHI_PM_SHUTDOWN_PROCESS BIT(11)
/* link not accessible */
-#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11)
+#define MHI_PM_LD_ERR_FATAL_DETECT BIT(12)
#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
- MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
+ MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS | \
+ MHI_PM_FW_DL_ERR)))
#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & mhi_cntrl->db_access)
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index abb561db9ae1d..15d657af9b5b8 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -15,6 +15,7 @@
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "internal.h"
+#include "trace.h"
int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
void __iomem *base, u32 offset, u32 *out)
@@ -493,11 +494,8 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
state = mhi_get_mhi_state(mhi_cntrl);
ee = mhi_get_exec_env(mhi_cntrl);
- dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n",
- TO_MHI_EXEC_STR(mhi_cntrl->ee),
- mhi_state_str(mhi_cntrl->dev_state),
- TO_MHI_EXEC_STR(ee), mhi_state_str(state));
+ trace_mhi_intvec_states(mhi_cntrl, ee, state);
if (state == MHI_STATE_SYS_ERR) {
dev_dbg(dev, "System error detected\n");
pm_state = mhi_tryset_pm_state(mhi_cntrl,
@@ -838,6 +836,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
while (dev_rp != local_rp) {
enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
+ trace_mhi_ctrl_event(mhi_cntrl, local_rp);
+
switch (type) {
case MHI_PKT_TYPE_BW_REQ_EVENT:
{
@@ -1003,6 +1003,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
while (dev_rp != local_rp && event_quota > 0) {
enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
+ trace_mhi_data_event(mhi_cntrl, local_rp);
+
chan = MHI_TRE_GET_EV_CHID(local_rp);
WARN_ON(chan >= mhi_cntrl->max_chan);
@@ -1243,6 +1245,7 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
+ trace_mhi_gen_tre(mhi_cntrl, mhi_chan, mhi_tre);
/* increment WP */
mhi_add_ring_element(mhi_cntrl, tre_ring);
mhi_add_ring_element(mhi_cntrl, buf_ring);
@@ -1337,9 +1340,7 @@ static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
enum mhi_cmd_type cmd = MHI_CMD_NOP;
int ret;
- dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan,
- TO_CH_STATE_TYPE_STR(to_state));
-
+ trace_mhi_channel_command_start(mhi_cntrl, mhi_chan, to_state, TPS("Updating"));
switch (to_state) {
case MHI_CH_STATE_TYPE_RESET:
write_lock_irq(&mhi_chan->lock);
@@ -1406,9 +1407,7 @@ static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
write_unlock_irq(&mhi_chan->lock);
}
- dev_dbg(dev, "%d: Channel state change to %s successful\n",
- mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
-
+ trace_mhi_channel_command_end(mhi_cntrl, mhi_chan, to_state, TPS("Updated"));
exit_channel_update:
mhi_cntrl->runtime_put(mhi_cntrl);
mhi_device_put(mhi_cntrl->mhi_dev);
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index cd6cd14b3d29b..51639bfcfec70 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -538,7 +538,7 @@ static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
};
-static struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
+static const struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
.max_channels = 128,
.timeout_ms = 20000,
.num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index a2f2feef14768..8b40d3f01accd 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"
+#include "trace.h"
/*
* Not all MHI state transitions are synchronous. Transitions like Linkdown,
@@ -36,7 +37,10 @@
* M0 <--> M0
* M0 -> FW_DL_ERR
* M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
- * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
+ * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS
+ * SYS_ERR_PROCESS -> SYS_ERR_FAIL
+ * SYS_ERR_FAIL -> SYS_ERR_DETECT
+ * SYS_ERR_PROCESS --> POR
* L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
* SHUTDOWN_PROCESS -> DISABLE
* L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
@@ -93,7 +97,12 @@ static const struct mhi_pm_transitions dev_state_transitions[] = {
},
{
MHI_PM_SYS_ERR_PROCESS,
- MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_POR | MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_SYS_ERR_FAIL,
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
MHI_PM_LD_ERR_FATAL_DETECT
},
/* L2 States */
@@ -123,6 +132,7 @@ enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cn
if (unlikely(!(dev_state_transitions[index].to_states & state)))
return cur_state;
+ trace_mhi_tryset_pm_state(mhi_cntrl, state);
mhi_cntrl->pm_state = state;
return mhi_cntrl->pm_state;
}
@@ -629,7 +639,13 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
!in_reset, timeout);
if (!ret || in_reset) {
dev_err(dev, "Device failed to exit MHI Reset state\n");
- goto exit_sys_error_transition;
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_FAIL);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ /* Shutdown may have occurred, otherwise cleanup now */
+ if (cur_state != MHI_PM_SYS_ERR_FAIL)
+ goto exit_sys_error_transition;
}
/*
@@ -758,7 +774,6 @@ void mhi_pm_st_worker(struct work_struct *work)
struct mhi_controller *mhi_cntrl = container_of(work,
struct mhi_controller,
st_worker);
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
spin_lock_irq(&mhi_cntrl->transition_lock);
list_splice_tail_init(&mhi_cntrl->transition_list, &head);
@@ -766,8 +781,8 @@ void mhi_pm_st_worker(struct work_struct *work)
list_for_each_entry_safe(itr, tmp, &head, node) {
list_del(&itr->node);
- dev_dbg(dev, "Handling state transition: %s\n",
- TO_DEV_STATE_TRANS_STR(itr->state));
+
+ trace_mhi_pm_st_transition(mhi_cntrl, itr->state);
switch (itr->state) {
case DEV_ST_TRANSITION_PBL:
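
The new SYS_ERR_FAIL entries above extend the dev_state_transitions table that mhi_tryset_pm_state() consults: a move is permitted only if the requested state's bit is set in the to_states mask of the current state's row. A minimal sketch of that table-driven check, using hypothetical one-hot states rather than the real MHI_PM_* values (the real pm.c indexes the row by the current state's bit position instead of scanning):

#include <linux/bits.h>
#include <linux/kernel.h>

/* Hypothetical one-hot states, standing in for the MHI_PM_* values. */
#define ST_ERR_PROCESS	BIT(0)
#define ST_ERR_FAIL	BIT(1)
#define ST_ERR_DETECT	BIT(2)
#define ST_POR		BIT(3)

struct transition {
	u32 from;
	u32 to_states;		/* bitmask of legal next states */
};

static const struct transition table[] = {
	{ ST_ERR_PROCESS, ST_POR | ST_ERR_FAIL },
	{ ST_ERR_FAIL,    ST_ERR_DETECT },
};

/* Return the new state if the move is legal, otherwise keep the current one. */
static u32 try_set(u32 cur, u32 next)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(table); i++)
		if (table[i].from == cur)
			return (table[i].to_states & next) ? next : cur;

	return cur;
}
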
diff --git a/drivers/bus/mhi/host/trace.h b/drivers/bus/mhi/host/trace.h
new file mode 100644
index 0000000000000..368515dcb22d1
--- /dev/null
+++ b/drivers/bus/mhi/host/trace.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mhi_host
+
+#if !defined(_TRACE_EVENT_MHI_HOST_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EVENT_MHI_HOST_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+#include "../common.h"
+#include "internal.h"
+
+#undef mhi_state
+#undef mhi_state_end
+
+#define mhi_state(a, b) TRACE_DEFINE_ENUM(MHI_STATE_##a);
+#define mhi_state_end(a, b) TRACE_DEFINE_ENUM(MHI_STATE_##a);
+
+MHI_STATE_LIST
+
+#undef mhi_state
+#undef mhi_state_end
+
+#define mhi_state(a, b) { MHI_STATE_##a, b },
+#define mhi_state_end(a, b) { MHI_STATE_##a, b }
+
+#undef mhi_pm_state
+#undef mhi_pm_state_end
+
+#define mhi_pm_state(a, b) TRACE_DEFINE_ENUM(MHI_PM_STATE_##a);
+#define mhi_pm_state_end(a, b) TRACE_DEFINE_ENUM(MHI_PM_STATE_##a);
+
+MHI_PM_STATE_LIST
+
+#undef mhi_pm_state
+#undef mhi_pm_state_end
+
+#define mhi_pm_state(a, b) { MHI_PM_STATE_##a, b },
+#define mhi_pm_state_end(a, b) { MHI_PM_STATE_##a, b }
+
+#undef mhi_ee
+#undef mhi_ee_end
+
+#define mhi_ee(a, b) TRACE_DEFINE_ENUM(MHI_EE_##a);
+#define mhi_ee_end(a, b) TRACE_DEFINE_ENUM(MHI_EE_##a);
+
+MHI_EE_LIST
+
+#undef mhi_ee
+#undef mhi_ee_end
+
+#define mhi_ee(a, b) { MHI_EE_##a, b },
+#define mhi_ee_end(a, b) { MHI_EE_##a, b }
+
+#undef ch_state_type
+#undef ch_state_type_end
+
+#define ch_state_type(a, b) TRACE_DEFINE_ENUM(MHI_CH_STATE_TYPE_##a);
+#define ch_state_type_end(a, b) TRACE_DEFINE_ENUM(MHI_CH_STATE_TYPE_##a);
+
+MHI_CH_STATE_TYPE_LIST
+
+#undef ch_state_type
+#undef ch_state_type_end
+
+#define ch_state_type(a, b) { MHI_CH_STATE_TYPE_##a, b },
+#define ch_state_type_end(a, b) { MHI_CH_STATE_TYPE_##a, b }
+
+#undef dev_st_trans
+#undef dev_st_trans_end
+
+#define dev_st_trans(a, b) TRACE_DEFINE_ENUM(DEV_ST_TRANSITION_##a);
+#define dev_st_trans_end(a, b) TRACE_DEFINE_ENUM(DEV_ST_TRANSITION_##a);
+
+DEV_ST_TRANSITION_LIST
+
+#undef dev_st_trans
+#undef dev_st_trans_end
+
+#define dev_st_trans(a, b) { DEV_ST_TRANSITION_##a, b },
+#define dev_st_trans_end(a, b) { DEV_ST_TRANSITION_##a, b }
+
+#define TPS(x) tracepoint_string(x)
+
+TRACE_EVENT(mhi_gen_tre,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ struct mhi_ring_element *mhi_tre),
+
+ TP_ARGS(mhi_cntrl, mhi_chan, mhi_tre),
+
+ TP_STRUCT__entry(
+ __string(name, mhi_cntrl->mhi_dev->name)
+ __field(int, ch_num)
+ __field(void *, wp)
+ __field(__le64, tre_ptr)
+ __field(__le32, dword0)
+ __field(__le32, dword1)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, mhi_cntrl->mhi_dev->name);
+ __entry->ch_num = mhi_chan->chan;
+ __entry->wp = mhi_tre;
+ __entry->tre_ptr = mhi_tre->ptr;
+ __entry->dword0 = mhi_tre->dword[0];
+ __entry->dword1 = mhi_tre->dword[1];
+ ),
+
+ TP_printk("%s: Chan: %d TRE: 0x%p TRE buf: 0x%llx DWORD0: 0x%08x DWORD1: 0x%08x\n",
+ __get_str(name), __entry->ch_num, __entry->wp, __entry->tre_ptr,
+ __entry->dword0, __entry->dword1)
+);
+
+TRACE_EVENT(mhi_intvec_states,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, int dev_ee, int dev_state),
+
+ TP_ARGS(mhi_cntrl, dev_ee, dev_state),
+
+ TP_STRUCT__entry(
+ __string(name, mhi_cntrl->mhi_dev->name)
+ __field(int, local_ee)
+ __field(int, state)
+ __field(int, dev_ee)
+ __field(int, dev_state)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, mhi_cntrl->mhi_dev->name);
+ __entry->local_ee = mhi_cntrl->ee;
+ __entry->state = mhi_cntrl->dev_state;
+ __entry->dev_ee = dev_ee;
+ __entry->dev_state = dev_state;
+ ),
+
+ TP_printk("%s: Local EE: %s State: %s Device EE: %s Dev State: %s\n",
+ __get_str(name),
+ __print_symbolic(__entry->local_ee, MHI_EE_LIST),
+ __print_symbolic(__entry->state, MHI_STATE_LIST),
+ __print_symbolic(__entry->dev_ee, MHI_EE_LIST),
+ __print_symbolic(__entry->dev_state, MHI_STATE_LIST))
+);
+
+TRACE_EVENT(mhi_tryset_pm_state,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, int pm_state),
+
+ TP_ARGS(mhi_cntrl, pm_state),
+
+ TP_STRUCT__entry(
+ __string(name, mhi_cntrl->mhi_dev->name)
+ __field(int, pm_state)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, mhi_cntrl->mhi_dev->name);
+ if (pm_state)
+ pm_state = __fls(pm_state);
+ __entry->pm_state = pm_state;
+ ),
+
+ TP_printk("%s: PM state: %s\n", __get_str(name),
+ __print_symbolic(__entry->pm_state, MHI_PM_STATE_LIST))
+);
+
+DECLARE_EVENT_CLASS(mhi_process_event_ring,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_ring_element *rp),
+
+ TP_ARGS(mhi_cntrl, rp),
+
+ TP_STRUCT__entry(
+ __string(name, mhi_cntrl->mhi_dev->name)
+ __field(__le32, dword0)
+ __field(__le32, dword1)
+ __field(int, state)
+ __field(__le64, ptr)
+ __field(void *, rp)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, mhi_cntrl->mhi_dev->name);
+ __entry->rp = rp;
+ __entry->ptr = rp->ptr;
+ __entry->dword0 = rp->dword[0];
+ __entry->dword1 = rp->dword[1];
+ __entry->state = MHI_TRE_GET_EV_STATE(rp);
+ ),
+
+ TP_printk("%s: TRE: 0x%p TRE buf: 0x%llx DWORD0: 0x%08x DWORD1: 0x%08x State: %s\n",
+ __get_str(name), __entry->rp, __entry->ptr, __entry->dword0,
+ __entry->dword1, __print_symbolic(__entry->state, MHI_STATE_LIST))
+);
+
+DEFINE_EVENT(mhi_process_event_ring, mhi_data_event,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_ring_element *rp),
+
+ TP_ARGS(mhi_cntrl, rp)
+);
+
+DEFINE_EVENT(mhi_process_event_ring, mhi_ctrl_event,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_ring_element *rp),
+
+ TP_ARGS(mhi_cntrl, rp)
+);
+
+DECLARE_EVENT_CLASS(mhi_update_channel_state,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, int state,
+ const char *reason),
+
+ TP_ARGS(mhi_cntrl, mhi_chan, state, reason),
+
+ TP_STRUCT__entry(
+ __string(name, mhi_cntrl->mhi_dev->name)
+ __field(int, ch_num)
+ __field(int, state)
+ __field(const char *, reason)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, mhi_cntrl->mhi_dev->name);
+ __entry->ch_num = mhi_chan->chan;
+ __entry->state = state;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("%s: chan%d: %s state to: %s\n",
+ __get_str(name), __entry->ch_num, __entry->reason,
+ __print_symbolic(__entry->state, MHI_CH_STATE_TYPE_LIST))
+);
+
+DEFINE_EVENT(mhi_update_channel_state, mhi_channel_command_start,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, int state,
+ const char *reason),
+
+ TP_ARGS(mhi_cntrl, mhi_chan, state, reason)
+);
+
+DEFINE_EVENT(mhi_update_channel_state, mhi_channel_command_end,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, int state,
+ const char *reason),
+
+ TP_ARGS(mhi_cntrl, mhi_chan, state, reason)
+);
+
+TRACE_EVENT(mhi_pm_st_transition,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, int state),
+
+ TP_ARGS(mhi_cntrl, state),
+
+ TP_STRUCT__entry(
+ __string(name, mhi_cntrl->mhi_dev->name)
+ __field(int, state)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, mhi_cntrl->mhi_dev->name);
+ __entry->state = state;
+ ),
+
+ TP_printk("%s: Handling state transition: %s\n", __get_str(name),
+ __print_symbolic(__entry->state, DEV_ST_TRANSITION_LIST))
+);
+
+#endif
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/bus/mhi/host
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
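
The header above only declares the events; the usual tracepoint wiring is that exactly one compilation unit in the module defines CREATE_TRACE_POINTS before including it (which makes <trace/define_trace.h> emit the tracepoint bodies), while every other file includes it plainly and calls the generated trace_<event>() stubs. A hedged sketch of that pattern with a hypothetical caller (which file in this series defines CREATE_TRACE_POINTS is not shown here):

/* In exactly one compilation unit of the module: */
#define CREATE_TRACE_POINTS
#include "trace.h"

/* Everywhere else, plain inclusion is enough to get the trace_*() stubs: */
#include "trace.h"

static void example(struct mhi_controller *mhi_cntrl, int state)
{
	/*
	 * Compiles down to a static-branch no-op unless the event is enabled
	 * at run time (e.g. through tracefs, events/mhi_host/.../enable).
	 */
	trace_mhi_pm_st_transition(mhi_cntrl, state);
}
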
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index 554e1992edd44..8baf14bd5effb 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -118,7 +118,7 @@ static struct attribute *mips_cdmm_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(mips_cdmm_dev);
-struct bus_type mips_cdmm_bustype = {
+const struct bus_type mips_cdmm_bustype = {
.name = "cdmm",
.dev_groups = mips_cdmm_dev_groups,
.match = mips_cdmm_match,
diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c
index 4fa932cb09150..baf22a82c47a7 100644
--- a/drivers/bus/ts-nbus.c
+++ b/drivers/bus/ts-nbus.c
@@ -39,45 +39,39 @@ struct ts_nbus {
/*
* request all gpios required by the bus.
*/
-static int ts_nbus_init_pdata(struct platform_device *pdev, struct ts_nbus
- *ts_nbus)
+static int ts_nbus_init_pdata(struct platform_device *pdev,
+ struct ts_nbus *ts_nbus)
{
ts_nbus->data = devm_gpiod_get_array(&pdev->dev, "ts,data",
GPIOD_OUT_HIGH);
- if (IS_ERR(ts_nbus->data)) {
- dev_err(&pdev->dev, "failed to retrieve ts,data-gpio from dts\n");
- return PTR_ERR(ts_nbus->data);
- }
+ if (IS_ERR(ts_nbus->data))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->data),
+ "failed to retrieve ts,data-gpio from dts\n");
ts_nbus->csn = devm_gpiod_get(&pdev->dev, "ts,csn", GPIOD_OUT_HIGH);
- if (IS_ERR(ts_nbus->csn)) {
- dev_err(&pdev->dev, "failed to retrieve ts,csn-gpio from dts\n");
- return PTR_ERR(ts_nbus->csn);
- }
+ if (IS_ERR(ts_nbus->csn))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->csn),
+ "failed to retrieve ts,csn-gpio from dts\n");
ts_nbus->txrx = devm_gpiod_get(&pdev->dev, "ts,txrx", GPIOD_OUT_HIGH);
- if (IS_ERR(ts_nbus->txrx)) {
- dev_err(&pdev->dev, "failed to retrieve ts,txrx-gpio from dts\n");
- return PTR_ERR(ts_nbus->txrx);
- }
+ if (IS_ERR(ts_nbus->txrx))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->txrx),
+ "failed to retrieve ts,txrx-gpio from dts\n");
ts_nbus->strobe = devm_gpiod_get(&pdev->dev, "ts,strobe", GPIOD_OUT_HIGH);
- if (IS_ERR(ts_nbus->strobe)) {
- dev_err(&pdev->dev, "failed to retrieve ts,strobe-gpio from dts\n");
- return PTR_ERR(ts_nbus->strobe);
- }
+ if (IS_ERR(ts_nbus->strobe))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->strobe),
+ "failed to retrieve ts,strobe-gpio from dts\n");
ts_nbus->ale = devm_gpiod_get(&pdev->dev, "ts,ale", GPIOD_OUT_HIGH);
- if (IS_ERR(ts_nbus->ale)) {
- dev_err(&pdev->dev, "failed to retrieve ts,ale-gpio from dts\n");
- return PTR_ERR(ts_nbus->ale);
- }
+ if (IS_ERR(ts_nbus->ale))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->ale),
+ "failed to retrieve ts,ale-gpio from dts\n");
ts_nbus->rdy = devm_gpiod_get(&pdev->dev, "ts,rdy", GPIOD_IN);
- if (IS_ERR(ts_nbus->rdy)) {
- dev_err(&pdev->dev, "failed to retrieve ts,rdy-gpio from dts\n");
- return PTR_ERR(ts_nbus->rdy);
- }
+ if (IS_ERR(ts_nbus->rdy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->rdy),
+ "failed to retrieve ts,rdy-gpio from dts\n");
return 0;
}
@@ -273,7 +267,7 @@ EXPORT_SYMBOL_GPL(ts_nbus_write);
static int ts_nbus_probe(struct platform_device *pdev)
{
struct pwm_device *pwm;
- struct pwm_args pargs;
+ struct pwm_state state;
struct device *dev = &pdev->dev;
struct ts_nbus *ts_nbus;
int ret;
@@ -289,32 +283,24 @@ static int ts_nbus_probe(struct platform_device *pdev)
return ret;
pwm = devm_pwm_get(dev, NULL);
- if (IS_ERR(pwm)) {
- ret = PTR_ERR(pwm);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "unable to request PWM\n");
- return ret;
- }
+ if (IS_ERR(pwm))
+ return dev_err_probe(dev, PTR_ERR(pwm),
+ "unable to request PWM\n");
- pwm_get_args(pwm, &pargs);
- if (!pargs.period) {
- dev_err(&pdev->dev, "invalid PWM period\n");
- return -EINVAL;
- }
+ pwm_init_state(pwm, &state);
+ if (!state.period)
+ return dev_err_probe(dev, -EINVAL, "invalid PWM period\n");
- /*
- * FIXME: pwm_apply_args() should be removed when switching to
- * the atomic PWM API.
- */
- pwm_apply_args(pwm);
- ret = pwm_config(pwm, pargs.period, pargs.period);
+ state.duty_cycle = state.period;
+ state.enabled = true;
+
+ ret = pwm_apply_state(pwm, &state);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret, "failed to configure PWM\n");
/*
* we can now start the FPGA and populate the peripherals.
*/
- pwm_enable(pwm);
ts_nbus->pwm = pwm;
/*
@@ -324,7 +310,8 @@ static int ts_nbus_probe(struct platform_device *pdev)
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret,
+ "failed to populate platform devices on bus\n");
dev_info(dev, "initialized\n");
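
The ts-nbus conversion above replaces the legacy pwm_config()/pwm_enable() pair with the atomic PWM API: the state is seeded with pwm_init_state(), adjusted, and written back in a single pwm_apply_state() call, with dev_err_probe() shortening the error paths. A condensed sketch of that probe-time pattern (names are hypothetical, not part of the patch):

#include <linux/device.h>
#include <linux/pwm.h>

static int example_pwm_probe(struct device *dev)
{
	struct pwm_device *pwm;
	struct pwm_state state;
	int ret;

	pwm = devm_pwm_get(dev, NULL);
	if (IS_ERR(pwm))
		return dev_err_probe(dev, PTR_ERR(pwm), "unable to request PWM\n");

	/* Seed the state from DT/board arguments instead of pwm_apply_args(). */
	pwm_init_state(pwm, &state);
	if (!state.period)
		return dev_err_probe(dev, -EINVAL, "invalid PWM period\n");

	/* 100% duty cycle, enabled, applied in one atomic call. */
	state.duty_cycle = state.period;
	state.enabled = true;

	ret = pwm_apply_state(pwm, &state);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to configure PWM\n");

	return 0;
}
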
diff --git a/drivers/cache/sifive_ccache.c b/drivers/cache/sifive_ccache.c
index 89ed6cd6b059e..e9cc8b4786fbf 100644
--- a/drivers/cache/sifive_ccache.c
+++ b/drivers/cache/sifive_ccache.c
@@ -15,6 +15,8 @@
#include <linux/of_address.h>
#include <linux/device.h>
#include <linux/bitfield.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <asm/cacheflush.h>
#include <asm/cacheinfo.h>
#include <asm/dma-noncoherent.h>
@@ -247,13 +249,49 @@ static irqreturn_t ccache_int_handler(int irq, void *device)
return IRQ_HANDLED;
}
+static int sifive_ccache_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long quirks;
+ int intr_num, rc;
+
+ quirks = (unsigned long)device_get_match_data(dev);
+
+ intr_num = platform_irq_count(pdev);
+ if (!intr_num)
+ return dev_err_probe(dev, -ENODEV, "No interrupts property\n");
+
+ for (int i = 0; i < intr_num; i++) {
+ if (i == DATA_UNCORR && (quirks & QUIRK_BROKEN_DATA_UNCORR))
+ continue;
+
+ g_irq[i] = platform_get_irq(pdev, i);
+ if (g_irq[i] < 0)
+ return g_irq[i];
+
+ rc = devm_request_irq(dev, g_irq[i], ccache_int_handler, 0, "ccache_ecc", NULL);
+ if (rc)
+ return dev_err_probe(dev, rc, "Could not request IRQ %d\n", g_irq[i]);
+ }
+
+ return 0;
+}
+
+static struct platform_driver sifive_ccache_driver = {
+ .probe = sifive_ccache_probe,
+ .driver = {
+ .name = "sifive_ccache",
+ .of_match_table = sifive_ccache_ids,
+ },
+};
+
static int __init sifive_ccache_init(void)
{
struct device_node *np;
struct resource res;
- int i, rc, intr_num;
const struct of_device_id *match;
unsigned long quirks;
+ int rc;
np = of_find_matching_node_and_match(NULL, sifive_ccache_ids, &match);
if (!np)
@@ -277,28 +315,6 @@ static int __init sifive_ccache_init(void)
goto err_unmap;
}
- intr_num = of_property_count_u32_elems(np, "interrupts");
- if (!intr_num) {
- pr_err("No interrupts property\n");
- rc = -ENODEV;
- goto err_unmap;
- }
-
- for (i = 0; i < intr_num; i++) {
- g_irq[i] = irq_of_parse_and_map(np, i);
-
- if (i == DATA_UNCORR && (quirks & QUIRK_BROKEN_DATA_UNCORR))
- continue;
-
- rc = request_irq(g_irq[i], ccache_int_handler, 0, "ccache_ecc",
- NULL);
- if (rc) {
- pr_err("Could not request IRQ %d\n", g_irq[i]);
- goto err_free_irq;
- }
- }
- of_node_put(np);
-
#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
if (quirks & QUIRK_NONSTANDARD_CACHE_OPS) {
riscv_cbom_block_size = SIFIVE_CCACHE_LINE_SIZE;
@@ -315,11 +331,15 @@ static int __init sifive_ccache_init(void)
#ifdef CONFIG_DEBUG_FS
setup_sifive_debug();
#endif
+
+ rc = platform_driver_register(&sifive_ccache_driver);
+ if (rc)
+ goto err_unmap;
+
+ of_node_put(np);
+
return 0;
-err_free_irq:
- while (--i >= 0)
- free_irq(g_irq[i], NULL);
err_unmap:
iounmap(ccache_base);
err_node_put:
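
The new sifive_ccache_probe() reads its quirk mask with device_get_match_data(), which hands back whatever .data pointer the matched of_device_id entry carries. A small sketch of that flow with hypothetical compatibles and quirk bits:

#include <linux/bits.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>

#define QUIRK_EXAMPLE	BIT(0)

static const struct of_device_id example_ids[] = {
	{ .compatible = "vendor,cache-a", .data = (void *)QUIRK_EXAMPLE },
	{ .compatible = "vendor,cache-b" },	/* no quirks */
	{ /* sentinel */ }
};

static int example_probe(struct platform_device *pdev)
{
	unsigned long quirks = (unsigned long)device_get_match_data(&pdev->dev);

	if (quirks & QUIRK_EXAMPLE)
		dev_info(&pdev->dev, "quirk enabled\n");

	return 0;
}
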
diff --git a/drivers/cdx/Makefile b/drivers/cdx/Makefile
index 5d1ea482419f0..749a3295c2bdc 100644
--- a/drivers/cdx/Makefile
+++ b/drivers/cdx/Makefile
@@ -8,3 +8,7 @@
ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CDX_BUS
obj-$(CONFIG_CDX_BUS) += cdx.o controller/
+
+ifdef CONFIG_GENERIC_MSI_IRQ
+obj-$(CONFIG_CDX_BUS) += cdx_msi.o
+endif
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index b74d76afccb63..236d381dc5f75 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -56,6 +56,7 @@
*/
#include <linux/init.h>
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -302,8 +303,19 @@ static int cdx_probe(struct device *dev)
{
struct cdx_driver *cdx_drv = to_cdx_driver(dev->driver);
struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
int error;
+ /*
+ * Setup MSI device data so that generic MSI alloc/free can
+ * be used by the device driver.
+ */
+ if (cdx->msi_domain) {
+ error = msi_setup_device_data(&cdx_dev->dev);
+ if (error)
+ return error;
+ }
+
error = cdx_drv->probe(cdx_dev);
if (error) {
dev_err_probe(dev, error, "%s failed\n", __func__);
@@ -787,6 +799,7 @@ int cdx_device_add(struct cdx_dev_params *dev_params)
/* Populate CDX dev params */
cdx_dev->req_id = dev_params->req_id;
+ cdx_dev->msi_dev_id = dev_params->msi_dev_id;
cdx_dev->vendor = dev_params->vendor;
cdx_dev->device = dev_params->device;
cdx_dev->subsystem_vendor = dev_params->subsys_vendor;
@@ -804,12 +817,19 @@ int cdx_device_add(struct cdx_dev_params *dev_params)
cdx_dev->dev.bus = &cdx_bus_type;
cdx_dev->dev.dma_mask = &cdx_dev->dma_mask;
cdx_dev->dev.release = cdx_device_release;
+ cdx_dev->msi_write_pending = false;
+ mutex_init(&cdx_dev->irqchip_lock);
/* Set Name */
dev_set_name(&cdx_dev->dev, "cdx-%02x:%02x",
((cdx->id << CDX_CONTROLLER_ID_SHIFT) | (cdx_dev->bus_num & CDX_BUS_NUM_MASK)),
cdx_dev->dev_num);
+ if (cdx->msi_domain) {
+ cdx_dev->num_msi = dev_params->num_msi;
+ dev_set_msi_domain(&cdx_dev->dev, cdx->msi_domain);
+ }
+
ret = device_add(&cdx_dev->dev);
if (ret) {
dev_err(&cdx_dev->dev,
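
The cdx.c changes above wire a device into the bus MSI domain in two steps: cdx_device_add() points the device at the controller's domain with dev_set_msi_domain(), and cdx_probe() then allocates the per-device MSI bookkeeping with msi_setup_device_data() so the generic MSI alloc/free paths can be used by the driver. A compact sketch of those two calls together (hypothetical helper, not part of the patch):

#include <linux/device.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>

static int example_attach_msi(struct device *dev, struct irq_domain *msi_domain)
{
	/* Point the device at the bus MSI domain (done in cdx_device_add())... */
	dev_set_msi_domain(dev, msi_domain);

	/*
	 * ...and allocate the per-device MSI descriptor storage (done in
	 * cdx_probe()) so the generic MSI alloc/free paths can be used later.
	 */
	return msi_setup_device_data(dev);
}
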
diff --git a/drivers/cdx/cdx.h b/drivers/cdx/cdx.h
index 300ad8be7a344..9c60c04dcf877 100644
--- a/drivers/cdx/cdx.h
+++ b/drivers/cdx/cdx.h
@@ -25,6 +25,8 @@
* @req_id: Requestor ID associated with CDX device
* @class: Class of the CDX Device
* @revision: Revision of the CDX device
+ * @msi_dev_id: MSI device ID associated with CDX device
+ * @num_msi: Number of MSIs supported by the device
*/
struct cdx_dev_params {
struct cdx_controller *cdx;
@@ -40,6 +42,8 @@ struct cdx_dev_params {
u32 req_id;
u32 class;
u8 revision;
+ u32 msi_dev_id;
+ u32 num_msi;
};
/**
@@ -79,4 +83,12 @@ int cdx_device_add(struct cdx_dev_params *dev_params);
*/
struct device *cdx_bus_add(struct cdx_controller *cdx, u8 bus_num);
+/**
+ * cdx_msi_domain_init - Init the CDX bus MSI domain.
+ * @dev: Device of the CDX bus controller
+ *
+ * Return: CDX MSI domain, NULL on failure
+ */
+struct irq_domain *cdx_msi_domain_init(struct device *dev);
+
#endif /* _CDX_H_ */
diff --git a/drivers/cdx/cdx_msi.c b/drivers/cdx/cdx_msi.c
new file mode 100644
index 0000000000000..e55f1716cfcb2
--- /dev/null
+++ b/drivers/cdx/cdx_msi.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD CDX bus driver MSI support
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/cdx/cdx_bus.h>
+
+#include "cdx.h"
+
+static void cdx_msi_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
+{
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
+ struct cdx_device *cdx_dev = to_cdx_device(msi_desc->dev);
+
+	/* Don't act on the message here; instead cache it and wait for
+	 * irq_bus_sync_unlock() to be called from preemptible task context.
+	 */
+ msi_desc->msg = *msg;
+ cdx_dev->msi_write_pending = true;
+}
+
+static void cdx_msi_write_irq_lock(struct irq_data *irq_data)
+{
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
+ struct cdx_device *cdx_dev = to_cdx_device(msi_desc->dev);
+
+ mutex_lock(&cdx_dev->irqchip_lock);
+}
+
+static void cdx_msi_write_irq_unlock(struct irq_data *irq_data)
+{
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
+ struct cdx_device *cdx_dev = to_cdx_device(msi_desc->dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ struct cdx_device_config dev_config;
+
+ if (!cdx_dev->msi_write_pending) {
+ mutex_unlock(&cdx_dev->irqchip_lock);
+ return;
+ }
+
+ cdx_dev->msi_write_pending = false;
+ mutex_unlock(&cdx_dev->irqchip_lock);
+
+ dev_config.msi.msi_index = msi_desc->msi_index;
+ dev_config.msi.data = msi_desc->msg.data;
+ dev_config.msi.addr = ((u64)(msi_desc->msg.address_hi) << 32) | msi_desc->msg.address_lo;
+
+	/*
+	 * dev_configure() is a controller callback which can interact with
+	 * firmware or other entities and can sleep, so invoke it outside
+	 * the mutex-held region.
+	 */
+ dev_config.type = CDX_DEV_MSI_CONF;
+ if (cdx->ops->dev_configure)
+ cdx->ops->dev_configure(cdx, cdx_dev->bus_num, cdx_dev->dev_num, &dev_config);
+}
+
+int cdx_enable_msi(struct cdx_device *cdx_dev)
+{
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ struct cdx_device_config dev_config;
+
+ dev_config.type = CDX_DEV_MSI_ENABLE;
+ dev_config.msi_enable = true;
+ if (cdx->ops->dev_configure) {
+ return cdx->ops->dev_configure(cdx, cdx_dev->bus_num, cdx_dev->dev_num,
+ &dev_config);
+ }
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL_GPL(cdx_enable_msi);
+
+void cdx_disable_msi(struct cdx_device *cdx_dev)
+{
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ struct cdx_device_config dev_config;
+
+ dev_config.type = CDX_DEV_MSI_ENABLE;
+ dev_config.msi_enable = false;
+ if (cdx->ops->dev_configure)
+ cdx->ops->dev_configure(cdx, cdx_dev->bus_num, cdx_dev->dev_num, &dev_config);
+}
+EXPORT_SYMBOL_GPL(cdx_disable_msi);
+
+static struct irq_chip cdx_msi_irq_chip = {
+ .name = "CDX-MSI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = msi_domain_set_affinity,
+ .irq_write_msi_msg = cdx_msi_write_msg,
+ .irq_bus_lock = cdx_msi_write_irq_lock,
+ .irq_bus_sync_unlock = cdx_msi_write_irq_unlock
+};
+
+/* Convert an msi_desc to a unique identifier within the domain. */
+static irq_hw_number_t cdx_domain_calc_hwirq(struct cdx_device *dev,
+ struct msi_desc *desc)
+{
+ return ((irq_hw_number_t)dev->msi_dev_id << 10) | desc->msi_index;
+}
+
+static void cdx_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = cdx_domain_calc_hwirq(to_cdx_device(desc->dev), desc);
+}
+
+static int cdx_msi_prepare(struct irq_domain *msi_domain,
+ struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct device *parent = cdx_dev->cdx->dev;
+ struct msi_domain_info *msi_info;
+ u32 dev_id;
+ int ret;
+
+ /* Retrieve device ID from requestor ID using parent device */
+ ret = of_map_id(parent->of_node, cdx_dev->msi_dev_id, "msi-map", "msi-map-mask",
+ NULL, &dev_id);
+ if (ret) {
+ dev_err(dev, "of_map_id failed for MSI: %d\n", ret);
+ return ret;
+ }
+
+#ifdef GENERIC_MSI_DOMAIN_OPS
+ /* Set the device Id to be passed to the GIC-ITS */
+ info->scratchpad[0].ul = dev_id;
+#endif
+
+ msi_info = msi_get_domain_info(msi_domain->parent);
+
+ return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
+}
+
+static struct msi_domain_ops cdx_msi_ops = {
+ .msi_prepare = cdx_msi_prepare,
+ .set_desc = cdx_msi_set_desc
+};
+
+static struct msi_domain_info cdx_msi_domain_info = {
+ .ops = &cdx_msi_ops,
+ .chip = &cdx_msi_irq_chip,
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS | MSI_FLAG_FREE_MSI_DESCS
+};
+
+struct irq_domain *cdx_msi_domain_init(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct fwnode_handle *fwnode_handle;
+ struct irq_domain *cdx_msi_domain;
+ struct device_node *parent_node;
+ struct irq_domain *parent;
+
+ fwnode_handle = of_node_to_fwnode(np);
+
+ parent_node = of_parse_phandle(np, "msi-map", 1);
+ if (!parent_node) {
+ dev_err(dev, "msi-map not present on cdx controller\n");
+ return NULL;
+ }
+
+ parent = irq_find_matching_fwnode(of_node_to_fwnode(parent_node), DOMAIN_BUS_NEXUS);
+ if (!parent || !msi_get_domain_info(parent)) {
+ dev_err(dev, "unable to locate ITS domain\n");
+ return NULL;
+ }
+
+ cdx_msi_domain = msi_create_irq_domain(fwnode_handle, &cdx_msi_domain_info, parent);
+ if (!cdx_msi_domain) {
+ dev_err(dev, "unable to create CDX-MSI domain\n");
+ return NULL;
+ }
+
+ dev_dbg(dev, "CDX-MSI domain created\n");
+
+ return cdx_msi_domain;
+}
+EXPORT_SYMBOL_NS_GPL(cdx_msi_domain_init, CDX_BUS_CONTROLLER);
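
cdx_msi_write_msg() deliberately defers the real work: the irq core brackets every message write with the bus-lock hooks, which is what lets the MCDI firmware write above happen from irq_bus_sync_unlock(), i.e. in sleepable context. The expected call order, shown as an illustrative comment only:

/*
 * Illustrative call order (not part of the patch):
 *
 *	irq_bus_lock()        ->  cdx_msi_write_irq_lock()      take irqchip_lock
 *	irq_write_msi_msg()   ->  cdx_msi_write_msg()           cache msg, mark pending
 *	irq_bus_sync_unlock() ->  cdx_msi_write_irq_unlock()    drop the lock, then
 *	                                                         issue the firmware write
 */
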
diff --git a/drivers/cdx/controller/Kconfig b/drivers/cdx/controller/Kconfig
index 61bf17fbe4336..f8e729761aeed 100644
--- a/drivers/cdx/controller/Kconfig
+++ b/drivers/cdx/controller/Kconfig
@@ -9,6 +9,7 @@ if CDX_BUS
config CDX_CONTROLLER
tristate "CDX bus controller"
+ select GENERIC_MSI_IRQ
select REMOTEPROC
select RPMSG
help
diff --git a/drivers/cdx/controller/cdx_controller.c b/drivers/cdx/controller/cdx_controller.c
index 85fe4b1c4e5ee..112a1541de6d7 100644
--- a/drivers/cdx/controller/cdx_controller.c
+++ b/drivers/cdx/controller/cdx_controller.c
@@ -9,6 +9,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/cdx/cdx_bus.h>
+#include <linux/irqdomain.h>
#include "cdx_controller.h"
#include "../cdx.h"
@@ -60,9 +61,19 @@ static int cdx_configure_device(struct cdx_controller *cdx,
u8 bus_num, u8 dev_num,
struct cdx_device_config *dev_config)
{
+ u16 msi_index;
int ret = 0;
+ u32 data;
+ u64 addr;
switch (dev_config->type) {
+ case CDX_DEV_MSI_CONF:
+ msi_index = dev_config->msi.msi_index;
+ data = dev_config->msi.data;
+ addr = dev_config->msi.addr;
+
+ ret = cdx_mcdi_write_msi(cdx->priv, bus_num, dev_num, msi_index, addr, data);
+ break;
case CDX_DEV_RESET_CONF:
ret = cdx_mcdi_reset_device(cdx->priv, bus_num, dev_num);
break;
@@ -70,6 +81,9 @@ static int cdx_configure_device(struct cdx_controller *cdx,
ret = cdx_mcdi_bus_master_enable(cdx->priv, bus_num, dev_num,
dev_config->bus_master_enable);
break;
+ case CDX_DEV_MSI_ENABLE:
+ ret = cdx_mcdi_msi_enable(cdx->priv, bus_num, dev_num, dev_config->msi_enable);
+ break;
default:
ret = -EINVAL;
}
@@ -178,6 +192,14 @@ static int xlnx_cdx_probe(struct platform_device *pdev)
cdx->priv = cdx_mcdi;
cdx->ops = &cdx_ops;
+ /* Create MSI domain */
+ cdx->msi_domain = cdx_msi_domain_init(&pdev->dev);
+ if (!cdx->msi_domain) {
+ dev_err(&pdev->dev, "cdx_msi_domain_init() failed");
+ ret = -ENODEV;
+ goto cdx_msi_fail;
+ }
+
ret = cdx_setup_rpmsg(pdev);
if (ret) {
if (ret != -EPROBE_DEFER)
@@ -189,6 +211,8 @@ static int xlnx_cdx_probe(struct platform_device *pdev)
return 0;
cdx_rpmsg_fail:
+ irq_domain_remove(cdx->msi_domain);
+cdx_msi_fail:
kfree(cdx);
cdx_alloc_fail:
cdx_mcdi_finish(cdx_mcdi);
@@ -205,6 +229,7 @@ static int xlnx_cdx_remove(struct platform_device *pdev)
cdx_destroy_rpmsg(pdev);
+ irq_domain_remove(cdx->msi_domain);
kfree(cdx);
cdx_mcdi_finish(cdx_mcdi);
diff --git a/drivers/cdx/controller/mc_cdx_pcol.h b/drivers/cdx/controller/mc_cdx_pcol.h
index 2de019406b577..832a44af963ef 100644
--- a/drivers/cdx/controller/mc_cdx_pcol.h
+++ b/drivers/cdx/controller/mc_cdx_pcol.h
@@ -455,6 +455,12 @@
#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_REQUESTER_ID_OFST 84
#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_REQUESTER_ID_LEN 4
+/* MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_V2 msgresponse */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_V2_LEN 92
+/* Requester ID used by device for GIC ITS DeviceID */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_V2_REQUESTER_DEVICE_ID_OFST 88
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_V2_REQUESTER_DEVICE_ID_LEN 4
+
/***********************************/
/*
* MC_CMD_CDX_BUS_DOWN
@@ -617,6 +623,64 @@
#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_MMIO_REGIONS_ENABLE_WIDTH 1
/***********************************/
+/*
+ * MC_CMD_CDX_DEVICE_WRITE_MSI_MSG
+ * Populates the MSI message to be used by the hardware to raise the specified
+ * interrupt vector. Versal-net implementation specific limitations are that
+ * only 4 CDX devices with MSI interrupt capability are supported and all
+ * vectors within a device must use the same write address. The command will
+ * return EINVAL if any of these limitations is violated.
+ */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG 0x9
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_MSGSET 0x9
+#undef MC_CMD_0x9_PRIVILEGE_CTG
+
+#define MC_CMD_0x9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN msgrequest */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_LEN 28
+/* Device bus number, in range 0 to BUS_COUNT-1 */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_BUS_OFST 0
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_BUS_LEN 4
+/* Device number relative to the bus, in range 0 to DEVICE_COUNT-1 for that bus */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_DEVICE_OFST 4
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_DEVICE_LEN 4
+/*
+ * Device-relative MSI vector number. Must be < MSI_COUNT reported for the
+ * device.
+ */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_VECTOR_OFST 8
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_VECTOR_LEN 4
+/* Reserved (alignment) */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_RESERVED_OFST 12
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_RESERVED_LEN 4
+/*
+ * MSI address to be used by the hardware. Typically, on ARM systems this
+ * address is translated by the IOMMU (if enabled) and it is the responsibility
+ * of the entity managing the IOMMU (APU kernel) to supply the correct IOVA
+ * here.
+ */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_OFST 16
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_LEN 8
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_LO_OFST 16
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_LO_LEN 4
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_LO_LBN 128
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_LO_WIDTH 32
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_HI_OFST 20
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_HI_LEN 4
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_HI_LBN 160
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_HI_WIDTH 32
+/*
+ * MSI data to be used by the hardware. On versal-net, only the lower
+ * 16 bits are used; the remaining bits are ignored and should be set to zero.
+ */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_DATA_OFST 24
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_DATA_LEN 4
+
+/* MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_OUT msgresponse */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_OUT_LEN 0
+
+/***********************************/
/* MC_CMD_V2_EXTN - Encapsulation for a v2 extended command */
#define MC_CMD_V2_EXTN 0x7f
diff --git a/drivers/cdx/controller/mcdi_functions.c b/drivers/cdx/controller/mcdi_functions.c
index b1f5309463893..885c69e6ebe5b 100644
--- a/drivers/cdx/controller/mcdi_functions.c
+++ b/drivers/cdx/controller/mcdi_functions.c
@@ -49,7 +49,7 @@ int cdx_mcdi_get_dev_config(struct cdx_mcdi *cdx,
u8 bus_num, u8 dev_num,
struct cdx_dev_params *dev_params)
{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_V2_LEN);
MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_IN_LEN);
struct resource *res = &dev_params->res[0];
size_t outlen;
@@ -64,7 +64,7 @@ int cdx_mcdi_get_dev_config(struct cdx_mcdi *cdx,
if (ret)
return ret;
- if (outlen != MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_LEN)
+ if (outlen != MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_V2_LEN)
return -EIO;
dev_params->bus_num = bus_num;
@@ -73,6 +73,9 @@ int cdx_mcdi_get_dev_config(struct cdx_mcdi *cdx,
req_id = MCDI_DWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_REQUESTER_ID);
dev_params->req_id = req_id;
+ dev_params->msi_dev_id = MCDI_DWORD(outbuf,
+ CDX_BUS_GET_DEVICE_CONFIG_OUT_V2_REQUESTER_DEVICE_ID);
+
dev_params->res_count = 0;
if (MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE) != 0) {
res[dev_params->res_count].start =
@@ -127,6 +130,7 @@ int cdx_mcdi_get_dev_config(struct cdx_mcdi *cdx,
dev_params->class = MCDI_DWORD(outbuf,
CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_CLASS) & 0xFFFFFF;
dev_params->revision = MCDI_BYTE(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_REVISION);
+ dev_params->num_msi = MCDI_DWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MSI_COUNT);
return 0;
}
@@ -155,6 +159,24 @@ int cdx_mcdi_bus_disable(struct cdx_mcdi *cdx, u8 bus_num)
return ret;
}
+int cdx_mcdi_write_msi(struct cdx_mcdi *cdx, u8 bus_num, u8 dev_num,
+ u32 msi_vector, u64 msi_address, u32 msi_data)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_LEN);
+ int ret;
+
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_WRITE_MSI_MSG_IN_BUS, bus_num);
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_WRITE_MSI_MSG_IN_DEVICE, dev_num);
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_VECTOR, msi_vector);
+ MCDI_SET_QWORD(inbuf, CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS, msi_address);
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_DATA, msi_data);
+
+ ret = cdx_mcdi_rpc(cdx, MC_CMD_CDX_DEVICE_WRITE_MSI_MSG, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+
+ return ret;
+}
+
int cdx_mcdi_reset_device(struct cdx_mcdi *cdx, u8 bus_num, u8 dev_num)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_DEVICE_RESET_IN_LEN);
@@ -226,3 +248,10 @@ int cdx_mcdi_bus_master_enable(struct cdx_mcdi *cdx, u8 bus_num,
return cdx_mcdi_ctrl_flag_set(cdx, bus_num, dev_num, enable,
MC_CMD_CDX_DEVICE_CONTROL_SET_IN_BUS_MASTER_ENABLE_LBN);
}
+
+int cdx_mcdi_msi_enable(struct cdx_mcdi *cdx, u8 bus_num,
+ u8 dev_num, bool enable)
+{
+ return cdx_mcdi_ctrl_flag_set(cdx, bus_num, dev_num, enable,
+ MC_CMD_CDX_DEVICE_CONTROL_SET_IN_MSI_ENABLE_LBN);
+}
diff --git a/drivers/cdx/controller/mcdi_functions.h b/drivers/cdx/controller/mcdi_functions.h
index 258a5462fbe34..b9942affdc6b2 100644
--- a/drivers/cdx/controller/mcdi_functions.h
+++ b/drivers/cdx/controller/mcdi_functions.h
@@ -66,6 +66,26 @@ int cdx_mcdi_bus_enable(struct cdx_mcdi *cdx, u8 bus_num);
int cdx_mcdi_bus_disable(struct cdx_mcdi *cdx, u8 bus_num);
/**
+ * cdx_mcdi_write_msi - Write MSI configuration for CDX device
+ * @cdx: pointer to MCDI interface.
+ * @bus_num: Bus number.
+ * @dev_num: Device number.
+ * @msi_vector: Device-relative MSI vector number.
+ * Must be < MSI_COUNT reported for the device.
+ * @msi_address: MSI address to be used by the hardware. Typically, on ARM
+ * systems this address is translated by the IOMMU (if enabled) and
+ * it is the responsibility of the entity managing the IOMMU (APU kernel)
+ * to supply the correct IOVA here.
+ * @msi_data: MSI data to be used by the hardware. On versal-net, only the
+ *	lower 16 bits are used; the remaining bits are ignored and should be
+ *	set to zero.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int cdx_mcdi_write_msi(struct cdx_mcdi *cdx, u8 bus_num, u8 dev_num,
+ u32 msi_vector, u64 msi_address, u32 msi_data);
+
+/**
* cdx_mcdi_reset_device - Reset cdx device represented by bus_num:dev_num
* @cdx: pointer to MCDI interface.
* @bus_num: Bus number.
@@ -89,4 +109,17 @@ int cdx_mcdi_reset_device(struct cdx_mcdi *cdx,
int cdx_mcdi_bus_master_enable(struct cdx_mcdi *cdx, u8 bus_num,
u8 dev_num, bool enable);
+/**
+ * cdx_mcdi_msi_enable - Enable/Disable MSIs for cdx device represented
+ * by bus_num:dev_num
+ * @cdx: pointer to MCDI interface.
+ * @bus_num: Bus number.
+ * @dev_num: Device number.
+ * @enable: Enable MSIs if set, disable otherwise.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int cdx_mcdi_msi_enable(struct cdx_mcdi *cdx, u8 bus_num,
+ u8 dev_num, bool enable);
+
#endif /* CDX_MCDI_FUNCTIONS_H */
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 9c90b1d2c0366..d51fc8321d411 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -87,7 +87,6 @@ struct hpets {
struct hpets *hp_next;
struct hpet __iomem *hp_hpet;
unsigned long hp_hpet_phys;
- struct clocksource *hp_clocksource;
unsigned long long hp_tick_freq;
unsigned long hp_delta;
unsigned int hp_ntimer;
diff --git a/drivers/char/hw_random/hisi-rng.c b/drivers/char/hw_random/hisi-rng.c
index b6f27566e0ba3..4e501d5c121ff 100644
--- a/drivers/char/hw_random/hisi-rng.c
+++ b/drivers/char/hw_random/hisi-rng.c
@@ -89,10 +89,8 @@ static int hisi_rng_probe(struct platform_device *pdev)
rng->rng.read = hisi_rng_read;
ret = devm_hwrng_register(&pdev->dev, &rng->rng);
- if (ret) {
- dev_err(&pdev->dev, "failed to register hwrng\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to register hwrng\n");
return 0;
}
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 2e669e7c14d31..1b49e3a86d57b 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -29,7 +29,7 @@
static char version[] =
DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_DESCRIPTION("Niagara2 RNG driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 456be28ba67cb..2597cb43f4387 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -702,7 +702,7 @@ static void extract_entropy(void *buf, size_t len)
static void __cold _credit_init_bits(size_t bits)
{
- static struct execute_work set_ready;
+ static DECLARE_WORK(set_ready, crng_set_ready);
unsigned int new, orig, add;
unsigned long flags;
@@ -718,8 +718,8 @@ static void __cold _credit_init_bits(size_t bits)
if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
- if (static_key_initialized)
- execute_in_process_context(crng_set_ready, &set_ready);
+ if (static_key_initialized && system_unbound_wq)
+ queue_work(system_unbound_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
@@ -890,8 +890,8 @@ void __init random_init(void)
/*
* If we were initialized by the cpu or bootloader before jump labels
- * are initialized, then we should enable the static branch here, where
- * it's guaranteed that jump labels have been initialized.
+ * or workqueues are initialized, then we should enable the static
+ * branch here, where it's guaranteed that these have been initialized.
*/
if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
crng_set_ready(NULL);
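
The random.c change above swaps execute_in_process_context() for a statically declared work item queued on system_unbound_wq, so the ready notification always runs in process context once workqueues exist. A minimal sketch of that DECLARE_WORK pattern with hypothetical names:

#include <linux/workqueue.h>

static void example_ready(struct work_struct *work)
{
	/* Runs in process context on system_unbound_wq. */
}

static DECLARE_WORK(example_work, example_ready);

static void example_trigger(void)
{
	/* No allocation at call time; safe once workqueues have been created. */
	if (system_unbound_wq)
		queue_work(system_unbound_wq, &example_work);
}
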
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
index 661574bb0acf5..45ca33b3dcb26 100644
--- a/drivers/char/tpm/st33zp24/i2c.c
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -167,7 +167,7 @@ static struct i2c_driver st33zp24_i2c_driver = {
module_i2c_driver(st33zp24_i2c_driver);
-MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
+MODULE_AUTHOR("TPM support <TPMsupport@list.st.com>");
MODULE_DESCRIPTION("STM TPM 1.2 I2C ST33 Driver");
MODULE_VERSION("1.3.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
index f5811b301d3b2..5149231f3de28 100644
--- a/drivers/char/tpm/st33zp24/spi.c
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -284,7 +284,7 @@ static struct spi_driver st33zp24_spi_driver = {
module_spi_driver(st33zp24_spi_driver);
-MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
+MODULE_AUTHOR("TPM support <TPMsupport@list.st.com>");
MODULE_DESCRIPTION("STM TPM 1.2 SPI ST33 Driver");
MODULE_VERSION("1.3.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index a5b554cd47786..c0771980bc2ff 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -582,7 +582,7 @@ int st33zp24_pm_resume(struct device *dev)
EXPORT_SYMBOL(st33zp24_pm_resume);
#endif
-MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
+MODULE_AUTHOR("TPM support <TPMsupport@list.st.com>");
MODULE_DESCRIPTION("ST33ZP24 TPM 1.2 driver");
MODULE_VERSION("1.3.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 66b16d26eecc7..757336324c904 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -524,7 +524,7 @@ static void __exit tpm_exit(void)
subsys_initcall(tpm_init);
module_exit(tpm_exit);
-MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_AUTHOR("Leendert van Doorn <leendert@watson.ibm.com>");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index 54a6750a67578..9fb2defa9dc42 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -229,7 +229,7 @@ static void __exit cleanup_atmel(void)
module_init(init_atmel);
module_exit(cleanup_atmel);
-MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_AUTHOR("Leendert van Doorn <leendert@watson.ibm.com>");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index 5490f7e0fa436..3c3ee5f551db1 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -654,6 +654,6 @@ static struct i2c_driver i2c_nuvoton_driver = {
module_i2c_driver(i2c_nuvoton_driver);
-MODULE_AUTHOR("Dan Morav (dan.morav@nuvoton.com)");
+MODULE_AUTHOR("Dan Morav <dan.morav@nuvoton.com>");
MODULE_DESCRIPTION("Nuvoton TPM I2C Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 038701d483513..0f62bbc940daa 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -410,7 +410,7 @@ static void __exit cleanup_nsc(void)
module_init(init_nsc);
module_exit(cleanup_nsc);
-MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_AUTHOR("Leendert van Doorn <leendert@watson.ibm.com>");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 14652aaf82546..2f7326d297adb 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -429,7 +429,7 @@ static void __exit cleanup_tis(void)
module_init(init_tis);
module_exit(cleanup_tis);
-MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_AUTHOR("Leendert van Doorn <leendert@watson.ibm.com>");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 64c875657687d..714070ebb6e7a 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -1360,7 +1360,7 @@ int tpm_tis_resume(struct device *dev)
EXPORT_SYMBOL_GPL(tpm_tis_resume);
#endif
-MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_AUTHOR("Leendert van Doorn <leendert@watson.ibm.com>");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 30e953988cabe..11c502039faf5 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -711,7 +711,7 @@ static void __exit vtpm_module_exit(void)
module_init(vtpm_module_init);
module_exit(vtpm_module_exit);
-MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)");
+MODULE_AUTHOR("Stefan Berger <stefanb@us.ibm.com>");
MODULE_DESCRIPTION("vTPM Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 019cf6079cecd..4f6c3cb8aa413 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -636,11 +636,11 @@ static int hwicap_setup(struct platform_device *pdev, int id,
retval = -ENOMEM;
goto failed;
}
- dev_set_drvdata(dev, (void *)drvdata);
+ dev_set_drvdata(dev, drvdata);
drvdata->base_address = devm_platform_ioremap_resource(pdev, 0);
- if (!drvdata->base_address) {
- retval = -ENODEV;
+ if (IS_ERR(drvdata->base_address)) {
+ retval = PTR_ERR(drvdata->base_address);
goto failed;
}
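
The xilinx_hwicap fix above corrects a common mistake: devm_platform_ioremap_resource() reports failure with an ERR_PTR-encoded pointer, never NULL, so the result must be tested with IS_ERR() and propagated with PTR_ERR(). A minimal sketch (hypothetical probe):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))			/* not: if (!base) */
		return PTR_ERR(base);		/* not: -ENODEV */

	return 0;
}
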
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
index e5372e45d2118..8802e2a6fd20b 100644
--- a/drivers/char/xillybus/xillybus_of.c
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -64,19 +64,17 @@ static int xilly_drv_probe(struct platform_device *op)
return xillybus_endpoint_discovery(endpoint);
}
-static int xilly_drv_remove(struct platform_device *op)
+static void xilly_drv_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct xilly_endpoint *endpoint = dev_get_drvdata(dev);
xillybus_endpoint_remove(endpoint);
-
- return 0;
}
static struct platform_driver xillybus_platform_driver = {
.probe = xilly_drv_probe,
- .remove = xilly_drv_remove,
+ .remove_new = xilly_drv_remove,
.driver = {
.name = xillyname,
.of_match_table = xillybus_of_match,
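
The xillybus change above moves to the .remove_new callback, which returns void; the old int return value from .remove was ignored anyway, so teardown must simply complete unconditionally. A short sketch of the convention (hypothetical driver):

#include <linux/platform_device.h>

static void example_remove(struct platform_device *pdev)
{
	/* Teardown only; there is no return value to report errors through. */
}

static struct platform_driver example_driver = {
	.remove_new = example_remove,
	.driver = {
		.name = "example",
	},
};
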
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index 909c3137c4283..faf88324f7b16 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -19,7 +19,7 @@
* This includes the gates (configured from aspeed_g6_gates), plus the
* explicitly-configured clocks (ASPEED_CLK_HPLL and up).
*/
-#define ASPEED_G6_NUM_CLKS 72
+#define ASPEED_G6_NUM_CLKS 73
#define ASPEED_G6_SILICON_REV 0x014
#define CHIP_REVISION_ID GENMASK(23, 16)
@@ -157,7 +157,7 @@ static const struct aspeed_gate_data aspeed_g6_gates[] = {
[ASPEED_CLK_GATE_UART11CLK] = { 59, -1, "uart11clk-gate", "uartx", 0 }, /* UART11 */
[ASPEED_CLK_GATE_UART12CLK] = { 60, -1, "uart12clk-gate", "uartx", 0 }, /* UART12 */
[ASPEED_CLK_GATE_UART13CLK] = { 61, -1, "uart13clk-gate", "uartx", 0 }, /* UART13 */
- [ASPEED_CLK_GATE_FSICLK] = { 62, 59, "fsiclk-gate", NULL, 0 }, /* FSI */
+ [ASPEED_CLK_GATE_FSICLK] = { 62, 59, "fsiclk-gate", "fsiclk", 0 }, /* FSI */
};
static const struct clk_div_table ast2600_eclk_div_table[] = {
@@ -821,6 +821,9 @@ static void __init aspeed_g6_cc(struct regmap *map)
hw = clk_hw_register_fixed_factor(NULL, "i3cclk", "apll", 0, 1, 8);
aspeed_g6_clk_data->hws[ASPEED_CLK_I3C] = hw;
+
+ hw = clk_hw_register_fixed_factor(NULL, "fsiclk", "apll", 0, 1, 4);
+ aspeed_g6_clk_data->hws[ASPEED_CLK_FSI] = hw;
};
static void __init aspeed_g6_cc_init(struct device_node *np)
diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
index b0122093c6ff8..e48be7a6c0e2b 100644
--- a/drivers/clk/clk-cdce925.c
+++ b/drivers/clk/clk-cdce925.c
@@ -101,7 +101,6 @@ static void cdce925_pll_find_rate(unsigned long rate,
if (rate <= parent_rate) {
/* Can always deliver parent_rate in bypass mode */
- rate = parent_rate;
*n = 0;
*m = 0;
} else {
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index 737aa70e2cb3d..90e6078fb6e1b 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -182,6 +182,46 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all);
+static void devm_clk_bulk_release_all_enable(struct device *dev, void *res)
+{
+ struct clk_bulk_devres *devres = res;
+
+ clk_bulk_disable_unprepare(devres->num_clks, devres->clks);
+ clk_bulk_put_all(devres->num_clks, devres->clks);
+}
+
+int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
+ struct clk_bulk_data **clks)
+{
+ struct clk_bulk_devres *devres;
+ int ret;
+
+ devres = devres_alloc(devm_clk_bulk_release_all_enable,
+ sizeof(*devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ ret = clk_bulk_get_all(dev, &devres->clks);
+ if (ret > 0) {
+ *clks = devres->clks;
+ devres->num_clks = ret;
+ } else {
+ devres_free(devres);
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(devres->num_clks, *clks);
+ if (!ret) {
+ devres_add(dev, devres);
+ } else {
+ clk_bulk_put_all(devres->num_clks, devres->clks);
+ devres_free(devres);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enable);
+
static int devm_clk_match(struct device *dev, void *res, void *data)
{
struct clk **c = res;
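
devm_clk_bulk_get_all_enable() added above bundles clk_bulk_get_all() with clk_bulk_prepare_enable() and registers a devres action that disables, unprepares and puts everything on driver detach. A hedged sketch of how a consumer might call it (hypothetical probe; note that, as implemented here, the number of clocks is not handed back to the caller):

#include <linux/clk.h>
#include <linux/device.h>

static int example_probe(struct device *dev)
{
	struct clk_bulk_data *clks;
	int ret;

	/*
	 * Gets, prepares and enables every clock listed for the device; all
	 * of it is undone automatically when the driver detaches.
	 */
	ret = devm_clk_bulk_get_all_enable(dev, &clks);
	if (ret < 0)
		return ret;

	return 0;
}
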
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index b3e66202b9424..fe0500a1af3ea 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -57,10 +57,22 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static unsigned long clk_factor_recalc_accuracy(struct clk_hw *hw,
+ unsigned long parent_accuracy)
+{
+ struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
+
+ if (fix->flags & CLK_FIXED_FACTOR_FIXED_ACCURACY)
+ return fix->acc;
+
+ return parent_accuracy;
+}
+
const struct clk_ops clk_fixed_factor_ops = {
.round_rate = clk_factor_round_rate,
.set_rate = clk_factor_set_rate,
.recalc_rate = clk_factor_recalc_rate,
+ .recalc_accuracy = clk_factor_recalc_accuracy,
};
EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
@@ -79,13 +91,12 @@ static void devm_clk_hw_register_fixed_factor_release(struct device *dev, void *
static struct clk_hw *
__clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
const char *name, const char *parent_name,
- const struct clk_hw *parent_hw, int index,
+ const struct clk_hw *parent_hw, const struct clk_parent_data *pdata,
unsigned long flags, unsigned int mult, unsigned int div,
- bool devm)
+ unsigned long acc, unsigned int fixflags, bool devm)
{
struct clk_fixed_factor *fix;
struct clk_init_data init = { };
- struct clk_parent_data pdata = { .index = index };
struct clk_hw *hw;
int ret;
@@ -105,6 +116,8 @@ __clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
fix->mult = mult;
fix->div = div;
fix->hw.init = &init;
+ fix->acc = acc;
+ fix->flags = fixflags;
init.name = name;
init.ops = &clk_fixed_factor_ops;
@@ -114,7 +127,7 @@ __clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
else if (parent_hw)
init.parent_hws = &parent_hw;
else
- init.parent_data = &pdata;
+ init.parent_data = pdata;
init.num_parents = 1;
hw = &fix->hw;
@@ -151,8 +164,10 @@ struct clk_hw *devm_clk_hw_register_fixed_factor_index(struct device *dev,
const char *name, unsigned int index, unsigned long flags,
unsigned int mult, unsigned int div)
{
- return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, NULL, index,
- flags, mult, div, true);
+ const struct clk_parent_data pdata = { .index = index };
+
+ return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, NULL, &pdata,
+ flags, mult, div, 0, 0, true);
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor_index);
@@ -173,8 +188,10 @@ struct clk_hw *devm_clk_hw_register_fixed_factor_parent_hw(struct device *dev,
const char *name, const struct clk_hw *parent_hw,
unsigned long flags, unsigned int mult, unsigned int div)
{
+ const struct clk_parent_data pdata = { .index = -1 };
+
return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, parent_hw,
- -1, flags, mult, div, true);
+ &pdata, flags, mult, div, 0, 0, true);
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor_parent_hw);
@@ -182,9 +199,10 @@ struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
const char *name, const struct clk_hw *parent_hw,
unsigned long flags, unsigned int mult, unsigned int div)
{
- return __clk_hw_register_fixed_factor(dev, NULL, name, NULL,
- parent_hw, -1, flags, mult, div,
- false);
+ const struct clk_parent_data pdata = { .index = -1 };
+
+ return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, parent_hw,
+ &pdata, flags, mult, div, 0, 0, false);
}
EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor_parent_hw);
@@ -192,11 +210,37 @@ struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div)
{
- return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, NULL, -1,
- flags, mult, div, false);
+ const struct clk_parent_data pdata = { .index = -1 };
+
+ return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, NULL,
+ &pdata, flags, mult, div, 0, 0, false);
}
EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor);
+struct clk_hw *clk_hw_register_fixed_factor_fwname(struct device *dev,
+ struct device_node *np, const char *name, const char *fw_name,
+ unsigned long flags, unsigned int mult, unsigned int div)
+{
+ const struct clk_parent_data pdata = { .index = -1, .fw_name = fw_name };
+
+ return __clk_hw_register_fixed_factor(dev, np, name, NULL, NULL,
+ &pdata, flags, mult, div, 0, 0, false);
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor_fwname);
+
+struct clk_hw *clk_hw_register_fixed_factor_with_accuracy_fwname(struct device *dev,
+ struct device_node *np, const char *name, const char *fw_name,
+ unsigned long flags, unsigned int mult, unsigned int div,
+ unsigned long acc)
+{
+ const struct clk_parent_data pdata = { .index = -1, .fw_name = fw_name };
+
+ return __clk_hw_register_fixed_factor(dev, np, name, NULL, NULL,
+ &pdata, flags, mult, div, acc,
+ CLK_FIXED_FACTOR_FIXED_ACCURACY, false);
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor_with_accuracy_fwname);
+
struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div)
@@ -239,16 +283,43 @@ struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div)
{
- return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, NULL, -1,
- flags, mult, div, true);
+ const struct clk_parent_data pdata = { .index = -1 };
+
+ return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, NULL,
+ &pdata, flags, mult, div, 0, 0, true);
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor);
+struct clk_hw *devm_clk_hw_register_fixed_factor_fwname(struct device *dev,
+ struct device_node *np, const char *name, const char *fw_name,
+ unsigned long flags, unsigned int mult, unsigned int div)
+{
+ const struct clk_parent_data pdata = { .index = -1, .fw_name = fw_name };
+
+ return __clk_hw_register_fixed_factor(dev, np, name, NULL, NULL,
+ &pdata, flags, mult, div, 0, 0, true);
+}
+EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor_fwname);
+
+struct clk_hw *devm_clk_hw_register_fixed_factor_with_accuracy_fwname(struct device *dev,
+ struct device_node *np, const char *name, const char *fw_name,
+ unsigned long flags, unsigned int mult, unsigned int div,
+ unsigned long acc)
+{
+ const struct clk_parent_data pdata = { .index = -1, .fw_name = fw_name };
+
+ return __clk_hw_register_fixed_factor(dev, np, name, NULL, NULL,
+ &pdata, flags, mult, div, acc,
+ CLK_FIXED_FACTOR_FIXED_ACCURACY, true);
+}
+EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor_with_accuracy_fwname);
+
#ifdef CONFIG_OF
static struct clk_hw *_of_fixed_factor_clk_setup(struct device_node *node)
{
struct clk_hw *hw;
const char *clk_name = node->name;
+ const struct clk_parent_data pdata = { .index = 0 };
u32 div, mult;
int ret;
@@ -266,8 +337,8 @@ static struct clk_hw *_of_fixed_factor_clk_setup(struct device_node *node)
of_property_read_string(node, "clock-output-names", &clk_name);
- hw = __clk_hw_register_fixed_factor(NULL, node, clk_name, NULL, NULL, 0,
- 0, mult, div, false);
+ hw = __clk_hw_register_fixed_factor(NULL, node, clk_name, NULL, NULL,
+ &pdata, 0, mult, div, 0, 0, false);
if (IS_ERR(hw)) {
/*
* Clear OF_POPULATED flag so that clock registration can be
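
The new *_fwname registration helpers above let a fixed-factor clock name its parent through the device's clock-names binding instead of a global clk name or parent index. A brief usage sketch with hypothetical names:

#include <linux/clk-provider.h>
#include <linux/err.h>

static int example_register(struct device *dev, struct device_node *np)
{
	struct clk_hw *hw;

	/* "ref" is the parent's clock-names entry; the output runs at ref / 4. */
	hw = devm_clk_hw_register_fixed_factor_fwname(dev, np, "example_div4",
						       "ref", 0, 1, 4);
	return PTR_ERR_OR_ZERO(hw);
}
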
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index 5067e067e9066..da057172cc90f 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -140,8 +140,8 @@ void clk_fractional_divider_general_approximation(struct clk_hw *hw,
}
if (fd->flags & CLK_FRAC_DIVIDER_ZERO_BASED) {
- max_m = 1 << fd->mwidth;
- max_n = 1 << fd->nwidth;
+ max_m = BIT(fd->mwidth);
+ max_n = BIT(fd->nwidth);
} else {
max_m = GENMASK(fd->mwidth - 1, 0);
max_n = GENMASK(fd->nwidth - 1, 0);
@@ -182,8 +182,8 @@ static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
u32 val;
if (fd->flags & CLK_FRAC_DIVIDER_ZERO_BASED) {
- max_m = 1 << fd->mwidth;
- max_n = 1 << fd->nwidth;
+ max_m = BIT(fd->mwidth);
+ max_n = BIT(fd->nwidth);
} else {
max_m = GENMASK(fd->mwidth - 1, 0);
max_n = GENMASK(fd->nwidth - 1, 0);
@@ -195,14 +195,14 @@ static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
n--;
}
+ mmask = GENMASK(fd->mwidth - 1, 0) << fd->mshift;
+ nmask = GENMASK(fd->nwidth - 1, 0) << fd->nshift;
+
if (fd->lock)
spin_lock_irqsave(fd->lock, flags);
else
__acquire(fd->lock);
- mmask = GENMASK(fd->mwidth - 1, 0) << fd->mshift;
- nmask = GENMASK(fd->nwidth - 1, 0) << fd->nshift;
-
val = clk_fd_readl(fd);
val &= ~(mmask | nmask);
val |= (m << fd->mshift) | (n << fd->nshift);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 2253c154a8248..8cca52be993f4 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -37,6 +37,10 @@ static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
+/* List of registered clks that use runtime PM */
+static HLIST_HEAD(clk_rpm_list);
+static DEFINE_MUTEX(clk_rpm_list_lock);
+
static const struct hlist_head *all_lists[] = {
&clk_root_list,
&clk_orphan_list,
@@ -59,6 +63,7 @@ struct clk_core {
struct clk_hw *hw;
struct module *owner;
struct device *dev;
+ struct hlist_node rpm_node;
struct device_node *of_node;
struct clk_core *parent;
struct clk_parent_map *parents;
@@ -122,6 +127,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
pm_runtime_put_sync(core->dev);
}
+/**
+ * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
+ *
+ * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
+ * that disabling unused clks avoids a deadlock where a device is runtime PM
+ * resuming/suspending and the runtime PM callback is trying to grab the
+ * prepare_lock for something like clk_prepare_enable() while
+ * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
+ * PM resume/suspend the device as well.
+ *
+ * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
+ * success. The lock is released on failure.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int clk_pm_runtime_get_all(void)
+{
+ int ret;
+ struct clk_core *core, *failed;
+
+ /*
+ * Grab the list lock to prevent any new clks from being registered
+ * or unregistered until clk_pm_runtime_put_all().
+ */
+ mutex_lock(&clk_rpm_list_lock);
+
+ /*
+ * Runtime PM "get" all the devices that are needed for the clks
+ * currently registered. Do this without holding the prepare_lock, to
+ * avoid the deadlock.
+ */
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ ret = clk_pm_runtime_get(core);
+ if (ret) {
+ failed = core;
+ pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
+ dev_name(failed->dev), failed->name);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ if (core == failed)
+ break;
+
+ clk_pm_runtime_put(core);
+ }
+ mutex_unlock(&clk_rpm_list_lock);
+
+ return ret;
+}
+
+/**
+ * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
+ *
+ * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
+ * the 'clk_rpm_list_lock'.
+ */
+static void clk_pm_runtime_put_all(void)
+{
+ struct clk_core *core;
+
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
+ clk_pm_runtime_put(core);
+ mutex_unlock(&clk_rpm_list_lock);
+}
+
+static void clk_pm_runtime_init(struct clk_core *core)
+{
+ struct device *dev = core->dev;
+
+ if (dev && pm_runtime_enabled(dev)) {
+ core->rpm_enabled = true;
+
+ mutex_lock(&clk_rpm_list_lock);
+ hlist_add_head(&core->rpm_node, &clk_rpm_list);
+ mutex_unlock(&clk_rpm_list_lock);
+ }
+}
+
/*** locking ***/
static void clk_prepare_lock(void)
{
@@ -418,6 +506,9 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
if (IS_ERR(hw))
return ERR_CAST(hw);
+ if (!hw)
+ return NULL;
+
return hw->core;
}
@@ -939,6 +1030,25 @@ int clk_rate_exclusive_get(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
+static void devm_clk_rate_exclusive_put(void *data)
+{
+ struct clk *clk = data;
+
+ clk_rate_exclusive_put(clk);
+}
+
+int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk)
+{
+ int ret;
+
+ ret = clk_rate_exclusive_get(clk);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_clk_rate_exclusive_put, clk);
+}
+EXPORT_SYMBOL_GPL(devm_clk_rate_exclusive_get);
+
static void clk_core_unprepare(struct clk_core *core)
{
lockdep_assert_held(&prepare_lock);
@@ -1359,9 +1469,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
if (core->flags & CLK_IGNORE_UNUSED)
return;
- if (clk_pm_runtime_get(core))
- return;
-
if (clk_core_is_prepared(core)) {
trace_clk_unprepare(core);
if (core->ops->unprepare_unused)
@@ -1370,8 +1477,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
core->ops->unprepare(core->hw);
trace_clk_unprepare_complete(core);
}
-
- clk_pm_runtime_put(core);
}
static void __init clk_disable_unused_subtree(struct clk_core *core)
@@ -1387,9 +1492,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(core->parent);
- if (clk_pm_runtime_get(core))
- goto unprepare_out;
-
flags = clk_enable_lock();
if (core->enable_count)
@@ -1414,8 +1516,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
unlock_out:
clk_enable_unlock(flags);
- clk_pm_runtime_put(core);
-unprepare_out:
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_disable_unprepare(core->parent);
}
@@ -1431,6 +1531,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
static int __init clk_disable_unused(void)
{
struct clk_core *core;
+ int ret;
if (clk_ignore_unused) {
pr_warn("clk: Not disabling unused clocks\n");
@@ -1439,6 +1540,13 @@ static int __init clk_disable_unused(void)
pr_info("clk: Disabling unused clocks\n");
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
+ /*
+ * Grab the prepare lock to keep the clk topology stable while iterating
+ * over clks.
+ */
clk_prepare_lock();
hlist_for_each_entry(core, &clk_root_list, child_node)
@@ -1455,6 +1563,8 @@ static int __init clk_disable_unused(void)
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
+
return 0;
}
late_initcall_sync(clk_disable_unused);
@@ -3230,9 +3340,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
{
struct clk_core *child;
- clk_pm_runtime_get(c);
clk_summary_show_one(s, c, level);
- clk_pm_runtime_put(c);
hlist_for_each_entry(child, &c->children, child_node)
clk_summary_show_subtree(s, child, level + 1);
@@ -3242,11 +3350,15 @@ static int clk_summary_show(struct seq_file *s, void *data)
{
struct clk_core *c;
struct hlist_head **lists = s->private;
+ int ret;
seq_puts(s, " enable prepare protect duty hardware connection\n");
seq_puts(s, " clock count count count rate accuracy phase cycle enable consumer id\n");
seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
clk_prepare_lock();
@@ -3255,6 +3367,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
clk_summary_show_subtree(s, c, 0);
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
return 0;
}
@@ -3302,8 +3415,14 @@ static int clk_dump_show(struct seq_file *s, void *data)
struct clk_core *c;
bool first_node = true;
struct hlist_head **lists = s->private;
+ int ret;
+
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
seq_putc(s, '{');
+
clk_prepare_lock();
for (; *lists; lists++) {
@@ -3316,6 +3435,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
}
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
seq_puts(s, "}\n");
return 0;
@@ -3959,8 +4079,6 @@ static int __clk_core_init(struct clk_core *core)
}
clk_core_reparent_orphans_nolock();
-
- kref_init(&core->ref);
out:
clk_pm_runtime_put(core);
unlock:
@@ -4189,6 +4307,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
kfree(core->parents);
}
+/* Free memory allocated for a struct clk_core */
+static void __clk_release(struct kref *ref)
+{
+ struct clk_core *core = container_of(ref, struct clk_core, ref);
+
+ if (core->rpm_enabled) {
+ mutex_lock(&clk_rpm_list_lock);
+ hlist_del(&core->rpm_node);
+ mutex_unlock(&clk_rpm_list_lock);
+ }
+
+ clk_core_free_parent_map(core);
+ kfree_const(core->name);
+ kfree(core);
+}
+
static struct clk *
__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
@@ -4209,6 +4343,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
goto fail_out;
}
+ kref_init(&core->ref);
+
core->name = kstrdup_const(init->name, GFP_KERNEL);
if (!core->name) {
ret = -ENOMEM;
@@ -4221,9 +4357,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
}
core->ops = init->ops;
- if (dev && pm_runtime_enabled(dev))
- core->rpm_enabled = true;
core->dev = dev;
+ clk_pm_runtime_init(core);
core->of_node = np;
if (dev && dev->driver)
core->owner = dev->driver->owner;
@@ -4263,12 +4398,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
hw->clk = NULL;
fail_create_clk:
- clk_core_free_parent_map(core);
fail_parents:
fail_ops:
- kfree_const(core->name);
fail_name:
- kfree(core);
+ kref_put(&core->ref, __clk_release);
fail_out:
return ERR_PTR(ret);
}
@@ -4348,18 +4481,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(of_clk_hw_register);
-/* Free memory allocated for a clock. */
-static void __clk_release(struct kref *ref)
-{
- struct clk_core *core = container_of(ref, struct clk_core, ref);
-
- lockdep_assert_held(&prepare_lock);
-
- clk_core_free_parent_map(core);
- kfree_const(core->name);
- kfree(core);
-}
-
/*
* Empty clk_ops for unregistered clocks. These are used temporarily
* after clk_unregister() was called on a clock and until last clock
@@ -4450,7 +4571,8 @@ void clk_unregister(struct clk *clk)
if (ops == &clk_nodrv_ops) {
pr_err("%s: unregistered clock: %s\n", __func__,
clk->core->name);
- goto unlock;
+ clk_prepare_unlock();
+ return;
}
/*
* Assign empty clock ops for consumers that might still hold
@@ -4484,11 +4606,10 @@ void clk_unregister(struct clk *clk)
if (clk->core->protect_count)
pr_warn("%s: unregistering protected clock: %s\n",
__func__, clk->core->name);
+ clk_prepare_unlock();
kref_put(&clk->core->ref, __clk_release);
free_clk(clk);
-unlock:
- clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);
@@ -4647,13 +4768,11 @@ void __clk_put(struct clk *clk)
if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
- owner = clk->core->owner;
- kref_put(&clk->core->ref, __clk_release);
-
clk_prepare_unlock();
+ owner = clk->core->owner;
+ kref_put(&clk->core->ref, __clk_release);
module_put(owner);
-
free_clk(clk);
}
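clk.c above also gains devm_clk_rate_exclusive_get(), a device-managed wrapper around clk_rate_exclusive_get(). A rough consumer sketch, assuming a hypothetical driver and a "core" clock (both illustrative, not from this patch):

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	static int bar_probe(struct platform_device *pdev)
	{
		struct clk *clk;
		int ret;

		clk = devm_clk_get_enabled(&pdev->dev, "core");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_set_rate(clk, 200000000);
		if (ret)
			return ret;

		/* Keep other consumers from changing the rate; dropped on unbind */
		return devm_clk_rate_exclusive_get(&pdev->dev, clk);
	}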
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index ee37d0be6877d..9cd80522ca2d7 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -144,7 +144,7 @@ void clkdev_add_table(struct clk_lookup *cl, size_t num)
mutex_unlock(&clocks_mutex);
}
-#define MAX_DEV_ID 20
+#define MAX_DEV_ID 24
#define MAX_CON_ID 16
struct clk_lookup_alloc {
diff --git a/drivers/clk/hisilicon/clk-hi3519.c b/drivers/clk/hisilicon/clk-hi3519.c
index b871872d9960d..141b727ff60d6 100644
--- a/drivers/clk/hisilicon/clk-hi3519.c
+++ b/drivers/clk/hisilicon/clk-hi3519.c
@@ -130,7 +130,7 @@ static void hi3519_clk_unregister(struct platform_device *pdev)
of_clk_del_provider(pdev->dev.of_node);
hisi_clk_unregister_gate(hi3519_gate_clks,
- ARRAY_SIZE(hi3519_mux_clks),
+ ARRAY_SIZE(hi3519_gate_clks),
crg->clk_data);
hisi_clk_unregister_mux(hi3519_mux_clks,
ARRAY_SIZE(hi3519_mux_clks),
diff --git a/drivers/clk/hisilicon/clk-hi3559a.c b/drivers/clk/hisilicon/clk-hi3559a.c
index ff4ca0edce06a..c79a94f6d9d24 100644
--- a/drivers/clk/hisilicon/clk-hi3559a.c
+++ b/drivers/clk/hisilicon/clk-hi3559a.c
@@ -461,8 +461,7 @@ static void hisi_clk_register_pll(struct hi3559av100_pll_clock *clks,
struct clk_init_data init;
int i;
- p_clk = devm_kzalloc(dev, sizeof(*p_clk) * nums, GFP_KERNEL);
-
+ p_clk = devm_kcalloc(dev, nums, sizeof(*p_clk), GFP_KERNEL);
if (!p_clk)
return;
@@ -491,7 +490,6 @@ static void hisi_clk_register_pll(struct hi3559av100_pll_clock *clks,
clk = clk_register(NULL, &p_clk->hw);
if (IS_ERR(clk)) {
- devm_kfree(dev, p_clk);
dev_err(dev, "%s: failed to register clock %s\n",
__func__, clks[i].name);
continue;
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 27a08c50ac1d8..8cc07d056a838 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -212,15 +212,15 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
{
struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw;
struct clk_hw *div_hw, *gate_hw = NULL;
- struct clk_divider *div = NULL;
+ struct clk_divider *div;
struct clk_gate *gate = NULL;
- struct clk_mux *mux = NULL;
+ struct clk_mux *mux;
const struct clk_ops *divider_ops;
const struct clk_ops *mux_ops;
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
- goto fail;
+ return ERR_CAST(hw);
mux_hw = &mux->hw;
mux->reg = reg;
@@ -230,7 +230,7 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
div = kzalloc(sizeof(*div), GFP_KERNEL);
if (!div)
- goto fail;
+ goto free_mux;
div_hw = &div->hw;
div->reg = reg;
@@ -260,7 +260,7 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
if (!mcore_booted) {
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
- goto fail;
+ goto free_div;
gate_hw = &gate->hw;
gate->reg = reg;
@@ -272,13 +272,15 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
mux_hw, mux_ops, div_hw,
divider_ops, gate_hw, &clk_gate_ops, flags);
if (IS_ERR(hw))
- goto fail;
+ goto free_gate;
return hw;
-fail:
+free_gate:
kfree(gate);
+free_div:
kfree(div);
+free_mux:
kfree(mux);
return ERR_CAST(hw);
}
diff --git a/drivers/clk/imx/clk-imx8-acm.c b/drivers/clk/imx/clk-imx8-acm.c
index f68877eef8736..1bdb480cc96c6 100644
--- a/drivers/clk/imx/clk-imx8-acm.c
+++ b/drivers/clk/imx/clk-imx8-acm.c
@@ -394,15 +394,13 @@ err_clk_register:
return ret;
}
-static int imx8_acm_clk_remove(struct platform_device *pdev)
+static void imx8_acm_clk_remove(struct platform_device *pdev)
{
struct imx8_acm_priv *priv = dev_get_drvdata(&pdev->dev);
pm_runtime_disable(&pdev->dev);
clk_imx_acm_detach_pm_domains(&pdev->dev, &priv->dev_pm);
-
- return 0;
}
static const struct imx8_acm_soc_data imx8qm_acm_data = {
@@ -470,7 +468,7 @@ static struct platform_driver imx8_acm_clk_driver = {
.pm = &imx8_acm_pm_ops,
},
.probe = imx8_acm_clk_probe,
- .remove = imx8_acm_clk_remove,
+ .remove_new = imx8_acm_clk_remove,
};
module_platform_driver(imx8_acm_clk_driver);
diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c
index e4300df88f1ac..55ed211a5e0b1 100644
--- a/drivers/clk/imx/clk-imx8mp-audiomix.c
+++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
@@ -18,7 +18,12 @@
#define CLKEN0 0x000
#define CLKEN1 0x004
-#define SAI_MCLK_SEL(n) (0x300 + 4 * (n)) /* n in 0..5 */
+#define SAI1_MCLK_SEL 0x300
+#define SAI2_MCLK_SEL 0x304
+#define SAI3_MCLK_SEL 0x308
+#define SAI5_MCLK_SEL 0x30C
+#define SAI6_MCLK_SEL 0x310
+#define SAI7_MCLK_SEL 0x314
#define PDM_SEL 0x318
#define SAI_PLL_GNRL_CTL 0x400
@@ -95,13 +100,13 @@ static const struct clk_parent_data clk_imx8mp_audiomix_pll_bypass_sels[] = {
IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK1_SEL, {}, \
clk_imx8mp_audiomix_sai##n##_mclk1_parents, \
ARRAY_SIZE(clk_imx8mp_audiomix_sai##n##_mclk1_parents), \
- SAI_MCLK_SEL(n), 1, 0 \
+ SAI##n##_MCLK_SEL, 1, 0 \
}, { \
"sai"__stringify(n)"_mclk2_sel", \
IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK2_SEL, {}, \
clk_imx8mp_audiomix_sai_mclk2_parents, \
ARRAY_SIZE(clk_imx8mp_audiomix_sai_mclk2_parents), \
- SAI_MCLK_SEL(n), 4, 1 \
+ SAI##n##_MCLK_SEL, 4, 1 \
}, { \
"sai"__stringify(n)"_ipg_cg", \
IMX8MP_CLK_AUDIOMIX_SAI##n##_IPG, \
diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
index e48a904c00133..b1dd0c08e091b 100644
--- a/drivers/clk/imx/clk-scu.c
+++ b/drivers/clk/imx/clk-scu.c
@@ -712,17 +712,13 @@ struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
}
ret = platform_device_add_data(pdev, &clk, sizeof(clk));
- if (ret) {
- platform_device_put(pdev);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto put_device;
ret = driver_set_override(&pdev->dev, &pdev->driver_override,
"imx-scu-clk", strlen("imx-scu-clk"));
- if (ret) {
- platform_device_put(pdev);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto put_device;
ret = imx_clk_scu_attach_pd(&pdev->dev, rsrc_id);
if (ret)
@@ -730,13 +726,15 @@ struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
name, ret);
ret = platform_device_add(pdev);
- if (ret) {
- platform_device_put(pdev);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto put_device;
 /* For API backwards compatibility, simply return NULL for success */
return NULL;
+
+put_device:
+ platform_device_put(pdev);
+ return ERR_PTR(ret);
}
void imx_clk_scu_unregister(void)
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
index 35fe197dd303c..5cefc30a843ee 100644
--- a/drivers/clk/keystone/sci-clk.c
+++ b/drivers/clk/keystone/sci-clk.c
@@ -272,7 +272,7 @@ static const struct clk_ops sci_clk_ops = {
};
/**
- * _sci_clk_get - Gets a handle for an SCI clock
+ * _sci_clk_build - Gets a handle for an SCI clock
* @provider: Handle to SCI clock provider
* @sci_clk: Handle to the SCI clock to populate
*
@@ -516,6 +516,7 @@ static int ti_sci_scan_clocks_from_dt(struct sci_clk_provider *provider)
struct sci_clk *sci_clk, *prev;
int num_clks = 0;
int num_parents;
+ bool state;
int clk_id;
const char * const clk_names[] = {
"clocks", "assigned-clocks", "assigned-clock-parents", NULL
@@ -586,6 +587,15 @@ static int ti_sci_scan_clocks_from_dt(struct sci_clk_provider *provider)
clk_id = args.args[1] + 1;
while (num_parents--) {
+ /* Check if this clock id is valid */
+ ret = provider->ops->is_auto(provider->sci,
+ sci_clk->dev_id, clk_id, &state);
+
+ if (ret) {
+ clk_id++;
+ continue;
+ }
+
sci_clk = devm_kzalloc(dev,
sizeof(*sci_clk),
GFP_KERNEL);
diff --git a/drivers/clk/mediatek/clk-mt7622-apmixedsys.c b/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
index 9cffd278e9a43..1b8f859b6b6cc 100644
--- a/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
@@ -127,7 +127,6 @@ static void clk_mt7622_apmixed_remove(struct platform_device *pdev)
of_clk_del_provider(node);
mtk_clk_unregister_gates(apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
- mtk_free_clk_data(clk_data);
}
static const struct of_device_id of_match_clk_mt7622_apmixed[] = {
diff --git a/drivers/clk/mediatek/clk-mt7981-topckgen.c b/drivers/clk/mediatek/clk-mt7981-topckgen.c
index 682f4ca9e89ad..493aa11d3a175 100644
--- a/drivers/clk/mediatek/clk-mt7981-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt7981-topckgen.c
@@ -357,8 +357,9 @@ static const struct mtk_mux top_muxes[] = {
MUX_GATE_CLR_SET_UPD(CLK_TOP_SGM_325M_SEL, "sgm_325m_sel",
sgm_325m_parents, 0x050, 0x054, 0x058, 8, 1, 15,
0x1C0, 21),
- MUX_GATE_CLR_SET_UPD(CLK_TOP_SGM_REG_SEL, "sgm_reg_sel", sgm_reg_parents,
- 0x050, 0x054, 0x058, 16, 1, 23, 0x1C0, 22),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SGM_REG_SEL, "sgm_reg_sel", sgm_reg_parents,
+ 0x050, 0x054, 0x058, 16, 1, 23, 0x1C0, 22,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
MUX_GATE_CLR_SET_UPD(CLK_TOP_EIP97B_SEL, "eip97b_sel", eip97b_parents,
0x050, 0x054, 0x058, 24, 3, 31, 0x1C0, 23),
/* CLK_CFG_6 */
diff --git a/drivers/clk/mediatek/clk-mt7988-infracfg.c b/drivers/clk/mediatek/clk-mt7988-infracfg.c
index 8011ef278bea3..c8c023afe3e5a 100644
--- a/drivers/clk/mediatek/clk-mt7988-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7988-infracfg.c
@@ -14,6 +14,10 @@
#include "clk-gate.h"
#include "clk-mux.h"
#include <dt-bindings/clock/mediatek,mt7988-clk.h>
+#include <dt-bindings/reset/mediatek,mt7988-resets.h>
+
+#define MT7988_INFRA_RST0_SET_OFFSET 0x70
+#define MT7988_INFRA_RST1_SET_OFFSET 0x80
static DEFINE_SPINLOCK(mt7988_clk_lock);
@@ -152,7 +156,7 @@ static const struct mtk_gate infra_clks[] = {
GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P1, "infra_pcie_peri_ck_26m_ck_p1",
"csw_infra_f26m_sel", 8),
GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P2, "infra_pcie_peri_ck_26m_ck_p2",
- "csw_infra_f26m_sel", 9),
+ "infra_pcie_peri_ck_26m_ck_p3", 9),
GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P3, "infra_pcie_peri_ck_26m_ck_p3",
"csw_infra_f26m_sel", 10),
/* INFRA1 */
@@ -249,12 +253,31 @@ static const struct mtk_gate infra_clks[] = {
GATE_INFRA3(CLK_INFRA_133M_PCIE_CK_P3, "infra_133m_pcie_ck_p3", "sysaxi_sel", 31),
};
+static u16 infra_rst_ofs[] = {
+ MT7988_INFRA_RST0_SET_OFFSET,
+ MT7988_INFRA_RST1_SET_OFFSET,
+};
+
+static u16 infra_idx_map[] = {
+ [MT7988_INFRA_RST0_PEXTP_MAC_SWRST] = 0 * RST_NR_PER_BANK + 6,
+ [MT7988_INFRA_RST1_THERM_CTRL_SWRST] = 1 * RST_NR_PER_BANK + 9,
+};
+
+static struct mtk_clk_rst_desc infra_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = infra_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infra_rst_ofs),
+ .rst_idx_map = infra_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(infra_idx_map),
+};
+
static const struct mtk_clk_desc infra_desc = {
.clks = infra_clks,
.num_clks = ARRAY_SIZE(infra_clks),
.mux_clks = infra_muxes,
.num_mux_clks = ARRAY_SIZE(infra_muxes),
.clk_lock = &mt7988_clk_lock,
+ .rst_desc = &infra_rst_desc,
};
static const struct of_device_id of_match_clk_mt7988_infracfg[] = {
diff --git a/drivers/clk/mediatek/clk-mt8135-apmixedsys.c b/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
index d1239b4b3db74..41bb2d2e2ea74 100644
--- a/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
@@ -59,7 +59,7 @@ static int clk_mt8135_apmixed_probe(struct platform_device *pdev)
ret = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
if (ret)
- return ret;
+ goto free_clk_data;
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (ret)
@@ -69,6 +69,8 @@ static int clk_mt8135_apmixed_probe(struct platform_device *pdev)
unregister_plls:
mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
return ret;
}
diff --git a/drivers/clk/mediatek/clk-mt8173-apmixedsys.c b/drivers/clk/mediatek/clk-mt8173-apmixedsys.c
index 1bbb21ab17869..6cab483b8e1ed 100644
--- a/drivers/clk/mediatek/clk-mt8173-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8173-apmixedsys.c
@@ -152,8 +152,8 @@ static int clk_mt8173_apmixed_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
if (IS_ERR_OR_NULL(clk_data)) {
- iounmap(base);
- return -ENOMEM;
+ r = -ENOMEM;
+ goto unmap_io;
}
fhctl_parse_dt(fhctl_node, pllfhs, ARRAY_SIZE(pllfhs));
@@ -188,6 +188,7 @@ unregister_plls:
ARRAY_SIZE(pllfhs), clk_data);
free_clk_data:
mtk_free_clk_data(clk_data);
+unmap_io:
iounmap(base);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 6e23461a04559..934d5a15acfc5 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -790,7 +790,7 @@ static const struct mtk_gate infra_clks[] = {
/* infra_sspm_26m_self is main clock in co-processor, should not be closed in Linux. */
GATE_INFRA3_FLAGS(CLK_INFRA_SSPM_26M_SELF, "infra_sspm_26m_self", "f_f26m_ck", 3, CLK_IS_CRITICAL),
/* infra_sspm_32k_self is main clock in co-processor, should not be closed in Linux. */
- GATE_INFRA3_FLAGS(CLK_INFRA_SSPM_32K_SELF, "infra_sspm_32k_self", "f_f26m_ck", 4, CLK_IS_CRITICAL),
+ GATE_INFRA3_FLAGS(CLK_INFRA_SSPM_32K_SELF, "infra_sspm_32k_self", "clk32k", 4, CLK_IS_CRITICAL),
GATE_INFRA3(CLK_INFRA_UFS_AXI, "infra_ufs_axi", "axi_sel", 5),
GATE_INFRA3(CLK_INFRA_I2C6, "infra_i2c6", "i2c_sel", 6),
GATE_INFRA3(CLK_INFRA_AP_MSDC0, "infra_ap_msdc0", "msdc50_hclk_sel", 7),
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index 2e55368dc4d82..bd37ab4d1a9bb 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "clk-mtk.h"
@@ -494,6 +495,16 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
return IS_ERR(base) ? PTR_ERR(base) : -ENOMEM;
}
+
+ devm_pm_runtime_enable(&pdev->dev);
+ /*
+ * Do a pm_runtime_resume_and_get() to work around a possible
+ * deadlock between clk_register() and the genpd framework.
+ */
+ r = pm_runtime_resume_and_get(&pdev->dev);
+ if (r)
+ return r;
+
/* Calculate how many clk_hw_onecell_data entries to allocate */
num_clks = mcd->num_clks + mcd->num_composite_clks;
num_clks += mcd->num_fixed_clks + mcd->num_factor_clks;
@@ -574,6 +585,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
goto unregister_clks;
}
+ pm_runtime_put(&pdev->dev);
+
return r;
unregister_clks:
@@ -604,6 +617,8 @@ free_data:
free_base:
if (mcd->shared_io && base)
iounmap(base);
+
+ pm_runtime_put(&pdev->dev);
return r;
}
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index c12f81dfa6745..5f60f2bcca592 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -2142,7 +2142,9 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
&axg_vclk_input,
&axg_vclk2_input,
&axg_vclk_div,
+ &axg_vclk_div1,
&axg_vclk2_div,
+ &axg_vclk2_div1,
&axg_vclk_div2_en,
&axg_vclk_div4_en,
&axg_vclk_div6_en,
diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
index c8ffa755b58df..22eab91a67129 100644
--- a/drivers/clk/microchip/clk-mpfs.c
+++ b/drivers/clk/microchip/clk-mpfs.c
@@ -15,7 +15,8 @@
/* address offset of control registers */
#define REG_MSSPLL_REF_CR 0x08u
-#define REG_MSSPLL_POSTDIV_CR 0x10u
+#define REG_MSSPLL_POSTDIV01_CR 0x10u
+#define REG_MSSPLL_POSTDIV23_CR 0x14u
#define REG_MSSPLL_SSCG_2_CR 0x2Cu
#define REG_CLOCK_CONFIG_CR 0x08u
#define REG_RTC_CLOCK_CR 0x0Cu
@@ -26,10 +27,18 @@
#define MSSPLL_FBDIV_WIDTH 0x0Cu
#define MSSPLL_REFDIV_SHIFT 0x08u
#define MSSPLL_REFDIV_WIDTH 0x06u
-#define MSSPLL_POSTDIV_SHIFT 0x08u
+#define MSSPLL_POSTDIV02_SHIFT 0x08u
+#define MSSPLL_POSTDIV13_SHIFT 0x18u
#define MSSPLL_POSTDIV_WIDTH 0x07u
#define MSSPLL_FIXED_DIV 4u
+/*
+ * This clock ID is defined here, rather than the binding headers, as it is an
+ * internal clock only, and therefore has no consumers in other peripheral
+ * blocks.
+ */
+#define CLK_MSSPLL_INTERNAL 38u
+
struct mpfs_clock_data {
struct device *dev;
void __iomem *base;
@@ -39,17 +48,27 @@ struct mpfs_clock_data {
struct mpfs_msspll_hw_clock {
void __iomem *base;
+ struct clk_hw hw;
+ struct clk_init_data init;
unsigned int id;
u32 reg_offset;
u32 shift;
u32 width;
u32 flags;
- struct clk_hw hw;
- struct clk_init_data init;
};
#define to_mpfs_msspll_clk(_hw) container_of(_hw, struct mpfs_msspll_hw_clock, hw)
+struct mpfs_msspll_out_hw_clock {
+ void __iomem *base;
+ struct clk_divider output;
+ struct clk_init_data init;
+ unsigned int id;
+ u32 reg_offset;
+};
+
+#define to_mpfs_msspll_out_clk(_hw) container_of(_hw, struct mpfs_msspll_out_hw_clock, hw)
+
struct mpfs_cfg_hw_clock {
struct clk_divider cfg;
struct clk_init_data init;
@@ -93,93 +112,40 @@ static const struct clk_div_table mpfs_div_rtcref_table[] = {
{ 0, 0 }
};
-static unsigned long mpfs_clk_msspll_recalc_rate(struct clk_hw *hw, unsigned long prate)
-{
- struct mpfs_msspll_hw_clock *msspll_hw = to_mpfs_msspll_clk(hw);
- void __iomem *mult_addr = msspll_hw->base + msspll_hw->reg_offset;
- void __iomem *ref_div_addr = msspll_hw->base + REG_MSSPLL_REF_CR;
- void __iomem *postdiv_addr = msspll_hw->base + REG_MSSPLL_POSTDIV_CR;
- u32 mult, ref_div, postdiv;
-
- mult = readl_relaxed(mult_addr) >> MSSPLL_FBDIV_SHIFT;
- mult &= clk_div_mask(MSSPLL_FBDIV_WIDTH);
- ref_div = readl_relaxed(ref_div_addr) >> MSSPLL_REFDIV_SHIFT;
- ref_div &= clk_div_mask(MSSPLL_REFDIV_WIDTH);
- postdiv = readl_relaxed(postdiv_addr) >> MSSPLL_POSTDIV_SHIFT;
- postdiv &= clk_div_mask(MSSPLL_POSTDIV_WIDTH);
-
- return prate * mult / (ref_div * MSSPLL_FIXED_DIV * postdiv);
-}
+/*
+ * MSS PLL internal clock
+ */
-static long mpfs_clk_msspll_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
+static unsigned long mpfs_clk_msspll_recalc_rate(struct clk_hw *hw, unsigned long prate)
{
struct mpfs_msspll_hw_clock *msspll_hw = to_mpfs_msspll_clk(hw);
void __iomem *mult_addr = msspll_hw->base + msspll_hw->reg_offset;
void __iomem *ref_div_addr = msspll_hw->base + REG_MSSPLL_REF_CR;
u32 mult, ref_div;
- unsigned long rate_before_ctrl;
-
- mult = readl_relaxed(mult_addr) >> MSSPLL_FBDIV_SHIFT;
- mult &= clk_div_mask(MSSPLL_FBDIV_WIDTH);
- ref_div = readl_relaxed(ref_div_addr) >> MSSPLL_REFDIV_SHIFT;
- ref_div &= clk_div_mask(MSSPLL_REFDIV_WIDTH);
-
- rate_before_ctrl = rate * (ref_div * MSSPLL_FIXED_DIV) / mult;
-
- return divider_round_rate(hw, rate_before_ctrl, prate, NULL, MSSPLL_POSTDIV_WIDTH,
- msspll_hw->flags);
-}
-
-static int mpfs_clk_msspll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate)
-{
- struct mpfs_msspll_hw_clock *msspll_hw = to_mpfs_msspll_clk(hw);
- void __iomem *mult_addr = msspll_hw->base + msspll_hw->reg_offset;
- void __iomem *ref_div_addr = msspll_hw->base + REG_MSSPLL_REF_CR;
- void __iomem *postdiv_addr = msspll_hw->base + REG_MSSPLL_POSTDIV_CR;
- u32 mult, ref_div, postdiv;
- int divider_setting;
- unsigned long rate_before_ctrl, flags;
mult = readl_relaxed(mult_addr) >> MSSPLL_FBDIV_SHIFT;
mult &= clk_div_mask(MSSPLL_FBDIV_WIDTH);
ref_div = readl_relaxed(ref_div_addr) >> MSSPLL_REFDIV_SHIFT;
ref_div &= clk_div_mask(MSSPLL_REFDIV_WIDTH);
- rate_before_ctrl = rate * (ref_div * MSSPLL_FIXED_DIV) / mult;
- divider_setting = divider_get_val(rate_before_ctrl, prate, NULL, MSSPLL_POSTDIV_WIDTH,
- msspll_hw->flags);
-
- if (divider_setting < 0)
- return divider_setting;
-
- spin_lock_irqsave(&mpfs_clk_lock, flags);
-
- postdiv = readl_relaxed(postdiv_addr);
- postdiv &= ~(clk_div_mask(MSSPLL_POSTDIV_WIDTH) << MSSPLL_POSTDIV_SHIFT);
- writel_relaxed(postdiv, postdiv_addr);
-
- spin_unlock_irqrestore(&mpfs_clk_lock, flags);
-
- return 0;
+ return prate * mult / (ref_div * MSSPLL_FIXED_DIV);
}
static const struct clk_ops mpfs_clk_msspll_ops = {
.recalc_rate = mpfs_clk_msspll_recalc_rate,
- .round_rate = mpfs_clk_msspll_round_rate,
- .set_rate = mpfs_clk_msspll_set_rate,
};
#define CLK_PLL(_id, _name, _parent, _shift, _width, _flags, _offset) { \
.id = _id, \
+ .flags = _flags, \
.shift = _shift, \
.width = _width, \
.reg_offset = _offset, \
- .flags = _flags, \
.hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parent, &mpfs_clk_msspll_ops, 0), \
}
static struct mpfs_msspll_hw_clock mpfs_msspll_clks[] = {
- CLK_PLL(CLK_MSSPLL, "clk_msspll", mpfs_ext_ref, MSSPLL_FBDIV_SHIFT,
+ CLK_PLL(CLK_MSSPLL_INTERNAL, "clk_msspll_internal", mpfs_ext_ref, MSSPLL_FBDIV_SHIFT,
MSSPLL_FBDIV_WIDTH, 0, REG_MSSPLL_SSCG_2_CR),
};
@@ -196,7 +162,7 @@ static int mpfs_clk_register_mssplls(struct device *dev, struct mpfs_msspll_hw_c
ret = devm_clk_hw_register(dev, &msspll_hw->hw);
if (ret)
return dev_err_probe(dev, ret, "failed to register msspll id: %d\n",
- CLK_MSSPLL);
+ CLK_MSSPLL_INTERNAL);
data->hw_data.hws[msspll_hw->id] = &msspll_hw->hw;
}
@@ -205,6 +171,54 @@ static int mpfs_clk_register_mssplls(struct device *dev, struct mpfs_msspll_hw_c
}
/*
+ * MSS PLL output clocks
+ */
+
+#define CLK_PLL_OUT(_id, _name, _parent, _flags, _shift, _width, _offset) { \
+ .id = _id, \
+ .output.shift = _shift, \
+ .output.width = _width, \
+ .output.table = NULL, \
+ .reg_offset = _offset, \
+ .output.flags = _flags, \
+ .output.hw.init = CLK_HW_INIT(_name, _parent, &clk_divider_ops, 0), \
+ .output.lock = &mpfs_clk_lock, \
+}
+
+static struct mpfs_msspll_out_hw_clock mpfs_msspll_out_clks[] = {
+ CLK_PLL_OUT(CLK_MSSPLL0, "clk_msspll", "clk_msspll_internal", CLK_DIVIDER_ONE_BASED,
+ MSSPLL_POSTDIV02_SHIFT, MSSPLL_POSTDIV_WIDTH, REG_MSSPLL_POSTDIV01_CR),
+ CLK_PLL_OUT(CLK_MSSPLL1, "clk_msspll1", "clk_msspll_internal", CLK_DIVIDER_ONE_BASED,
+ MSSPLL_POSTDIV13_SHIFT, MSSPLL_POSTDIV_WIDTH, REG_MSSPLL_POSTDIV01_CR),
+ CLK_PLL_OUT(CLK_MSSPLL2, "clk_msspll2", "clk_msspll_internal", CLK_DIVIDER_ONE_BASED,
+ MSSPLL_POSTDIV02_SHIFT, MSSPLL_POSTDIV_WIDTH, REG_MSSPLL_POSTDIV23_CR),
+ CLK_PLL_OUT(CLK_MSSPLL3, "clk_msspll3", "clk_msspll_internal", CLK_DIVIDER_ONE_BASED,
+ MSSPLL_POSTDIV13_SHIFT, MSSPLL_POSTDIV_WIDTH, REG_MSSPLL_POSTDIV23_CR),
+};
+
+static int mpfs_clk_register_msspll_outs(struct device *dev,
+ struct mpfs_msspll_out_hw_clock *msspll_out_hws,
+ unsigned int num_clks, struct mpfs_clock_data *data)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < num_clks; i++) {
+ struct mpfs_msspll_out_hw_clock *msspll_out_hw = &msspll_out_hws[i];
+
+ msspll_out_hw->output.reg = data->msspll_base + msspll_out_hw->reg_offset;
+ ret = devm_clk_hw_register(dev, &msspll_out_hw->output.hw);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register msspll out id: %d\n",
+ msspll_out_hw->id);
+
+ data->hw_data.hws[msspll_out_hw->id] = &msspll_out_hw->output.hw;
+ }
+
+ return 0;
+}
+
+/*
* "CFG" clocks
*/
@@ -442,8 +456,8 @@ static int mpfs_clk_probe(struct platform_device *pdev)
int ret;
/* CLK_RESERVED is not part of clock arrays, so add 1 */
- num_clks = ARRAY_SIZE(mpfs_msspll_clks) + ARRAY_SIZE(mpfs_cfg_clks)
- + ARRAY_SIZE(mpfs_periph_clks) + 1;
+ num_clks = ARRAY_SIZE(mpfs_msspll_clks) + ARRAY_SIZE(mpfs_msspll_out_clks)
+ + ARRAY_SIZE(mpfs_cfg_clks) + ARRAY_SIZE(mpfs_periph_clks) + 1;
clk_data = devm_kzalloc(dev, struct_size(clk_data, hw_data.hws, num_clks), GFP_KERNEL);
if (!clk_data)
@@ -466,6 +480,12 @@ static int mpfs_clk_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = mpfs_clk_register_msspll_outs(dev, mpfs_msspll_out_clks,
+ ARRAY_SIZE(mpfs_msspll_out_clks),
+ clk_data);
+ if (ret)
+ return ret;
+
ret = mpfs_clk_register_cfgs(dev, mpfs_cfg_clks, ARRAY_SIZE(mpfs_cfg_clks), clk_data);
if (ret)
return ret;
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 2a9da0939377a..8ab08e7b5b6c6 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -20,6 +20,24 @@ menuconfig COMMON_CLK_QCOM
if COMMON_CLK_QCOM
+config CLK_X1E80100_CAMCC
+ tristate "X1E80100 Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select CLK_X1E80100_GCC
+ help
+ Support for the camera clock controller on X1E80100 devices.
+ Say Y if you want to support camera devices and camera functionality.
+
+config CLK_X1E80100_DISPCC
+ tristate "X1E80100 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select CLK_X1E80100_GCC
+ help
+ Support for the two display clock controllers on Qualcomm
+ Technologies, Inc. X1E80100 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config CLK_X1E80100_GCC
tristate "X1E80100 Global Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -30,6 +48,23 @@ config CLK_X1E80100_GCC
Say Y if you want to use peripheral devices such as UART, SPI, I2C,
USB, UFS, SD/eMMC, PCIe, etc.
+config CLK_X1E80100_GPUCC
+ tristate "X1E80100 Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select CLK_X1E80100_GCC
+ help
+ Support for the graphics clock controller on X1E80100 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
+config CLK_X1E80100_TCSRCC
+ tristate "X1E80100 TCSR Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the TCSR clock controller on X1E80100 devices.
+ Say Y if you want to use peripheral devices such as SD/UFS.
+
config QCOM_A53PLL
tristate "MSM8916 A53 PLL"
help
@@ -600,16 +635,6 @@ config SC_LPASS_CORECC_7280
Say Y if you want to use LPASS clocks and power domains of the LPASS
core clock controller.
-config SC_MSS_7180
- tristate "SC7180 Modem Clock Controller"
- depends on ARM64 || COMPILE_TEST
- select SC_GCC_7180
- help
- Support for the Modem Subsystem clock controller on Qualcomm
- Technologies, Inc on SC7180 devices.
- Say Y if you want to use the Modem branch clocks of the Modem
- subsystem clock controller to reset the MSS subsystem.
-
config SC_VIDEOCC_7180
tristate "SC7180 Video Clock Controller"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 582e06dc1d939..dec5b6db6860c 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -21,7 +21,11 @@ clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
obj-$(CONFIG_CLK_GFM_LPASS_SM8250) += lpass-gfm-sm8250.o
+obj-$(CONFIG_CLK_X1E80100_CAMCC) += camcc-x1e80100.o
+obj-$(CONFIG_CLK_X1E80100_DISPCC) += dispcc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_GCC) += gcc-x1e80100.o
+obj-$(CONFIG_CLK_X1E80100_GPUCC) += gpucc-x1e80100.o
+obj-$(CONFIG_CLK_X1E80100_TCSRCC) += tcsrcc-x1e80100.o
obj-$(CONFIG_IPQ_APSS_PLL) += apss-ipq-pll.o
obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
@@ -87,7 +91,6 @@ obj-$(CONFIG_SC_LPASSCC_7280) += lpasscc-sc7280.o
obj-$(CONFIG_SC_LPASSCC_8280XP) += lpasscc-sc8280xp.o
obj-$(CONFIG_SC_LPASS_CORECC_7180) += lpasscorecc-sc7180.o
obj-$(CONFIG_SC_LPASS_CORECC_7280) += lpasscorecc-sc7280.o lpassaudiocc-sc7280.o
-obj-$(CONFIG_SC_MSS_7180) += mss-sc7180.o
obj-$(CONFIG_SC_VIDEOCC_7180) += videocc-sc7180.o
obj-$(CONFIG_SC_VIDEOCC_7280) += videocc-sc7280.o
obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o
diff --git a/drivers/clk/qcom/camcc-sc7180.c b/drivers/clk/qcom/camcc-sc7180.c
index 0a9a6df3ddace..a78808b22b030 100644
--- a/drivers/clk/qcom/camcc-sc7180.c
+++ b/drivers/clk/qcom/camcc-sc7180.c
@@ -1703,17 +1703,7 @@ static struct platform_driver cam_cc_sc7180_driver = {
},
};
-static int __init cam_cc_sc7180_init(void)
-{
- return platform_driver_register(&cam_cc_sc7180_driver);
-}
-subsys_initcall(cam_cc_sc7180_init);
-
-static void __exit cam_cc_sc7180_exit(void)
-{
- platform_driver_unregister(&cam_cc_sc7180_driver);
-}
-module_exit(cam_cc_sc7180_exit);
+module_platform_driver(cam_cc_sc7180_driver);
MODULE_DESCRIPTION("QTI CAM_CC SC7180 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/camcc-sc7280.c b/drivers/clk/qcom/camcc-sc7280.c
index 49f046ea857cb..d89ddb2298e32 100644
--- a/drivers/clk/qcom/camcc-sc7280.c
+++ b/drivers/clk/qcom/camcc-sc7280.c
@@ -2468,17 +2468,7 @@ static struct platform_driver cam_cc_sc7280_driver = {
},
};
-static int __init cam_cc_sc7280_init(void)
-{
- return platform_driver_register(&cam_cc_sc7280_driver);
-}
-subsys_initcall(cam_cc_sc7280_init);
-
-static void __exit cam_cc_sc7280_exit(void)
-{
- platform_driver_unregister(&cam_cc_sc7280_driver);
-}
-module_exit(cam_cc_sc7280_exit);
+module_platform_driver(cam_cc_sc7280_driver);
MODULE_DESCRIPTION("QTI CAM_CC SC7280 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/camcc-sc8280xp.c b/drivers/clk/qcom/camcc-sc8280xp.c
index 3dcd79b015151..8e26ec2def73a 100644
--- a/drivers/clk/qcom/camcc-sc8280xp.c
+++ b/drivers/clk/qcom/camcc-sc8280xp.c
@@ -630,6 +630,7 @@ static const struct freq_tbl ftbl_camcc_bps_clk_src[] = {
F(480000000, P_CAMCC_PLL7_OUT_EVEN, 1, 0, 0),
F(600000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0),
F(760000000, P_CAMCC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_bps_clk_src = {
@@ -654,6 +655,7 @@ static const struct freq_tbl ftbl_camcc_camnoc_axi_clk_src[] = {
F(320000000, P_CAMCC_PLL7_OUT_ODD, 1, 0, 0),
F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0),
F(480000000, P_CAMCC_PLL7_OUT_EVEN, 1, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_camnoc_axi_clk_src = {
@@ -673,6 +675,7 @@ static struct clk_rcg2 camcc_camnoc_axi_clk_src = {
static const struct freq_tbl ftbl_camcc_cci_0_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(37500000, P_CAMCC_PLL0_OUT_EVEN, 16, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_cci_0_clk_src = {
@@ -735,6 +738,7 @@ static const struct freq_tbl ftbl_camcc_cphy_rx_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(240000000, P_CAMCC_PLL0_OUT_EVEN, 2.5, 0, 0),
F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_cphy_rx_clk_src = {
@@ -754,6 +758,7 @@ static struct clk_rcg2 camcc_cphy_rx_clk_src = {
static const struct freq_tbl ftbl_camcc_csi0phytimer_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(300000000, P_CAMCC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_csi0phytimer_clk_src = {
@@ -818,6 +823,7 @@ static const struct freq_tbl ftbl_camcc_fast_ahb_clk_src[] = {
F(200000000, P_CAMCC_PLL0_OUT_EVEN, 3, 0, 0),
F(300000000, P_CAMCC_PLL0_OUT_MAIN, 4, 0, 0),
F(400000000, P_CAMCC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_fast_ahb_clk_src = {
@@ -838,6 +844,7 @@ static const struct freq_tbl ftbl_camcc_icp_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0),
F(600000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_icp_clk_src = {
@@ -860,6 +867,7 @@ static const struct freq_tbl ftbl_camcc_ife_0_clk_src[] = {
F(558000000, P_CAMCC_PLL3_OUT_EVEN, 1, 0, 0),
F(637000000, P_CAMCC_PLL3_OUT_EVEN, 1, 0, 0),
F(760000000, P_CAMCC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_ife_0_clk_src = {
@@ -883,6 +891,7 @@ static const struct freq_tbl ftbl_camcc_ife_0_csid_clk_src[] = {
F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0),
F(480000000, P_CAMCC_PLL7_OUT_EVEN, 1, 0, 0),
F(600000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_ife_0_csid_clk_src = {
@@ -905,6 +914,7 @@ static const struct freq_tbl ftbl_camcc_ife_1_clk_src[] = {
F(558000000, P_CAMCC_PLL4_OUT_EVEN, 1, 0, 0),
F(637000000, P_CAMCC_PLL4_OUT_EVEN, 1, 0, 0),
F(760000000, P_CAMCC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_ife_1_clk_src = {
@@ -941,6 +951,7 @@ static const struct freq_tbl ftbl_camcc_ife_2_clk_src[] = {
F(558000000, P_CAMCC_PLL5_OUT_EVEN, 1, 0, 0),
F(637000000, P_CAMCC_PLL5_OUT_EVEN, 1, 0, 0),
F(760000000, P_CAMCC_PLL5_OUT_EVEN, 1, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_ife_2_clk_src = {
@@ -962,6 +973,7 @@ static const struct freq_tbl ftbl_camcc_ife_2_csid_clk_src[] = {
F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0),
F(480000000, P_CAMCC_PLL7_OUT_EVEN, 1, 0, 0),
F(600000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_ife_2_csid_clk_src = {
@@ -984,6 +996,7 @@ static const struct freq_tbl ftbl_camcc_ife_3_clk_src[] = {
F(558000000, P_CAMCC_PLL6_OUT_EVEN, 1, 0, 0),
F(637000000, P_CAMCC_PLL6_OUT_EVEN, 1, 0, 0),
F(760000000, P_CAMCC_PLL6_OUT_EVEN, 1, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_ife_3_clk_src = {
@@ -1020,6 +1033,7 @@ static const struct freq_tbl ftbl_camcc_ife_lite_0_clk_src[] = {
F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0),
F(480000000, P_CAMCC_PLL7_OUT_EVEN, 1, 0, 0),
F(600000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_ife_lite_0_clk_src = {
@@ -1140,6 +1154,7 @@ static const struct freq_tbl ftbl_camcc_ipe_0_clk_src[] = {
F(475000000, P_CAMCC_PLL1_OUT_EVEN, 1, 0, 0),
F(520000000, P_CAMCC_PLL1_OUT_EVEN, 1, 0, 0),
F(600000000, P_CAMCC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_ipe_0_clk_src = {
@@ -1163,6 +1178,7 @@ static const struct freq_tbl ftbl_camcc_jpeg_clk_src[] = {
F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0),
F(480000000, P_CAMCC_PLL7_OUT_EVEN, 1, 0, 0),
F(600000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_jpeg_clk_src = {
@@ -1184,6 +1200,7 @@ static const struct freq_tbl ftbl_camcc_lrme_clk_src[] = {
F(300000000, P_CAMCC_PLL0_OUT_EVEN, 2, 0, 0),
F(320000000, P_CAMCC_PLL7_OUT_ODD, 1, 0, 0),
F(400000000, P_CAMCC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_lrme_clk_src = {
@@ -1204,6 +1221,7 @@ static const struct freq_tbl ftbl_camcc_mclk0_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(24000000, P_CAMCC_PLL2_OUT_EARLY, 10, 1, 4),
F(64000000, P_CAMCC_PLL2_OUT_EARLY, 15, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_mclk0_clk_src = {
@@ -1320,6 +1338,7 @@ static struct clk_rcg2 camcc_mclk7_clk_src = {
static const struct freq_tbl ftbl_camcc_sleep_clk_src[] = {
F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_sleep_clk_src = {
@@ -1339,6 +1358,7 @@ static struct clk_rcg2 camcc_sleep_clk_src = {
static const struct freq_tbl ftbl_camcc_slow_ahb_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(80000000, P_CAMCC_PLL7_OUT_EVEN, 6, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_slow_ahb_clk_src = {
@@ -1357,6 +1377,7 @@ static struct clk_rcg2 camcc_slow_ahb_clk_src = {
static const struct freq_tbl ftbl_camcc_xo_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
};
static struct clk_rcg2 camcc_xo_clk_src = {
@@ -3010,10 +3031,8 @@ static int camcc_sc8280xp_probe(struct platform_device *pdev)
clk_lucid_pll_configure(&camcc_pll6, regmap, &camcc_pll6_config);
clk_lucid_pll_configure(&camcc_pll7, regmap, &camcc_pll7_config);
- /*
- * Keep camcc_gdsc_clk always enabled:
- */
- regmap_update_bits(regmap, 0xc1e4, BIT(0), 1);
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0xc1e4); /* CAMCC_GDSC_CLK */
ret = qcom_cc_really_probe(pdev, &camcc_sc8280xp_desc, regmap);
if (ret)
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 27d44188a7abb..8466d03e0d058 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -1746,17 +1746,7 @@ static struct platform_driver cam_cc_sdm845_driver = {
},
};
-static int __init cam_cc_sdm845_init(void)
-{
- return platform_driver_register(&cam_cc_sdm845_driver);
-}
-subsys_initcall(cam_cc_sdm845_init);
-
-static void __exit cam_cc_sdm845_exit(void)
-{
- platform_driver_unregister(&cam_cc_sdm845_driver);
-}
-module_exit(cam_cc_sdm845_exit);
+module_platform_driver(cam_cc_sdm845_driver);
MODULE_DESCRIPTION("QTI CAM_CC SDM845 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/camcc-sm6350.c b/drivers/clk/qcom/camcc-sm6350.c
index acba9f99d960c..e4e7b308ecf16 100644
--- a/drivers/clk/qcom/camcc-sm6350.c
+++ b/drivers/clk/qcom/camcc-sm6350.c
@@ -1890,17 +1890,7 @@ static struct platform_driver camcc_sm6350_driver = {
},
};
-static int __init camcc_sm6350_init(void)
-{
- return platform_driver_register(&camcc_sm6350_driver);
-}
-subsys_initcall(camcc_sm6350_init);
-
-static void __exit camcc_sm6350_exit(void)
-{
- platform_driver_unregister(&camcc_sm6350_driver);
-}
-module_exit(camcc_sm6350_exit);
+module_platform_driver(camcc_sm6350_driver);
MODULE_DESCRIPTION("QTI CAMCC SM6350 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/camcc-sm8550.c b/drivers/clk/qcom/camcc-sm8550.c
index dd51ba4ea757b..1ef59a96f664f 100644
--- a/drivers/clk/qcom/camcc-sm8550.c
+++ b/drivers/clk/qcom/camcc-sm8550.c
@@ -3536,13 +3536,9 @@ static int cam_cc_sm8550_probe(struct platform_device *pdev)
clk_lucid_ole_pll_configure(&cam_cc_pll11, regmap, &cam_cc_pll11_config);
clk_lucid_ole_pll_configure(&cam_cc_pll12, regmap, &cam_cc_pll12_config);
- /*
- * Keep clocks always enabled:
- * cam_cc_gdsc_clk
- * cam_cc_sleep_clk
- */
- regmap_update_bits(regmap, 0x1419c, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x142cc, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x1419c); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x142cc); /* CAM_CC_SLEEP_CLK */
ret = qcom_cc_really_probe(pdev, &cam_cc_sm8550_desc, regmap);
diff --git a/drivers/clk/qcom/camcc-x1e80100.c b/drivers/clk/qcom/camcc-x1e80100.c
new file mode 100644
index 0000000000000..46bb225906bff
--- /dev/null
+++ b/drivers/clk/qcom/camcc-x1e80100.c
@@ -0,0 +1,2487 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,x1e80100-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_BI_TCXO_AO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_MAIN,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+ P_CAM_CC_PLL6_OUT_EVEN,
+ P_CAM_CC_PLL8_OUT_EVEN,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+static const struct pll_vco rivian_ole_vco[] = {
+ { 777000000, 1285000000, 0 },
+};
+
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00008400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x2, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 14,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0xaaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+ .offset = 0x1000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll1_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x10000030,
+ .config_ctl_hi_val = 0x80890263,
+ .config_ctl_hi1_val = 0x00000217,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = rivian_ole_vco,
+ .num_vco = ARRAY_SIZE(rivian_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_rivian_evo_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x24,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x24,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x4000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
+ .offset = 0x4000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll4_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll6_config = {
+ .l = 0x24,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll6 = {
+ .offset = 0x6000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll6_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll6_out_even = {
+ .offset = 0x6000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll6_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll6_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll8_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll8 = {
+ .offset = 0x8000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll8",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll8_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll8_out_even = {
+ .offset = 0x8000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll8_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll8_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll8_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll8.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_ODD, 3 },
+ { P_CAM_CC_PLL8_OUT_EVEN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll8_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EVEN, 3 },
+ { P_CAM_CC_PLL2_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2.clkr.hw },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll3_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll4_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 4 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL6_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll6_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6_ao[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map cam_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_7_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(160000000, P_CAM_CC_PLL0_OUT_ODD, 2.5, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x10278,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_rt_clk_src[] = {
+ F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_rt_clk_src = {
+ .cmd_rcgr = 0x138f8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_rt_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_rt_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(30000000, P_CAM_CC_PLL8_OUT_EVEN, 16, 0, 0),
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0x1365c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0x1378c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x11164,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(266666667, P_CAM_CC_PLL0_OUT_ODD, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x150e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x15104,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x15124,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x15258,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = {
+ .cmd_rcgr = 0x1538c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi4phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi5phytimer_clk_src = {
+ .cmd_rcgr = 0x154c0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi5phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csid_clk_src[] = {
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csid_clk_src = {
+ .cmd_rcgr = 0x138d4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x10018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0x13520,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(345600000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(727000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0x11018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(345600000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(432000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(727000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0x12018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_lite_clk_src[] = {
+ F(266666667, P_CAM_CC_PLL0_OUT_ODD, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+ .cmd_rcgr = 0x13000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+ .cmd_rcgr = 0x1313c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_nps_clk_src[] = {
+ F(304000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(364000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(500000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(700000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ipe_nps_clk_src = {
+ .cmd_rcgr = 0x103cc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ipe_nps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_jpeg_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(160000000, P_CAM_CC_PLL0_OUT_ODD, 2.5, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ .cmd_rcgr = 0x133dc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_jpeg_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_CAM_CC_PLL2_OUT_MAIN, 10, 1, 4),
+ F(68571429, P_CAM_CC_PLL2_OUT_MAIN, 14, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x15000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x1501c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x15038,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x15054,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+ .cmd_rcgr = 0x15070,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk5_clk_src = {
+ .cmd_rcgr = 0x1508c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk5_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk6_clk_src = {
+ .cmd_rcgr = 0x150a8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk6_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk7_clk_src = {
+ .cmd_rcgr = 0x150c4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk7_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sfe_0_clk_src[] = {
+ F(345600000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(432000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(727000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sfe_0_clk_src = {
+ .cmd_rcgr = 0x13294,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_sfe_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_0_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x13aa0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sleep_clk_src",
+ .parent_data = cam_cc_parent_data_6_ao,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(64000000, P_CAM_CC_PLL8_OUT_EVEN, 7.5, 0, 0),
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x10148,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_xo_clk_src = {
+ .cmd_rcgr = 0x13a84,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_7,
+ .freq_tbl = ftbl_cam_cc_xo_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_xo_clk_src",
+ .parent_data = cam_cc_parent_data_7_ao,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_7_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x10274,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10274,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x103a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x103a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_fast_ahb_clk = {
+ .halt_reg = 0x10144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_nrt_clk = {
+ .halt_reg = 0x13920,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13920,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_nrt_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_rt_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_rt_clk = {
+ .halt_reg = 0x13910,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13910,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_rt_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_rt_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_dcd_xo_clk = {
+ .halt_reg = 0x1392c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1392c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_dcd_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_xo_clk = {
+ .halt_reg = 0x13930,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13930,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0x13788,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13788,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0x138b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x138b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0x13a80,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x13a80,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0x138bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x138bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_bps_clk = {
+ .halt_reg = 0x103b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x103b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_bps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_fast_ahb_clk = {
+ .halt_reg = 0x138c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x138c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_0_clk = {
+ .halt_reg = 0x11150,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11150,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_1_clk = {
+ .halt_reg = 0x1203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_lite_clk = {
+ .halt_reg = 0x13138,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13138,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_lite_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ipe_nps_clk = {
+ .halt_reg = 0x10504,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10504,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ipe_nps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_nps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_sfe_0_clk = {
+ .halt_reg = 0x133cc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x133cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_sfe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_sfe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x150f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x1511c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1511c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x15250,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15250,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0x15384,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15384,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi4phytimer_clk = {
+ .halt_reg = 0x154b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x154b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi4phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi4phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi5phytimer_clk = {
+ .halt_reg = 0x155ec,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x155ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi5phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi5phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csid_clk = {
+ .halt_reg = 0x138ec,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x138ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csid_csiphy_rx_clk = {
+ .halt_reg = 0x15100,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15100,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_csiphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x150fc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150fc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x15120,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x15254,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15254,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0x15388,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15388,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy4_clk = {
+ .halt_reg = 0x154bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x154bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy5_clk = {
+ .halt_reg = 0x155f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x155f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ahb_clk = {
+ .halt_reg = 0x13658,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13658,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0x1364c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1364c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0x11144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+ .halt_reg = 0x11154,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11154,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_fast_ahb_clk = {
+ .halt_reg = 0x11160,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11160,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0x12030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+ .halt_reg = 0x12040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_fast_ahb_clk = {
+ .halt_reg = 0x1204c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1204c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_ahb_clk = {
+ .halt_reg = 0x13278,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13278,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_clk = {
+ .halt_reg = 0x1312c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1312c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
+ .halt_reg = 0x13274,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13274,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_csid_clk = {
+ .halt_reg = 0x13268,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13268,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_nps_ahb_clk = {
+ .halt_reg = 0x1051c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1051c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_nps_clk = {
+ .halt_reg = 0x104f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x104f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_nps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_nps_fast_ahb_clk = {
+ .halt_reg = 0x10520,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10520,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_pps_clk = {
+ .halt_reg = 0x10508,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10508,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_pps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_nps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_pps_fast_ahb_clk = {
+ .halt_reg = 0x10524,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10524,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_pps_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+ .halt_reg = 0x13508,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13508,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x15018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x15034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x15050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x1506c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1506c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk4_clk = {
+ .halt_reg = 0x15088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk5_clk = {
+ .halt_reg = 0x150a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk6_clk = {
+ .halt_reg = 0x150c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk7_clk = {
+ .halt_reg = 0x150dc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_0_clk = {
+ .halt_reg = 0x133c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x133c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_sfe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_0_fast_ahb_clk = {
+ .halt_reg = 0x133d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x133d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_0_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc cam_cc_bps_gdsc = {
+ .gdscr = 0x10004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_bps_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_ife_0_gdsc = {
+ .gdscr = 0x11004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_ife_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_ife_1_gdsc = {
+ .gdscr = 0x12004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_ife_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_ipe_0_gdsc = {
+ .gdscr = 0x103b8,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_ipe_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_sfe_0_gdsc = {
+ .gdscr = 0x13280,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_sfe_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_titan_top_gdsc = {
+ .gdscr = 0x13a6c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_titan_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *cam_cc_x1e80100_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_BPS_FAST_AHB_CLK] = &cam_cc_bps_fast_ahb_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_NRT_CLK] = &cam_cc_camnoc_axi_nrt_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_RT_CLK] = &cam_cc_camnoc_axi_rt_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_RT_CLK_SRC] = &cam_cc_camnoc_axi_rt_clk_src.clkr,
+ [CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr,
+ [CAM_CC_CAMNOC_XO_CLK] = &cam_cc_camnoc_xo_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPAS_BPS_CLK] = &cam_cc_cpas_bps_clk.clkr,
+ [CAM_CC_CPAS_FAST_AHB_CLK] = &cam_cc_cpas_fast_ahb_clk.clkr,
+ [CAM_CC_CPAS_IFE_0_CLK] = &cam_cc_cpas_ife_0_clk.clkr,
+ [CAM_CC_CPAS_IFE_1_CLK] = &cam_cc_cpas_ife_1_clk.clkr,
+ [CAM_CC_CPAS_IFE_LITE_CLK] = &cam_cc_cpas_ife_lite_clk.clkr,
+ [CAM_CC_CPAS_IPE_NPS_CLK] = &cam_cc_cpas_ipe_nps_clk.clkr,
+ [CAM_CC_CPAS_SFE_0_CLK] = &cam_cc_cpas_sfe_0_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSI4PHYTIMER_CLK] = &cam_cc_csi4phytimer_clk.clkr,
+ [CAM_CC_CSI4PHYTIMER_CLK_SRC] = &cam_cc_csi4phytimer_clk_src.clkr,
+ [CAM_CC_CSI5PHYTIMER_CLK] = &cam_cc_csi5phytimer_clk.clkr,
+ [CAM_CC_CSI5PHYTIMER_CLK_SRC] = &cam_cc_csi5phytimer_clk_src.clkr,
+ [CAM_CC_CSID_CLK] = &cam_cc_csid_clk.clkr,
+ [CAM_CC_CSID_CLK_SRC] = &cam_cc_csid_clk_src.clkr,
+ [CAM_CC_CSID_CSIPHY_RX_CLK] = &cam_cc_csid_csiphy_rx_clk.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_CSIPHY4_CLK] = &cam_cc_csiphy4_clk.clkr,
+ [CAM_CC_CSIPHY5_CLK] = &cam_cc_csiphy5_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+ [CAM_CC_IFE_0_FAST_AHB_CLK] = &cam_cc_ife_0_fast_ahb_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+ [CAM_CC_IFE_1_FAST_AHB_CLK] = &cam_cc_ife_1_fast_ahb_clk.clkr,
+ [CAM_CC_IFE_LITE_AHB_CLK] = &cam_cc_ife_lite_ahb_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr,
+ [CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr,
+ [CAM_CC_IPE_NPS_AHB_CLK] = &cam_cc_ipe_nps_ahb_clk.clkr,
+ [CAM_CC_IPE_NPS_CLK] = &cam_cc_ipe_nps_clk.clkr,
+ [CAM_CC_IPE_NPS_CLK_SRC] = &cam_cc_ipe_nps_clk_src.clkr,
+ [CAM_CC_IPE_NPS_FAST_AHB_CLK] = &cam_cc_ipe_nps_fast_ahb_clk.clkr,
+ [CAM_CC_IPE_PPS_CLK] = &cam_cc_ipe_pps_clk.clkr,
+ [CAM_CC_IPE_PPS_FAST_AHB_CLK] = &cam_cc_ipe_pps_fast_ahb_clk.clkr,
+ [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+ [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr,
+ [CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr,
+ [CAM_CC_MCLK5_CLK] = &cam_cc_mclk5_clk.clkr,
+ [CAM_CC_MCLK5_CLK_SRC] = &cam_cc_mclk5_clk_src.clkr,
+ [CAM_CC_MCLK6_CLK] = &cam_cc_mclk6_clk.clkr,
+ [CAM_CC_MCLK6_CLK_SRC] = &cam_cc_mclk6_clk_src.clkr,
+ [CAM_CC_MCLK7_CLK] = &cam_cc_mclk7_clk.clkr,
+ [CAM_CC_MCLK7_CLK_SRC] = &cam_cc_mclk7_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr,
+ [CAM_CC_PLL6] = &cam_cc_pll6.clkr,
+ [CAM_CC_PLL6_OUT_EVEN] = &cam_cc_pll6_out_even.clkr,
+ [CAM_CC_PLL8] = &cam_cc_pll8.clkr,
+ [CAM_CC_PLL8_OUT_EVEN] = &cam_cc_pll8_out_even.clkr,
+ [CAM_CC_SFE_0_CLK] = &cam_cc_sfe_0_clk.clkr,
+ [CAM_CC_SFE_0_CLK_SRC] = &cam_cc_sfe_0_clk_src.clkr,
+ [CAM_CC_SFE_0_FAST_AHB_CLK] = &cam_cc_sfe_0_fast_ahb_clk.clkr,
+ [CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *cam_cc_x1e80100_gdscs[] = {
+ [CAM_CC_BPS_GDSC] = &cam_cc_bps_gdsc,
+ [CAM_CC_IFE_0_GDSC] = &cam_cc_ife_0_gdsc,
+ [CAM_CC_IFE_1_GDSC] = &cam_cc_ife_1_gdsc,
+ [CAM_CC_IPE_0_GDSC] = &cam_cc_ipe_0_gdsc,
+ [CAM_CC_SFE_0_GDSC] = &cam_cc_sfe_0_gdsc,
+ [CAM_CC_TITAN_TOP_GDSC] = &cam_cc_titan_top_gdsc,
+};
+
+static const struct qcom_reset_map cam_cc_x1e80100_resets[] = {
+ [CAM_CC_BPS_BCR] = { 0x10000 },
+ [CAM_CC_ICP_BCR] = { 0x1351c },
+ [CAM_CC_IFE_0_BCR] = { 0x11000 },
+ [CAM_CC_IFE_1_BCR] = { 0x12000 },
+ [CAM_CC_IPE_0_BCR] = { 0x103b4 },
+ [CAM_CC_SFE_0_BCR] = { 0x1327c },
+};
+
+static const struct regmap_config cam_cc_x1e80100_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1603c,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc cam_cc_x1e80100_desc = {
+ .config = &cam_cc_x1e80100_regmap_config,
+ .clks = cam_cc_x1e80100_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_x1e80100_clocks),
+ .resets = cam_cc_x1e80100_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_x1e80100_resets),
+ .gdscs = cam_cc_x1e80100_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_x1e80100_gdscs),
+};
+
+static const struct of_device_id cam_cc_x1e80100_match_table[] = {
+ { .compatible = "qcom,x1e80100-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_x1e80100_match_table);
+
+static int cam_cc_x1e80100_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &cam_cc_x1e80100_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_lucid_ole_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+ clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
+
+ /* Keep clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x13a9c); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x13ab8); /* CAM_CC_SLEEP_CLK */
+
+ ret = qcom_cc_really_probe(pdev, &cam_cc_x1e80100_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver cam_cc_x1e80100_driver = {
+ .probe = cam_cc_x1e80100_probe,
+ .driver = {
+ .name = "camcc-x1e80100",
+ .of_match_table = cam_cc_x1e80100_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_x1e80100_driver);
+
+MODULE_DESCRIPTION("QTI Camera Clock Controller X1E80100 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 05898d2a8b22c..8a412ef47e163 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -52,6 +52,7 @@
#define PLL_CONFIG_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL])
#define PLL_CONFIG_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U])
#define PLL_CONFIG_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U1])
+#define PLL_CONFIG_CTL_U2(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U2])
#define PLL_TEST_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL])
#define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U])
#define PLL_TEST_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U1])
@@ -228,6 +229,21 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_ALPHA_VAL] = 0x24,
[PLL_OFF_ALPHA_VAL_U] = 0x28,
},
+ [CLK_ALPHA_PLL_TYPE_ZONDA_OLE] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_USER_CTL] = 0x0c,
+ [PLL_OFF_USER_CTL_U] = 0x10,
+ [PLL_OFF_CONFIG_CTL] = 0x14,
+ [PLL_OFF_CONFIG_CTL_U] = 0x18,
+ [PLL_OFF_CONFIG_CTL_U1] = 0x1c,
+ [PLL_OFF_CONFIG_CTL_U2] = 0x20,
+ [PLL_OFF_TEST_CTL] = 0x24,
+ [PLL_OFF_TEST_CTL_U] = 0x28,
+ [PLL_OFF_TEST_CTL_U1] = 0x2c,
+ [PLL_OFF_OPMODE] = 0x30,
+ [PLL_OFF_STATUS] = 0x3c,
+ },
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index a1a75bb12fe88..fb6d50263bb9d 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -21,6 +21,7 @@ enum {
CLK_ALPHA_PLL_TYPE_LUCID = CLK_ALPHA_PLL_TYPE_TRION,
CLK_ALPHA_PLL_TYPE_AGERA,
CLK_ALPHA_PLL_TYPE_ZONDA,
+ CLK_ALPHA_PLL_TYPE_ZONDA_OLE,
CLK_ALPHA_PLL_TYPE_LUCID_EVO,
CLK_ALPHA_PLL_TYPE_LUCID_OLE,
CLK_ALPHA_PLL_TYPE_RIVIAN_EVO,
@@ -42,6 +43,7 @@ enum {
PLL_OFF_CONFIG_CTL,
PLL_OFF_CONFIG_CTL_U,
PLL_OFF_CONFIG_CTL_U1,
+ PLL_OFF_CONFIG_CTL_U2,
PLL_OFF_TEST_CTL,
PLL_OFF_TEST_CTL_U,
PLL_OFF_TEST_CTL_U1,
@@ -119,6 +121,7 @@ struct alpha_pll_config {
u32 config_ctl_val;
u32 config_ctl_hi_val;
u32 config_ctl_hi1_val;
+ u32 config_ctl_hi2_val;
u32 user_ctl_val;
u32 user_ctl_hi_val;
u32 user_ctl_hi1_val;
@@ -173,6 +176,7 @@ extern const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops;
extern const struct clk_ops clk_alpha_pll_zonda_ops;
#define clk_alpha_pll_postdiv_zonda_ops clk_alpha_pll_postdiv_fabia_ops
+#define clk_alpha_pll_zonda_ole_ops clk_alpha_pll_zonda_ops
extern const struct clk_ops clk_alpha_pll_lucid_evo_ops;
extern const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops;
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
index 8ffed603c050b..f1b3b635ff324 100644
--- a/drivers/clk/qcom/clk-branch.h
+++ b/drivers/clk/qcom/clk-branch.h
@@ -64,6 +64,7 @@ struct clk_mem_branch {
#define CBCR_FORCE_MEM_PERIPH_OFF BIT(12)
#define CBCR_WAKEUP GENMASK(11, 8)
#define CBCR_SLEEP GENMASK(7, 4)
+#define CBCR_CLOCK_ENABLE BIT(0)
static inline void qcom_branch_set_force_mem_core(struct regmap *regmap,
struct clk_branch clk, bool on)
@@ -98,6 +99,11 @@ static inline void qcom_branch_set_sleep(struct regmap *regmap, struct clk_branc
FIELD_PREP(CBCR_SLEEP, val));
}
+static inline void qcom_branch_set_clk_en(struct regmap *regmap, u32 cbcr)
+{
+ regmap_update_bits(regmap, cbcr, CBCR_CLOCK_ENABLE, CBCR_CLOCK_ENABLE);
+}
+
extern const struct clk_ops clk_branch_ops;
extern const struct clk_ops clk_branch2_ops;
extern const struct clk_ops clk_branch_simple_ops;
diff --git a/drivers/clk/qcom/dispcc-qcm2290.c b/drivers/clk/qcom/dispcc-qcm2290.c
index 9206f0eed4462..654a10d53e5c5 100644
--- a/drivers/clk/qcom/dispcc-qcm2290.c
+++ b/drivers/clk/qcom/dispcc-qcm2290.c
@@ -519,8 +519,8 @@ static int disp_cc_qcm2290_probe(struct platform_device *pdev)
clk_alpha_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
- /* Keep DISP_CC_XO_CLK always-ON */
- regmap_update_bits(regmap, 0x604c, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x604c); /* DISP_CC_XO_CLK */
ret = qcom_cc_really_probe(pdev, &disp_cc_qcm2290_desc, regmap);
if (ret) {
@@ -539,17 +539,7 @@ static struct platform_driver disp_cc_qcm2290_driver = {
},
};
-static int __init disp_cc_qcm2290_init(void)
-{
- return platform_driver_register(&disp_cc_qcm2290_driver);
-}
-subsys_initcall(disp_cc_qcm2290_init);
-
-static void __exit disp_cc_qcm2290_exit(void)
-{
- platform_driver_unregister(&disp_cc_qcm2290_driver);
-}
-module_exit(disp_cc_qcm2290_exit);
+module_platform_driver(disp_cc_qcm2290_driver);
MODULE_DESCRIPTION("QTI DISP_CC qcm2290 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/dispcc-sc7180.c b/drivers/clk/qcom/dispcc-sc7180.c
index 9536bfc72a43c..38d7859981c7d 100644
--- a/drivers/clk/qcom/dispcc-sc7180.c
+++ b/drivers/clk/qcom/dispcc-sc7180.c
@@ -724,17 +724,7 @@ static struct platform_driver disp_cc_sc7180_driver = {
},
};
-static int __init disp_cc_sc7180_init(void)
-{
- return platform_driver_register(&disp_cc_sc7180_driver);
-}
-subsys_initcall(disp_cc_sc7180_init);
-
-static void __exit disp_cc_sc7180_exit(void)
-{
- platform_driver_unregister(&disp_cc_sc7180_driver);
-}
-module_exit(disp_cc_sc7180_exit);
+module_platform_driver(disp_cc_sc7180_driver);
MODULE_DESCRIPTION("QTI DISP_CC SC7180 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/dispcc-sc7280.c b/drivers/clk/qcom/dispcc-sc7280.c
index ad596d567f6ab..fbeb8fccb99af 100644
--- a/drivers/clk/qcom/dispcc-sc7280.c
+++ b/drivers/clk/qcom/dispcc-sc7280.c
@@ -878,11 +878,8 @@ static int disp_cc_sc7280_probe(struct platform_device *pdev)
clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
- /*
- * Keep the clocks always-ON
- * DISP_CC_XO_CLK
- */
- regmap_update_bits(regmap, 0x5008, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x5008); /* DISP_CC_XO_CLK */
return qcom_cc_really_probe(pdev, &disp_cc_sc7280_desc, regmap);
}
@@ -895,17 +892,7 @@ static struct platform_driver disp_cc_sc7280_driver = {
},
};
-static int __init disp_cc_sc7280_init(void)
-{
- return platform_driver_register(&disp_cc_sc7280_driver);
-}
-subsys_initcall(disp_cc_sc7280_init);
-
-static void __exit disp_cc_sc7280_exit(void)
-{
- platform_driver_unregister(&disp_cc_sc7280_driver);
-}
-module_exit(disp_cc_sc7280_exit);
+module_platform_driver(disp_cc_sc7280_driver);
MODULE_DESCRIPTION("QTI DISP_CC sc7280 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/dispcc-sc8280xp.c b/drivers/clk/qcom/dispcc-sc8280xp.c
index 30f636b9f0ec8..91172f5b2f15b 100644
--- a/drivers/clk/qcom/dispcc-sc8280xp.c
+++ b/drivers/clk/qcom/dispcc-sc8280xp.c
@@ -3178,8 +3178,8 @@ static int disp_cc_sc8280xp_probe(struct platform_device *pdev)
goto out_pm_runtime_put;
}
- /* DISP_CC_XO_CLK always-on */
- regmap_update_bits(regmap, 0x605c, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x605c); /* DISP_CC_XO_CLK */
out_pm_runtime_put:
pm_runtime_put_sync(&pdev->dev);
@@ -3202,17 +3202,7 @@ static struct platform_driver disp_cc_sc8280xp_driver = {
},
};
-static int __init disp_cc_sc8280xp_init(void)
-{
- return platform_driver_register(&disp_cc_sc8280xp_driver);
-}
-subsys_initcall(disp_cc_sc8280xp_init);
-
-static void __exit disp_cc_sc8280xp_exit(void)
-{
- platform_driver_unregister(&disp_cc_sc8280xp_driver);
-}
-module_exit(disp_cc_sc8280xp_exit);
+module_platform_driver(disp_cc_sc8280xp_driver);
MODULE_DESCRIPTION("Qualcomm SC8280XP dispcc driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 735adfefc3798..b84fdd17c3d8c 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -759,6 +759,8 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {
static struct gdsc mdss_gdsc = {
.gdscr = 0x3000,
+ .en_few_wait_val = 0x6,
+ .en_rest_wait_val = 0x5,
.pd = {
.name = "mdss_gdsc",
},
@@ -872,17 +874,7 @@ static struct platform_driver disp_cc_sdm845_driver = {
},
};
-static int __init disp_cc_sdm845_init(void)
-{
- return platform_driver_register(&disp_cc_sdm845_driver);
-}
-subsys_initcall(disp_cc_sdm845_init);
-
-static void __exit disp_cc_sdm845_exit(void)
-{
- platform_driver_unregister(&disp_cc_sdm845_driver);
-}
-module_exit(disp_cc_sdm845_exit);
+module_platform_driver(disp_cc_sdm845_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI DISPCC SDM845 Driver");
diff --git a/drivers/clk/qcom/dispcc-sm6115.c b/drivers/clk/qcom/dispcc-sm6115.c
index 1fab43f08e737..bd07f26af35a2 100644
--- a/drivers/clk/qcom/dispcc-sm6115.c
+++ b/drivers/clk/qcom/dispcc-sm6115.c
@@ -583,8 +583,8 @@ static int disp_cc_sm6115_probe(struct platform_device *pdev)
clk_alpha_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
- /* Keep DISP_CC_XO_CLK always-ON */
- regmap_update_bits(regmap, 0x604c, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x604c); /* DISP_CC_XO_CLK */
ret = qcom_cc_really_probe(pdev, &disp_cc_sm6115_desc, regmap);
if (ret) {
diff --git a/drivers/clk/qcom/dispcc-sm6125.c b/drivers/clk/qcom/dispcc-sm6125.c
index 87b27053ddb62..1cc5f220a3c49 100644
--- a/drivers/clk/qcom/dispcc-sm6125.c
+++ b/drivers/clk/qcom/dispcc-sm6125.c
@@ -693,17 +693,7 @@ static struct platform_driver disp_cc_sm6125_driver = {
},
};
-static int __init disp_cc_sm6125_init(void)
-{
- return platform_driver_register(&disp_cc_sm6125_driver);
-}
-subsys_initcall(disp_cc_sm6125_init);
-
-static void __exit disp_cc_sm6125_exit(void)
-{
- platform_driver_unregister(&disp_cc_sm6125_driver);
-}
-module_exit(disp_cc_sm6125_exit);
+module_platform_driver(disp_cc_sm6125_driver);
MODULE_DESCRIPTION("QTI DISPCC SM6125 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
index ea6f54ed846ec..839435362010e 100644
--- a/drivers/clk/qcom/dispcc-sm6350.c
+++ b/drivers/clk/qcom/dispcc-sm6350.c
@@ -781,17 +781,7 @@ static struct platform_driver disp_cc_sm6350_driver = {
},
};
-static int __init disp_cc_sm6350_init(void)
-{
- return platform_driver_register(&disp_cc_sm6350_driver);
-}
-subsys_initcall(disp_cc_sm6350_init);
-
-static void __exit disp_cc_sm6350_exit(void)
-{
- platform_driver_unregister(&disp_cc_sm6350_driver);
-}
-module_exit(disp_cc_sm6350_exit);
+module_platform_driver(disp_cc_sm6350_driver);
MODULE_DESCRIPTION("QTI DISP_CC SM6350 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/dispcc-sm6375.c b/drivers/clk/qcom/dispcc-sm6375.c
index caa1b90a5ff2d..d81d4e3c0b0de 100644
--- a/drivers/clk/qcom/dispcc-sm6375.c
+++ b/drivers/clk/qcom/dispcc-sm6375.c
@@ -594,17 +594,7 @@ static struct platform_driver disp_cc_sm6375_driver = {
},
};
-static int __init disp_cc_sm6375_init(void)
-{
- return platform_driver_register(&disp_cc_sm6375_driver);
-}
-subsys_initcall(disp_cc_sm6375_init);
-
-static void __exit disp_cc_sm6375_exit(void)
-{
- platform_driver_unregister(&disp_cc_sm6375_driver);
-}
-module_exit(disp_cc_sm6375_exit);
+module_platform_driver(disp_cc_sm6375_driver);
MODULE_DESCRIPTION("QTI DISPCC SM6375 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
index e17bb8b543b51..43307c8a342ca 100644
--- a/drivers/clk/qcom/dispcc-sm8250.c
+++ b/drivers/clk/qcom/dispcc-sm8250.c
@@ -39,11 +39,11 @@ enum {
P_DSI1_PHY_PLL_OUT_DSICLK,
};
-static struct pll_vco vco_table[] = {
+static const struct pll_vco vco_table[] = {
{ 249600000, 2000000000, 0 },
};
-static struct pll_vco lucid_5lpe_vco[] = {
+static const struct pll_vco lucid_5lpe_vco[] = {
{ 249600000, 1750000000, 0 },
};
@@ -214,7 +214,7 @@ static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
.freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_ahb_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
@@ -233,7 +233,7 @@ static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_2,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte0_clk_src",
.parent_data = disp_cc_parent_data_2,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
@@ -247,7 +247,7 @@ static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_2,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte1_clk_src",
.parent_data = disp_cc_parent_data_2,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
@@ -262,7 +262,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_aux1_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_1,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dp_aux1_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
@@ -277,7 +277,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_1,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dp_aux_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
@@ -291,7 +291,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_link1_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dp_link1_clk_src",
.parent_data = disp_cc_parent_data_0,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
@@ -304,7 +304,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dp_link_clk_src",
.parent_data = disp_cc_parent_data_0,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
@@ -317,7 +317,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_pixel1_clk_src = {
.mnd_width = 16,
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dp_pixel1_clk_src",
.parent_data = disp_cc_parent_data_0,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
@@ -330,7 +330,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_pixel2_clk_src = {
.mnd_width = 16,
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dp_pixel2_clk_src",
.parent_data = disp_cc_parent_data_0,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
@@ -343,7 +343,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = {
.mnd_width = 16,
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dp_pixel_clk_src",
.parent_data = disp_cc_parent_data_0,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
@@ -357,7 +357,7 @@ static struct clk_rcg2 disp_cc_mdss_edp_aux_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_1,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_edp_aux_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
@@ -372,7 +372,7 @@ static struct clk_rcg2 disp_cc_mdss_edp_gtc_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_7,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_edp_gtc_clk_src",
.parent_data = disp_cc_parent_data_7,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
@@ -386,7 +386,7 @@ static struct clk_rcg2 disp_cc_mdss_edp_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_4,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_edp_link_clk_src",
.parent_data = disp_cc_parent_data_4,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
@@ -400,7 +400,7 @@ static struct clk_rcg2 disp_cc_mdss_edp_pixel_clk_src = {
.mnd_width = 16,
.hid_width = 5,
.parent_map = disp_cc_parent_map_4,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_edp_pixel_clk_src",
.parent_data = disp_cc_parent_data_4,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
@@ -414,7 +414,7 @@ static struct clk_branch disp_cc_mdss_edp_aux_clk = {
.clkr = {
.enable_reg = 0x2078,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_edp_aux_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_edp_aux_clk_src.clkr.hw,
@@ -432,7 +432,7 @@ static struct clk_branch disp_cc_mdss_edp_gtc_clk = {
.clkr = {
.enable_reg = 0x207c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_edp_gtc_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_edp_gtc_clk_src.clkr.hw,
@@ -450,7 +450,7 @@ static struct clk_branch disp_cc_mdss_edp_link_clk = {
.clkr = {
.enable_reg = 0x2070,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_edp_link_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_edp_link_clk_src.clkr.hw,
@@ -466,7 +466,7 @@ static struct clk_regmap_div disp_cc_mdss_edp_link_div_clk_src = {
.reg = 0x2288,
.shift = 0,
.width = 2,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_edp_link_div_clk_src",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_edp_link_clk_src.clkr.hw,
@@ -482,7 +482,7 @@ static struct clk_branch disp_cc_mdss_edp_link_intf_clk = {
.clkr = {
.enable_reg = 0x2074,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_edp_link_intf_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_edp_link_div_clk_src.clkr.hw,
@@ -500,7 +500,7 @@ static struct clk_branch disp_cc_mdss_edp_pixel_clk = {
.clkr = {
.enable_reg = 0x206c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_edp_pixel_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_edp_pixel_clk_src.clkr.hw,
@@ -518,7 +518,7 @@ static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_2,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_esc0_clk_src",
.parent_data = disp_cc_parent_data_2,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
@@ -533,7 +533,7 @@ static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_2,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_esc1_clk_src",
.parent_data = disp_cc_parent_data_2,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
@@ -560,7 +560,7 @@ static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_5,
.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_mdp_clk_src",
.parent_data = disp_cc_parent_data_5,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
@@ -574,7 +574,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = disp_cc_parent_map_6,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_pclk0_clk_src",
.parent_data = disp_cc_parent_data_6,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
@@ -588,7 +588,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = disp_cc_parent_map_6,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_pclk1_clk_src",
.parent_data = disp_cc_parent_data_6,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
@@ -612,7 +612,7 @@ static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_5,
.freq_tbl = ftbl_disp_cc_mdss_rot_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_rot_clk_src",
.parent_data = disp_cc_parent_data_5,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
@@ -627,7 +627,7 @@ static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_1,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_vsync_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
@@ -640,7 +640,7 @@ static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
.reg = 0x2128,
.shift = 0,
.width = 2,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte0_div_clk_src",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_byte0_clk_src.clkr.hw,
@@ -655,7 +655,7 @@ static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
.reg = 0x2144,
.shift = 0,
.width = 2,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte1_div_clk_src",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_byte1_clk_src.clkr.hw,
@@ -665,12 +665,11 @@ static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
},
};
-
static struct clk_regmap_div disp_cc_mdss_dp_link1_div_clk_src = {
.reg = 0x2224,
.shift = 0,
.width = 2,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dp_link1_div_clk_src",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_dp_link1_clk_src.clkr.hw,
@@ -680,12 +679,11 @@ static struct clk_regmap_div disp_cc_mdss_dp_link1_div_clk_src = {
},
};
-
static struct clk_regmap_div disp_cc_mdss_dp_link_div_clk_src = {
.reg = 0x2190,
.shift = 0,
.width = 2,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dp_link_div_clk_src",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_dp_link_clk_src.clkr.hw,
@@ -701,7 +699,7 @@ static struct clk_branch disp_cc_mdss_ahb_clk = {
.clkr = {
.enable_reg = 0x2080,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_ahb_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_ahb_clk_src.clkr.hw,
@@ -719,7 +717,7 @@ static struct clk_branch disp_cc_mdss_byte0_clk = {
.clkr = {
.enable_reg = 0x2028,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte0_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_byte0_clk_src.clkr.hw,
@@ -737,7 +735,7 @@ static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
.clkr = {
.enable_reg = 0x202c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte0_intf_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_byte0_div_clk_src.clkr.hw,
@@ -755,7 +753,7 @@ static struct clk_branch disp_cc_mdss_byte1_clk = {
.clkr = {
.enable_reg = 0x2030,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte1_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_byte1_clk_src.clkr.hw,
@@ -773,7 +771,7 @@ static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
.clkr = {
.enable_reg = 0x2034,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte1_intf_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_byte1_div_clk_src.clkr.hw,
@@ -791,7 +789,7 @@ static struct clk_branch disp_cc_mdss_dp_aux1_clk = {
.clkr = {
.enable_reg = 0x2068,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dp_aux1_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_dp_aux1_clk_src.clkr.hw,
@@ -809,7 +807,7 @@ static struct clk_branch disp_cc_mdss_dp_aux_clk = {
.clkr = {
.enable_reg = 0x2054,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dp_aux_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_dp_aux_clk_src.clkr.hw,
@@ -827,7 +825,7 @@ static struct clk_branch disp_cc_mdss_dp_link1_clk = {
.clkr = {
.enable_reg = 0x205c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dp_link1_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_dp_link1_clk_src.clkr.hw,
@@ -845,7 +843,7 @@ static struct clk_branch disp_cc_mdss_dp_link1_intf_clk = {
.clkr = {
.enable_reg = 0x2060,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dp_link1_intf_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_dp_link1_div_clk_src.clkr.hw,
@@ -862,7 +860,7 @@ static struct clk_branch disp_cc_mdss_dp_link_clk = {
.clkr = {
.enable_reg = 0x2040,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dp_link_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_dp_link_clk_src.clkr.hw,
@@ -880,7 +878,7 @@ static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
.clkr = {
.enable_reg = 0x2044,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dp_link_intf_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_dp_link_div_clk_src.clkr.hw,
@@ -897,7 +895,7 @@ static struct clk_branch disp_cc_mdss_dp_pixel1_clk = {
.clkr = {
.enable_reg = 0x2050,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dp_pixel1_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_dp_pixel1_clk_src.clkr.hw,
@@ -915,7 +913,7 @@ static struct clk_branch disp_cc_mdss_dp_pixel2_clk = {
.clkr = {
.enable_reg = 0x2058,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dp_pixel2_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_dp_pixel2_clk_src.clkr.hw,
@@ -933,7 +931,7 @@ static struct clk_branch disp_cc_mdss_dp_pixel_clk = {
.clkr = {
.enable_reg = 0x204c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dp_pixel_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_dp_pixel_clk_src.clkr.hw,
@@ -951,7 +949,7 @@ static struct clk_branch disp_cc_mdss_esc0_clk = {
.clkr = {
.enable_reg = 0x2038,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_esc0_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_esc0_clk_src.clkr.hw,
@@ -969,7 +967,7 @@ static struct clk_branch disp_cc_mdss_esc1_clk = {
.clkr = {
.enable_reg = 0x203c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_esc1_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_esc1_clk_src.clkr.hw,
@@ -987,7 +985,7 @@ static struct clk_branch disp_cc_mdss_mdp_clk = {
.clkr = {
.enable_reg = 0x200c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_mdp_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_mdp_clk_src.clkr.hw,
@@ -1005,7 +1003,7 @@ static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
.clkr = {
.enable_reg = 0x201c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_mdp_lut_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_mdp_clk_src.clkr.hw,
@@ -1022,7 +1020,7 @@ static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
.clkr = {
.enable_reg = 0x4004,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_non_gdsc_ahb_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_ahb_clk_src.clkr.hw,
@@ -1040,7 +1038,7 @@ static struct clk_branch disp_cc_mdss_pclk0_clk = {
.clkr = {
.enable_reg = 0x2004,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_pclk0_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_pclk0_clk_src.clkr.hw,
@@ -1058,7 +1056,7 @@ static struct clk_branch disp_cc_mdss_pclk1_clk = {
.clkr = {
.enable_reg = 0x2008,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_pclk1_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_pclk1_clk_src.clkr.hw,
@@ -1076,7 +1074,7 @@ static struct clk_branch disp_cc_mdss_rot_clk = {
.clkr = {
.enable_reg = 0x2014,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_rot_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_rot_clk_src.clkr.hw,
@@ -1094,7 +1092,7 @@ static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
.clkr = {
.enable_reg = 0x400c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_rscc_ahb_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_ahb_clk_src.clkr.hw,
@@ -1112,7 +1110,7 @@ static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
.clkr = {
.enable_reg = 0x4008,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_rscc_vsync_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_vsync_clk_src.clkr.hw,
@@ -1130,7 +1128,7 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {
.clkr = {
.enable_reg = 0x2024,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_vsync_clk",
.parent_hws = (const struct clk_hw*[]){
&disp_cc_mdss_vsync_clk_src.clkr.hw,
@@ -1365,8 +1363,8 @@ static int disp_cc_sm8250_probe(struct platform_device *pdev)
/* Enable clock gating for MDP clocks */
regmap_update_bits(regmap, 0x8000, 0x10, 0x10);
- /* DISP_CC_XO_CLK always-on */
- regmap_update_bits(regmap, 0x605c, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x605c); /* DISP_CC_XO_CLK */
ret = qcom_cc_really_probe(pdev, &disp_cc_sm8250_desc, regmap);
@@ -1383,17 +1381,7 @@ static struct platform_driver disp_cc_sm8250_driver = {
},
};
-static int __init disp_cc_sm8250_init(void)
-{
- return platform_driver_register(&disp_cc_sm8250_driver);
-}
-subsys_initcall(disp_cc_sm8250_init);
-
-static void __exit disp_cc_sm8250_exit(void)
-{
- platform_driver_unregister(&disp_cc_sm8250_driver);
-}
-module_exit(disp_cc_sm8250_exit);
+module_platform_driver(disp_cc_sm8250_driver);
MODULE_DESCRIPTION("QTI DISPCC SM8250 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/dispcc-sm8450.c b/drivers/clk/qcom/dispcc-sm8450.c
index 2c4aecd75186b..92e9c4e7b13dc 100644
--- a/drivers/clk/qcom/dispcc-sm8450.c
+++ b/drivers/clk/qcom/dispcc-sm8450.c
@@ -1787,11 +1787,8 @@ static int disp_cc_sm8450_probe(struct platform_device *pdev)
/* Enable clock gating for MDP clocks */
regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
- /*
- * Keep clocks always enabled:
- * disp_cc_xo_clk
- */
- regmap_update_bits(regmap, 0xe05c, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0xe05c); /* DISP_CC_XO_CLK */
ret = qcom_cc_really_probe(pdev, &disp_cc_sm8450_desc, regmap);
if (ret)
@@ -1815,17 +1812,7 @@ static struct platform_driver disp_cc_sm8450_driver = {
},
};
-static int __init disp_cc_sm8450_init(void)
-{
- return platform_driver_register(&disp_cc_sm8450_driver);
-}
-subsys_initcall(disp_cc_sm8450_init);
-
-static void __exit disp_cc_sm8450_exit(void)
-{
- platform_driver_unregister(&disp_cc_sm8450_driver);
-}
-module_exit(disp_cc_sm8450_exit);
+module_platform_driver(disp_cc_sm8450_driver);
MODULE_DESCRIPTION("QTI DISPCC SM8450 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c
index f96d8b81fd9ad..3672c73ac11c6 100644
--- a/drivers/clk/qcom/dispcc-sm8550.c
+++ b/drivers/clk/qcom/dispcc-sm8550.c
@@ -1780,11 +1780,8 @@ static int disp_cc_sm8550_probe(struct platform_device *pdev)
/* Enable clock gating for MDP clocks */
regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
- /*
- * Keep clocks always enabled:
- * disp_cc_xo_clk
- */
- regmap_update_bits(regmap, 0xe054, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0xe054); /* DISP_CC_XO_CLK */
ret = qcom_cc_really_probe(pdev, &disp_cc_sm8550_desc, regmap);
if (ret)
@@ -1808,17 +1805,7 @@ static struct platform_driver disp_cc_sm8550_driver = {
},
};
-static int __init disp_cc_sm8550_init(void)
-{
- return platform_driver_register(&disp_cc_sm8550_driver);
-}
-subsys_initcall(disp_cc_sm8550_init);
-
-static void __exit disp_cc_sm8550_exit(void)
-{
- platform_driver_unregister(&disp_cc_sm8550_driver);
-}
-module_exit(disp_cc_sm8550_exit);
+module_platform_driver(disp_cc_sm8550_driver);
MODULE_DESCRIPTION("QTI DISPCC SM8550 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-sm8650.c b/drivers/clk/qcom/dispcc-sm8650.c
index f3b1d9d16baea..9539db0d91145 100644
--- a/drivers/clk/qcom/dispcc-sm8650.c
+++ b/drivers/clk/qcom/dispcc-sm8650.c
@@ -1777,8 +1777,8 @@ static int disp_cc_sm8650_probe(struct platform_device *pdev)
/* Enable clock gating for MDP clocks */
regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
- /* Keep clocks always enabled */
- regmap_update_bits(regmap, 0xe054, BIT(0), BIT(0)); /* disp_cc_xo_clk */
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0xe054); /* DISP_CC_XO_CLK */
ret = qcom_cc_really_probe(pdev, &disp_cc_sm8650_desc, regmap);
if (ret)
@@ -1802,17 +1802,7 @@ static struct platform_driver disp_cc_sm8650_driver = {
},
};
-static int __init disp_cc_sm8650_init(void)
-{
- return platform_driver_register(&disp_cc_sm8650_driver);
-}
-subsys_initcall(disp_cc_sm8650_init);
-
-static void __exit disp_cc_sm8650_exit(void)
-{
- platform_driver_unregister(&disp_cc_sm8650_driver);
-}
-module_exit(disp_cc_sm8650_exit);
+module_platform_driver(disp_cc_sm8650_driver);
MODULE_DESCRIPTION("QTI DISPCC SM8650 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-x1e80100.c b/drivers/clk/qcom/dispcc-x1e80100.c
new file mode 100644
index 0000000000000..0b2ee6456762d
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-x1e80100.c
@@ -0,0 +1,1718 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,x1e80100-dispcc.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "reset.h"
+#include "gdsc.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_AHB_CLK,
+ DT_SLEEP_CLK,
+
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_BYTECLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+ DT_DP1_PHY_PLL_LINK_CLK,
+ DT_DP1_PHY_PLL_VCO_DIV_CLK,
+ DT_DP2_PHY_PLL_LINK_CLK,
+ DT_DP2_PHY_PLL_VCO_DIV_CLK,
+ DT_DP3_PHY_PLL_LINK_CLK,
+ DT_DP3_PHY_PLL_VCO_DIV_CLK,
+};
+
+#define DISP_CC_MISC_CMD 0xF000
+
+enum {
+ P_BI_TCXO,
+ P_BI_TCXO_AO,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DISP_CC_PLL1_OUT_EVEN,
+ P_DISP_CC_PLL1_OUT_MAIN,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DP1_PHY_PLL_LINK_CLK,
+ P_DP1_PHY_PLL_VCO_DIV_CLK,
+ P_DP2_PHY_PLL_LINK_CLK,
+ P_DP2_PHY_PLL_VCO_DIV_CLK,
+ P_DP3_PHY_PLL_LINK_CLK,
+ P_DP3_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+static const struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0xd,
+ .alpha = 0x6492,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_reset_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config disp_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll disp_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_reset_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_7[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_7[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x82ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_byte0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x810c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x8128,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x81c0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x8174,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x8190,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x81a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
+ .cmd_rcgr = 0x8224,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ .cmd_rcgr = 0x8208,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = {
+ .cmd_rcgr = 0x81d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = {
+ .cmd_rcgr = 0x81f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = {
+ .cmd_rcgr = 0x8288,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ .cmd_rcgr = 0x823c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = {
+ .cmd_rcgr = 0x8258,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = {
+ .cmd_rcgr = 0x8270,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = {
+ .cmd_rcgr = 0x82d4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ .cmd_rcgr = 0x82b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = {
+ .cmd_rcgr = 0x82a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x8144,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x815c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(150000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(172000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(375000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(575000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x80dc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x80ac,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x80c4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x80f4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xe05c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_7,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xe03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_xo_clk_src",
+ .parent_data = disp_cc_parent_data_1_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x8124,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x8140,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x818c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
+ .reg = 0x8220,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
+ .reg = 0x8254,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
+ .reg = 0x82d0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_accu_clk = {
+ .halt_reg = 0xe058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xe058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_accu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0xa020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x80a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x8058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x8050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
+ .halt_reg = 0x8074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
+ .halt_reg = 0x806c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
+ .halt_reg = 0x8060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
+ .halt_reg = 0x8068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
+ .halt_reg = 0x8090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
+ .halt_reg = 0x8080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
+ .halt_reg = 0x8084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
+ .halt_reg = 0x8078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
+ .halt_reg = 0x807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_usb_router_link_intf_clk = {
+ .halt_reg = 0x8088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
+ .halt_reg = 0x80a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
+ .halt_reg = 0x8098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8098,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
+ .halt_reg = 0x809c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x809c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
+ .halt_reg = 0x8094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0xa010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x8018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0xc00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0xc008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0xa01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x8024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x9000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc mdss_int2_gdsc = {
+ .gdscr = 0xb000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *disp_cc_x1e80100_clocks[] = {
+ [DISP_CC_MDSS_ACCU_CLK] = &disp_cc_mdss_accu_clk.clkr,
+ [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &disp_cc_mdss_dptx1_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &disp_cc_mdss_dptx1_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx2_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+ [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+ [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL1] = &disp_cc_pll1.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static const struct qcom_reset_map disp_cc_x1e80100_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
+};
+
+static struct gdsc *disp_cc_x1e80100_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+ [MDSS_INT2_GDSC] = &mdss_int2_gdsc,
+};
+
+static const struct regmap_config disp_cc_x1e80100_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11008,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_x1e80100_desc = {
+ .config = &disp_cc_x1e80100_regmap_config,
+ .clks = disp_cc_x1e80100_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_x1e80100_clocks),
+ .resets = disp_cc_x1e80100_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_x1e80100_resets),
+ .gdscs = disp_cc_x1e80100_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_x1e80100_gdscs),
+};
+
+static const struct of_device_id disp_cc_x1e80100_match_table[] = {
+ { .compatible = "qcom,x1e80100-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_x1e80100_match_table);
+
+static int disp_cc_x1e80100_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &disp_cc_x1e80100_desc);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err_put_rpm;
+ }
+
+ clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+
+ /* Enable clock gating for MDP clocks */
+ regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
+
+ /* Keep clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0xe074); /* DISP_CC_SLEEP_CLK */
+ qcom_branch_set_clk_en(regmap, 0xe054); /* DISP_CC_XO_CLK */
+
+ ret = qcom_cc_really_probe(pdev, &disp_cc_x1e80100_desc, regmap);
+ if (ret)
+ goto err_put_rpm;
+
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+
+err_put_rpm:
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver disp_cc_x1e80100_driver = {
+ .probe = disp_cc_x1e80100_probe,
+ .driver = {
+ .name = "dispcc-x1e80100",
+ .of_match_table = disp_cc_x1e80100_match_table,
+ },
+};
+
+static int __init disp_cc_x1e80100_init(void)
+{
+ return platform_driver_register(&disp_cc_x1e80100_driver);
+}
+subsys_initcall(disp_cc_x1e80100_init);
+
+static void __exit disp_cc_x1e80100_exit(void)
+{
+ platform_driver_unregister(&disp_cc_x1e80100_driver);
+}
+module_exit(disp_cc_x1e80100_exit);
+
+MODULE_DESCRIPTION("QTI Display Clock Controller X1E80100 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-ipq5018.c b/drivers/clk/qcom/gcc-ipq5018.c
index 4aba47e8700d2..c1732d70e3a23 100644
--- a/drivers/clk/qcom/gcc-ipq5018.c
+++ b/drivers/clk/qcom/gcc-ipq5018.c
@@ -857,6 +857,7 @@ static struct clk_rcg2 lpass_sway_clk_src = {
static const struct freq_tbl ftbl_pcie0_aux_clk_src[] = {
F(2000000, P_XO, 12, 0, 0),
+ { }
};
static struct clk_rcg2 pcie0_aux_clk_src = {
@@ -1099,6 +1100,7 @@ static const struct freq_tbl ftbl_qpic_io_macro_clk_src[] = {
F(100000000, P_GPLL0, 8, 0, 0),
F(200000000, P_GPLL0, 4, 0, 0),
F(320000000, P_GPLL0, 2.5, 0, 0),
+ { }
};
static struct clk_rcg2 qpic_io_macro_clk_src = {
@@ -1194,6 +1196,7 @@ static struct clk_rcg2 ubi0_axi_clk_src = {
static const struct freq_tbl ftbl_ubi0_core_clk_src[] = {
F(850000000, P_UBI32_PLL, 1, 0, 0),
F(1000000000, P_UBI32_PLL, 1, 0, 0),
+ { }
};
static struct clk_rcg2 ubi0_core_clk_src = {
@@ -1754,7 +1757,7 @@ static struct clk_branch gcc_gmac0_sys_clk = {
.halt_check = BRANCH_HALT_DELAY,
.halt_bit = 31,
.clkr = {
- .enable_reg = 0x683190,
+ .enable_reg = 0x68190,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "gcc_gmac0_sys_clk",
@@ -2180,7 +2183,7 @@ static struct clk_branch gcc_pcie1_axi_s_clk = {
};
static struct clk_branch gcc_pcie1_pipe_clk = {
- .halt_reg = 8,
+ .halt_reg = 0x76018,
.halt_check = BRANCH_HALT_DELAY,
.halt_bit = 31,
.clkr = {
@@ -3632,7 +3635,7 @@ static const struct qcom_reset_map gcc_ipq5018_resets[] = {
[GCC_SYSTEM_NOC_BCR] = { 0x26000, 0 },
[GCC_TCSR_BCR] = { 0x28000, 0 },
[GCC_TLMM_BCR] = { 0x34000, 0 },
- [GCC_UBI0_AXI_ARES] = { 0x680},
+ [GCC_UBI0_AXI_ARES] = { 0x68010, 0 },
[GCC_UBI0_AHB_ARES] = { 0x68010, 1 },
[GCC_UBI0_NC_AXI_ARES] = { 0x68010, 2 },
[GCC_UBI0_DBG_ARES] = { 0x68010, 3 },
diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
index b366912cd6480..7e69de34c310c 100644
--- a/drivers/clk/qcom/gcc-ipq6018.c
+++ b/drivers/clk/qcom/gcc-ipq6018.c
@@ -1554,6 +1554,7 @@ static struct clk_regmap_div nss_ubi0_div_clk_src = {
static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
F(24000000, P_XO, 1, 0, 0),
+ { }
};
static const struct clk_parent_data gcc_xo_gpll0_core_pi_sleep_clk[] = {
@@ -1734,6 +1735,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
F(160000000, P_GPLL0, 5, 0, 0),
F(216000000, P_GPLL6, 5, 0, 0),
F(308570000, P_GPLL6, 3.5, 0, 0),
+ { }
};
static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = {
@@ -3522,6 +3524,22 @@ static struct clk_branch gcc_prng_ahb_clk = {
},
};
+static struct clk_branch gcc_qdss_at_clk = {
+ .halt_reg = 0x29024,
+ .clkr = {
+ .enable_reg = 0x29024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qdss_at_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &qdss_at_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_qdss_dap_clk = {
.halt_reg = 0x29084,
.clkr = {
@@ -4361,6 +4379,7 @@ static struct clk_regmap *gcc_ipq6018_clks[] = {
[GCC_SYS_NOC_PCIE0_AXI_CLK] = &gcc_sys_noc_pcie0_axi_clk.clkr,
[GCC_PCIE0_PIPE_CLK] = &gcc_pcie0_pipe_clk.clkr,
[GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QDSS_AT_CLK] = &gcc_qdss_at_clk.clkr,
[GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
[GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr,
[GCC_QPIC_CLK] = &gcc_qpic_clk.clkr,
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index b7faf12a511a1..7bc679871f324 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -644,6 +644,7 @@ static struct clk_rcg2 pcie0_axi_clk_src = {
static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
+ { }
};
static const struct clk_parent_data gcc_xo_gpll0_sleep_clk[] = {
@@ -795,6 +796,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(160000000, P_GPLL0, 5, 0, 0),
F(308570000, P_GPLL6, 3.5, 0, 0),
+ { }
};
static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = {
diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
index e8190108e1aef..0a3f846695b80 100644
--- a/drivers/clk/qcom/gcc-ipq9574.c
+++ b/drivers/clk/qcom/gcc-ipq9574.c
@@ -2082,6 +2082,7 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
F(150000000, P_GPLL4, 8, 0, 0),
F(300000000, P_GPLL4, 4, 0, 0),
+ { }
};
static struct clk_rcg2 sdcc1_ice_core_clk_src = {
diff --git a/drivers/clk/qcom/gcc-msm8953.c b/drivers/clk/qcom/gcc-msm8953.c
index 3e5a8cb14d4df..68359534ff257 100644
--- a/drivers/clk/qcom/gcc-msm8953.c
+++ b/drivers/clk/qcom/gcc-msm8953.c
@@ -4171,6 +4171,10 @@ static const struct qcom_reset_map gcc_msm8953_resets[] = {
[GCC_USB3PHY_PHY_BCR] = { 0x3f03c },
[GCC_USB3_PHY_BCR] = { 0x3f034 },
[GCC_USB_30_BCR] = { 0x3f070 },
+ [GCC_MDSS_BCR] = { 0x4d074 },
+ [GCC_CRYPTO_BCR] = { 0x16000 },
+ [GCC_SDCC1_BCR] = { 0x42000 },
+ [GCC_SDCC2_BCR] = { 0x43000 },
};
static const struct regmap_config gcc_msm8953_regmap_config = {
diff --git a/drivers/clk/qcom/gcc-sa8775p.c b/drivers/clk/qcom/gcc-sa8775p.c
index 8171d23c96e64..5bcbfbf52cb9e 100644
--- a/drivers/clk/qcom/gcc-sa8775p.c
+++ b/drivers/clk/qcom/gcc-sa8775p.c
@@ -4662,8 +4662,8 @@ static const struct qcom_reset_map gcc_sa8775p_resets[] = {
[GCC_USB3UNIPHY_PHY_MP0_BCR] = { 0x5c020 },
[GCC_USB3UNIPHY_PHY_MP1_BCR] = { 0x5c024 },
[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x76000 },
- [GCC_VIDEO_AXI0_CLK_ARES] = { 0x34014, 2 },
- [GCC_VIDEO_AXI1_CLK_ARES] = { 0x3401c, 2 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { .reg = 0x34014, .bit = 2, .udelay = 400 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { .reg = 0x3401c, .bit = 2, .udelay = 400 },
[GCC_VIDEO_BCR] = { 0x34000 },
};
@@ -4742,21 +4742,16 @@ static int gcc_sa8775p_probe(struct platform_device *pdev)
if (ret)
return ret;
- /*
- * Keep the clocks always-ON
- * GCC_CAMERA_AHB_CLK, GCC_CAMERA_XO_CLK, GCC_DISP1_AHB_CLK,
- * GCC_DISP1_XO_CLK, GCC_DISP_AHB_CLK, GCC_DISP_XO_CLK,
- * GCC_GPU_CFG_AHB_CLK, GCC_VIDEO_AHB_CLK, GCC_VIDEO_XO_CLK.
- */
- regmap_update_bits(regmap, 0x32004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x32020, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0xc7004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0xc7018, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x33004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x33018, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x7d004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x34004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x34024, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x32004); /* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x32020); /* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0xc7004); /* GCC_DISP1_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xc7018); /* GCC_DISP1_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x33004); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x33018); /* GCC_DISP_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x7d004); /* GCC_GPU_CFG_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x34004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x34024); /* GCC_VIDEO_XO_CLK */
return qcom_cc_really_probe(pdev, &gcc_sa8775p_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
index a3406aadbd170..6a5f785c0ced0 100644
--- a/drivers/clk/qcom/gcc-sc7180.c
+++ b/drivers/clk/qcom/gcc-sc7180.c
@@ -2443,19 +2443,15 @@ static int gcc_sc7180_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0x4d110, 0x3, 0x3);
regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
- /*
- * Keep the clocks always-ON
- * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK,
- * GCC_DISP_AHB_CLK, GCC_GPU_CFG_AHB_CLK
- */
- regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x0b02c, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x0b028, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x0b030, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x48004); /* GCC_CPUSS_GNOC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x0b004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x0b008); /* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x0b00c); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x0b02c); /* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x0b028); /* GCC_VIDEO_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x0b030); /* GCC_DISP_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x71004); /* GCC_GPU_CFG_AHB_CLK */
ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
ARRAY_SIZE(gcc_dfs_clocks));
diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
index 2b661df5de266..f45a8318900c5 100644
--- a/drivers/clk/qcom/gcc-sc7280.c
+++ b/drivers/clk/qcom/gcc-sc7280.c
@@ -3453,18 +3453,14 @@ static int gcc_sc7280_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- /*
- * Keep the clocks always-ON
- * GCC_CAMERA_AHB_CLK/XO_CLK, GCC_DISP_AHB_CLK/XO_CLK
- * GCC_VIDEO_AHB_CLK/XO_CLK, GCC_GPU_CFG_AHB_CLK
- */
- regmap_update_bits(regmap, 0x26004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x26028, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x27004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x2701C, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x28004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x28014, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x26004);/* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x26028);/* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x27004);/* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x2701c);/* GCC_DISP_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x28004);/* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x28014);/* GCC_VIDEO_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x71004);/* GCC_GPU_CFG_AHB_CLK */
regmap_update_bits(regmap, 0x7100C, BIT(13), BIT(13));
ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
diff --git a/drivers/clk/qcom/gcc-sc8180x.c b/drivers/clk/qcom/gcc-sc8180x.c
index ae21473815596..5261bfc92b3dc 100644
--- a/drivers/clk/qcom/gcc-sc8180x.c
+++ b/drivers/clk/qcom/gcc-sc8180x.c
@@ -3347,6 +3347,19 @@ static struct clk_branch gcc_ufs_card_2_unipro_core_clk = {
},
};
+static struct clk_branch gcc_ufs_card_clkref_en = {
+ .halt_reg = 0x8c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_ufs_card_ahb_clk = {
.halt_reg = 0x75014,
.halt_check = BRANCH_HALT,
@@ -3561,6 +3574,19 @@ static struct clk_branch gcc_ufs_card_unipro_core_hw_ctl_clk = {
},
};
+static struct clk_branch gcc_ufs_mem_clkref_en = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_mem_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_ufs_phy_ahb_clk = {
.halt_reg = 0x77014,
.halt_check = BRANCH_HALT,
@@ -4413,6 +4439,7 @@ static struct clk_regmap *gcc_sc8180x_clocks[] = {
[GCC_UFS_CARD_2_TX_SYMBOL_0_CLK] = &gcc_ufs_card_2_tx_symbol_0_clk.clkr,
[GCC_UFS_CARD_2_UNIPRO_CORE_CLK] = &gcc_ufs_card_2_unipro_core_clk.clkr,
[GCC_UFS_CARD_2_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_card_2_unipro_core_clk_src.clkr,
+ [GCC_UFS_CARD_CLKREF_EN] = &gcc_ufs_card_clkref_en.clkr,
[GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr,
[GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr,
[GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr,
@@ -4429,6 +4456,7 @@ static struct clk_regmap *gcc_sc8180x_clocks[] = {
[GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr,
[GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_card_unipro_core_clk_src.clkr,
[GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_card_unipro_core_hw_ctl_clk.clkr,
+ [GCC_UFS_MEM_CLKREF_EN] = &gcc_ufs_mem_clkref_en.clkr,
[GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
[GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
[GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
@@ -4528,9 +4556,9 @@ static const struct qcom_reset_map gcc_sc8180x_resets[] = {
[GCC_USB30_PRIM_BCR] = { 0xf000 },
[GCC_USB30_SEC_BCR] = { 0x10000 },
[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
- [GCC_VIDEO_AXIC_CLK_BCR] = { 0xb02c, 2 },
- [GCC_VIDEO_AXI0_CLK_BCR] = { 0xb024, 2 },
- [GCC_VIDEO_AXI1_CLK_BCR] = { 0xb028, 2 },
+ [GCC_VIDEO_AXIC_CLK_BCR] = { .reg = 0xb02c, .bit = 2, .udelay = 150 },
+ [GCC_VIDEO_AXI0_CLK_BCR] = { .reg = 0xb024, .bit = 2, .udelay = 150 },
+ [GCC_VIDEO_AXI1_CLK_BCR] = { .reg = 0xb028, .bit = 2, .udelay = 150 },
};
static struct gdsc *gcc_sc8180x_gdscs[] = {
@@ -4579,23 +4607,17 @@ static int gcc_sc8180x_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- /*
- * Enable the following always-on clocks:
- * GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK, GCC_DISP_AHB_CLK,
- * GCC_VIDEO_XO_CLK, GCC_CAMERA_XO_CLK, GCC_DISP_XO_CLK,
- * GCC_CPUSS_GNOC_CLK, GCC_CPUSS_DVM_BUS_CLK, GCC_NPU_CFG_AHB_CLK and
- * GCC_GPU_CFG_AHB_CLK
- */
- regmap_update_bits(regmap, 0xb004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0xb008, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0xb00c, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0xb040, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0xb044, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0xb048, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x48190, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x4d004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0xb004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb008); /* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb00c); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb040); /* GCC_VIDEO_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb044); /* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb048); /* GCC_DISP_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x48004); /* GCC_CPUSS_GNOC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x48190); /* GCC_CPUSS_DVM_BUS_CLK */
+ qcom_branch_set_clk_en(regmap, 0x4d004); /* GCC_NPU_CFG_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x71004); /* GCC_GPU_CFG_AHB_CLK */
/* Disable the GPLL0 active input to NPU and GPU via MISC registers */
regmap_update_bits(regmap, 0x4d110, 0x3, 0x3);
diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c
index bfb77931e8686..082d7b5504ebb 100644
--- a/drivers/clk/qcom/gcc-sc8280xp.c
+++ b/drivers/clk/qcom/gcc-sc8280xp.c
@@ -7448,8 +7448,8 @@ static const struct qcom_reset_map gcc_sc8280xp_resets[] = {
[GCC_USB4PHY_PHY_PRIM_BCR] = { 0x4a004 },
[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
[GCC_VIDEO_BCR] = { 0x28000 },
- [GCC_VIDEO_AXI0_CLK_ARES] = { 0x28010, 2 },
- [GCC_VIDEO_AXI1_CLK_ARES] = { 0x28018, 2 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { .reg = 0x28010, .bit = 2, .udelay = 400 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { .reg = 0x28018, .bit = 2, .udelay = 400 },
};
static struct gdsc *gcc_sc8280xp_gdscs[] = {
@@ -7543,21 +7543,16 @@ static int gcc_sc8280xp_probe(struct platform_device *pdev)
goto err_put_rpm;
}
- /*
- * Keep the clocks always-ON
- * GCC_CAMERA_AHB_CLK, GCC_CAMERA_XO_CLK, GCC_DISP_AHB_CLK,
- * GCC_DISP_XO_CLK, GCC_GPU_CFG_AHB_CLK, GCC_VIDEO_AHB_CLK,
- * GCC_VIDEO_XO_CLK, GCC_DISP1_AHB_CLK, GCC_DISP1_XO_CLK
- */
- regmap_update_bits(regmap, 0x26004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x26020, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x27004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x27028, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x28004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x28028, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0xbb004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0xbb028, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x26004); /* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x26020); /* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x27004); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x27028); /* GCC_DISP_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x71004); /* GCC_GPU_CFG_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x28004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x28028); /* GCC_VIDEO_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0xbb004); /* GCC_DISP1_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xbb028); /* GCC_DISP1_XO_CLK */
ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, ARRAY_SIZE(gcc_dfs_clocks));
if (ret)
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 725cd52d2398e..ea4c3bf4fb9bf 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -4037,3 +4037,4 @@ module_exit(gcc_sdm845_exit);
MODULE_DESCRIPTION("QTI GCC SDM845 Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:gcc-sdm845");
+MODULE_SOFTDEP("pre: rpmhpd");
diff --git a/drivers/clk/qcom/gcc-sdx55.c b/drivers/clk/qcom/gcc-sdx55.c
index d5e17122698cd..26279b8d321a3 100644
--- a/drivers/clk/qcom/gcc-sdx55.c
+++ b/drivers/clk/qcom/gcc-sdx55.c
@@ -1611,14 +1611,10 @@ static int gcc_sdx55_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- /*
- * Keep the clocks always-ON as they are critical to the functioning
- * of the system:
- * GCC_SYS_NOC_CPUSS_AHB_CLK, GCC_CPUSS_AHB_CLK, GCC_CPUSS_GNOC_CLK
- */
- regmap_update_bits(regmap, 0x6d008, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x6d008, BIT(21), BIT(21));
- regmap_update_bits(regmap, 0x6d008, BIT(22), BIT(22));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x6d008); /* GCC_SYS_NOC_CPUSS_AHB_CLK */
+ regmap_update_bits(regmap, 0x6d008, BIT(21), BIT(21)); /* GCC_CPUSS_AHB_CLK */
+ regmap_update_bits(regmap, 0x6d008, BIT(22), BIT(22)); /* GCC_CPUSS_GNOC_CLK */
return qcom_cc_really_probe(pdev, &gcc_sdx55_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-sdx65.c b/drivers/clk/qcom/gcc-sdx65.c
index ffddbed5a6dba..8fde6463574b9 100644
--- a/drivers/clk/qcom/gcc-sdx65.c
+++ b/drivers/clk/qcom/gcc-sdx65.c
@@ -1574,14 +1574,11 @@ static int gcc_sdx65_probe(struct platform_device *pdev)
regmap = qcom_cc_map(pdev, &gcc_sdx65_desc);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- /*
- * Keep the clocks always-ON as they are critical to the functioning
- * of the system:
- * GCC_SYS_NOC_CPUSS_AHB_CLK, GCC_CPUSS_AHB_CLK, GCC_CPUSS_GNOC_CLK
- */
- regmap_update_bits(regmap, 0x6d008, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x6d008, BIT(21), BIT(21));
- regmap_update_bits(regmap, 0x6d008, BIT(22), BIT(22));
+
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x6d008); /* GCC_SYS_NOC_CPUSS_AHB_CLK */
+ regmap_update_bits(regmap, 0x6d008, BIT(21), BIT(21)); /* GCC_CPUSS_AHB_CLK */
+ regmap_update_bits(regmap, 0x6d008, BIT(22), BIT(22)); /* GCC_CPUSS_GNOC_CLK */
return qcom_cc_really_probe(pdev, &gcc_sdx65_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-sdx75.c b/drivers/clk/qcom/gcc-sdx75.c
index 573af17bd24ca..c51338f08ef16 100644
--- a/drivers/clk/qcom/gcc-sdx75.c
+++ b/drivers/clk/qcom/gcc-sdx75.c
@@ -2936,13 +2936,9 @@ static int gcc_sdx75_probe(struct platform_device *pdev)
if (ret)
return ret;
- /*
- * Keep clocks always enabled:
- * gcc_ahb_pcie_link_clk
- * gcc_xo_pcie_link_clk
- */
- regmap_update_bits(regmap, 0x3e004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x3e008, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x3e004); /* GCC_AHB_PCIE_LINK_CLK */
+ qcom_branch_set_clk_en(regmap, 0x3e008); /* GCC_XO_PCIE_LINK_CLK */
return qcom_cc_really_probe(pdev, &gcc_sdx75_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-sm4450.c b/drivers/clk/qcom/gcc-sm4450.c
index 31abe2775fc83..062e55e981569 100644
--- a/drivers/clk/qcom/gcc-sm4450.c
+++ b/drivers/clk/qcom/gcc-sm4450.c
@@ -2791,8 +2791,8 @@ static const struct qcom_reset_map gcc_sm4450_resets[] = {
[GCC_VENUS_BCR] = { 0xb601c },
[GCC_VIDEO_BCR] = { 0x42000 },
[GCC_VIDEO_VENUS_BCR] = { 0xb6000 },
- [GCC_VENUS_CTL_AXI_CLK_ARES] = { 0x4201c, 2 },
- [GCC_VIDEO_VENUS_CTL_CLK_ARES] = { 0xb6038, 2 },
+ [GCC_VENUS_CTL_AXI_CLK_ARES] = { .reg = 0x4201c, .bit = 2, .udelay = 400 },
+ [GCC_VIDEO_VENUS_CTL_CLK_ARES] = { .reg = 0xb6038, .bit = 2, .udelay = 400 },
};
static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
@@ -2849,25 +2849,15 @@ static int gcc_sm4450_probe(struct platform_device *pdev)
qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
- /*
- * Keep clocks always enabled:
- * gcc_camera_ahb_clk
- * gcc_camera_sleep_clk
- * gcc_camera_xo_clk
- * gcc_disp_ahb_clk
- * gcc_disp_xo_clk
- * gcc_gpu_cfg_ahb_clk
- * gcc_video_ahb_clk
- * gcc_video_xo_clk
- */
- regmap_update_bits(regmap, 0x36004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x36018, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x3601c, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x37004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x37014, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x81004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x42004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x42018, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x36004); /* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x36018); /* GCC_CAMERA_SLEEP_CLK */
+ qcom_branch_set_clk_en(regmap, 0x3601c); /* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x37004); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x37014); /* GCC_DISP_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x81004); /* GCC_GPU_CFG_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x42004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x42018); /* GCC_VIDEO_XO_CLK */
regmap_update_bits(regmap, 0x4201c, BIT(21), BIT(21));
diff --git a/drivers/clk/qcom/gcc-sm6375.c b/drivers/clk/qcom/gcc-sm6375.c
index 3dd15d765b22e..84639d5b89bfb 100644
--- a/drivers/clk/qcom/gcc-sm6375.c
+++ b/drivers/clk/qcom/gcc-sm6375.c
@@ -3882,13 +3882,10 @@ static int gcc_sm6375_probe(struct platform_device *pdev)
if (ret)
return ret;
- /*
- * Keep the following clocks always on:
- * GCC_CAMERA_XO_CLK, GCC_CPUSS_GNOC_CLK, GCC_DISP_XO_CLK
- */
- regmap_update_bits(regmap, 0x17028, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x2b004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x1702c, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x17028); /* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x2b004); /* GCC_CPUSS_GNOC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x1702c); /* GCC_DISP_XO_CLK */
clk_lucid_pll_configure(&gpll10, regmap, &gpll10_config);
clk_lucid_pll_configure(&gpll11, regmap, &gpll11_config);
diff --git a/drivers/clk/qcom/gcc-sm7150.c b/drivers/clk/qcom/gcc-sm7150.c
index d9983bb274756..44b49f7cd1783 100644
--- a/drivers/clk/qcom/gcc-sm7150.c
+++ b/drivers/clk/qcom/gcc-sm7150.c
@@ -2918,7 +2918,7 @@ static const struct qcom_reset_map gcc_sm7150_resets[] = {
[GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
[GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
[GCC_QUSB2PHY_PRIM_BCR] = { 0x26000 },
- [GCC_VIDEO_AXI_CLK_BCR] = { 0xb01c, 2 },
+ [GCC_VIDEO_AXI_CLK_BCR] = { .reg = 0xb01c, .bit = 2, .udelay = 150 },
};
static const struct clk_rcg_dfs_data gcc_sm7150_dfs_desc[] = {
@@ -3002,20 +3002,15 @@ static int gcc_sm7150_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0x4d110, 0x3, 0x3);
regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
- /*
- * Keep the critical clocks always-ON
- * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK,
- * GCC_DISP_AHB_CLK, GCC_CAMERA_XO_CLK, GCC_VIDEO_XO_CLK,
- * GCC_DISP_XO_CLK, GCC_GPU_CFG_AHB_CLK
- */
- regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x0b02c, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x0b028, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x0b030, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x48004); /* GCC_CPUSS_GNOC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x0b004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x0b008); /* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x0b00c); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x0b02c); /* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x0b028); /* GCC_VIDEO_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x0b030); /* GCC_DISP_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x71004); /* GCC_GPU_CFG_AHB_CLK */
ret = qcom_cc_register_rcg_dfs(regmap, gcc_sm7150_dfs_desc,
ARRAY_SIZE(gcc_sm7150_dfs_desc));
diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
index 05d115c52dfeb..a47ef9dfa8080 100644
--- a/drivers/clk/qcom/gcc-sm8150.c
+++ b/drivers/clk/qcom/gcc-sm8150.c
@@ -453,19 +453,29 @@ static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
{ }
};
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
.cmd_rcgr = 0x17148,
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s0_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
@@ -474,13 +484,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s1_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
@@ -489,13 +501,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s2_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
@@ -504,13 +518,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s3_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
@@ -519,13 +535,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s4_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
@@ -534,13 +552,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s5_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
@@ -549,13 +569,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s6_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
@@ -564,13 +586,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s7_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@@ -579,13 +603,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s0_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@@ -594,13 +620,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s1_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
@@ -609,13 +637,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s2_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@@ -624,13 +654,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s3_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@@ -639,13 +671,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s4_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@@ -654,13 +688,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s5_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
@@ -669,13 +705,15 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap2_s0_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
@@ -684,13 +722,15 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap2_s1_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
@@ -699,13 +739,15 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap2_s2_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
@@ -714,13 +756,15 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap2_s3_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
@@ -729,13 +773,15 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap2_s4_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
@@ -744,13 +790,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap2_s5_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
};
static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
@@ -3738,6 +3778,9 @@ static const struct qcom_reset_map gcc_sm8150_resets[] = {
[GCC_USB30_PRIM_BCR] = { 0xf000 },
[GCC_USB30_SEC_BCR] = { 0x10000 },
[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+ [GCC_VIDEO_AXIC_CLK_BCR] = { 0xb02c, 2 },
+ [GCC_VIDEO_AXI0_CLK_BCR] = { 0xb024, 2 },
+ [GCC_VIDEO_AXI1_CLK_BCR] = { 0xb028, 2 },
};
static struct gdsc *gcc_sm8150_gdscs[] = {
@@ -3750,6 +3793,29 @@ static struct gdsc *gcc_sm8150_gdscs[] = {
[USB30_SEC_GDSC] = &usb30_sec_gdsc,
};
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+};
+
static const struct regmap_config gcc_sm8150_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -3777,6 +3843,7 @@ MODULE_DEVICE_TABLE(of, gcc_sm8150_match_table);
static int gcc_sm8150_probe(struct platform_device *pdev)
{
struct regmap *regmap;
+ int ret;
regmap = qcom_cc_map(pdev, &gcc_sm8150_desc);
if (IS_ERR(regmap))
@@ -3786,6 +3853,11 @@ static int gcc_sm8150_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0x4d110, 0x3, 0x3);
regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ dev_err_probe(&pdev->dev, ret, "Failed to register with DFS!\n");
+
return qcom_cc_really_probe(pdev, &gcc_sm8150_desc, regmap);
}
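The QUP RCG init data is hoisted out of anonymous compound literals into named *_clk_src_init variables because the DFS table above is built with DEFINE_RCG_DFS(), which token-pastes an _init suffix onto the RCG symbol name. A sketch of the assumed macro shape (the exact definition is in clk-rcg.h and should be checked there):

    /* Assumed: the DFS descriptor needs a visible <rcg>_init symbol,
     * which is why the init data can no longer be an anonymous literal. */
    #define DEFINE_RCG_DFS(r)	{ .rcg = &r, .init = &r##_init }

As written, the probe hunk logs a DFS registration failure with dev_err_probe() and then continues to qcom_cc_really_probe() rather than returning early.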
diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c
index c6c5261264f11..e630bfa2d0c17 100644
--- a/drivers/clk/qcom/gcc-sm8250.c
+++ b/drivers/clk/qcom/gcc-sm8250.c
@@ -3576,8 +3576,8 @@ static const struct qcom_reset_map gcc_sm8250_resets[] = {
[GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
[GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
- [GCC_VIDEO_AXI0_CLK_ARES] = { 0xb024, 2 },
- [GCC_VIDEO_AXI1_CLK_ARES] = { 0xb028, 2 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { .reg = 0xb024, .bit = 2, .udelay = 150 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { .reg = 0xb028, .bit = 2, .udelay = 150 },
};
static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
@@ -3643,18 +3643,13 @@ static int gcc_sm8250_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0x4d110, 0x3, 0x3);
regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
- /*
- * Keep the clocks always-ON
- * GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK, GCC_DISP_AHB_CLK,
- * GCC_CPUSS_DVM_BUS_CLK, GCC_GPU_CFG_AHB_CLK,
- * GCC_SYS_NOC_CPUSS_AHB_CLK
- */
- regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x4818c, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x52000, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x0b004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x0b008); /* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x0b00c); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x4818c); /* GCC_CPUSS_DVM_BUS_CLK */
+ qcom_branch_set_clk_en(regmap, 0x71004); /* GCC_GPU_CFG_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x52000); /* GCC_SYS_NOC_CPUSS_AHB_CLK */
ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
ARRAY_SIZE(gcc_dfs_clocks));
diff --git a/drivers/clk/qcom/gcc-sm8350.c b/drivers/clk/qcom/gcc-sm8350.c
index 1385a98eb3bbe..fc0402e8a2a75 100644
--- a/drivers/clk/qcom/gcc-sm8350.c
+++ b/drivers/clk/qcom/gcc-sm8350.c
@@ -3743,8 +3743,8 @@ static const struct qcom_reset_map gcc_sm8350_resets[] = {
[GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
[GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
- [GCC_VIDEO_AXI0_CLK_ARES] = { 0x28010, 2 },
- [GCC_VIDEO_AXI1_CLK_ARES] = { 0x28018, 2 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { .reg = 0x28010, .bit = 2, .udelay = 400 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { .reg = 0x28018, .bit = 2, .udelay = 400 },
[GCC_VIDEO_BCR] = { 0x28000 },
};
@@ -3806,18 +3806,14 @@ static int gcc_sm8350_probe(struct platform_device *pdev)
return PTR_ERR(regmap);
}
- /*
- * Keep the critical clock always-On
- * GCC_CAMERA_AHB_CLK, GCC_CAMERA_XO_CLK, GCC_DISP_AHB_CLK, GCC_DISP_XO_CLK,
- * GCC_GPU_CFG_AHB_CLK, GCC_VIDEO_AHB_CLK, GCC_VIDEO_XO_CLK
- */
- regmap_update_bits(regmap, 0x26004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x26018, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x27004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x2701c, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x28004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x28020, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x26004); /* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x26018); /* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x27004); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x2701c); /* GCC_DISP_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x71004); /* GCC_GPU_CFG_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x28004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x28020); /* GCC_VIDEO_XO_CLK */
ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, ARRAY_SIZE(gcc_dfs_clocks));
if (ret)
diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c
index 5635429825516..e86c58bc5e48b 100644
--- a/drivers/clk/qcom/gcc-sm8450.c
+++ b/drivers/clk/qcom/gcc-sm8450.c
@@ -3202,8 +3202,8 @@ static const struct qcom_reset_map gcc_sm8450_resets[] = {
[GCC_USB3PHY_PHY_PRIM_BCR] = { 0x60004 },
[GCC_USB3PHY_PHY_SEC_BCR] = { 0x60010 },
[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x7a000 },
- [GCC_VIDEO_AXI0_CLK_ARES] = { 0x42018, 2 },
- [GCC_VIDEO_AXI1_CLK_ARES] = { 0x42020, 2 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { .reg = 0x42018, .bit = 2, .udelay = 1000 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { .reg = 0x42020, .bit = 2, .udelay = 1000 },
[GCC_VIDEO_BCR] = { 0x42000 },
};
@@ -3280,19 +3280,14 @@ static int gcc_sm8450_probe(struct platform_device *pdev)
/* FORCE_MEM_CORE_ON for ufs phy ice core clocks */
regmap_update_bits(regmap, gcc_ufs_phy_ice_core_clk.halt_reg, BIT(14), BIT(14));
- /*
- * Keep the critical clock always-On
- * gcc_camera_ahb_clk, gcc_camera_xo_clk, gcc_disp_ahb_clk,
- * gcc_disp_xo_clk, gcc_gpu_cfg_ahb_clk, gcc_video_ahb_clk,
- * gcc_video_xo_clk
- */
- regmap_update_bits(regmap, 0x36004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x36020, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x37004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x3701c, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x81004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x42004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x42028, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x36004); /* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x36020); /* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x37004); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x3701c); /* GCC_DISP_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x81004); /* GCC_GPU_CFG_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x42004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x42028); /* GCC_VIDEO_XO_CLK */
return qcom_cc_really_probe(pdev, &gcc_sm8450_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-sm8550.c b/drivers/clk/qcom/gcc-sm8550.c
index b883dffe5f7aa..26d7349e76424 100644
--- a/drivers/clk/qcom/gcc-sm8550.c
+++ b/drivers/clk/qcom/gcc-sm8550.c
@@ -3276,8 +3276,8 @@ static const struct qcom_reset_map gcc_sm8550_resets[] = {
[GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
[GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
- [GCC_VIDEO_AXI0_CLK_ARES] = { 0x32018, 2 },
- [GCC_VIDEO_AXI1_CLK_ARES] = { 0x32024, 2 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { .reg = 0x32018, .bit = 2, .udelay = 1000 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { .reg = 0x32024, .bit = 2, .udelay = 1000 },
[GCC_VIDEO_BCR] = { 0x32000 },
};
@@ -3352,19 +3352,14 @@ static int gcc_sm8550_probe(struct platform_device *pdev)
/* FORCE_MEM_CORE_ON for ufs phy ice core clocks */
regmap_update_bits(regmap, gcc_ufs_phy_ice_core_clk.halt_reg, BIT(14), BIT(14));
- /*
- * Keep the critical clock always-On
- * gcc_camera_ahb_clk, gcc_camera_xo_clk, gcc_disp_ahb_clk,
- * gcc_disp_xo_clk, gcc_gpu_cfg_ahb_clk, gcc_video_ahb_clk,
- * gcc_video_xo_clk
- */
- regmap_update_bits(regmap, 0x26004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x26028, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x27004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x27018, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x32004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x32030, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x26004); /* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x26028); /* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x27004); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x27018); /* GCC_DISP_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x71004); /* GCC_GPU_CFG_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x32004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x32030); /* GCC_VIDEO_XO_CLK */
/* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */
regmap_write(regmap, 0x52024, 0x0);
diff --git a/drivers/clk/qcom/gcc-sm8650.c b/drivers/clk/qcom/gcc-sm8650.c
index 9174dd82308c2..9d1cbdf860fb3 100644
--- a/drivers/clk/qcom/gcc-sm8650.c
+++ b/drivers/clk/qcom/gcc-sm8650.c
@@ -3734,8 +3734,8 @@ static const struct qcom_reset_map gcc_sm8650_resets[] = {
[GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
[GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
[GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
- [GCC_VIDEO_AXI0_CLK_ARES] = { 0x32018, 2 },
- [GCC_VIDEO_AXI1_CLK_ARES] = { 0x32024, 2 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { .reg = 0x32018, .bit = 2, .udelay = 1000 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { .reg = 0x32024, .bit = 2, .udelay = 1000 },
[GCC_VIDEO_BCR] = { 0x32000 },
};
@@ -3808,14 +3808,14 @@ static int gcc_sm8650_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Keep the critical clock always-On */
- regmap_update_bits(regmap, 0x26004, BIT(0), BIT(0)); /* gcc_camera_ahb_clk */
- regmap_update_bits(regmap, 0x26028, BIT(0), BIT(0)); /* gcc_camera_xo_clk */
- regmap_update_bits(regmap, 0x27004, BIT(0), BIT(0)); /* gcc_disp_ahb_clk */
- regmap_update_bits(regmap, 0x27018, BIT(0), BIT(0)); /* gcc_disp_xo_clk */
- regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0)); /* gcc_gpu_cfg_ahb_clk */
- regmap_update_bits(regmap, 0x32004, BIT(0), BIT(0)); /* gcc_video_ahb_clk */
- regmap_update_bits(regmap, 0x32030, BIT(0), BIT(0)); /* gcc_video_xo_clk */
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x26004); /* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x26028); /* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x27004); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x27018); /* GCC_DISP_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x71004); /* GCC_GPU_CFG_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x32004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x32030); /* GCC_VIDEO_XO_CLK */
qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
index d7182d6e97837..1404017be9180 100644
--- a/drivers/clk/qcom/gcc-x1e80100.c
+++ b/drivers/clk/qcom/gcc-x1e80100.c
@@ -6769,14 +6769,14 @@ static int gcc_x1e80100_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Keep the critical clock always-On */
- regmap_update_bits(regmap, 0x26004, BIT(0), BIT(0)); /* gcc_camera_ahb_clk */
- regmap_update_bits(regmap, 0x26028, BIT(0), BIT(0)); /* gcc_camera_xo_clk */
- regmap_update_bits(regmap, 0x27004, BIT(0), BIT(0)); /* gcc_disp_ahb_clk */
- regmap_update_bits(regmap, 0x27018, BIT(0), BIT(0)); /* gcc_disp_xo_clk */
- regmap_update_bits(regmap, 0x32004, BIT(0), BIT(0)); /* gcc_video_ahb_clk */
- regmap_update_bits(regmap, 0x32030, BIT(0), BIT(0)); /* gcc_video_xo_clk */
- regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0)); /* gcc_gpu_cfg_ahb_clk */
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x26004); /* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x26028); /* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x27004); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x27018); /* GCC_DISP_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x32004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x32030); /* GCC_VIDEO_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x71004); /* GCC_GPU_CFG_AHB_CLK */
/* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */
regmap_write(regmap, 0x52224, 0x0);
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index 5358e28122abe..e7a4068b9f390 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -557,7 +557,15 @@ void gdsc_unregister(struct gdsc_desc *desc)
*/
int gdsc_gx_do_nothing_enable(struct generic_pm_domain *domain)
{
- /* Do nothing but give genpd the impression that we were successful */
- return 0;
+ struct gdsc *sc = domain_to_gdsc(domain);
+ int ret = 0;
+
+ /* Enable the parent supply, when controlled through the regulator framework. */
+ if (sc->rsupply)
+ ret = regulator_enable(sc->rsupply);
+
+ /* Do nothing with the GDSC itself */
+
+ return ret;
}
EXPORT_SYMBOL_GPL(gdsc_gx_do_nothing_enable);
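The GX "do nothing" power-on hook still leaves the GDSC register itself untouched, but it now enables the parent supply when one is attached to the GDSC through the regulator framework (sc->rsupply). The gpucc-sc8280xp hunk below wires exactly such a supply up via .supply = "vdd-gfx".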
diff --git a/drivers/clk/qcom/gpucc-sa8775p.c b/drivers/clk/qcom/gpucc-sa8775p.c
index 26ecfa63be193..1167c42da39db 100644
--- a/drivers/clk/qcom/gpucc-sa8775p.c
+++ b/drivers/clk/qcom/gpucc-sa8775p.c
@@ -609,17 +609,7 @@ static struct platform_driver gpu_cc_sa8775p_driver = {
},
};
-static int __init gpu_cc_sa8775p_init(void)
-{
- return platform_driver_register(&gpu_cc_sa8775p_driver);
-}
-subsys_initcall(gpu_cc_sa8775p_init);
-
-static void __exit gpu_cc_sa8775p_exit(void)
-{
- platform_driver_unregister(&gpu_cc_sa8775p_driver);
-}
-module_exit(gpu_cc_sa8775p_exit);
+module_platform_driver(gpu_cc_sa8775p_driver);
MODULE_DESCRIPTION("SA8775P GPUCC driver");
MODULE_LICENSE("GPL");
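This and the following gpucc drivers drop their hand-rolled init/exit pairs in favour of module_platform_driver(). The macro generates the same register/unregister boilerplate, roughly as below, but note that registration moves from subsys_initcall time to an ordinary module/device initcall:

    /* Assumed expansion of module_platform_driver(drv): */
    static int __init drv_init(void)
    {
            return platform_driver_register(&drv);
    }
    module_init(drv_init);

    static void __exit drv_exit(void)
    {
            platform_driver_unregister(&drv);
    }
    module_exit(drv_exit);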
diff --git a/drivers/clk/qcom/gpucc-sc7180.c b/drivers/clk/qcom/gpucc-sc7180.c
index 3f92f0b43be60..66f5b48cbf879 100644
--- a/drivers/clk/qcom/gpucc-sc7180.c
+++ b/drivers/clk/qcom/gpucc-sc7180.c
@@ -252,17 +252,7 @@ static struct platform_driver gpu_cc_sc7180_driver = {
},
};
-static int __init gpu_cc_sc7180_init(void)
-{
- return platform_driver_register(&gpu_cc_sc7180_driver);
-}
-subsys_initcall(gpu_cc_sc7180_init);
-
-static void __exit gpu_cc_sc7180_exit(void)
-{
- platform_driver_unregister(&gpu_cc_sc7180_driver);
-}
-module_exit(gpu_cc_sc7180_exit);
+module_platform_driver(gpu_cc_sc7180_driver);
MODULE_DESCRIPTION("QTI GPU_CC SC7180 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gpucc-sc7280.c b/drivers/clk/qcom/gpucc-sc7280.c
index 1490cd45a654a..35b394feb68da 100644
--- a/drivers/clk/qcom/gpucc-sc7280.c
+++ b/drivers/clk/qcom/gpucc-sc7280.c
@@ -457,12 +457,9 @@ static int gpu_cc_sc7280_probe(struct platform_device *pdev)
clk_lucid_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
- /*
- * Keep the clocks always-ON
- * GPU_CC_CB_CLK, GPUCC_CX_GMU_CLK
- */
- regmap_update_bits(regmap, 0x1170, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x1098, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x1170); /* GPU_CC_CB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x1098); /* GPUCC_CX_GMU_CLK */
regmap_update_bits(regmap, 0x1098, BIT(13), BIT(13));
return qcom_cc_really_probe(pdev, &gpu_cc_sc7280_desc, regmap);
@@ -476,17 +473,7 @@ static struct platform_driver gpu_cc_sc7280_driver = {
},
};
-static int __init gpu_cc_sc7280_init(void)
-{
- return platform_driver_register(&gpu_cc_sc7280_driver);
-}
-subsys_initcall(gpu_cc_sc7280_init);
-
-static void __exit gpu_cc_sc7280_exit(void)
-{
- platform_driver_unregister(&gpu_cc_sc7280_driver);
-}
-module_exit(gpu_cc_sc7280_exit);
+module_platform_driver(gpu_cc_sc7280_driver);
MODULE_DESCRIPTION("QTI GPU_CC SC7280 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gpucc-sc8280xp.c b/drivers/clk/qcom/gpucc-sc8280xp.c
index 8e147ee294eef..3611d2d1823db 100644
--- a/drivers/clk/qcom/gpucc-sc8280xp.c
+++ b/drivers/clk/qcom/gpucc-sc8280xp.c
@@ -399,6 +399,7 @@ static struct gdsc gx_gdsc = {
},
.pwrsts = PWRSTS_OFF_ON,
.flags = CLAMP_IO | RETAIN_FF_ENABLE,
+ .supply = "vdd-gfx",
};
static struct gdsc *gpu_cc_sc8280xp_gdscs[] = {
@@ -444,12 +445,9 @@ static int gpu_cc_sc8280xp_probe(struct platform_device *pdev)
clk_lucid_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
clk_lucid_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
- /*
- * Keep the clocks always-ON
- * GPU_CC_CB_CLK, GPU_CC_CXO_CLK
- */
- regmap_update_bits(regmap, 0x1170, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x109c, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x1170); /* GPU_CC_CB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x109c); /* GPU_CC_CXO_CLK */
ret = qcom_cc_really_probe(pdev, &gpu_cc_sc8280xp_desc, regmap);
pm_runtime_put(&pdev->dev);
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index 970d7414bdf0e..c87c3215dfe35 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -203,17 +203,7 @@ static struct platform_driver gpu_cc_sdm845_driver = {
},
};
-static int __init gpu_cc_sdm845_init(void)
-{
- return platform_driver_register(&gpu_cc_sdm845_driver);
-}
-subsys_initcall(gpu_cc_sdm845_init);
-
-static void __exit gpu_cc_sdm845_exit(void)
-{
- platform_driver_unregister(&gpu_cc_sdm845_driver);
-}
-module_exit(gpu_cc_sdm845_exit);
+module_platform_driver(gpu_cc_sdm845_driver);
MODULE_DESCRIPTION("QTI GPUCC SDM845 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gpucc-sm8150.c b/drivers/clk/qcom/gpucc-sm8150.c
index c89a5b59ddb7c..135601629cba7 100644
--- a/drivers/clk/qcom/gpucc-sm8150.c
+++ b/drivers/clk/qcom/gpucc-sm8150.c
@@ -315,17 +315,7 @@ static struct platform_driver gpu_cc_sm8150_driver = {
},
};
-static int __init gpu_cc_sm8150_init(void)
-{
- return platform_driver_register(&gpu_cc_sm8150_driver);
-}
-subsys_initcall(gpu_cc_sm8150_init);
-
-static void __exit gpu_cc_sm8150_exit(void)
-{
- platform_driver_unregister(&gpu_cc_sm8150_driver);
-}
-module_exit(gpu_cc_sm8150_exit);
+module_platform_driver(gpu_cc_sm8150_driver);
MODULE_DESCRIPTION("QTI GPUCC SM8150 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gpucc-sm8250.c b/drivers/clk/qcom/gpucc-sm8250.c
index 9c1f8ce32da49..84f7f65c8d428 100644
--- a/drivers/clk/qcom/gpucc-sm8250.c
+++ b/drivers/clk/qcom/gpucc-sm8250.c
@@ -331,17 +331,7 @@ static struct platform_driver gpu_cc_sm8250_driver = {
},
};
-static int __init gpu_cc_sm8250_init(void)
-{
- return platform_driver_register(&gpu_cc_sm8250_driver);
-}
-subsys_initcall(gpu_cc_sm8250_init);
-
-static void __exit gpu_cc_sm8250_exit(void)
-{
- platform_driver_unregister(&gpu_cc_sm8250_driver);
-}
-module_exit(gpu_cc_sm8250_exit);
+module_platform_driver(gpu_cc_sm8250_driver);
MODULE_DESCRIPTION("QTI GPU_CC SM8250 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gpucc-sm8350.c b/drivers/clk/qcom/gpucc-sm8350.c
index 8dc54dff983f3..38505d1388b67 100644
--- a/drivers/clk/qcom/gpucc-sm8350.c
+++ b/drivers/clk/qcom/gpucc-sm8350.c
@@ -621,17 +621,7 @@ static struct platform_driver gpu_cc_sm8350_driver = {
},
};
-static int __init gpu_cc_sm8350_init(void)
-{
- return platform_driver_register(&gpu_cc_sm8350_driver);
-}
-subsys_initcall(gpu_cc_sm8350_init);
-
-static void __exit gpu_cc_sm8350_exit(void)
-{
- platform_driver_unregister(&gpu_cc_sm8350_driver);
-}
-module_exit(gpu_cc_sm8350_exit);
+module_platform_driver(gpu_cc_sm8350_driver);
MODULE_DESCRIPTION("QTI GPU_CC SM8350 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gpucc-sm8550.c b/drivers/clk/qcom/gpucc-sm8550.c
index 2fa8673424d78..4fc69c6026e5e 100644
--- a/drivers/clk/qcom/gpucc-sm8550.c
+++ b/drivers/clk/qcom/gpucc-sm8550.c
@@ -575,13 +575,9 @@ static int gpu_cc_sm8550_probe(struct platform_device *pdev)
clk_lucid_ole_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
clk_lucid_ole_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
- /*
- * Keep clocks always enabled:
- * gpu_cc_cxo_aon_clk
- * gpu_cc_demet_clk
- */
- regmap_update_bits(regmap, 0x9004, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x900c, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x9004); /* GPU_CC_CXO_AON_CLK */
+ qcom_branch_set_clk_en(regmap, 0x900c); /* GPU_CC_DEMET_CLK */
return qcom_cc_really_probe(pdev, &gpu_cc_sm8550_desc, regmap);
}
@@ -594,17 +590,7 @@ static struct platform_driver gpu_cc_sm8550_driver = {
},
};
-static int __init gpu_cc_sm8550_init(void)
-{
- return platform_driver_register(&gpu_cc_sm8550_driver);
-}
-subsys_initcall(gpu_cc_sm8550_init);
-
-static void __exit gpu_cc_sm8550_exit(void)
-{
- platform_driver_unregister(&gpu_cc_sm8550_driver);
-}
-module_exit(gpu_cc_sm8550_exit);
+module_platform_driver(gpu_cc_sm8550_driver);
MODULE_DESCRIPTION("QTI GPUCC SM8550 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gpucc-x1e80100.c b/drivers/clk/qcom/gpucc-x1e80100.c
new file mode 100644
index 0000000000000..b7e79d118d6ef
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-x1e80100.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,x1e80100-gpucc.h>
+#include <dt-bindings/reset/qcom,x1e80100-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0_OUT_MAIN,
+ DT_GPLL0_OUT_MAIN_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+static const struct pll_vco zonda_ole_vco[] = {
+ { 700000000, 3600000000, 0 },
+};
+
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x29,
+ .alpha = 0xa000,
+ .config_ctl_val = 0x08240800,
+ .config_ctl_hi_val = 0x05008001,
+ .config_ctl_hi1_val = 0x00000000,
+ .config_ctl_hi2_val = 0x00000000,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x02000000,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = zonda_ole_vco,
+ .num_vco = ARRAY_SIZE(zonda_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_zonda_ole_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x16,
+ .alpha = 0xeaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_ff_clk_src = {
+ .cmd_rcgr = 0x9474,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ff_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(220000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ F(550000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x9318,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x93ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_2,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gpu_cc_xo_clk_src = {
+ .cmd_rcgr = 0x9010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_3,
+ .freq_tbl = NULL,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_xo_clk_src",
+ .parent_data = gpu_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_demet_div_clk_src = {
+ .reg = 0x9054,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_demet_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_xo_div_clk_src = {
+ .reg = 0x9050,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_xo_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x911c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x911c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x9120,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_ff_clk = {
+ .halt_reg = 0x914c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x914c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x913c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x913c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x9004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_aon_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x9144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_demet_clk = {
+ .halt_reg = 0x900c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_demet_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_demet_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_freq_measure_clk = {
+ .halt_reg = 0x9008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_freq_measure_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x7000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x90bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_vsense_clk = {
+ .halt_reg = 0x90b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x90b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_vsense_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x93e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x93e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x9148,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9148,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_memnoc_gfx_clk = {
+ .halt_reg = 0x9150,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9150,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_0_gfx3d_clk = {
+ .halt_reg = 0x9288,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9288,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_mnd1x_0_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_1_gfx3d_clk = {
+ .halt_reg = 0x928c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x928c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_mnd1x_1_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x9134,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9134,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+ .gdscr = 0x9108,
+ .gds_hw_ctrl = 0x953c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gpu_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x905c,
+ .clamp_io_ctrl = 0x9504,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gpu_gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | AON_RESET | SW_RESET | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *gpu_cc_x1e80100_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_DEMET_CLK] = &gpu_cc_demet_clk.clkr,
+ [GPU_CC_DEMET_DIV_CLK_SRC] = &gpu_cc_demet_div_clk_src.clkr,
+ [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
+ [GPU_CC_FREQ_MEASURE_CLK] = &gpu_cc_freq_measure_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr,
+ [GPU_CC_MND1X_0_GFX3D_CLK] = &gpu_cc_mnd1x_0_gfx3d_clk.clkr,
+ [GPU_CC_MND1X_1_GFX3D_CLK] = &gpu_cc_mnd1x_1_gfx3d_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+ [GPU_CC_XO_CLK_SRC] = &gpu_cc_xo_clk_src.clkr,
+ [GPU_CC_XO_DIV_CLK_SRC] = &gpu_cc_xo_div_clk_src.clkr,
+};
+
+static const struct qcom_reset_map gpu_cc_x1e80100_resets[] = {
+ [GPUCC_GPU_CC_XO_BCR] = { 0x9000 },
+ [GPUCC_GPU_CC_GX_BCR] = { 0x9058 },
+ [GPUCC_GPU_CC_CX_BCR] = { 0x9104 },
+ [GPUCC_GPU_CC_GFX3D_AON_BCR] = { 0x9198 },
+ [GPUCC_GPU_CC_ACD_BCR] = { 0x9358 },
+ [GPUCC_GPU_CC_FAST_HUB_BCR] = { 0x93e4 },
+ [GPUCC_GPU_CC_FF_BCR] = { 0x9470 },
+ [GPUCC_GPU_CC_GMU_BCR] = { 0x9314 },
+ [GPUCC_GPU_CC_CB_BCR] = { 0x93a0 },
+};
+
+static struct gdsc *gpu_cc_x1e80100_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct regmap_config gpu_cc_x1e80100_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9988,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_x1e80100_desc = {
+ .config = &gpu_cc_x1e80100_regmap_config,
+ .clks = gpu_cc_x1e80100_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_x1e80100_clocks),
+ .resets = gpu_cc_x1e80100_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_x1e80100_resets),
+ .gdscs = gpu_cc_x1e80100_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_x1e80100_gdscs),
+};
+
+static const struct of_device_id gpu_cc_x1e80100_match_table[] = {
+ { .compatible = "qcom,x1e80100-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_x1e80100_match_table);
+
+static int gpu_cc_x1e80100_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_x1e80100_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_zonda_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ /* Keep clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x93a4); /* GPU_CC_CB_CLK */
+
+ return qcom_cc_really_probe(pdev, &gpu_cc_x1e80100_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_x1e80100_driver = {
+ .probe = gpu_cc_x1e80100_probe,
+ .driver = {
+ .name = "gpucc-x1e80100",
+ .of_match_table = gpu_cc_x1e80100_match_table,
+ },
+};
+module_platform_driver(gpu_cc_x1e80100_driver);
+
+MODULE_DESCRIPTION("QTI GPU Clock Controller X1E80100 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c
index 9051fd5671125..fd9cd2e3f9565 100644
--- a/drivers/clk/qcom/lpasscorecc-sc7180.c
+++ b/drivers/clk/qcom/lpasscorecc-sc7180.c
@@ -401,11 +401,8 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
goto exit;
}
- /*
- * Keep the CLK always-ON
- * LPASS_AUDIO_CORE_SYSNOC_SWAY_CORE_CLK
- */
- regmap_update_bits(regmap, 0x24000, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x24000); /* LPASS_AUDIO_CORE_SYSNOC_SWAY_CORE_CLK */
/* PLL settings */
regmap_write(regmap, 0x1008, 0x20);
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index 02fc21208dd14..c89700ab93f9c 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -348,6 +348,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = {
F(333430000, P_MMPLL1, 3.5, 0, 0),
F(400000000, P_MMPLL0, 2, 0, 0),
F(466800000, P_MMPLL1, 2.5, 0, 0),
+ { }
};
static struct clk_rcg2 mmss_axi_clk_src = {
@@ -372,6 +373,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = {
F(150000000, P_GPLL0, 4, 0, 0),
F(228570000, P_MMPLL0, 3.5, 0, 0),
F(320000000, P_MMPLL0, 2.5, 0, 0),
+ { }
};
static struct clk_rcg2 ocmemnoc_clk_src = {
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index a31f6cf0c4e0c..36f460b78be2c 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -290,6 +290,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = {
F(291750000, P_MMPLL1, 4, 0, 0),
F(400000000, P_MMPLL0, 2, 0, 0),
F(466800000, P_MMPLL1, 2.5, 0, 0),
+ { }
};
static struct clk_rcg2 mmss_axi_clk_src = {
@@ -314,6 +315,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = {
F(150000000, P_GPLL0, 4, 0, 0),
F(291750000, P_MMPLL1, 4, 0, 0),
F(400000000, P_MMPLL0, 2, 0, 0),
+ { }
};
static struct clk_rcg2 ocmemnoc_clk_src = {
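The bare { } entries added to ftbl_mmss_axi_clk and ftbl_ocmemnoc_clk in both mmcc drivers are sentinel terminators: the RCG rate lookup walks the table until it reaches an all-zero entry, so a table without one reads past the end of the array. An illustrative version of the lookup pattern being relied on (the real helper is qcom_find_freq(); this sketch only shows the termination behaviour):

    /* Illustrative only: walks a zero-terminated freq_tbl and stops at
     * the empty sentinel instead of running off the array. */
    static const struct freq_tbl *find_freq(const struct freq_tbl *f,
                                            unsigned long rate)
    {
            if (!f || !f->freq)
                    return f;

            for (; f->freq; f++)
                    if (rate <= f->freq)
                            return f;

            /* Nothing matched: default to the last (fastest) entry. */
            return f - 1;
    }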
diff --git a/drivers/clk/qcom/mss-sc7180.c b/drivers/clk/qcom/mss-sc7180.c
deleted file mode 100644
index d106bc65470e1..0000000000000
--- a/drivers/clk/qcom/mss-sc7180.c
+++ /dev/null
@@ -1,140 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/clk-provider.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/pm_clock.h>
-#include <linux/pm_runtime.h>
-#include <linux/regmap.h>
-
-#include <dt-bindings/clock/qcom,mss-sc7180.h>
-
-#include "clk-regmap.h"
-#include "clk-branch.h"
-#include "common.h"
-
-static struct clk_branch mss_axi_nav_clk = {
- .halt_reg = 0x20bc,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x20bc,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mss_axi_nav_clk",
- .parent_data = &(const struct clk_parent_data){
- .fw_name = "gcc_mss_nav_axi",
- },
- .num_parents = 1,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch mss_axi_crypto_clk = {
- .halt_reg = 0x20cc,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x20cc,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mss_axi_crypto_clk",
- .parent_data = &(const struct clk_parent_data){
- .fw_name = "gcc_mss_mfab_axis",
- },
- .num_parents = 1,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static const struct regmap_config mss_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .fast_io = true,
- .max_register = 0x41aa0cc,
-};
-
-static struct clk_regmap *mss_sc7180_clocks[] = {
- [MSS_AXI_CRYPTO_CLK] = &mss_axi_crypto_clk.clkr,
- [MSS_AXI_NAV_CLK] = &mss_axi_nav_clk.clkr,
-};
-
-static const struct qcom_cc_desc mss_sc7180_desc = {
- .config = &mss_regmap_config,
- .clks = mss_sc7180_clocks,
- .num_clks = ARRAY_SIZE(mss_sc7180_clocks),
-};
-
-static int mss_sc7180_probe(struct platform_device *pdev)
-{
- int ret;
-
- ret = devm_pm_runtime_enable(&pdev->dev);
- if (ret)
- return ret;
-
- ret = devm_pm_clk_create(&pdev->dev);
- if (ret)
- return ret;
-
- ret = pm_clk_add(&pdev->dev, "cfg_ahb");
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to acquire iface clock\n");
- return ret;
- }
-
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret)
- return ret;
-
- ret = qcom_cc_probe(pdev, &mss_sc7180_desc);
- if (ret < 0)
- goto err_put_rpm;
-
- pm_runtime_put(&pdev->dev);
-
- return 0;
-
-err_put_rpm:
- pm_runtime_put_sync(&pdev->dev);
-
- return ret;
-}
-
-static const struct dev_pm_ops mss_sc7180_pm_ops = {
- SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
-};
-
-static const struct of_device_id mss_sc7180_match_table[] = {
- { .compatible = "qcom,sc7180-mss" },
- { }
-};
-MODULE_DEVICE_TABLE(of, mss_sc7180_match_table);
-
-static struct platform_driver mss_sc7180_driver = {
- .probe = mss_sc7180_probe,
- .driver = {
- .name = "sc7180-mss",
- .of_match_table = mss_sc7180_match_table,
- .pm = &mss_sc7180_pm_ops,
- },
-};
-
-static int __init mss_sc7180_init(void)
-{
- return platform_driver_register(&mss_sc7180_driver);
-}
-subsys_initcall(mss_sc7180_init);
-
-static void __exit mss_sc7180_exit(void)
-{
- platform_driver_unregister(&mss_sc7180_driver);
-}
-module_exit(mss_sc7180_exit);
-
-MODULE_DESCRIPTION("QTI MSS SC7180 Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
index e45e32804d2c7..d96c96a9089f4 100644
--- a/drivers/clk/qcom/reset.c
+++ b/drivers/clk/qcom/reset.c
@@ -22,8 +22,8 @@ static int qcom_reset(struct reset_controller_dev *rcdev, unsigned long id)
return 0;
}
-static int
-qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+static int qcom_reset_set_assert(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
{
struct qcom_reset_controller *rst;
const struct qcom_reset_map *map;
@@ -33,21 +33,22 @@ qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
map = &rst->reset_map[id];
mask = map->bitmask ? map->bitmask : BIT(map->bit);
- return regmap_update_bits(rst->regmap, map->reg, mask, mask);
+ regmap_update_bits(rst->regmap, map->reg, mask, assert ? mask : 0);
+
+ /* Read back the register to ensure write completion, ignore the value */
+ regmap_read(rst->regmap, map->reg, &mask);
+
+ return 0;
}
-static int
-qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+static int qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
- struct qcom_reset_controller *rst;
- const struct qcom_reset_map *map;
- u32 mask;
-
- rst = to_qcom_reset_controller(rcdev);
- map = &rst->reset_map[id];
- mask = map->bitmask ? map->bitmask : BIT(map->bit);
+ return qcom_reset_set_assert(rcdev, id, true);
+}
- return regmap_update_bits(rst->regmap, map->reg, mask, 0);
+static int qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ return qcom_reset_set_assert(rcdev, id, false);
}
const struct reset_control_ops qcom_reset_ops = {
diff --git a/drivers/clk/qcom/reset.h b/drivers/clk/qcom/reset.h
index 9a47c838d9b1b..fe0561bf53d49 100644
--- a/drivers/clk/qcom/reset.h
+++ b/drivers/clk/qcom/reset.h
@@ -11,7 +11,7 @@
struct qcom_reset_map {
unsigned int reg;
u8 bit;
- u8 udelay;
+ u16 udelay;
u32 bitmask;
};
diff --git a/drivers/clk/qcom/tcsrcc-x1e80100.c b/drivers/clk/qcom/tcsrcc-x1e80100.c
new file mode 100644
index 0000000000000..ff61769a08077
--- /dev/null
+++ b/drivers/clk/qcom/tcsrcc-x1e80100.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,x1e80100-tcsr.h>
+
+#include "clk-branch.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO_PAD,
+};
+
+static struct clk_branch tcsr_edp_clkref_en = {
+ .halt_reg = 0x15130,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x15130,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_edp_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_2l_4_clkref_en = {
+ .halt_reg = 0x15100,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x15100,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_pcie_2l_4_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_2l_5_clkref_en = {
+ .halt_reg = 0x15104,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x15104,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_pcie_2l_5_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_8l_clkref_en = {
+ .halt_reg = 0x15108,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x15108,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_pcie_8l_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb3_mp0_clkref_en = {
+ .halt_reg = 0x1510c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1510c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_usb3_mp0_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb3_mp1_clkref_en = {
+ .halt_reg = 0x15110,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x15110,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_usb3_mp1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_1_clkref_en = {
+ .halt_reg = 0x15114,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x15114,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_usb2_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_ufs_phy_clkref_en = {
+ .halt_reg = 0x15118,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x15118,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_ufs_phy_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb4_1_clkref_en = {
+ .halt_reg = 0x15120,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x15120,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_usb4_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb4_2_clkref_en = {
+ .halt_reg = 0x15124,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x15124,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_usb4_2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_2_clkref_en = {
+ .halt_reg = 0x15128,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x15128,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_usb2_2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_4l_clkref_en = {
+ .halt_reg = 0x1512c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1512c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_pcie_4l_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *tcsr_cc_x1e80100_clocks[] = {
+ [TCSR_EDP_CLKREF_EN] = &tcsr_edp_clkref_en.clkr,
+ [TCSR_PCIE_2L_4_CLKREF_EN] = &tcsr_pcie_2l_4_clkref_en.clkr,
+ [TCSR_PCIE_2L_5_CLKREF_EN] = &tcsr_pcie_2l_5_clkref_en.clkr,
+ [TCSR_PCIE_8L_CLKREF_EN] = &tcsr_pcie_8l_clkref_en.clkr,
+ [TCSR_USB3_MP0_CLKREF_EN] = &tcsr_usb3_mp0_clkref_en.clkr,
+ [TCSR_USB3_MP1_CLKREF_EN] = &tcsr_usb3_mp1_clkref_en.clkr,
+ [TCSR_USB2_1_CLKREF_EN] = &tcsr_usb2_1_clkref_en.clkr,
+ [TCSR_UFS_PHY_CLKREF_EN] = &tcsr_ufs_phy_clkref_en.clkr,
+ [TCSR_USB4_1_CLKREF_EN] = &tcsr_usb4_1_clkref_en.clkr,
+ [TCSR_USB4_2_CLKREF_EN] = &tcsr_usb4_2_clkref_en.clkr,
+ [TCSR_USB2_2_CLKREF_EN] = &tcsr_usb2_2_clkref_en.clkr,
+ [TCSR_PCIE_4L_CLKREF_EN] = &tcsr_pcie_4l_clkref_en.clkr,
+};
+
+static const struct regmap_config tcsr_cc_x1e80100_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x2f000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc tcsr_cc_x1e80100_desc = {
+ .config = &tcsr_cc_x1e80100_regmap_config,
+ .clks = tcsr_cc_x1e80100_clocks,
+ .num_clks = ARRAY_SIZE(tcsr_cc_x1e80100_clocks),
+};
+
+static const struct of_device_id tcsr_cc_x1e80100_match_table[] = {
+ { .compatible = "qcom,x1e80100-tcsr" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tcsr_cc_x1e80100_match_table);
+
+static int tcsr_cc_x1e80100_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &tcsr_cc_x1e80100_desc);
+}
+
+static struct platform_driver tcsr_cc_x1e80100_driver = {
+ .probe = tcsr_cc_x1e80100_probe,
+ .driver = {
+ .name = "tcsrcc-x1e80100",
+ .of_match_table = tcsr_cc_x1e80100_match_table,
+ },
+};
+
+static int __init tcsr_cc_x1e80100_init(void)
+{
+ return platform_driver_register(&tcsr_cc_x1e80100_driver);
+}
+subsys_initcall(tcsr_cc_x1e80100_init);
+
+static void __exit tcsr_cc_x1e80100_exit(void)
+{
+ platform_driver_unregister(&tcsr_cc_x1e80100_driver);
+}
+module_exit(tcsr_cc_x1e80100_exit);
+
+MODULE_DESCRIPTION("QTI TCSR Clock Controller X1E80100 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/videocc-sc7180.c b/drivers/clk/qcom/videocc-sc7180.c
index 5b9b54f616b85..ae0f812f83e8e 100644
--- a/drivers/clk/qcom/videocc-sc7180.c
+++ b/drivers/clk/qcom/videocc-sc7180.c
@@ -237,17 +237,7 @@ static struct platform_driver video_cc_sc7180_driver = {
},
};
-static int __init video_cc_sc7180_init(void)
-{
- return platform_driver_register(&video_cc_sc7180_driver);
-}
-subsys_initcall(video_cc_sc7180_init);
-
-static void __exit video_cc_sc7180_exit(void)
-{
- platform_driver_unregister(&video_cc_sc7180_driver);
-}
-module_exit(video_cc_sc7180_exit);
+module_platform_driver(video_cc_sc7180_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI VIDEOCC SC7180 Driver");
diff --git a/drivers/clk/qcom/videocc-sc7280.c b/drivers/clk/qcom/videocc-sc7280.c
index 615695d823190..cdd59c6f60df8 100644
--- a/drivers/clk/qcom/videocc-sc7280.c
+++ b/drivers/clk/qcom/videocc-sc7280.c
@@ -309,17 +309,7 @@ static struct platform_driver video_cc_sc7280_driver = {
},
};
-static int __init video_cc_sc7280_init(void)
-{
- return platform_driver_register(&video_cc_sc7280_driver);
-}
-subsys_initcall(video_cc_sc7280_init);
-
-static void __exit video_cc_sc7280_exit(void)
-{
- platform_driver_unregister(&video_cc_sc7280_driver);
-}
-module_exit(video_cc_sc7280_exit);
+module_platform_driver(video_cc_sc7280_driver);
MODULE_DESCRIPTION("QTI VIDEO_CC sc7280 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index c77a4dd5d39c9..b7f21ecad9612 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -340,16 +340,6 @@ static struct platform_driver video_cc_sdm845_driver = {
},
};
-static int __init video_cc_sdm845_init(void)
-{
- return platform_driver_register(&video_cc_sdm845_driver);
-}
-subsys_initcall(video_cc_sdm845_init);
-
-static void __exit video_cc_sdm845_exit(void)
-{
- platform_driver_unregister(&video_cc_sdm845_driver);
-}
-module_exit(video_cc_sdm845_exit);
+module_platform_driver(video_cc_sdm845_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/videocc-sm8150.c b/drivers/clk/qcom/videocc-sm8150.c
index f1456eaa87c40..a0329260157a0 100644
--- a/drivers/clk/qcom/videocc-sm8150.c
+++ b/drivers/clk/qcom/videocc-sm8150.c
@@ -215,7 +215,7 @@ static const struct regmap_config video_cc_sm8150_regmap_config = {
};
static const struct qcom_reset_map video_cc_sm8150_resets[] = {
- [VIDEO_CC_MVSC_CORE_CLK_BCR] = { 0x850, 2 },
+ [VIDEO_CC_MVSC_CORE_CLK_BCR] = { .reg = 0x850, .bit = 2, .udelay = 150 },
[VIDEO_CC_INTERFACE_BCR] = { 0x8f0 },
[VIDEO_CC_MVS0_BCR] = { 0x870 },
[VIDEO_CC_MVS1_BCR] = { 0x8b0 },
@@ -277,17 +277,7 @@ static struct platform_driver video_cc_sm8150_driver = {
},
};
-static int __init video_cc_sm8150_init(void)
-{
- return platform_driver_register(&video_cc_sm8150_driver);
-}
-subsys_initcall(video_cc_sm8150_init);
-
-static void __exit video_cc_sm8150_exit(void)
-{
- platform_driver_unregister(&video_cc_sm8150_driver);
-}
-module_exit(video_cc_sm8150_exit);
+module_platform_driver(video_cc_sm8150_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI VIDEOCC SM8150 Driver");
diff --git a/drivers/clk/qcom/videocc-sm8250.c b/drivers/clk/qcom/videocc-sm8250.c
index ad46c4014a405..016b596e03b30 100644
--- a/drivers/clk/qcom/videocc-sm8250.c
+++ b/drivers/clk/qcom/videocc-sm8250.c
@@ -323,10 +323,10 @@ static struct clk_regmap *video_cc_sm8250_clocks[] = {
static const struct qcom_reset_map video_cc_sm8250_resets[] = {
[VIDEO_CC_CVP_INTERFACE_BCR] = { 0xe54 },
[VIDEO_CC_CVP_MVS0_BCR] = { 0xd14 },
- [VIDEO_CC_MVS0C_CLK_ARES] = { 0xc34, 2 },
+ [VIDEO_CC_MVS0C_CLK_ARES] = { 0xc34, .bit = 2, .udelay = 150 },
[VIDEO_CC_CVP_MVS0C_BCR] = { 0xbf4 },
[VIDEO_CC_CVP_MVS1_BCR] = { 0xd94 },
- [VIDEO_CC_MVS1C_CLK_ARES] = { 0xcd4, 2 },
+ [VIDEO_CC_MVS1C_CLK_ARES] = { 0xcd4, .bit = 2, .udelay = 150 },
[VIDEO_CC_CVP_MVS1C_BCR] = { 0xc94 },
};
@@ -383,9 +383,9 @@ static int video_cc_sm8250_probe(struct platform_device *pdev)
clk_lucid_pll_configure(&video_pll0, regmap, &video_pll0_config);
clk_lucid_pll_configure(&video_pll1, regmap, &video_pll1_config);
- /* Keep VIDEO_CC_AHB_CLK and VIDEO_CC_XO_CLK ALWAYS-ON */
- regmap_update_bits(regmap, 0xe58, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0xeec, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0xe58); /* VIDEO_CC_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xeec); /* VIDEO_CC_XO_CLK */
ret = qcom_cc_really_probe(pdev, &video_cc_sm8250_desc, regmap);
@@ -402,17 +402,7 @@ static struct platform_driver video_cc_sm8250_driver = {
},
};
-static int __init video_cc_sm8250_init(void)
-{
- return platform_driver_register(&video_cc_sm8250_driver);
-}
-subsys_initcall(video_cc_sm8250_init);
-
-static void __exit video_cc_sm8250_exit(void)
-{
- platform_driver_unregister(&video_cc_sm8250_driver);
-}
-module_exit(video_cc_sm8250_exit);
+module_platform_driver(video_cc_sm8250_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI VIDEOCC SM8250 Driver");
diff --git a/drivers/clk/qcom/videocc-sm8350.c b/drivers/clk/qcom/videocc-sm8350.c
index 7246f3c994922..f7aec28d4c872 100644
--- a/drivers/clk/qcom/videocc-sm8350.c
+++ b/drivers/clk/qcom/videocc-sm8350.c
@@ -488,10 +488,10 @@ static struct clk_regmap *video_cc_sm8350_clocks[] = {
static const struct qcom_reset_map video_cc_sm8350_resets[] = {
[VIDEO_CC_CVP_INTERFACE_BCR] = { 0xe54 },
[VIDEO_CC_CVP_MVS0_BCR] = { 0xd14 },
- [VIDEO_CC_MVS0C_CLK_ARES] = { 0xc34, 2 },
+ [VIDEO_CC_MVS0C_CLK_ARES] = { .reg = 0xc34, .bit = 2, .udelay = 400 },
[VIDEO_CC_CVP_MVS0C_BCR] = { 0xbf4 },
[VIDEO_CC_CVP_MVS1_BCR] = { 0xd94 },
- [VIDEO_CC_MVS1C_CLK_ARES] = { 0xcd4, 2 },
+ [VIDEO_CC_MVS1C_CLK_ARES] = { .reg = 0xcd4, .bit = 2, .udelay = 400 },
[VIDEO_CC_CVP_MVS1C_BCR] = { 0xc94 },
};
@@ -558,13 +558,9 @@ static int video_cc_sm8350_probe(struct platform_device *pdev)
clk_lucid_pll_configure(&video_pll0, regmap, &video_pll0_config);
clk_lucid_pll_configure(&video_pll1, regmap, &video_pll1_config);
- /*
- * Keep clocks always enabled:
- * video_cc_ahb_clk
- * video_cc_xo_clk
- */
- regmap_update_bits(regmap, 0xe58, BIT(0), BIT(0));
- regmap_update_bits(regmap, video_cc_xo_clk_cbcr, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0xe58); /* VIDEO_CC_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, video_cc_xo_clk_cbcr); /* VIDEO_CC_XO_CLK */
ret = qcom_cc_really_probe(pdev, &video_cc_sm8350_desc, regmap);
pm_runtime_put(&pdev->dev);
diff --git a/drivers/clk/qcom/videocc-sm8450.c b/drivers/clk/qcom/videocc-sm8450.c
index 16a61146e6195..67df40f164231 100644
--- a/drivers/clk/qcom/videocc-sm8450.c
+++ b/drivers/clk/qcom/videocc-sm8450.c
@@ -373,8 +373,8 @@ static const struct qcom_reset_map video_cc_sm8450_resets[] = {
[CVP_VIDEO_CC_MVS0C_BCR] = { 0x8048 },
[CVP_VIDEO_CC_MVS1_BCR] = { 0x80bc },
[CVP_VIDEO_CC_MVS1C_BCR] = { 0x8070 },
- [VIDEO_CC_MVS0C_CLK_ARES] = { 0x8064, 2 },
- [VIDEO_CC_MVS1C_CLK_ARES] = { 0x808c, 2 },
+ [VIDEO_CC_MVS0C_CLK_ARES] = { .reg = 0x8064, .bit = 2, .udelay = 1000 },
+ [VIDEO_CC_MVS1C_CLK_ARES] = { .reg = 0x808c, .bit = 2, .udelay = 1000 },
};
static const struct regmap_config video_cc_sm8450_regmap_config = {
@@ -423,15 +423,10 @@ static int video_cc_sm8450_probe(struct platform_device *pdev)
clk_lucid_evo_pll_configure(&video_cc_pll0, regmap, &video_cc_pll0_config);
clk_lucid_evo_pll_configure(&video_cc_pll1, regmap, &video_cc_pll1_config);
- /*
- * Keep clocks always enabled:
- * video_cc_ahb_clk
- * video_cc_sleep_clk
- * video_cc_xo_clk
- */
- regmap_update_bits(regmap, 0x80e4, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x8130, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x8114, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x80e4); /* VIDEO_CC_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x8130); /* VIDEO_CC_SLEEP_CLK */
+ qcom_branch_set_clk_en(regmap, 0x8114); /* VIDEO_CC_XO_CLK */
ret = qcom_cc_really_probe(pdev, &video_cc_sm8450_desc, regmap);
@@ -448,17 +443,7 @@ static struct platform_driver video_cc_sm8450_driver = {
},
};
-static int __init video_cc_sm8450_init(void)
-{
- return platform_driver_register(&video_cc_sm8450_driver);
-}
-subsys_initcall(video_cc_sm8450_init);
-
-static void __exit video_cc_sm8450_exit(void)
-{
- platform_driver_unregister(&video_cc_sm8450_driver);
-}
-module_exit(video_cc_sm8450_exit);
+module_platform_driver(video_cc_sm8450_driver);
MODULE_DESCRIPTION("QTI VIDEOCC SM8450 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/videocc-sm8550.c b/drivers/clk/qcom/videocc-sm8550.c
index f3c9dfaee968f..d73f747d24740 100644
--- a/drivers/clk/qcom/videocc-sm8550.c
+++ b/drivers/clk/qcom/videocc-sm8550.c
@@ -378,8 +378,8 @@ static const struct qcom_reset_map video_cc_sm8550_resets[] = {
[CVP_VIDEO_CC_MVS0C_BCR] = { 0x8048 },
[CVP_VIDEO_CC_MVS1_BCR] = { 0x80c8 },
[CVP_VIDEO_CC_MVS1C_BCR] = { 0x8074 },
- [VIDEO_CC_MVS0C_CLK_ARES] = { 0x8064, 2 },
- [VIDEO_CC_MVS1C_CLK_ARES] = { 0x8090, 2 },
+ [VIDEO_CC_MVS0C_CLK_ARES] = { .reg = 0x8064, .bit = 2, .udelay = 1000 },
+ [VIDEO_CC_MVS1C_CLK_ARES] = { .reg = 0x8090, .bit = 2, .udelay = 1000 },
};
static const struct regmap_config video_cc_sm8550_regmap_config = {
@@ -428,15 +428,10 @@ static int video_cc_sm8550_probe(struct platform_device *pdev)
clk_lucid_ole_pll_configure(&video_cc_pll0, regmap, &video_cc_pll0_config);
clk_lucid_ole_pll_configure(&video_cc_pll1, regmap, &video_cc_pll1_config);
- /*
- * Keep clocks always enabled:
- * video_cc_ahb_clk
- * video_cc_sleep_clk
- * video_cc_xo_clk
- */
- regmap_update_bits(regmap, 0x80f4, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x8140, BIT(0), BIT(0));
- regmap_update_bits(regmap, 0x8124, BIT(0), BIT(0));
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x80f4); /* VIDEO_CC_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x8140); /* VIDEO_CC_SLEEP_CLK */
+ qcom_branch_set_clk_en(regmap, 0x8124); /* VIDEO_CC_XO_CLK */
ret = qcom_cc_really_probe(pdev, &video_cc_sm8550_desc, regmap);
@@ -453,17 +448,7 @@ static struct platform_driver video_cc_sm8550_driver = {
},
};
-static int __init video_cc_sm8550_init(void)
-{
- return platform_driver_register(&video_cc_sm8550_driver);
-}
-subsys_initcall(video_cc_sm8550_init);
-
-static void __exit video_cc_sm8550_exit(void)
-{
- platform_driver_unregister(&video_cc_sm8550_driver);
-}
-module_exit(video_cc_sm8550_exit);
+module_platform_driver(video_cc_sm8550_driver);
MODULE_DESCRIPTION("QTI VIDEOCC SM8550 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 69396e1979590..d252150402e86 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -33,6 +33,7 @@ config CLK_RENESAS
select CLK_R8A779A0 if ARCH_R8A779A0
select CLK_R8A779F0 if ARCH_R8A779F0
select CLK_R8A779G0 if ARCH_R8A779G0
+ select CLK_R8A779H0 if ARCH_R8A779H0
select CLK_R9A06G032 if ARCH_R9A06G032
select CLK_R9A07G043 if ARCH_R9A07G043
select CLK_R9A07G044 if ARCH_R9A07G044
@@ -165,6 +166,10 @@ config CLK_R8A779G0
bool "R-Car V4H clock support" if COMPILE_TEST
select CLK_RCAR_GEN4_CPG
+config CLK_R8A779H0
+ bool "R-Car V4M clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN4_CPG
+
config CLK_R9A06G032
bool "RZ/N1D clock support" if COMPILE_TEST
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 879a07d445f90..f7e18679c3b81 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_CLK_R8A77995) += r8a77995-cpg-mssr.o
obj-$(CONFIG_CLK_R8A779A0) += r8a779a0-cpg-mssr.o
obj-$(CONFIG_CLK_R8A779F0) += r8a779f0-cpg-mssr.o
obj-$(CONFIG_CLK_R8A779G0) += r8a779g0-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A779H0) += r8a779h0-cpg-mssr.o
obj-$(CONFIG_CLK_R9A06G032) += r9a06g032-clocks.o
obj-$(CONFIG_CLK_R9A07G043) += r9a07g043-cpg.o
obj-$(CONFIG_CLK_R9A07G044) += r9a07g044-cpg.o
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 6280f4dfed714..5304c977562fc 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -10,7 +10,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/clk/renesas.h>
#include <linux/device.h>
#include <linux/io.h>
@@ -19,6 +18,7 @@
#include <linux/of_address.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
/*
@@ -237,22 +237,12 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
clks[clkidx] = cpg_mstp_clock_register(name, parent_name,
clkidx, group);
- if (!IS_ERR(clks[clkidx])) {
+ if (!IS_ERR(clks[clkidx]))
group->data.clk_num = max(group->data.clk_num,
clkidx + 1);
- /*
- * Register a clkdev to let board code retrieve the
- * clock by name and register aliases for non-DT
- * devices.
- *
- * FIXME: Remove this when all devices that require a
- * clock will be instantiated from DT.
- */
- clk_register_clkdev(clks[clkidx], name, NULL);
- } else {
+ else
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clks[clkidx]));
- }
}
of_clk_add_provider(np, of_clk_src_onecell_get, &group->data);
diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
index f721835c7e212..cc06127406ab5 100644
--- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
@@ -161,7 +161,7 @@ static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
DEF_MOD("cmt1", 911, R8A779F0_CLK_R),
DEF_MOD("cmt2", 912, R8A779F0_CLK_R),
DEF_MOD("cmt3", 913, R8A779F0_CLK_R),
- DEF_MOD("pfc0", 915, R8A779F0_CLK_CL16M),
+ DEF_MOD("pfc0", 915, R8A779F0_CLK_CPEX),
DEF_MOD("tsc", 919, R8A779F0_CLK_CL16M),
DEF_MOD("rswitch2", 1505, R8A779F0_CLK_RSW2),
DEF_MOD("ether-serdes", 1506, R8A779F0_CLK_S0D2_HSC),
diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
index 5974adcef3eda..c4b1938db76b3 100644
--- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
@@ -22,7 +22,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R8A779G0_CLK_R,
+ LAST_DT_CORE_CLK = R8A779G0_CLK_CP,
/* External Input Clocks */
CLK_EXTAL,
@@ -141,6 +141,7 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = {
DEF_FIXED("svd2_vip", R8A779G0_CLK_SVD2_VIP, CLK_SV_VIP, 2, 1),
DEF_FIXED("cbfusa", R8A779G0_CLK_CBFUSA, CLK_EXTAL, 2, 1),
DEF_FIXED("cpex", R8A779G0_CLK_CPEX, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cp", R8A779G0_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("viobus", R8A779G0_CLK_VIOBUS, CLK_VIO, 1, 1),
DEF_FIXED("viobusd2", R8A779G0_CLK_VIOBUSD2, CLK_VIO, 2, 1),
DEF_FIXED("vcbus", R8A779G0_CLK_VCBUS, CLK_VC, 1, 1),
@@ -193,7 +194,7 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
DEF_MOD("msi4", 622, R8A779G0_CLK_MSO),
DEF_MOD("msi5", 623, R8A779G0_CLK_MSO),
DEF_MOD("pciec0", 624, R8A779G0_CLK_S0D2_HSC),
- DEF_MOD("pscie1", 625, R8A779G0_CLK_S0D2_HSC),
+ DEF_MOD("pciec1", 625, R8A779G0_CLK_S0D2_HSC),
DEF_MOD("pwm", 628, R8A779G0_CLK_SASYNCPERD4),
DEF_MOD("rpc-if", 629, R8A779G0_CLK_RPCD2),
DEF_MOD("scif0", 702, R8A779G0_CLK_SASYNCPERD4),
@@ -232,10 +233,10 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
DEF_MOD("cmt1", 911, R8A779G0_CLK_R),
DEF_MOD("cmt2", 912, R8A779G0_CLK_R),
DEF_MOD("cmt3", 913, R8A779G0_CLK_R),
- DEF_MOD("pfc0", 915, R8A779G0_CLK_CL16M),
- DEF_MOD("pfc1", 916, R8A779G0_CLK_CL16M),
- DEF_MOD("pfc2", 917, R8A779G0_CLK_CL16M),
- DEF_MOD("pfc3", 918, R8A779G0_CLK_CL16M),
+ DEF_MOD("pfc0", 915, R8A779G0_CLK_CP),
+ DEF_MOD("pfc1", 916, R8A779G0_CLK_CP),
+ DEF_MOD("pfc2", 917, R8A779G0_CLK_CP),
+ DEF_MOD("pfc3", 918, R8A779G0_CLK_CP),
DEF_MOD("tsc", 919, R8A779G0_CLK_CL16M),
DEF_MOD("tsn", 2723, R8A779G0_CLK_S0D4_HSC),
DEF_MOD("ssiu", 2926, R8A779G0_CLK_S0D6_PER),
diff --git a/drivers/clk/renesas/r8a779h0-cpg-mssr.c b/drivers/clk/renesas/r8a779h0-cpg-mssr.c
new file mode 100644
index 0000000000000..71f67a1c86d80
--- /dev/null
+++ b/drivers/clk/renesas/r8a779h0-cpg-mssr.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r8a779h0 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ *
+ * Based on r8a779g0-cpg-mssr.c
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/renesas,r8a779h0-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen4-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A779H0_CLK_R,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_EXTALR,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL1,
+ CLK_PLL2,
+ CLK_PLL3,
+ CLK_PLL4,
+ CLK_PLL5,
+ CLK_PLL6,
+ CLK_PLL1_DIV2,
+ CLK_PLL2_DIV2,
+ CLK_PLL3_DIV2,
+ CLK_PLL4_DIV2,
+ CLK_PLL4_DIV5,
+ CLK_PLL5_DIV2,
+ CLK_PLL5_DIV4,
+ CLK_PLL6_DIV2,
+ CLK_S0,
+ CLK_S0_VIO,
+ CLK_S0_VC,
+ CLK_S0_HSC,
+ CLK_SASYNCPER,
+ CLK_SV_VIP,
+ CLK_SV_IR,
+ CLK_IMPASRC,
+ CLK_IMPBSRC,
+ CLK_VIOSRC,
+ CLK_VCSRC,
+ CLK_SDSRC,
+ CLK_RPCSRC,
+ CLK_OCO,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a779h0_core_clks[] = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("extalr", CLK_EXTALR),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN),
+ DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN4_PLL2, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN4_PLL3, CLK_MAIN),
+ DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN4_PLL4, CLK_MAIN),
+ DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
+ DEF_BASE(".pll6", CLK_PLL6, CLK_TYPE_GEN4_PLL6, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+ DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1),
+ DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 2, 1),
+ DEF_FIXED(".pll4_div2", CLK_PLL4_DIV2, CLK_PLL4, 2, 1),
+ DEF_FIXED(".pll4_div5", CLK_PLL4_DIV5, CLK_PLL4, 5, 1),
+ DEF_FIXED(".pll5_div2", CLK_PLL5_DIV2, CLK_PLL5, 2, 1),
+ DEF_FIXED(".pll5_div4", CLK_PLL5_DIV4, CLK_PLL5_DIV2, 2, 1),
+ DEF_FIXED(".pll6_div2", CLK_PLL6_DIV2, CLK_PLL6, 2, 1),
+ DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s0_vio", CLK_S0_VIO, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s0_vc", CLK_S0_VC, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s0_hsc", CLK_S0_HSC, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".sasyncper", CLK_SASYNCPER, CLK_PLL5_DIV4, 3, 1),
+ DEF_FIXED(".sv_vip", CLK_SV_VIP, CLK_PLL1, 5, 1),
+ DEF_FIXED(".sv_ir", CLK_SV_IR, CLK_PLL1, 5, 1),
+ DEF_FIXED(".impasrc", CLK_IMPASRC, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".impbsrc", CLK_IMPBSRC, CLK_PLL1, 4, 1),
+ DEF_FIXED(".viosrc", CLK_VIOSRC, CLK_PLL1, 6, 1),
+ DEF_FIXED(".vcsrc", CLK_VCSRC, CLK_PLL1, 6, 1),
+ DEF_BASE(".sdsrc", CLK_SDSRC, CLK_TYPE_GEN4_SDSRC, CLK_PLL5),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5),
+ DEF_RATE(".oco", CLK_OCO, 32768),
+
+ /* Core Clock Outputs */
+ DEF_GEN4_Z("zc0", R8A779H0_CLK_ZC0, CLK_TYPE_GEN4_Z, CLK_PLL2_DIV2, 2, 0),
+ DEF_GEN4_Z("zc1", R8A779H0_CLK_ZC1, CLK_TYPE_GEN4_Z, CLK_PLL2_DIV2, 2, 8),
+ DEF_GEN4_Z("zc2", R8A779H0_CLK_ZC2, CLK_TYPE_GEN4_Z, CLK_PLL2_DIV2, 2, 32),
+ DEF_GEN4_Z("zc3", R8A779H0_CLK_ZC3, CLK_TYPE_GEN4_Z, CLK_PLL2_DIV2, 2, 40),
+ DEF_FIXED("s0d2", R8A779H0_CLK_S0D2, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3", R8A779H0_CLK_S0D3, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4", R8A779H0_CLK_S0D4, CLK_S0, 4, 1),
+ DEF_FIXED("cl16m", R8A779H0_CLK_CL16M, CLK_S0, 48, 1),
+ DEF_FIXED("s0d2_rt", R8A779H0_CLK_S0D2_RT, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3_rt", R8A779H0_CLK_S0D3_RT, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4_rt", R8A779H0_CLK_S0D4_RT, CLK_S0, 4, 1),
+ DEF_FIXED("s0d6_rt", R8A779H0_CLK_S0D6_RT, CLK_S0, 6, 1),
+ DEF_FIXED("cl16m_rt", R8A779H0_CLK_CL16M_RT, CLK_S0, 48, 1),
+ DEF_FIXED("s0d2_per", R8A779H0_CLK_S0D2_PER, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3_per", R8A779H0_CLK_S0D3_PER, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4_per", R8A779H0_CLK_S0D4_PER, CLK_S0, 4, 1),
+ DEF_FIXED("s0d6_per", R8A779H0_CLK_S0D6_PER, CLK_S0, 6, 1),
+ DEF_FIXED("s0d12_per", R8A779H0_CLK_S0D12_PER, CLK_S0, 12, 1),
+ DEF_FIXED("s0d24_per", R8A779H0_CLK_S0D24_PER, CLK_S0, 24, 1),
+ DEF_FIXED("cl16m_per", R8A779H0_CLK_CL16M_PER, CLK_S0, 48, 1),
+ DEF_FIXED("s0d2_mm", R8A779H0_CLK_S0D2_MM, CLK_S0, 2, 1),
+ DEF_FIXED("s0d4_mm", R8A779H0_CLK_S0D4_MM, CLK_S0, 4, 1),
+ DEF_FIXED("cl16m_mm", R8A779H0_CLK_CL16M_MM, CLK_S0, 48, 1),
+ DEF_FIXED("s0d2_u3dg", R8A779H0_CLK_S0D2_U3DG, CLK_S0, 2, 1),
+ DEF_FIXED("s0d4_u3dg", R8A779H0_CLK_S0D4_U3DG, CLK_S0, 4, 1),
+ DEF_FIXED("s0d1_vio", R8A779H0_CLK_S0D1_VIO, CLK_S0_VIO, 1, 1),
+ DEF_FIXED("s0d2_vio", R8A779H0_CLK_S0D2_VIO, CLK_S0_VIO, 2, 1),
+ DEF_FIXED("s0d4_vio", R8A779H0_CLK_S0D4_VIO, CLK_S0_VIO, 4, 1),
+ DEF_FIXED("s0d8_vio", R8A779H0_CLK_S0D8_VIO, CLK_S0_VIO, 8, 1),
+ DEF_FIXED("s0d1_vc", R8A779H0_CLK_S0D1_VC, CLK_S0_VC, 1, 1),
+ DEF_FIXED("s0d2_vc", R8A779H0_CLK_S0D2_VC, CLK_S0_VC, 2, 1),
+ DEF_FIXED("s0d4_vc", R8A779H0_CLK_S0D4_VC, CLK_S0_VC, 4, 1),
+ DEF_FIXED("s0d1_hsc", R8A779H0_CLK_S0D1_HSC, CLK_S0_HSC, 1, 1),
+ DEF_FIXED("s0d2_hsc", R8A779H0_CLK_S0D2_HSC, CLK_S0_HSC, 2, 1),
+ DEF_FIXED("s0d4_hsc", R8A779H0_CLK_S0D4_HSC, CLK_S0_HSC, 4, 1),
+ DEF_FIXED("s0d8_hsc", R8A779H0_CLK_S0D8_HSC, CLK_S0_HSC, 8, 1),
+ DEF_FIXED("cl16m_hsc", R8A779H0_CLK_CL16M_HSC, CLK_S0_HSC, 48, 1),
+ DEF_FIXED("sasyncrt", R8A779H0_CLK_SASYNCRT, CLK_PLL5_DIV4, 48, 1),
+ DEF_FIXED("sasyncperd1", R8A779H0_CLK_SASYNCPERD1, CLK_SASYNCPER, 1, 1),
+ DEF_FIXED("sasyncperd2", R8A779H0_CLK_SASYNCPERD2, CLK_SASYNCPER, 2, 1),
+ DEF_FIXED("sasyncperd4", R8A779H0_CLK_SASYNCPERD4, CLK_SASYNCPER, 4, 1),
+ DEF_FIXED("svd1_vip", R8A779H0_CLK_SVD1_VIP, CLK_SV_VIP, 1, 1),
+ DEF_FIXED("svd2_vip", R8A779H0_CLK_SVD2_VIP, CLK_SV_VIP, 2, 1),
+ DEF_FIXED("svd1_ir", R8A779H0_CLK_SVD1_IR, CLK_SV_IR, 1, 1),
+ DEF_FIXED("svd2_ir", R8A779H0_CLK_SVD2_IR, CLK_SV_IR, 2, 1),
+ DEF_FIXED("cbfusa", R8A779H0_CLK_CBFUSA, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cpex", R8A779H0_CLK_CPEX, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cp", R8A779H0_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("impad1", R8A779H0_CLK_IMPAD1, CLK_IMPASRC, 1, 1),
+ DEF_FIXED("impad4", R8A779H0_CLK_IMPAD4, CLK_IMPASRC, 4, 1),
+ DEF_FIXED("impb", R8A779H0_CLK_IMPB, CLK_IMPBSRC, 1, 1),
+ DEF_FIXED("viobusd1", R8A779H0_CLK_VIOBUSD1, CLK_VIOSRC, 1, 1),
+ DEF_FIXED("viobusd2", R8A779H0_CLK_VIOBUSD2, CLK_VIOSRC, 2, 1),
+ DEF_FIXED("vcbusd1", R8A779H0_CLK_VCBUSD1, CLK_VCSRC, 1, 1),
+ DEF_FIXED("vcbusd2", R8A779H0_CLK_VCBUSD2, CLK_VCSRC, 2, 1),
+ DEF_DIV6P1("canfd", R8A779H0_CLK_CANFD, CLK_PLL5_DIV4, 0x878),
+ DEF_DIV6P1("csi", R8A779H0_CLK_CSI, CLK_PLL5_DIV4, 0x880),
+ DEF_FIXED("dsiref", R8A779H0_CLK_DSIREF, CLK_PLL5_DIV4, 48, 1),
+ DEF_DIV6P1("dsiext", R8A779H0_CLK_DSIEXT, CLK_PLL5_DIV4, 0x884),
+ DEF_DIV6P1("mso", R8A779H0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
+
+ DEF_GEN4_SDH("sd0h", R8A779H0_CLK_SD0H, CLK_SDSRC, 0x870),
+ DEF_GEN4_SD("sd0", R8A779H0_CLK_SD0, R8A779H0_CLK_SD0H, 0x870),
+
+ DEF_BASE("rpc", R8A779H0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A779H0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779H0_CLK_RPC),
+
+ DEF_GEN4_OSC("osc", R8A779H0_CLK_OSC, CLK_EXTAL, 8),
+ DEF_GEN4_MDSEL("r", R8A779H0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
+};
+
+static const struct mssr_mod_clk r8a779h0_mod_clks[] = {
+ DEF_MOD("avb0:rgmii0", 211, R8A779H0_CLK_S0D8_HSC),
+ DEF_MOD("avb1:rgmii1", 212, R8A779H0_CLK_S0D8_HSC),
+ DEF_MOD("avb2:rgmii2", 213, R8A779H0_CLK_S0D8_HSC),
+ DEF_MOD("hscif0", 514, R8A779H0_CLK_SASYNCPERD1),
+ DEF_MOD("hscif1", 515, R8A779H0_CLK_SASYNCPERD1),
+ DEF_MOD("hscif2", 516, R8A779H0_CLK_SASYNCPERD1),
+ DEF_MOD("hscif3", 517, R8A779H0_CLK_SASYNCPERD1),
+ DEF_MOD("i2c0", 518, R8A779H0_CLK_S0D6_PER),
+ DEF_MOD("i2c1", 519, R8A779H0_CLK_S0D6_PER),
+ DEF_MOD("i2c2", 520, R8A779H0_CLK_S0D6_PER),
+ DEF_MOD("i2c3", 521, R8A779H0_CLK_S0D6_PER),
+ DEF_MOD("rpc-if", 629, R8A779H0_CLK_RPCD2),
+ DEF_MOD("sdhi0", 706, R8A779H0_CLK_SD0),
+ DEF_MOD("sydm1", 709, R8A779H0_CLK_S0D6_PER),
+ DEF_MOD("sydm2", 710, R8A779H0_CLK_S0D6_PER),
+ DEF_MOD("wdt1:wdt0", 907, R8A779H0_CLK_R),
+ DEF_MOD("pfc0", 915, R8A779H0_CLK_CP),
+ DEF_MOD("pfc1", 916, R8A779H0_CLK_CP),
+ DEF_MOD("pfc2", 917, R8A779H0_CLK_CP),
+};
+
+/*
+ * CPG Clock Data
+ */
+/*
+ * MD EXTAL PLL1 PLL2 PLL3 PLL4 PLL5 PLL6 OSC
+ * 14 13 (MHz)
+ * ------------------------------------------------------------------------
+ * 0 0 16.66 / 1 x192 x204 x192 x144 x192 x168 /16
+ * 0 1 20 / 1 x160 x170 x160 x120 x160 x140 /19
+ * 1 0 Prohibited setting
+ * 1 1 33.33 / 2 x192 x204 x192 x144 x192 x168 /32
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
+ (((md) & BIT(13)) >> 13))
+
+static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = {
+ /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
+ { 1, 192, 1, 240, 1, 192, 1, 240, 1, 192, 1, 168, 1, 16, },
+ { 1, 160, 1, 200, 1, 160, 1, 200, 1, 160, 1, 140, 1, 19, },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ { 2, 192, 1, 240, 1, 192, 1, 240, 1, 192, 1, 168, 1, 32, },
+};
+
+static int __init r8a779h0_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen4_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+ if (!cpg_pll_config->extal_div) {
+ dev_err(dev, "Prohibited setting (cpg_mode=0x%x)\n", cpg_mode);
+ return -EINVAL;
+ }
+
+ return rcar_gen4_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a779h0_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a779h0_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a779h0_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a779h0_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a779h0_mod_clks),
+ .num_hw_mod_clks = 30 * 32,
+
+ /* Callbacks */
+ .init = r8a779h0_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen4_cpg_clk_register,
+
+ .reg_layout = CLK_REG_LAYOUT_RCAR_GEN4,
+};
diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
index b70bb378ab469..33532673d25d7 100644
--- a/drivers/clk/renesas/r9a07g043-cpg.c
+++ b/drivers/clk/renesas/r9a07g043-cpg.c
@@ -48,6 +48,7 @@ enum clk_ids {
CLK_SEL_PLL3_3,
CLK_DIV_PLL3_C,
#ifdef CONFIG_ARM64
+ CLK_M2_DIV2,
CLK_PLL5,
CLK_PLL5_500,
CLK_PLL5_250,
@@ -87,7 +88,7 @@ static const struct clk_div_table dtable_1_32[] = {
/* Mux clock tables */
static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" };
static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" };
-static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" };
+static const char * const sel_sdhi[] = { ".clk_533", ".clk_400", ".clk_266" };
static const u32 mtable_sdhi[] = { 1, 2, 3 };
@@ -136,12 +137,16 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
DEF_MUX("HP", R9A07G043_CLK_HP, SEL_PLL6_2, sel_pll6_2),
DEF_FIXED("SPI0", R9A07G043_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2),
DEF_FIXED("SPI1", R9A07G043_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4),
- DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_shdi,
+ DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_sdhi,
mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier),
- DEF_SD_MUX("SD1", R9A07G043_CLK_SD1, SEL_SDHI1, SEL_SDHI0_STS, sel_shdi,
+ DEF_SD_MUX("SD1", R9A07G043_CLK_SD1, SEL_SDHI1, SEL_SDHI1_STS, sel_sdhi,
mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier),
DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G043_CLK_SD0, 1, 4),
DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G043_CLK_SD1, 1, 4),
+#ifdef CONFIG_ARM64
+ DEF_FIXED("M2", R9A07G043_CLK_M2, CLK_PLL3_533, 1, 2),
+ DEF_FIXED("M2_DIV2", CLK_M2_DIV2, R9A07G043_CLK_M2, 1, 2),
+#endif
};
static struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
@@ -195,6 +200,16 @@ static struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
0x554, 6),
DEF_MOD("sdhi1_aclk", R9A07G043_SDHI1_ACLK, R9A07G043_CLK_P1,
0x554, 7),
+#ifdef CONFIG_ARM64
+ DEF_MOD("cru_sysclk", R9A07G043_CRU_SYSCLK, CLK_M2_DIV2,
+ 0x564, 0),
+ DEF_MOD("cru_vclk", R9A07G043_CRU_VCLK, R9A07G043_CLK_M2,
+ 0x564, 1),
+ DEF_MOD("cru_pclk", R9A07G043_CRU_PCLK, R9A07G043_CLK_ZT,
+ 0x564, 2),
+ DEF_MOD("cru_aclk", R9A07G043_CRU_ACLK, R9A07G043_CLK_M0,
+ 0x564, 3),
+#endif
DEF_MOD("ssi0_pclk", R9A07G043_SSI0_PCLK2, R9A07G043_CLK_P0,
0x570, 0),
DEF_MOD("ssi0_sfr", R9A07G043_SSI0_PCLK_SFR, R9A07G043_CLK_P0,
@@ -286,6 +301,11 @@ static struct rzg2l_reset r9a07g043_resets[] = {
DEF_RST(R9A07G043_SPI_RST, 0x850, 0),
DEF_RST(R9A07G043_SDHI0_IXRST, 0x854, 0),
DEF_RST(R9A07G043_SDHI1_IXRST, 0x854, 1),
+#ifdef CONFIG_ARM64
+ DEF_RST(R9A07G043_CRU_CMN_RSTB, 0x864, 0),
+ DEF_RST(R9A07G043_CRU_PRESETN, 0x864, 1),
+ DEF_RST(R9A07G043_CRU_ARESETN, 0x864, 2),
+#endif
DEF_RST(R9A07G043_SSI0_RST_M2_REG, 0x870, 0),
DEF_RST(R9A07G043_SSI1_RST_M2_REG, 0x870, 1),
DEF_RST(R9A07G043_SSI2_RST_M2_REG, 0x870, 2),
@@ -331,6 +351,13 @@ static const unsigned int r9a07g043_crit_mod_clks[] __initconst = {
MOD_CLK_BASE + R9A07G043_DMAC_ACLK,
};
+#ifdef CONFIG_ARM64
+static const unsigned int r9a07g043_no_pm_mod_clks[] = {
+ MOD_CLK_BASE + R9A07G043_CRU_SYSCLK,
+ MOD_CLK_BASE + R9A07G043_CRU_VCLK,
+};
+#endif
+
const struct rzg2l_cpg_info r9a07g043_cpg_info = {
/* Core Clocks */
.core_clks = r9a07g043_core_clks,
@@ -347,6 +374,10 @@ const struct rzg2l_cpg_info r9a07g043_cpg_info = {
.num_mod_clks = ARRAY_SIZE(r9a07g043_mod_clks),
#ifdef CONFIG_ARM64
.num_hw_mod_clks = R9A07G043_TSU_PCLK + 1,
+
+ /* No PM Module Clocks */
+ .no_pm_mod_clks = r9a07g043_no_pm_mod_clks,
+ .num_no_pm_mod_clks = ARRAY_SIZE(r9a07g043_no_pm_mod_clks),
#endif
#ifdef CONFIG_RISCV
.num_hw_mod_clks = R9A07G043_IAX45_PCLK + 1,
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index 1047278c9079a..48404cafea3f5 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -106,7 +106,7 @@ static const struct clk_div_table dtable_16_128[] = {
static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" };
static const char * const sel_pll5_4[] = { ".pll5_foutpostdiv", ".pll5_fout1ph0" };
static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" };
-static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" };
+static const char * const sel_sdhi[] = { ".clk_533", ".clk_400", ".clk_266" };
static const char * const sel_gpu2[] = { ".pll6", ".pll3_div2_2" };
static const u32 mtable_sdhi[] = { 1, 2, 3 };
@@ -176,9 +176,9 @@ static const struct {
DEF_MUX("HP", R9A07G044_CLK_HP, SEL_PLL6_2, sel_pll6_2),
DEF_FIXED("SPI0", R9A07G044_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2),
DEF_FIXED("SPI1", R9A07G044_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4),
- DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_shdi,
+ DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_sdhi,
mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier),
- DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1, SEL_SDHI0_STS, sel_shdi,
+ DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1, SEL_SDHI1_STS, sel_sdhi,
mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier),
DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G044_CLK_SD0, 1, 4),
DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G044_CLK_SD1, 1, 4),
diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c
index 2582ba95256ea..c3e6da2de197f 100644
--- a/drivers/clk/renesas/r9a08g045-cpg.c
+++ b/drivers/clk/renesas/r9a08g045-cpg.c
@@ -193,6 +193,8 @@ static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
DEF_MOD("ia55_pclk", R9A08G045_IA55_PCLK, R9A08G045_CLK_P2, 0x518, 0),
DEF_MOD("ia55_clk", R9A08G045_IA55_CLK, R9A08G045_CLK_P1, 0x518, 1),
DEF_MOD("dmac_aclk", R9A08G045_DMAC_ACLK, R9A08G045_CLK_P3, 0x52c, 0),
+ DEF_MOD("wdt0_pclk", R9A08G045_WDT0_PCLK, R9A08G045_CLK_P0, 0x548, 0),
+ DEF_MOD("wdt0_clk", R9A08G045_WDT0_CLK, R9A08G045_OSCCLK, 0x548, 1),
DEF_MOD("sdhi0_imclk", R9A08G045_SDHI0_IMCLK, CLK_SD0_DIV4, 0x554, 0),
DEF_MOD("sdhi0_imclk2", R9A08G045_SDHI0_IMCLK2, CLK_SD0_DIV4, 0x554, 1),
DEF_MOD("sdhi0_clk_hs", R9A08G045_SDHI0_CLK_HS, R9A08G045_CLK_SD0, 0x554, 2),
@@ -219,6 +221,7 @@ static const struct rzg2l_reset r9a08g045_resets[] = {
DEF_RST(R9A08G045_GIC600_GICRESET_N, 0x814, 0),
DEF_RST(R9A08G045_GIC600_DBG_GICRESET_N, 0x814, 1),
DEF_RST(R9A08G045_IA55_RESETN, 0x818, 0),
+ DEF_RST(R9A08G045_WDT0_PRESETN, 0x848, 0),
DEF_RST(R9A08G045_SDHI0_IXRST, 0x854, 0),
DEF_RST(R9A08G045_SDHI1_IXRST, 0x854, 1),
DEF_RST(R9A08G045_SDHI2_IXRST, 0x854, 2),
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c
index c68d8b9870541..a2bbdad021ed8 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.c
+++ b/drivers/clk/renesas/rcar-gen4-cpg.c
@@ -179,7 +179,8 @@ static struct clk * __init cpg_pll_clk_register(const char *name,
*/
#define CPG_FRQCRB 0x00000804
#define CPG_FRQCRB_KICK BIT(31)
-#define CPG_FRQCRC 0x00000808
+#define CPG_FRQCRC0 0x00000808
+#define CPG_FRQCRC1 0x000008e0
struct cpg_z_clk {
struct clk_hw hw;
@@ -304,7 +305,12 @@ static struct clk * __init cpg_z_clk_register(const char *name,
init.parent_names = &parent_name;
init.num_parents = 1;
- zclk->reg = reg + CPG_FRQCRC;
+ if (offset < 32) {
+ zclk->reg = reg + CPG_FRQCRC0;
+ } else {
+ zclk->reg = reg + CPG_FRQCRC1;
+ offset -= 32;
+ }
zclk->kick_reg = reg + CPG_FRQCRB;
zclk->hw.init = &init;
zclk->mask = GENMASK(offset + 4, offset);
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index cb80d1bf6c7c6..1b421b8097965 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -142,6 +142,8 @@ static const u16 srstclr_for_gen4[] = {
* @reset_clear_regs: Pointer to reset clearing registers array
* @smstpcr_saved: [].mask: Mask of SMSTPCR[] bits under our control
* [].val: Saved values of SMSTPCR[]
+ * @reserved_ids: Temporarily used list of reserved clock IDs
+ * @num_reserved_ids: Temporarily used number of reserved clock IDs
* @clks: Array containing all Core and Module Clocks
*/
struct cpg_mssr_priv {
@@ -168,6 +170,9 @@ struct cpg_mssr_priv {
u32 val;
} smstpcr_saved[ARRAY_SIZE(mstpsr_for_gen4)];
+ unsigned int *reserved_ids;
+ unsigned int num_reserved_ids;
+
struct clk *clks[];
};
@@ -453,6 +458,19 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
break;
}
+ /*
+ * Ignore reserved devices; see cpg_mssr_reserved_init().
+ */
+ for (i = 0; i < priv->num_reserved_ids; i++) {
+ if (id == priv->reserved_ids[i]) {
+ dev_info(dev, "Ignore Linux non-assigned mod (%s)\n", mod->name);
+ init.flags |= CLK_IGNORE_UNUSED;
+ break;
+ }
+ }
+
clk = clk_register(NULL, &clock->hw);
if (IS_ERR(clk))
goto fail;
@@ -854,6 +872,12 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a779g0_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A779H0
+ {
+ .compatible = "renesas,r8a779h0-cpg-mssr",
+ .data = &r8a779h0_cpg_mssr_info,
+ },
+#endif
{ /* sentinel */ }
};
@@ -949,6 +973,78 @@ static const struct dev_pm_ops cpg_mssr_pm = {
#define DEV_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */
+static void __init cpg_mssr_reserved_exit(struct cpg_mssr_priv *priv)
+{
+ kfree(priv->reserved_ids);
+}
+
+static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
+ const struct cpg_mssr_info *info)
+{
+ struct device_node *soc = of_find_node_by_path("/soc");
+ struct device_node *node;
+ uint32_t args[MAX_PHANDLE_ARGS];
+ unsigned int *ids = NULL;
+ unsigned int num = 0;
+
+ /*
+ * Because clk_disable_unused() disables all unused clocks, the clocks of a device
+ * assigned to a non-Linux system would be disabled when Linux boots.
+ *
+ * To avoid that, renesas-cpg-mssr assumes a device with status = "reserved"
+ * is assigned to a non-Linux system, and adds the CLK_IGNORE_UNUSED flag
+ * to its CPG_MOD clocks.
+ * see also
+ * cpg_mssr_register_mod_clk()
+ *
+ * scif5: serial@e6f30000 {
+ * ...
+ * => clocks = <&cpg CPG_MOD 202>,
+ * <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+ * <&scif_clk>;
+ * ...
+ * status = "reserved";
+ * };
+ */
+ for_each_reserved_child_of_node(soc, node) {
+ struct of_phandle_iterator it;
+ int rc;
+
+ of_for_each_phandle(&it, rc, node, "clocks", "#clock-cells", -1) {
+ int idx;
+
+ if (it.node != priv->np)
+ continue;
+
+ if (of_phandle_iterator_args(&it, args, MAX_PHANDLE_ARGS) != 2)
+ continue;
+
+ if (args[0] != CPG_MOD)
+ continue;
+
+ ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL);
+ if (!ids) {
+ of_node_put(it.node);
+ return -ENOMEM;
+ }
+
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
+ idx = MOD_CLK_PACK_10(args[1]); /* for DEF_MOD_STB() */
+ else
+ idx = MOD_CLK_PACK(args[1]); /* for DEF_MOD() */
+
+ ids[num] = info->num_total_core_clks + idx;
+
+ num++;
+ }
+ }
+
+ priv->num_reserved_ids = num;
+ priv->reserved_ids = ids;
+
+ return 0;
+}
+
static int __init cpg_mssr_common_init(struct device *dev,
struct device_node *np,
const struct cpg_mssr_info *info)
@@ -1003,14 +1099,20 @@ static int __init cpg_mssr_common_init(struct device *dev,
for (i = 0; i < nclks; i++)
priv->clks[i] = ERR_PTR(-ENOENT);
- error = of_clk_add_provider(np, cpg_mssr_clk_src_twocell_get, priv);
+ error = cpg_mssr_reserved_init(priv, info);
if (error)
goto out_err;
+ error = of_clk_add_provider(np, cpg_mssr_clk_src_twocell_get, priv);
+ if (error)
+ goto reserve_err;
+
cpg_mssr_priv = priv;
return 0;
+reserve_err:
+ cpg_mssr_reserved_exit(priv);
out_err:
if (priv->base)
iounmap(priv->base);
@@ -1070,22 +1172,23 @@ static int __init cpg_mssr_probe(struct platform_device *pdev)
cpg_mssr_del_clk_provider,
np);
if (error)
- return error;
+ goto reserve_exit;
error = cpg_mssr_add_clk_domain(dev, info->core_pm_clks,
info->num_core_pm_clks);
if (error)
- return error;
+ goto reserve_exit;
/* Reset Controller not supported for Standby Control SoCs */
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
- return 0;
+ goto reserve_exit;
error = cpg_mssr_reset_controller_register(priv);
- if (error)
- return error;
- return 0;
+reserve_exit:
+ cpg_mssr_reserved_exit(priv);
+
+ return error;
}
static struct platform_driver cpg_mssr_driver = {
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 80c5b462924ac..a1d6e0cbcff94 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -180,6 +180,7 @@ extern const struct cpg_mssr_info r8a77995_cpg_mssr_info;
extern const struct cpg_mssr_info r8a779a0_cpg_mssr_info;
extern const struct cpg_mssr_info r8a779f0_cpg_mssr_info;
extern const struct cpg_mssr_info r8a779g0_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a779h0_cpg_mssr_info;
void __init cpg_mssr_early_init(struct device_node *np,
const struct cpg_mssr_info *info);
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 9316e5c8a0ea5..4f1a5782c2308 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -597,7 +597,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
COMPOSITE(0, "clk_i2s0_div", mux_pll_src_cpll_gpll_p, 0,
RK3399_CLKSEL_CON(28), 7, 1, MFLAGS, 0, 7, DFLAGS,
RK3399_CLKGATE_CON(8), 3, GFLAGS),
- COMPOSITE_FRACMUX(0, "clk_i2s0_frac", "clk_i2s0_div", 0,
+ COMPOSITE_FRACMUX(0, "clk_i2s0_frac", "clk_i2s0_div", CLK_SET_RATE_PARENT,
RK3399_CLKSEL_CON(96), 0,
RK3399_CLKGATE_CON(8), 4, GFLAGS,
&rk3399_i2s0_fracmux),
@@ -607,7 +607,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
COMPOSITE(0, "clk_i2s1_div", mux_pll_src_cpll_gpll_p, 0,
RK3399_CLKSEL_CON(29), 7, 1, MFLAGS, 0, 7, DFLAGS,
RK3399_CLKGATE_CON(8), 6, GFLAGS),
- COMPOSITE_FRACMUX(0, "clk_i2s1_frac", "clk_i2s1_div", 0,
+ COMPOSITE_FRACMUX(0, "clk_i2s1_frac", "clk_i2s1_div", CLK_SET_RATE_PARENT,
RK3399_CLKSEL_CON(97), 0,
RK3399_CLKGATE_CON(8), 7, GFLAGS,
&rk3399_i2s1_fracmux),
@@ -617,7 +617,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
COMPOSITE(0, "clk_i2s2_div", mux_pll_src_cpll_gpll_p, 0,
RK3399_CLKSEL_CON(30), 7, 1, MFLAGS, 0, 7, DFLAGS,
RK3399_CLKGATE_CON(8), 9, GFLAGS),
- COMPOSITE_FRACMUX(0, "clk_i2s2_frac", "clk_i2s2_div", 0,
+ COMPOSITE_FRACMUX(0, "clk_i2s2_frac", "clk_i2s2_div", CLK_SET_RATE_PARENT,
RK3399_CLKSEL_CON(98), 0,
RK3399_CLKGATE_CON(8), 10, GFLAGS,
&rk3399_i2s2_fracmux),
diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
index b786ddc9af2af..8cb21d10beca2 100644
--- a/drivers/clk/rockchip/clk-rk3568.c
+++ b/drivers/clk/rockchip/clk-rk3568.c
@@ -78,6 +78,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
RK3036_PLL_RATE(200000000, 1, 100, 3, 4, 1, 0),
RK3036_PLL_RATE(148500000, 1, 99, 4, 4, 1, 0),
RK3036_PLL_RATE(135000000, 2, 45, 4, 1, 1, 0),
+ RK3036_PLL_RATE(128000000, 1, 16, 3, 1, 1, 0),
RK3036_PLL_RATE(126400000, 1, 79, 5, 3, 1, 0),
RK3036_PLL_RATE(119000000, 3, 119, 4, 2, 1, 0),
RK3036_PLL_RATE(115200000, 1, 24, 5, 1, 1, 0),
diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c
index 0b60ae78f9d88..b30279a96dc8a 100644
--- a/drivers/clk/rockchip/clk-rk3588.c
+++ b/drivers/clk/rockchip/clk-rk3588.c
@@ -29,7 +29,7 @@
* power, but avoids leaking implementation details into DT or hanging the
* system.
*/
-#define GATE_LINK(_id, cname, pname, linkname, f, o, b, gf) \
+#define GATE_LINK(_id, cname, pname, linkedclk, f, o, b, gf) \
GATE(_id, cname, pname, f, o, b, gf)
#define RK3588_LINKED_CLK CLK_IS_CRITICAL
@@ -1004,7 +1004,7 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
GATE(PCLK_MAILBOX1, "pclk_mailbox1", "pclk_top_root", 0,
RK3588_CLKGATE_CON(16), 12, GFLAGS),
GATE(PCLK_MAILBOX2, "pclk_mailbox2", "pclk_top_root", 0,
- RK3588_CLKGATE_CON(16), 13, GFLAGS),
+ RK3588_CLKGATE_CON(16), 13, GFLAGS),
GATE(PCLK_PMU2, "pclk_pmu2", "pclk_top_root", CLK_IS_CRITICAL,
RK3588_CLKGATE_CON(19), 3, GFLAGS),
GATE(PCLK_PMUCM0_INTMUX, "pclk_pmucm0_intmux", "pclk_top_root", CLK_IS_CRITICAL,
@@ -1851,8 +1851,6 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
RK3588_CLKGATE_CON(56), 0, GFLAGS),
GATE(PCLK_TRNG0, "pclk_trng0", "pclk_vo0_root", 0,
RK3588_CLKGATE_CON(56), 1, GFLAGS),
- GATE(PCLK_VO0GRF, "pclk_vo0grf", "pclk_vo0_root", CLK_IGNORE_UNUSED,
- RK3588_CLKGATE_CON(55), 10, GFLAGS),
COMPOSITE(CLK_I2S4_8CH_TX_SRC, "clk_i2s4_8ch_tx_src", gpll_aupll_p, 0,
RK3588_CLKSEL_CON(118), 5, 1, MFLAGS, 0, 5, DFLAGS,
RK3588_CLKGATE_CON(56), 11, GFLAGS),
@@ -1998,8 +1996,6 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
RK3588_CLKGATE_CON(60), 9, GFLAGS),
GATE(PCLK_TRNG1, "pclk_trng1", "pclk_vo1_root", 0,
RK3588_CLKGATE_CON(60), 10, GFLAGS),
- GATE(0, "pclk_vo1grf", "pclk_vo1_root", CLK_IGNORE_UNUSED,
- RK3588_CLKGATE_CON(59), 12, GFLAGS),
GATE(PCLK_S_EDP0, "pclk_s_edp0", "pclk_vo1_s_root", 0,
RK3588_CLKGATE_CON(59), 14, GFLAGS),
GATE(PCLK_S_EDP1, "pclk_s_edp1", "pclk_vo1_s_root", 0,
@@ -2433,26 +2429,28 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
GATE(ACLK_AV1, "aclk_av1", "aclk_av1_pre", 0,
RK3588_CLKGATE_CON(68), 2, GFLAGS),
- GATE_LINK(ACLK_ISP1_PRE, "aclk_isp1_pre", "aclk_isp1_root", "aclk_vi_root", 0, RK3588_CLKGATE_CON(26), 6, GFLAGS),
- GATE_LINK(HCLK_ISP1_PRE, "hclk_isp1_pre", "hclk_isp1_root", "hclk_vi_root", 0, RK3588_CLKGATE_CON(26), 8, GFLAGS),
- GATE_LINK(HCLK_NVM, "hclk_nvm", "hclk_nvm_root", "aclk_nvm_root", RK3588_LINKED_CLK, RK3588_CLKGATE_CON(31), 2, GFLAGS),
- GATE_LINK(ACLK_USB, "aclk_usb", "aclk_usb_root", "aclk_vo1usb_top_root", 0, RK3588_CLKGATE_CON(42), 2, GFLAGS),
- GATE_LINK(HCLK_USB, "hclk_usb", "hclk_usb_root", "hclk_vo1usb_top_root", 0, RK3588_CLKGATE_CON(42), 3, GFLAGS),
- GATE_LINK(ACLK_JPEG_DECODER_PRE, "aclk_jpeg_decoder_pre", "aclk_jpeg_decoder_root", "aclk_vdpu_root", 0, RK3588_CLKGATE_CON(44), 7, GFLAGS),
- GATE_LINK(ACLK_VDPU_LOW_PRE, "aclk_vdpu_low_pre", "aclk_vdpu_low_root", "aclk_vdpu_root", 0, RK3588_CLKGATE_CON(44), 5, GFLAGS),
- GATE_LINK(ACLK_RKVENC1_PRE, "aclk_rkvenc1_pre", "aclk_rkvenc1_root", "aclk_rkvenc0", 0, RK3588_CLKGATE_CON(48), 3, GFLAGS),
- GATE_LINK(HCLK_RKVENC1_PRE, "hclk_rkvenc1_pre", "hclk_rkvenc1_root", "hclk_rkvenc0", 0, RK3588_CLKGATE_CON(48), 2, GFLAGS),
- GATE_LINK(HCLK_RKVDEC0_PRE, "hclk_rkvdec0_pre", "hclk_rkvdec0_root", "hclk_vdpu_root", 0, RK3588_CLKGATE_CON(40), 5, GFLAGS),
- GATE_LINK(ACLK_RKVDEC0_PRE, "aclk_rkvdec0_pre", "aclk_rkvdec0_root", "aclk_vdpu_root", 0, RK3588_CLKGATE_CON(40), 6, GFLAGS),
- GATE_LINK(HCLK_RKVDEC1_PRE, "hclk_rkvdec1_pre", "hclk_rkvdec1_root", "hclk_vdpu_root", 0, RK3588_CLKGATE_CON(41), 4, GFLAGS),
- GATE_LINK(ACLK_RKVDEC1_PRE, "aclk_rkvdec1_pre", "aclk_rkvdec1_root", "aclk_vdpu_root", 0, RK3588_CLKGATE_CON(41), 5, GFLAGS),
- GATE_LINK(ACLK_HDCP0_PRE, "aclk_hdcp0_pre", "aclk_vo0_root", "aclk_vop_low_root", 0, RK3588_CLKGATE_CON(55), 9, GFLAGS),
- GATE_LINK(HCLK_VO0, "hclk_vo0", "hclk_vo0_root", "hclk_vop_root", 0, RK3588_CLKGATE_CON(55), 5, GFLAGS),
- GATE_LINK(ACLK_HDCP1_PRE, "aclk_hdcp1_pre", "aclk_hdcp1_root", "aclk_vo1usb_top_root", 0, RK3588_CLKGATE_CON(59), 6, GFLAGS),
- GATE_LINK(HCLK_VO1, "hclk_vo1", "hclk_vo1_root", "hclk_vo1usb_top_root", 0, RK3588_CLKGATE_CON(59), 9, GFLAGS),
- GATE_LINK(ACLK_AV1_PRE, "aclk_av1_pre", "aclk_av1_root", "aclk_vdpu_root", 0, RK3588_CLKGATE_CON(68), 1, GFLAGS),
- GATE_LINK(PCLK_AV1_PRE, "pclk_av1_pre", "pclk_av1_root", "hclk_vdpu_root", 0, RK3588_CLKGATE_CON(68), 4, GFLAGS),
- GATE_LINK(HCLK_SDIO_PRE, "hclk_sdio_pre", "hclk_sdio_root", "hclk_nvm", 0, RK3588_CLKGATE_CON(75), 1, GFLAGS),
+ GATE_LINK(ACLK_ISP1_PRE, "aclk_isp1_pre", "aclk_isp1_root", ACLK_VI_ROOT, 0, RK3588_CLKGATE_CON(26), 6, GFLAGS),
+ GATE_LINK(HCLK_ISP1_PRE, "hclk_isp1_pre", "hclk_isp1_root", HCLK_VI_ROOT, 0, RK3588_CLKGATE_CON(26), 8, GFLAGS),
+ GATE_LINK(HCLK_NVM, "hclk_nvm", "hclk_nvm_root", ACLK_NVM_ROOT, RK3588_LINKED_CLK, RK3588_CLKGATE_CON(31), 2, GFLAGS),
+ GATE_LINK(ACLK_USB, "aclk_usb", "aclk_usb_root", ACLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(42), 2, GFLAGS),
+ GATE_LINK(HCLK_USB, "hclk_usb", "hclk_usb_root", HCLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(42), 3, GFLAGS),
+ GATE_LINK(ACLK_JPEG_DECODER_PRE, "aclk_jpeg_decoder_pre", "aclk_jpeg_decoder_root", ACLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(44), 7, GFLAGS),
+ GATE_LINK(ACLK_VDPU_LOW_PRE, "aclk_vdpu_low_pre", "aclk_vdpu_low_root", ACLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(44), 5, GFLAGS),
+ GATE_LINK(ACLK_RKVENC1_PRE, "aclk_rkvenc1_pre", "aclk_rkvenc1_root", ACLK_RKVENC0, 0, RK3588_CLKGATE_CON(48), 3, GFLAGS),
+ GATE_LINK(HCLK_RKVENC1_PRE, "hclk_rkvenc1_pre", "hclk_rkvenc1_root", HCLK_RKVENC0, 0, RK3588_CLKGATE_CON(48), 2, GFLAGS),
+ GATE_LINK(HCLK_RKVDEC0_PRE, "hclk_rkvdec0_pre", "hclk_rkvdec0_root", HCLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(40), 5, GFLAGS),
+ GATE_LINK(ACLK_RKVDEC0_PRE, "aclk_rkvdec0_pre", "aclk_rkvdec0_root", ACLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(40), 6, GFLAGS),
+ GATE_LINK(HCLK_RKVDEC1_PRE, "hclk_rkvdec1_pre", "hclk_rkvdec1_root", HCLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(41), 4, GFLAGS),
+ GATE_LINK(ACLK_RKVDEC1_PRE, "aclk_rkvdec1_pre", "aclk_rkvdec1_root", ACLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(41), 5, GFLAGS),
+ GATE_LINK(ACLK_HDCP0_PRE, "aclk_hdcp0_pre", "aclk_vo0_root", ACLK_VOP_LOW_ROOT, 0, RK3588_CLKGATE_CON(55), 9, GFLAGS),
+ GATE_LINK(HCLK_VO0, "hclk_vo0", "hclk_vo0_root", HCLK_VOP_ROOT, RK3588_LINKED_CLK, RK3588_CLKGATE_CON(55), 5, GFLAGS),
+ GATE_LINK(ACLK_HDCP1_PRE, "aclk_hdcp1_pre", "aclk_hdcp1_root", ACLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(59), 6, GFLAGS),
+ GATE_LINK(HCLK_VO1, "hclk_vo1", "hclk_vo1_root", HCLK_VO1USB_TOP_ROOT, RK3588_LINKED_CLK, RK3588_CLKGATE_CON(59), 9, GFLAGS),
+ GATE_LINK(ACLK_AV1_PRE, "aclk_av1_pre", "aclk_av1_root", ACLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(68), 1, GFLAGS),
+ GATE_LINK(PCLK_AV1_PRE, "pclk_av1_pre", "pclk_av1_root", HCLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(68), 4, GFLAGS),
+ GATE_LINK(HCLK_SDIO_PRE, "hclk_sdio_pre", "hclk_sdio_root", HCLK_NVM, 0, RK3588_CLKGATE_CON(75), 1, GFLAGS),
+ GATE_LINK(PCLK_VO0GRF, "pclk_vo0grf", "pclk_vo0_root", HCLK_VO0, CLK_IGNORE_UNUSED, RK3588_CLKGATE_CON(55), 10, GFLAGS),
+ GATE_LINK(PCLK_VO1GRF, "pclk_vo1grf", "pclk_vo1_root", HCLK_VO1, CLK_IGNORE_UNUSED, RK3588_CLKGATE_CON(59), 12, GFLAGS),
};
static void __init rk3588_clk_init(struct device_node *np)
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 3e62ade120c5d..fbf4c4208e06a 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -16,51 +16,106 @@
* of the SoC or supplied after the SoC characterization.
*
* The below implementation of the CPU clock allows the rate changes of the CPU
- * clock and the corresponding rate changes of the auxillary clocks of the CPU
+ * clock and the corresponding rate changes of the auxiliary clocks of the CPU
* domain. The platform clock driver provides a clock register configuration
* for each configurable rate which is then used to program the clock hardware
- * registers to acheive a fast co-oridinated rate change for all the CPU domain
+ * registers to achieve a fast coordinated rate change for all the CPU domain
* clocks.
*
* On a rate change request for the CPU clock, the rate change is propagated
- * upto the PLL supplying the clock to the CPU domain clock blocks. While the
+ * up to the PLL supplying the clock to the CPU domain clock blocks. While the
* CPU domain PLL is reconfigured, the CPU domain clocks are driven using an
* alternate clock source. If required, the alternate clock source is divided
* down in order to keep the output clock rate within the previous OPP limits.
-*/
+ */
+#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+
+#include "clk.h"
#include "clk-cpu.h"
-#define E4210_SRC_CPU 0x0
-#define E4210_STAT_CPU 0x200
-#define E4210_DIV_CPU0 0x300
-#define E4210_DIV_CPU1 0x304
-#define E4210_DIV_STAT_CPU0 0x400
-#define E4210_DIV_STAT_CPU1 0x404
-
-#define E5433_MUX_SEL2 0x008
-#define E5433_MUX_STAT2 0x208
-#define E5433_DIV_CPU0 0x400
-#define E5433_DIV_CPU1 0x404
-#define E5433_DIV_STAT_CPU0 0x500
-#define E5433_DIV_STAT_CPU1 0x504
-
-#define E4210_DIV0_RATIO0_MASK 0x7
-#define E4210_DIV1_HPM_MASK (0x7 << 4)
-#define E4210_DIV1_COPY_MASK (0x7 << 0)
-#define E4210_MUX_HPM_MASK (1 << 20)
-#define E4210_DIV0_ATB_SHIFT 16
-#define E4210_DIV0_ATB_MASK (DIV_MASK << E4210_DIV0_ATB_SHIFT)
+struct exynos_cpuclk;
+
+typedef int (*exynos_rate_change_fn_t)(struct clk_notifier_data *ndata,
+ struct exynos_cpuclk *cpuclk);
+
+/**
+ * struct exynos_cpuclk_regs - Register offsets for CPU related clocks
+ * @mux_sel: offset of CPU MUX_SEL register (for selecting MUX clock parent)
+ * @mux_stat: offset of CPU MUX_STAT register (for checking MUX clock status)
+ * @div_cpu0: offset of CPU DIV0 register (for modifying divider values)
+ * @div_cpu1: offset of CPU DIV1 register (for modifying divider values)
+ * @div_stat_cpu0: offset of CPU DIV0_STAT register (for checking DIV status)
+ * @div_stat_cpu1: offset of CPU DIV1_STAT register (for checking DIV status)
+ * @mux: offset of MUX register for choosing CPU clock source
+ * @divs: offsets of DIV registers (ACLK, ATCLK, PCLKDBG and PERIPHCLK)
+ */
+struct exynos_cpuclk_regs {
+ u32 mux_sel;
+ u32 mux_stat;
+ u32 div_cpu0;
+ u32 div_cpu1;
+ u32 div_stat_cpu0;
+ u32 div_stat_cpu1;
+
+ u32 mux;
+ u32 divs[4];
+};
+
+/**
+ * struct exynos_cpuclk_chip - Chip specific data for CPU clock
+ * @regs: register offsets for CPU related clocks
+ * @pre_rate_cb: callback to run before CPU clock rate change
+ * @post_rate_cb: callback to run after CPU clock rate change
+ */
+struct exynos_cpuclk_chip {
+ const struct exynos_cpuclk_regs *regs;
+ exynos_rate_change_fn_t pre_rate_cb;
+ exynos_rate_change_fn_t post_rate_cb;
+};
+
+/**
+ * struct exynos_cpuclk - information about clock supplied to a CPU core
+ * @hw: handle between CCF and CPU clock
+ * @alt_parent: alternate parent clock to use when switching the speed
+ * of the primary parent clock
+ * @base: start address of the CPU clock registers block
+ * @lock: cpu clock domain register access lock
+ * @cfg: cpu clock rate configuration data
+ * @num_cfgs: number of array elements in @cfg array
+ * @clk_nb: clock notifier registered for changes in clock speed of the
+ * primary parent clock
+ * @flags: configuration flags for the CPU clock
+ * @chip: chip-specific data for the CPU clock
+ *
+ * This structure holds information required for programming the CPU clock for
+ * various clock speeds.
+ */
+struct exynos_cpuclk {
+ struct clk_hw hw;
+ const struct clk_hw *alt_parent;
+ void __iomem *base;
+ spinlock_t *lock;
+ const struct exynos_cpuclk_cfg_data *cfg;
+ const unsigned long num_cfgs;
+ struct notifier_block clk_nb;
+ unsigned long flags;
+ const struct exynos_cpuclk_chip *chip;
+};
+/* ---- Common code --------------------------------------------------------- */
+
+/* Divider stabilization time, msec */
+#define MAX_STAB_TIME 10
#define MAX_DIV 8
-#define DIV_MASK 7
-#define DIV_MASK_ALL 0xffffffff
-#define MUX_MASK 7
+#define DIV_MASK GENMASK(2, 0)
+#define DIV_MASK_ALL GENMASK(31, 0)
+#define MUX_MASK GENMASK(2, 0)
/*
* Helper function to wait until divider(s) have stabilized after the divider
@@ -68,7 +123,7 @@
*/
static void wait_until_divider_stable(void __iomem *div_reg, unsigned long mask)
{
- unsigned long timeout = jiffies + msecs_to_jiffies(10);
+ unsigned long timeout = jiffies + msecs_to_jiffies(MAX_STAB_TIME);
do {
if (!(readl(div_reg) & mask))
@@ -86,72 +141,65 @@ static void wait_until_divider_stable(void __iomem *div_reg, unsigned long mask)
* value was changed.
*/
static void wait_until_mux_stable(void __iomem *mux_reg, u32 mux_pos,
- unsigned long mux_value)
+ unsigned long mask, unsigned long mux_value)
{
- unsigned long timeout = jiffies + msecs_to_jiffies(10);
+ unsigned long timeout = jiffies + msecs_to_jiffies(MAX_STAB_TIME);
do {
- if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
+ if (((readl(mux_reg) >> mux_pos) & mask) == mux_value)
return;
} while (time_before(jiffies, timeout));
- if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
+ if (((readl(mux_reg) >> mux_pos) & mask) == mux_value)
return;
pr_err("%s: re-parenting mux timed-out\n", __func__);
}
-/* common round rate callback useable for all types of CPU clocks */
-static long exynos_cpuclk_round_rate(struct clk_hw *hw,
- unsigned long drate, unsigned long *prate)
-{
- struct clk_hw *parent = clk_hw_get_parent(hw);
- *prate = clk_hw_round_rate(parent, drate);
- return *prate;
-}
-
-/* common recalc rate callback useable for all types of CPU clocks */
-static unsigned long exynos_cpuclk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- /*
- * The CPU clock output (armclk) rate is the same as its parent
- * rate. Although there exist certain dividers inside the CPU
- * clock block that could be used to divide the parent clock,
- * the driver does not make use of them currently, except during
- * frequency transitions.
- */
- return parent_rate;
-}
-
-static const struct clk_ops exynos_cpuclk_clk_ops = {
- .recalc_rate = exynos_cpuclk_recalc_rate,
- .round_rate = exynos_cpuclk_round_rate,
-};
-
/*
* Helper function to set the 'safe' dividers for the CPU clock. The parameters
* div and mask contain the divider value and the register bit mask of the
* dividers to be programmed.
*/
-static void exynos_set_safe_div(void __iomem *base, unsigned long div,
- unsigned long mask)
+static void exynos_set_safe_div(struct exynos_cpuclk *cpuclk, unsigned long div,
+ unsigned long mask)
{
+ const struct exynos_cpuclk_regs * const regs = cpuclk->chip->regs;
+ void __iomem *base = cpuclk->base;
unsigned long div0;
- div0 = readl(base + E4210_DIV_CPU0);
+ div0 = readl(base + regs->div_cpu0);
div0 = (div0 & ~mask) | (div & mask);
- writel(div0, base + E4210_DIV_CPU0);
- wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, mask);
+ writel(div0, base + regs->div_cpu0);
+ wait_until_divider_stable(base + regs->div_stat_cpu0, mask);
}
+/* ---- Exynos 3/4/5 -------------------------------------------------------- */
+
+#define E4210_DIV0_RATIO0_MASK GENMASK(2, 0)
+#define E4210_DIV1_HPM_MASK GENMASK(6, 4)
+#define E4210_DIV1_COPY_MASK GENMASK(2, 0)
+#define E4210_MUX_HPM_MASK BIT(20)
+#define E4210_DIV0_ATB_SHIFT 16
+#define E4210_DIV0_ATB_MASK (DIV_MASK << E4210_DIV0_ATB_SHIFT)
+
+static const struct exynos_cpuclk_regs e4210_cpuclk_regs = {
+ .mux_sel = 0x200,
+ .mux_stat = 0x400,
+ .div_cpu0 = 0x500,
+ .div_cpu1 = 0x504,
+ .div_stat_cpu0 = 0x600,
+ .div_stat_cpu1 = 0x604,
+};
+
/* handler for pre-rate change notification from parent clock */
static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
- struct exynos_cpuclk *cpuclk, void __iomem *base)
+ struct exynos_cpuclk *cpuclk)
{
const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
+ const struct exynos_cpuclk_regs * const regs = cpuclk->chip->regs;
+ void __iomem *base = cpuclk->base;
unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
- unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
unsigned long div0, div1 = 0, mux_reg;
unsigned long flags;
@@ -172,8 +220,8 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
div0 = cfg_data->div0;
if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
div1 = cfg_data->div1;
- if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK)
- div1 = readl(base + E4210_DIV_CPU1) &
+ if (readl(base + regs->mux_sel) & E4210_MUX_HPM_MASK)
+ div1 = readl(base + regs->div_cpu1) &
(E4210_DIV1_HPM_MASK | E4210_DIV1_COPY_MASK);
}
@@ -187,6 +235,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
*/
if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);
+ unsigned long alt_div, alt_div_mask = DIV_MASK;
alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
WARN_ON(alt_div >= MAX_DIV);
@@ -199,23 +248,23 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
alt_div |= E4210_DIV0_ATB_MASK;
alt_div_mask |= E4210_DIV0_ATB_MASK;
}
- exynos_set_safe_div(base, alt_div, alt_div_mask);
+ exynos_set_safe_div(cpuclk, alt_div, alt_div_mask);
div0 |= alt_div;
}
/* select sclk_mpll as the alternate parent */
- mux_reg = readl(base + E4210_SRC_CPU);
- writel(mux_reg | (1 << 16), base + E4210_SRC_CPU);
- wait_until_mux_stable(base + E4210_STAT_CPU, 16, 2);
+ mux_reg = readl(base + regs->mux_sel);
+ writel(mux_reg | (1 << 16), base + regs->mux_sel);
+ wait_until_mux_stable(base + regs->mux_stat, 16, MUX_MASK, 2);
/* alternate parent is active now. set the dividers */
- writel(div0, base + E4210_DIV_CPU0);
- wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL);
+ writel(div0, base + regs->div_cpu0);
+ wait_until_divider_stable(base + regs->div_stat_cpu0, DIV_MASK_ALL);
if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
- writel(div1, base + E4210_DIV_CPU1);
- wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
- DIV_MASK_ALL);
+ writel(div1, base + regs->div_cpu1);
+ wait_until_divider_stable(base + regs->div_stat_cpu1,
+ DIV_MASK_ALL);
}
spin_unlock_irqrestore(cpuclk->lock, flags);
@@ -224,9 +273,11 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
/* handler for post-rate change notification from parent clock */
static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
- struct exynos_cpuclk *cpuclk, void __iomem *base)
+ struct exynos_cpuclk *cpuclk)
{
const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
+ const struct exynos_cpuclk_regs * const regs = cpuclk->chip->regs;
+ void __iomem *base = cpuclk->base;
unsigned long div = 0, div_mask = DIV_MASK;
unsigned long mux_reg;
unsigned long flags;
@@ -243,43 +294,39 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
spin_lock_irqsave(cpuclk->lock, flags);
/* select mout_apll as the alternate parent */
- mux_reg = readl(base + E4210_SRC_CPU);
- writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU);
- wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1);
+ mux_reg = readl(base + regs->mux_sel);
+ writel(mux_reg & ~(1 << 16), base + regs->mux_sel);
+ wait_until_mux_stable(base + regs->mux_stat, 16, MUX_MASK, 1);
if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK);
div_mask |= E4210_DIV0_ATB_MASK;
}
- exynos_set_safe_div(base, div, div_mask);
+ exynos_set_safe_div(cpuclk, div, div_mask);
spin_unlock_irqrestore(cpuclk->lock, flags);
return 0;
}
-/*
- * Helper function to set the 'safe' dividers for the CPU clock. The parameters
- * div and mask contain the divider value and the register bit mask of the
- * dividers to be programmed.
- */
-static void exynos5433_set_safe_div(void __iomem *base, unsigned long div,
- unsigned long mask)
-{
- unsigned long div0;
+/* ---- Exynos5433 ---------------------------------------------------------- */
- div0 = readl(base + E5433_DIV_CPU0);
- div0 = (div0 & ~mask) | (div & mask);
- writel(div0, base + E5433_DIV_CPU0);
- wait_until_divider_stable(base + E5433_DIV_STAT_CPU0, mask);
-}
+static const struct exynos_cpuclk_regs e5433_cpuclk_regs = {
+ .mux_sel = 0x208,
+ .mux_stat = 0x408,
+ .div_cpu0 = 0x600,
+ .div_cpu1 = 0x604,
+ .div_stat_cpu0 = 0x700,
+ .div_stat_cpu1 = 0x704,
+};
/* handler for pre-rate change notification from parent clock */
static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
- struct exynos_cpuclk *cpuclk, void __iomem *base)
+ struct exynos_cpuclk *cpuclk)
{
const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
+ const struct exynos_cpuclk_regs * const regs = cpuclk->chip->regs;
+ void __iomem *base = cpuclk->base;
unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
- unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
unsigned long div0, div1 = 0, mux_reg;
unsigned long flags;
@@ -309,25 +356,26 @@ static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
*/
if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);
+ unsigned long alt_div, alt_div_mask = DIV_MASK;
alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
WARN_ON(alt_div >= MAX_DIV);
- exynos5433_set_safe_div(base, alt_div, alt_div_mask);
+ exynos_set_safe_div(cpuclk, alt_div, alt_div_mask);
div0 |= alt_div;
}
/* select the alternate parent */
- mux_reg = readl(base + E5433_MUX_SEL2);
- writel(mux_reg | 1, base + E5433_MUX_SEL2);
- wait_until_mux_stable(base + E5433_MUX_STAT2, 0, 2);
+ mux_reg = readl(base + regs->mux_sel);
+ writel(mux_reg | 1, base + regs->mux_sel);
+ wait_until_mux_stable(base + regs->mux_stat, 0, MUX_MASK, 2);
/* alternate parent is active now. set the dividers */
- writel(div0, base + E5433_DIV_CPU0);
- wait_until_divider_stable(base + E5433_DIV_STAT_CPU0, DIV_MASK_ALL);
+ writel(div0, base + regs->div_cpu0);
+ wait_until_divider_stable(base + regs->div_stat_cpu0, DIV_MASK_ALL);
- writel(div1, base + E5433_DIV_CPU1);
- wait_until_divider_stable(base + E5433_DIV_STAT_CPU1, DIV_MASK_ALL);
+ writel(div1, base + regs->div_cpu1);
+ wait_until_divider_stable(base + regs->div_stat_cpu1, DIV_MASK_ALL);
spin_unlock_irqrestore(cpuclk->lock, flags);
return 0;
@@ -335,8 +383,10 @@ static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
/* handler for post-rate change notification from parent clock */
static int exynos5433_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
- struct exynos_cpuclk *cpuclk, void __iomem *base)
+ struct exynos_cpuclk *cpuclk)
{
+ const struct exynos_cpuclk_regs * const regs = cpuclk->chip->regs;
+ void __iomem *base = cpuclk->base;
unsigned long div = 0, div_mask = DIV_MASK;
unsigned long mux_reg;
unsigned long flags;
@@ -344,73 +394,265 @@ static int exynos5433_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
spin_lock_irqsave(cpuclk->lock, flags);
/* select apll as the alternate parent */
- mux_reg = readl(base + E5433_MUX_SEL2);
- writel(mux_reg & ~1, base + E5433_MUX_SEL2);
- wait_until_mux_stable(base + E5433_MUX_STAT2, 0, 1);
+ mux_reg = readl(base + regs->mux_sel);
+ writel(mux_reg & ~1, base + regs->mux_sel);
+ wait_until_mux_stable(base + regs->mux_stat, 0, MUX_MASK, 1);
- exynos5433_set_safe_div(base, div, div_mask);
+ exynos_set_safe_div(cpuclk, div, div_mask);
spin_unlock_irqrestore(cpuclk->lock, flags);
return 0;
}
+/* ---- Exynos850 ----------------------------------------------------------- */
+
+#define E850_DIV_RATIO_MASK GENMASK(3, 0)
+#define E850_BUSY_MASK BIT(16)
+
+/* Max time for divider or mux to stabilize, usec */
+#define E850_DIV_MUX_STAB_TIME 100
+/* OSCCLK clock rate, Hz */
+#define E850_OSCCLK (26 * MHZ)
+
+static const struct exynos_cpuclk_regs e850cl0_cpuclk_regs = {
+ .mux = 0x100c,
+ .divs = { 0x1800, 0x1808, 0x180c, 0x1810 },
+};
+
+static const struct exynos_cpuclk_regs e850cl1_cpuclk_regs = {
+ .mux = 0x1000,
+ .divs = { 0x1800, 0x1808, 0x180c, 0x1810 },
+};
+
/*
- * This notifier function is called for the pre-rate and post-rate change
- * notifications of the parent clock of cpuclk.
+ * Set alternate parent rate to "rate" value or less.
+ *
+ * rate: Desired alt_parent rate, or 0 for max alt_parent rate
+ *
+ * Exynos850 doesn't have CPU clock divider in CMU_CPUCLx block (CMUREF divider
+ * doesn't affect CPU speed). So CPUCLx_SWITCH divider from CMU_TOP is used
+ * instead to adjust alternate parent speed.
+ *
+ * It's possible to use clk_set_max_rate() instead of this function, but it
+ * would set overly pessimistic rate values to alternate parent.
*/
-static int exynos_cpuclk_notifier_cb(struct notifier_block *nb,
- unsigned long event, void *data)
+static int exynos850_alt_parent_set_max_rate(const struct clk_hw *alt_parent,
+ unsigned long rate)
{
- struct clk_notifier_data *ndata = data;
- struct exynos_cpuclk *cpuclk;
- void __iomem *base;
- int err = 0;
+ struct clk_hw *clk_div, *clk_divp;
+ unsigned long divp_rate, div_rate, div;
+ int ret;
+
+ /* Divider from CMU_TOP */
+ clk_div = clk_hw_get_parent(alt_parent);
+ if (!clk_div)
+ return -ENOENT;
+ /* Divider's parent from CMU_TOP */
+ clk_divp = clk_hw_get_parent(clk_div);
+ if (!clk_divp)
+ return -ENOENT;
+ /* Divider input rate */
+ divp_rate = clk_hw_get_rate(clk_divp);
+ if (!divp_rate)
+ return -EINVAL;
- cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb);
- base = cpuclk->ctrl_base;
+ /* Calculate new alt_parent rate for integer divider value */
+ if (rate == 0)
+ div = 1;
+ else
+ div = DIV_ROUND_UP(divp_rate, rate);
+ div_rate = DIV_ROUND_UP(divp_rate, div);
+ WARN_ON(div >= MAX_DIV);
- if (event == PRE_RATE_CHANGE)
- err = exynos_cpuclk_pre_rate_change(ndata, cpuclk, base);
- else if (event == POST_RATE_CHANGE)
- err = exynos_cpuclk_post_rate_change(ndata, cpuclk, base);
+ /* alt_parent will propagate this change up to the divider */
+ ret = clk_set_rate(alt_parent->clk, div_rate);
+ if (ret)
+ return ret;
+ udelay(E850_DIV_MUX_STAB_TIME);
- return notifier_from_errno(err);
+ return 0;
+}
+
+/* Handler for pre-rate change notification from parent clock */
+static int exynos850_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
+ struct exynos_cpuclk *cpuclk)
+{
+ const unsigned int shifts[4] = { 16, 12, 8, 4 }; /* E850_CPU_DIV0() */
+ const struct exynos_cpuclk_regs * const regs = cpuclk->chip->regs;
+ const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
+ const struct clk_hw *alt_parent = cpuclk->alt_parent;
+ void __iomem *base = cpuclk->base;
+ unsigned long alt_prate = clk_hw_get_rate(alt_parent);
+ unsigned long flags;
+ u32 mux_reg;
+ size_t i;
+ int ret;
+
+ /* No actions are needed when switching to or from OSCCLK parent */
+ if (ndata->new_rate == E850_OSCCLK || ndata->old_rate == E850_OSCCLK)
+ return 0;
+
+ /* Find out the divider values to use for clock data */
+ while ((cfg_data->prate * 1000) != ndata->new_rate) {
+ if (cfg_data->prate == 0)
+ return -EINVAL;
+ cfg_data++;
+ }
+
+ /*
+ * If the old parent clock speed is less than the clock speed of
+ * the alternate parent, then it should be ensured that at no point
+ * the armclk speed is more than the old_prate until the dividers are
+	 * set. Also work around the issue of the dividers being set to lower
+	 * values before the parent clock speed is set to the new lower speed
+ * (this can result in too high speed of armclk output clocks).
+ */
+ if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
+ unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);
+
+ ret = exynos850_alt_parent_set_max_rate(alt_parent, tmp_rate);
+ if (ret)
+ return ret;
+ }
+
+ spin_lock_irqsave(cpuclk->lock, flags);
+
+ /* Select the alternate parent */
+ mux_reg = readl(base + regs->mux);
+ writel(mux_reg | 1, base + regs->mux);
+ wait_until_mux_stable(base + regs->mux, 16, 1, 0);
+
+ /* Alternate parent is active now. Set the dividers */
+ for (i = 0; i < ARRAY_SIZE(shifts); ++i) {
+ unsigned long div = (cfg_data->div0 >> shifts[i]) & 0xf;
+ u32 val;
+
+ val = readl(base + regs->divs[i]);
+ val = (val & ~E850_DIV_RATIO_MASK) | div;
+ writel(val, base + regs->divs[i]);
+ wait_until_divider_stable(base + regs->divs[i], E850_BUSY_MASK);
+ }
+
+ spin_unlock_irqrestore(cpuclk->lock, flags);
+
+ return 0;
+}
+
+/* Handler for post-rate change notification from parent clock */
+static int exynos850_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
+ struct exynos_cpuclk *cpuclk)
+{
+ const struct exynos_cpuclk_regs * const regs = cpuclk->chip->regs;
+ const struct clk_hw *alt_parent = cpuclk->alt_parent;
+ void __iomem *base = cpuclk->base;
+ unsigned long flags;
+ u32 mux_reg;
+
+ /* No actions are needed when switching to or from OSCCLK parent */
+ if (ndata->new_rate == E850_OSCCLK || ndata->old_rate == E850_OSCCLK)
+ return 0;
+
+ spin_lock_irqsave(cpuclk->lock, flags);
+
+ /* Select main parent (PLL) for mux */
+ mux_reg = readl(base + regs->mux);
+ writel(mux_reg & ~1, base + regs->mux);
+ wait_until_mux_stable(base + regs->mux, 16, 1, 0);
+
+ spin_unlock_irqrestore(cpuclk->lock, flags);
+
+ /* Set alt_parent rate back to max */
+ return exynos850_alt_parent_set_max_rate(alt_parent, 0);
+}
+
+/* -------------------------------------------------------------------------- */
+
+/* Common round rate callback usable for all types of CPU clocks */
+static long exynos_cpuclk_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate)
+{
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+ *prate = clk_hw_round_rate(parent, drate);
+ return *prate;
}
+/* Common recalc rate callback usable for all types of CPU clocks */
+static unsigned long exynos_cpuclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ /*
+ * The CPU clock output (armclk) rate is the same as its parent
+ * rate. Although there exist certain dividers inside the CPU
+ * clock block that could be used to divide the parent clock,
+ * the driver does not make use of them currently, except during
+ * frequency transitions.
+ */
+ return parent_rate;
+}
+
+static const struct clk_ops exynos_cpuclk_clk_ops = {
+ .recalc_rate = exynos_cpuclk_recalc_rate,
+ .round_rate = exynos_cpuclk_round_rate,
+};
+
/*
* This notifier function is called for the pre-rate and post-rate change
* notifications of the parent clock of cpuclk.
*/
-static int exynos5433_cpuclk_notifier_cb(struct notifier_block *nb,
- unsigned long event, void *data)
+static int exynos_cpuclk_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
{
struct clk_notifier_data *ndata = data;
struct exynos_cpuclk *cpuclk;
- void __iomem *base;
int err = 0;
cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb);
- base = cpuclk->ctrl_base;
if (event == PRE_RATE_CHANGE)
- err = exynos5433_cpuclk_pre_rate_change(ndata, cpuclk, base);
+ err = cpuclk->chip->pre_rate_cb(ndata, cpuclk);
else if (event == POST_RATE_CHANGE)
- err = exynos5433_cpuclk_post_rate_change(ndata, cpuclk, base);
+ err = cpuclk->chip->post_rate_cb(ndata, cpuclk);
return notifier_from_errno(err);
}
+static const struct exynos_cpuclk_chip exynos_clkcpu_chips[] = {
+ [CPUCLK_LAYOUT_E4210] = {
+ .regs = &e4210_cpuclk_regs,
+ .pre_rate_cb = exynos_cpuclk_pre_rate_change,
+ .post_rate_cb = exynos_cpuclk_post_rate_change,
+ },
+ [CPUCLK_LAYOUT_E5433] = {
+ .regs = &e5433_cpuclk_regs,
+ .pre_rate_cb = exynos5433_cpuclk_pre_rate_change,
+ .post_rate_cb = exynos5433_cpuclk_post_rate_change,
+ },
+ [CPUCLK_LAYOUT_E850_CL0] = {
+ .regs = &e850cl0_cpuclk_regs,
+ .pre_rate_cb = exynos850_cpuclk_pre_rate_change,
+ .post_rate_cb = exynos850_cpuclk_post_rate_change,
+ },
+ [CPUCLK_LAYOUT_E850_CL1] = {
+ .regs = &e850cl1_cpuclk_regs,
+ .pre_rate_cb = exynos850_cpuclk_pre_rate_change,
+ .post_rate_cb = exynos850_cpuclk_post_rate_change,
+ },
+};
+
/* helper function to register a CPU clock */
static int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
- unsigned int lookup_id, const char *name,
- const struct clk_hw *parent, const struct clk_hw *alt_parent,
- unsigned long offset, const struct exynos_cpuclk_cfg_data *cfg,
- unsigned long num_cfgs, unsigned long flags)
+ const struct samsung_cpu_clock *clk_data)
{
+ const struct clk_hw *parent, *alt_parent;
+ struct clk_hw **hws;
struct exynos_cpuclk *cpuclk;
struct clk_init_data init;
const char *parent_name;
+ unsigned int num_cfgs;
int ret = 0;
+ hws = ctx->clk_data.hws;
+ parent = hws[clk_data->parent_id];
+ alt_parent = hws[clk_data->alt_parent_id];
if (IS_ERR(parent) || IS_ERR(alt_parent)) {
pr_err("%s: invalid parent clock(s)\n", __func__);
return -EINVAL;
@@ -422,7 +664,7 @@ static int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
parent_name = clk_hw_get_name(parent);
- init.name = name;
+ init.name = clk_data->name;
init.flags = CLK_SET_RATE_PARENT;
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -430,23 +672,25 @@ static int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
cpuclk->alt_parent = alt_parent;
cpuclk->hw.init = &init;
- cpuclk->ctrl_base = ctx->reg_base + offset;
+ cpuclk->base = ctx->reg_base + clk_data->offset;
cpuclk->lock = &ctx->lock;
- cpuclk->flags = flags;
- if (flags & CLK_CPU_HAS_E5433_REGS_LAYOUT)
- cpuclk->clk_nb.notifier_call = exynos5433_cpuclk_notifier_cb;
- else
- cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb;
-
+ cpuclk->flags = clk_data->flags;
+ cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb;
+ cpuclk->chip = &exynos_clkcpu_chips[clk_data->reg_layout];
ret = clk_notifier_register(parent->clk, &cpuclk->clk_nb);
if (ret) {
pr_err("%s: failed to register clock notifier for %s\n",
- __func__, name);
+ __func__, clk_data->name);
goto free_cpuclk;
}
- cpuclk->cfg = kmemdup(cfg, sizeof(*cfg) * num_cfgs, GFP_KERNEL);
+ /* Find count of configuration rates in cfg */
+ for (num_cfgs = 0; clk_data->cfg[num_cfgs].prate != 0; )
+ num_cfgs++;
+
+ cpuclk->cfg = kmemdup(clk_data->cfg, sizeof(*clk_data->cfg) * num_cfgs,
+ GFP_KERNEL);
if (!cpuclk->cfg) {
ret = -ENOMEM;
goto unregister_clk_nb;
@@ -454,11 +698,12 @@ static int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
ret = clk_hw_register(NULL, &cpuclk->hw);
if (ret) {
- pr_err("%s: could not register cpuclk %s\n", __func__, name);
+ pr_err("%s: could not register cpuclk %s\n", __func__,
+ clk_data->name);
goto free_cpuclk_data;
}
- samsung_clk_add_lookup(ctx, &cpuclk->hw, lookup_id);
+ samsung_clk_add_lookup(ctx, &cpuclk->hw, clk_data->id);
return 0;
free_cpuclk_data:
@@ -474,16 +719,7 @@ void __init samsung_clk_register_cpu(struct samsung_clk_provider *ctx,
const struct samsung_cpu_clock *list, unsigned int nr_clk)
{
unsigned int idx;
- unsigned int num_cfgs;
- struct clk_hw **hws = ctx->clk_data.hws;
- for (idx = 0; idx < nr_clk; idx++, list++) {
- /* find count of configuration rates in cfg */
- for (num_cfgs = 0; list->cfg[num_cfgs].prate != 0; )
- num_cfgs++;
-
- exynos_register_cpu_clock(ctx, list->id, list->name, hws[list->parent_id],
- hws[list->alt_parent_id], list->offset, list->cfg, num_cfgs,
- list->flags);
- }
+ for (idx = 0; idx < nr_clk; idx++)
+ exynos_register_cpu_clock(ctx, &list[idx]);
}
diff --git a/drivers/clk/samsung/clk-cpu.h b/drivers/clk/samsung/clk-cpu.h
index 0164bd9ad021b..892843611b0ac 100644
--- a/drivers/clk/samsung/clk-cpu.h
+++ b/drivers/clk/samsung/clk-cpu.h
@@ -8,7 +8,24 @@
#ifndef __SAMSUNG_CLK_CPU_H
#define __SAMSUNG_CLK_CPU_H
-#include "clk.h"
+/* The CPU clock registers have DIV1 configuration register */
+#define CLK_CPU_HAS_DIV1 BIT(0)
+/* When ALT parent is active, debug clocks need safe divider values */
+#define CLK_CPU_NEEDS_DEBUG_ALT_DIV BIT(1)
+
+/**
+ * enum exynos_cpuclk_layout - CPU clock registers layout compatibility
+ * @CPUCLK_LAYOUT_E4210: Exynos4210 compatible layout
+ * @CPUCLK_LAYOUT_E5433: Exynos5433 compatible layout
+ * @CPUCLK_LAYOUT_E850_CL0: Exynos850 cluster 0 compatible layout
+ * @CPUCLK_LAYOUT_E850_CL1: Exynos850 cluster 1 compatible layout
+ */
+enum exynos_cpuclk_layout {
+ CPUCLK_LAYOUT_E4210,
+ CPUCLK_LAYOUT_E5433,
+ CPUCLK_LAYOUT_E850_CL0,
+ CPUCLK_LAYOUT_E850_CL1,
+};
/**
* struct exynos_cpuclk_cfg_data - config data to setup cpu clocks
@@ -28,38 +45,4 @@ struct exynos_cpuclk_cfg_data {
unsigned long div1;
};
-/**
- * struct exynos_cpuclk - information about clock supplied to a CPU core
- * @hw: handle between CCF and CPU clock
- * @alt_parent: alternate parent clock to use when switching the speed
- * of the primary parent clock
- * @ctrl_base: base address of the clock controller
- * @lock: cpu clock domain register access lock
- * @cfg: cpu clock rate configuration data
- * @num_cfgs: number of array elements in @cfg array
- * @clk_nb: clock notifier registered for changes in clock speed of the
- * primary parent clock
- * @flags: configuration flags for the CPU clock
- *
- * This structure holds information required for programming the CPU clock for
- * various clock speeds.
- */
-struct exynos_cpuclk {
- struct clk_hw hw;
- const struct clk_hw *alt_parent;
- void __iomem *ctrl_base;
- spinlock_t *lock;
- const struct exynos_cpuclk_cfg_data *cfg;
- const unsigned long num_cfgs;
- struct notifier_block clk_nb;
- unsigned long flags;
-
-/* The CPU clock registers have DIV1 configuration register */
-#define CLK_CPU_HAS_DIV1 (1 << 0)
-/* When ALT parent is active, debug clocks need safe divider values */
-#define CLK_CPU_NEEDS_DEBUG_ALT_DIV (1 << 1)
-/* The CPU clock registers have Exynos5433-compatible layout */
-#define CLK_CPU_HAS_E5433_REGS_LAYOUT (1 << 2)
-};
-
#endif /* __SAMSUNG_CLK_CPU_H */
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index a024616676640..cd4fec323a427 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -775,7 +775,7 @@ static const struct exynos_cpuclk_cfg_data e3250_armclk_d[] __initconst = {
static const struct samsung_cpu_clock exynos3250_cpu_clks[] __initconst = {
CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MPLL_USER_C,
- CLK_CPU_HAS_DIV1, 0x14200, e3250_armclk_d),
+ CLK_CPU_HAS_DIV1, 0x14000, CPUCLK_LAYOUT_E4210, e3250_armclk_d),
};
static void __init exynos3_core_down_clock(void __iomem *reg_base)
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 4ec41221e68f4..a026ccca7315f 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -1252,17 +1252,20 @@ static const struct exynos_cpuclk_cfg_data e4412_armclk_d[] __initconst = {
static const struct samsung_cpu_clock exynos4210_cpu_clks[] __initconst = {
CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_SCLK_MPLL,
- CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1, 0x14200, e4210_armclk_d),
+ CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1, 0x14000,
+ CPUCLK_LAYOUT_E4210, e4210_armclk_d),
};
static const struct samsung_cpu_clock exynos4212_cpu_clks[] __initconst = {
CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MPLL_USER_C,
- CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1, 0x14200, e4212_armclk_d),
+ CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1, 0x14000,
+ CPUCLK_LAYOUT_E4210, e4212_armclk_d),
};
static const struct samsung_cpu_clock exynos4412_cpu_clks[] __initconst = {
CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MPLL_USER_C,
- CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1, 0x14200, e4412_armclk_d),
+ CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1, 0x14000,
+ CPUCLK_LAYOUT_E4210, e4412_armclk_d),
};
/* register exynos4 clocks */
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 8ebe6155d8b70..e02e7c013f3d2 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -776,8 +776,9 @@ static const struct exynos_cpuclk_cfg_data exynos5250_armclk_d[] __initconst = {
};
static const struct samsung_cpu_clock exynos5250_cpu_clks[] __initconst = {
- CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MPLL, CLK_CPU_HAS_DIV1, 0x200,
- exynos5250_armclk_d),
+ CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MPLL,
+ CLK_CPU_HAS_DIV1, 0x0, CPUCLK_LAYOUT_E4210,
+ exynos5250_armclk_d),
};
static const struct of_device_id ext_clk_match[] __initconst = {
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 199843f12ae56..c630135c686bb 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -1555,17 +1555,17 @@ static const struct exynos_cpuclk_cfg_data exynos5420_kfcclk_d[] __initconst = {
};
static const struct samsung_cpu_clock exynos5420_cpu_clks[] __initconst = {
- CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MSPLL_CPU, 0, 0x200,
- exynos5420_eglclk_d),
- CPU_CLK(CLK_KFC_CLK, "kfcclk", CLK_MOUT_KPLL, CLK_MOUT_MSPLL_KFC, 0, 0x28200,
- exynos5420_kfcclk_d),
+ CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MSPLL_CPU, 0,
+ 0x0, CPUCLK_LAYOUT_E4210, exynos5420_eglclk_d),
+ CPU_CLK(CLK_KFC_CLK, "kfcclk", CLK_MOUT_KPLL, CLK_MOUT_MSPLL_KFC, 0,
+ 0x28000, CPUCLK_LAYOUT_E4210, exynos5420_kfcclk_d),
};
static const struct samsung_cpu_clock exynos5800_cpu_clks[] __initconst = {
- CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MSPLL_CPU, 0, 0x200,
- exynos5800_eglclk_d),
- CPU_CLK(CLK_KFC_CLK, "kfcclk", CLK_MOUT_KPLL, CLK_MOUT_MSPLL_KFC, 0, 0x28200,
- exynos5420_kfcclk_d),
+ CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MSPLL_CPU, 0,
+ 0x0, CPUCLK_LAYOUT_E4210, exynos5800_eglclk_d),
+ CPU_CLK(CLK_KFC_CLK, "kfcclk", CLK_MOUT_KPLL, CLK_MOUT_MSPLL_KFC, 0,
+ 0x28000, CPUCLK_LAYOUT_E4210, exynos5420_kfcclk_d),
};
static const struct of_device_id ext_clk_match[] __initconst = {
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 6bfc5d0cd9247..609d31a7aa524 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -3700,9 +3700,8 @@ static const struct exynos_cpuclk_cfg_data exynos5433_apolloclk_d[] __initconst
static const struct samsung_cpu_clock apollo_cpu_clks[] __initconst = {
CPU_CLK(CLK_SCLK_APOLLO, "apolloclk", CLK_MOUT_APOLLO_PLL,
- CLK_MOUT_BUS_PLL_APOLLO_USER,
- CLK_CPU_HAS_E5433_REGS_LAYOUT, 0x200,
- exynos5433_apolloclk_d),
+ CLK_MOUT_BUS_PLL_APOLLO_USER, 0, 0x0,
+ CPUCLK_LAYOUT_E5433, exynos5433_apolloclk_d),
};
static const struct samsung_cmu_info apollo_cmu_info __initconst = {
@@ -3945,9 +3944,8 @@ static const struct exynos_cpuclk_cfg_data exynos5433_atlasclk_d[] __initconst =
static const struct samsung_cpu_clock atlas_cpu_clks[] __initconst = {
CPU_CLK(CLK_SCLK_ATLAS, "atlasclk", CLK_MOUT_ATLAS_PLL,
- CLK_MOUT_BUS_PLL_ATLAS_USER,
- CLK_CPU_HAS_E5433_REGS_LAYOUT, 0x200,
- exynos5433_atlasclk_d),
+ CLK_MOUT_BUS_PLL_ATLAS_USER, 0, 0x0,
+ CPUCLK_LAYOUT_E5433, exynos5433_atlasclk_d),
};
static const struct samsung_cmu_info atlas_cmu_info __initconst = {
diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
index bdc1eef7d6e54..82cfa22c07888 100644
--- a/drivers/clk/samsung/clk-exynos850.c
+++ b/drivers/clk/samsung/clk-exynos850.c
@@ -26,7 +26,7 @@
#define CLKS_NR_IS (CLK_GOUT_IS_SYSREG_PCLK + 1)
#define CLKS_NR_MFCMSCL (CLK_GOUT_MFCMSCL_SYSREG_PCLK + 1)
#define CLKS_NR_PERI (CLK_GOUT_WDT1_PCLK + 1)
-#define CLKS_NR_CORE (CLK_GOUT_SYSREG_CORE_PCLK + 1)
+#define CLKS_NR_CORE (CLK_GOUT_SPDMA_CORE_ACLK + 1)
#define CLKS_NR_DPU (CLK_GOUT_DPU_SYSREG_PCLK + 1)
/* ---- CMU_TOP ------------------------------------------------------------- */
@@ -605,7 +605,7 @@ static const struct samsung_div_clock apm_div_clks[] __initconst = {
static const struct samsung_gate_clock apm_gate_clks[] __initconst = {
GATE(CLK_GOUT_CLKCMU_CMGP_BUS, "gout_clkcmu_cmgp_bus", "dout_apm_bus",
- CLK_CON_GAT_CLKCMU_CMGP_BUS, 21, 0, 0),
+ CLK_CON_GAT_CLKCMU_CMGP_BUS, 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_CLKCMU_CHUB_BUS, "gout_clkcmu_chub_bus",
"mout_clkcmu_chub_bus",
CLK_CON_GAT_GATE_CLKCMU_CHUB_BUS, 21, 0, 0),
@@ -974,19 +974,19 @@ static const struct samsung_fixed_rate_clock cmgp_fixed_clks[] __initconst = {
static const struct samsung_mux_clock cmgp_mux_clks[] __initconst = {
MUX(CLK_MOUT_CMGP_ADC, "mout_cmgp_adc", mout_cmgp_adc_p,
CLK_CON_MUX_CLK_CMGP_ADC, 0, 1),
- MUX(CLK_MOUT_CMGP_USI0, "mout_cmgp_usi0", mout_cmgp_usi0_p,
- CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0, 0, 1),
- MUX(CLK_MOUT_CMGP_USI1, "mout_cmgp_usi1", mout_cmgp_usi1_p,
- CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1, 0, 1),
+ MUX_F(CLK_MOUT_CMGP_USI0, "mout_cmgp_usi0", mout_cmgp_usi0_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0, 0, 1, CLK_SET_RATE_PARENT, 0),
+ MUX_F(CLK_MOUT_CMGP_USI1, "mout_cmgp_usi1", mout_cmgp_usi1_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1, 0, 1, CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_div_clock cmgp_div_clks[] __initconst = {
DIV(CLK_DOUT_CMGP_ADC, "dout_cmgp_adc", "gout_clkcmu_cmgp_bus",
CLK_CON_DIV_DIV_CLK_CMGP_ADC, 0, 4),
- DIV(CLK_DOUT_CMGP_USI0, "dout_cmgp_usi0", "mout_cmgp_usi0",
- CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0, 0, 5),
- DIV(CLK_DOUT_CMGP_USI1, "dout_cmgp_usi1", "mout_cmgp_usi1",
- CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1, 0, 5),
+ DIV_F(CLK_DOUT_CMGP_USI0, "dout_cmgp_usi0", "mout_cmgp_usi0",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0, 0, 5, CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_CMGP_USI1, "dout_cmgp_usi1", "mout_cmgp_usi1",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1, 0, 5, CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_gate_clock cmgp_gate_clks[] __initconst = {
@@ -1001,12 +1001,12 @@ static const struct samsung_gate_clock cmgp_gate_clks[] __initconst = {
"gout_clkcmu_cmgp_bus",
CLK_CON_GAT_GOUT_CMGP_GPIO_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_CMGP_USI0_IPCLK, "gout_cmgp_usi0_ipclk", "dout_cmgp_usi0",
- CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK, 21, 0, 0),
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK, 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_CMGP_USI0_PCLK, "gout_cmgp_usi0_pclk",
"gout_clkcmu_cmgp_bus",
CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_PCLK, 21, 0, 0),
GATE(CLK_GOUT_CMGP_USI1_IPCLK, "gout_cmgp_usi1_ipclk", "dout_cmgp_usi1",
- CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK, 21, 0, 0),
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK, 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_CMGP_USI1_PCLK, "gout_cmgp_usi1_pclk",
"gout_clkcmu_cmgp_bus",
CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_PCLK, 21, 0, 0),
@@ -1557,8 +1557,9 @@ static const struct samsung_mux_clock peri_mux_clks[] __initconst = {
mout_peri_uart_user_p, PLL_CON0_MUX_CLKCMU_PERI_UART_USER, 4, 1),
MUX(CLK_MOUT_PERI_HSI2C_USER, "mout_peri_hsi2c_user",
mout_peri_hsi2c_user_p, PLL_CON0_MUX_CLKCMU_PERI_HSI2C_USER, 4, 1),
- MUX(CLK_MOUT_PERI_SPI_USER, "mout_peri_spi_user", mout_peri_spi_user_p,
- PLL_CON0_MUX_CLKCMU_PERI_SPI_USER, 4, 1),
+ MUX_F(CLK_MOUT_PERI_SPI_USER, "mout_peri_spi_user",
+ mout_peri_spi_user_p, PLL_CON0_MUX_CLKCMU_PERI_SPI_USER, 4, 1,
+ CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_div_clock peri_div_clks[] __initconst = {
@@ -1568,8 +1569,8 @@ static const struct samsung_div_clock peri_div_clks[] __initconst = {
CLK_CON_DIV_DIV_CLK_PERI_HSI2C_1, 0, 5),
DIV(CLK_DOUT_PERI_HSI2C2, "dout_peri_hsi2c2", "gout_peri_hsi2c2",
CLK_CON_DIV_DIV_CLK_PERI_HSI2C_2, 0, 5),
- DIV(CLK_DOUT_PERI_SPI0, "dout_peri_spi0", "mout_peri_spi_user",
- CLK_CON_DIV_DIV_CLK_PERI_SPI_0, 0, 5),
+ DIV_F(CLK_DOUT_PERI_SPI0, "dout_peri_spi0", "mout_peri_spi_user",
+ CLK_CON_DIV_DIV_CLK_PERI_SPI_0, 0, 5, CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
@@ -1611,7 +1612,7 @@ static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
"mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK, 21, 0, 0),
GATE(CLK_GOUT_SPI0_IPCLK, "gout_spi0_ipclk", "dout_peri_spi0",
- CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK, 21, 0, 0),
+ CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK, 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_SPI0_PCLK, "gout_spi0_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK, 21, 0, 0),
GATE(CLK_GOUT_SYSREG_PERI_PCLK, "gout_sysreg_peri_pclk",
@@ -1667,6 +1668,8 @@ CLK_OF_DECLARE(exynos850_cmu_peri, "samsung,exynos850-cmu-peri",
#define CLK_CON_GAT_GOUT_CORE_GPIO_CORE_PCLK 0x2044
#define CLK_CON_GAT_GOUT_CORE_MMC_EMBD_I_ACLK 0x20e8
#define CLK_CON_GAT_GOUT_CORE_MMC_EMBD_SDCLKIN 0x20ec
+#define CLK_CON_GAT_GOUT_CORE_PDMA_ACLK 0x20f0
+#define CLK_CON_GAT_GOUT_CORE_SPDMA_ACLK 0x2124
#define CLK_CON_GAT_GOUT_CORE_SSS_I_ACLK 0x2128
#define CLK_CON_GAT_GOUT_CORE_SSS_I_PCLK 0x212c
#define CLK_CON_GAT_GOUT_CORE_SYSREG_CORE_PCLK 0x2130
@@ -1683,6 +1686,8 @@ static const unsigned long core_clk_regs[] __initconst = {
CLK_CON_GAT_GOUT_CORE_GPIO_CORE_PCLK,
CLK_CON_GAT_GOUT_CORE_MMC_EMBD_I_ACLK,
CLK_CON_GAT_GOUT_CORE_MMC_EMBD_SDCLKIN,
+ CLK_CON_GAT_GOUT_CORE_PDMA_ACLK,
+ CLK_CON_GAT_GOUT_CORE_SPDMA_ACLK,
CLK_CON_GAT_GOUT_CORE_SSS_I_ACLK,
CLK_CON_GAT_GOUT_CORE_SSS_I_PCLK,
CLK_CON_GAT_GOUT_CORE_SYSREG_CORE_PCLK,
@@ -1726,6 +1731,10 @@ static const struct samsung_gate_clock core_gate_clks[] __initconst = {
GATE(CLK_GOUT_MMC_EMBD_SDCLKIN, "gout_mmc_embd_sdclkin",
"mout_core_mmc_embd_user", CLK_CON_GAT_GOUT_CORE_MMC_EMBD_SDCLKIN,
21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PDMA_CORE_ACLK, "gout_pdma_core_aclk",
+ "mout_core_bus_user", CLK_CON_GAT_GOUT_CORE_PDMA_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_SPDMA_CORE_ACLK, "gout_spdma_core_aclk",
+ "mout_core_bus_user", CLK_CON_GAT_GOUT_CORE_SPDMA_ACLK, 21, 0, 0),
GATE(CLK_GOUT_SSS_ACLK, "gout_sss_aclk", "mout_core_sss_user",
CLK_CON_GAT_GOUT_CORE_SSS_I_ACLK, 21, 0, 0),
GATE(CLK_GOUT_SSS_PCLK, "gout_sss_pclk", "dout_core_busp",
diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
index 782993951fff8..d065e343a85dd 100644
--- a/drivers/clk/samsung/clk-gs101.c
+++ b/drivers/clk/samsung/clk-gs101.c
@@ -20,11 +20,12 @@
#define CLKS_NR_TOP (CLK_GOUT_CMU_TPU_UART + 1)
#define CLKS_NR_APM (CLK_APM_PLL_DIV16_APM + 1)
#define CLKS_NR_MISC (CLK_GOUT_MISC_XIU_D_MISC_ACLK + 1)
+#define CLKS_NR_PERIC0 (CLK_GOUT_PERIC0_SYSREG_PERIC0_PCLK + 1)
+#define CLKS_NR_PERIC1 (CLK_GOUT_PERIC1_SYSREG_PERIC1_PCLK + 1)
/* ---- CMU_TOP ------------------------------------------------------------- */
/* Register Offset definitions for CMU_TOP (0x1e080000) */
-
#define PLL_LOCKTIME_PLL_SHARED0 0x0000
#define PLL_LOCKTIME_PLL_SHARED1 0x0004
#define PLL_LOCKTIME_PLL_SHARED2 0x0008
@@ -2478,6 +2479,936 @@ static const struct samsung_cmu_info misc_cmu_info __initconst = {
.clk_name = "bus",
};
+static void __init gs101_cmu_misc_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &misc_cmu_info);
+}
+
+/* Register CMU_MISC early, as it's needed for MCT timer */
+CLK_OF_DECLARE(gs101_cmu_misc, "google,gs101-cmu-misc",
+ gs101_cmu_misc_init);
+
+/* ---- CMU_PERIC0 ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC0 (0x10800000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER 0x0600
+#define PLL_CON1_MUX_CLKCMU_PERIC0_BUS_USER 0x0604
+#define PLL_CON0_MUX_CLKCMU_PERIC0_I3C_USER 0x0610
+#define PLL_CON1_MUX_CLKCMU_PERIC0_I3C_USER 0x0614
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI0_UART_USER 0x0620
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI0_UART_USER 0x0624
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER 0x0640
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI14_USI_USER 0x0644
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI1_USI_USER 0x0650
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI1_USI_USER 0x0654
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI2_USI_USER 0x0660
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI2_USI_USER 0x0664
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI3_USI_USER 0x0670
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI3_USI_USER 0x0674
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI4_USI_USER 0x0680
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI4_USI_USER 0x0684
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI5_USI_USER 0x0690
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI5_USI_USER 0x0694
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI6_USI_USER 0x06a0
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI6_USI_USER 0x06a4
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI7_USI_USER 0x06b0
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI7_USI_USER 0x06b4
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI8_USI_USER 0x06c0
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI8_USI_USER 0x06c4
+#define PERIC0_CMU_PERIC0_CONTROLLER_OPTION 0x0800
+#define CLKOUT_CON_BLK_PERIC0_CMU_PERIC0_CLKOUT0 0x0810
+#define CLK_CON_DIV_DIV_CLK_PERIC0_I3C 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI0_UART 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI1_USI 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI2_USI 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI3_USI 0x1820
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI4_USI 0x1824
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI5_USI 0x1828
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI6_USI 0x182c
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI7_USI 0x1830
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI8_USI 0x1834
+#define CLK_CON_BUF_CLKBUF_PERIC0_IP 0x2000
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_OSCCLK_IPCLKPORT_CLK 0x2008
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPC_PERIC0_IPCLKPORT_PCLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_0 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_1 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10 0x2024
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_12 0x202c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_13 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_14 0x2034
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_15 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_2 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_3 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4 0x2044
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5 0x2048
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7 0x2050
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8 0x2054
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_0 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_1 0x2060
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10 0x2064
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11 0x2068
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_12 0x206c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_13 0x2070
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_14 0x2074
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_15 0x2078
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_2 0x207c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_3 0x2080
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4 0x2084
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5 0x2088
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6 0x208c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7 0x2090
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8 0x2094
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9 0x2098
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_0 0x209c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_2 0x20a4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_0 0x20a8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_2 0x20b0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK 0x20b4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_I3C_IPCLKPORT_CLK 0x20b8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI0_UART_IPCLKPORT_CLK 0x20bc
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI14_USI_IPCLKPORT_CLK 0x20c4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI1_USI_IPCLKPORT_CLK 0x20c8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI2_USI_IPCLKPORT_CLK 0x20cc
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI3_USI_IPCLKPORT_CLK 0x20d0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI4_USI_IPCLKPORT_CLK 0x20d4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI5_USI_IPCLKPORT_CLK 0x20d8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI6_USI_IPCLKPORT_CLK 0x20dc
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI7_USI_IPCLKPORT_CLK 0x20e0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI8_USI_IPCLKPORT_CLK 0x20e4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK 0x20e8
+#define DMYQCH_CON_PERIC0_TOP0_QCH_S1 0x3000
+#define DMYQCH_CON_PERIC0_TOP0_QCH_S2 0x3004
+#define DMYQCH_CON_PERIC0_TOP0_QCH_S3 0x3008
+#define DMYQCH_CON_PERIC0_TOP0_QCH_S4 0x300c
+#define DMYQCH_CON_PERIC0_TOP0_QCH_S5 0x3010
+#define DMYQCH_CON_PERIC0_TOP0_QCH_S6 0x3014
+#define DMYQCH_CON_PERIC0_TOP0_QCH_S7 0x3018
+#define DMYQCH_CON_PERIC0_TOP0_QCH_S8 0x301c
+#define PCH_CON_LHM_AXI_P_PERIC0_PCH 0x3020
+#define QCH_CON_D_TZPC_PERIC0_QCH 0x3024
+#define QCH_CON_GPC_PERIC0_QCH 0x3028
+#define QCH_CON_GPIO_PERIC0_QCH 0x302c
+#define QCH_CON_LHM_AXI_P_PERIC0_QCH 0x3030
+#define QCH_CON_PERIC0_CMU_PERIC0_QCH 0x3034
+#define QCH_CON_PERIC0_TOP0_QCH_I3C1 0x3038
+#define QCH_CON_PERIC0_TOP0_QCH_I3C2 0x303c
+#define QCH_CON_PERIC0_TOP0_QCH_I3C3 0x3040
+#define QCH_CON_PERIC0_TOP0_QCH_I3C4 0x3044
+#define QCH_CON_PERIC0_TOP0_QCH_I3C5 0x3048
+#define QCH_CON_PERIC0_TOP0_QCH_I3C6 0x304c
+#define QCH_CON_PERIC0_TOP0_QCH_I3C7 0x3050
+#define QCH_CON_PERIC0_TOP0_QCH_I3C8 0x3054
+#define QCH_CON_PERIC0_TOP0_QCH_USI1_USI 0x3058
+#define QCH_CON_PERIC0_TOP0_QCH_USI2_USI 0x305c
+#define QCH_CON_PERIC0_TOP0_QCH_USI3_USI 0x3060
+#define QCH_CON_PERIC0_TOP0_QCH_USI4_USI 0x3064
+#define QCH_CON_PERIC0_TOP0_QCH_USI5_USI 0x3068
+#define QCH_CON_PERIC0_TOP0_QCH_USI6_USI 0x306c
+#define QCH_CON_PERIC0_TOP0_QCH_USI7_USI 0x3070
+#define QCH_CON_PERIC0_TOP0_QCH_USI8_USI 0x3074
+#define QCH_CON_PERIC0_TOP1_QCH_USI0_UART 0x3078
+#define QCH_CON_PERIC0_TOP1_QCH_USI14_UART 0x307c
+#define QCH_CON_SYSREG_PERIC0_QCH 0x3080
+#define QUEUE_CTRL_REG_BLK_PERIC0_CMU_PERIC0 0x3c00
+
+static const unsigned long peric0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_I3C_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_I3C_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI0_UART_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI0_UART_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI14_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI1_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI1_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI2_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI2_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI3_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI3_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI4_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI4_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI5_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI5_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI6_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI6_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI7_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI7_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI8_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI8_USI_USER,
+ PERIC0_CMU_PERIC0_CONTROLLER_OPTION,
+ CLKOUT_CON_BLK_PERIC0_CMU_PERIC0_CLKOUT0,
+ CLK_CON_DIV_DIV_CLK_PERIC0_I3C,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI0_UART,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI1_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI2_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI3_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI4_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI5_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI6_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI7_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI8_USI,
+ CLK_CON_BUF_CLKBUF_PERIC0_IP,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPC_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_1,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_2,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_1,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_2,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_2,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_2,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_I3C_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI0_UART_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI14_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI1_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI2_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI3_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI4_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI5_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI6_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI7_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI8_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK,
+ DMYQCH_CON_PERIC0_TOP0_QCH_S1,
+ DMYQCH_CON_PERIC0_TOP0_QCH_S2,
+ DMYQCH_CON_PERIC0_TOP0_QCH_S3,
+ DMYQCH_CON_PERIC0_TOP0_QCH_S4,
+ DMYQCH_CON_PERIC0_TOP0_QCH_S5,
+ DMYQCH_CON_PERIC0_TOP0_QCH_S6,
+ DMYQCH_CON_PERIC0_TOP0_QCH_S7,
+ DMYQCH_CON_PERIC0_TOP0_QCH_S8,
+ PCH_CON_LHM_AXI_P_PERIC0_PCH,
+ QCH_CON_D_TZPC_PERIC0_QCH,
+ QCH_CON_GPC_PERIC0_QCH,
+ QCH_CON_GPIO_PERIC0_QCH,
+ QCH_CON_LHM_AXI_P_PERIC0_QCH,
+ QCH_CON_PERIC0_CMU_PERIC0_QCH,
+ QCH_CON_PERIC0_TOP0_QCH_I3C1,
+ QCH_CON_PERIC0_TOP0_QCH_I3C2,
+ QCH_CON_PERIC0_TOP0_QCH_I3C3,
+ QCH_CON_PERIC0_TOP0_QCH_I3C4,
+ QCH_CON_PERIC0_TOP0_QCH_I3C5,
+ QCH_CON_PERIC0_TOP0_QCH_I3C6,
+ QCH_CON_PERIC0_TOP0_QCH_I3C7,
+ QCH_CON_PERIC0_TOP0_QCH_I3C8,
+ QCH_CON_PERIC0_TOP0_QCH_USI1_USI,
+ QCH_CON_PERIC0_TOP0_QCH_USI2_USI,
+ QCH_CON_PERIC0_TOP0_QCH_USI3_USI,
+ QCH_CON_PERIC0_TOP0_QCH_USI4_USI,
+ QCH_CON_PERIC0_TOP0_QCH_USI5_USI,
+ QCH_CON_PERIC0_TOP0_QCH_USI6_USI,
+ QCH_CON_PERIC0_TOP0_QCH_USI7_USI,
+ QCH_CON_PERIC0_TOP0_QCH_USI8_USI,
+ QCH_CON_PERIC0_TOP1_QCH_USI0_UART,
+ QCH_CON_PERIC0_TOP1_QCH_USI14_UART,
+ QCH_CON_SYSREG_PERIC0_QCH,
+ QUEUE_CTRL_REG_BLK_PERIC0_CMU_PERIC0,
+};
+
+/* List of parent clocks for Muxes in CMU_PERIC0 */
+PNAME(mout_peric0_bus_user_p) = { "oscclk", "dout_cmu_peric0_bus" };
+PNAME(mout_peric0_i3c_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi0_uart_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi_usi_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+
+static const struct samsung_mux_clock peric0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC0_BUS_USER, "mout_peric0_bus_user",
+ mout_peric0_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_I3C_USER, "mout_peric0_i3c_user",
+ mout_peric0_i3c_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_I3C_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI0_UART_USER,
+ "mout_peric0_usi0_uart_user", mout_peric0_usi0_uart_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI0_UART_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI14_USI_USER,
+ "mout_peric0_usi14_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI1_USI_USER,
+ "mout_peric0_usi1_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI1_USI_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI2_USI_USER,
+ "mout_peric0_usi2_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI2_USI_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI3_USI_USER,
+ "mout_peric0_usi3_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI3_USI_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI4_USI_USER,
+ "mout_peric0_usi4_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI4_USI_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI5_USI_USER,
+ "mout_peric0_usi5_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI5_USI_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI6_USI_USER,
+ "mout_peric0_usi6_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI6_USI_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI7_USI_USER,
+ "mout_peric0_usi7_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI7_USI_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI8_USI_USER,
+ "mout_peric0_usi8_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI8_USI_USER, 4, 1),
+};
+
+static const struct samsung_div_clock peric0_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERIC0_I3C, "dout_peric0_i3c", "mout_peric0_i3c_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_I3C, 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI0_UART,
+ "dout_peric0_usi0_uart", "mout_peric0_usi0_uart_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI0_UART, 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI14_USI,
+ "dout_peric0_usi14_usi", "mout_peric0_usi14_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI, 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI1_USI,
+ "dout_peric0_usi1_usi", "mout_peric0_usi1_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI1_USI, 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI2_USI,
+ "dout_peric0_usi2_usi", "mout_peric0_usi2_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI2_USI, 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI3_USI,
+ "dout_peric0_usi3_usi", "mout_peric0_usi3_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI3_USI, 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI4_USI,
+ "dout_peric0_usi4_usi", "mout_peric0_usi4_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI4_USI, 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI5_USI,
+ "dout_peric0_usi5_usi", "mout_peric0_usi5_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI5_USI, 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI6_USI,
+ "dout_peric0_usi6_usi", "mout_peric0_usi6_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI6_USI, 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI7_USI,
+ "dout_peric0_usi7_usi", "mout_peric0_usi7_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI7_USI, 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI8_USI,
+ "dout_peric0_usi8_usi", "mout_peric0_usi8_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI8_USI, 0, 4),
+};
+
+static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
+ /* Disabling this clock makes the system hang. Mark the clock as critical. */
+ GATE(CLK_GOUT_PERIC0_PERIC0_CMU_PERIC0_PCLK,
+ "gout_peric0_peric0_cmu_peric0_pclk", "mout_peric0_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC0_CLK_PERIC0_OSCCLK_CLK,
+ "gout_peric0_clk_peric0_oscclk_clk", "oscclk",
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_OSCCLK_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_D_TZPC_PERIC0_PCLK,
+ "gout_peric0_d_tzpc_peric0_pclk", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_GPC_PERIC0_PCLK,
+ "gout_peric0_gpc_peric0_pclk", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPC_PERIC0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_GPIO_PERIC0_PCLK,
+ "gout_peric0_gpio_peric0_pclk", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ /* Disabling this clock makes the system hang. Mark the clock as critical. */
+ GATE(CLK_GOUT_PERIC0_LHM_AXI_P_PERIC0_I_CLK,
+ "gout_peric0_lhm_axi_p_peric0_i_clk", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_0,
+ "gout_peric0_peric0_top0_ipclk_0", "dout_peric0_usi1_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_1,
+ "gout_peric0_peric0_top0_ipclk_1", "dout_peric0_usi2_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_10,
+ "gout_peric0_peric0_top0_ipclk_10", "dout_peric0_i3c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_11,
+ "gout_peric0_peric0_top0_ipclk_11", "dout_peric0_i3c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_12,
+ "gout_peric0_peric0_top0_ipclk_12", "dout_peric0_i3c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_13,
+ "gout_peric0_peric0_top0_ipclk_13", "dout_peric0_i3c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_14,
+ "gout_peric0_peric0_top0_ipclk_14", "dout_peric0_i3c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_15,
+ "gout_peric0_peric0_top0_ipclk_15", "dout_peric0_i3c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_2,
+ "gout_peric0_peric0_top0_ipclk_2", "dout_peric0_usi3_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_3,
+ "gout_peric0_peric0_top0_ipclk_3", "dout_peric0_usi4_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_4,
+ "gout_peric0_peric0_top0_ipclk_4", "dout_peric0_usi5_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_5,
+ "gout_peric0_peric0_top0_ipclk_5", "dout_peric0_usi6_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_6,
+ "gout_peric0_peric0_top0_ipclk_6", "dout_peric0_usi7_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_7,
+ "gout_peric0_peric0_top0_ipclk_7", "dout_peric0_usi8_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_8,
+ "gout_peric0_peric0_top0_ipclk_8", "dout_peric0_i3c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_9,
+ "gout_peric0_peric0_top0_ipclk_9", "dout_peric0_i3c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_0,
+ "gout_peric0_peric0_top0_pclk_0", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_1,
+ "gout_peric0_peric0_top0_pclk_1", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_10,
+ "gout_peric0_peric0_top0_pclk_10", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_11,
+ "gout_peric0_peric0_top0_pclk_11", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_12,
+ "gout_peric0_peric0_top0_pclk_12", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_13,
+ "gout_peric0_peric0_top0_pclk_13", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_14,
+ "gout_peric0_peric0_top0_pclk_14", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_15,
+ "gout_peric0_peric0_top0_pclk_15", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_2,
+ "gout_peric0_peric0_top0_pclk_2", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_3,
+ "gout_peric0_peric0_top0_pclk_3", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_4,
+ "gout_peric0_peric0_top0_pclk_4", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_5,
+ "gout_peric0_peric0_top0_pclk_5", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_6,
+ "gout_peric0_peric0_top0_pclk_6", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_7,
+ "gout_peric0_peric0_top0_pclk_7", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_8,
+ "gout_peric0_peric0_top0_pclk_8", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_9,
+ "gout_peric0_peric0_top0_pclk_9", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9,
+ 21, 0, 0),
+ /* Disabling this clock makes the system hang. Mark the clock as critical. */
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_0,
+ "gout_peric0_peric0_top1_ipclk_0", "dout_peric0_usi0_uart",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_0,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_2,
+ "gout_peric0_peric0_top1_ipclk_2", "dout_peric0_usi14_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_2,
+ 21, 0, 0),
+ /* Disabling this clock makes the system hang. Mark the clock as critical. */
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_0,
+ "gout_peric0_peric0_top1_pclk_0", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_0,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_2,
+ "gout_peric0_peric0_top1_pclk_2", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_CLK_PERIC0_BUSP_CLK,
+ "gout_peric0_clk_peric0_busp_clk", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_CLK_PERIC0_I3C_CLK,
+ "gout_peric0_clk_peric0_i3c_clk", "dout_peric0_i3c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_I3C_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_CLK_PERIC0_USI0_UART_CLK,
+ "gout_peric0_clk_peric0_usi0_uart_clk", "dout_peric0_usi0_uart",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI0_UART_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_CLK_PERIC0_USI14_USI_CLK,
+ "gout_peric0_clk_peric0_usi14_usi_clk", "dout_peric0_usi14_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI14_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_CLK_PERIC0_USI1_USI_CLK,
+ "gout_peric0_clk_peric0_usi1_usi_clk", "dout_peric0_usi1_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI1_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_CLK_PERIC0_USI2_USI_CLK,
+ "gout_peric0_clk_peric0_usi2_usi_clk", "dout_peric0_usi2_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI2_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_CLK_PERIC0_USI3_USI_CLK,
+ "gout_peric0_clk_peric0_usi3_usi_clk", "dout_peric0_usi3_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI3_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_CLK_PERIC0_USI4_USI_CLK,
+ "gout_peric0_clk_peric0_usi4_usi_clk", "dout_peric0_usi4_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI4_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_CLK_PERIC0_USI5_USI_CLK,
+ "gout_peric0_clk_peric0_usi5_usi_clk", "dout_peric0_usi5_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI5_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_CLK_PERIC0_USI6_USI_CLK,
+ "gout_peric0_clk_peric0_usi6_usi_clk", "dout_peric0_usi6_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI6_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_CLK_PERIC0_USI7_USI_CLK,
+ "gout_peric0_clk_peric0_usi7_usi_clk", "dout_peric0_usi7_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI7_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_CLK_PERIC0_USI8_USI_CLK,
+ "gout_peric0_clk_peric0_usi8_usi_clk", "dout_peric0_usi8_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI8_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_SYSREG_PERIC0_PCLK,
+ "gout_peric0_sysreg_peric0_pclk", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info peric0_cmu_info __initconst = {
+ .mux_clks = peric0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric0_mux_clks),
+ .div_clks = peric0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric0_div_clks),
+ .gate_clks = peric0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric0_gate_clks),
+ .nr_clk_ids = CLKS_NR_PERIC0,
+ .clk_regs = peric0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric0_clk_regs),
+ .clk_name = "bus",
+};
+
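+/*
+ * Illustrative consumer usage (not part of this driver): a peripheral on a
+ * gs101 board would normally pick up one of the leaf gates above through the
+ * common clock framework, e.g.:
+ *
+ *	struct clk *ipclk = devm_clk_get(dev, "ipclk");
+ *
+ *	if (IS_ERR(ipclk))
+ *		return PTR_ERR(ipclk);
+ *	return clk_prepare_enable(ipclk);
+ *
+ * The "ipclk" consumer name is hypothetical and would come from the board's
+ * clock-names property, not from this file.
+ */
+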
+/* ---- CMU_PERIC1 ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC1 (0x10c00000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER 0x0600
+#define PLL_CON1_MUX_CLKCMU_PERIC1_BUS_USER 0x0604
+#define PLL_CON0_MUX_CLKCMU_PERIC1_I3C_USER 0x0610
+#define PLL_CON1_MUX_CLKCMU_PERIC1_I3C_USER 0x0614
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI0_USI_USER 0x0620
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI0_USI_USER 0x0624
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER 0x0630
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI10_USI_USER 0x0634
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER 0x0640
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI11_USI_USER 0x0644
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER 0x0650
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI12_USI_USER 0x0654
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USI_USER 0x0660
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI13_USI_USER 0x0664
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI9_USI_USER 0x0670
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI9_USI_USER 0x0674
+#define PERIC1_CMU_PERIC1_CONTROLLER_OPTION 0x0800
+#define CLKOUT_CON_BLK_PERIC1_CMU_PERIC1_CLKOUT0 0x0810
+#define CLK_CON_DIV_DIV_CLK_PERIC1_I3C 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI0_USI 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI9_USI 0x1818
+#define CLK_CON_BUF_CLKBUF_PERIC1_IP 0x2000
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_I3C_IPCLKPORT_CLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_OSCCLK_IPCLKPORT_CLK 0x200c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPC_PERIC1_IPCLKPORT_PCLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_1 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_2 0x2024
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_3 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4 0x202c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_5 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_6 0x2034
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_8 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_1 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_15 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_2 0x2044
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_3 0x2048
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_5 0x2050
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_6 0x2054
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_8 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI0_USI_IPCLKPORT_CLK 0x2060
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI10_USI_IPCLKPORT_CLK 0x2064
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI11_USI_IPCLKPORT_CLK 0x2068
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI12_USI_IPCLKPORT_CLK 0x206c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI13_USI_IPCLKPORT_CLK 0x2070
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI9_USI_IPCLKPORT_CLK 0x2074
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK 0x2078
+#define DMYQCH_CON_PERIC1_TOP0_QCH_S 0x3000
+#define PCH_CON_LHM_AXI_P_PERIC1_PCH 0x3004
+#define QCH_CON_D_TZPC_PERIC1_QCH 0x3008
+#define QCH_CON_GPC_PERIC1_QCH 0x300c
+#define QCH_CON_GPIO_PERIC1_QCH 0x3010
+#define QCH_CON_LHM_AXI_P_PERIC1_QCH 0x3014
+#define QCH_CON_PERIC1_CMU_PERIC1_QCH 0x3018
+#define QCH_CON_PERIC1_TOP0_QCH_I3C0 0x301c
+#define QCH_CON_PERIC1_TOP0_QCH_PWM 0x3020
+#define QCH_CON_PERIC1_TOP0_QCH_USI0_USI 0x3024
+#define QCH_CON_PERIC1_TOP0_QCH_USI10_USI 0x3028
+#define QCH_CON_PERIC1_TOP0_QCH_USI11_USI 0x302c
+#define QCH_CON_PERIC1_TOP0_QCH_USI12_USI 0x3030
+#define QCH_CON_PERIC1_TOP0_QCH_USI13_USI 0x3034
+#define QCH_CON_PERIC1_TOP0_QCH_USI9_USI 0x3038
+#define QCH_CON_SYSREG_PERIC1_QCH 0x303c
+#define QUEUE_CTRL_REG_BLK_PERIC1_CMU_PERIC1 0x3c00
+
+static const unsigned long peric1_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_I3C_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_I3C_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI0_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI0_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI10_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI11_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI12_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI13_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI9_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI9_USI_USER,
+ PERIC1_CMU_PERIC1_CONTROLLER_OPTION,
+ CLKOUT_CON_BLK_PERIC1_CMU_PERIC1_CLKOUT0,
+ CLK_CON_DIV_DIV_CLK_PERIC1_I3C,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI0_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI9_USI,
+ CLK_CON_BUF_CLKBUF_PERIC1_IP,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_I3C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPC_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_1,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_2,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_1,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_2,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI0_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI10_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI11_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI12_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI13_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI9_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK,
+ DMYQCH_CON_PERIC1_TOP0_QCH_S,
+ PCH_CON_LHM_AXI_P_PERIC1_PCH,
+ QCH_CON_D_TZPC_PERIC1_QCH,
+ QCH_CON_GPC_PERIC1_QCH,
+ QCH_CON_GPIO_PERIC1_QCH,
+ QCH_CON_LHM_AXI_P_PERIC1_QCH,
+ QCH_CON_PERIC1_CMU_PERIC1_QCH,
+ QCH_CON_PERIC1_TOP0_QCH_I3C0,
+ QCH_CON_PERIC1_TOP0_QCH_PWM,
+ QCH_CON_PERIC1_TOP0_QCH_USI0_USI,
+ QCH_CON_PERIC1_TOP0_QCH_USI10_USI,
+ QCH_CON_PERIC1_TOP0_QCH_USI11_USI,
+ QCH_CON_PERIC1_TOP0_QCH_USI12_USI,
+ QCH_CON_PERIC1_TOP0_QCH_USI13_USI,
+ QCH_CON_PERIC1_TOP0_QCH_USI9_USI,
+ QCH_CON_SYSREG_PERIC1_QCH,
+ QUEUE_CTRL_REG_BLK_PERIC1_CMU_PERIC1,
+};
+
+/* List of parent clocks for Muxes in CMU_PERIC1 */
+PNAME(mout_peric1_bus_user_p) = { "oscclk", "dout_cmu_peric1_bus" };
+PNAME(mout_peric1_nonbususer_p) = { "oscclk", "dout_cmu_peric1_ip" };
+
+static const struct samsung_mux_clock peric1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC1_BUS_USER, "mout_peric1_bus_user",
+ mout_peric1_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_I3C_USER,
+ "mout_peric1_i3c_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_I3C_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI0_USI_USER,
+ "mout_peric1_usi0_usi_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI0_USI_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI10_USI_USER,
+ "mout_peric1_usi10_usi_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI11_USI_USER,
+ "mout_peric1_usi11_usi_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI12_USI_USER,
+ "mout_peric1_usi12_usi_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI13_USI_USER,
+ "mout_peric1_usi13_usi_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USI_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI9_USI_USER,
+ "mout_peric1_usi9_usi_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI9_USI_USER, 4, 1),
+};
+
+static const struct samsung_div_clock peric1_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERIC1_I3C, "dout_peric1_i3c", "mout_peric1_i3c_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_I3C, 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI0_USI,
+ "dout_peric1_usi0_usi", "mout_peric1_usi0_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI0_USI, 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI10_USI,
+ "dout_peric1_usi10_usi", "mout_peric1_usi10_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI, 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI11_USI,
+ "dout_peric1_usi11_usi", "mout_peric1_usi11_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI, 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI12_USI,
+ "dout_peric1_usi12_usi", "mout_peric1_usi12_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI, 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI13_USI,
+ "dout_peric1_usi13_usi", "mout_peric1_usi13_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI, 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI9_USI,
+ "dout_peric1_usi9_usi", "mout_peric1_usi9_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI9_USI, 0, 4),
+};
+
+static const struct samsung_gate_clock peric1_gate_clks[] __initconst = {
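+	/*
+	 * Kept always on (CLK_IS_CRITICAL); presumably for the same reason as
+	 * the matching PERIC0 gate above.
+	 */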
+ GATE(CLK_GOUT_PERIC1_PCLK,
+ "gout_peric1_peric1_pclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_CLK_PERIC1_I3C_CLK,
+ "gout_peric1_clk_peric1_i3c_clk", "dout_peric1_i3c",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_I3C_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_CLK_PERIC1_OSCCLK_CLK,
+ "gout_peric1_clk_peric1_oscclk_clk", "oscclk",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_OSCCLK_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_D_TZPC_PERIC1_PCLK,
+ "gout_peric1_d_tzpc_peric1_pclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_GPC_PERIC1_PCLK,
+ "gout_peric1_gpc_peric1_pclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPC_PERIC1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_GPIO_PERIC1_PCLK,
+ "gout_peric1_gpio_peric1_pclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIC1_LHM_AXI_P_PERIC1_I_CLK,
+ "gout_peric1_lhm_axi_p_peric1_i_clk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_1,
+ "gout_peric1_peric1_top0_ipclk_1", "dout_peric1_usi0_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_2,
+ "gout_peric1_peric1_top0_ipclk_2", "dout_peric1_usi9_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_3,
+ "gout_peric1_peric1_top0_ipclk_3", "dout_peric1_usi10_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_4,
+ "gout_peric1_peric1_top0_ipclk_4", "dout_peric1_usi11_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_5,
+ "gout_peric1_peric1_top0_ipclk_5", "dout_peric1_usi12_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_6,
+ "gout_peric1_peric1_top0_ipclk_6", "dout_peric1_usi13_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_8,
+ "gout_peric1_peric1_top0_ipclk_8", "dout_peric1_i3c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_1,
+ "gout_peric1_peric1_top0_pclk_1", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_15,
+ "gout_peric1_peric1_top0_pclk_15", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_2,
+ "gout_peric1_peric1_top0_pclk_2", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_3,
+ "gout_peric1_peric1_top0_pclk_3", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_4,
+ "gout_peric1_peric1_top0_pclk_4", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_5,
+ "gout_peric1_peric1_top0_pclk_5", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_6,
+ "gout_peric1_peric1_top0_pclk_6", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_8,
+ "gout_peric1_peric1_top0_pclk_8", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_CLK_PERIC1_BUSP_CLK,
+ "gout_peric1_clk_peric1_busp_clk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_CLK_PERIC1_USI0_USI_CLK,
+ "gout_peric1_clk_peric1_usi0_usi_clk", "dout_peric1_usi0_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI0_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_CLK_PERIC1_USI10_USI_CLK,
+ "gout_peric1_clk_peric1_usi10_usi_clk", "dout_peric1_usi10_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI10_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_CLK_PERIC1_USI11_USI_CLK,
+ "gout_peric1_clk_peric1_usi11_usi_clk", "dout_peric1_usi11_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI11_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_CLK_PERIC1_USI12_USI_CLK,
+ "gout_peric1_clk_peric1_usi12_usi_clk", "dout_peric1_usi12_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI12_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_CLK_PERIC1_USI13_USI_CLK,
+ "gout_peric1_clk_peric1_usi13_usi_clk", "dout_peric1_usi13_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI13_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_CLK_PERIC1_USI9_USI_CLK,
+ "gout_peric1_clk_peric1_usi9_usi_clk", "dout_peric1_usi9_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI9_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SYSREG_PERIC1_PCLK,
+ "gout_peric1_sysreg_peric1_pclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info peric1_cmu_info __initconst = {
+ .mux_clks = peric1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric1_mux_clks),
+ .div_clks = peric1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric1_div_clks),
+ .gate_clks = peric1_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric1_gate_clks),
+ .nr_clk_ids = CLKS_NR_PERIC1,
+ .clk_regs = peric1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric1_clk_regs),
+ .clk_name = "bus",
+};
+
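+/*
+ * Note on .clk_name = "bus" in the two cmu_info structures above: the shared
+ * Samsung CMU registration code is expected to look up the clock named "bus"
+ * from the CMU's DT node (typically wired to CMU_TOP's dout_cmu_peric0_bus /
+ * dout_cmu_peric1_bus) and keep it enabled so the block's registers remain
+ * accessible while the clocks are registered.
+ */
+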
/* ---- platform_driver ----------------------------------------------------- */
static int __init gs101_cmu_probe(struct platform_device *pdev)
@@ -2496,8 +3427,11 @@ static const struct of_device_id gs101_cmu_of_match[] = {
.compatible = "google,gs101-cmu-apm",
.data = &apm_cmu_info,
}, {
- .compatible = "google,gs101-cmu-misc",
- .data = &misc_cmu_info,
+ .compatible = "google,gs101-cmu-peric0",
+ .data = &peric0_cmu_info,
+ }, {
+ .compatible = "google,gs101-cmu-peric1",
+ .data = &peric1_cmu_info,
}, {
},
};
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index 516b716407e55..a763309e6f129 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -12,6 +12,7 @@
#include <linux/clk-provider.h>
#include "clk-pll.h"
+#include "clk-cpu.h"
/**
* struct samsung_clk_provider - information about clock provider
@@ -282,10 +283,11 @@ struct samsung_cpu_clock {
unsigned int alt_parent_id;
unsigned long flags;
int offset;
+ enum exynos_cpuclk_layout reg_layout;
const struct exynos_cpuclk_cfg_data *cfg;
};
-#define CPU_CLK(_id, _name, _pid, _apid, _flags, _offset, _cfg) \
+#define CPU_CLK(_id, _name, _pid, _apid, _flags, _offset, _layout, _cfg) \
{ \
.id = _id, \
.name = _name, \
@@ -293,6 +295,7 @@ struct samsung_cpu_clock {
.alt_parent_id = _apid, \
.flags = _flags, \
.offset = _offset, \
+ .reg_layout = _layout, \
.cfg = _cfg, \
}
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-isp.c b/drivers/clk/starfive/clk-starfive-jh7110-isp.c
index 929b8788279ea..d3c85421f948f 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-isp.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-isp.c
@@ -202,12 +202,10 @@ err_exit:
return ret;
}
-static int jh7110_ispcrg_remove(struct platform_device *pdev)
+static void jh7110_ispcrg_remove(struct platform_device *pdev)
{
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id jh7110_ispcrg_match[] = {
@@ -218,7 +216,7 @@ MODULE_DEVICE_TABLE(of, jh7110_ispcrg_match);
static struct platform_driver jh7110_ispcrg_driver = {
.probe = jh7110_ispcrg_probe,
- .remove = jh7110_ispcrg_remove,
+ .remove_new = jh7110_ispcrg_remove,
.driver = {
.name = "clk-starfive-jh7110-isp",
.of_match_table = jh7110_ispcrg_match,
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-vout.c b/drivers/clk/starfive/clk-starfive-jh7110-vout.c
index 10cc1ec439251..53f7af234cc23 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-vout.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-vout.c
@@ -209,12 +209,10 @@ err_exit:
return ret;
}
-static int jh7110_voutcrg_remove(struct platform_device *pdev)
+static void jh7110_voutcrg_remove(struct platform_device *pdev)
{
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id jh7110_voutcrg_match[] = {
@@ -225,7 +223,7 @@ MODULE_DEVICE_TABLE(of, jh7110_voutcrg_match);
static struct platform_driver jh7110_voutcrg_driver = {
.probe = jh7110_voutcrg_probe,
- .remove = jh7110_voutcrg_remove,
+ .remove_new = jh7110_voutcrg_remove,
.driver = {
.name = "clk-starfive-jh7110-vout",
.of_match_table = jh7110_voutcrg_match,
diff --git a/drivers/clk/sunxi/clk-a20-gmac.c b/drivers/clk/sunxi/clk-a20-gmac.c
index 0b09230a0d4ed..43080c7d045b3 100644
--- a/drivers/clk/sunxi/clk-a20-gmac.c
+++ b/drivers/clk/sunxi/clk-a20-gmac.c
@@ -15,8 +15,19 @@
static DEFINE_SPINLOCK(gmac_lock);
+
+#define SUN7I_A20_GMAC_GPIT 2
+#define SUN7I_A20_GMAC_MASK 0x3
+#define SUN7I_A20_GMAC_PARENTS 2
+
+static u32 sun7i_a20_gmac_mux_table[SUN7I_A20_GMAC_PARENTS] = {
+ 0x00, /* Select mii_phy_tx_clk */
+ 0x02, /* Select gmac_int_tx_clk */
+};
+
/**
* sun7i_a20_gmac_clk_setup - Setup function for A20/A31 GMAC clock module
+ * @node: &struct device_node for the clock
*
* This clock looks something like this
* ________________________
@@ -39,16 +50,6 @@ static DEFINE_SPINLOCK(gmac_lock);
* enable/disable this clock to configure the required state. The clock
* driver then responds by auto-reparenting the clock.
*/
-
-#define SUN7I_A20_GMAC_GPIT 2
-#define SUN7I_A20_GMAC_MASK 0x3
-#define SUN7I_A20_GMAC_PARENTS 2
-
-static u32 sun7i_a20_gmac_mux_table[SUN7I_A20_GMAC_PARENTS] = {
- 0x00, /* Select mii_phy_tx_clk */
- 0x02, /* Select gmac_int_tx_clk */
-};
-
static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
{
struct clk *clk;
diff --git a/drivers/clk/sunxi/clk-sun9i-cpus.c b/drivers/clk/sunxi/clk-sun9i-cpus.c
index 01255d827fc97..48bf899bb2bcd 100644
--- a/drivers/clk/sunxi/clk-sun9i-cpus.c
+++ b/drivers/clk/sunxi/clk-sun9i-cpus.c
@@ -18,9 +18,6 @@
static DEFINE_SPINLOCK(sun9i_a80_cpus_lock);
-/**
- * sun9i_a80_cpus_clk_setup() - Setup function for a80 cpus composite clk
- */
#define SUN9I_CPUS_MAX_PARENTS 4
#define SUN9I_CPUS_MUX_PARENT_PLL4 3
@@ -180,6 +177,10 @@ static const struct clk_ops sun9i_a80_cpus_clk_ops = {
.set_rate = sun9i_a80_cpus_clk_set_rate,
};
+/**
+ * sun9i_a80_cpus_setup() - Setup function for a80 cpus composite clk
+ * @node: &struct device_node for the clock
+ */
static void sun9i_a80_cpus_setup(struct device_node *node)
{
const char *clk_name = node->name;
diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c
index 5460218f3467a..3c53f65002a28 100644
--- a/drivers/clk/sunxi/clk-usb.c
+++ b/drivers/clk/sunxi/clk-usb.c
@@ -73,9 +73,6 @@ static const struct reset_control_ops sunxi_usb_reset_ops = {
.deassert = sunxi_usb_reset_deassert,
};
-/**
- * sunxi_usb_clk_setup() - Setup function for usb gate clocks
- */
#define SUNXI_USB_MAX_SIZE 32
@@ -85,6 +82,12 @@ struct usb_clk_data {
bool reset_needs_clk;
};
+/**
+ * sunxi_usb_clk_setup() - Setup function for usb gate clocks
+ * @node: &struct device_node for the clock
+ * @data: &struct usb_clk_data for the clock
+ * @lock: spinlock for the clock
+ */
static void __init sunxi_usb_clk_setup(struct device_node *node,
const struct usb_clk_data *data,
spinlock_t *lock)
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index 93183287c58db..43514e6f3b780 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -376,14 +376,9 @@ static void __init of_omap2_apll_setup(struct device_node *node)
}
clk_hw->fixed_rate = val;
- if (of_property_read_u32(node, "ti,bit-shift", &val)) {
- pr_err("%pOFn missing bit-shift\n", node);
- goto cleanup;
- }
-
- clk_hw->enable_bit = val;
- ad->enable_mask = 0x3 << val;
- ad->autoidle_mask = 0x3 << val;
+ clk_hw->enable_bit = ti_clk_get_legacy_bit_shift(node);
+ ad->enable_mask = 0x3 << clk_hw->enable_bit;
+ ad->autoidle_mask = 0x3 << clk_hw->enable_bit;
if (of_property_read_u32(node, "ti,idlest-shift", &val)) {
pr_err("%pOFn missing idlest-shift\n", node);
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index 1862958ab412c..f2117fef7c7d6 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -7,6 +7,7 @@
* Tero Kristo <t-kristo@ti.com>
*/
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
@@ -15,6 +16,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/list.h>
+#include <linux/minmax.h>
#include <linux/regmap.h>
#include <linux/string_helpers.h>
#include <linux/memblock.h>
@@ -114,20 +116,26 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops)
/*
* Eventually we could standardize to using '_' for clk-*.c files to follow the
- * TRM naming and leave out the tmp name here.
+ * TRM naming.
*/
static struct device_node *ti_find_clock_provider(struct device_node *from,
const char *name)
{
+ char *tmp __free(kfree) = NULL;
struct device_node *np;
bool found = false;
const char *n;
- char *tmp;
+ char *p;
tmp = kstrdup_and_replace(name, '-', '_', GFP_KERNEL);
if (!tmp)
return NULL;
+ /* Ignore a possible address for the node name */
+ p = strchr(tmp, '@');
+ if (p)
+ *p = '\0';
+
/* Node named "clock" with "clock-output-names" */
for_each_of_allnodes_from(from, np) {
if (of_property_read_string_index(np, "clock-output-names",
@@ -140,7 +148,6 @@ static struct device_node *ti_find_clock_provider(struct device_node *from,
break;
}
}
- kfree(tmp);
if (found) {
of_node_put(from);
@@ -148,7 +155,7 @@ static struct device_node *ti_find_clock_provider(struct device_node *from,
}
/* Fall back to using old node name base provider name */
- return of_find_node_by_name(from, name);
+ return of_find_node_by_name(from, tmp);
}
/**
@@ -301,8 +308,9 @@ int __init ti_clk_retry_init(struct device_node *node, void *user,
int ti_clk_get_reg_addr(struct device_node *node, int index,
struct clk_omap_reg *reg)
{
- u32 val;
- int i;
+ u32 clksel_addr, val;
+ bool is_clksel = false;
+ int i, err;
for (i = 0; i < CLK_MAX_MEMMAPS; i++) {
if (clocks_node_ptr[i] == node->parent)
@@ -318,21 +326,62 @@ int ti_clk_get_reg_addr(struct device_node *node, int index,
reg->index = i;
- if (of_property_read_u32_index(node, "reg", index, &val)) {
- if (of_property_read_u32_index(node->parent, "reg",
- index, &val)) {
- pr_err("%pOFn or parent must have reg[%d]!\n",
- node, index);
+ if (of_device_is_compatible(node->parent, "ti,clksel")) {
+ err = of_property_read_u32_index(node->parent, "reg", index, &clksel_addr);
+ if (err) {
+ pr_err("%pOFn parent clksel must have reg[%d]!\n", node, index);
return -EINVAL;
}
+ is_clksel = true;
}
+ err = of_property_read_u32_index(node, "reg", index, &val);
+ if (err && is_clksel) {
+ /* Legacy clksel with no reg and a possible ti,bit-shift property */
+ reg->offset = clksel_addr;
+ reg->bit = ti_clk_get_legacy_bit_shift(node);
+ reg->ptr = NULL;
+
+ return 0;
+ }
+
+ /* Updated clksel clock with a proper reg property */
+ if (is_clksel) {
+ reg->offset = clksel_addr;
+ reg->bit = val;
+ reg->ptr = NULL;
+ return 0;
+ }
+
+	/* Other clocks that may or may not have a ti,bit-shift property */
reg->offset = val;
+ reg->bit = ti_clk_get_legacy_bit_shift(node);
reg->ptr = NULL;
return 0;
}
+/**
+ * ti_clk_get_legacy_bit_shift - get bit shift for a clock register
+ * @node: device node for the clock
+ *
+ * Gets the clock register bit shift using the legacy ti,bit-shift
+ * property. Only needed for legacy clocks, and can eventually be
+ * dropped once all the composite clocks use a clksel node with a
+ * proper reg property.
+ */
+int ti_clk_get_legacy_bit_shift(struct device_node *node)
+{
+ int err;
+ u32 val;
+
+ err = of_property_read_u32(node, "ti,bit-shift", &val);
+ if (!err && in_range(val, 0, 32))
+ return val;
+
+ return 0;
+}
+
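+/*
+ * Illustrative only (not introduced by this change): with the fallback above,
+ * a legacy clock node that still carries a "ti,bit-shift" property keeps
+ * working unchanged, while an updated clksel child is expected to encode the
+ * same bit number in its own "reg" property under a "ti,clksel" parent node,
+ * as handled in ti_clk_get_reg_addr().
+ */
+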
void ti_clk_latch(struct clk_omap_reg *reg, s8 shift)
{
u32 latch;
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index 16a9f7c2280a5..2de7acea1ea05 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -216,6 +216,7 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
int ti_clk_get_reg_addr(struct device_node *node, int index,
struct clk_omap_reg *reg);
+int ti_clk_get_legacy_bit_shift(struct device_node *node);
void ti_dt_clocks_register(struct ti_dt_clk *oclks);
int ti_clk_retry_init(struct device_node *node, void *user,
ti_of_clk_init_cb_t func);
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 5d5bb123ba949..ade99ab6cfa9b 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -477,10 +477,7 @@ static int __init ti_clk_divider_populate(struct device_node *node,
if (ret)
return ret;
- if (!of_property_read_u32(node, "ti,bit-shift", &val))
- div->shift = val;
- else
- div->shift = 0;
+ div->shift = div->reg.bit;
if (!of_property_read_u32(node, "ti,latch-bit", &val))
div->latch = val;
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index e32b3515f9e76..00680486b1bd0 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -928,7 +928,7 @@ void omap3_core_dpll_restore_context(struct clk_hw *hw)
}
/**
- * omap3_non_core_dpll_save_context - Save the m and n values of the divider
+ * omap3_noncore_dpll_save_context - Save the m and n values of the divider
* @hw: pointer struct clk_hw
*
* Before the dpll registers are lost save the last rounded rate m and n
@@ -957,7 +957,7 @@ int omap3_noncore_dpll_save_context(struct clk_hw *hw)
}
/**
- * omap3_core_dpll_restore_context - restore the m and n values of the divider
+ * omap3_noncore_dpll_restore_context - restore the m and n values of the divider
* @hw: pointer struct clk_hw
*
* Restore the last rounded rate m and n
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index 8e477d50d0fdb..a9febd6356b82 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -132,7 +132,6 @@ static void __init _of_ti_gate_clk_setup(struct device_node *node,
struct clk_omap_reg reg;
const char *name;
u8 enable_bit = 0;
- u32 val;
u32 flags = 0;
u8 clk_gate_flags = 0;
@@ -140,8 +139,7 @@ static void __init _of_ti_gate_clk_setup(struct device_node *node,
if (ti_clk_get_reg_addr(node, 0, &reg))
return;
- if (!of_property_read_u32(node, "ti,bit-shift", &val))
- enable_bit = val;
+ enable_bit = reg.bit;
}
if (of_clk_get_parent_count(node) != 1) {
@@ -170,7 +168,6 @@ _of_ti_composite_gate_clk_setup(struct device_node *node,
const struct clk_hw_omap_ops *hw_ops)
{
struct clk_hw_omap *gate;
- u32 val = 0;
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
@@ -179,9 +176,7 @@ _of_ti_composite_gate_clk_setup(struct device_node *node,
if (ti_clk_get_reg_addr(node, 0, &gate->enable_reg))
goto cleanup;
- of_property_read_u32(node, "ti,bit-shift", &val);
-
- gate->enable_bit = val;
+ gate->enable_bit = gate->enable_reg.bit;
gate->ops = hw_ops;
if (!ti_clk_add_component(node, &gate->hw, CLK_COMPONENT_TYPE_GATE))
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
index 172301c646f85..3eb35c87c0ed5 100644
--- a/drivers/clk/ti/interface.c
+++ b/drivers/clk/ti/interface.c
@@ -66,13 +66,11 @@ static void __init _of_ti_interface_clk_setup(struct device_node *node,
struct clk_omap_reg reg;
u8 enable_bit = 0;
const char *name;
- u32 val;
if (ti_clk_get_reg_addr(node, 0, &reg))
return;
- if (!of_property_read_u32(node, "ti,bit-shift", &val))
- enable_bit = val;
+ enable_bit = reg.bit;
parent_name = of_clk_get_parent_name(node, 0);
if (!parent_name) {
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 1ebafa386be61..216d85d6aac6c 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -189,7 +189,7 @@ static void of_mux_clk_setup(struct device_node *node)
if (ti_clk_get_reg_addr(node, 0, &reg))
goto cleanup;
- of_property_read_u32(node, "ti,bit-shift", &shift);
+ shift = reg.bit;
of_property_read_u32(node, "ti,latch-bit", &latch);
@@ -252,7 +252,6 @@ static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
{
struct clk_omap_mux *mux;
unsigned int num_parents;
- u32 val;
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
@@ -261,8 +260,7 @@ static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
if (ti_clk_get_reg_addr(node, 0, &mux->reg))
goto cleanup;
- if (!of_property_read_u32(node, "ti,bit-shift", &val))
- mux->shift = val;
+ mux->shift = mux->reg.bit;
if (of_property_read_bool(node, "ti,index-starts-at-one"))
mux->flags |= CLK_MUX_INDEX_ONE;
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index 2974dd0ec6f4d..5ec9255e33faf 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -11,23 +11,12 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_data/x86/clk-pmc-atom.h>
+#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define PLT_CLK_NAME_BASE "pmc_plt_clk"
-#define PMC_CLK_CTL_OFFSET 0x60
-#define PMC_CLK_CTL_SIZE 4
-#define PMC_CLK_NUM 6
-#define PMC_CLK_CTL_GATED_ON_D3 0x0
-#define PMC_CLK_CTL_FORCE_ON 0x1
-#define PMC_CLK_CTL_FORCE_OFF 0x2
-#define PMC_CLK_CTL_RESERVED 0x3
-#define PMC_MASK_CLK_CTL GENMASK(1, 0)
-#define PMC_MASK_CLK_FREQ BIT(2)
-#define PMC_CLK_FREQ_XTAL (0 << 2) /* 25 MHz */
-#define PMC_CLK_FREQ_PLL (1 << 2) /* 19.2 MHz */
-
struct clk_plt_fixed {
struct clk_hw *clk;
struct clk_lookup *lookup;
diff --git a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
index 6a6e5d9292e87..19eb3fb7ae319 100644
--- a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
+++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
@@ -498,7 +498,7 @@ static int clk_wzrd_dynamic_all_nolock(struct clk_hw *hw, unsigned long rate,
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
unsigned long vco_freq, rate_div, clockout0_div;
- void __iomem *div_addr = divider->base;
+ void __iomem *div_addr;
u32 reg, pre, f;
int err;
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 7bdeaff2bfd68..c28d3dacf0fb2 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -42,6 +42,7 @@ static void __iomem *zynq_clkc_base;
#define SLCR_SWDT_CLK_SEL (zynq_clkc_base + 0x204)
#define NUM_MIO_PINS 54
+#define CLK_NAME_LEN 16
#define DBG_CLK_CTRL_CLKACT_TRC BIT(0)
#define DBG_CLK_CTRL_CPU_1XCLKACT BIT(1)
@@ -215,7 +216,7 @@ static void __init zynq_clk_setup(struct device_node *np)
int i;
u32 tmp;
int ret;
- char *clk_name;
+ char clk_name[CLK_NAME_LEN];
unsigned int fclk_enable = 0;
const char *clk_output_name[clk_max];
const char *cpu_parents[4];
@@ -426,12 +427,10 @@ static void __init zynq_clk_setup(struct device_node *np)
"gem1_emio_mux", CLK_SET_RATE_PARENT,
SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock);
- tmp = strlen("mio_clk_00x");
- clk_name = kmalloc(tmp, GFP_KERNEL);
for (i = 0; i < NUM_MIO_PINS; i++) {
int idx;
- snprintf(clk_name, tmp, "mio_clk_%2.2d", i);
+ snprintf(clk_name, CLK_NAME_LEN, "mio_clk_%2.2d", i);
idx = of_property_match_string(np, "clock-names", clk_name);
if (idx >= 0)
can_mio_mux_parents[i] = of_clk_get_parent_name(np,
@@ -439,7 +438,6 @@ static void __init zynq_clk_setup(struct device_node *np)
else
can_mio_mux_parents[i] = dummy_nm;
}
- kfree(clk_name);
clk_register_mux(NULL, "can_mux", periph_parents, 4,
CLK_SET_RATE_NO_REPARENT, SLCR_CAN_CLK_CTRL, 4, 2, 0,
&canclk_lock);
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 44a61dc6f9320..ab1c8c2b66b88 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/bitfield.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
@@ -31,10 +32,7 @@
#define GT_CONTROL_COMP_ENABLE BIT(1) /* banked */
#define GT_CONTROL_IRQ_ENABLE BIT(2) /* banked */
#define GT_CONTROL_AUTO_INC BIT(3) /* banked */
-#define GT_CONTROL_PRESCALER_SHIFT 8
-#define GT_CONTROL_PRESCALER_MAX 0xF
-#define GT_CONTROL_PRESCALER_MASK (GT_CONTROL_PRESCALER_MAX << \
- GT_CONTROL_PRESCALER_SHIFT)
+#define GT_CONTROL_PRESCALER_MASK GENMASK(15, 8)
#define GT_INT_STATUS 0x0c
#define GT_INT_STATUS_EVENT_FLAG BIT(0)
@@ -52,7 +50,8 @@
*/
static void __iomem *gt_base;
static struct notifier_block gt_clk_rate_change_nb;
-static u32 gt_psv_new, gt_psv_bck, gt_target_rate;
+static u32 gt_psv_new, gt_psv_bck;
+static unsigned long gt_target_rate;
static int gt_ppi;
static struct clock_event_device __percpu *gt_evt;
@@ -88,7 +87,7 @@ static u64 gt_counter_read(void)
return _gt_counter_read();
}
-/**
+/*
* To ensure that updates to comparator value register do not set the
* Interrupt Status Register proceed as follows:
* 1. Clear the Comp Enable bit in the Timer Control Register.
@@ -247,7 +246,7 @@ static void gt_write_presc(u32 psv)
reg = readl(gt_base + GT_CONTROL);
reg &= ~GT_CONTROL_PRESCALER_MASK;
- reg |= psv << GT_CONTROL_PRESCALER_SHIFT;
+ reg |= FIELD_PREP(GT_CONTROL_PRESCALER_MASK, psv);
writel(reg, gt_base + GT_CONTROL);
}
@@ -256,8 +255,7 @@ static u32 gt_read_presc(void)
u32 reg;
reg = readl(gt_base + GT_CONTROL);
- reg &= GT_CONTROL_PRESCALER_MASK;
- return reg >> GT_CONTROL_PRESCALER_SHIFT;
+ return FIELD_GET(GT_CONTROL_PRESCALER_MASK, reg);
}
static void __init gt_delay_timer_init(void)
@@ -272,9 +270,9 @@ static int __init gt_clocksource_init(void)
writel(0, gt_base + GT_COUNTER0);
writel(0, gt_base + GT_COUNTER1);
/* set prescaler and enable timer on all the cores */
- writel(((CONFIG_ARM_GT_INITIAL_PRESCALER_VAL - 1) <<
- GT_CONTROL_PRESCALER_SHIFT)
- | GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
+ writel(FIELD_PREP(GT_CONTROL_PRESCALER_MASK,
+ CONFIG_ARM_GT_INITIAL_PRESCALER_VAL - 1) |
+ GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
sched_clock_register(gt_sched_clock_read, 64, gt_target_rate);
@@ -290,18 +288,17 @@ static int gt_clk_rate_change_cb(struct notifier_block *nb,
switch (event) {
case PRE_RATE_CHANGE:
{
- int psv;
-
- psv = DIV_ROUND_CLOSEST(ndata->new_rate,
- gt_target_rate);
+ unsigned long psv;
- if (abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
+ psv = DIV_ROUND_CLOSEST(ndata->new_rate, gt_target_rate);
+ if (!psv ||
+ abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
return NOTIFY_BAD;
psv--;
/* prescaler within legal range? */
- if (psv < 0 || psv > GT_CONTROL_PRESCALER_MAX)
+ if (!FIELD_FIT(GT_CONTROL_PRESCALER_MASK, psv))
return NOTIFY_BAD;
/*
@@ -411,7 +408,7 @@ static int __init global_timer_of_register(struct device_node *np)
err = gt_clocksource_init();
if (err)
goto out_irq;
-
+
err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
"clockevents/arm/global_timer:starting",
gt_starting_cpu, gt_dying_cpu);
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 8ff7cd4e20bb1..b2a080647e413 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -81,14 +81,14 @@ static int hv_ce_set_next_event(unsigned long delta,
current_tick = hv_read_reference_counter();
current_tick += delta;
- hv_set_register(HV_REGISTER_STIMER0_COUNT, current_tick);
+ hv_set_msr(HV_MSR_STIMER0_COUNT, current_tick);
return 0;
}
static int hv_ce_shutdown(struct clock_event_device *evt)
{
- hv_set_register(HV_REGISTER_STIMER0_COUNT, 0);
- hv_set_register(HV_REGISTER_STIMER0_CONFIG, 0);
+ hv_set_msr(HV_MSR_STIMER0_COUNT, 0);
+ hv_set_msr(HV_MSR_STIMER0_CONFIG, 0);
if (direct_mode_enabled && stimer0_irq >= 0)
disable_percpu_irq(stimer0_irq);
@@ -119,7 +119,7 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
timer_cfg.direct_mode = 0;
timer_cfg.sintx = stimer0_message_sint;
}
- hv_set_register(HV_REGISTER_STIMER0_CONFIG, timer_cfg.as_uint64);
+ hv_set_msr(HV_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
return 0;
}
@@ -372,11 +372,11 @@ static __always_inline u64 read_hv_clock_msr(void)
* is set to 0 when the partition is created and is incremented in 100
* nanosecond units.
*
- * Use hv_raw_get_register() because this function is used from
- * noinstr. Notable; while HV_REGISTER_TIME_REF_COUNT is a synthetic
+ * Use hv_raw_get_msr() because this function is used from
+ * noinstr. Notable; while HV_MSR_TIME_REF_COUNT is a synthetic
* register it doesn't need the GHCB path.
*/
- return hv_raw_get_register(HV_REGISTER_TIME_REF_COUNT);
+ return hv_raw_get_msr(HV_MSR_TIME_REF_COUNT);
}
/*
@@ -439,9 +439,9 @@ static void suspend_hv_clock_tsc(struct clocksource *arg)
union hv_reference_tsc_msr tsc_msr;
/* Disable the TSC page */
- tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
+ tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
tsc_msr.enable = 0;
- hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
+ hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
}
@@ -450,10 +450,10 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
union hv_reference_tsc_msr tsc_msr;
/* Re-enable the TSC page */
- tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
+ tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
tsc_msr.enable = 1;
tsc_msr.pfn = tsc_pfn;
- hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
+ hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
}
#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
@@ -555,14 +555,14 @@ static void __init hv_init_tsc_clocksource(void)
* thus TSC clocksource will work even without the real TSC page
* mapped.
*/
- tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
+ tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
if (hv_root_partition)
tsc_pfn = tsc_msr.pfn;
else
tsc_pfn = HVPFN_DOWN(virt_to_phys(tsc_page));
tsc_msr.enable = 1;
tsc_msr.pfn = tsc_pfn;
- hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
+ hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
diff --git a/drivers/clocksource/timer-clint.c b/drivers/clocksource/timer-clint.c
index 9a55e733ae995..09fd292eb83df 100644
--- a/drivers/clocksource/timer-clint.c
+++ b/drivers/clocksource/timer-clint.c
@@ -131,7 +131,7 @@ static int clint_timer_starting_cpu(unsigned int cpu)
struct clock_event_device *ce = per_cpu_ptr(&clint_clock_event, cpu);
ce->cpumask = cpumask_of(cpu);
- clockevents_config_and_register(ce, clint_timer_freq, 100, 0x7fffffff);
+ clockevents_config_and_register(ce, clint_timer_freq, 100, ULONG_MAX);
enable_percpu_irq(clint_timer_irq,
irq_get_trigger_type(clint_timer_irq));
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 6a878d227a13b..489e69169ed4e 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -258,9 +258,8 @@ static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *ced = dev_id;
struct imx_timer *imxtm = to_imx_timer(ced);
- uint32_t tstat;
- tstat = readl_relaxed(imxtm->base + imxtm->gpt->reg_tstat);
+ readl_relaxed(imxtm->base + imxtm->gpt->reg_tstat);
imxtm->gpt->gpt_irq_acknowledge(imxtm);
diff --git a/drivers/clocksource/timer-imx-sysctr.c b/drivers/clocksource/timer-imx-sysctr.c
index 5a7a951c4efcd..44525813be1e2 100644
--- a/drivers/clocksource/timer-imx-sysctr.c
+++ b/drivers/clocksource/timer-imx-sysctr.c
@@ -4,48 +4,62 @@
#include <linux/interrupt.h>
#include <linux/clockchips.h>
+#include <linux/slab.h>
#include "timer-of.h"
#define CMP_OFFSET 0x10000
+#define RD_OFFSET 0x20000
#define CNTCV_LO 0x8
#define CNTCV_HI 0xc
#define CMPCV_LO (CMP_OFFSET + 0x20)
#define CMPCV_HI (CMP_OFFSET + 0x24)
#define CMPCR (CMP_OFFSET + 0x2c)
+#define CNTCV_LO_IMX95 (RD_OFFSET + 0x8)
+#define CNTCV_HI_IMX95 (RD_OFFSET + 0xc)
#define SYS_CTR_EN 0x1
#define SYS_CTR_IRQ_MASK 0x2
#define SYS_CTR_CLK_DIV 0x3
-static void __iomem *sys_ctr_base __ro_after_init;
-static u32 cmpcr __ro_after_init;
+struct sysctr_private {
+ u32 cmpcr;
+ u32 lo_off;
+ u32 hi_off;
+};
-static void sysctr_timer_enable(bool enable)
+static void sysctr_timer_enable(struct clock_event_device *evt, bool enable)
{
- writel(enable ? cmpcr | SYS_CTR_EN : cmpcr, sys_ctr_base + CMPCR);
+ struct timer_of *to = to_timer_of(evt);
+ struct sysctr_private *priv = to->private_data;
+ void __iomem *base = timer_of_base(to);
+
+ writel(enable ? priv->cmpcr | SYS_CTR_EN : priv->cmpcr, base + CMPCR);
}
-static void sysctr_irq_acknowledge(void)
+static void sysctr_irq_acknowledge(struct clock_event_device *evt)
{
/*
* clear the enable bit(EN =0) will clear
* the status bit(ISTAT = 0), then the interrupt
* signal will be negated(acknowledged).
*/
- sysctr_timer_enable(false);
+ sysctr_timer_enable(evt, false);
}
-static inline u64 sysctr_read_counter(void)
+static inline u64 sysctr_read_counter(struct clock_event_device *evt)
{
+ struct timer_of *to = to_timer_of(evt);
+ struct sysctr_private *priv = to->private_data;
+ void __iomem *base = timer_of_base(to);
u32 cnt_hi, tmp_hi, cnt_lo;
do {
- cnt_hi = readl_relaxed(sys_ctr_base + CNTCV_HI);
- cnt_lo = readl_relaxed(sys_ctr_base + CNTCV_LO);
- tmp_hi = readl_relaxed(sys_ctr_base + CNTCV_HI);
+ cnt_hi = readl_relaxed(base + priv->hi_off);
+ cnt_lo = readl_relaxed(base + priv->lo_off);
+ tmp_hi = readl_relaxed(base + priv->hi_off);
} while (tmp_hi != cnt_hi);
return ((u64) cnt_hi << 32) | cnt_lo;
@@ -54,22 +68,24 @@ static inline u64 sysctr_read_counter(void)
static int sysctr_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
+ struct timer_of *to = to_timer_of(evt);
+ void __iomem *base = timer_of_base(to);
u32 cmp_hi, cmp_lo;
u64 next;
- sysctr_timer_enable(false);
+ sysctr_timer_enable(evt, false);
- next = sysctr_read_counter();
+ next = sysctr_read_counter(evt);
next += delta;
cmp_hi = (next >> 32) & 0x00fffff;
cmp_lo = next & 0xffffffff;
- writel_relaxed(cmp_hi, sys_ctr_base + CMPCV_HI);
- writel_relaxed(cmp_lo, sys_ctr_base + CMPCV_LO);
+ writel_relaxed(cmp_hi, base + CMPCV_HI);
+ writel_relaxed(cmp_lo, base + CMPCV_LO);
- sysctr_timer_enable(true);
+ sysctr_timer_enable(evt, true);
return 0;
}
@@ -81,7 +97,7 @@ static int sysctr_set_state_oneshot(struct clock_event_device *evt)
static int sysctr_set_state_shutdown(struct clock_event_device *evt)
{
- sysctr_timer_enable(false);
+ sysctr_timer_enable(evt, false);
return 0;
}
@@ -90,7 +106,7 @@ static irqreturn_t sysctr_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
- sysctr_irq_acknowledge();
+ sysctr_irq_acknowledge(evt);
evt->event_handler(evt);
@@ -117,34 +133,75 @@ static struct timer_of to_sysctr = {
},
};
-static void __init sysctr_clockevent_init(void)
+static int __init __sysctr_timer_init(struct device_node *np)
{
+ struct sysctr_private *priv;
+ void __iomem *base;
+ int ret;
+
+ priv = kzalloc(sizeof(struct sysctr_private), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = timer_of_init(np, &to_sysctr);
+ if (ret) {
+ kfree(priv);
+ return ret;
+ }
+
+ if (!of_property_read_bool(np, "nxp,no-divider")) {
+ /* system counter clock is divided by 3 internally */
+ to_sysctr.of_clk.rate /= SYS_CTR_CLK_DIV;
+ }
+
to_sysctr.clkevt.cpumask = cpu_possible_mask;
+ to_sysctr.private_data = priv;
+
+ base = timer_of_base(&to_sysctr);
+ priv->cmpcr = readl(base + CMPCR) & ~SYS_CTR_EN;
+
+ return 0;
+}
+
+static int __init sysctr_timer_init(struct device_node *np)
+{
+ struct sysctr_private *priv;
+ int ret;
+
+ ret = __sysctr_timer_init(np);
+ if (ret)
+ return ret;
+
+ priv = to_sysctr.private_data;
+ priv->lo_off = CNTCV_LO;
+ priv->hi_off = CNTCV_HI;
clockevents_config_and_register(&to_sysctr.clkevt,
timer_of_rate(&to_sysctr),
0xff, 0x7fffffff);
+
+ return 0;
}
-static int __init sysctr_timer_init(struct device_node *np)
+static int __init sysctr_timer_imx95_init(struct device_node *np)
{
- int ret = 0;
+ struct sysctr_private *priv;
+ int ret;
- ret = timer_of_init(np, &to_sysctr);
+ ret = __sysctr_timer_init(np);
if (ret)
return ret;
- if (!of_property_read_bool(np, "nxp,no-divider")) {
- /* system counter clock is divided by 3 internally */
- to_sysctr.of_clk.rate /= SYS_CTR_CLK_DIV;
- }
-
- sys_ctr_base = timer_of_base(&to_sysctr);
- cmpcr = readl(sys_ctr_base + CMPCR);
- cmpcr &= ~SYS_CTR_EN;
+ priv = to_sysctr.private_data;
+ priv->lo_off = CNTCV_LO_IMX95;
+ priv->hi_off = CNTCV_HI_IMX95;
- sysctr_clockevent_init();
+ clockevents_config_and_register(&to_sysctr.clkevt,
+ timer_of_rate(&to_sysctr),
+ 0xff, 0x7fffffff);
return 0;
}
+
TIMER_OF_DECLARE(sysctr_timer, "nxp,sysctr-timer", sysctr_timer_init);
+TIMER_OF_DECLARE(sysctr_timer_imx95, "nxp,imx95-sysctr-timer", sysctr_timer_imx95_init);
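The sysctr_read_counter() loop above is the standard way to read a free-running 64-bit counter exposed as two 32-bit registers: read high, read low, re-read high, and retry if the high word changed, so a carry between the two reads cannot produce a torn value. A standalone sketch of the idiom against in-memory variables standing in for the CNTCV registers (names are illustrative, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

/* Simulated 32-bit halves of a free-running 64-bit hardware counter. */
static volatile uint32_t cnt_hi_reg, cnt_lo_reg;

static uint64_t read_counter64(void)
{
	uint32_t hi, lo, tmp;

	do {
		hi  = cnt_hi_reg;
		lo  = cnt_lo_reg;
		tmp = cnt_hi_reg;	/* re-read: did the high word roll over? */
	} while (tmp != hi);

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	cnt_hi_reg = 0x1;
	cnt_lo_reg = 0xfffffff0;
	printf("counter = 0x%016llx\n", (unsigned long long)read_counter64());
	return 0;
}

The i.MX95 variant only changes which offsets the low and high words live at, which is why the patch parameterises lo_off/hi_off in sysctr_private instead of duplicating the loop.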
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index e66dcbd665665..48ce50c5f5e68 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -108,13 +108,16 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);
+ /* Clear timer interrupt */
+ riscv_clock_event_stop();
+
ce->cpumask = cpumask_of(cpu);
ce->irq = riscv_clock_event_irq;
if (riscv_timer_cannot_wake_cpu)
ce->features |= CLOCK_EVT_FEAT_C3STOP;
if (static_branch_likely(&riscv_sstc_available))
ce->rating = 450;
- clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
+ clockevents_config_and_register(ce, riscv_timebase, 100, ULONG_MAX);
enable_percpu_irq(riscv_clock_event_irq,
irq_get_trigger_type(riscv_clock_event_irq));
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index c9a753f96ba12..0a4ea3288bfbe 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -73,7 +73,7 @@ static void stm32_timer_of_bits_set(struct timer_of *to, int bits)
* Accessor helper to get the number of bits in the timer-of private
* structure.
*
- * Returns an integer corresponding to the number of bits.
+ * Returns: an integer corresponding to the number of bits.
*/
static int stm32_timer_of_bits_get(struct timer_of *to)
{
@@ -177,7 +177,7 @@ static irqreturn_t stm32_clock_event_handler(int irq, void *dev_id)
}
/**
- * stm32_timer_width - Sort out the timer width (32/16)
+ * stm32_timer_set_width - Sort out the timer width (32/16)
* @to: a pointer to a timer-of structure
*
* Write the 32-bit max value and read/return the result. If the timer
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 59b0be482f32c..a86529a707370 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* timer-ti-32k.c - OMAP2 32k Timer Support
*
* Copyright (C) 2009 Nokia Corporation
diff --git a/drivers/comedi/drivers/das08.c b/drivers/comedi/drivers/das08.c
index 5d5b9174f88a9..49944ce1f8135 100644
--- a/drivers/comedi/drivers/das08.c
+++ b/drivers/comedi/drivers/das08.c
@@ -177,7 +177,6 @@ static int das08_ai_insn_read(struct comedi_device *dev,
int ret;
chan = CR_CHAN(insn->chanspec);
- range = CR_RANGE(insn->chanspec);
/* clear crap */
inb(dev->iobase + DAS08_AI_LSB_REG);
diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c
index 4536ed43f65b2..84dce5184a77a 100644
--- a/drivers/comedi/drivers/vmk80xx.c
+++ b/drivers/comedi/drivers/vmk80xx.c
@@ -641,33 +641,22 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
struct vmk80xx_private *devpriv = dev->private;
struct usb_interface *intf = comedi_to_usb_interface(dev);
struct usb_host_interface *iface_desc = intf->cur_altsetting;
- struct usb_endpoint_descriptor *ep_desc;
- int i;
-
- if (iface_desc->desc.bNumEndpoints != 2)
- return -ENODEV;
-
- for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
- ep_desc = &iface_desc->endpoint[i].desc;
-
- if (usb_endpoint_is_int_in(ep_desc) ||
- usb_endpoint_is_bulk_in(ep_desc)) {
- if (!devpriv->ep_rx)
- devpriv->ep_rx = ep_desc;
- continue;
- }
+ struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc;
+ int ret;
- if (usb_endpoint_is_int_out(ep_desc) ||
- usb_endpoint_is_bulk_out(ep_desc)) {
- if (!devpriv->ep_tx)
- devpriv->ep_tx = ep_desc;
- continue;
- }
- }
+ if (devpriv->model == VMK8061_MODEL)
+ ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc,
+ &ep_tx_desc, NULL, NULL);
+ else
+ ret = usb_find_common_endpoints(iface_desc, NULL, NULL,
+ &ep_rx_desc, &ep_tx_desc);
- if (!devpriv->ep_rx || !devpriv->ep_tx)
+ if (ret)
return -ENODEV;
+ devpriv->ep_rx = ep_rx_desc;
+ devpriv->ep_tx = ep_tx_desc;
+
if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
return -EINVAL;
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 35efb53d5492a..94e55c40970a6 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -302,4 +302,33 @@ config QORIQ_CPUFREQ
which are capable of changing the CPU's frequency dynamically.
endif
+
+config ACPI_CPPC_CPUFREQ
+ tristate "CPUFreq driver based on the ACPI CPPC spec"
+ depends on ACPI_PROCESSOR
+ depends on ARM || ARM64 || RISCV
+ select ACPI_CPPC_LIB
+ help
+ This adds a CPUFreq driver which uses CPPC methods
+ as described in the ACPIv5.1 spec. CPPC stands for
+ Collaborative Processor Performance Controls. It
+ is based on an abstract continuous scale of CPU
+ performance values which allows the remote power
+ processor to flexibly optimize for power and
+ performance. CPPC relies on power management firmware
+ support for its operation.
+
+ If in doubt, say N.
+
+config ACPI_CPPC_CPUFREQ_FIE
+ bool "Frequency Invariance support for CPPC cpufreq driver"
+ depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
+ depends on ARM || ARM64 || RISCV
+ default y
+ help
+ This extends frequency invariance support in the CPPC cpufreq driver,
+ by using CPPC delivered and reference performance counters.
+
+ If in doubt, say N.
+
endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a0ebad77666e3..96b404ce829f3 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -3,32 +3,6 @@
# ARM CPU Frequency scaling drivers
#
-config ACPI_CPPC_CPUFREQ
- tristate "CPUFreq driver based on the ACPI CPPC spec"
- depends on ACPI_PROCESSOR
- select ACPI_CPPC_LIB
- help
- This adds a CPUFreq driver which uses CPPC methods
- as described in the ACPIv5.1 spec. CPPC stands for
- Collaborative Processor Performance Controls. It
- is based on an abstract continuous scale of CPU
- performance values which allows the remote power
- processor to flexibly optimize for power and
- performance. CPPC relies on power management firmware
- support for its operation.
-
- If in doubt, say N.
-
-config ACPI_CPPC_CPUFREQ_FIE
- bool "Frequency Invariance support for CPPC cpufreq driver"
- depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
- default y
- help
- This extends frequency invariance support in the CPPC cpufreq driver,
- by using CPPC delivered and reference performance counters.
-
- If in doubt, say N.
-
config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
tristate "Allwinner nvmem based SUN50I CPUFreq driver"
depends on ARCH_SUNXI
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 8bd6e5e8f121c..2d83bbc65dd0b 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -208,7 +208,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
if (!priv)
return -ENOMEM;
- if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL))
return -ENOMEM;
cpumask_set_cpu(cpu, priv->cpus);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index f6f8d7f450e7f..66e10a19d76ab 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -653,14 +653,16 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
if (policy->boost_enabled == enable)
return count;
+ policy->boost_enabled = enable;
+
cpus_read_lock();
ret = cpufreq_driver->set_boost(policy, enable);
cpus_read_unlock();
- if (ret)
+ if (ret) {
+ policy->boost_enabled = !policy->boost_enabled;
return ret;
-
- policy->boost_enabled = enable;
+ }
return count;
}
@@ -1428,6 +1430,9 @@ static int cpufreq_online(unsigned int cpu)
goto out_free_policy;
}
+ /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
+ policy->boost_enabled = cpufreq_boost_enabled() && policy_has_boost_freq(policy);
+
/*
* The initialization has succeeded and the policy is online.
* If there is a problem with its frequency table, take it
@@ -2769,11 +2774,12 @@ int cpufreq_boost_trigger_state(int state)
cpus_read_lock();
for_each_active_policy(policy) {
+ policy->boost_enabled = state;
ret = cpufreq_driver->set_boost(policy, state);
- if (ret)
+ if (ret) {
+ policy->boost_enabled = !policy->boost_enabled;
goto err_reset_state;
-
- policy->boost_enabled = state;
+ }
}
cpus_read_unlock();
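The two cpufreq.c hunks above switch to an optimistic update of policy->boost_enabled: the flag is flipped before ->set_boost() is called and flipped back only if the call fails. A compact userspace sketch of that rollback idiom; set_boost() below is a stand-in that can be forced to fail, not the real driver callback:

#include <stdbool.h>
#include <stdio.h>

struct policy {
	bool boost_enabled;
};

/* Stand-in for the driver's ->set_boost() callback; fails when told to. */
static int set_boost(struct policy *p, bool enable, bool simulate_failure)
{
	(void)p;
	(void)enable;
	return simulate_failure ? -1 : 0;
}

static int store_boost(struct policy *p, bool enable, bool simulate_failure)
{
	int ret;

	if (p->boost_enabled == enable)
		return 0;

	p->boost_enabled = enable;			/* optimistic update */
	ret = set_boost(p, enable, simulate_failure);
	if (ret) {
		p->boost_enabled = !p->boost_enabled;	/* roll back on error */
		return ret;
	}
	return 0;
}

int main(void)
{
	struct policy p = { .boost_enabled = false };

	store_boost(&p, true, false);
	printf("after success: %d\n", p.boost_enabled);	/* 1 */
	store_boost(&p, false, true);
	printf("after failure: %d\n", p.boost_enabled);	/* still 1 */
	return 0;
}

Setting the flag first means any reader consulting policy->boost_enabled while set_boost() is in flight already sees the intended state, and the error path restores consistency.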
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index c4d4643b6ca65..c17dc51a5a022 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -40,7 +40,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
cpufreq_for_each_valid_entry(pos, table) {
freq = pos->frequency;
- if (!cpufreq_boost_enabled()
+ if ((!cpufreq_boost_enabled() || !policy->boost_enabled)
&& (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 0b483bd0d3ca6..3b4f6bfb2f4cf 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -30,6 +30,7 @@ struct scmi_data {
static struct scmi_protocol_handle *ph;
static const struct scmi_perf_proto_ops *perf_ops;
+static struct cpufreq_driver scmi_cpufreq_driver;
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
@@ -167,6 +168,12 @@ scmi_get_rate_limit(u32 domain, bool has_fast_switch)
return rate_limit;
}
+static struct freq_attr *scmi_cpufreq_hw_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+ NULL,
+};
+
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
int ret, nr_opp, domain;
@@ -276,6 +283,17 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
policy->transition_delay_us =
scmi_get_rate_limit(domain, policy->fast_switch_possible);
+ if (policy_has_boost_freq(policy)) {
+ ret = cpufreq_enable_boost_support();
+ if (ret) {
+ dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
+ goto out_free_opp;
+ } else {
+ scmi_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
+ scmi_cpufreq_driver.boost_enabled = true;
+ }
+ }
+
return 0;
out_free_opp:
@@ -334,7 +352,7 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
- .attr = cpufreq_generic_attr,
+ .attr = scmi_cpufreq_hw_attr,
.target_index = scmi_cpufreq_set_target,
.fast_switch = scmi_cpufreq_fast_switch,
.get = scmi_cpufreq_get_rate,
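scmi_cpufreq_hw_attr above is declared with two trailing NULL entries so that, when a policy turns out to have boost frequencies, the boost attribute can be written into slot 1 at init time while the array stays NULL-terminated either way. A sketch of that idiom with plain strings standing in for the freq_attr pointers:

#include <stdio.h>

/* NULL-terminated attribute list with a spare slot for an optional entry. */
static const char *hw_attrs[] = {
	"scaling_available_frequencies",
	NULL,	/* optionally becomes "scaling_boost_frequencies" */
	NULL,	/* terminator when the optional slot is used */
};

int main(void)
{
	int have_boost = 1;	/* pretend policy_has_boost_freq() returned true */

	if (have_boost)
		hw_attrs[1] = "scaling_boost_frequencies";

	for (const char **a = hw_attrs; *a; a++)
		printf("%s\n", *a);
	return 0;
}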
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index e8094fc92491e..a6e123dfe394d 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -73,26 +73,6 @@ static inline bool sbi_is_domain_state_available(void)
return data->available;
}
-static int sbi_suspend_finisher(unsigned long suspend_type,
- unsigned long resume_addr,
- unsigned long opaque)
-{
- struct sbiret ret;
-
- ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
- suspend_type, resume_addr, opaque, 0, 0, 0);
-
- return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
-}
-
-static int sbi_suspend(u32 state)
-{
- if (state & SBI_HSM_SUSP_NON_RET_BIT)
- return cpu_suspend(state, sbi_suspend_finisher);
- else
- return sbi_suspend_finisher(state, 0, 0);
-}
-
static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx)
{
@@ -100,9 +80,9 @@ static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
u32 state = states[idx];
if (state & SBI_HSM_SUSP_NON_RET_BIT)
- return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, state);
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend, idx, state);
else
- return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(sbi_suspend,
+ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend,
idx, state);
}
@@ -133,7 +113,7 @@ static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
else
state = states[idx];
- ret = sbi_suspend(state) ? -1 : idx;
+ ret = riscv_sbi_hart_suspend(state) ? -1 : idx;
ct_cpuidle_exit();
@@ -206,17 +186,6 @@ static const struct of_device_id sbi_cpuidle_state_match[] = {
{ },
};
-static bool sbi_suspend_state_is_valid(u32 state)
-{
- if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
- state < SBI_HSM_SUSPEND_RET_PLATFORM)
- return false;
- if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
- state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
- return false;
- return true;
-}
-
static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
{
int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);
@@ -226,7 +195,7 @@ static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
return err;
}
- if (!sbi_suspend_state_is_valid(*state)) {
+ if (!riscv_sbi_suspend_state_is_valid(*state)) {
pr_warn("Invalid SBI suspend state %#x\n", *state);
return -EINVAL;
}
@@ -607,16 +576,8 @@ static int __init sbi_cpuidle_init(void)
int ret;
struct platform_device *pdev;
- /*
- * The SBI HSM suspend function is only available when:
- * 1) SBI version is 0.3 or higher
- * 2) SBI HSM extension is available
- */
- if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
- !sbi_probe_extension(SBI_EXT_HSM)) {
- pr_info("HSM suspend not available\n");
+ if (!riscv_sbi_hsm_is_supported())
return 0;
- }
ret = platform_driver_register(&sbi_cpuidle_driver);
if (ret)
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 737a026ef58a3..02e40fd7d948c 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -237,7 +237,7 @@ noinstr int cpuidle_enter_state(struct cpuidle_device *dev,
}
if (target_state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
- leave_mm(dev->cpu);
+ leave_mm();
/* Take note of the planned idle state. */
sched_idle_set_state(target_state);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0991f026cb070..3d02702456a50 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -611,13 +611,13 @@ config CRYPTO_DEV_QCOM_RNG
To compile this driver as a module, choose M here. The
module will be called qcom-rng. If unsure, say N.
-config CRYPTO_DEV_VMX
- bool "Support for VMX cryptographic acceleration instructions"
- depends on PPC64 && VSX
- help
- Support for VMX cryptographic acceleration instructions.
-
-source "drivers/crypto/vmx/Kconfig"
+#config CRYPTO_DEV_VMX
+# bool "Support for VMX cryptographic acceleration instructions"
+# depends on PPC64 && VSX
+# help
+# Support for VMX cryptographic acceleration instructions.
+#
+#source "drivers/crypto/vmx/Kconfig"
config CRYPTO_DEV_IMGTEC_HASH
tristate "Imagination Technologies hardware hash accelerator"
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index d859d6a5f3a45..95331bc6456b7 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -42,7 +42,7 @@ obj-$(CONFIG_CRYPTO_DEV_SL3516) += gemini/
obj-y += stm32/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
-obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
+#obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index d358334e59811..ee2a28c906ede 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -362,7 +362,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
digestsize = SHA512_DIGEST_SIZE;
/* the padding could be up to two block. */
- buf = kzalloc(bs * 2, GFP_KERNEL | GFP_DMA);
+ buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA);
if (!buf) {
err = -ENOMEM;
goto theend;
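The sun8i-ce-hash hunk above replaces kzalloc(bs * 2, ...) with kcalloc(2, bs, ...), whose multiplication is overflow-checked before the allocation is attempted. The userspace analogue is calloc() versus an open-coded malloc(n * 2); the oversized n below is contrived purely to trigger the wraparound:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = (size_t)-1 / 2 + 1;	/* n * 2 wraps around to 0 */

	/* calloc() detects the n * size overflow and fails cleanly */
	void *p = calloc(2, n);
	printf("calloc(2, n): %p\n", p);

	/* the open-coded product wraps before malloc() ever sees it,
	 * so this may "succeed" with a far-too-small allocation */
	void *q = malloc(n * 2);
	printf("malloc(n * 2): %p\n", q);

	free(p);
	free(q);
	return 0;
}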
diff --git a/drivers/crypto/ccp/platform-access.c b/drivers/crypto/ccp/platform-access.c
index 94367bc49e35b..1b8ed33897332 100644
--- a/drivers/crypto/ccp/platform-access.c
+++ b/drivers/crypto/ccp/platform-access.c
@@ -118,9 +118,16 @@ int psp_send_platform_access_msg(enum psp_platform_access_msg msg,
goto unlock;
}
- /* Store the status in request header for caller to investigate */
+ /*
+ * Read status from PSP. If status is non-zero, it indicates an error
+ * occurred during "processing" of the command.
+ * If status is zero, it indicates the command was "processed"
+ * successfully, but the result of the command is in the payload.
+ * Return both cases to the caller as -EIO to investigate.
+ */
cmd_reg = ioread32(cmd);
- req->header.status = FIELD_GET(PSP_CMDRESP_STS, cmd_reg);
+ if (FIELD_GET(PSP_CMDRESP_STS, cmd_reg))
+ req->header.status = FIELD_GET(PSP_CMDRESP_STS, cmd_reg);
if (req->header.status) {
ret = -EIO;
goto unlock;
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 124a2e0c89993..56bf832c29477 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -156,11 +156,14 @@ static unsigned int psp_get_capability(struct psp_device *psp)
}
psp->capability = val;
- /* Detect if TSME and SME are both enabled */
+ /* Detect TSME and/or SME status */
if (PSP_CAPABILITY(psp, PSP_SECURITY_REPORTING) &&
- psp->capability & (PSP_SECURITY_TSME_STATUS << PSP_CAPABILITY_PSP_SECURITY_OFFSET) &&
- cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
- dev_notice(psp->dev, "psp: Both TSME and SME are active, SME is unnecessary when TSME is active.\n");
+ psp->capability & (PSP_SECURITY_TSME_STATUS << PSP_CAPABILITY_PSP_SECURITY_OFFSET)) {
+ if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
+ dev_notice(psp->dev, "psp: Both TSME and SME are active, SME is unnecessary when TSME is active.\n");
+ else
+ dev_notice(psp->dev, "psp: TSME enabled\n");
+ }
return 0;
}
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index f44efbb89c346..2102377f727b1 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -1090,7 +1090,7 @@ static int __sev_snp_init_locked(int *error)
void *arg = &data;
int cmd, rc = 0;
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return -ENODEV;
sev = psp->sev_data;
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
index 80ed4b2d209ca..cd67fa348ca72 100644
--- a/drivers/crypto/hisilicon/debugfs.c
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -24,6 +24,8 @@
#define QM_DFX_QN_SHIFT 16
#define QM_DFX_CNT_CLR_CE 0x100118
#define QM_DBG_WRITE_LEN 1024
+#define QM_IN_IDLE_ST_REG 0x1040e4
+#define QM_IN_IDLE_STATE 0x1
static const char * const qm_debug_file_name[] = {
[CURRENT_QM] = "current_qm",
@@ -81,6 +83,30 @@ static const struct debugfs_reg32 qm_dfx_regs[] = {
{"QM_DFX_FF_ST5 ", 0x1040dc},
{"QM_DFX_FF_ST6 ", 0x1040e0},
{"QM_IN_IDLE_ST ", 0x1040e4},
+ {"QM_CACHE_CTL ", 0x100050},
+ {"QM_TIMEOUT_CFG ", 0x100070},
+ {"QM_DB_TIMEOUT_CFG ", 0x100074},
+ {"QM_FLR_PENDING_TIME_CFG ", 0x100078},
+ {"QM_ARUSR_MCFG1 ", 0x100088},
+ {"QM_AWUSR_MCFG1 ", 0x100098},
+ {"QM_AXI_M_CFG_ENABLE ", 0x1000B0},
+ {"QM_RAS_CE_THRESHOLD ", 0x1000F8},
+ {"QM_AXI_TIMEOUT_CTRL ", 0x100120},
+ {"QM_AXI_TIMEOUT_STATUS ", 0x100124},
+ {"QM_CQE_AGGR_TIMEOUT_CTRL ", 0x100144},
+ {"ACC_RAS_MSI_INT_SEL ", 0x1040fc},
+ {"QM_CQE_OUT ", 0x104100},
+ {"QM_EQE_OUT ", 0x104104},
+ {"QM_AEQE_OUT ", 0x104108},
+ {"QM_DB_INFO0 ", 0x104180},
+ {"QM_DB_INFO1 ", 0x104184},
+ {"QM_AM_CTRL_GLOBAL ", 0x300000},
+ {"QM_AM_CURR_PORT_STS ", 0x300100},
+ {"QM_AM_CURR_TRANS_RETURN ", 0x300150},
+ {"QM_AM_CURR_RD_MAX_TXID ", 0x300154},
+ {"QM_AM_CURR_WR_MAX_TXID ", 0x300158},
+ {"QM_AM_ALARM_RRESP ", 0x300180},
+ {"QM_AM_ALARM_BRESP ", 0x300184},
};
static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
@@ -1001,6 +1027,30 @@ static int qm_diff_regs_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(qm_diff_regs);
+static int qm_state_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+ u32 val;
+ int ret;
+
+ /* If the device is suspended, directly return the idle state. */
+ ret = hisi_qm_get_dfx_access(qm);
+ if (!ret) {
+ val = readl(qm->io_base + QM_IN_IDLE_ST_REG);
+ hisi_qm_put_dfx_access(qm);
+ } else if (ret == -EAGAIN) {
+ val = QM_IN_IDLE_STATE;
+ } else {
+ return ret;
+ }
+
+ seq_printf(s, "%u\n", val);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(qm_state);
+
static ssize_t qm_status_read(struct file *filp, char __user *buffer,
size_t count, loff_t *pos)
{
@@ -1062,6 +1112,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
void hisi_qm_debug_init(struct hisi_qm *qm)
{
struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs;
+ struct qm_dev_dfx *dev_dfx = &qm->debug.dev_dfx;
struct qm_dfx *dfx = &qm->debug.dfx;
struct dentry *qm_d;
void *data;
@@ -1072,6 +1123,9 @@ void hisi_qm_debug_init(struct hisi_qm *qm)
/* only show this in PF */
if (qm->fun_type == QM_HW_PF) {
+ debugfs_create_file("qm_state", 0444, qm->debug.qm_d,
+ qm, &qm_state_fops);
+
qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
qm_create_debugfs_file(qm, qm->debug.qm_d, i);
@@ -1087,6 +1141,10 @@ void hisi_qm_debug_init(struct hisi_qm *qm)
debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
&qm_status_fops);
+
+ debugfs_create_u32("dev_state", 0444, qm->debug.qm_d, &dev_dfx->dev_state);
+ debugfs_create_u32("dev_timeout", 0644, qm->debug.qm_d, &dev_dfx->dev_timeout);
+
for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
debugfs_create_file(qm_dfx_files[i].name,
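qm_state_show() above has to cope with runtime PM: hisi_qm_get_dfx_access() returns -EAGAIN when the device is suspended, in which case the file reports the idle value instead of waking the device, and any other error is propagated. A small sketch of that decision logic; the access and register helpers are stand-ins, not the driver's functions:

#include <errno.h>
#include <stdio.h>

#define QM_IN_IDLE_STATE	0x1

/* Stand-ins for hisi_qm_get_dfx_access() and the QM_IN_IDLE_ST read. */
static int get_dfx_access(int suspended) { return suspended ? -EAGAIN : 0; }
static unsigned int read_idle_reg(void)  { return 0; /* pretend: engine busy */ }

static int qm_state(int suspended, unsigned int *val)
{
	int ret = get_dfx_access(suspended);

	if (!ret)
		*val = read_idle_reg();		/* device awake: read the register */
	else if (ret == -EAGAIN)
		*val = QM_IN_IDLE_STATE;	/* suspended: idle by definition */
	else
		return ret;			/* real error: propagate */
	return 0;
}

int main(void)
{
	unsigned int v;

	qm_state(0, &v);
	printf("awake:     %u\n", v);
	qm_state(1, &v);
	printf("suspended: %u\n", v);
	return 0;
}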
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 3255b2a070c78..d93aa6630a578 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -440,7 +440,7 @@ MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
struct hisi_qp *hpre_create_qp(u8 type)
{
- int node = cpu_to_node(smp_processor_id());
+ int node = cpu_to_node(raw_smp_processor_id());
struct hisi_qp *qp = NULL;
int ret;
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 4b20b94e6371a..92f0a1d9b4a6b 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -236,6 +236,12 @@
#define QM_DEV_ALG_MAX_LEN 256
+ /* abnormal status value for stopping queue */
+#define QM_STOP_QUEUE_FAIL 1
+#define QM_DUMP_SQC_FAIL 3
+#define QM_DUMP_CQC_FAIL 4
+#define QM_FINISH_WAIT 5
+
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
@@ -312,6 +318,7 @@ static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
{QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0},
{QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1},
{QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_STOP_FUNC, 0x3100, 0, BIT(10), 0x0, 0x0, 0x1},
{QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
};
@@ -1674,6 +1681,11 @@ unlock:
return ret;
}
+static int qm_drain_qm(struct hisi_qm *qm)
+{
+ return hisi_qm_mb(qm, QM_MB_CMD_FLUSH_QM, 0, 0, 0);
+}
+
static int qm_stop_qp(struct hisi_qp *qp)
{
return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
@@ -2031,43 +2043,25 @@ static void qp_stop_fail_cb(struct hisi_qp *qp)
}
}
-/**
- * qm_drain_qp() - Drain a qp.
- * @qp: The qp we want to drain.
- *
- * Determine whether the queue is cleared by judging the tail pointers of
- * sq and cq.
- */
-static int qm_drain_qp(struct hisi_qp *qp)
+static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id)
{
- struct hisi_qm *qm = qp->qm;
struct device *dev = &qm->pdev->dev;
struct qm_sqc sqc;
struct qm_cqc cqc;
int ret, i = 0;
- /* No need to judge if master OOO is blocked. */
- if (qm_check_dev_error(qm))
- return 0;
-
- /* Kunpeng930 supports drain qp by device */
- if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) {
- ret = qm_stop_qp(qp);
- if (ret)
- dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
- return ret;
- }
-
while (++i) {
- ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp->qp_id, 1);
+ ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);
if (ret) {
dev_err_ratelimited(dev, "Failed to dump sqc!\n");
+ *state = QM_DUMP_SQC_FAIL;
return ret;
}
- ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp->qp_id, 1);
+ ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1);
if (ret) {
dev_err_ratelimited(dev, "Failed to dump cqc!\n");
+ *state = QM_DUMP_CQC_FAIL;
return ret;
}
@@ -2076,8 +2070,9 @@ static int qm_drain_qp(struct hisi_qp *qp)
break;
if (i == MAX_WAIT_COUNTS) {
- dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id);
- return -EBUSY;
+ dev_err(dev, "Fail to empty queue %u!\n", qp_id);
+ *state = QM_STOP_QUEUE_FAIL;
+ return -ETIMEDOUT;
}
usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
@@ -2086,9 +2081,53 @@ static int qm_drain_qp(struct hisi_qp *qp)
return 0;
}
-static int qm_stop_qp_nolock(struct hisi_qp *qp)
+/**
+ * qm_drain_qp() - Drain a qp.
+ * @qp: The qp we want to drain.
+ *
+ * If the device does not support stopping queue by sending mailbox,
+ * determine whether the queue is cleared by judging the tail pointers of
+ * sq and cq.
+ */
+static int qm_drain_qp(struct hisi_qp *qp)
+{
+ struct hisi_qm *qm = qp->qm;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
+ u32 state = 0;
+ int ret;
+
+ /* No need to judge if master OOO is blocked. */
+ if (qm_check_dev_error(pf_qm))
+ return 0;
+
+ /* HW V3 supports drain qp by device */
+ if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) {
+ ret = qm_stop_qp(qp);
+ if (ret) {
+ dev_err(&qm->pdev->dev, "Failed to stop qp!\n");
+ state = QM_STOP_QUEUE_FAIL;
+ goto set_dev_state;
+ }
+ return ret;
+ }
+
+ ret = qm_wait_qp_empty(qm, &state, qp->qp_id);
+ if (ret)
+ goto set_dev_state;
+
+ return 0;
+
+set_dev_state:
+ if (qm->debug.dev_dfx.dev_timeout)
+ qm->debug.dev_dfx.dev_state = state;
+
+ return ret;
+}
+
+static void qm_stop_qp_nolock(struct hisi_qp *qp)
{
- struct device *dev = &qp->qm->pdev->dev;
+ struct hisi_qm *qm = qp->qm;
+ struct device *dev = &qm->pdev->dev;
int ret;
/*
@@ -2099,39 +2138,36 @@ static int qm_stop_qp_nolock(struct hisi_qp *qp)
*/
if (atomic_read(&qp->qp_status.flags) != QP_START) {
qp->is_resetting = false;
- return 0;
+ return;
}
atomic_set(&qp->qp_status.flags, QP_STOP);
- ret = qm_drain_qp(qp);
- if (ret)
- dev_err(dev, "Failed to drain out data for stopping!\n");
+ /* V3 supports direct stop function when FLR prepare */
+ if (qm->ver < QM_HW_V3 || qm->status.stop_reason == QM_NORMAL) {
+ ret = qm_drain_qp(qp);
+ if (ret)
+ dev_err(dev, "Failed to drain out data for stopping qp(%u)!\n", qp->qp_id);
+ }
- flush_workqueue(qp->qm->wq);
+ flush_workqueue(qm->wq);
if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
qp_stop_fail_cb(qp);
dev_dbg(dev, "stop queue %u!", qp->qp_id);
-
- return 0;
}
/**
* hisi_qm_stop_qp() - Stop a qp in qm.
* @qp: The qp we want to stop.
*
- * This function is reverse of hisi_qm_start_qp. Return 0 if successful.
+ * This function is reverse of hisi_qm_start_qp.
*/
-int hisi_qm_stop_qp(struct hisi_qp *qp)
+void hisi_qm_stop_qp(struct hisi_qp *qp)
{
- int ret;
-
down_write(&qp->qm->qps_lock);
- ret = qm_stop_qp_nolock(qp);
+ qm_stop_qp_nolock(qp);
up_write(&qp->qm->qps_lock);
-
- return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
@@ -2309,7 +2345,31 @@ static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
{
- hisi_qm_stop_qp(q->priv);
+ struct hisi_qp *qp = q->priv;
+ struct hisi_qm *qm = qp->qm;
+ struct qm_dev_dfx *dev_dfx = &qm->debug.dev_dfx;
+ u32 i = 0;
+
+ hisi_qm_stop_qp(qp);
+
+ if (!dev_dfx->dev_timeout || !dev_dfx->dev_state)
+ return;
+
+ /*
+ * After the queue fails to be stopped,
+ * wait for a period of time before releasing the queue.
+ */
+ while (++i) {
+ msleep(WAIT_PERIOD);
+
+ /* Since dev_timeout may be modified, check i >= dev_timeout */
+ if (i >= dev_dfx->dev_timeout) {
+ dev_err(&qm->pdev->dev, "Stop q %u timeout, state %u\n",
+ qp->qp_id, dev_dfx->dev_state);
+ dev_dfx->dev_state = QM_FINISH_WAIT;
+ break;
+ }
+ }
}
static int hisi_qm_is_q_updated(struct uacce_queue *q)
@@ -3054,25 +3114,18 @@ static int qm_restart(struct hisi_qm *qm)
}
/* Stop started qps in reset flow */
-static int qm_stop_started_qp(struct hisi_qm *qm)
+static void qm_stop_started_qp(struct hisi_qm *qm)
{
- struct device *dev = &qm->pdev->dev;
struct hisi_qp *qp;
- int i, ret;
+ int i;
for (i = 0; i < qm->qp_num; i++) {
qp = &qm->qp_array[i];
- if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
+ if (atomic_read(&qp->qp_status.flags) == QP_START) {
qp->is_resetting = true;
- ret = qm_stop_qp_nolock(qp);
- if (ret < 0) {
- dev_err(dev, "Failed to stop qp%d!\n", i);
- return ret;
- }
+ qm_stop_qp_nolock(qp);
}
}
-
- return 0;
}
/**
@@ -3112,21 +3165,31 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
down_write(&qm->qps_lock);
- qm->status.stop_reason = r;
if (atomic_read(&qm->status.flags) == QM_STOP)
goto err_unlock;
/* Stop all the request sending at first. */
atomic_set(&qm->status.flags, QM_STOP);
+ qm->status.stop_reason = r;
- if (qm->status.stop_reason == QM_SOFT_RESET ||
- qm->status.stop_reason == QM_DOWN) {
+ if (qm->status.stop_reason != QM_NORMAL) {
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
- ret = qm_stop_started_qp(qm);
- if (ret < 0) {
- dev_err(dev, "Failed to stop started qp!\n");
- goto err_unlock;
+ /*
+ * When performing soft reset, the hardware will no longer
+ * do tasks, and the tasks in the device will be flushed
+ * out directly since the master ooo is closed.
+ */
+ if (test_bit(QM_SUPPORT_STOP_FUNC, &qm->caps) &&
+ r != QM_SOFT_RESET) {
+ ret = qm_drain_qm(qm);
+ if (ret) {
+ dev_err(dev, "failed to drain qm!\n");
+ goto err_unlock;
+ }
}
+
+ qm_stop_started_qp(qm);
+
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
}
@@ -3141,6 +3204,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
}
qm_clear_queues(qm);
+ qm->status.stop_reason = QM_NORMAL;
err_unlock:
up_write(&qm->qps_lock);
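qm_wait_qp_empty() above is a bounded polling loop: it dumps SQC/CQC, compares the tail pointers, sleeps with usleep_range() between attempts, and gives up with -ETIMEDOUT (recording a state code for the new dev_state/dev_timeout debugfs knobs) after MAX_WAIT_COUNTS iterations. A standalone sketch of the same bounded-poll shape; the queue_empty() predicate and the constants are illustrative only:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_WAIT_COUNTS	10
#define WAIT_PERIOD_US	100

/* Stand-in for "dump SQC/CQC and compare tails": drains after a few polls. */
static int queue_empty(int poll)
{
	return poll >= 3;
}

static int wait_queue_empty(void)
{
	int i = 0;

	while (++i) {
		if (queue_empty(i))
			return 0;
		if (i == MAX_WAIT_COUNTS) {
			fprintf(stderr, "queue did not drain\n");
			return -ETIMEDOUT;
		}
		usleep(WAIT_PERIOD_US);	/* usleep_range() in the driver */
	}
	return 0;
}

int main(void)
{
	printf("wait_queue_empty() = %d\n", wait_queue_empty());
	return 0;
}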
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index f028dcfd0ead7..93a972fcbf638 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -118,7 +118,7 @@ struct sec_aead {
};
/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
-static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+static inline u32 sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
if (req->c_req.encrypt)
return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
@@ -485,8 +485,7 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
sec_free_mac_resource(dev, qp_ctx->res);
}
-static int sec_alloc_qp_ctx_resource(struct hisi_qm *qm, struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
+static int sec_alloc_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
u16 q_depth = qp_ctx->qp->sq_depth;
struct device *dev = ctx->dev;
@@ -541,8 +540,7 @@ static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_
kfree(qp_ctx->req_list);
}
-static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
- int qp_ctx_id, int alg_type)
+static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)
{
struct sec_qp_ctx *qp_ctx;
struct hisi_qp *qp;
@@ -561,7 +559,7 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
idr_init(&qp_ctx->req_idr);
INIT_LIST_HEAD(&qp_ctx->backlog);
- ret = sec_alloc_qp_ctx_resource(qm, ctx, qp_ctx);
+ ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
if (ret)
goto err_destroy_idr;
@@ -614,7 +612,7 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
}
for (i = 0; i < sec->ctx_q_num; i++) {
- ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
+ ret = sec_create_qp_ctx(ctx, i);
if (ret)
goto err_sec_release_qp_ctx;
}
@@ -750,9 +748,7 @@ static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
sec_ctx_base_uninit(ctx);
}
-static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key,
- const u32 keylen,
- const enum sec_cmode c_mode)
+static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key, const u32 keylen)
{
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
@@ -843,7 +839,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
switch (c_alg) {
case SEC_CALG_3DES:
- ret = sec_skcipher_3des_setkey(tfm, key, keylen, c_mode);
+ ret = sec_skcipher_3des_setkey(tfm, key, keylen);
break;
case SEC_CALG_AES:
case SEC_CALG_SM4:
@@ -1371,7 +1367,7 @@ static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
sec_sqe3->bd_param = cpu_to_le32(bd_param);
sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len);
- sec_sqe3->tag = cpu_to_le64(req);
+ sec_sqe3->tag = cpu_to_le64((unsigned long)req);
return 0;
}
@@ -2145,8 +2141,8 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
return sec_skcipher_crypto(sk_req, false);
}
-#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
- sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
+#define SEC_SKCIPHER_ALG(sec_cra_name, sec_set_key, \
+ sec_min_key_size, sec_max_key_size, blk_size, iv_size)\
{\
.base = {\
.cra_name = sec_cra_name,\
@@ -2158,8 +2154,8 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.cra_ctxsize = sizeof(struct sec_ctx),\
.cra_module = THIS_MODULE,\
},\
- .init = ctx_init,\
- .exit = ctx_exit,\
+ .init = sec_skcipher_ctx_init,\
+ .exit = sec_skcipher_ctx_exit,\
.setkey = sec_set_key,\
.decrypt = sec_skcipher_decrypt,\
.encrypt = sec_skcipher_encrypt,\
@@ -2168,11 +2164,6 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.ivsize = iv_size,\
}
-#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
- max_key_size, blk_size, iv_size) \
- SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
- sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
-
static struct sec_skcipher sec_skciphers[] = {
{
.alg_msk = BIT(0),
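sec_alloc_queue_id() above now returns u32 so the atomic counter modulo the queue count can never produce a negative index after the counter wraps. A standalone sketch of the round-robin selection using C11 atomics (fetch-and-add instead of the kernel's atomic_inc_return(), which only changes whether the pre- or post-increment value feeds the modulo):

#include <stdatomic.h>
#include <stdio.h>

#define NR_QUEUES 4

static atomic_uint qcyclic;

/* Each caller takes the next queue modulo the queue count: lock-free
 * load balancing across the available queues. */
static unsigned int alloc_queue_id(void)
{
	return atomic_fetch_add(&qcyclic, 1) % NR_QUEUES;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("request %d -> queue %u\n", i, alloc_queue_id());
	return 0;
}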
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 7bb99381bbdfb..c290d8937b19c 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -282,6 +282,11 @@ static const struct debugfs_reg32 sec_dfx_regs[] = {
{"SEC_BD_SAA6 ", 0x301C38},
{"SEC_BD_SAA7 ", 0x301C3C},
{"SEC_BD_SAA8 ", 0x301C40},
+ {"SEC_RAS_CE_ENABLE ", 0x301050},
+ {"SEC_RAS_FE_ENABLE ", 0x301054},
+ {"SEC_RAS_NFE_ENABLE ", 0x301058},
+ {"SEC_REQ_TRNG_TIME_TH ", 0x30112C},
+ {"SEC_CHANNEL_RNG_REQ_THLD ", 0x302110},
};
/* define the SEC's dfx regs region and region length */
@@ -374,7 +379,7 @@ void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
struct hisi_qp **sec_create_qps(void)
{
- int node = cpu_to_node(smp_processor_id());
+ int node = cpu_to_node(raw_smp_processor_id());
u32 ctx_num = ctx_q_num;
struct hisi_qp **qps;
int ret;
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index c650c741a18d8..94e2d66b04b65 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -591,6 +591,7 @@ static struct acomp_alg hisi_zip_acomp_deflate = {
.base = {
.cra_name = "deflate",
.cra_driver_name = "hisi-deflate-acomp",
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_module = THIS_MODULE,
.cra_priority = HZIP_ALG_PRIORITY,
.cra_ctxsize = sizeof(struct hisi_zip_ctx),
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 479ba8a1d6b5d..c065fd867161d 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -454,7 +454,7 @@ MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);
int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)
{
if (node == NUMA_NO_NODE)
- node = cpu_to_node(smp_processor_id());
+ node = cpu_to_node(raw_smp_processor_id());
return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
}
diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h
index 014420f7beb03..2524091a5f70a 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto.h
+++ b/drivers/crypto/intel/iaa/iaa_crypto.h
@@ -59,10 +59,8 @@ struct iaa_device_compression_mode {
const char *name;
struct aecs_comp_table_record *aecs_comp_table;
- struct aecs_decomp_table_record *aecs_decomp_table;
dma_addr_t aecs_comp_table_dma_addr;
- dma_addr_t aecs_decomp_table_dma_addr;
};
/* Representation of IAA device with wqs, populated by probe */
@@ -107,23 +105,6 @@ struct aecs_comp_table_record {
u32 reserved_padding[2];
} __packed;
-/* AECS for decompress */
-struct aecs_decomp_table_record {
- u32 crc;
- u32 xor_checksum;
- u32 low_filter_param;
- u32 high_filter_param;
- u32 output_mod_idx;
- u32 drop_init_decomp_out_bytes;
- u32 reserved[36];
- u32 output_accum_data[2];
- u32 out_bits_valid;
- u32 bit_off_indexing;
- u32 input_accum_data[64];
- u8 size_qw[32];
- u32 decomp_state[1220];
-} __packed;
-
int iaa_aecs_init_fixed(void);
void iaa_aecs_cleanup_fixed(void);
@@ -136,9 +117,6 @@ struct iaa_compression_mode {
int ll_table_size;
u32 *d_table;
int d_table_size;
- u32 *header_table;
- int header_table_size;
- u16 gen_decomp_table_flags;
iaa_dev_comp_init_fn_t init;
iaa_dev_comp_free_fn_t free;
};
@@ -148,9 +126,6 @@ int add_iaa_compression_mode(const char *name,
int ll_table_size,
const u32 *d_table,
int d_table_size,
- const u8 *header_table,
- int header_table_size,
- u16 gen_decomp_table_flags,
iaa_dev_comp_init_fn_t init,
iaa_dev_comp_free_fn_t free);
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c b/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c
index 45cf5d74f0fb9..19d9a333ac49c 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c
@@ -78,7 +78,6 @@ int iaa_aecs_init_fixed(void)
sizeof(fixed_ll_sym),
fixed_d_sym,
sizeof(fixed_d_sym),
- NULL, 0, 0,
init_fixed_mode, NULL);
if (!ret)
pr_debug("IAA fixed compression mode initialized\n");
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index dfd3baf0a8d87..b2191ade9011c 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -258,16 +258,14 @@ static void free_iaa_compression_mode(struct iaa_compression_mode *mode)
kfree(mode->name);
kfree(mode->ll_table);
kfree(mode->d_table);
- kfree(mode->header_table);
kfree(mode);
}
/*
- * IAA Compression modes are defined by an ll_table, a d_table, and an
- * optional header_table. These tables are typically generated and
- * captured using statistics collected from running actual
- * compress/decompress workloads.
+ * IAA Compression modes are defined by an ll_table and a d_table.
+ * These tables are typically generated and captured using statistics
+ * collected from running actual compress/decompress workloads.
*
* A module or other kernel code can add and remove compression modes
* with a given name using the exported @add_iaa_compression_mode()
@@ -315,9 +313,6 @@ EXPORT_SYMBOL_GPL(remove_iaa_compression_mode);
* @ll_table_size: The ll table size in bytes
* @d_table: The d table
* @d_table_size: The d table size in bytes
- * @header_table: Optional header table
- * @header_table_size: Optional header table size in bytes
- * @gen_decomp_table_flags: Otional flags used to generate the decomp table
* @init: Optional callback function to init the compression mode data
* @free: Optional callback function to free the compression mode data
*
@@ -330,9 +325,6 @@ int add_iaa_compression_mode(const char *name,
int ll_table_size,
const u32 *d_table,
int d_table_size,
- const u8 *header_table,
- int header_table_size,
- u16 gen_decomp_table_flags,
iaa_dev_comp_init_fn_t init,
iaa_dev_comp_free_fn_t free)
{
@@ -370,16 +362,6 @@ int add_iaa_compression_mode(const char *name,
mode->d_table_size = d_table_size;
}
- if (header_table) {
- mode->header_table = kzalloc(header_table_size, GFP_KERNEL);
- if (!mode->header_table)
- goto free;
- memcpy(mode->header_table, header_table, header_table_size);
- mode->header_table_size = header_table_size;
- }
-
- mode->gen_decomp_table_flags = gen_decomp_table_flags;
-
mode->init = init;
mode->free = free;
@@ -420,10 +402,6 @@ static void free_device_compression_mode(struct iaa_device *iaa_device,
if (device_mode->aecs_comp_table)
dma_free_coherent(dev, size, device_mode->aecs_comp_table,
device_mode->aecs_comp_table_dma_addr);
- if (device_mode->aecs_decomp_table)
- dma_free_coherent(dev, size, device_mode->aecs_decomp_table,
- device_mode->aecs_decomp_table_dma_addr);
-
kfree(device_mode);
}
@@ -440,73 +418,6 @@ static int check_completion(struct device *dev,
bool compress,
bool only_once);
-static int decompress_header(struct iaa_device_compression_mode *device_mode,
- struct iaa_compression_mode *mode,
- struct idxd_wq *wq)
-{
- dma_addr_t src_addr, src2_addr;
- struct idxd_desc *idxd_desc;
- struct iax_hw_desc *desc;
- struct device *dev;
- int ret = 0;
-
- idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
- if (IS_ERR(idxd_desc))
- return PTR_ERR(idxd_desc);
-
- desc = idxd_desc->iax_hw;
-
- dev = &wq->idxd->pdev->dev;
-
- src_addr = dma_map_single(dev, (void *)mode->header_table,
- mode->header_table_size, DMA_TO_DEVICE);
- dev_dbg(dev, "%s: mode->name %s, src_addr %llx, dev %p, src %p, slen %d\n",
- __func__, mode->name, src_addr, dev,
- mode->header_table, mode->header_table_size);
- if (unlikely(dma_mapping_error(dev, src_addr))) {
- dev_dbg(dev, "dma_map_single err, exiting\n");
- ret = -ENOMEM;
- return ret;
- }
-
- desc->flags = IAX_AECS_GEN_FLAG;
- desc->opcode = IAX_OPCODE_DECOMPRESS;
-
- desc->src1_addr = (u64)src_addr;
- desc->src1_size = mode->header_table_size;
-
- src2_addr = device_mode->aecs_decomp_table_dma_addr;
- desc->src2_addr = (u64)src2_addr;
- desc->src2_size = 1088;
- dev_dbg(dev, "%s: mode->name %s, src2_addr %llx, dev %p, src2_size %d\n",
- __func__, mode->name, desc->src2_addr, dev, desc->src2_size);
- desc->max_dst_size = 0; // suppressed output
-
- desc->decompr_flags = mode->gen_decomp_table_flags;
-
- desc->priv = 0;
-
- desc->completion_addr = idxd_desc->compl_dma;
-
- ret = idxd_submit_desc(wq, idxd_desc);
- if (ret) {
- pr_err("%s: submit_desc failed ret=0x%x\n", __func__, ret);
- goto out;
- }
-
- ret = check_completion(dev, idxd_desc->iax_completion, false, false);
- if (ret)
- dev_dbg(dev, "%s: mode->name %s check_completion failed ret=%d\n",
- __func__, mode->name, ret);
- else
- dev_dbg(dev, "%s: mode->name %s succeeded\n", __func__,
- mode->name);
-out:
- dma_unmap_single(dev, src_addr, 1088, DMA_TO_DEVICE);
-
- return ret;
-}
-
static int init_device_compression_mode(struct iaa_device *iaa_device,
struct iaa_compression_mode *mode,
int idx, struct idxd_wq *wq)
@@ -529,24 +440,11 @@ static int init_device_compression_mode(struct iaa_device *iaa_device,
if (!device_mode->aecs_comp_table)
goto free;
- device_mode->aecs_decomp_table = dma_alloc_coherent(dev, size,
- &device_mode->aecs_decomp_table_dma_addr, GFP_KERNEL);
- if (!device_mode->aecs_decomp_table)
- goto free;
-
/* Add Huffman table to aecs */
memset(device_mode->aecs_comp_table, 0, sizeof(*device_mode->aecs_comp_table));
memcpy(device_mode->aecs_comp_table->ll_sym, mode->ll_table, mode->ll_table_size);
memcpy(device_mode->aecs_comp_table->d_sym, mode->d_table, mode->d_table_size);
- if (mode->header_table) {
- ret = decompress_header(device_mode, mode, wq);
- if (ret) {
- pr_debug("iaa header decompression failed: ret=%d\n", ret);
- goto free;
- }
- }
-
if (mode->init) {
ret = mode->init(device_mode);
if (ret)
@@ -908,6 +806,8 @@ static int save_iaa_wq(struct idxd_wq *wq)
return -EINVAL;
cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
+ if (!cpus_per_iaa)
+ cpus_per_iaa = 1;
out:
return 0;
}
@@ -923,10 +823,12 @@ static void remove_iaa_wq(struct idxd_wq *wq)
}
}
- if (nr_iaa)
+ if (nr_iaa) {
cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
- else
- cpus_per_iaa = 0;
+ if (!cpus_per_iaa)
+ cpus_per_iaa = 1;
+ } else
+ cpus_per_iaa = 1;
}
static int wq_table_add_wqs(int iaa, int cpu)
@@ -1324,7 +1226,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
*compression_crc = idxd_desc->iax_completion->crc;
- if (!ctx->async_mode)
+ if (!ctx->async_mode || disable_async)
idxd_free_desc(wq, idxd_desc);
out:
return ret;
@@ -1570,7 +1472,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
*dlen = req->dlen;
- if (!ctx->async_mode)
+ if (!ctx->async_mode || disable_async)
idxd_free_desc(wq, idxd_desc);
/* Update stats */
@@ -1596,6 +1498,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
u32 compression_crc;
struct idxd_wq *wq;
struct device *dev;
+ u64 start_time_ns;
int order = -1;
compression_ctx = crypto_tfm_ctx(tfm);
@@ -1669,8 +1572,10 @@ static int iaa_comp_acompress(struct acomp_req *req)
" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
req->dst, req->dlen, sg_dma_len(req->dst));
+ start_time_ns = iaa_get_ts();
ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
&req->dlen, &compression_crc, disable_async);
+ update_max_comp_delay_ns(start_time_ns);
if (ret == -EINPROGRESS)
return ret;
@@ -1717,6 +1622,7 @@ static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req)
struct iaa_wq *iaa_wq;
struct device *dev;
struct idxd_wq *wq;
+ u64 start_time_ns;
int order = -1;
cpu = get_cpu();
@@ -1773,8 +1679,10 @@ alloc_dest:
dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
req->dst, req->dlen, sg_dma_len(req->dst));
+ start_time_ns = iaa_get_ts();
ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
dst_addr, &req->dlen, true);
+ update_max_decomp_delay_ns(start_time_ns);
if (ret == -EOVERFLOW) {
dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
req->dlen *= 2;
@@ -1805,6 +1713,7 @@ static int iaa_comp_adecompress(struct acomp_req *req)
int nr_sgs, cpu, ret = 0;
struct iaa_wq *iaa_wq;
struct device *dev;
+ u64 start_time_ns;
struct idxd_wq *wq;
if (!iaa_crypto_enabled) {
@@ -1864,8 +1773,10 @@ static int iaa_comp_adecompress(struct acomp_req *req)
" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
req->dst, req->dlen, sg_dma_len(req->dst));
+ start_time_ns = iaa_get_ts();
ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
dst_addr, &req->dlen, false);
+ update_max_decomp_delay_ns(start_time_ns);
if (ret == -EINPROGRESS)
return ret;
@@ -1916,6 +1827,7 @@ static struct acomp_alg iaa_acomp_fixed_deflate = {
.base = {
.cra_name = "deflate",
.cra_driver_name = "deflate-iaa",
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_ctxsize = sizeof(struct iaa_compression_ctx),
.cra_module = THIS_MODULE,
.cra_priority = IAA_ALG_PRIORITY,
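The iaa_crypto_main.c hunks above bracket each submission with start_time_ns = iaa_get_ts() and update_max_{comp,decomp}_delay_ns(start_time_ns) so the stats code can track the worst latency observed. A userspace sketch of that bookkeeping, with clock_gettime(CLOCK_MONOTONIC) standing in for ktime_get_ns():

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t max_delay_ns;

/* Stand-in for iaa_get_ts()/ktime_get_ns(): monotonic time in nanoseconds. */
static uint64_t get_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Stand-in for update_max_comp_delay_ns(): keep the worst latency seen. */
static void update_max_delay_ns(uint64_t start_ns)
{
	uint64_t diff = get_ns() - start_ns;

	if (diff > max_delay_ns)
		max_delay_ns = diff;
}

int main(void)
{
	uint64_t start = get_ns();

	/* ... submit and wait for the (simulated) operation here ... */
	update_max_delay_ns(start);
	printf("max observed delay: %llu ns\n",
	       (unsigned long long)max_delay_ns);
	return 0;
}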
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.c b/drivers/crypto/intel/iaa/iaa_crypto_stats.c
index 2e3b7b73af204..c9f83af4b3075 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_stats.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.c
@@ -22,8 +22,6 @@ static u64 total_decomp_calls;
static u64 total_sw_decomp_calls;
static u64 max_comp_delay_ns;
static u64 max_decomp_delay_ns;
-static u64 max_acomp_delay_ns;
-static u64 max_adecomp_delay_ns;
static u64 total_comp_bytes_out;
static u64 total_decomp_bytes_in;
static u64 total_completion_einval_errors;
@@ -92,26 +90,6 @@ void update_max_decomp_delay_ns(u64 start_time_ns)
max_decomp_delay_ns = time_diff;
}
-void update_max_acomp_delay_ns(u64 start_time_ns)
-{
- u64 time_diff;
-
- time_diff = ktime_get_ns() - start_time_ns;
-
- if (time_diff > max_acomp_delay_ns)
- max_acomp_delay_ns = time_diff;
-}
-
-void update_max_adecomp_delay_ns(u64 start_time_ns)
-{
- u64 time_diff;
-
- time_diff = ktime_get_ns() - start_time_ns;
-
- if (time_diff > max_adecomp_delay_ns)
- max_adecomp_delay_ns = time_diff;
-}
-
void update_wq_comp_calls(struct idxd_wq *idxd_wq)
{
struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
@@ -151,8 +129,6 @@ static void reset_iaa_crypto_stats(void)
total_sw_decomp_calls = 0;
max_comp_delay_ns = 0;
max_decomp_delay_ns = 0;
- max_acomp_delay_ns = 0;
- max_adecomp_delay_ns = 0;
total_comp_bytes_out = 0;
total_decomp_bytes_in = 0;
total_completion_einval_errors = 0;
@@ -275,17 +251,11 @@ int __init iaa_crypto_debugfs_init(void)
return -ENODEV;
iaa_crypto_debugfs_root = debugfs_create_dir("iaa_crypto", NULL);
- if (!iaa_crypto_debugfs_root)
- return -ENOMEM;
debugfs_create_u64("max_comp_delay_ns", 0644,
iaa_crypto_debugfs_root, &max_comp_delay_ns);
debugfs_create_u64("max_decomp_delay_ns", 0644,
iaa_crypto_debugfs_root, &max_decomp_delay_ns);
- debugfs_create_u64("max_acomp_delay_ns", 0644,
- iaa_crypto_debugfs_root, &max_comp_delay_ns);
- debugfs_create_u64("max_adecomp_delay_ns", 0644,
- iaa_crypto_debugfs_root, &max_decomp_delay_ns);
debugfs_create_u64("total_comp_calls", 0644,
iaa_crypto_debugfs_root, &total_comp_calls);
debugfs_create_u64("total_decomp_calls", 0644,
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.h b/drivers/crypto/intel/iaa/iaa_crypto_stats.h
index c10b87b86fa45..c916ca83f0702 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_stats.h
+++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.h
@@ -15,8 +15,6 @@ void update_total_sw_decomp_calls(void);
void update_total_decomp_bytes_in(int n);
void update_max_comp_delay_ns(u64 start_time_ns);
void update_max_decomp_delay_ns(u64 start_time_ns);
-void update_max_acomp_delay_ns(u64 start_time_ns);
-void update_max_adecomp_delay_ns(u64 start_time_ns);
void update_completion_einval_errs(void);
void update_completion_timeout_errs(void);
void update_completion_comp_buf_overflow_errs(void);
@@ -26,6 +24,8 @@ void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n);
void update_wq_decomp_calls(struct idxd_wq *idxd_wq);
void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n);
+static inline u64 iaa_get_ts(void) { return ktime_get_ns(); }
+
#else
static inline int iaa_crypto_debugfs_init(void) { return 0; }
static inline void iaa_crypto_debugfs_cleanup(void) {}
@@ -37,8 +37,6 @@ static inline void update_total_sw_decomp_calls(void) {}
static inline void update_total_decomp_bytes_in(int n) {}
static inline void update_max_comp_delay_ns(u64 start_time_ns) {}
static inline void update_max_decomp_delay_ns(u64 start_time_ns) {}
-static inline void update_max_acomp_delay_ns(u64 start_time_ns) {}
-static inline void update_max_adecomp_delay_ns(u64 start_time_ns) {}
static inline void update_completion_einval_errs(void) {}
static inline void update_completion_timeout_errs(void) {}
static inline void update_completion_comp_buf_overflow_errs(void) {}
@@ -48,6 +46,8 @@ static inline void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n) {}
static inline void update_wq_decomp_calls(struct idxd_wq *idxd_wq) {}
static inline void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n) {}
+static inline u64 iaa_get_ts(void) { return 0; }
+
#endif // CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS
#endif
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
index c120f6715a09a..02fb8abe4e6ed 100644
--- a/drivers/crypto/intel/qat/Kconfig
+++ b/drivers/crypto/intel/qat/Kconfig
@@ -106,3 +106,17 @@ config CRYPTO_DEV_QAT_C62XVF
To compile this as a module, choose M here: the module
will be called qat_c62xvf.
+
+config CRYPTO_DEV_QAT_ERROR_INJECTION
+ bool "Support for Intel(R) QAT Devices Heartbeat Error Injection"
+ depends on CRYPTO_DEV_QAT
+ depends on DEBUG_FS
+ help
+ Enables a mechanism that allows injection of a heartbeat error on
+ Intel(R) QuickAssist devices for testing purposes.
+
+ This functionality is available via a debugfs entry of the Intel(R)
+ QuickAssist device.
+
+ This is intended for developer use only.
+ If unsure, say N.
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index a87d29ae724fe..1102c47f8293d 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -361,53 +361,6 @@ static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
}
}
-static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
-{
- enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { };
- const struct adf_fw_config *fw_config;
- u16 ring_to_svc_map;
- int i, j;
-
- fw_config = get_fw_config(accel_dev);
- if (!fw_config)
- return 0;
-
- for (i = 0; i < RP_GROUP_COUNT; i++) {
- switch (fw_config[i].ae_mask) {
- case ADF_AE_GROUP_0:
- j = RP_GROUP_0;
- break;
- case ADF_AE_GROUP_1:
- j = RP_GROUP_1;
- break;
- default:
- return 0;
- }
-
- switch (fw_config[i].obj) {
- case ADF_FW_SYM_OBJ:
- rps[j] = SYM;
- break;
- case ADF_FW_ASYM_OBJ:
- rps[j] = ASYM;
- break;
- case ADF_FW_DC_OBJ:
- rps[j] = COMP;
- break;
- default:
- rps[j] = 0;
- break;
- }
- }
-
- ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
- rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
- rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
- rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
-
- return ring_to_svc_map;
-}
-
static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
const char * const fw_objs[], int num_objs)
{
@@ -433,6 +386,20 @@ static const char *uof_get_name_420xx(struct adf_accel_dev *accel_dev, u32 obj_n
return uof_get_name(accel_dev, obj_num, adf_420xx_fw_objs, num_fw_objs);
}
+static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return -EINVAL;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return -EINVAL;
+
+ return fw_config[obj_num].obj;
+}
+
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
const struct adf_fw_config *fw_config;
@@ -496,12 +463,13 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->fw_mmp_name = ADF_420XX_MMP;
hw_data->uof_get_name = uof_get_name_420xx;
hw_data->uof_get_num_objs = uof_get_num_objs;
+ hw_data->uof_get_obj_type = uof_get_obj_type;
hw_data->uof_get_ae_mask = uof_get_ae_mask;
hw_data->get_rp_group = get_rp_group;
hw_data->get_ena_thd_mask = get_ena_thd_mask;
hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
- hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
+ hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
hw_data->enable_pm = adf_gen4_enable_pm;
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 94a0ebb03d8c9..927506cf271d0 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -320,53 +320,6 @@ static u32 get_ena_thd_mask_401xx(struct adf_accel_dev *accel_dev, u32 obj_num)
}
}
-static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
-{
- enum adf_cfg_service_type rps[RP_GROUP_COUNT];
- const struct adf_fw_config *fw_config;
- u16 ring_to_svc_map;
- int i, j;
-
- fw_config = get_fw_config(accel_dev);
- if (!fw_config)
- return 0;
-
- for (i = 0; i < RP_GROUP_COUNT; i++) {
- switch (fw_config[i].ae_mask) {
- case ADF_AE_GROUP_0:
- j = RP_GROUP_0;
- break;
- case ADF_AE_GROUP_1:
- j = RP_GROUP_1;
- break;
- default:
- return 0;
- }
-
- switch (fw_config[i].obj) {
- case ADF_FW_SYM_OBJ:
- rps[j] = SYM;
- break;
- case ADF_FW_ASYM_OBJ:
- rps[j] = ASYM;
- break;
- case ADF_FW_DC_OBJ:
- rps[j] = COMP;
- break;
- default:
- rps[j] = 0;
- break;
- }
- }
-
- ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
- rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
- rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
- rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
-
- return ring_to_svc_map;
-}
-
static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
const char * const fw_objs[], int num_objs)
{
@@ -399,6 +352,20 @@ static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_n
return uof_get_name(accel_dev, obj_num, adf_402xx_fw_objs, num_fw_objs);
}
+static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return -EINVAL;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return -EINVAL;
+
+ return fw_config[obj_num].obj;
+}
+
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
const struct adf_fw_config *fw_config;
@@ -479,11 +446,12 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
break;
}
hw_data->uof_get_num_objs = uof_get_num_objs;
+ hw_data->uof_get_obj_type = uof_get_obj_type;
hw_data->uof_get_ae_mask = uof_get_ae_mask;
hw_data->get_rp_group = get_rp_group;
hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
- hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
+ hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
hw_data->enable_pm = adf_gen4_enable_pm;
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 6908727bff3b8..5915cde8a7aa4 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -53,3 +53,5 @@ intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
adf_gen2_pfvf.o adf_gen4_pfvf.o
+
+intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index a16c7e6edc65d..08658c3a01e9b 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -248,6 +248,7 @@ struct adf_hw_device_data {
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev);
+ int (*uof_get_obj_type)(struct adf_accel_dev *accel_dev, u32 obj_num);
u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask);
u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
@@ -332,6 +333,7 @@ struct adf_accel_vf_info {
struct ratelimit_state vf2pf_ratelimit;
u32 vf_nr;
bool init;
+ bool restarting;
u8 vf_compat_ver;
};
@@ -401,6 +403,7 @@ struct adf_accel_dev {
struct adf_error_counters ras_errors;
struct mutex state_lock; /* protect state of the device */
bool is_vf;
+ bool autoreset_on_error;
u32 accel_id;
};
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index a39e70bd4b21b..9da2278bd5b7d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -7,8 +7,15 @@
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
+#include "adf_pfvf_pf_msg.h"
+
+struct adf_fatal_error_data {
+ struct adf_accel_dev *accel_dev;
+ struct work_struct work;
+};
static struct workqueue_struct *device_reset_wq;
+static struct workqueue_struct *device_sriov_wq;
static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
@@ -26,6 +33,19 @@ static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_DISCONNECT;
}
+ set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+ if (accel_dev->hw_device->exit_arb) {
+ dev_dbg(&pdev->dev, "Disabling arbitration\n");
+ accel_dev->hw_device->exit_arb(accel_dev);
+ }
+ adf_error_notifier(accel_dev);
+ adf_pf2vf_notify_fatal_error(accel_dev);
+ adf_dev_restarting_notify(accel_dev);
+ adf_pf2vf_notify_restarting(accel_dev);
+ adf_pf2vf_wait_for_restarting_complete(accel_dev);
+ pci_clear_master(pdev);
+ adf_dev_down(accel_dev, false);
+
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -37,6 +57,13 @@ struct adf_reset_dev_data {
struct work_struct reset_work;
};
+/* sriov dev data */
+struct adf_sriov_dev_data {
+ struct adf_accel_dev *accel_dev;
+ struct completion compl;
+ struct work_struct sriov_work;
+};
+
void adf_reset_sbr(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
@@ -82,29 +109,57 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev)
}
}
+static void adf_device_sriov_worker(struct work_struct *work)
+{
+ struct adf_sriov_dev_data *sriov_data =
+ container_of(work, struct adf_sriov_dev_data, sriov_work);
+
+ adf_reenable_sriov(sriov_data->accel_dev);
+ complete(&sriov_data->compl);
+}
+
static void adf_device_reset_worker(struct work_struct *work)
{
struct adf_reset_dev_data *reset_data =
container_of(work, struct adf_reset_dev_data, reset_work);
struct adf_accel_dev *accel_dev = reset_data->accel_dev;
+ unsigned long wait_jiffies = msecs_to_jiffies(10000);
+ struct adf_sriov_dev_data sriov_data;
adf_dev_restarting_notify(accel_dev);
if (adf_dev_restart(accel_dev)) {
/* The device hung and we can't restart it, so stop here */
dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
- if (reset_data->mode == ADF_DEV_RESET_ASYNC)
+ if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
+ completion_done(&reset_data->compl))
kfree(reset_data);
WARN(1, "QAT: device restart failed. Device is unusable\n");
return;
}
+
+ sriov_data.accel_dev = accel_dev;
+ init_completion(&sriov_data.compl);
+ INIT_WORK(&sriov_data.sriov_work, adf_device_sriov_worker);
+ queue_work(device_sriov_wq, &sriov_data.sriov_work);
+ if (wait_for_completion_timeout(&sriov_data.compl, wait_jiffies))
+ adf_pf2vf_notify_restarted(accel_dev);
+
adf_dev_restarted_notify(accel_dev);
clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
- /* The dev is back alive. Notify the caller if in sync mode */
- if (reset_data->mode == ADF_DEV_RESET_SYNC)
- complete(&reset_data->compl);
- else
+ /*
+ * The dev is back alive. Notify the caller if in sync mode.
+ *
+ * If the device restart takes more time than expected,
+ * the schedule_reset() function can time out and exit. This can be
+ * detected by calling the completion_done() function. In this case
+ * the reset_data structure needs to be freed here.
+ */
+ if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
+ completion_done(&reset_data->compl))
kfree(reset_data);
+ else
+ complete(&reset_data->compl);
}
static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
@@ -137,8 +192,9 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
dev_err(&GET_DEV(accel_dev),
"Reset device timeout expired\n");
ret = -EFAULT;
+ } else {
+ kfree(reset_data);
}
- kfree(reset_data);
return ret;
}
return 0;
@@ -147,14 +203,25 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+ int res = 0;
if (!accel_dev) {
pr_err("QAT: Can't find acceleration device\n");
return PCI_ERS_RESULT_DISCONNECT;
}
- if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
+
+ if (!pdev->is_busmaster)
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ res = adf_dev_up(accel_dev, false);
+ if (res && res != -EALREADY)
return PCI_ERS_RESULT_DISCONNECT;
+ adf_reenable_sriov(accel_dev);
+ adf_pf2vf_notify_restarted(accel_dev);
+ adf_dev_restarted_notify(accel_dev);
+ clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -171,11 +238,62 @@ const struct pci_error_handlers adf_err_handler = {
};
EXPORT_SYMBOL_GPL(adf_err_handler);
+int adf_dev_autoreset(struct adf_accel_dev *accel_dev)
+{
+ if (accel_dev->autoreset_on_error)
+ return adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_ASYNC);
+
+ return 0;
+}
+
+static void adf_notify_fatal_error_worker(struct work_struct *work)
+{
+ struct adf_fatal_error_data *wq_data =
+ container_of(work, struct adf_fatal_error_data, work);
+ struct adf_accel_dev *accel_dev = wq_data->accel_dev;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+ adf_error_notifier(accel_dev);
+
+ if (!accel_dev->is_vf) {
+ /* Disable arbitration to stop processing of new requests */
+ if (accel_dev->autoreset_on_error && hw_device->exit_arb)
+ hw_device->exit_arb(accel_dev);
+ if (accel_dev->pf.vf_info)
+ adf_pf2vf_notify_fatal_error(accel_dev);
+ adf_dev_autoreset(accel_dev);
+ }
+
+ kfree(wq_data);
+}
+
+int adf_notify_fatal_error(struct adf_accel_dev *accel_dev)
+{
+ struct adf_fatal_error_data *wq_data;
+
+ wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
+ if (!wq_data)
+ return -ENOMEM;
+
+ wq_data->accel_dev = accel_dev;
+ INIT_WORK(&wq_data->work, adf_notify_fatal_error_worker);
+ adf_misc_wq_queue_work(&wq_data->work);
+
+ return 0;
+}
+
int adf_init_aer(void)
{
device_reset_wq = alloc_workqueue("qat_device_reset_wq",
WQ_MEM_RECLAIM, 0);
- return !device_reset_wq ? -EFAULT : 0;
+ if (!device_reset_wq)
+ return -EFAULT;
+
+ device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0);
+ if (!device_sriov_wq)
+ return -EFAULT;
+
+ return 0;
}
void adf_exit_aer(void)
@@ -183,4 +301,8 @@ void adf_exit_aer(void)
if (device_reset_wq)
destroy_workqueue(device_reset_wq);
device_reset_wq = NULL;
+
+ if (device_sriov_wq)
+ destroy_workqueue(device_sriov_wq);
+ device_sriov_wq = NULL;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
index 322b76903a737..e015ad6cace2b 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
@@ -49,5 +49,6 @@
ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY
#define ADF_ACCEL_STR "Accelerator%d"
#define ADF_HEARTBEAT_TIMER "HeartbeatTimer"
+#define ADF_SRIOV_ENABLED "SriovEnabled"
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.c b/drivers/crypto/intel/qat/qat_common/adf_clock.c
index 01e0a389e462b..cf89f57de2a70 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_clock.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_clock.c
@@ -83,6 +83,9 @@ static int measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency)
}
delta_us = timespec_to_us(&ts3) - timespec_to_us(&ts1);
+ if (!delta_us)
+ return -EINVAL;
+
temp = (timestamp2 - timestamp1) * ME_CLK_DIVIDER * 10;
temp = DIV_ROUND_CLOSEST_ULL(temp, delta_us);
/*
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c
index 07119c487da01..627953a72d478 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c
@@ -16,7 +16,6 @@
#define CNV_ERR_INFO_MASK GENMASK(11, 0)
#define CNV_ERR_TYPE_MASK GENMASK(15, 12)
-#define CNV_SLICE_ERR_MASK GENMASK(7, 0)
#define CNV_SLICE_ERR_SIGN_BIT_INDEX 7
#define CNV_DELTA_ERR_SIGN_BIT_INDEX 11
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index f06188033a93f..57328249c89e7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -40,6 +40,7 @@ enum adf_event {
ADF_EVENT_SHUTDOWN,
ADF_EVENT_RESTARTING,
ADF_EVENT_RESTARTED,
+ ADF_EVENT_FATAL_ERROR,
};
struct service_hndl {
@@ -60,6 +61,8 @@ int adf_dev_restart(struct adf_accel_dev *accel_dev);
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
void adf_clean_vf_map(bool);
+int adf_notify_fatal_error(struct adf_accel_dev *accel_dev);
+void adf_error_notifier(struct adf_accel_dev *accel_dev);
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf);
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
@@ -84,12 +87,14 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev);
extern const struct pci_error_handlers adf_err_handler;
void adf_reset_sbr(struct adf_accel_dev *accel_dev);
void adf_reset_flr(struct adf_accel_dev *accel_dev);
+int adf_dev_autoreset(struct adf_accel_dev *accel_dev);
void adf_dev_restore(struct adf_accel_dev *accel_dev);
int adf_init_aer(void);
void adf_exit_aer(void);
int adf_init_arb(struct adf_accel_dev *accel_dev);
void adf_exit_arb(struct adf_accel_dev *accel_dev);
void adf_update_ring_arb(struct adf_etr_ring_data *ring);
+int adf_disable_arb_thd(struct adf_accel_dev *accel_dev, u32 ae, u32 thr);
int adf_dev_get(struct adf_accel_dev *accel_dev);
void adf_dev_put(struct adf_accel_dev *accel_dev);
@@ -188,6 +193,7 @@ bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
#if defined(CONFIG_PCI_IOV)
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+void adf_reenable_sriov(struct adf_accel_dev *accel_dev);
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask);
void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev);
bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev);
@@ -208,6 +214,10 @@ static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
}
+static inline void adf_reenable_sriov(struct adf_accel_dev *accel_dev)
+{
+}
+
static inline int adf_init_pf_wq(void)
{
return 0;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
index 86ee36feefad3..f07b748795f7b 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
@@ -60,10 +60,10 @@ static int adf_get_vf_real_id(u32 fake)
/**
* adf_clean_vf_map() - Cleans VF id mapings
- *
- * Function cleans internal ids for virtual functions.
* @vf: flag indicating whether mappings is cleaned
* for vfs only or for vfs and pfs
+ *
+ * Function cleans internal ids for virtual functions.
*/
void adf_clean_vf_map(bool vf)
{
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 9985683056d5f..d28e1921940a7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -4,6 +4,7 @@
#include "adf_accel_devices.h"
#include "adf_cfg_services.h"
#include "adf_common_drv.h"
+#include "adf_fw_config.h"
#include "adf_gen4_hw_data.h"
#include "adf_gen4_pm.h"
@@ -398,6 +399,9 @@ int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev)
ADF_GEN4_ADMIN_ACCELENGINES;
if (srv_id == SVC_DCC) {
+ if (ae_cnt > ICP_QAT_HW_AE_DELIMITER)
+ return -EINVAL;
+
memcpy(thd2arb_map, thrd_to_arb_map_dcc,
array_size(sizeof(*thd2arb_map), ae_cnt));
return 0;
@@ -430,3 +434,58 @@ int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev)
return 0;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map);
+
+u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { };
+ unsigned int ae_mask, start_id, worker_obj_cnt, i;
+ u16 ring_to_svc_map;
+ int rp_group;
+
+ if (!hw_data->get_rp_group || !hw_data->uof_get_ae_mask ||
+ !hw_data->uof_get_obj_type || !hw_data->uof_get_num_objs)
+ return 0;
+
+ /* If dcc, all rings handle compression requests */
+ if (adf_get_service_enabled(accel_dev) == SVC_DCC) {
+ for (i = 0; i < RP_GROUP_COUNT; i++)
+ rps[i] = COMP;
+ goto set_mask;
+ }
+
+ worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) -
+ ADF_GEN4_ADMIN_ACCELENGINES;
+ start_id = worker_obj_cnt - RP_GROUP_COUNT;
+
+ for (i = start_id; i < worker_obj_cnt; i++) {
+ ae_mask = hw_data->uof_get_ae_mask(accel_dev, i);
+ rp_group = hw_data->get_rp_group(accel_dev, ae_mask);
+ if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0)
+ return 0;
+
+ switch (hw_data->uof_get_obj_type(accel_dev, i)) {
+ case ADF_FW_SYM_OBJ:
+ rps[rp_group] = SYM;
+ break;
+ case ADF_FW_ASYM_OBJ:
+ rps[rp_group] = ASYM;
+ break;
+ case ADF_FW_DC_OBJ:
+ rps[rp_group] = COMP;
+ break;
+ default:
+ rps[rp_group] = 0;
+ break;
+ }
+ }
+
+set_mask:
+ ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
+ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
+ rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
+ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
+
+ return ring_to_svc_map;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index 7d8a774cadc88..c6e80df5a85a3 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -235,5 +235,6 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev);
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev);
+u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
index 048c246079390..2dd3772bf58a6 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
@@ -1007,8 +1007,7 @@ static bool adf_handle_spppar_err(struct adf_accel_dev *accel_dev,
static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev,
void __iomem *csr, u32 iastatssm)
{
- u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMCPPERR);
- u32 bits_num = BITS_PER_REG(reg);
+ u32 reg, bits_num = BITS_PER_REG(reg);
bool reset_required = false;
unsigned long errs_bits;
u32 bit_iterator;
@@ -1106,8 +1105,7 @@ static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev,
static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev,
void __iomem *csr, u32 iastatssm)
{
- u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SER_ERR_SSMSH);
- u32 bits_num = BITS_PER_REG(reg);
+ u32 reg, bits_num = BITS_PER_REG(reg);
bool reset_required = false;
unsigned long errs_bits;
u32 bit_iterator;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
index 13f48d2f6da88..b19aa1ef8eeed 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
@@ -23,12 +23,6 @@
#define ADF_HB_EMPTY_SIG 0xA5A5A5A5
-/* Heartbeat counter pair */
-struct hb_cnt_pair {
- __u16 resp_heartbeat_cnt;
- __u16 req_heartbeat_cnt;
-};
-
static int adf_hb_check_polling_freq(struct adf_accel_dev *accel_dev)
{
u64 curr_time = adf_clock_get_current_time();
@@ -211,6 +205,19 @@ static int adf_hb_get_status(struct adf_accel_dev *accel_dev)
return ret;
}
+static void adf_heartbeat_reset(struct adf_accel_dev *accel_dev)
+{
+ u64 curr_time = adf_clock_get_current_time();
+ u64 time_since_reset = curr_time - accel_dev->heartbeat->last_hb_reset_time;
+
+ if (time_since_reset < ADF_CFG_HB_RESET_MS)
+ return;
+
+ accel_dev->heartbeat->last_hb_reset_time = curr_time;
+ if (adf_notify_fatal_error(accel_dev))
+ dev_err(&GET_DEV(accel_dev), "Failed to notify fatal error\n");
+}
+
void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
enum adf_device_heartbeat_status *hb_status)
{
@@ -235,6 +242,7 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
"Heartbeat ERROR: QAT is not responding.\n");
*hb_status = HB_DEV_UNRESPONSIVE;
hb->hb_failed_counter++;
+ adf_heartbeat_reset(accel_dev);
return;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
index b22e3cb29798e..16fdfb48b196a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
@@ -13,17 +13,26 @@ struct dentry;
#define ADF_CFG_HB_TIMER_DEFAULT_MS 500
#define ADF_CFG_HB_COUNT_THRESHOLD 3
+#define ADF_CFG_HB_RESET_MS 5000
+
enum adf_device_heartbeat_status {
HB_DEV_UNRESPONSIVE = 0,
HB_DEV_ALIVE,
HB_DEV_UNSUPPORTED,
};
+/* Heartbeat counter pair */
+struct hb_cnt_pair {
+ __u16 resp_heartbeat_cnt;
+ __u16 req_heartbeat_cnt;
+};
+
struct adf_heartbeat {
unsigned int hb_sent_counter;
unsigned int hb_failed_counter;
unsigned int hb_timer;
u64 last_hb_check_time;
+ u64 last_hb_reset_time;
bool ctrs_cnt_checked;
struct hb_dma_addr {
dma_addr_t phy_addr;
@@ -35,6 +44,9 @@ struct adf_heartbeat {
struct dentry *cfg;
struct dentry *sent;
struct dentry *failed;
+#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION
+ struct dentry *inject_error;
+#endif
} dbgfs;
};
@@ -51,6 +63,15 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
enum adf_device_heartbeat_status *hb_status);
void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev);
+#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION
+int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev);
+#else
+static inline int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev)
+{
+ return -EPERM;
+}
+#endif
+
#else
static inline int adf_heartbeat_init(struct adf_accel_dev *accel_dev)
{
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
index 2661af6a2ef69..cccdff24b48d6 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
@@ -155,6 +155,44 @@ static const struct file_operations adf_hb_cfg_fops = {
.write = adf_hb_cfg_write,
};
+static ssize_t adf_hb_error_inject_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct adf_accel_dev *accel_dev = file->private_data;
+ char buf[3];
+ int ret;
+
+ /* last byte left as string termination */
+ if (*ppos != 0 || count != 2)
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+ buf[count] = '\0';
+
+ if (buf[0] != '1')
+ return -EINVAL;
+
+ ret = adf_heartbeat_inject_error(accel_dev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Heartbeat error injection failed with status %d\n",
+ ret);
+ return ret;
+ }
+
+ dev_info(&GET_DEV(accel_dev), "Heartbeat error injection enabled\n");
+
+ return count;
+}
+
+static const struct file_operations adf_hb_error_inject_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = adf_hb_error_inject_write,
+};
+
void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev)
{
struct adf_heartbeat *hb = accel_dev->heartbeat;
@@ -171,6 +209,17 @@ void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev)
&hb->hb_failed_counter, &adf_hb_stats_fops);
hb->dbgfs.cfg = debugfs_create_file("config", 0600, hb->dbgfs.base_dir,
accel_dev, &adf_hb_cfg_fops);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION)) {
+ struct dentry *inject_error __maybe_unused;
+
+ inject_error = debugfs_create_file("inject_error", 0200,
+ hb->dbgfs.base_dir, accel_dev,
+ &adf_hb_error_inject_fops);
+#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION
+ hb->dbgfs.inject_error = inject_error;
+#endif
+ }
}
EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_add);
@@ -189,6 +238,10 @@ void adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev)
hb->dbgfs.failed = NULL;
debugfs_remove(hb->dbgfs.cfg);
hb->dbgfs.cfg = NULL;
+#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION
+ debugfs_remove(hb->dbgfs.inject_error);
+ hb->dbgfs.inject_error = NULL;
+#endif
debugfs_remove(hb->dbgfs.base_dir);
hb->dbgfs.base_dir = NULL;
}
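
For context, a minimal userspace sketch of how the new inject_error debugfs node could be exercised once this patch is applied. The path below is an assumption (the heartbeat directory sits under the device's debugfs directory, whose name depends on the device instance); the handler above accepts exactly two bytes, the first of which must be '1'.

/* Hypothetical usage sketch, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path; substitute the real QAT debugfs directory. */
	const char *path =
		"/sys/kernel/debug/qat_4xxx_0000:6b:00.0/heartbeat/inject_error";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Exactly two bytes: '1' plus a newline, as the handler requires. */
	if (write(fd, "1\n", 2) != 2)
		perror("write");
	close(fd);
	return 0;
}
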
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c
new file mode 100644
index 0000000000000..a3b474bdef6c8
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include <linux/random.h>
+
+#include "adf_admin.h"
+#include "adf_common_drv.h"
+#include "adf_heartbeat.h"
+
+#define MAX_HB_TICKS 0xFFFFFFFF
+
+static int adf_hb_set_timer_to_max(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+
+ accel_dev->heartbeat->hb_timer = 0;
+
+ if (hw_data->stop_timer)
+ hw_data->stop_timer(accel_dev);
+
+ return adf_send_admin_hb_timer(accel_dev, MAX_HB_TICKS);
+}
+
+static void adf_set_hb_counters_fail(struct adf_accel_dev *accel_dev, u32 ae,
+ u32 thr)
+{
+ struct hb_cnt_pair *stats = accel_dev->heartbeat->dma.virt_addr;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ const size_t max_aes = hw_device->get_num_aes(hw_device);
+ const size_t hb_ctrs = hw_device->num_hb_ctrs;
+ size_t thr_id = ae * hb_ctrs + thr;
+ u16 num_rsp = stats[thr_id].resp_heartbeat_cnt;
+
+ /*
+ * Inject live.req != live.rsp and live.rsp == last.rsp
+ * to trigger the heartbeat error detection
+ */
+ stats[thr_id].req_heartbeat_cnt++;
+ stats += (max_aes * hb_ctrs);
+ stats[thr_id].resp_heartbeat_cnt = num_rsp;
+}
+
+int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ const size_t max_aes = hw_device->get_num_aes(hw_device);
+ const size_t hb_ctrs = hw_device->num_hb_ctrs;
+ u32 rand, rand_ae, rand_thr;
+ unsigned long ae_mask;
+ int ret;
+
+ ae_mask = hw_device->ae_mask;
+
+ do {
+ /* Ensure we have a valid ae */
+ get_random_bytes(&rand, sizeof(rand));
+ rand_ae = rand % max_aes;
+ } while (!test_bit(rand_ae, &ae_mask));
+
+ get_random_bytes(&rand, sizeof(rand));
+ rand_thr = rand % hb_ctrs;
+
+	/* Increase the heartbeat timer to prevent the FW from updating HB counters */
+ ret = adf_hb_set_timer_to_max(accel_dev);
+ if (ret)
+ return ret;
+
+ /* Configure worker threads to stop processing any packet */
+ ret = adf_disable_arb_thd(accel_dev, rand_ae, rand_thr);
+ if (ret)
+ return ret;
+
+ /* Change HB counters memory to simulate a hang */
+ adf_set_hb_counters_fail(accel_dev, rand_ae, rand_thr);
+
+ return 0;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
index da69566992467..65bd26b25abce 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
@@ -103,3 +103,28 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev)
csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
}
EXPORT_SYMBOL_GPL(adf_exit_arb);
+
+int adf_disable_arb_thd(struct adf_accel_dev *accel_dev, u32 ae, u32 thr)
+{
+ void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ const u32 *thd_2_arb_cfg;
+ struct arb_info info;
+ u32 ae_thr_map;
+
+ if (ADF_AE_STRAND0_THREAD == thr || ADF_AE_STRAND1_THREAD == thr)
+ thr = ADF_AE_ADMIN_THREAD;
+
+ hw_data->get_arb_info(&info);
+ thd_2_arb_cfg = hw_data->get_arb_mapping(accel_dev);
+ if (!thd_2_arb_cfg)
+ return -EFAULT;
+
+ /* Disable scheduling for this particular AE and thread */
+ ae_thr_map = *(thd_2_arb_cfg + ae);
+ ae_thr_map &= ~(GENMASK(3, 0) << (thr * BIT(2)));
+
+ WRITE_CSR_ARB_WT2SAM(csr, info.arb_offset, info.wt2sam_offset, ae,
+ ae_thr_map);
+ return 0;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index f43ae9111553f..74f0818c07034 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -433,6 +433,18 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
return 0;
}
+void adf_error_notifier(struct adf_accel_dev *accel_dev)
+{
+ struct service_hndl *service;
+
+ list_for_each_entry(service, &service_table, list) {
+ if (service->event_hld(accel_dev, ADF_EVENT_FATAL_ERROR))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send error event to %s.\n",
+ service->name);
+ }
+}
+
static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
{
char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
index 3557a0d6dea28..cae1aee5479af 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
@@ -139,8 +139,13 @@ static bool adf_handle_ras_int(struct adf_accel_dev *accel_dev)
if (ras_ops->handle_interrupt &&
ras_ops->handle_interrupt(accel_dev, &reset_required)) {
- if (reset_required)
+ if (reset_required) {
dev_err(&GET_DEV(accel_dev), "Fatal error, reset required\n");
+ if (adf_notify_fatal_error(accel_dev))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to notify fatal error\n");
+ }
+
return true;
}
@@ -272,7 +277,7 @@ static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev)
if (!accel_dev->pf.vf_info)
msix_num_entries += hw_data->num_banks;
- irqs = kzalloc_node(msix_num_entries * sizeof(*irqs),
+ irqs = kcalloc_node(msix_num_entries, sizeof(*irqs),
GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
if (!irqs)
return -ENOMEM;
@@ -375,8 +380,6 @@ EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
/**
* adf_init_misc_wq() - Init misc workqueue
*
- * Function init workqueue 'qat_misc_wq' for general purpose.
- *
* Return: 0 on success, error code otherwise.
*/
int __init adf_init_misc_wq(void)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
index 204a424389926..d1b3ef9cadacc 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
@@ -99,6 +99,8 @@ enum pf2vf_msgtype {
ADF_PF2VF_MSGTYPE_RESTARTING = 0x01,
ADF_PF2VF_MSGTYPE_VERSION_RESP = 0x02,
ADF_PF2VF_MSGTYPE_BLKMSG_RESP = 0x03,
+ ADF_PF2VF_MSGTYPE_FATAL_ERROR = 0x04,
+ ADF_PF2VF_MSGTYPE_RESTARTED = 0x05,
/* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */
ADF_PF2VF_MSGTYPE_RP_RESET_RESP = 0x10,
};
@@ -112,6 +114,7 @@ enum vf2pf_msgtype {
ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ = 0x07,
ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ = 0x08,
ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ = 0x09,
+ ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE = 0x0a,
/* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */
ADF_VF2PF_MSGTYPE_RP_RESET = 0x10,
};
@@ -124,8 +127,10 @@ enum pfvf_compatibility_version {
ADF_PFVF_COMPAT_FAST_ACK = 0x03,
/* Ring to service mapping support for non-standard mappings */
ADF_PFVF_COMPAT_RING_TO_SVC_MAP = 0x04,
+ /* Fallback compat */
+ ADF_PFVF_COMPAT_FALLBACK = 0x05,
/* Reference to the latest version */
- ADF_PFVF_COMPAT_THIS_VERSION = 0x04,
+ ADF_PFVF_COMPAT_THIS_VERSION = 0x05,
};
/* PF->VF Version Response */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
index 14c069f0d71a5..0e31f4b41844e 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
@@ -1,21 +1,83 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/delay.h>
#include <linux/pci.h>
#include "adf_accel_devices.h"
#include "adf_pfvf_msg.h"
#include "adf_pfvf_pf_msg.h"
#include "adf_pfvf_pf_proto.h"
+#define ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY 100
+#define ADF_VF_SHUTDOWN_RETRY 100
+
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
{
struct adf_accel_vf_info *vf;
struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTING };
int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+ dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarting\n");
for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
- if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
+ vf->restarting = false;
+ if (!vf->init)
+ continue;
+ if (adf_send_pf2vf_msg(accel_dev, i, msg))
dev_err(&GET_DEV(accel_dev),
"Failed to send restarting msg to VF%d\n", i);
+ else if (vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK)
+ vf->restarting = true;
+ }
+}
+
+void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev)
+{
+ int num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+ int i, retries = ADF_VF_SHUTDOWN_RETRY;
+ struct adf_accel_vf_info *vf;
+ bool vf_running;
+
+ dev_dbg(&GET_DEV(accel_dev), "pf2vf wait for restarting complete\n");
+ do {
+ vf_running = false;
+ for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++)
+ if (vf->restarting)
+ vf_running = true;
+ if (!vf_running)
+ break;
+ msleep(ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY);
+ } while (--retries);
+
+ if (vf_running)
+ dev_warn(&GET_DEV(accel_dev), "Some VFs are still running\n");
+}
+
+void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev)
+{
+ struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTED };
+ int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+ struct adf_accel_vf_info *vf;
+
+ dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarted\n");
+ for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+ if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK &&
+ adf_send_pf2vf_msg(accel_dev, i, msg))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send restarted msg to VF%d\n", i);
+ }
+}
+
+void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev)
+{
+ struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_FATAL_ERROR };
+ int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+ struct adf_accel_vf_info *vf;
+
+ dev_dbg(&GET_DEV(accel_dev), "pf2vf notify fatal error\n");
+ for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+ if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK &&
+ adf_send_pf2vf_msg(accel_dev, i, msg))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send fatal error msg to VF%d\n", i);
}
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
index e8982d1ac8962..f203d88c919c2 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
@@ -5,7 +5,28 @@
#include "adf_accel_devices.h"
+#if defined(CONFIG_PCI_IOV)
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
+void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev);
+void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev);
+void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev);
+#else
+static inline void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
typedef int (*adf_pf2vf_blkmsg_provider)(struct adf_accel_dev *accel_dev,
u8 *buffer, u8 compat);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
index 388e58bcbcaf2..9ab93fbfefde9 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
@@ -291,6 +291,14 @@ static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr,
vf_info->init = false;
}
break;
+ case ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE:
+ {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Restarting Complete received from VF%d\n", vf_nr);
+ vf_info->restarting = false;
+ vf_info->init = false;
+ }
+ break;
case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
index 1015155b63749..dc284a089c889 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
@@ -308,6 +308,12 @@ static bool adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev,
adf_pf2vf_handle_pf_restarting(accel_dev);
return false;
+ case ADF_PF2VF_MSGTYPE_RESTARTED:
+ dev_dbg(&GET_DEV(accel_dev), "Restarted message received from PF\n");
+ return true;
+ case ADF_PF2VF_MSGTYPE_FATAL_ERROR:
+ dev_err(&GET_DEV(accel_dev), "Fatal error received from PF\n");
+ return true;
case ADF_PF2VF_MSGTYPE_VERSION_RESP:
case ADF_PF2VF_MSGTYPE_BLKMSG_RESP:
case ADF_PF2VF_MSGTYPE_RP_RESET_RESP:
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c
index de1b214dba1f9..d4f2db3c53d8c 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c
@@ -788,6 +788,24 @@ static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla)
sla_type_arr[node_id] = NULL;
}
+static void free_all_sla(struct adf_accel_dev *accel_dev)
+{
+ struct adf_rl *rl_data = accel_dev->rate_limiting;
+ int sla_id;
+
+ mutex_lock(&rl_data->rl_lock);
+
+ for (sla_id = 0; sla_id < RL_NODES_CNT_MAX; sla_id++) {
+ if (!rl_data->sla[sla_id])
+ continue;
+
+ kfree(rl_data->sla[sla_id]);
+ rl_data->sla[sla_id] = NULL;
+ }
+
+ mutex_unlock(&rl_data->rl_lock);
+}
+
/**
* add_update_sla() - handles the creation and the update of an SLA
* @accel_dev: pointer to acceleration device structure
@@ -1155,7 +1173,7 @@ void adf_rl_stop(struct adf_accel_dev *accel_dev)
return;
adf_sysfs_rl_rm(accel_dev);
- adf_rl_remove_sla_all(accel_dev, true);
+ free_all_sla(accel_dev);
}
void adf_rl_exit(struct adf_accel_dev *accel_dev)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index f44025bb6f995..87a70c00c41ee 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -60,7 +60,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
/* This ptr will be populated when VFs are created */
vf_info->accel_dev = accel_dev;
vf_info->vf_nr = i;
- vf_info->vf_compat_ver = 0;
mutex_init(&vf_info->pf2vf_lock);
ratelimit_state_init(&vf_info->vf2pf_ratelimit,
@@ -84,6 +83,32 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
return pci_enable_sriov(pdev, totalvfs);
}
+void adf_reenable_sriov(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ char cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ unsigned long val = 0;
+
+ if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SRIOV_ENABLED, cfg))
+ return;
+
+ if (!accel_dev->pf.vf_info)
+ return;
+
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC))
+ return;
+
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC))
+ return;
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+ dev_dbg(&pdev->dev, "Re-enabling SRIOV\n");
+ adf_enable_sriov(accel_dev);
+}
+
/**
* adf_disable_sriov() - Disable SRIOV for the device
* @accel_dev: Pointer to accel device.
@@ -103,6 +128,7 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
return;
adf_pf2vf_notify_restarting(accel_dev);
+ adf_pf2vf_wait_for_restarting_complete(accel_dev);
pci_disable_sriov(accel_to_pci_dev(accel_dev));
/* Disable VF to PF interrupts */
@@ -115,8 +141,10 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
mutex_destroy(&vf->pf2vf_lock);
- kfree(accel_dev->pf.vf_info);
- accel_dev->pf.vf_info = NULL;
+ if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
+ kfree(accel_dev->pf.vf_info);
+ accel_dev->pf.vf_info = NULL;
+ }
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);
@@ -194,6 +222,10 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
if (ret)
return ret;
+ val = 1;
+ adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED,
+ &val, ADF_DEC);
+
return numvfs;
}
EXPORT_SYMBOL_GPL(adf_sriov_configure);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
index d450dad32c9e4..4e7f70d4049d3 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
@@ -204,6 +204,42 @@ static ssize_t pm_idle_enabled_store(struct device *dev, struct device_attribute
}
static DEVICE_ATTR_RW(pm_idle_enabled);
+static ssize_t auto_reset_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ char *auto_reset;
+ struct adf_accel_dev *accel_dev;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ auto_reset = accel_dev->autoreset_on_error ? "on" : "off";
+
+ return sysfs_emit(buf, "%s\n", auto_reset);
+}
+
+static ssize_t auto_reset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adf_accel_dev *accel_dev;
+ bool enabled = false;
+ int ret;
+
+ ret = kstrtobool(buf, &enabled);
+ if (ret)
+ return ret;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ accel_dev->autoreset_on_error = enabled;
+
+ return count;
+}
+static DEVICE_ATTR_RW(auto_reset);
+
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RW(cfg_services);
@@ -291,6 +327,7 @@ static struct attribute *qat_attrs[] = {
&dev_attr_pm_idle_enabled.attr,
&dev_attr_rp2srv.attr,
&dev_attr_num_rps.attr,
+ &dev_attr_auto_reset.attr,
NULL,
};
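
As a brief usage note for the new auto_reset attribute: auto_reset_store() parses the value with kstrtobool(), so "1"/"0", "y"/"n" and "on"/"off" are all accepted. A hedged sketch follows; the sysfs path is an assumption based on the device's "qat" attribute group and depends on the PF's PCI address.

/* Hypothetical usage sketch, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path; substitute the real PCI address of the QAT PF. */
	const char *path = "/sys/bus/pci/devices/0000:6b:00.0/qat/auto_reset";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Any kstrtobool()-compatible value works; enable automatic reset. */
	if (write(fd, "on", 2) != 2)
		perror("write");
	close(fd);
	return 0;
}
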
diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
index b05c3957a1601..cdbb2d687b1b0 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
@@ -293,8 +293,6 @@ EXPORT_SYMBOL_GPL(adf_flush_vf_wq);
/**
* adf_init_vf_wq() - Init workqueue for VF
*
- * Function init workqueue 'adf_vf_stop_wq' for VF.
- *
* Return: 0 on success, error code otherwise.
*/
int __init adf_init_vf_wq(void)
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
index bf8c0ee629175..2ba4aa22e0927 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
@@ -13,15 +13,6 @@
#include "qat_compression.h"
#include "qat_algs_send.h"
-#define QAT_RFC_1950_HDR_SIZE 2
-#define QAT_RFC_1950_FOOTER_SIZE 4
-#define QAT_RFC_1950_CM_DEFLATE 8
-#define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7
-#define QAT_RFC_1950_CM_MASK 0x0f
-#define QAT_RFC_1950_CM_OFFSET 4
-#define QAT_RFC_1950_DICT_MASK 0x20
-#define QAT_RFC_1950_COMP_HDR 0x785e
-
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
diff --git a/drivers/crypto/intel/qat/qat_common/qat_crypto.c b/drivers/crypto/intel/qat/qat_common/qat_crypto.c
index 40c8e74d1cf9e..101c6ea416738 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_crypto.c
@@ -105,8 +105,8 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
}
/**
- * qat_crypto_vf_dev_config()
- * create dev config required to create crypto inst.
+ * qat_crypto_vf_dev_config() - create dev config required to create
+ * crypto inst.
*
* @accel_dev: Pointer to acceleration device.
*
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 7a3083debc2bb..59d472cb11e75 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -41,7 +41,7 @@
static const char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_DESCRIPTION("Niagara2 Crypto driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index 70edf40bc523c..f74b3c81ba6df 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -371,6 +371,11 @@ static int rk_crypto_probe(struct platform_device *pdev)
}
crypto_info->engine = crypto_engine_alloc_init(&pdev->dev, true);
+ if (!crypto_info->engine) {
+ err = -ENOMEM;
+ goto err_crypto;
+ }
+
crypto_engine_start(crypto_info->engine);
init_completion(&crypto_info->complete);
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index de53eddf6796b..cb92b7fa99c6f 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -225,11 +225,11 @@ static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct virtio_crypto_op_data_req *req_data = vc_req->req_data;
struct scatterlist *sgs[4], outhdr_sg, inhdr_sg, srcdata_sg, dstdata_sg;
- void *src_buf = NULL, *dst_buf = NULL;
+ void *src_buf, *dst_buf = NULL;
unsigned int num_out = 0, num_in = 0;
int node = dev_to_node(&vcrypto->vdev->dev);
unsigned long flags;
- int ret = -ENOMEM;
+ int ret;
bool verify = vc_akcipher_req->opcode == VIRTIO_CRYPTO_AKCIPHER_VERIFY;
unsigned int src_len = verify ? req->src_len + req->dst_len : req->src_len;
@@ -240,7 +240,7 @@ static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request
/* src data */
src_buf = kcalloc_node(src_len, 1, GFP_KERNEL, node);
if (!src_buf)
- goto err;
+ return -ENOMEM;
if (verify) {
/* for verify operation, both src and dst data work as OUT direction */
@@ -255,7 +255,7 @@ static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request
/* dst data */
dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
if (!dst_buf)
- goto err;
+ goto free_src;
sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
sgs[num_out + num_in++] = &dstdata_sg;
@@ -278,9 +278,9 @@ static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request
return 0;
err:
- kfree(src_buf);
kfree(dst_buf);
-
+free_src:
+ kfree(src_buf);
return -ENOMEM;
}
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index b909c6a2bf1c3..6a67d70e7f1c0 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -42,8 +42,6 @@ static void virtcrypto_ctrlq_callback(struct virtqueue *vq)
virtio_crypto_ctrlq_callback(vc_ctrl_req);
spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
}
- if (unlikely(virtqueue_is_broken(vq)))
- break;
} while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
}
diff --git a/drivers/crypto/vmx/.gitignore b/drivers/crypto/vmx/.gitignore
deleted file mode 100644
index 7aa71d83f739b..0000000000000
--- a/drivers/crypto/vmx/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-aesp8-ppc.S
-ghashp8-ppc.S
diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig
deleted file mode 100644
index b2c28b87f14b3..0000000000000
--- a/drivers/crypto/vmx/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config CRYPTO_DEV_VMX_ENCRYPT
- tristate "Encryption acceleration support on P8 CPU"
- depends on CRYPTO_DEV_VMX
- select CRYPTO_AES
- select CRYPTO_CBC
- select CRYPTO_CTR
- select CRYPTO_GHASH
- select CRYPTO_XTS
- default m
- help
- Support for VMX cryptographic acceleration instructions on Power8 CPU.
- This module supports acceleration for AES and GHASH in hardware. If you
- choose 'M' here, this module will be called vmx-crypto.
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
deleted file mode 100644
index 7257b8c446263..0000000000000
--- a/drivers/crypto/vmx/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
-vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
-
-ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
-override flavour := linux-ppc64le
-else
-ifdef CONFIG_PPC64_ELF_ABI_V2
-override flavour := linux-ppc64-elfv2
-else
-override flavour := linux-ppc64
-endif
-endif
-
-quiet_cmd_perl = PERL $@
- cmd_perl = $(PERL) $< $(flavour) > $@
-
-targets += aesp8-ppc.S ghashp8-ppc.S
-
-$(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
- $(call if_changed,perl)
-
-OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
deleted file mode 100644
index ec06189fbf996..0000000000000
--- a/drivers/crypto/vmx/aes.c
+++ /dev/null
@@ -1,134 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * AES routines supporting VMX instructions on the Power 8
- *
- * Copyright (C) 2015 International Business Machines Inc.
- *
- * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
- */
-
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
-#include <asm/simd.h>
-#include <asm/switch_to.h>
-#include <crypto/aes.h>
-#include <crypto/internal/cipher.h>
-#include <crypto/internal/simd.h>
-
-#include "aesp8-ppc.h"
-
-struct p8_aes_ctx {
- struct crypto_cipher *fallback;
- struct aes_key enc_key;
- struct aes_key dec_key;
-};
-
-static int p8_aes_init(struct crypto_tfm *tfm)
-{
- const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_cipher *fallback;
- struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
- fallback = crypto_alloc_cipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback)) {
- printk(KERN_ERR
- "Failed to allocate transformation for '%s': %ld\n",
- alg, PTR_ERR(fallback));
- return PTR_ERR(fallback);
- }
-
- crypto_cipher_set_flags(fallback,
- crypto_cipher_get_flags((struct
- crypto_cipher *)
- tfm));
- ctx->fallback = fallback;
-
- return 0;
-}
-
-static void p8_aes_exit(struct crypto_tfm *tfm)
-{
- struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
- if (ctx->fallback) {
- crypto_free_cipher(ctx->fallback);
- ctx->fallback = NULL;
- }
-}
-
-static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- int ret;
- struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
- ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- ret |= crypto_cipher_setkey(ctx->fallback, key, keylen);
-
- return ret ? -EINVAL : 0;
-}
-
-static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
- struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
- if (!crypto_simd_usable()) {
- crypto_cipher_encrypt_one(ctx->fallback, dst, src);
- } else {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- aes_p8_encrypt(src, dst, &ctx->enc_key);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
- }
-}
-
-static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
- struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
- if (!crypto_simd_usable()) {
- crypto_cipher_decrypt_one(ctx->fallback, dst, src);
- } else {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- aes_p8_decrypt(src, dst, &ctx->dec_key);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
- }
-}
-
-struct crypto_alg p8_aes_alg = {
- .cra_name = "aes",
- .cra_driver_name = "p8_aes",
- .cra_module = THIS_MODULE,
- .cra_priority = 1000,
- .cra_type = NULL,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_NEED_FALLBACK,
- .cra_alignmask = 0,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct p8_aes_ctx),
- .cra_init = p8_aes_init,
- .cra_exit = p8_aes_exit,
- .cra_cipher = {
- .cia_min_keysize = AES_MIN_KEY_SIZE,
- .cia_max_keysize = AES_MAX_KEY_SIZE,
- .cia_setkey = p8_aes_setkey,
- .cia_encrypt = p8_aes_encrypt,
- .cia_decrypt = p8_aes_decrypt,
- },
-};
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
deleted file mode 100644
index ed0debc7acb5f..0000000000000
--- a/drivers/crypto/vmx/aes_cbc.c
+++ /dev/null
@@ -1,133 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * AES CBC routines supporting VMX instructions on the Power 8
- *
- * Copyright (C) 2015 International Business Machines Inc.
- *
- * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
- */
-
-#include <asm/simd.h>
-#include <asm/switch_to.h>
-#include <crypto/aes.h>
-#include <crypto/internal/simd.h>
-#include <crypto/internal/skcipher.h>
-
-#include "aesp8-ppc.h"
-
-struct p8_aes_cbc_ctx {
- struct crypto_skcipher *fallback;
- struct aes_key enc_key;
- struct aes_key dec_key;
-};
-
-static int p8_aes_cbc_init(struct crypto_skcipher *tfm)
-{
- struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_skcipher *fallback;
-
- fallback = crypto_alloc_skcipher("cbc(aes)", 0,
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(fallback)) {
- pr_err("Failed to allocate cbc(aes) fallback: %ld\n",
- PTR_ERR(fallback));
- return PTR_ERR(fallback);
- }
-
- crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
- crypto_skcipher_reqsize(fallback));
- ctx->fallback = fallback;
- return 0;
-}
-
-static void p8_aes_cbc_exit(struct crypto_skcipher *tfm)
-{
- struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_free_skcipher(ctx->fallback);
-}
-
-static int p8_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- int ret;
-
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
- ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
-
- return ret ? -EINVAL : 0;
-}
-
-static int p8_aes_cbc_crypt(struct skcipher_request *req, int enc)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes;
- int ret;
-
- if (!crypto_simd_usable()) {
- struct skcipher_request *subreq = skcipher_request_ctx(req);
-
- *subreq = *req;
- skcipher_request_set_tfm(subreq, ctx->fallback);
- return enc ? crypto_skcipher_encrypt(subreq) :
- crypto_skcipher_decrypt(subreq);
- }
-
- ret = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes) != 0) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- aes_p8_cbc_encrypt(walk.src.virt.addr,
- walk.dst.virt.addr,
- round_down(nbytes, AES_BLOCK_SIZE),
- enc ? &ctx->enc_key : &ctx->dec_key,
- walk.iv, enc);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
- }
- return ret;
-}
-
-static int p8_aes_cbc_encrypt(struct skcipher_request *req)
-{
- return p8_aes_cbc_crypt(req, 1);
-}
-
-static int p8_aes_cbc_decrypt(struct skcipher_request *req)
-{
- return p8_aes_cbc_crypt(req, 0);
-}
-
-struct skcipher_alg p8_aes_cbc_alg = {
- .base.cra_name = "cbc(aes)",
- .base.cra_driver_name = "p8_aes_cbc",
- .base.cra_module = THIS_MODULE,
- .base.cra_priority = 2000,
- .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
- .setkey = p8_aes_cbc_setkey,
- .encrypt = p8_aes_cbc_encrypt,
- .decrypt = p8_aes_cbc_decrypt,
- .init = p8_aes_cbc_init,
- .exit = p8_aes_cbc_exit,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
-};
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
deleted file mode 100644
index 9a3da8cd62f35..0000000000000
--- a/drivers/crypto/vmx/aes_ctr.c
+++ /dev/null
@@ -1,149 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * AES CTR routines supporting VMX instructions on the Power 8
- *
- * Copyright (C) 2015 International Business Machines Inc.
- *
- * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
- */
-
-#include <asm/simd.h>
-#include <asm/switch_to.h>
-#include <crypto/aes.h>
-#include <crypto/internal/simd.h>
-#include <crypto/internal/skcipher.h>
-
-#include "aesp8-ppc.h"
-
-struct p8_aes_ctr_ctx {
- struct crypto_skcipher *fallback;
- struct aes_key enc_key;
-};
-
-static int p8_aes_ctr_init(struct crypto_skcipher *tfm)
-{
- struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_skcipher *fallback;
-
- fallback = crypto_alloc_skcipher("ctr(aes)", 0,
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(fallback)) {
- pr_err("Failed to allocate ctr(aes) fallback: %ld\n",
- PTR_ERR(fallback));
- return PTR_ERR(fallback);
- }
-
- crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
- crypto_skcipher_reqsize(fallback));
- ctx->fallback = fallback;
- return 0;
-}
-
-static void p8_aes_ctr_exit(struct crypto_skcipher *tfm)
-{
- struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_free_skcipher(ctx->fallback);
-}
-
-static int p8_aes_ctr_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
- int ret;
-
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
-
- return ret ? -EINVAL : 0;
-}
-
-static void p8_aes_ctr_final(const struct p8_aes_ctr_ctx *ctx,
- struct skcipher_walk *walk)
-{
- u8 *ctrblk = walk->iv;
- u8 keystream[AES_BLOCK_SIZE];
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- unsigned int nbytes = walk->nbytes;
-
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- crypto_xor_cpy(dst, keystream, src, nbytes);
- crypto_inc(ctrblk, AES_BLOCK_SIZE);
-}
-
-static int p8_aes_ctr_crypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes;
- int ret;
-
- if (!crypto_simd_usable()) {
- struct skcipher_request *subreq = skcipher_request_ctx(req);
-
- *subreq = *req;
- skcipher_request_set_tfm(subreq, ctx->fallback);
- return crypto_skcipher_encrypt(subreq);
- }
-
- ret = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
- walk.dst.virt.addr,
- nbytes / AES_BLOCK_SIZE,
- &ctx->enc_key, walk.iv);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- do {
- crypto_inc(walk.iv, AES_BLOCK_SIZE);
- } while ((nbytes -= AES_BLOCK_SIZE) >= AES_BLOCK_SIZE);
-
- ret = skcipher_walk_done(&walk, nbytes);
- }
- if (nbytes) {
- p8_aes_ctr_final(ctx, &walk);
- ret = skcipher_walk_done(&walk, 0);
- }
- return ret;
-}
-
-struct skcipher_alg p8_aes_ctr_alg = {
- .base.cra_name = "ctr(aes)",
- .base.cra_driver_name = "p8_aes_ctr",
- .base.cra_module = THIS_MODULE,
- .base.cra_priority = 2000,
- .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
- .setkey = p8_aes_ctr_setkey,
- .encrypt = p8_aes_ctr_crypt,
- .decrypt = p8_aes_ctr_crypt,
- .init = p8_aes_ctr_init,
- .exit = p8_aes_ctr_exit,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .chunksize = AES_BLOCK_SIZE,
-};
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
deleted file mode 100644
index dabbccb415502..0000000000000
--- a/drivers/crypto/vmx/aes_xts.c
+++ /dev/null
@@ -1,162 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * AES XTS routines supporting VMX In-core instructions on Power 8
- *
- * Copyright (C) 2015 International Business Machines Inc.
- *
- * Author: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
- */
-
-#include <asm/simd.h>
-#include <asm/switch_to.h>
-#include <crypto/aes.h>
-#include <crypto/internal/simd.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/xts.h>
-
-#include "aesp8-ppc.h"
-
-struct p8_aes_xts_ctx {
- struct crypto_skcipher *fallback;
- struct aes_key enc_key;
- struct aes_key dec_key;
- struct aes_key tweak_key;
-};
-
-static int p8_aes_xts_init(struct crypto_skcipher *tfm)
-{
- struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_skcipher *fallback;
-
- fallback = crypto_alloc_skcipher("xts(aes)", 0,
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(fallback)) {
- pr_err("Failed to allocate xts(aes) fallback: %ld\n",
- PTR_ERR(fallback));
- return PTR_ERR(fallback);
- }
-
- crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
- crypto_skcipher_reqsize(fallback));
- ctx->fallback = fallback;
- return 0;
-}
-
-static void p8_aes_xts_exit(struct crypto_skcipher *tfm)
-{
- struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_free_skcipher(ctx->fallback);
-}
-
-static int p8_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- int ret;
-
- ret = xts_verify_key(tfm, key, keylen);
- if (ret)
- return ret;
-
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key);
- ret |= aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
- ret |= aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
-
- return ret ? -EINVAL : 0;
-}
-
-static int p8_aes_xts_crypt(struct skcipher_request *req, int enc)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes;
- u8 tweak[AES_BLOCK_SIZE];
- int ret;
-
- if (req->cryptlen < AES_BLOCK_SIZE)
- return -EINVAL;
-
- if (!crypto_simd_usable() || (req->cryptlen % XTS_BLOCK_SIZE) != 0) {
- struct skcipher_request *subreq = skcipher_request_ctx(req);
-
- *subreq = *req;
- skcipher_request_set_tfm(subreq, ctx->fallback);
- return enc ? crypto_skcipher_encrypt(subreq) :
- crypto_skcipher_decrypt(subreq);
- }
-
- ret = skcipher_walk_virt(&walk, req, false);
- if (ret)
- return ret;
-
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
-
- aes_p8_encrypt(walk.iv, tweak, &ctx->tweak_key);
-
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- while ((nbytes = walk.nbytes) != 0) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- if (enc)
- aes_p8_xts_encrypt(walk.src.virt.addr,
- walk.dst.virt.addr,
- round_down(nbytes, AES_BLOCK_SIZE),
- &ctx->enc_key, NULL, tweak);
- else
- aes_p8_xts_decrypt(walk.src.virt.addr,
- walk.dst.virt.addr,
- round_down(nbytes, AES_BLOCK_SIZE),
- &ctx->dec_key, NULL, tweak);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
- }
- return ret;
-}
-
-static int p8_aes_xts_encrypt(struct skcipher_request *req)
-{
- return p8_aes_xts_crypt(req, 1);
-}
-
-static int p8_aes_xts_decrypt(struct skcipher_request *req)
-{
- return p8_aes_xts_crypt(req, 0);
-}
-
-struct skcipher_alg p8_aes_xts_alg = {
- .base.cra_name = "xts(aes)",
- .base.cra_driver_name = "p8_aes_xts",
- .base.cra_module = THIS_MODULE,
- .base.cra_priority = 2000,
- .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct p8_aes_xts_ctx),
- .setkey = p8_aes_xts_setkey,
- .encrypt = p8_aes_xts_encrypt,
- .decrypt = p8_aes_xts_decrypt,
- .init = p8_aes_xts_init,
- .exit = p8_aes_xts_exit,
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
-};
diff --git a/drivers/crypto/vmx/aesp8-ppc.h b/drivers/crypto/vmx/aesp8-ppc.h
deleted file mode 100644
index 5764d44383885..0000000000000
--- a/drivers/crypto/vmx/aesp8-ppc.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/types.h>
-#include <crypto/aes.h>
-
-struct aes_key {
- u8 key[AES_MAX_KEYLENGTH];
- int rounds;
-};
-
-extern struct shash_alg p8_ghash_alg;
-extern struct crypto_alg p8_aes_alg;
-extern struct skcipher_alg p8_aes_cbc_alg;
-extern struct skcipher_alg p8_aes_ctr_alg;
-extern struct skcipher_alg p8_aes_xts_alg;
-
-int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
- struct aes_key *key);
-int aes_p8_set_decrypt_key(const u8 *userKey, const int bits,
- struct aes_key *key);
-void aes_p8_encrypt(const u8 *in, u8 *out, const struct aes_key *key);
-void aes_p8_decrypt(const u8 *in, u8 *out, const struct aes_key *key);
-void aes_p8_cbc_encrypt(const u8 *in, u8 *out, size_t len,
- const struct aes_key *key, u8 *iv, const int enc);
-void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out,
- size_t len, const struct aes_key *key,
- const u8 *iv);
-void aes_p8_xts_encrypt(const u8 *in, u8 *out, size_t len,
- const struct aes_key *key1, const struct aes_key *key2, u8 *iv);
-void aes_p8_xts_decrypt(const u8 *in, u8 *out, size_t len,
- const struct aes_key *key1, const struct aes_key *key2, u8 *iv);
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
deleted file mode 100644
index f729589d792ea..0000000000000
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ /dev/null
@@ -1,3889 +0,0 @@
-#! /usr/bin/env perl
-# SPDX-License-Identifier: GPL-2.0
-
-# This code is taken from CRYPTOGAMs[1] and is included here using the option
-# in the license to distribute the code under the GPL. Therefore this program
-# is free software; you can redistribute it and/or modify it under the terms of
-# the GNU General Public License version 2 as published by the Free Software
-# Foundation.
-#
-# [1] https://www.openssl.org/~appro/cryptogams/
-
-# Copyright (c) 2006-2017, CRYPTOGAMS by <appro@openssl.org>
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain copyright notices,
-# this list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials
-# provided with the distribution.
-#
-# * Neither the name of the CRYPTOGAMS nor the names of its
-# copyright holder and contributors may be used to endorse or
-# promote products derived from this software without specific
-# prior written permission.
-#
-# ALTERNATIVELY, provided that this notice is retained in full, this
-# product may be distributed under the terms of the GNU General Public
-# License (GPL), in which case the provisions of the GPL apply INSTEAD OF
-# those given above.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# ====================================================================
-# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see https://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# This module implements support for AES instructions as per PowerISA
-# specification version 2.07, first implemented by POWER8 processor.
-# The module is endian-agnostic in sense that it supports both big-
-# and little-endian cases. Data alignment in parallelizable modes is
-# handled with VSX loads and stores, which implies MSR.VSX flag being
-# set. It should also be noted that ISA specification doesn't prohibit
-# alignment exceptions for these instructions on page boundaries.
-# Initially alignment was handled in pure AltiVec/VMX way [when data
-# is aligned programmatically, which in turn guarantees exception-
-# free execution], but it turned to hamper performance when vcipher
-# instructions are interleaved. It's reckoned that eventual
-# misalignment penalties at page boundaries are in average lower
-# than additional overhead in pure AltiVec approach.
-#
-# May 2016
-#
-# Add XTS subroutine, 9x on little- and 12x improvement on big-endian
-# systems were measured.
-#
-######################################################################
-# Current large-block performance in cycles per byte processed with
-# 128-bit key (less is better).
-#
-# CBC en-/decrypt CTR XTS
-# POWER8[le] 3.96/0.72 0.74 1.1
-# POWER8[be] 3.75/0.65 0.66 1.0
-
-$flavour = shift;
-
-if ($flavour =~ /64/) {
- $SIZE_T =8;
- $LRSAVE =2*$SIZE_T;
- $STU ="stdu";
- $POP ="ld";
- $PUSH ="std";
- $UCMP ="cmpld";
- $SHL ="sldi";
-} elsif ($flavour =~ /32/) {
- $SIZE_T =4;
- $LRSAVE =$SIZE_T;
- $STU ="stwu";
- $POP ="lwz";
- $PUSH ="stw";
- $UCMP ="cmplw";
- $SHL ="slwi";
-} else { die "nonsense $flavour"; }
-
-$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0;
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
-die "can't locate ppc-xlate.pl";
-
-open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
-
-$FRAME=8*$SIZE_T;
-$prefix="aes_p8";
-
-$sp="r1";
-$vrsave="r12";
-
-#########################################################################
-{{{ # Key setup procedures #
-my ($inp,$bits,$out,$ptr,$cnt,$rounds)=map("r$_",(3..8));
-my ($zero,$in0,$in1,$key,$rcon,$mask,$tmp)=map("v$_",(0..6));
-my ($stage,$outperm,$outmask,$outhead,$outtail)=map("v$_",(7..11));
-
-$code.=<<___;
-.machine "any"
-
-.text
-
-.align 7
-rcon:
-.long 0x01000000, 0x01000000, 0x01000000, 0x01000000 ?rev
-.long 0x1b000000, 0x1b000000, 0x1b000000, 0x1b000000 ?rev
-.long 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c ?rev
-.long 0,0,0,0 ?asis
-.long 0x0f102132, 0x43546576, 0x8798a9ba, 0xcbdcedfe
-Lconsts:
- mflr r0
- bcl 20,31,\$+4
- mflr $ptr #vvvvv "distance between . and rcon
- addi $ptr,$ptr,-0x58
- mtlr r0
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,0,0
-.asciz "AES for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
-
-.globl .${prefix}_set_encrypt_key
-Lset_encrypt_key:
- mflr r11
- $PUSH r11,$LRSAVE($sp)
-
- li $ptr,-1
- ${UCMP}i $inp,0
- beq- Lenc_key_abort # if ($inp==0) return -1;
- ${UCMP}i $out,0
- beq- Lenc_key_abort # if ($out==0) return -1;
- li $ptr,-2
- cmpwi $bits,128
- blt- Lenc_key_abort
- cmpwi $bits,256
- bgt- Lenc_key_abort
- andi. r0,$bits,0x3f
- bne- Lenc_key_abort
-
- lis r0,0xfff0
- mfspr $vrsave,256
- mtspr 256,r0
-
- bl Lconsts
- mtlr r11
-
- neg r9,$inp
- lvx $in0,0,$inp
- addi $inp,$inp,15 # 15 is not typo
- lvsr $key,0,r9 # borrow $key
- li r8,0x20
- cmpwi $bits,192
- lvx $in1,0,$inp
- le?vspltisb $mask,0x0f # borrow $mask
- lvx $rcon,0,$ptr
- le?vxor $key,$key,$mask # adjust for byte swap
- lvx $mask,r8,$ptr
- addi $ptr,$ptr,0x10
- vperm $in0,$in0,$in1,$key # align [and byte swap in LE]
- li $cnt,8
- vxor $zero,$zero,$zero
- mtctr $cnt
-
- ?lvsr $outperm,0,$out
- vspltisb $outmask,-1
- lvx $outhead,0,$out
- ?vperm $outmask,$zero,$outmask,$outperm
-
- blt Loop128
- addi $inp,$inp,8
- beq L192
- addi $inp,$inp,8
- b L256
-
-.align 4
-Loop128:
- vperm $key,$in0,$in0,$mask # rotate-n-splat
- vsldoi $tmp,$zero,$in0,12 # >>32
- vperm $outtail,$in0,$in0,$outperm # rotate
- vsel $stage,$outhead,$outtail,$outmask
- vmr $outhead,$outtail
- vcipherlast $key,$key,$rcon
- stvx $stage,0,$out
- addi $out,$out,16
-
- vxor $in0,$in0,$tmp
- vsldoi $tmp,$zero,$tmp,12 # >>32
- vxor $in0,$in0,$tmp
- vsldoi $tmp,$zero,$tmp,12 # >>32
- vxor $in0,$in0,$tmp
- vadduwm $rcon,$rcon,$rcon
- vxor $in0,$in0,$key
- bdnz Loop128
-
- lvx $rcon,0,$ptr # last two round keys
-
- vperm $key,$in0,$in0,$mask # rotate-n-splat
- vsldoi $tmp,$zero,$in0,12 # >>32
- vperm $outtail,$in0,$in0,$outperm # rotate
- vsel $stage,$outhead,$outtail,$outmask
- vmr $outhead,$outtail
- vcipherlast $key,$key,$rcon
- stvx $stage,0,$out
- addi $out,$out,16
-
- vxor $in0,$in0,$tmp
- vsldoi $tmp,$zero,$tmp,12 # >>32
- vxor $in0,$in0,$tmp
- vsldoi $tmp,$zero,$tmp,12 # >>32
- vxor $in0,$in0,$tmp
- vadduwm $rcon,$rcon,$rcon
- vxor $in0,$in0,$key
-
- vperm $key,$in0,$in0,$mask # rotate-n-splat
- vsldoi $tmp,$zero,$in0,12 # >>32
- vperm $outtail,$in0,$in0,$outperm # rotate
- vsel $stage,$outhead,$outtail,$outmask
- vmr $outhead,$outtail
- vcipherlast $key,$key,$rcon
- stvx $stage,0,$out
- addi $out,$out,16
-
- vxor $in0,$in0,$tmp
- vsldoi $tmp,$zero,$tmp,12 # >>32
- vxor $in0,$in0,$tmp
- vsldoi $tmp,$zero,$tmp,12 # >>32
- vxor $in0,$in0,$tmp
- vxor $in0,$in0,$key
- vperm $outtail,$in0,$in0,$outperm # rotate
- vsel $stage,$outhead,$outtail,$outmask
- vmr $outhead,$outtail
- stvx $stage,0,$out
-
- addi $inp,$out,15 # 15 is not typo
- addi $out,$out,0x50
-
- li $rounds,10
- b Ldone
-
-.align 4
-L192:
- lvx $tmp,0,$inp
- li $cnt,4
- vperm $outtail,$in0,$in0,$outperm # rotate
- vsel $stage,$outhead,$outtail,$outmask
- vmr $outhead,$outtail
- stvx $stage,0,$out
- addi $out,$out,16
- vperm $in1,$in1,$tmp,$key # align [and byte swap in LE]
- vspltisb $key,8 # borrow $key
- mtctr $cnt
- vsububm $mask,$mask,$key # adjust the mask
-
-Loop192:
- vperm $key,$in1,$in1,$mask # rotate-n-splat
- vsldoi $tmp,$zero,$in0,12 # >>32
- vcipherlast $key,$key,$rcon
-
- vxor $in0,$in0,$tmp
- vsldoi $tmp,$zero,$tmp,12 # >>32
- vxor $in0,$in0,$tmp
- vsldoi $tmp,$zero,$tmp,12 # >>32
- vxor $in0,$in0,$tmp
-
- vsldoi $stage,$zero,$in1,8
- vspltw $tmp,$in0,3
- vxor $tmp,$tmp,$in1
- vsldoi $in1,$zero,$in1,12 # >>32
- vadduwm $rcon,$rcon,$rcon
- vxor $in1,$in1,$tmp
- vxor $in0,$in0,$key
- vxor $in1,$in1,$key
- vsldoi $stage,$stage,$in0,8
-
- vperm $key,$in1,$in1,$mask # rotate-n-splat
- vsldoi $tmp,$zero,$in0,12 # >>32
- vperm $outtail,$stage,$stage,$outperm # rotate
- vsel $stage,$outhead,$outtail,$outmask
- vmr $outhead,$outtail
- vcipherlast $key,$key,$rcon
- stvx $stage,0,$out
- addi $out,$out,16
-
- vsldoi $stage,$in0,$in1,8
- vxor $in0,$in0,$tmp
- vsldoi $tmp,$zero,$tmp,12 # >>32
- vperm $outtail,$stage,$stage,$outperm # rotate
- vsel $stage,$outhead,$outtail,$outmask
- vmr $outhead,$outtail
- vxor $in0,$in0,$tmp
- vsldoi $tmp,$zero,$tmp,12 # >>32
- vxor $in0,$in0,$tmp
- stvx $stage,0,$out
- addi $out,$out,16
-
- vspltw $tmp,$in0,3
- vxor $tmp,$tmp,$in1
- vsldoi $in1,$zero,$in1,12 # >>32
- vadduwm $rcon,$rcon,$rcon
- vxor $in1,$in1,$tmp
- vxor $in0,$in0,$key
- vxor $in1,$in1,$key
- vperm $outtail,$in0,$in0,$outperm # rotate
- vsel $stage,$outhead,$outtail,$outmask
- vmr $outhead,$outtail
- stvx $stage,0,$out
- addi $inp,$out,15 # 15 is not typo
- addi $out,$out,16
- bdnz Loop192
-
- li $rounds,12
- addi $out,$out,0x20
- b Ldone
-
-.align 4
-L256:
- lvx $tmp,0,$inp
- li $cnt,7
- li $rounds,14
- vperm $outtail,$in0,$in0,$outperm # rotate
- vsel $stage,$outhead,$outtail,$outmask
- vmr $outhead,$outtail
- stvx $stage,0,$out
- addi $out,$out,16
- vperm $in1,$in1,$tmp,$key # align [and byte swap in LE]
- mtctr $cnt
-
-Loop256:
- vperm $key,$in1,$in1,$mask # rotate-n-splat
- vsldoi $tmp,$zero,$in0,12 # >>32
- vperm $outtail,$in1,$in1,$outperm # rotate
- vsel $stage,$outhead,$outtail,$outmask
- vmr $outhead,$outtail
- vcipherlast $key,$key,$rcon
- stvx $stage,0,$out
- addi $out,$out,16
-
- vxor $in0,$in0,$tmp
- vsldoi $tmp,$zero,$tmp,12 # >>32
- vxor $in0,$in0,$tmp
- vsldoi $tmp,$zero,$tmp,12 # >>32
- vxor $in0,$in0,$tmp
- vadduwm $rcon,$rcon,$rcon
- vxor $in0,$in0,$key
- vperm $outtail,$in0,$in0,$outperm # rotate
- vsel $stage,$outhead,$outtail,$outmask
- vmr $outhead,$outtail
- stvx $stage,0,$out
- addi $inp,$out,15 # 15 is not typo
- addi $out,$out,16
- bdz Ldone
-
- vspltw $key,$in0,3 # just splat
- vsldoi $tmp,$zero,$in1,12 # >>32
- vsbox $key,$key
-
- vxor $in1,$in1,$tmp
- vsldoi $tmp,$zero,$tmp,12 # >>32
- vxor $in1,$in1,$tmp
- vsldoi $tmp,$zero,$tmp,12 # >>32
- vxor $in1,$in1,$tmp
-
- vxor $in1,$in1,$key
- b Loop256
-
-.align 4
-Ldone:
- lvx $in1,0,$inp # redundant in aligned case
- vsel $in1,$outhead,$in1,$outmask
- stvx $in1,0,$inp
- li $ptr,0
- mtspr 256,$vrsave
- stw $rounds,0($out)
-
-Lenc_key_abort:
- mr r3,$ptr
- blr
- .long 0
- .byte 0,12,0x14,1,0,0,3,0
- .long 0
-.size .${prefix}_set_encrypt_key,.-.${prefix}_set_encrypt_key
-
-.globl .${prefix}_set_decrypt_key
- $STU $sp,-$FRAME($sp)
- mflr r10
- $PUSH r10,$FRAME+$LRSAVE($sp)
- bl Lset_encrypt_key
- mtlr r10
-
- cmpwi r3,0
- bne- Ldec_key_abort
-
- slwi $cnt,$rounds,4
- subi $inp,$out,240 # first round key
- srwi $rounds,$rounds,1
- add $out,$inp,$cnt # last round key
- mtctr $rounds
-
-Ldeckey:
- lwz r0, 0($inp)
- lwz r6, 4($inp)
- lwz r7, 8($inp)
- lwz r8, 12($inp)
- addi $inp,$inp,16
- lwz r9, 0($out)
- lwz r10,4($out)
- lwz r11,8($out)
- lwz r12,12($out)
- stw r0, 0($out)
- stw r6, 4($out)
- stw r7, 8($out)
- stw r8, 12($out)
- subi $out,$out,16
- stw r9, -16($inp)
- stw r10,-12($inp)
- stw r11,-8($inp)
- stw r12,-4($inp)
- bdnz Ldeckey
-
- xor r3,r3,r3 # return value
-Ldec_key_abort:
- addi $sp,$sp,$FRAME
- blr
- .long 0
- .byte 0,12,4,1,0x80,0,3,0
- .long 0
-.size .${prefix}_set_decrypt_key,.-.${prefix}_set_decrypt_key
-___
-}}}
-#########################################################################
-{{{ # Single block en- and decrypt procedures #
-sub gen_block () {
-my $dir = shift;
-my $n = $dir eq "de" ? "n" : "";
-my ($inp,$out,$key,$rounds,$idx)=map("r$_",(3..7));
-
-$code.=<<___;
-.globl .${prefix}_${dir}crypt
- lwz $rounds,240($key)
- lis r0,0xfc00
- mfspr $vrsave,256
- li $idx,15 # 15 is not typo
- mtspr 256,r0
-
- lvx v0,0,$inp
- neg r11,$out
- lvx v1,$idx,$inp
- lvsl v2,0,$inp # inpperm
- le?vspltisb v4,0x0f
- ?lvsl v3,0,r11 # outperm
- le?vxor v2,v2,v4
- li $idx,16
- vperm v0,v0,v1,v2 # align [and byte swap in LE]
- lvx v1,0,$key
- ?lvsl v5,0,$key # keyperm
- srwi $rounds,$rounds,1
- lvx v2,$idx,$key
- addi $idx,$idx,16
- subi $rounds,$rounds,1
- ?vperm v1,v1,v2,v5 # align round key
-
- vxor v0,v0,v1
- lvx v1,$idx,$key
- addi $idx,$idx,16
- mtctr $rounds
-
-Loop_${dir}c:
- ?vperm v2,v2,v1,v5
- v${n}cipher v0,v0,v2
- lvx v2,$idx,$key
- addi $idx,$idx,16
- ?vperm v1,v1,v2,v5
- v${n}cipher v0,v0,v1
- lvx v1,$idx,$key
- addi $idx,$idx,16
- bdnz Loop_${dir}c
-
- ?vperm v2,v2,v1,v5
- v${n}cipher v0,v0,v2
- lvx v2,$idx,$key
- ?vperm v1,v1,v2,v5
- v${n}cipherlast v0,v0,v1
-
- vspltisb v2,-1
- vxor v1,v1,v1
- li $idx,15 # 15 is not typo
- ?vperm v2,v1,v2,v3 # outmask
- le?vxor v3,v3,v4
- lvx v1,0,$out # outhead
- vperm v0,v0,v0,v3 # rotate [and byte swap in LE]
- vsel v1,v1,v0,v2
- lvx v4,$idx,$out
- stvx v1,0,$out
- vsel v0,v0,v4,v2
- stvx v0,$idx,$out
-
- mtspr 256,$vrsave
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,3,0
- .long 0
-.size .${prefix}_${dir}crypt,.-.${prefix}_${dir}crypt
-___
-}
-&gen_block("en");
-&gen_block("de");
-}}}
-#########################################################################
-{{{ # CBC en- and decrypt procedures #
-my ($inp,$out,$len,$key,$ivp,$enc,$rounds,$idx)=map("r$_",(3..10));
-my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3));
-my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm)=
- map("v$_",(4..10));
-$code.=<<___;
-.globl .${prefix}_cbc_encrypt
- ${UCMP}i $len,16
- bltlr-
-
- cmpwi $enc,0 # test direction
- lis r0,0xffe0
- mfspr $vrsave,256
- mtspr 256,r0
-
- li $idx,15
- vxor $rndkey0,$rndkey0,$rndkey0
- le?vspltisb $tmp,0x0f
-
- lvx $ivec,0,$ivp # load [unaligned] iv
- lvsl $inpperm,0,$ivp
- lvx $inptail,$idx,$ivp
- le?vxor $inpperm,$inpperm,$tmp
- vperm $ivec,$ivec,$inptail,$inpperm
-
- neg r11,$inp
- ?lvsl $keyperm,0,$key # prepare for unaligned key
- lwz $rounds,240($key)
-
- lvsr $inpperm,0,r11 # prepare for unaligned load
- lvx $inptail,0,$inp
- addi $inp,$inp,15 # 15 is not typo
- le?vxor $inpperm,$inpperm,$tmp
-
- ?lvsr $outperm,0,$out # prepare for unaligned store
- vspltisb $outmask,-1
- lvx $outhead,0,$out
- ?vperm $outmask,$rndkey0,$outmask,$outperm
- le?vxor $outperm,$outperm,$tmp
-
- srwi $rounds,$rounds,1
- li $idx,16
- subi $rounds,$rounds,1
- beq Lcbc_dec
-
-Lcbc_enc:
- vmr $inout,$inptail
- lvx $inptail,0,$inp
- addi $inp,$inp,16
- mtctr $rounds
- subi $len,$len,16 # len-=16
-
- lvx $rndkey0,0,$key
- vperm $inout,$inout,$inptail,$inpperm
- lvx $rndkey1,$idx,$key
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vxor $inout,$inout,$rndkey0
- lvx $rndkey0,$idx,$key
- addi $idx,$idx,16
- vxor $inout,$inout,$ivec
-
-Loop_cbc_enc:
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vcipher $inout,$inout,$rndkey1
- lvx $rndkey1,$idx,$key
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vcipher $inout,$inout,$rndkey0
- lvx $rndkey0,$idx,$key
- addi $idx,$idx,16
- bdnz Loop_cbc_enc
-
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vcipher $inout,$inout,$rndkey1
- lvx $rndkey1,$idx,$key
- li $idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vcipherlast $ivec,$inout,$rndkey0
- ${UCMP}i $len,16
-
- vperm $tmp,$ivec,$ivec,$outperm
- vsel $inout,$outhead,$tmp,$outmask
- vmr $outhead,$tmp
- stvx $inout,0,$out
- addi $out,$out,16
- bge Lcbc_enc
-
- b Lcbc_done
-
-.align 4
-Lcbc_dec:
- ${UCMP}i $len,128
- bge _aesp8_cbc_decrypt8x
- vmr $tmp,$inptail
- lvx $inptail,0,$inp
- addi $inp,$inp,16
- mtctr $rounds
- subi $len,$len,16 # len-=16
-
- lvx $rndkey0,0,$key
- vperm $tmp,$tmp,$inptail,$inpperm
- lvx $rndkey1,$idx,$key
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vxor $inout,$tmp,$rndkey0
- lvx $rndkey0,$idx,$key
- addi $idx,$idx,16
-
-Loop_cbc_dec:
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vncipher $inout,$inout,$rndkey1
- lvx $rndkey1,$idx,$key
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vncipher $inout,$inout,$rndkey0
- lvx $rndkey0,$idx,$key
- addi $idx,$idx,16
- bdnz Loop_cbc_dec
-
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vncipher $inout,$inout,$rndkey1
- lvx $rndkey1,$idx,$key
- li $idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vncipherlast $inout,$inout,$rndkey0
- ${UCMP}i $len,16
-
- vxor $inout,$inout,$ivec
- vmr $ivec,$tmp
- vperm $tmp,$inout,$inout,$outperm
- vsel $inout,$outhead,$tmp,$outmask
- vmr $outhead,$tmp
- stvx $inout,0,$out
- addi $out,$out,16
- bge Lcbc_dec
-
-Lcbc_done:
- addi $out,$out,-1
- lvx $inout,0,$out # redundant in aligned case
- vsel $inout,$outhead,$inout,$outmask
- stvx $inout,0,$out
-
- neg $enc,$ivp # write [unaligned] iv
- li $idx,15 # 15 is not typo
- vxor $rndkey0,$rndkey0,$rndkey0
- vspltisb $outmask,-1
- le?vspltisb $tmp,0x0f
- ?lvsl $outperm,0,$enc
- ?vperm $outmask,$rndkey0,$outmask,$outperm
- le?vxor $outperm,$outperm,$tmp
- lvx $outhead,0,$ivp
- vperm $ivec,$ivec,$ivec,$outperm
- vsel $inout,$outhead,$ivec,$outmask
- lvx $inptail,$idx,$ivp
- stvx $inout,0,$ivp
- vsel $inout,$ivec,$inptail,$outmask
- stvx $inout,$idx,$ivp
-
- mtspr 256,$vrsave
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,6,0
- .long 0
-___
-#########################################################################
-{{ # Optimized CBC decrypt procedure #
-my $key_="r11";
-my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,8,26..31));
-my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10..13));
-my ($out0,$out1,$out2,$out3,$out4,$out5,$out6,$out7)=map("v$_",(14..21));
-my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
- # v26-v31 last 6 round keys
-my ($tmp,$keyperm)=($in3,$in4); # aliases with "caller", redundant assignment
-
-$code.=<<___;
-.align 5
-_aesp8_cbc_decrypt8x:
- $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
- li r10,`$FRAME+8*16+15`
- li r11,`$FRAME+8*16+31`
- stvx v20,r10,$sp # ABI says so
- addi r10,r10,32
- stvx v21,r11,$sp
- addi r11,r11,32
- stvx v22,r10,$sp
- addi r10,r10,32
- stvx v23,r11,$sp
- addi r11,r11,32
- stvx v24,r10,$sp
- addi r10,r10,32
- stvx v25,r11,$sp
- addi r11,r11,32
- stvx v26,r10,$sp
- addi r10,r10,32
- stvx v27,r11,$sp
- addi r11,r11,32
- stvx v28,r10,$sp
- addi r10,r10,32
- stvx v29,r11,$sp
- addi r11,r11,32
- stvx v30,r10,$sp
- stvx v31,r11,$sp
- li r0,-1
- stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
- li $x10,0x10
- $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
- li $x20,0x20
- $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
- li $x30,0x30
- $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
- li $x40,0x40
- $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
- li $x50,0x50
- $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
- li $x60,0x60
- $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
- li $x70,0x70
- mtspr 256,r0
-
- subi $rounds,$rounds,3 # -4 in total
- subi $len,$len,128 # bias
-
- lvx $rndkey0,$x00,$key # load key schedule
- lvx v30,$x10,$key
- addi $key,$key,0x20
- lvx v31,$x00,$key
- ?vperm $rndkey0,$rndkey0,v30,$keyperm
- addi $key_,$sp,$FRAME+15
- mtctr $rounds
-
-Load_cbc_dec_key:
- ?vperm v24,v30,v31,$keyperm
- lvx v30,$x10,$key
- addi $key,$key,0x20
- stvx v24,$x00,$key_ # off-load round[1]
- ?vperm v25,v31,v30,$keyperm
- lvx v31,$x00,$key
- stvx v25,$x10,$key_ # off-load round[2]
- addi $key_,$key_,0x20
- bdnz Load_cbc_dec_key
-
- lvx v26,$x10,$key
- ?vperm v24,v30,v31,$keyperm
- lvx v27,$x20,$key
- stvx v24,$x00,$key_ # off-load round[3]
- ?vperm v25,v31,v26,$keyperm
- lvx v28,$x30,$key
- stvx v25,$x10,$key_ # off-load round[4]
- addi $key_,$sp,$FRAME+15 # rewind $key_
- ?vperm v26,v26,v27,$keyperm
- lvx v29,$x40,$key
- ?vperm v27,v27,v28,$keyperm
- lvx v30,$x50,$key
- ?vperm v28,v28,v29,$keyperm
- lvx v31,$x60,$key
- ?vperm v29,v29,v30,$keyperm
- lvx $out0,$x70,$key # borrow $out0
- ?vperm v30,v30,v31,$keyperm
- lvx v24,$x00,$key_ # pre-load round[1]
- ?vperm v31,v31,$out0,$keyperm
- lvx v25,$x10,$key_ # pre-load round[2]
-
- #lvx $inptail,0,$inp # "caller" already did this
- #addi $inp,$inp,15 # 15 is not typo
- subi $inp,$inp,15 # undo "caller"
-
- le?li $idx,8
- lvx_u $in0,$x00,$inp # load first 8 "words"
- le?lvsl $inpperm,0,$idx
- le?vspltisb $tmp,0x0f
- lvx_u $in1,$x10,$inp
- le?vxor $inpperm,$inpperm,$tmp # transform for lvx_u/stvx_u
- lvx_u $in2,$x20,$inp
- le?vperm $in0,$in0,$in0,$inpperm
- lvx_u $in3,$x30,$inp
- le?vperm $in1,$in1,$in1,$inpperm
- lvx_u $in4,$x40,$inp
- le?vperm $in2,$in2,$in2,$inpperm
- vxor $out0,$in0,$rndkey0
- lvx_u $in5,$x50,$inp
- le?vperm $in3,$in3,$in3,$inpperm
- vxor $out1,$in1,$rndkey0
- lvx_u $in6,$x60,$inp
- le?vperm $in4,$in4,$in4,$inpperm
- vxor $out2,$in2,$rndkey0
- lvx_u $in7,$x70,$inp
- addi $inp,$inp,0x80
- le?vperm $in5,$in5,$in5,$inpperm
- vxor $out3,$in3,$rndkey0
- le?vperm $in6,$in6,$in6,$inpperm
- vxor $out4,$in4,$rndkey0
- le?vperm $in7,$in7,$in7,$inpperm
- vxor $out5,$in5,$rndkey0
- vxor $out6,$in6,$rndkey0
- vxor $out7,$in7,$rndkey0
-
- mtctr $rounds
- b Loop_cbc_dec8x
-.align 5
-Loop_cbc_dec8x:
- vncipher $out0,$out0,v24
- vncipher $out1,$out1,v24
- vncipher $out2,$out2,v24
- vncipher $out3,$out3,v24
- vncipher $out4,$out4,v24
- vncipher $out5,$out5,v24
- vncipher $out6,$out6,v24
- vncipher $out7,$out7,v24
- lvx v24,$x20,$key_ # round[3]
- addi $key_,$key_,0x20
-
- vncipher $out0,$out0,v25
- vncipher $out1,$out1,v25
- vncipher $out2,$out2,v25
- vncipher $out3,$out3,v25
- vncipher $out4,$out4,v25
- vncipher $out5,$out5,v25
- vncipher $out6,$out6,v25
- vncipher $out7,$out7,v25
- lvx v25,$x10,$key_ # round[4]
- bdnz Loop_cbc_dec8x
-
- subic $len,$len,128 # $len-=128
- vncipher $out0,$out0,v24
- vncipher $out1,$out1,v24
- vncipher $out2,$out2,v24
- vncipher $out3,$out3,v24
- vncipher $out4,$out4,v24
- vncipher $out5,$out5,v24
- vncipher $out6,$out6,v24
- vncipher $out7,$out7,v24
-
- subfe. r0,r0,r0 # borrow?-1:0
- vncipher $out0,$out0,v25
- vncipher $out1,$out1,v25
- vncipher $out2,$out2,v25
- vncipher $out3,$out3,v25
- vncipher $out4,$out4,v25
- vncipher $out5,$out5,v25
- vncipher $out6,$out6,v25
- vncipher $out7,$out7,v25
-
- and r0,r0,$len
- vncipher $out0,$out0,v26
- vncipher $out1,$out1,v26
- vncipher $out2,$out2,v26
- vncipher $out3,$out3,v26
- vncipher $out4,$out4,v26
- vncipher $out5,$out5,v26
- vncipher $out6,$out6,v26
- vncipher $out7,$out7,v26
-
- add $inp,$inp,r0 # $inp is adjusted in such
- # way that at exit from the
- # loop inX-in7 are loaded
- # with last "words"
- vncipher $out0,$out0,v27
- vncipher $out1,$out1,v27
- vncipher $out2,$out2,v27
- vncipher $out3,$out3,v27
- vncipher $out4,$out4,v27
- vncipher $out5,$out5,v27
- vncipher $out6,$out6,v27
- vncipher $out7,$out7,v27
-
- addi $key_,$sp,$FRAME+15 # rewind $key_
- vncipher $out0,$out0,v28
- vncipher $out1,$out1,v28
- vncipher $out2,$out2,v28
- vncipher $out3,$out3,v28
- vncipher $out4,$out4,v28
- vncipher $out5,$out5,v28
- vncipher $out6,$out6,v28
- vncipher $out7,$out7,v28
- lvx v24,$x00,$key_ # re-pre-load round[1]
-
- vncipher $out0,$out0,v29
- vncipher $out1,$out1,v29
- vncipher $out2,$out2,v29
- vncipher $out3,$out3,v29
- vncipher $out4,$out4,v29
- vncipher $out5,$out5,v29
- vncipher $out6,$out6,v29
- vncipher $out7,$out7,v29
- lvx v25,$x10,$key_ # re-pre-load round[2]
-
- vncipher $out0,$out0,v30
- vxor $ivec,$ivec,v31 # xor with last round key
- vncipher $out1,$out1,v30
- vxor $in0,$in0,v31
- vncipher $out2,$out2,v30
- vxor $in1,$in1,v31
- vncipher $out3,$out3,v30
- vxor $in2,$in2,v31
- vncipher $out4,$out4,v30
- vxor $in3,$in3,v31
- vncipher $out5,$out5,v30
- vxor $in4,$in4,v31
- vncipher $out6,$out6,v30
- vxor $in5,$in5,v31
- vncipher $out7,$out7,v30
- vxor $in6,$in6,v31
-
- vncipherlast $out0,$out0,$ivec
- vncipherlast $out1,$out1,$in0
- lvx_u $in0,$x00,$inp # load next input block
- vncipherlast $out2,$out2,$in1
- lvx_u $in1,$x10,$inp
- vncipherlast $out3,$out3,$in2
- le?vperm $in0,$in0,$in0,$inpperm
- lvx_u $in2,$x20,$inp
- vncipherlast $out4,$out4,$in3
- le?vperm $in1,$in1,$in1,$inpperm
- lvx_u $in3,$x30,$inp
- vncipherlast $out5,$out5,$in4
- le?vperm $in2,$in2,$in2,$inpperm
- lvx_u $in4,$x40,$inp
- vncipherlast $out6,$out6,$in5
- le?vperm $in3,$in3,$in3,$inpperm
- lvx_u $in5,$x50,$inp
- vncipherlast $out7,$out7,$in6
- le?vperm $in4,$in4,$in4,$inpperm
- lvx_u $in6,$x60,$inp
- vmr $ivec,$in7
- le?vperm $in5,$in5,$in5,$inpperm
- lvx_u $in7,$x70,$inp
- addi $inp,$inp,0x80
-
- le?vperm $out0,$out0,$out0,$inpperm
- le?vperm $out1,$out1,$out1,$inpperm
- stvx_u $out0,$x00,$out
- le?vperm $in6,$in6,$in6,$inpperm
- vxor $out0,$in0,$rndkey0
- le?vperm $out2,$out2,$out2,$inpperm
- stvx_u $out1,$x10,$out
- le?vperm $in7,$in7,$in7,$inpperm
- vxor $out1,$in1,$rndkey0
- le?vperm $out3,$out3,$out3,$inpperm
- stvx_u $out2,$x20,$out
- vxor $out2,$in2,$rndkey0
- le?vperm $out4,$out4,$out4,$inpperm
- stvx_u $out3,$x30,$out
- vxor $out3,$in3,$rndkey0
- le?vperm $out5,$out5,$out5,$inpperm
- stvx_u $out4,$x40,$out
- vxor $out4,$in4,$rndkey0
- le?vperm $out6,$out6,$out6,$inpperm
- stvx_u $out5,$x50,$out
- vxor $out5,$in5,$rndkey0
- le?vperm $out7,$out7,$out7,$inpperm
- stvx_u $out6,$x60,$out
- vxor $out6,$in6,$rndkey0
- stvx_u $out7,$x70,$out
- addi $out,$out,0x80
- vxor $out7,$in7,$rndkey0
-
- mtctr $rounds
- beq Loop_cbc_dec8x # did $len-=128 borrow?
-
- addic. $len,$len,128
- beq Lcbc_dec8x_done
- nop
- nop
-
-Loop_cbc_dec8x_tail: # up to 7 "words" tail...
- vncipher $out1,$out1,v24
- vncipher $out2,$out2,v24
- vncipher $out3,$out3,v24
- vncipher $out4,$out4,v24
- vncipher $out5,$out5,v24
- vncipher $out6,$out6,v24
- vncipher $out7,$out7,v24
- lvx v24,$x20,$key_ # round[3]
- addi $key_,$key_,0x20
-
- vncipher $out1,$out1,v25
- vncipher $out2,$out2,v25
- vncipher $out3,$out3,v25
- vncipher $out4,$out4,v25
- vncipher $out5,$out5,v25
- vncipher $out6,$out6,v25
- vncipher $out7,$out7,v25
- lvx v25,$x10,$key_ # round[4]
- bdnz Loop_cbc_dec8x_tail
-
- vncipher $out1,$out1,v24
- vncipher $out2,$out2,v24
- vncipher $out3,$out3,v24
- vncipher $out4,$out4,v24
- vncipher $out5,$out5,v24
- vncipher $out6,$out6,v24
- vncipher $out7,$out7,v24
-
- vncipher $out1,$out1,v25
- vncipher $out2,$out2,v25
- vncipher $out3,$out3,v25
- vncipher $out4,$out4,v25
- vncipher $out5,$out5,v25
- vncipher $out6,$out6,v25
- vncipher $out7,$out7,v25
-
- vncipher $out1,$out1,v26
- vncipher $out2,$out2,v26
- vncipher $out3,$out3,v26
- vncipher $out4,$out4,v26
- vncipher $out5,$out5,v26
- vncipher $out6,$out6,v26
- vncipher $out7,$out7,v26
-
- vncipher $out1,$out1,v27
- vncipher $out2,$out2,v27
- vncipher $out3,$out3,v27
- vncipher $out4,$out4,v27
- vncipher $out5,$out5,v27
- vncipher $out6,$out6,v27
- vncipher $out7,$out7,v27
-
- vncipher $out1,$out1,v28
- vncipher $out2,$out2,v28
- vncipher $out3,$out3,v28
- vncipher $out4,$out4,v28
- vncipher $out5,$out5,v28
- vncipher $out6,$out6,v28
- vncipher $out7,$out7,v28
-
- vncipher $out1,$out1,v29
- vncipher $out2,$out2,v29
- vncipher $out3,$out3,v29
- vncipher $out4,$out4,v29
- vncipher $out5,$out5,v29
- vncipher $out6,$out6,v29
- vncipher $out7,$out7,v29
-
- vncipher $out1,$out1,v30
- vxor $ivec,$ivec,v31 # last round key
- vncipher $out2,$out2,v30
- vxor $in1,$in1,v31
- vncipher $out3,$out3,v30
- vxor $in2,$in2,v31
- vncipher $out4,$out4,v30
- vxor $in3,$in3,v31
- vncipher $out5,$out5,v30
- vxor $in4,$in4,v31
- vncipher $out6,$out6,v30
- vxor $in5,$in5,v31
- vncipher $out7,$out7,v30
- vxor $in6,$in6,v31
-
- cmplwi $len,32 # switch($len)
- blt Lcbc_dec8x_one
- nop
- beq Lcbc_dec8x_two
- cmplwi $len,64
- blt Lcbc_dec8x_three
- nop
- beq Lcbc_dec8x_four
- cmplwi $len,96
- blt Lcbc_dec8x_five
- nop
- beq Lcbc_dec8x_six
-
-Lcbc_dec8x_seven:
- vncipherlast $out1,$out1,$ivec
- vncipherlast $out2,$out2,$in1
- vncipherlast $out3,$out3,$in2
- vncipherlast $out4,$out4,$in3
- vncipherlast $out5,$out5,$in4
- vncipherlast $out6,$out6,$in5
- vncipherlast $out7,$out7,$in6
- vmr $ivec,$in7
-
- le?vperm $out1,$out1,$out1,$inpperm
- le?vperm $out2,$out2,$out2,$inpperm
- stvx_u $out1,$x00,$out
- le?vperm $out3,$out3,$out3,$inpperm
- stvx_u $out2,$x10,$out
- le?vperm $out4,$out4,$out4,$inpperm
- stvx_u $out3,$x20,$out
- le?vperm $out5,$out5,$out5,$inpperm
- stvx_u $out4,$x30,$out
- le?vperm $out6,$out6,$out6,$inpperm
- stvx_u $out5,$x40,$out
- le?vperm $out7,$out7,$out7,$inpperm
- stvx_u $out6,$x50,$out
- stvx_u $out7,$x60,$out
- addi $out,$out,0x70
- b Lcbc_dec8x_done
-
-.align 5
-Lcbc_dec8x_six:
- vncipherlast $out2,$out2,$ivec
- vncipherlast $out3,$out3,$in2
- vncipherlast $out4,$out4,$in3
- vncipherlast $out5,$out5,$in4
- vncipherlast $out6,$out6,$in5
- vncipherlast $out7,$out7,$in6
- vmr $ivec,$in7
-
- le?vperm $out2,$out2,$out2,$inpperm
- le?vperm $out3,$out3,$out3,$inpperm
- stvx_u $out2,$x00,$out
- le?vperm $out4,$out4,$out4,$inpperm
- stvx_u $out3,$x10,$out
- le?vperm $out5,$out5,$out5,$inpperm
- stvx_u $out4,$x20,$out
- le?vperm $out6,$out6,$out6,$inpperm
- stvx_u $out5,$x30,$out
- le?vperm $out7,$out7,$out7,$inpperm
- stvx_u $out6,$x40,$out
- stvx_u $out7,$x50,$out
- addi $out,$out,0x60
- b Lcbc_dec8x_done
-
-.align 5
-Lcbc_dec8x_five:
- vncipherlast $out3,$out3,$ivec
- vncipherlast $out4,$out4,$in3
- vncipherlast $out5,$out5,$in4
- vncipherlast $out6,$out6,$in5
- vncipherlast $out7,$out7,$in6
- vmr $ivec,$in7
-
- le?vperm $out3,$out3,$out3,$inpperm
- le?vperm $out4,$out4,$out4,$inpperm
- stvx_u $out3,$x00,$out
- le?vperm $out5,$out5,$out5,$inpperm
- stvx_u $out4,$x10,$out
- le?vperm $out6,$out6,$out6,$inpperm
- stvx_u $out5,$x20,$out
- le?vperm $out7,$out7,$out7,$inpperm
- stvx_u $out6,$x30,$out
- stvx_u $out7,$x40,$out
- addi $out,$out,0x50
- b Lcbc_dec8x_done
-
-.align 5
-Lcbc_dec8x_four:
- vncipherlast $out4,$out4,$ivec
- vncipherlast $out5,$out5,$in4
- vncipherlast $out6,$out6,$in5
- vncipherlast $out7,$out7,$in6
- vmr $ivec,$in7
-
- le?vperm $out4,$out4,$out4,$inpperm
- le?vperm $out5,$out5,$out5,$inpperm
- stvx_u $out4,$x00,$out
- le?vperm $out6,$out6,$out6,$inpperm
- stvx_u $out5,$x10,$out
- le?vperm $out7,$out7,$out7,$inpperm
- stvx_u $out6,$x20,$out
- stvx_u $out7,$x30,$out
- addi $out,$out,0x40
- b Lcbc_dec8x_done
-
-.align 5
-Lcbc_dec8x_three:
- vncipherlast $out5,$out5,$ivec
- vncipherlast $out6,$out6,$in5
- vncipherlast $out7,$out7,$in6
- vmr $ivec,$in7
-
- le?vperm $out5,$out5,$out5,$inpperm
- le?vperm $out6,$out6,$out6,$inpperm
- stvx_u $out5,$x00,$out
- le?vperm $out7,$out7,$out7,$inpperm
- stvx_u $out6,$x10,$out
- stvx_u $out7,$x20,$out
- addi $out,$out,0x30
- b Lcbc_dec8x_done
-
-.align 5
-Lcbc_dec8x_two:
- vncipherlast $out6,$out6,$ivec
- vncipherlast $out7,$out7,$in6
- vmr $ivec,$in7
-
- le?vperm $out6,$out6,$out6,$inpperm
- le?vperm $out7,$out7,$out7,$inpperm
- stvx_u $out6,$x00,$out
- stvx_u $out7,$x10,$out
- addi $out,$out,0x20
- b Lcbc_dec8x_done
-
-.align 5
-Lcbc_dec8x_one:
- vncipherlast $out7,$out7,$ivec
- vmr $ivec,$in7
-
- le?vperm $out7,$out7,$out7,$inpperm
- stvx_u $out7,0,$out
- addi $out,$out,0x10
-
-Lcbc_dec8x_done:
- le?vperm $ivec,$ivec,$ivec,$inpperm
- stvx_u $ivec,0,$ivp # write [unaligned] iv
-
- li r10,`$FRAME+15`
- li r11,`$FRAME+31`
- stvx $inpperm,r10,$sp # wipe copies of round keys
- addi r10,r10,32
- stvx $inpperm,r11,$sp
- addi r11,r11,32
- stvx $inpperm,r10,$sp
- addi r10,r10,32
- stvx $inpperm,r11,$sp
- addi r11,r11,32
- stvx $inpperm,r10,$sp
- addi r10,r10,32
- stvx $inpperm,r11,$sp
- addi r11,r11,32
- stvx $inpperm,r10,$sp
- addi r10,r10,32
- stvx $inpperm,r11,$sp
- addi r11,r11,32
-
- mtspr 256,$vrsave
- lvx v20,r10,$sp # ABI says so
- addi r10,r10,32
- lvx v21,r11,$sp
- addi r11,r11,32
- lvx v22,r10,$sp
- addi r10,r10,32
- lvx v23,r11,$sp
- addi r11,r11,32
- lvx v24,r10,$sp
- addi r10,r10,32
- lvx v25,r11,$sp
- addi r11,r11,32
- lvx v26,r10,$sp
- addi r10,r10,32
- lvx v27,r11,$sp
- addi r11,r11,32
- lvx v28,r10,$sp
- addi r10,r10,32
- lvx v29,r11,$sp
- addi r11,r11,32
- lvx v30,r10,$sp
- lvx v31,r11,$sp
- $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
- $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
- $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
- $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
- $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
- $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
- addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
- blr
- .long 0
- .byte 0,12,0x14,0,0x80,6,6,0
- .long 0
-.size .${prefix}_cbc_encrypt,.-.${prefix}_cbc_encrypt
-___
-}} }}}
-
-#########################################################################
-{{{ # CTR procedure[s] #
-
-####################### WARNING: Here be dragons! #######################
-#
-# This code is written as 'ctr32', based on a 32-bit counter used
-# upstream. The kernel does *not* use a 32-bit counter. The kernel uses
-# a 128-bit counter.
-#
-# This leads to subtle changes from the upstream code: the counter
-# is incremented with vaddu_q_m rather than vaddu_w_m. This occurs in
-# both the bulk (8 blocks at a time) path, and in the individual block
-# path. Be aware of this when doing updates.
-#
-# See:
-# 1d4aa0b4c181 ("crypto: vmx - Fixing AES-CTR counter bug")
-# 009b30ac7444 ("crypto: vmx - CTR: always increment IV as quadword")
-# https://github.com/openssl/openssl/pull/8942
-#
-#########################################################################
-my ($inp,$out,$len,$key,$ivp,$x10,$rounds,$idx)=map("r$_",(3..10));
-my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3));
-my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm,$one)=
- map("v$_",(4..11));
-my $dat=$tmp;
-
-$code.=<<___;
-.globl .${prefix}_ctr32_encrypt_blocks
- ${UCMP}i $len,1
- bltlr-
-
- lis r0,0xfff0
- mfspr $vrsave,256
- mtspr 256,r0
-
- li $idx,15
- vxor $rndkey0,$rndkey0,$rndkey0
- le?vspltisb $tmp,0x0f
-
- lvx $ivec,0,$ivp # load [unaligned] iv
- lvsl $inpperm,0,$ivp
- lvx $inptail,$idx,$ivp
- vspltisb $one,1
- le?vxor $inpperm,$inpperm,$tmp
- vperm $ivec,$ivec,$inptail,$inpperm
- vsldoi $one,$rndkey0,$one,1
-
- neg r11,$inp
- ?lvsl $keyperm,0,$key # prepare for unaligned key
- lwz $rounds,240($key)
-
- lvsr $inpperm,0,r11 # prepare for unaligned load
- lvx $inptail,0,$inp
- addi $inp,$inp,15 # 15 is not typo
- le?vxor $inpperm,$inpperm,$tmp
-
- srwi $rounds,$rounds,1
- li $idx,16
- subi $rounds,$rounds,1
-
- ${UCMP}i $len,8
- bge _aesp8_ctr32_encrypt8x
-
- ?lvsr $outperm,0,$out # prepare for unaligned store
- vspltisb $outmask,-1
- lvx $outhead,0,$out
- ?vperm $outmask,$rndkey0,$outmask,$outperm
- le?vxor $outperm,$outperm,$tmp
-
- lvx $rndkey0,0,$key
- mtctr $rounds
- lvx $rndkey1,$idx,$key
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vxor $inout,$ivec,$rndkey0
- lvx $rndkey0,$idx,$key
- addi $idx,$idx,16
- b Loop_ctr32_enc
-
-.align 5
-Loop_ctr32_enc:
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vcipher $inout,$inout,$rndkey1
- lvx $rndkey1,$idx,$key
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vcipher $inout,$inout,$rndkey0
- lvx $rndkey0,$idx,$key
- addi $idx,$idx,16
- bdnz Loop_ctr32_enc
-
- vadduqm $ivec,$ivec,$one # Kernel change for 128-bit
- vmr $dat,$inptail
- lvx $inptail,0,$inp
- addi $inp,$inp,16
- subic. $len,$len,1 # blocks--
-
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vcipher $inout,$inout,$rndkey1
- lvx $rndkey1,$idx,$key
- vperm $dat,$dat,$inptail,$inpperm
- li $idx,16
- ?vperm $rndkey1,$rndkey0,$rndkey1,$keyperm
- lvx $rndkey0,0,$key
- vxor $dat,$dat,$rndkey1 # last round key
- vcipherlast $inout,$inout,$dat
-
- lvx $rndkey1,$idx,$key
- addi $idx,$idx,16
- vperm $inout,$inout,$inout,$outperm
- vsel $dat,$outhead,$inout,$outmask
- mtctr $rounds
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vmr $outhead,$inout
- vxor $inout,$ivec,$rndkey0
- lvx $rndkey0,$idx,$key
- addi $idx,$idx,16
- stvx $dat,0,$out
- addi $out,$out,16
- bne Loop_ctr32_enc
-
- addi $out,$out,-1
- lvx $inout,0,$out # redundant in aligned case
- vsel $inout,$outhead,$inout,$outmask
- stvx $inout,0,$out
-
- mtspr 256,$vrsave
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,6,0
- .long 0
-___
-#########################################################################
-{{ # Optimized CTR procedure #
-my $key_="r11";
-my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,8,26..31));
-my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10,12..14));
-my ($out0,$out1,$out2,$out3,$out4,$out5,$out6,$out7)=map("v$_",(15..22));
-my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
- # v26-v31 last 6 round keys
-my ($tmp,$keyperm)=($in3,$in4); # aliases with "caller", redundant assignment
-my ($two,$three,$four)=($outhead,$outperm,$outmask);
-
-$code.=<<___;
-.align 5
-_aesp8_ctr32_encrypt8x:
- $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
- li r10,`$FRAME+8*16+15`
- li r11,`$FRAME+8*16+31`
- stvx v20,r10,$sp # ABI says so
- addi r10,r10,32
- stvx v21,r11,$sp
- addi r11,r11,32
- stvx v22,r10,$sp
- addi r10,r10,32
- stvx v23,r11,$sp
- addi r11,r11,32
- stvx v24,r10,$sp
- addi r10,r10,32
- stvx v25,r11,$sp
- addi r11,r11,32
- stvx v26,r10,$sp
- addi r10,r10,32
- stvx v27,r11,$sp
- addi r11,r11,32
- stvx v28,r10,$sp
- addi r10,r10,32
- stvx v29,r11,$sp
- addi r11,r11,32
- stvx v30,r10,$sp
- stvx v31,r11,$sp
- li r0,-1
- stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
- li $x10,0x10
- $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
- li $x20,0x20
- $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
- li $x30,0x30
- $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
- li $x40,0x40
- $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
- li $x50,0x50
- $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
- li $x60,0x60
- $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
- li $x70,0x70
- mtspr 256,r0
-
- subi $rounds,$rounds,3 # -4 in total
-
- lvx $rndkey0,$x00,$key # load key schedule
- lvx v30,$x10,$key
- addi $key,$key,0x20
- lvx v31,$x00,$key
- ?vperm $rndkey0,$rndkey0,v30,$keyperm
- addi $key_,$sp,$FRAME+15
- mtctr $rounds
-
-Load_ctr32_enc_key:
- ?vperm v24,v30,v31,$keyperm
- lvx v30,$x10,$key
- addi $key,$key,0x20
- stvx v24,$x00,$key_ # off-load round[1]
- ?vperm v25,v31,v30,$keyperm
- lvx v31,$x00,$key
- stvx v25,$x10,$key_ # off-load round[2]
- addi $key_,$key_,0x20
- bdnz Load_ctr32_enc_key
-
- lvx v26,$x10,$key
- ?vperm v24,v30,v31,$keyperm
- lvx v27,$x20,$key
- stvx v24,$x00,$key_ # off-load round[3]
- ?vperm v25,v31,v26,$keyperm
- lvx v28,$x30,$key
- stvx v25,$x10,$key_ # off-load round[4]
- addi $key_,$sp,$FRAME+15 # rewind $key_
- ?vperm v26,v26,v27,$keyperm
- lvx v29,$x40,$key
- ?vperm v27,v27,v28,$keyperm
- lvx v30,$x50,$key
- ?vperm v28,v28,v29,$keyperm
- lvx v31,$x60,$key
- ?vperm v29,v29,v30,$keyperm
- lvx $out0,$x70,$key # borrow $out0
- ?vperm v30,v30,v31,$keyperm
- lvx v24,$x00,$key_ # pre-load round[1]
- ?vperm v31,v31,$out0,$keyperm
- lvx v25,$x10,$key_ # pre-load round[2]
-
- vadduqm $two,$one,$one
- subi $inp,$inp,15 # undo "caller"
- $SHL $len,$len,4
-
- vadduqm $out1,$ivec,$one # counter values ...
- vadduqm $out2,$ivec,$two # (do all ctr adds as 128-bit)
- vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0]
- le?li $idx,8
- vadduqm $out3,$out1,$two
- vxor $out1,$out1,$rndkey0
- le?lvsl $inpperm,0,$idx
- vadduqm $out4,$out2,$two
- vxor $out2,$out2,$rndkey0
- le?vspltisb $tmp,0x0f
- vadduqm $out5,$out3,$two
- vxor $out3,$out3,$rndkey0
- le?vxor $inpperm,$inpperm,$tmp # transform for lvx_u/stvx_u
- vadduqm $out6,$out4,$two
- vxor $out4,$out4,$rndkey0
- vadduqm $out7,$out5,$two
- vxor $out5,$out5,$rndkey0
- vadduqm $ivec,$out6,$two # next counter value
- vxor $out6,$out6,$rndkey0
- vxor $out7,$out7,$rndkey0
-
- mtctr $rounds
- b Loop_ctr32_enc8x
-.align 5
-Loop_ctr32_enc8x:
- vcipher $out0,$out0,v24
- vcipher $out1,$out1,v24
- vcipher $out2,$out2,v24
- vcipher $out3,$out3,v24
- vcipher $out4,$out4,v24
- vcipher $out5,$out5,v24
- vcipher $out6,$out6,v24
- vcipher $out7,$out7,v24
-Loop_ctr32_enc8x_middle:
- lvx v24,$x20,$key_ # round[3]
- addi $key_,$key_,0x20
-
- vcipher $out0,$out0,v25
- vcipher $out1,$out1,v25
- vcipher $out2,$out2,v25
- vcipher $out3,$out3,v25
- vcipher $out4,$out4,v25
- vcipher $out5,$out5,v25
- vcipher $out6,$out6,v25
- vcipher $out7,$out7,v25
- lvx v25,$x10,$key_ # round[4]
- bdnz Loop_ctr32_enc8x
-
- subic r11,$len,256 # $len-256, borrow $key_
- vcipher $out0,$out0,v24
- vcipher $out1,$out1,v24
- vcipher $out2,$out2,v24
- vcipher $out3,$out3,v24
- vcipher $out4,$out4,v24
- vcipher $out5,$out5,v24
- vcipher $out6,$out6,v24
- vcipher $out7,$out7,v24
-
- subfe r0,r0,r0 # borrow?-1:0
- vcipher $out0,$out0,v25
- vcipher $out1,$out1,v25
- vcipher $out2,$out2,v25
- vcipher $out3,$out3,v25
- vcipher $out4,$out4,v25
- vcipher $out5,$out5,v25
- vcipher $out6,$out6,v25
- vcipher $out7,$out7,v25
-
- and r0,r0,r11
- addi $key_,$sp,$FRAME+15 # rewind $key_
- vcipher $out0,$out0,v26
- vcipher $out1,$out1,v26
- vcipher $out2,$out2,v26
- vcipher $out3,$out3,v26
- vcipher $out4,$out4,v26
- vcipher $out5,$out5,v26
- vcipher $out6,$out6,v26
- vcipher $out7,$out7,v26
- lvx v24,$x00,$key_ # re-pre-load round[1]
-
- subic $len,$len,129 # $len-=129
- vcipher $out0,$out0,v27
- addi $len,$len,1 # $len-=128 really
- vcipher $out1,$out1,v27
- vcipher $out2,$out2,v27
- vcipher $out3,$out3,v27
- vcipher $out4,$out4,v27
- vcipher $out5,$out5,v27
- vcipher $out6,$out6,v27
- vcipher $out7,$out7,v27
- lvx v25,$x10,$key_ # re-pre-load round[2]
-
- vcipher $out0,$out0,v28
- lvx_u $in0,$x00,$inp # load input
- vcipher $out1,$out1,v28
- lvx_u $in1,$x10,$inp
- vcipher $out2,$out2,v28
- lvx_u $in2,$x20,$inp
- vcipher $out3,$out3,v28
- lvx_u $in3,$x30,$inp
- vcipher $out4,$out4,v28
- lvx_u $in4,$x40,$inp
- vcipher $out5,$out5,v28
- lvx_u $in5,$x50,$inp
- vcipher $out6,$out6,v28
- lvx_u $in6,$x60,$inp
- vcipher $out7,$out7,v28
- lvx_u $in7,$x70,$inp
- addi $inp,$inp,0x80
-
- vcipher $out0,$out0,v29
- le?vperm $in0,$in0,$in0,$inpperm
- vcipher $out1,$out1,v29
- le?vperm $in1,$in1,$in1,$inpperm
- vcipher $out2,$out2,v29
- le?vperm $in2,$in2,$in2,$inpperm
- vcipher $out3,$out3,v29
- le?vperm $in3,$in3,$in3,$inpperm
- vcipher $out4,$out4,v29
- le?vperm $in4,$in4,$in4,$inpperm
- vcipher $out5,$out5,v29
- le?vperm $in5,$in5,$in5,$inpperm
- vcipher $out6,$out6,v29
- le?vperm $in6,$in6,$in6,$inpperm
- vcipher $out7,$out7,v29
- le?vperm $in7,$in7,$in7,$inpperm
-
- add $inp,$inp,r0 # $inp is adjusted in such
- # way that at exit from the
- # loop inX-in7 are loaded
- # with last "words"
- subfe. r0,r0,r0 # borrow?-1:0
- vcipher $out0,$out0,v30
- vxor $in0,$in0,v31 # xor with last round key
- vcipher $out1,$out1,v30
- vxor $in1,$in1,v31
- vcipher $out2,$out2,v30
- vxor $in2,$in2,v31
- vcipher $out3,$out3,v30
- vxor $in3,$in3,v31
- vcipher $out4,$out4,v30
- vxor $in4,$in4,v31
- vcipher $out5,$out5,v30
- vxor $in5,$in5,v31
- vcipher $out6,$out6,v30
- vxor $in6,$in6,v31
- vcipher $out7,$out7,v30
- vxor $in7,$in7,v31
-
- bne Lctr32_enc8x_break # did $len-129 borrow?
-
- vcipherlast $in0,$out0,$in0
- vcipherlast $in1,$out1,$in1
- vadduqm $out1,$ivec,$one # counter values ...
- vcipherlast $in2,$out2,$in2
- vadduqm $out2,$ivec,$two
- vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0]
- vcipherlast $in3,$out3,$in3
- vadduqm $out3,$out1,$two
- vxor $out1,$out1,$rndkey0
- vcipherlast $in4,$out4,$in4
- vadduqm $out4,$out2,$two
- vxor $out2,$out2,$rndkey0
- vcipherlast $in5,$out5,$in5
- vadduqm $out5,$out3,$two
- vxor $out3,$out3,$rndkey0
- vcipherlast $in6,$out6,$in6
- vadduqm $out6,$out4,$two
- vxor $out4,$out4,$rndkey0
- vcipherlast $in7,$out7,$in7
- vadduqm $out7,$out5,$two
- vxor $out5,$out5,$rndkey0
- le?vperm $in0,$in0,$in0,$inpperm
- vadduqm $ivec,$out6,$two # next counter value
- vxor $out6,$out6,$rndkey0
- le?vperm $in1,$in1,$in1,$inpperm
- vxor $out7,$out7,$rndkey0
- mtctr $rounds
-
- vcipher $out0,$out0,v24
- stvx_u $in0,$x00,$out
- le?vperm $in2,$in2,$in2,$inpperm
- vcipher $out1,$out1,v24
- stvx_u $in1,$x10,$out
- le?vperm $in3,$in3,$in3,$inpperm
- vcipher $out2,$out2,v24
- stvx_u $in2,$x20,$out
- le?vperm $in4,$in4,$in4,$inpperm
- vcipher $out3,$out3,v24
- stvx_u $in3,$x30,$out
- le?vperm $in5,$in5,$in5,$inpperm
- vcipher $out4,$out4,v24
- stvx_u $in4,$x40,$out
- le?vperm $in6,$in6,$in6,$inpperm
- vcipher $out5,$out5,v24
- stvx_u $in5,$x50,$out
- le?vperm $in7,$in7,$in7,$inpperm
- vcipher $out6,$out6,v24
- stvx_u $in6,$x60,$out
- vcipher $out7,$out7,v24
- stvx_u $in7,$x70,$out
- addi $out,$out,0x80
-
- b Loop_ctr32_enc8x_middle
-
-.align 5
-Lctr32_enc8x_break:
- cmpwi $len,-0x60
- blt Lctr32_enc8x_one
- nop
- beq Lctr32_enc8x_two
- cmpwi $len,-0x40
- blt Lctr32_enc8x_three
- nop
- beq Lctr32_enc8x_four
- cmpwi $len,-0x20
- blt Lctr32_enc8x_five
- nop
- beq Lctr32_enc8x_six
- cmpwi $len,0x00
- blt Lctr32_enc8x_seven
-
-Lctr32_enc8x_eight:
- vcipherlast $out0,$out0,$in0
- vcipherlast $out1,$out1,$in1
- vcipherlast $out2,$out2,$in2
- vcipherlast $out3,$out3,$in3
- vcipherlast $out4,$out4,$in4
- vcipherlast $out5,$out5,$in5
- vcipherlast $out6,$out6,$in6
- vcipherlast $out7,$out7,$in7
-
- le?vperm $out0,$out0,$out0,$inpperm
- le?vperm $out1,$out1,$out1,$inpperm
- stvx_u $out0,$x00,$out
- le?vperm $out2,$out2,$out2,$inpperm
- stvx_u $out1,$x10,$out
- le?vperm $out3,$out3,$out3,$inpperm
- stvx_u $out2,$x20,$out
- le?vperm $out4,$out4,$out4,$inpperm
- stvx_u $out3,$x30,$out
- le?vperm $out5,$out5,$out5,$inpperm
- stvx_u $out4,$x40,$out
- le?vperm $out6,$out6,$out6,$inpperm
- stvx_u $out5,$x50,$out
- le?vperm $out7,$out7,$out7,$inpperm
- stvx_u $out6,$x60,$out
- stvx_u $out7,$x70,$out
- addi $out,$out,0x80
- b Lctr32_enc8x_done
-
-.align 5
-Lctr32_enc8x_seven:
- vcipherlast $out0,$out0,$in1
- vcipherlast $out1,$out1,$in2
- vcipherlast $out2,$out2,$in3
- vcipherlast $out3,$out3,$in4
- vcipherlast $out4,$out4,$in5
- vcipherlast $out5,$out5,$in6
- vcipherlast $out6,$out6,$in7
-
- le?vperm $out0,$out0,$out0,$inpperm
- le?vperm $out1,$out1,$out1,$inpperm
- stvx_u $out0,$x00,$out
- le?vperm $out2,$out2,$out2,$inpperm
- stvx_u $out1,$x10,$out
- le?vperm $out3,$out3,$out3,$inpperm
- stvx_u $out2,$x20,$out
- le?vperm $out4,$out4,$out4,$inpperm
- stvx_u $out3,$x30,$out
- le?vperm $out5,$out5,$out5,$inpperm
- stvx_u $out4,$x40,$out
- le?vperm $out6,$out6,$out6,$inpperm
- stvx_u $out5,$x50,$out
- stvx_u $out6,$x60,$out
- addi $out,$out,0x70
- b Lctr32_enc8x_done
-
-.align 5
-Lctr32_enc8x_six:
- vcipherlast $out0,$out0,$in2
- vcipherlast $out1,$out1,$in3
- vcipherlast $out2,$out2,$in4
- vcipherlast $out3,$out3,$in5
- vcipherlast $out4,$out4,$in6
- vcipherlast $out5,$out5,$in7
-
- le?vperm $out0,$out0,$out0,$inpperm
- le?vperm $out1,$out1,$out1,$inpperm
- stvx_u $out0,$x00,$out
- le?vperm $out2,$out2,$out2,$inpperm
- stvx_u $out1,$x10,$out
- le?vperm $out3,$out3,$out3,$inpperm
- stvx_u $out2,$x20,$out
- le?vperm $out4,$out4,$out4,$inpperm
- stvx_u $out3,$x30,$out
- le?vperm $out5,$out5,$out5,$inpperm
- stvx_u $out4,$x40,$out
- stvx_u $out5,$x50,$out
- addi $out,$out,0x60
- b Lctr32_enc8x_done
-
-.align 5
-Lctr32_enc8x_five:
- vcipherlast $out0,$out0,$in3
- vcipherlast $out1,$out1,$in4
- vcipherlast $out2,$out2,$in5
- vcipherlast $out3,$out3,$in6
- vcipherlast $out4,$out4,$in7
-
- le?vperm $out0,$out0,$out0,$inpperm
- le?vperm $out1,$out1,$out1,$inpperm
- stvx_u $out0,$x00,$out
- le?vperm $out2,$out2,$out2,$inpperm
- stvx_u $out1,$x10,$out
- le?vperm $out3,$out3,$out3,$inpperm
- stvx_u $out2,$x20,$out
- le?vperm $out4,$out4,$out4,$inpperm
- stvx_u $out3,$x30,$out
- stvx_u $out4,$x40,$out
- addi $out,$out,0x50
- b Lctr32_enc8x_done
-
-.align 5
-Lctr32_enc8x_four:
- vcipherlast $out0,$out0,$in4
- vcipherlast $out1,$out1,$in5
- vcipherlast $out2,$out2,$in6
- vcipherlast $out3,$out3,$in7
-
- le?vperm $out0,$out0,$out0,$inpperm
- le?vperm $out1,$out1,$out1,$inpperm
- stvx_u $out0,$x00,$out
- le?vperm $out2,$out2,$out2,$inpperm
- stvx_u $out1,$x10,$out
- le?vperm $out3,$out3,$out3,$inpperm
- stvx_u $out2,$x20,$out
- stvx_u $out3,$x30,$out
- addi $out,$out,0x40
- b Lctr32_enc8x_done
-
-.align 5
-Lctr32_enc8x_three:
- vcipherlast $out0,$out0,$in5
- vcipherlast $out1,$out1,$in6
- vcipherlast $out2,$out2,$in7
-
- le?vperm $out0,$out0,$out0,$inpperm
- le?vperm $out1,$out1,$out1,$inpperm
- stvx_u $out0,$x00,$out
- le?vperm $out2,$out2,$out2,$inpperm
- stvx_u $out1,$x10,$out
- stvx_u $out2,$x20,$out
- addi $out,$out,0x30
- b Lctr32_enc8x_done
-
-.align 5
-Lctr32_enc8x_two:
- vcipherlast $out0,$out0,$in6
- vcipherlast $out1,$out1,$in7
-
- le?vperm $out0,$out0,$out0,$inpperm
- le?vperm $out1,$out1,$out1,$inpperm
- stvx_u $out0,$x00,$out
- stvx_u $out1,$x10,$out
- addi $out,$out,0x20
- b Lctr32_enc8x_done
-
-.align 5
-Lctr32_enc8x_one:
- vcipherlast $out0,$out0,$in7
-
- le?vperm $out0,$out0,$out0,$inpperm
- stvx_u $out0,0,$out
- addi $out,$out,0x10
-
-Lctr32_enc8x_done:
- li r10,`$FRAME+15`
- li r11,`$FRAME+31`
- stvx $inpperm,r10,$sp # wipe copies of round keys
- addi r10,r10,32
- stvx $inpperm,r11,$sp
- addi r11,r11,32
- stvx $inpperm,r10,$sp
- addi r10,r10,32
- stvx $inpperm,r11,$sp
- addi r11,r11,32
- stvx $inpperm,r10,$sp
- addi r10,r10,32
- stvx $inpperm,r11,$sp
- addi r11,r11,32
- stvx $inpperm,r10,$sp
- addi r10,r10,32
- stvx $inpperm,r11,$sp
- addi r11,r11,32
-
- mtspr 256,$vrsave
- lvx v20,r10,$sp # ABI says so
- addi r10,r10,32
- lvx v21,r11,$sp
- addi r11,r11,32
- lvx v22,r10,$sp
- addi r10,r10,32
- lvx v23,r11,$sp
- addi r11,r11,32
- lvx v24,r10,$sp
- addi r10,r10,32
- lvx v25,r11,$sp
- addi r11,r11,32
- lvx v26,r10,$sp
- addi r10,r10,32
- lvx v27,r11,$sp
- addi r11,r11,32
- lvx v28,r10,$sp
- addi r10,r10,32
- lvx v29,r11,$sp
- addi r11,r11,32
- lvx v30,r10,$sp
- lvx v31,r11,$sp
- $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
- $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
- $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
- $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
- $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
- $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
- addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
- blr
- .long 0
- .byte 0,12,0x14,0,0x80,6,6,0
- .long 0
-.size .${prefix}_ctr32_encrypt_blocks,.-.${prefix}_ctr32_encrypt_blocks
-___
-}} }}}
-
-#########################################################################
-{{{ # XTS procedures #
-# int aes_p8_xts_[en|de]crypt(const char *inp, char *out, size_t len, #
-# const AES_KEY *key1, const AES_KEY *key2, #
-# [const] unsigned char iv[16]); #
-# If $key2 is NULL, then a "tweak chaining" mode is engaged, in which #
-# input tweak value is assumed to be encrypted already, and last tweak #
-# value, one suitable for consecutive call on same chunk of data, is #
-# written back to original buffer. In addition, in "tweak chaining" #
-# mode only complete input blocks are processed. #
-
-my ($inp,$out,$len,$key1,$key2,$ivp,$rounds,$idx) = map("r$_",(3..10));
-my ($rndkey0,$rndkey1,$inout) = map("v$_",(0..2));
-my ($output,$inptail,$inpperm,$leperm,$keyperm) = map("v$_",(3..7));
-my ($tweak,$seven,$eighty7,$tmp,$tweak1) = map("v$_",(8..12));
-my $taillen = $key2;
-
- ($inp,$idx) = ($idx,$inp); # reassign
-
-$code.=<<___;
-.globl .${prefix}_xts_encrypt
- mr $inp,r3 # reassign
- li r3,-1
- ${UCMP}i $len,16
- bltlr-
-
- lis r0,0xfff0
- mfspr r12,256 # save vrsave
- li r11,0
- mtspr 256,r0
-
- vspltisb $seven,0x07 # 0x070707..07
- le?lvsl $leperm,r11,r11
- le?vspltisb $tmp,0x0f
- le?vxor $leperm,$leperm,$seven
-
- li $idx,15
- lvx $tweak,0,$ivp # load [unaligned] iv
- lvsl $inpperm,0,$ivp
- lvx $inptail,$idx,$ivp
- le?vxor $inpperm,$inpperm,$tmp
- vperm $tweak,$tweak,$inptail,$inpperm
-
- neg r11,$inp
- lvsr $inpperm,0,r11 # prepare for unaligned load
- lvx $inout,0,$inp
- addi $inp,$inp,15 # 15 is not typo
- le?vxor $inpperm,$inpperm,$tmp
-
- ${UCMP}i $key2,0 # key2==NULL?
- beq Lxts_enc_no_key2
-
- ?lvsl $keyperm,0,$key2 # prepare for unaligned key
- lwz $rounds,240($key2)
- srwi $rounds,$rounds,1
- subi $rounds,$rounds,1
- li $idx,16
-
- lvx $rndkey0,0,$key2
- lvx $rndkey1,$idx,$key2
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vxor $tweak,$tweak,$rndkey0
- lvx $rndkey0,$idx,$key2
- addi $idx,$idx,16
- mtctr $rounds
-
-Ltweak_xts_enc:
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vcipher $tweak,$tweak,$rndkey1
- lvx $rndkey1,$idx,$key2
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vcipher $tweak,$tweak,$rndkey0
- lvx $rndkey0,$idx,$key2
- addi $idx,$idx,16
- bdnz Ltweak_xts_enc
-
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vcipher $tweak,$tweak,$rndkey1
- lvx $rndkey1,$idx,$key2
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vcipherlast $tweak,$tweak,$rndkey0
-
- li $ivp,0 # don't chain the tweak
- b Lxts_enc
-
-Lxts_enc_no_key2:
- li $idx,-16
- and $len,$len,$idx # in "tweak chaining"
- # mode only complete
- # blocks are processed
-Lxts_enc:
- lvx $inptail,0,$inp
- addi $inp,$inp,16
-
- ?lvsl $keyperm,0,$key1 # prepare for unaligned key
- lwz $rounds,240($key1)
- srwi $rounds,$rounds,1
- subi $rounds,$rounds,1
- li $idx,16
-
- vslb $eighty7,$seven,$seven # 0x808080..80
- vor $eighty7,$eighty7,$seven # 0x878787..87
- vspltisb $tmp,1 # 0x010101..01
- vsldoi $eighty7,$eighty7,$tmp,15 # 0x870101..01
-
- ${UCMP}i $len,96
- bge _aesp8_xts_encrypt6x
-
- andi. $taillen,$len,15
- subic r0,$len,32
- subi $taillen,$taillen,16
- subfe r0,r0,r0
- and r0,r0,$taillen
- add $inp,$inp,r0
-
- lvx $rndkey0,0,$key1
- lvx $rndkey1,$idx,$key1
- addi $idx,$idx,16
- vperm $inout,$inout,$inptail,$inpperm
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vxor $inout,$inout,$tweak
- vxor $inout,$inout,$rndkey0
- lvx $rndkey0,$idx,$key1
- addi $idx,$idx,16
- mtctr $rounds
- b Loop_xts_enc
-
-.align 5
-Loop_xts_enc:
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vcipher $inout,$inout,$rndkey1
- lvx $rndkey1,$idx,$key1
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vcipher $inout,$inout,$rndkey0
- lvx $rndkey0,$idx,$key1
- addi $idx,$idx,16
- bdnz Loop_xts_enc
-
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vcipher $inout,$inout,$rndkey1
- lvx $rndkey1,$idx,$key1
- li $idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vxor $rndkey0,$rndkey0,$tweak
- vcipherlast $output,$inout,$rndkey0
-
- le?vperm $tmp,$output,$output,$leperm
- be?nop
- le?stvx_u $tmp,0,$out
- be?stvx_u $output,0,$out
- addi $out,$out,16
-
- subic. $len,$len,16
- beq Lxts_enc_done
-
- vmr $inout,$inptail
- lvx $inptail,0,$inp
- addi $inp,$inp,16
- lvx $rndkey0,0,$key1
- lvx $rndkey1,$idx,$key1
- addi $idx,$idx,16
-
- subic r0,$len,32
- subfe r0,r0,r0
- and r0,r0,$taillen
- add $inp,$inp,r0
-
- vsrab $tmp,$tweak,$seven # next tweak value
- vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
- vand $tmp,$tmp,$eighty7
- vxor $tweak,$tweak,$tmp
-
- vperm $inout,$inout,$inptail,$inpperm
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vxor $inout,$inout,$tweak
- vxor $output,$output,$rndkey0 # just in case $len<16
- vxor $inout,$inout,$rndkey0
- lvx $rndkey0,$idx,$key1
- addi $idx,$idx,16
-
- mtctr $rounds
- ${UCMP}i $len,16
- bge Loop_xts_enc
-
- vxor $output,$output,$tweak
- lvsr $inpperm,0,$len # $inpperm is no longer needed
- vxor $inptail,$inptail,$inptail # $inptail is no longer needed
- vspltisb $tmp,-1
- vperm $inptail,$inptail,$tmp,$inpperm
- vsel $inout,$inout,$output,$inptail
-
- subi r11,$out,17
- subi $out,$out,16
- mtctr $len
- li $len,16
-Loop_xts_enc_steal:
- lbzu r0,1(r11)
- stb r0,16(r11)
- bdnz Loop_xts_enc_steal
-
- mtctr $rounds
- b Loop_xts_enc # one more time...
-
-Lxts_enc_done:
- ${UCMP}i $ivp,0
- beq Lxts_enc_ret
-
- vsrab $tmp,$tweak,$seven # next tweak value
- vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
- vand $tmp,$tmp,$eighty7
- vxor $tweak,$tweak,$tmp
-
- le?vperm $tweak,$tweak,$tweak,$leperm
- stvx_u $tweak,0,$ivp
-
-Lxts_enc_ret:
- mtspr 256,r12 # restore vrsave
- li r3,0
- blr
- .long 0
- .byte 0,12,0x04,0,0x80,6,6,0
- .long 0
-.size .${prefix}_xts_encrypt,.-.${prefix}_xts_encrypt
-
-.globl .${prefix}_xts_decrypt
- mr $inp,r3 # reassign
- li r3,-1
- ${UCMP}i $len,16
- bltlr-
-
- lis r0,0xfff8
- mfspr r12,256 # save vrsave
- li r11,0
- mtspr 256,r0
-
- andi. r0,$len,15
- neg r0,r0
- andi. r0,r0,16
- sub $len,$len,r0
-
- vspltisb $seven,0x07 # 0x070707..07
- le?lvsl $leperm,r11,r11
- le?vspltisb $tmp,0x0f
- le?vxor $leperm,$leperm,$seven
-
- li $idx,15
- lvx $tweak,0,$ivp # load [unaligned] iv
- lvsl $inpperm,0,$ivp
- lvx $inptail,$idx,$ivp
- le?vxor $inpperm,$inpperm,$tmp
- vperm $tweak,$tweak,$inptail,$inpperm
-
- neg r11,$inp
- lvsr $inpperm,0,r11 # prepare for unaligned load
- lvx $inout,0,$inp
- addi $inp,$inp,15 # 15 is not typo
- le?vxor $inpperm,$inpperm,$tmp
-
- ${UCMP}i $key2,0 # key2==NULL?
- beq Lxts_dec_no_key2
-
- ?lvsl $keyperm,0,$key2 # prepare for unaligned key
- lwz $rounds,240($key2)
- srwi $rounds,$rounds,1
- subi $rounds,$rounds,1
- li $idx,16
-
- lvx $rndkey0,0,$key2
- lvx $rndkey1,$idx,$key2
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vxor $tweak,$tweak,$rndkey0
- lvx $rndkey0,$idx,$key2
- addi $idx,$idx,16
- mtctr $rounds
-
-Ltweak_xts_dec:
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vcipher $tweak,$tweak,$rndkey1
- lvx $rndkey1,$idx,$key2
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vcipher $tweak,$tweak,$rndkey0
- lvx $rndkey0,$idx,$key2
- addi $idx,$idx,16
- bdnz Ltweak_xts_dec
-
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vcipher $tweak,$tweak,$rndkey1
- lvx $rndkey1,$idx,$key2
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vcipherlast $tweak,$tweak,$rndkey0
-
- li $ivp,0 # don't chain the tweak
- b Lxts_dec
-
-Lxts_dec_no_key2:
- neg $idx,$len
- andi. $idx,$idx,15
- add $len,$len,$idx # in "tweak chaining"
- # mode only complete
- # blocks are processed
-Lxts_dec:
- lvx $inptail,0,$inp
- addi $inp,$inp,16
-
- ?lvsl $keyperm,0,$key1 # prepare for unaligned key
- lwz $rounds,240($key1)
- srwi $rounds,$rounds,1
- subi $rounds,$rounds,1
- li $idx,16
-
- vslb $eighty7,$seven,$seven # 0x808080..80
- vor $eighty7,$eighty7,$seven # 0x878787..87
- vspltisb $tmp,1 # 0x010101..01
- vsldoi $eighty7,$eighty7,$tmp,15 # 0x870101..01
-
- ${UCMP}i $len,96
- bge _aesp8_xts_decrypt6x
-
- lvx $rndkey0,0,$key1
- lvx $rndkey1,$idx,$key1
- addi $idx,$idx,16
- vperm $inout,$inout,$inptail,$inpperm
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vxor $inout,$inout,$tweak
- vxor $inout,$inout,$rndkey0
- lvx $rndkey0,$idx,$key1
- addi $idx,$idx,16
- mtctr $rounds
-
- ${UCMP}i $len,16
- blt Ltail_xts_dec
- be?b Loop_xts_dec
-
-.align 5
-Loop_xts_dec:
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vncipher $inout,$inout,$rndkey1
- lvx $rndkey1,$idx,$key1
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vncipher $inout,$inout,$rndkey0
- lvx $rndkey0,$idx,$key1
- addi $idx,$idx,16
- bdnz Loop_xts_dec
-
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vncipher $inout,$inout,$rndkey1
- lvx $rndkey1,$idx,$key1
- li $idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vxor $rndkey0,$rndkey0,$tweak
- vncipherlast $output,$inout,$rndkey0
-
- le?vperm $tmp,$output,$output,$leperm
- be?nop
- le?stvx_u $tmp,0,$out
- be?stvx_u $output,0,$out
- addi $out,$out,16
-
- subic. $len,$len,16
- beq Lxts_dec_done
-
- vmr $inout,$inptail
- lvx $inptail,0,$inp
- addi $inp,$inp,16
- lvx $rndkey0,0,$key1
- lvx $rndkey1,$idx,$key1
- addi $idx,$idx,16
-
- vsrab $tmp,$tweak,$seven # next tweak value
- vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
- vand $tmp,$tmp,$eighty7
- vxor $tweak,$tweak,$tmp
-
- vperm $inout,$inout,$inptail,$inpperm
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vxor $inout,$inout,$tweak
- vxor $inout,$inout,$rndkey0
- lvx $rndkey0,$idx,$key1
- addi $idx,$idx,16
-
- mtctr $rounds
- ${UCMP}i $len,16
- bge Loop_xts_dec
-
-Ltail_xts_dec:
- vsrab $tmp,$tweak,$seven # next tweak value
- vaddubm $tweak1,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
- vand $tmp,$tmp,$eighty7
- vxor $tweak1,$tweak1,$tmp
-
- subi $inp,$inp,16
- add $inp,$inp,$len
-
- vxor $inout,$inout,$tweak # :-(
- vxor $inout,$inout,$tweak1 # :-)
-
-Loop_xts_dec_short:
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vncipher $inout,$inout,$rndkey1
- lvx $rndkey1,$idx,$key1
- addi $idx,$idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vncipher $inout,$inout,$rndkey0
- lvx $rndkey0,$idx,$key1
- addi $idx,$idx,16
- bdnz Loop_xts_dec_short
-
- ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
- vncipher $inout,$inout,$rndkey1
- lvx $rndkey1,$idx,$key1
- li $idx,16
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
- vxor $rndkey0,$rndkey0,$tweak1
- vncipherlast $output,$inout,$rndkey0
-
- le?vperm $tmp,$output,$output,$leperm
- be?nop
- le?stvx_u $tmp,0,$out
- be?stvx_u $output,0,$out
-
- vmr $inout,$inptail
- lvx $inptail,0,$inp
- #addi $inp,$inp,16
- lvx $rndkey0,0,$key1
- lvx $rndkey1,$idx,$key1
- addi $idx,$idx,16
- vperm $inout,$inout,$inptail,$inpperm
- ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
-
- lvsr $inpperm,0,$len # $inpperm is no longer needed
- vxor $inptail,$inptail,$inptail # $inptail is no longer needed
- vspltisb $tmp,-1
- vperm $inptail,$inptail,$tmp,$inpperm
- vsel $inout,$inout,$output,$inptail
-
- vxor $rndkey0,$rndkey0,$tweak
- vxor $inout,$inout,$rndkey0
- lvx $rndkey0,$idx,$key1
- addi $idx,$idx,16
-
- subi r11,$out,1
- mtctr $len
- li $len,16
-Loop_xts_dec_steal:
- lbzu r0,1(r11)
- stb r0,16(r11)
- bdnz Loop_xts_dec_steal
-
- mtctr $rounds
- b Loop_xts_dec # one more time...
-
-Lxts_dec_done:
- ${UCMP}i $ivp,0
- beq Lxts_dec_ret
-
- vsrab $tmp,$tweak,$seven # next tweak value
- vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
- vand $tmp,$tmp,$eighty7
- vxor $tweak,$tweak,$tmp
-
- le?vperm $tweak,$tweak,$tweak,$leperm
- stvx_u $tweak,0,$ivp
-
-Lxts_dec_ret:
- mtspr 256,r12 # restore vrsave
- li r3,0
- blr
- .long 0
- .byte 0,12,0x04,0,0x80,6,6,0
- .long 0
-.size .${prefix}_xts_decrypt,.-.${prefix}_xts_decrypt
-___
-#########################################################################
-{{ # Optimized XTS procedures #
-my $key_=$key2;
-my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,3,26..31));
- $x00=0 if ($flavour =~ /osx/);
-my ($in0, $in1, $in2, $in3, $in4, $in5 )=map("v$_",(0..5));
-my ($out0, $out1, $out2, $out3, $out4, $out5)=map("v$_",(7,12..16));
-my ($twk0, $twk1, $twk2, $twk3, $twk4, $twk5)=map("v$_",(17..22));
-my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
- # v26-v31 last 6 round keys
-my ($keyperm)=($out0); # aliases with "caller", redundant assignment
-my $taillen=$x70;
-
-$code.=<<___;
-.align 5
-_aesp8_xts_encrypt6x:
- $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
- mflr r11
- li r7,`$FRAME+8*16+15`
- li r3,`$FRAME+8*16+31`
- $PUSH r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp)
- stvx v20,r7,$sp # ABI says so
- addi r7,r7,32
- stvx v21,r3,$sp
- addi r3,r3,32
- stvx v22,r7,$sp
- addi r7,r7,32
- stvx v23,r3,$sp
- addi r3,r3,32
- stvx v24,r7,$sp
- addi r7,r7,32
- stvx v25,r3,$sp
- addi r3,r3,32
- stvx v26,r7,$sp
- addi r7,r7,32
- stvx v27,r3,$sp
- addi r3,r3,32
- stvx v28,r7,$sp
- addi r7,r7,32
- stvx v29,r3,$sp
- addi r3,r3,32
- stvx v30,r7,$sp
- stvx v31,r3,$sp
- li r0,-1
- stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
- li $x10,0x10
- $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
- li $x20,0x20
- $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
- li $x30,0x30
- $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
- li $x40,0x40
- $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
- li $x50,0x50
- $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
- li $x60,0x60
- $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
- li $x70,0x70
- mtspr 256,r0
-
- xxlor 2, 32+$eighty7, 32+$eighty7
- vsldoi $eighty7,$tmp,$eighty7,1 # 0x010101..87
- xxlor 1, 32+$eighty7, 32+$eighty7
-
- # Load XOR Lconsts.
- mr $x70, r6
- bl Lconsts
- lxvw4x 0, $x40, r6 # load XOR contents
- mr r6, $x70
- li $x70,0x70
-
- subi $rounds,$rounds,3 # -4 in total
-
- lvx $rndkey0,$x00,$key1 # load key schedule
- lvx v30,$x10,$key1
- addi $key1,$key1,0x20
- lvx v31,$x00,$key1
- ?vperm $rndkey0,$rndkey0,v30,$keyperm
- addi $key_,$sp,$FRAME+15
- mtctr $rounds
-
-Load_xts_enc_key:
- ?vperm v24,v30,v31,$keyperm
- lvx v30,$x10,$key1
- addi $key1,$key1,0x20
- stvx v24,$x00,$key_ # off-load round[1]
- ?vperm v25,v31,v30,$keyperm
- lvx v31,$x00,$key1
- stvx v25,$x10,$key_ # off-load round[2]
- addi $key_,$key_,0x20
- bdnz Load_xts_enc_key
-
- lvx v26,$x10,$key1
- ?vperm v24,v30,v31,$keyperm
- lvx v27,$x20,$key1
- stvx v24,$x00,$key_ # off-load round[3]
- ?vperm v25,v31,v26,$keyperm
- lvx v28,$x30,$key1
- stvx v25,$x10,$key_ # off-load round[4]
- addi $key_,$sp,$FRAME+15 # rewind $key_
- ?vperm v26,v26,v27,$keyperm
- lvx v29,$x40,$key1
- ?vperm v27,v27,v28,$keyperm
- lvx v30,$x50,$key1
- ?vperm v28,v28,v29,$keyperm
- lvx v31,$x60,$key1
- ?vperm v29,v29,v30,$keyperm
- lvx $twk5,$x70,$key1 # borrow $twk5
- ?vperm v30,v30,v31,$keyperm
- lvx v24,$x00,$key_ # pre-load round[1]
- ?vperm v31,v31,$twk5,$keyperm
- lvx v25,$x10,$key_ # pre-load round[2]
-
- # Switch to use the following codes with 0x010101..87 to generate tweak.
- # eighty7 = 0x010101..87
- # vsrab tmp, tweak, seven # next tweak value, right shift 7 bits
- # vand tmp, tmp, eighty7 # last byte with carry
- # vaddubm tweak, tweak, tweak # left shift 1 bit (x2)
- # xxlor vsx, 0, 0
- # vpermxor tweak, tweak, tmp, vsx
-
- vperm $in0,$inout,$inptail,$inpperm
- subi $inp,$inp,31 # undo "caller"
- vxor $twk0,$tweak,$rndkey0
- vsrab $tmp,$tweak,$seven # next tweak value
- vaddubm $tweak,$tweak,$tweak
- vand $tmp,$tmp,$eighty7
- vxor $out0,$in0,$twk0
- xxlor 32+$in1, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in1
-
- lvx_u $in1,$x10,$inp
- vxor $twk1,$tweak,$rndkey0
- vsrab $tmp,$tweak,$seven # next tweak value
- vaddubm $tweak,$tweak,$tweak
- le?vperm $in1,$in1,$in1,$leperm
- vand $tmp,$tmp,$eighty7
- vxor $out1,$in1,$twk1
- xxlor 32+$in2, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in2
-
- lvx_u $in2,$x20,$inp
- andi. $taillen,$len,15
- vxor $twk2,$tweak,$rndkey0
- vsrab $tmp,$tweak,$seven # next tweak value
- vaddubm $tweak,$tweak,$tweak
- le?vperm $in2,$in2,$in2,$leperm
- vand $tmp,$tmp,$eighty7
- vxor $out2,$in2,$twk2
- xxlor 32+$in3, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in3
-
- lvx_u $in3,$x30,$inp
- sub $len,$len,$taillen
- vxor $twk3,$tweak,$rndkey0
- vsrab $tmp,$tweak,$seven # next tweak value
- vaddubm $tweak,$tweak,$tweak
- le?vperm $in3,$in3,$in3,$leperm
- vand $tmp,$tmp,$eighty7
- vxor $out3,$in3,$twk3
- xxlor 32+$in4, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in4
-
- lvx_u $in4,$x40,$inp
- subi $len,$len,0x60
- vxor $twk4,$tweak,$rndkey0
- vsrab $tmp,$tweak,$seven # next tweak value
- vaddubm $tweak,$tweak,$tweak
- le?vperm $in4,$in4,$in4,$leperm
- vand $tmp,$tmp,$eighty7
- vxor $out4,$in4,$twk4
- xxlor 32+$in5, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in5
-
- lvx_u $in5,$x50,$inp
- addi $inp,$inp,0x60
- vxor $twk5,$tweak,$rndkey0
- vsrab $tmp,$tweak,$seven # next tweak value
- vaddubm $tweak,$tweak,$tweak
- le?vperm $in5,$in5,$in5,$leperm
- vand $tmp,$tmp,$eighty7
- vxor $out5,$in5,$twk5
- xxlor 32+$in0, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in0
-
- vxor v31,v31,$rndkey0
- mtctr $rounds
- b Loop_xts_enc6x
-
-.align 5
-Loop_xts_enc6x:
- vcipher $out0,$out0,v24
- vcipher $out1,$out1,v24
- vcipher $out2,$out2,v24
- vcipher $out3,$out3,v24
- vcipher $out4,$out4,v24
- vcipher $out5,$out5,v24
- lvx v24,$x20,$key_ # round[3]
- addi $key_,$key_,0x20
-
- vcipher $out0,$out0,v25
- vcipher $out1,$out1,v25
- vcipher $out2,$out2,v25
- vcipher $out3,$out3,v25
- vcipher $out4,$out4,v25
- vcipher $out5,$out5,v25
- lvx v25,$x10,$key_ # round[4]
- bdnz Loop_xts_enc6x
-
- xxlor 32+$eighty7, 1, 1 # 0x010101..87
-
- subic $len,$len,96 # $len-=96
- vxor $in0,$twk0,v31 # xor with last round key
- vcipher $out0,$out0,v24
- vcipher $out1,$out1,v24
- vsrab $tmp,$tweak,$seven # next tweak value
- vxor $twk0,$tweak,$rndkey0
- vaddubm $tweak,$tweak,$tweak
- vcipher $out2,$out2,v24
- vcipher $out3,$out3,v24
- vcipher $out4,$out4,v24
- vcipher $out5,$out5,v24
-
- subfe. r0,r0,r0 # borrow?-1:0
- vand $tmp,$tmp,$eighty7
- vcipher $out0,$out0,v25
- vcipher $out1,$out1,v25
- xxlor 32+$in1, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in1
- vcipher $out2,$out2,v25
- vcipher $out3,$out3,v25
- vxor $in1,$twk1,v31
- vsrab $tmp,$tweak,$seven # next tweak value
- vxor $twk1,$tweak,$rndkey0
- vcipher $out4,$out4,v25
- vcipher $out5,$out5,v25
-
- and r0,r0,$len
- vaddubm $tweak,$tweak,$tweak
- vcipher $out0,$out0,v26
- vcipher $out1,$out1,v26
- vand $tmp,$tmp,$eighty7
- vcipher $out2,$out2,v26
- vcipher $out3,$out3,v26
- xxlor 32+$in2, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in2
- vcipher $out4,$out4,v26
- vcipher $out5,$out5,v26
-
- add $inp,$inp,r0 # $inp is adjusted in such
- # way that at exit from the
- # loop inX-in5 are loaded
- # with last "words"
- vxor $in2,$twk2,v31
- vsrab $tmp,$tweak,$seven # next tweak value
- vxor $twk2,$tweak,$rndkey0
- vaddubm $tweak,$tweak,$tweak
- vcipher $out0,$out0,v27
- vcipher $out1,$out1,v27
- vcipher $out2,$out2,v27
- vcipher $out3,$out3,v27
- vand $tmp,$tmp,$eighty7
- vcipher $out4,$out4,v27
- vcipher $out5,$out5,v27
-
- addi $key_,$sp,$FRAME+15 # rewind $key_
- xxlor 32+$in3, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in3
- vcipher $out0,$out0,v28
- vcipher $out1,$out1,v28
- vxor $in3,$twk3,v31
- vsrab $tmp,$tweak,$seven # next tweak value
- vxor $twk3,$tweak,$rndkey0
- vcipher $out2,$out2,v28
- vcipher $out3,$out3,v28
- vaddubm $tweak,$tweak,$tweak
- vcipher $out4,$out4,v28
- vcipher $out5,$out5,v28
- lvx v24,$x00,$key_ # re-pre-load round[1]
- vand $tmp,$tmp,$eighty7
-
- vcipher $out0,$out0,v29
- vcipher $out1,$out1,v29
- xxlor 32+$in4, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in4
- vcipher $out2,$out2,v29
- vcipher $out3,$out3,v29
- vxor $in4,$twk4,v31
- vsrab $tmp,$tweak,$seven # next tweak value
- vxor $twk4,$tweak,$rndkey0
- vcipher $out4,$out4,v29
- vcipher $out5,$out5,v29
- lvx v25,$x10,$key_ # re-pre-load round[2]
- vaddubm $tweak,$tweak,$tweak
-
- vcipher $out0,$out0,v30
- vcipher $out1,$out1,v30
- vand $tmp,$tmp,$eighty7
- vcipher $out2,$out2,v30
- vcipher $out3,$out3,v30
- xxlor 32+$in5, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in5
- vcipher $out4,$out4,v30
- vcipher $out5,$out5,v30
- vxor $in5,$twk5,v31
- vsrab $tmp,$tweak,$seven # next tweak value
- vxor $twk5,$tweak,$rndkey0
-
- vcipherlast $out0,$out0,$in0
- lvx_u $in0,$x00,$inp # load next input block
- vaddubm $tweak,$tweak,$tweak
- vcipherlast $out1,$out1,$in1
- lvx_u $in1,$x10,$inp
- vcipherlast $out2,$out2,$in2
- le?vperm $in0,$in0,$in0,$leperm
- lvx_u $in2,$x20,$inp
- vand $tmp,$tmp,$eighty7
- vcipherlast $out3,$out3,$in3
- le?vperm $in1,$in1,$in1,$leperm
- lvx_u $in3,$x30,$inp
- vcipherlast $out4,$out4,$in4
- le?vperm $in2,$in2,$in2,$leperm
- lvx_u $in4,$x40,$inp
- xxlor 10, 32+$in0, 32+$in0
- xxlor 32+$in0, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in0
- xxlor 32+$in0, 10, 10
- vcipherlast $tmp,$out5,$in5 # last block might be needed
- # in stealing mode
- le?vperm $in3,$in3,$in3,$leperm
- lvx_u $in5,$x50,$inp
- addi $inp,$inp,0x60
- le?vperm $in4,$in4,$in4,$leperm
- le?vperm $in5,$in5,$in5,$leperm
-
- le?vperm $out0,$out0,$out0,$leperm
- le?vperm $out1,$out1,$out1,$leperm
- stvx_u $out0,$x00,$out # store output
- vxor $out0,$in0,$twk0
- le?vperm $out2,$out2,$out2,$leperm
- stvx_u $out1,$x10,$out
- vxor $out1,$in1,$twk1
- le?vperm $out3,$out3,$out3,$leperm
- stvx_u $out2,$x20,$out
- vxor $out2,$in2,$twk2
- le?vperm $out4,$out4,$out4,$leperm
- stvx_u $out3,$x30,$out
- vxor $out3,$in3,$twk3
- le?vperm $out5,$tmp,$tmp,$leperm
- stvx_u $out4,$x40,$out
- vxor $out4,$in4,$twk4
- le?stvx_u $out5,$x50,$out
- be?stvx_u $tmp, $x50,$out
- vxor $out5,$in5,$twk5
- addi $out,$out,0x60
-
- mtctr $rounds
- beq Loop_xts_enc6x # did $len-=96 borrow?
-
- xxlor 32+$eighty7, 2, 2 # 0x010101..87
-
- addic. $len,$len,0x60
- beq Lxts_enc6x_zero
- cmpwi $len,0x20
- blt Lxts_enc6x_one
- nop
- beq Lxts_enc6x_two
- cmpwi $len,0x40
- blt Lxts_enc6x_three
- nop
- beq Lxts_enc6x_four
-
-Lxts_enc6x_five:
- vxor $out0,$in1,$twk0
- vxor $out1,$in2,$twk1
- vxor $out2,$in3,$twk2
- vxor $out3,$in4,$twk3
- vxor $out4,$in5,$twk4
-
- bl _aesp8_xts_enc5x
-
- le?vperm $out0,$out0,$out0,$leperm
- vmr $twk0,$twk5 # unused tweak
- le?vperm $out1,$out1,$out1,$leperm
- stvx_u $out0,$x00,$out # store output
- le?vperm $out2,$out2,$out2,$leperm
- stvx_u $out1,$x10,$out
- le?vperm $out3,$out3,$out3,$leperm
- stvx_u $out2,$x20,$out
- vxor $tmp,$out4,$twk5 # last block prep for stealing
- le?vperm $out4,$out4,$out4,$leperm
- stvx_u $out3,$x30,$out
- stvx_u $out4,$x40,$out
- addi $out,$out,0x50
- bne Lxts_enc6x_steal
- b Lxts_enc6x_done
-
-.align 4
-Lxts_enc6x_four:
- vxor $out0,$in2,$twk0
- vxor $out1,$in3,$twk1
- vxor $out2,$in4,$twk2
- vxor $out3,$in5,$twk3
- vxor $out4,$out4,$out4
-
- bl _aesp8_xts_enc5x
-
- le?vperm $out0,$out0,$out0,$leperm
- vmr $twk0,$twk4 # unused tweak
- le?vperm $out1,$out1,$out1,$leperm
- stvx_u $out0,$x00,$out # store output
- le?vperm $out2,$out2,$out2,$leperm
- stvx_u $out1,$x10,$out
- vxor $tmp,$out3,$twk4 # last block prep for stealing
- le?vperm $out3,$out3,$out3,$leperm
- stvx_u $out2,$x20,$out
- stvx_u $out3,$x30,$out
- addi $out,$out,0x40
- bne Lxts_enc6x_steal
- b Lxts_enc6x_done
-
-.align 4
-Lxts_enc6x_three:
- vxor $out0,$in3,$twk0
- vxor $out1,$in4,$twk1
- vxor $out2,$in5,$twk2
- vxor $out3,$out3,$out3
- vxor $out4,$out4,$out4
-
- bl _aesp8_xts_enc5x
-
- le?vperm $out0,$out0,$out0,$leperm
- vmr $twk0,$twk3 # unused tweak
- le?vperm $out1,$out1,$out1,$leperm
- stvx_u $out0,$x00,$out # store output
- vxor $tmp,$out2,$twk3 # last block prep for stealing
- le?vperm $out2,$out2,$out2,$leperm
- stvx_u $out1,$x10,$out
- stvx_u $out2,$x20,$out
- addi $out,$out,0x30
- bne Lxts_enc6x_steal
- b Lxts_enc6x_done
-
-.align 4
-Lxts_enc6x_two:
- vxor $out0,$in4,$twk0
- vxor $out1,$in5,$twk1
- vxor $out2,$out2,$out2
- vxor $out3,$out3,$out3
- vxor $out4,$out4,$out4
-
- bl _aesp8_xts_enc5x
-
- le?vperm $out0,$out0,$out0,$leperm
- vmr $twk0,$twk2 # unused tweak
- vxor $tmp,$out1,$twk2 # last block prep for stealing
- le?vperm $out1,$out1,$out1,$leperm
- stvx_u $out0,$x00,$out # store output
- stvx_u $out1,$x10,$out
- addi $out,$out,0x20
- bne Lxts_enc6x_steal
- b Lxts_enc6x_done
-
-.align 4
-Lxts_enc6x_one:
- vxor $out0,$in5,$twk0
- nop
-Loop_xts_enc1x:
- vcipher $out0,$out0,v24
- lvx v24,$x20,$key_ # round[3]
- addi $key_,$key_,0x20
-
- vcipher $out0,$out0,v25
- lvx v25,$x10,$key_ # round[4]
- bdnz Loop_xts_enc1x
-
- add $inp,$inp,$taillen
- cmpwi $taillen,0
- vcipher $out0,$out0,v24
-
- subi $inp,$inp,16
- vcipher $out0,$out0,v25
-
- lvsr $inpperm,0,$taillen
- vcipher $out0,$out0,v26
-
- lvx_u $in0,0,$inp
- vcipher $out0,$out0,v27
-
- addi $key_,$sp,$FRAME+15 # rewind $key_
- vcipher $out0,$out0,v28
- lvx v24,$x00,$key_ # re-pre-load round[1]
-
- vcipher $out0,$out0,v29
- lvx v25,$x10,$key_ # re-pre-load round[2]
- vxor $twk0,$twk0,v31
-
- le?vperm $in0,$in0,$in0,$leperm
- vcipher $out0,$out0,v30
-
- vperm $in0,$in0,$in0,$inpperm
- vcipherlast $out0,$out0,$twk0
-
- vmr $twk0,$twk1 # unused tweak
- vxor $tmp,$out0,$twk1 # last block prep for stealing
- le?vperm $out0,$out0,$out0,$leperm
- stvx_u $out0,$x00,$out # store output
- addi $out,$out,0x10
- bne Lxts_enc6x_steal
- b Lxts_enc6x_done
-
-.align 4
-Lxts_enc6x_zero:
- cmpwi $taillen,0
- beq Lxts_enc6x_done
-
- add $inp,$inp,$taillen
- subi $inp,$inp,16
- lvx_u $in0,0,$inp
- lvsr $inpperm,0,$taillen # $in5 is no more
- le?vperm $in0,$in0,$in0,$leperm
- vperm $in0,$in0,$in0,$inpperm
- vxor $tmp,$tmp,$twk0
-Lxts_enc6x_steal:
- vxor $in0,$in0,$twk0
- vxor $out0,$out0,$out0
- vspltisb $out1,-1
- vperm $out0,$out0,$out1,$inpperm
- vsel $out0,$in0,$tmp,$out0 # $tmp is last block, remember?
-
- subi r30,$out,17
- subi $out,$out,16
- mtctr $taillen
-Loop_xts_enc6x_steal:
- lbzu r0,1(r30)
- stb r0,16(r30)
- bdnz Loop_xts_enc6x_steal
-
- li $taillen,0
- mtctr $rounds
- b Loop_xts_enc1x # one more time...
-
-.align 4
-Lxts_enc6x_done:
- ${UCMP}i $ivp,0
- beq Lxts_enc6x_ret
-
- vxor $tweak,$twk0,$rndkey0
- le?vperm $tweak,$tweak,$tweak,$leperm
- stvx_u $tweak,0,$ivp
-
-Lxts_enc6x_ret:
- mtlr r11
- li r10,`$FRAME+15`
- li r11,`$FRAME+31`
- stvx $seven,r10,$sp # wipe copies of round keys
- addi r10,r10,32
- stvx $seven,r11,$sp
- addi r11,r11,32
- stvx $seven,r10,$sp
- addi r10,r10,32
- stvx $seven,r11,$sp
- addi r11,r11,32
- stvx $seven,r10,$sp
- addi r10,r10,32
- stvx $seven,r11,$sp
- addi r11,r11,32
- stvx $seven,r10,$sp
- addi r10,r10,32
- stvx $seven,r11,$sp
- addi r11,r11,32
-
- mtspr 256,$vrsave
- lvx v20,r10,$sp # ABI says so
- addi r10,r10,32
- lvx v21,r11,$sp
- addi r11,r11,32
- lvx v22,r10,$sp
- addi r10,r10,32
- lvx v23,r11,$sp
- addi r11,r11,32
- lvx v24,r10,$sp
- addi r10,r10,32
- lvx v25,r11,$sp
- addi r11,r11,32
- lvx v26,r10,$sp
- addi r10,r10,32
- lvx v27,r11,$sp
- addi r11,r11,32
- lvx v28,r10,$sp
- addi r10,r10,32
- lvx v29,r11,$sp
- addi r11,r11,32
- lvx v30,r10,$sp
- lvx v31,r11,$sp
- $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
- $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
- $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
- $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
- $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
- $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
- addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
- blr
- .long 0
- .byte 0,12,0x04,1,0x80,6,6,0
- .long 0
-
-.align 5
-_aesp8_xts_enc5x:
- vcipher $out0,$out0,v24
- vcipher $out1,$out1,v24
- vcipher $out2,$out2,v24
- vcipher $out3,$out3,v24
- vcipher $out4,$out4,v24
- lvx v24,$x20,$key_ # round[3]
- addi $key_,$key_,0x20
-
- vcipher $out0,$out0,v25
- vcipher $out1,$out1,v25
- vcipher $out2,$out2,v25
- vcipher $out3,$out3,v25
- vcipher $out4,$out4,v25
- lvx v25,$x10,$key_ # round[4]
- bdnz _aesp8_xts_enc5x
-
- add $inp,$inp,$taillen
- cmpwi $taillen,0
- vcipher $out0,$out0,v24
- vcipher $out1,$out1,v24
- vcipher $out2,$out2,v24
- vcipher $out3,$out3,v24
- vcipher $out4,$out4,v24
-
- subi $inp,$inp,16
- vcipher $out0,$out0,v25
- vcipher $out1,$out1,v25
- vcipher $out2,$out2,v25
- vcipher $out3,$out3,v25
- vcipher $out4,$out4,v25
- vxor $twk0,$twk0,v31
-
- vcipher $out0,$out0,v26
- lvsr $inpperm,r0,$taillen # $in5 is no more
- vcipher $out1,$out1,v26
- vcipher $out2,$out2,v26
- vcipher $out3,$out3,v26
- vcipher $out4,$out4,v26
- vxor $in1,$twk1,v31
-
- vcipher $out0,$out0,v27
- lvx_u $in0,0,$inp
- vcipher $out1,$out1,v27
- vcipher $out2,$out2,v27
- vcipher $out3,$out3,v27
- vcipher $out4,$out4,v27
- vxor $in2,$twk2,v31
-
- addi $key_,$sp,$FRAME+15 # rewind $key_
- vcipher $out0,$out0,v28
- vcipher $out1,$out1,v28
- vcipher $out2,$out2,v28
- vcipher $out3,$out3,v28
- vcipher $out4,$out4,v28
- lvx v24,$x00,$key_ # re-pre-load round[1]
- vxor $in3,$twk3,v31
-
- vcipher $out0,$out0,v29
- le?vperm $in0,$in0,$in0,$leperm
- vcipher $out1,$out1,v29
- vcipher $out2,$out2,v29
- vcipher $out3,$out3,v29
- vcipher $out4,$out4,v29
- lvx v25,$x10,$key_ # re-pre-load round[2]
- vxor $in4,$twk4,v31
-
- vcipher $out0,$out0,v30
- vperm $in0,$in0,$in0,$inpperm
- vcipher $out1,$out1,v30
- vcipher $out2,$out2,v30
- vcipher $out3,$out3,v30
- vcipher $out4,$out4,v30
-
- vcipherlast $out0,$out0,$twk0
- vcipherlast $out1,$out1,$in1
- vcipherlast $out2,$out2,$in2
- vcipherlast $out3,$out3,$in3
- vcipherlast $out4,$out4,$in4
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,0,0
-
-.align 5
-_aesp8_xts_decrypt6x:
- $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
- mflr r11
- li r7,`$FRAME+8*16+15`
- li r3,`$FRAME+8*16+31`
- $PUSH r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp)
- stvx v20,r7,$sp # ABI says so
- addi r7,r7,32
- stvx v21,r3,$sp
- addi r3,r3,32
- stvx v22,r7,$sp
- addi r7,r7,32
- stvx v23,r3,$sp
- addi r3,r3,32
- stvx v24,r7,$sp
- addi r7,r7,32
- stvx v25,r3,$sp
- addi r3,r3,32
- stvx v26,r7,$sp
- addi r7,r7,32
- stvx v27,r3,$sp
- addi r3,r3,32
- stvx v28,r7,$sp
- addi r7,r7,32
- stvx v29,r3,$sp
- addi r3,r3,32
- stvx v30,r7,$sp
- stvx v31,r3,$sp
- li r0,-1
- stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
- li $x10,0x10
- $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
- li $x20,0x20
- $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
- li $x30,0x30
- $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
- li $x40,0x40
- $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
- li $x50,0x50
- $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
- li $x60,0x60
- $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
- li $x70,0x70
- mtspr 256,r0
-
- xxlor 2, 32+$eighty7, 32+$eighty7
- vsldoi $eighty7,$tmp,$eighty7,1 # 0x010101..87
- xxlor 1, 32+$eighty7, 32+$eighty7
-
- # Load XOR Lconsts.
- mr $x70, r6
- bl Lconsts
- lxvw4x 0, $x40, r6 # load XOR contents
- mr r6, $x70
- li $x70,0x70
-
- subi $rounds,$rounds,3 # -4 in total
-
- lvx $rndkey0,$x00,$key1 # load key schedule
- lvx v30,$x10,$key1
- addi $key1,$key1,0x20
- lvx v31,$x00,$key1
- ?vperm $rndkey0,$rndkey0,v30,$keyperm
- addi $key_,$sp,$FRAME+15
- mtctr $rounds
-
-Load_xts_dec_key:
- ?vperm v24,v30,v31,$keyperm
- lvx v30,$x10,$key1
- addi $key1,$key1,0x20
- stvx v24,$x00,$key_ # off-load round[1]
- ?vperm v25,v31,v30,$keyperm
- lvx v31,$x00,$key1
- stvx v25,$x10,$key_ # off-load round[2]
- addi $key_,$key_,0x20
- bdnz Load_xts_dec_key
-
- lvx v26,$x10,$key1
- ?vperm v24,v30,v31,$keyperm
- lvx v27,$x20,$key1
- stvx v24,$x00,$key_ # off-load round[3]
- ?vperm v25,v31,v26,$keyperm
- lvx v28,$x30,$key1
- stvx v25,$x10,$key_ # off-load round[4]
- addi $key_,$sp,$FRAME+15 # rewind $key_
- ?vperm v26,v26,v27,$keyperm
- lvx v29,$x40,$key1
- ?vperm v27,v27,v28,$keyperm
- lvx v30,$x50,$key1
- ?vperm v28,v28,v29,$keyperm
- lvx v31,$x60,$key1
- ?vperm v29,v29,v30,$keyperm
- lvx $twk5,$x70,$key1 # borrow $twk5
- ?vperm v30,v30,v31,$keyperm
- lvx v24,$x00,$key_ # pre-load round[1]
- ?vperm v31,v31,$twk5,$keyperm
- lvx v25,$x10,$key_ # pre-load round[2]
-
- vperm $in0,$inout,$inptail,$inpperm
- subi $inp,$inp,31 # undo "caller"
- vxor $twk0,$tweak,$rndkey0
- vsrab $tmp,$tweak,$seven # next tweak value
- vaddubm $tweak,$tweak,$tweak
- vand $tmp,$tmp,$eighty7
- vxor $out0,$in0,$twk0
- xxlor 32+$in1, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in1
-
- lvx_u $in1,$x10,$inp
- vxor $twk1,$tweak,$rndkey0
- vsrab $tmp,$tweak,$seven # next tweak value
- vaddubm $tweak,$tweak,$tweak
- le?vperm $in1,$in1,$in1,$leperm
- vand $tmp,$tmp,$eighty7
- vxor $out1,$in1,$twk1
- xxlor 32+$in2, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in2
-
- lvx_u $in2,$x20,$inp
- andi. $taillen,$len,15
- vxor $twk2,$tweak,$rndkey0
- vsrab $tmp,$tweak,$seven # next tweak value
- vaddubm $tweak,$tweak,$tweak
- le?vperm $in2,$in2,$in2,$leperm
- vand $tmp,$tmp,$eighty7
- vxor $out2,$in2,$twk2
- xxlor 32+$in3, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in3
-
- lvx_u $in3,$x30,$inp
- sub $len,$len,$taillen
- vxor $twk3,$tweak,$rndkey0
- vsrab $tmp,$tweak,$seven # next tweak value
- vaddubm $tweak,$tweak,$tweak
- le?vperm $in3,$in3,$in3,$leperm
- vand $tmp,$tmp,$eighty7
- vxor $out3,$in3,$twk3
- xxlor 32+$in4, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in4
-
- lvx_u $in4,$x40,$inp
- subi $len,$len,0x60
- vxor $twk4,$tweak,$rndkey0
- vsrab $tmp,$tweak,$seven # next tweak value
- vaddubm $tweak,$tweak,$tweak
- le?vperm $in4,$in4,$in4,$leperm
- vand $tmp,$tmp,$eighty7
- vxor $out4,$in4,$twk4
- xxlor 32+$in5, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in5
-
- lvx_u $in5,$x50,$inp
- addi $inp,$inp,0x60
- vxor $twk5,$tweak,$rndkey0
- vsrab $tmp,$tweak,$seven # next tweak value
- vaddubm $tweak,$tweak,$tweak
- le?vperm $in5,$in5,$in5,$leperm
- vand $tmp,$tmp,$eighty7
- vxor $out5,$in5,$twk5
- xxlor 32+$in0, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in0
-
- vxor v31,v31,$rndkey0
- mtctr $rounds
- b Loop_xts_dec6x
-
-.align 5
-Loop_xts_dec6x:
- vncipher $out0,$out0,v24
- vncipher $out1,$out1,v24
- vncipher $out2,$out2,v24
- vncipher $out3,$out3,v24
- vncipher $out4,$out4,v24
- vncipher $out5,$out5,v24
- lvx v24,$x20,$key_ # round[3]
- addi $key_,$key_,0x20
-
- vncipher $out0,$out0,v25
- vncipher $out1,$out1,v25
- vncipher $out2,$out2,v25
- vncipher $out3,$out3,v25
- vncipher $out4,$out4,v25
- vncipher $out5,$out5,v25
- lvx v25,$x10,$key_ # round[4]
- bdnz Loop_xts_dec6x
-
- xxlor 32+$eighty7, 1, 1 # 0x010101..87
-
- subic $len,$len,96 # $len-=96
- vxor $in0,$twk0,v31 # xor with last round key
- vncipher $out0,$out0,v24
- vncipher $out1,$out1,v24
- vsrab $tmp,$tweak,$seven # next tweak value
- vxor $twk0,$tweak,$rndkey0
- vaddubm $tweak,$tweak,$tweak
- vncipher $out2,$out2,v24
- vncipher $out3,$out3,v24
- vncipher $out4,$out4,v24
- vncipher $out5,$out5,v24
-
- subfe. r0,r0,r0 # borrow?-1:0
- vand $tmp,$tmp,$eighty7
- vncipher $out0,$out0,v25
- vncipher $out1,$out1,v25
- xxlor 32+$in1, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in1
- vncipher $out2,$out2,v25
- vncipher $out3,$out3,v25
- vxor $in1,$twk1,v31
- vsrab $tmp,$tweak,$seven # next tweak value
- vxor $twk1,$tweak,$rndkey0
- vncipher $out4,$out4,v25
- vncipher $out5,$out5,v25
-
- and r0,r0,$len
- vaddubm $tweak,$tweak,$tweak
- vncipher $out0,$out0,v26
- vncipher $out1,$out1,v26
- vand $tmp,$tmp,$eighty7
- vncipher $out2,$out2,v26
- vncipher $out3,$out3,v26
- xxlor 32+$in2, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in2
- vncipher $out4,$out4,v26
- vncipher $out5,$out5,v26
-
- add $inp,$inp,r0 # $inp is adjusted in such
- # way that at exit from the
- # loop inX-in5 are loaded
- # with last "words"
- vxor $in2,$twk2,v31
- vsrab $tmp,$tweak,$seven # next tweak value
- vxor $twk2,$tweak,$rndkey0
- vaddubm $tweak,$tweak,$tweak
- vncipher $out0,$out0,v27
- vncipher $out1,$out1,v27
- vncipher $out2,$out2,v27
- vncipher $out3,$out3,v27
- vand $tmp,$tmp,$eighty7
- vncipher $out4,$out4,v27
- vncipher $out5,$out5,v27
-
- addi $key_,$sp,$FRAME+15 # rewind $key_
- xxlor 32+$in3, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in3
- vncipher $out0,$out0,v28
- vncipher $out1,$out1,v28
- vxor $in3,$twk3,v31
- vsrab $tmp,$tweak,$seven # next tweak value
- vxor $twk3,$tweak,$rndkey0
- vncipher $out2,$out2,v28
- vncipher $out3,$out3,v28
- vaddubm $tweak,$tweak,$tweak
- vncipher $out4,$out4,v28
- vncipher $out5,$out5,v28
- lvx v24,$x00,$key_ # re-pre-load round[1]
- vand $tmp,$tmp,$eighty7
-
- vncipher $out0,$out0,v29
- vncipher $out1,$out1,v29
- xxlor 32+$in4, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in4
- vncipher $out2,$out2,v29
- vncipher $out3,$out3,v29
- vxor $in4,$twk4,v31
- vsrab $tmp,$tweak,$seven # next tweak value
- vxor $twk4,$tweak,$rndkey0
- vncipher $out4,$out4,v29
- vncipher $out5,$out5,v29
- lvx v25,$x10,$key_ # re-pre-load round[2]
- vaddubm $tweak,$tweak,$tweak
-
- vncipher $out0,$out0,v30
- vncipher $out1,$out1,v30
- vand $tmp,$tmp,$eighty7
- vncipher $out2,$out2,v30
- vncipher $out3,$out3,v30
- xxlor 32+$in5, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in5
- vncipher $out4,$out4,v30
- vncipher $out5,$out5,v30
- vxor $in5,$twk5,v31
- vsrab $tmp,$tweak,$seven # next tweak value
- vxor $twk5,$tweak,$rndkey0
-
- vncipherlast $out0,$out0,$in0
- lvx_u $in0,$x00,$inp # load next input block
- vaddubm $tweak,$tweak,$tweak
- vncipherlast $out1,$out1,$in1
- lvx_u $in1,$x10,$inp
- vncipherlast $out2,$out2,$in2
- le?vperm $in0,$in0,$in0,$leperm
- lvx_u $in2,$x20,$inp
- vand $tmp,$tmp,$eighty7
- vncipherlast $out3,$out3,$in3
- le?vperm $in1,$in1,$in1,$leperm
- lvx_u $in3,$x30,$inp
- vncipherlast $out4,$out4,$in4
- le?vperm $in2,$in2,$in2,$leperm
- lvx_u $in4,$x40,$inp
- xxlor 10, 32+$in0, 32+$in0
- xxlor 32+$in0, 0, 0
- vpermxor $tweak, $tweak, $tmp, $in0
- xxlor 32+$in0, 10, 10
- vncipherlast $out5,$out5,$in5
- le?vperm $in3,$in3,$in3,$leperm
- lvx_u $in5,$x50,$inp
- addi $inp,$inp,0x60
- le?vperm $in4,$in4,$in4,$leperm
- le?vperm $in5,$in5,$in5,$leperm
-
- le?vperm $out0,$out0,$out0,$leperm
- le?vperm $out1,$out1,$out1,$leperm
- stvx_u $out0,$x00,$out # store output
- vxor $out0,$in0,$twk0
- le?vperm $out2,$out2,$out2,$leperm
- stvx_u $out1,$x10,$out
- vxor $out1,$in1,$twk1
- le?vperm $out3,$out3,$out3,$leperm
- stvx_u $out2,$x20,$out
- vxor $out2,$in2,$twk2
- le?vperm $out4,$out4,$out4,$leperm
- stvx_u $out3,$x30,$out
- vxor $out3,$in3,$twk3
- le?vperm $out5,$out5,$out5,$leperm
- stvx_u $out4,$x40,$out
- vxor $out4,$in4,$twk4
- stvx_u $out5,$x50,$out
- vxor $out5,$in5,$twk5
- addi $out,$out,0x60
-
- mtctr $rounds
- beq Loop_xts_dec6x # did $len-=96 borrow?
-
- xxlor 32+$eighty7, 2, 2 # 0x010101..87
-
- addic. $len,$len,0x60
- beq Lxts_dec6x_zero
- cmpwi $len,0x20
- blt Lxts_dec6x_one
- nop
- beq Lxts_dec6x_two
- cmpwi $len,0x40
- blt Lxts_dec6x_three
- nop
- beq Lxts_dec6x_four
-
-Lxts_dec6x_five:
- vxor $out0,$in1,$twk0
- vxor $out1,$in2,$twk1
- vxor $out2,$in3,$twk2
- vxor $out3,$in4,$twk3
- vxor $out4,$in5,$twk4
-
- bl _aesp8_xts_dec5x
-
- le?vperm $out0,$out0,$out0,$leperm
- vmr $twk0,$twk5 # unused tweak
- vxor $twk1,$tweak,$rndkey0
- le?vperm $out1,$out1,$out1,$leperm
- stvx_u $out0,$x00,$out # store output
- vxor $out0,$in0,$twk1
- le?vperm $out2,$out2,$out2,$leperm
- stvx_u $out1,$x10,$out
- le?vperm $out3,$out3,$out3,$leperm
- stvx_u $out2,$x20,$out
- le?vperm $out4,$out4,$out4,$leperm
- stvx_u $out3,$x30,$out
- stvx_u $out4,$x40,$out
- addi $out,$out,0x50
- bne Lxts_dec6x_steal
- b Lxts_dec6x_done
-
-.align 4
-Lxts_dec6x_four:
- vxor $out0,$in2,$twk0
- vxor $out1,$in3,$twk1
- vxor $out2,$in4,$twk2
- vxor $out3,$in5,$twk3
- vxor $out4,$out4,$out4
-
- bl _aesp8_xts_dec5x
-
- le?vperm $out0,$out0,$out0,$leperm
- vmr $twk0,$twk4 # unused tweak
- vmr $twk1,$twk5
- le?vperm $out1,$out1,$out1,$leperm
- stvx_u $out0,$x00,$out # store output
- vxor $out0,$in0,$twk5
- le?vperm $out2,$out2,$out2,$leperm
- stvx_u $out1,$x10,$out
- le?vperm $out3,$out3,$out3,$leperm
- stvx_u $out2,$x20,$out
- stvx_u $out3,$x30,$out
- addi $out,$out,0x40
- bne Lxts_dec6x_steal
- b Lxts_dec6x_done
-
-.align 4
-Lxts_dec6x_three:
- vxor $out0,$in3,$twk0
- vxor $out1,$in4,$twk1
- vxor $out2,$in5,$twk2
- vxor $out3,$out3,$out3
- vxor $out4,$out4,$out4
-
- bl _aesp8_xts_dec5x
-
- le?vperm $out0,$out0,$out0,$leperm
- vmr $twk0,$twk3 # unused tweak
- vmr $twk1,$twk4
- le?vperm $out1,$out1,$out1,$leperm
- stvx_u $out0,$x00,$out # store output
- vxor $out0,$in0,$twk4
- le?vperm $out2,$out2,$out2,$leperm
- stvx_u $out1,$x10,$out
- stvx_u $out2,$x20,$out
- addi $out,$out,0x30
- bne Lxts_dec6x_steal
- b Lxts_dec6x_done
-
-.align 4
-Lxts_dec6x_two:
- vxor $out0,$in4,$twk0
- vxor $out1,$in5,$twk1
- vxor $out2,$out2,$out2
- vxor $out3,$out3,$out3
- vxor $out4,$out4,$out4
-
- bl _aesp8_xts_dec5x
-
- le?vperm $out0,$out0,$out0,$leperm
- vmr $twk0,$twk2 # unused tweak
- vmr $twk1,$twk3
- le?vperm $out1,$out1,$out1,$leperm
- stvx_u $out0,$x00,$out # store output
- vxor $out0,$in0,$twk3
- stvx_u $out1,$x10,$out
- addi $out,$out,0x20
- bne Lxts_dec6x_steal
- b Lxts_dec6x_done
-
-.align 4
-Lxts_dec6x_one:
- vxor $out0,$in5,$twk0
- nop
-Loop_xts_dec1x:
- vncipher $out0,$out0,v24
- lvx v24,$x20,$key_ # round[3]
- addi $key_,$key_,0x20
-
- vncipher $out0,$out0,v25
- lvx v25,$x10,$key_ # round[4]
- bdnz Loop_xts_dec1x
-
- subi r0,$taillen,1
- vncipher $out0,$out0,v24
-
- andi. r0,r0,16
- cmpwi $taillen,0
- vncipher $out0,$out0,v25
-
- sub $inp,$inp,r0
- vncipher $out0,$out0,v26
-
- lvx_u $in0,0,$inp
- vncipher $out0,$out0,v27
-
- addi $key_,$sp,$FRAME+15 # rewind $key_
- vncipher $out0,$out0,v28
- lvx v24,$x00,$key_ # re-pre-load round[1]
-
- vncipher $out0,$out0,v29
- lvx v25,$x10,$key_ # re-pre-load round[2]
- vxor $twk0,$twk0,v31
-
- le?vperm $in0,$in0,$in0,$leperm
- vncipher $out0,$out0,v30
-
- mtctr $rounds
- vncipherlast $out0,$out0,$twk0
-
- vmr $twk0,$twk1 # unused tweak
- vmr $twk1,$twk2
- le?vperm $out0,$out0,$out0,$leperm
- stvx_u $out0,$x00,$out # store output
- addi $out,$out,0x10
- vxor $out0,$in0,$twk2
- bne Lxts_dec6x_steal
- b Lxts_dec6x_done
-
-.align 4
-Lxts_dec6x_zero:
- cmpwi $taillen,0
- beq Lxts_dec6x_done
-
- lvx_u $in0,0,$inp
- le?vperm $in0,$in0,$in0,$leperm
- vxor $out0,$in0,$twk1
-Lxts_dec6x_steal:
- vncipher $out0,$out0,v24
- lvx v24,$x20,$key_ # round[3]
- addi $key_,$key_,0x20
-
- vncipher $out0,$out0,v25
- lvx v25,$x10,$key_ # round[4]
- bdnz Lxts_dec6x_steal
-
- add $inp,$inp,$taillen
- vncipher $out0,$out0,v24
-
- cmpwi $taillen,0
- vncipher $out0,$out0,v25
-
- lvx_u $in0,0,$inp
- vncipher $out0,$out0,v26
-
- lvsr $inpperm,0,$taillen # $in5 is no more
- vncipher $out0,$out0,v27
-
- addi $key_,$sp,$FRAME+15 # rewind $key_
- vncipher $out0,$out0,v28
- lvx v24,$x00,$key_ # re-pre-load round[1]
-
- vncipher $out0,$out0,v29
- lvx v25,$x10,$key_ # re-pre-load round[2]
- vxor $twk1,$twk1,v31
-
- le?vperm $in0,$in0,$in0,$leperm
- vncipher $out0,$out0,v30
-
- vperm $in0,$in0,$in0,$inpperm
- vncipherlast $tmp,$out0,$twk1
-
- le?vperm $out0,$tmp,$tmp,$leperm
- le?stvx_u $out0,0,$out
- be?stvx_u $tmp,0,$out
-
- vxor $out0,$out0,$out0
- vspltisb $out1,-1
- vperm $out0,$out0,$out1,$inpperm
- vsel $out0,$in0,$tmp,$out0
- vxor $out0,$out0,$twk0
-
- subi r30,$out,1
- mtctr $taillen
-Loop_xts_dec6x_steal:
- lbzu r0,1(r30)
- stb r0,16(r30)
- bdnz Loop_xts_dec6x_steal
-
- li $taillen,0
- mtctr $rounds
- b Loop_xts_dec1x # one more time...
-
-.align 4
-Lxts_dec6x_done:
- ${UCMP}i $ivp,0
- beq Lxts_dec6x_ret
-
- vxor $tweak,$twk0,$rndkey0
- le?vperm $tweak,$tweak,$tweak,$leperm
- stvx_u $tweak,0,$ivp
-
-Lxts_dec6x_ret:
- mtlr r11
- li r10,`$FRAME+15`
- li r11,`$FRAME+31`
- stvx $seven,r10,$sp # wipe copies of round keys
- addi r10,r10,32
- stvx $seven,r11,$sp
- addi r11,r11,32
- stvx $seven,r10,$sp
- addi r10,r10,32
- stvx $seven,r11,$sp
- addi r11,r11,32
- stvx $seven,r10,$sp
- addi r10,r10,32
- stvx $seven,r11,$sp
- addi r11,r11,32
- stvx $seven,r10,$sp
- addi r10,r10,32
- stvx $seven,r11,$sp
- addi r11,r11,32
-
- mtspr 256,$vrsave
- lvx v20,r10,$sp # ABI says so
- addi r10,r10,32
- lvx v21,r11,$sp
- addi r11,r11,32
- lvx v22,r10,$sp
- addi r10,r10,32
- lvx v23,r11,$sp
- addi r11,r11,32
- lvx v24,r10,$sp
- addi r10,r10,32
- lvx v25,r11,$sp
- addi r11,r11,32
- lvx v26,r10,$sp
- addi r10,r10,32
- lvx v27,r11,$sp
- addi r11,r11,32
- lvx v28,r10,$sp
- addi r10,r10,32
- lvx v29,r11,$sp
- addi r11,r11,32
- lvx v30,r10,$sp
- lvx v31,r11,$sp
- $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
- $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
- $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
- $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
- $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
- $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
- addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
- blr
- .long 0
- .byte 0,12,0x04,1,0x80,6,6,0
- .long 0
-
-.align 5
-_aesp8_xts_dec5x:
- vncipher $out0,$out0,v24
- vncipher $out1,$out1,v24
- vncipher $out2,$out2,v24
- vncipher $out3,$out3,v24
- vncipher $out4,$out4,v24
- lvx v24,$x20,$key_ # round[3]
- addi $key_,$key_,0x20
-
- vncipher $out0,$out0,v25
- vncipher $out1,$out1,v25
- vncipher $out2,$out2,v25
- vncipher $out3,$out3,v25
- vncipher $out4,$out4,v25
- lvx v25,$x10,$key_ # round[4]
- bdnz _aesp8_xts_dec5x
-
- subi r0,$taillen,1
- vncipher $out0,$out0,v24
- vncipher $out1,$out1,v24
- vncipher $out2,$out2,v24
- vncipher $out3,$out3,v24
- vncipher $out4,$out4,v24
-
- andi. r0,r0,16
- cmpwi $taillen,0
- vncipher $out0,$out0,v25
- vncipher $out1,$out1,v25
- vncipher $out2,$out2,v25
- vncipher $out3,$out3,v25
- vncipher $out4,$out4,v25
- vxor $twk0,$twk0,v31
-
- sub $inp,$inp,r0
- vncipher $out0,$out0,v26
- vncipher $out1,$out1,v26
- vncipher $out2,$out2,v26
- vncipher $out3,$out3,v26
- vncipher $out4,$out4,v26
- vxor $in1,$twk1,v31
-
- vncipher $out0,$out0,v27
- lvx_u $in0,0,$inp
- vncipher $out1,$out1,v27
- vncipher $out2,$out2,v27
- vncipher $out3,$out3,v27
- vncipher $out4,$out4,v27
- vxor $in2,$twk2,v31
-
- addi $key_,$sp,$FRAME+15 # rewind $key_
- vncipher $out0,$out0,v28
- vncipher $out1,$out1,v28
- vncipher $out2,$out2,v28
- vncipher $out3,$out3,v28
- vncipher $out4,$out4,v28
- lvx v24,$x00,$key_ # re-pre-load round[1]
- vxor $in3,$twk3,v31
-
- vncipher $out0,$out0,v29
- le?vperm $in0,$in0,$in0,$leperm
- vncipher $out1,$out1,v29
- vncipher $out2,$out2,v29
- vncipher $out3,$out3,v29
- vncipher $out4,$out4,v29
- lvx v25,$x10,$key_ # re-pre-load round[2]
- vxor $in4,$twk4,v31
-
- vncipher $out0,$out0,v30
- vncipher $out1,$out1,v30
- vncipher $out2,$out2,v30
- vncipher $out3,$out3,v30
- vncipher $out4,$out4,v30
-
- vncipherlast $out0,$out0,$twk0
- vncipherlast $out1,$out1,$in1
- vncipherlast $out2,$out2,$in2
- vncipherlast $out3,$out3,$in3
- vncipherlast $out4,$out4,$in4
- mtctr $rounds
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,0,0
-___
-}} }}}
-
-my $consts=1;
-foreach(split("\n",$code)) {
- s/\`([^\`]*)\`/eval($1)/geo;
-
- # constants table endian-specific conversion
- if ($consts && m/\.(long|byte)\s+(.+)\s+(\?[a-z]*)$/o) {
- my $conv=$3;
- my @bytes=();
-
- # convert to endian-agnostic format
- if ($1 eq "long") {
- foreach (split(/,\s*/,$2)) {
- my $l = /^0/?oct:int;
- push @bytes,($l>>24)&0xff,($l>>16)&0xff,($l>>8)&0xff,$l&0xff;
- }
- } else {
- @bytes = map(/^0/?oct:int,split(/,\s*/,$2));
- }
-
- # little-endian conversion
- if ($flavour =~ /le$/o) {
- SWITCH: for($conv) {
- /\?inv/ && do { @bytes=map($_^0xf,@bytes); last; };
- /\?rev/ && do { @bytes=reverse(@bytes); last; };
- }
- }
-
- #emit
- print ".byte\t",join(',',map (sprintf("0x%02x",$_),@bytes)),"\n";
- next;
- }
- $consts=0 if (m/Lconsts:/o); # end of table
-
- # instructions prefixed with '?' are endian-specific and need
- # to be adjusted accordingly...
- if ($flavour =~ /le$/o) { # little-endian
- s/le\?//o or
- s/be\?/#be#/o or
- s/\?lvsr/lvsl/o or
- s/\?lvsl/lvsr/o or
- s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/o or
- s/\?(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/o or
- s/\?(vspltw\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9])/$1$2 3-$3/o;
- } else { # big-endian
- s/le\?/#le#/o or
- s/be\?//o or
- s/\?([a-z]+)/$1/o;
- }
-
- print $_,"\n";
-}
-
-close STDOUT;
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
deleted file mode 100644
index 77eca20bc7ac6..0000000000000
--- a/drivers/crypto/vmx/ghash.c
+++ /dev/null
@@ -1,185 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GHASH routines supporting VMX instructions on the Power 8
- *
- * Copyright (C) 2015, 2019 International Business Machines Inc.
- *
- * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
- *
- * Extended by Daniel Axtens <dja@axtens.net> to replace the fallback
- * mechanism. The new approach is based on arm64 code, which is:
- * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
- */
-
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
-#include <asm/simd.h>
-#include <asm/switch_to.h>
-#include <crypto/aes.h>
-#include <crypto/ghash.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-#include <crypto/b128ops.h>
-#include "aesp8-ppc.h"
-
-void gcm_init_p8(u128 htable[16], const u64 Xi[2]);
-void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]);
-void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
- const u8 *in, size_t len);
-
-struct p8_ghash_ctx {
- /* key used by vector asm */
- u128 htable[16];
- /* key used by software fallback */
- be128 key;
-};
-
-struct p8_ghash_desc_ctx {
- u64 shash[2];
- u8 buffer[GHASH_DIGEST_SIZE];
- int bytes;
-};
-
-static int p8_ghash_init(struct shash_desc *desc)
-{
- struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-
- dctx->bytes = 0;
- memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
- return 0;
-}
-
-static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm));
-
- if (keylen != GHASH_BLOCK_SIZE)
- return -EINVAL;
-
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- gcm_init_p8(ctx->htable, (const u64 *) key);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- memcpy(&ctx->key, key, GHASH_BLOCK_SIZE);
-
- return 0;
-}
-
-static inline void __ghash_block(struct p8_ghash_ctx *ctx,
- struct p8_ghash_desc_ctx *dctx)
-{
- if (crypto_simd_usable()) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- gcm_ghash_p8(dctx->shash, ctx->htable,
- dctx->buffer, GHASH_DIGEST_SIZE);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
- } else {
- crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE);
- gf128mul_lle((be128 *)dctx->shash, &ctx->key);
- }
-}
-
-static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
- struct p8_ghash_desc_ctx *dctx,
- const u8 *src, unsigned int srclen)
-{
- if (crypto_simd_usable()) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- gcm_ghash_p8(dctx->shash, ctx->htable,
- src, srclen);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
- } else {
- while (srclen >= GHASH_BLOCK_SIZE) {
- crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE);
- gf128mul_lle((be128 *)dctx->shash, &ctx->key);
- srclen -= GHASH_BLOCK_SIZE;
- src += GHASH_BLOCK_SIZE;
- }
- }
-}
-
-static int p8_ghash_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen)
-{
- unsigned int len;
- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
- struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-
- if (dctx->bytes) {
- if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
- memcpy(dctx->buffer + dctx->bytes, src,
- srclen);
- dctx->bytes += srclen;
- return 0;
- }
- memcpy(dctx->buffer + dctx->bytes, src,
- GHASH_DIGEST_SIZE - dctx->bytes);
-
- __ghash_block(ctx, dctx);
-
- src += GHASH_DIGEST_SIZE - dctx->bytes;
- srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
- dctx->bytes = 0;
- }
- len = srclen & ~(GHASH_DIGEST_SIZE - 1);
- if (len) {
- __ghash_blocks(ctx, dctx, src, len);
- src += len;
- srclen -= len;
- }
- if (srclen) {
- memcpy(dctx->buffer, src, srclen);
- dctx->bytes = srclen;
- }
- return 0;
-}
-
-static int p8_ghash_final(struct shash_desc *desc, u8 *out)
-{
- int i;
- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
- struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-
- if (dctx->bytes) {
- for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
- dctx->buffer[i] = 0;
- __ghash_block(ctx, dctx);
- dctx->bytes = 0;
- }
- memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
- return 0;
-}
-
-struct shash_alg p8_ghash_alg = {
- .digestsize = GHASH_DIGEST_SIZE,
- .init = p8_ghash_init,
- .update = p8_ghash_update,
- .final = p8_ghash_final,
- .setkey = p8_ghash_setkey,
- .descsize = sizeof(struct p8_ghash_desc_ctx)
- + sizeof(struct ghash_desc_ctx),
- .base = {
- .cra_name = "ghash",
- .cra_driver_name = "p8_ghash",
- .cra_priority = 1000,
- .cra_blocksize = GHASH_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct p8_ghash_ctx),
- .cra_module = THIS_MODULE,
- },
-};
diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/drivers/crypto/vmx/ghashp8-ppc.pl
deleted file mode 100644
index 041e633c214f5..0000000000000
--- a/drivers/crypto/vmx/ghashp8-ppc.pl
+++ /dev/null
@@ -1,243 +0,0 @@
-#!/usr/bin/env perl
-# SPDX-License-Identifier: GPL-2.0
-
-# This code is taken from the OpenSSL project but the author (Andy Polyakov)
-# has relicensed it under the GPLv2. Therefore this program is free software;
-# you can redistribute it and/or modify it under the terms of the GNU General
-# Public License version 2 as published by the Free Software Foundation.
-#
-# The original headers, including the original license headers, are
-# included below for completeness.
-
-# ====================================================================
-# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see https://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# GHASH for PowerISA v2.07.
-#
-# July 2014
-#
-# Accurate performance measurements are problematic, because it's
-# always virtualized setup with possibly throttled processor.
-# Relative comparison is therefore more informative. This initial
-# version is ~2.1x slower than hardware-assisted AES-128-CTR, ~12x
-# faster than "4-bit" integer-only compiler-generated 64-bit code.
-# "Initial version" means that there is room for futher improvement.
-
-$flavour=shift;
-$output =shift;
-
-if ($flavour =~ /64/) {
- $SIZE_T=8;
- $LRSAVE=2*$SIZE_T;
- $STU="stdu";
- $POP="ld";
- $PUSH="std";
-} elsif ($flavour =~ /32/) {
- $SIZE_T=4;
- $LRSAVE=$SIZE_T;
- $STU="stwu";
- $POP="lwz";
- $PUSH="stw";
-} else { die "nonsense $flavour"; }
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
-die "can't locate ppc-xlate.pl";
-
-open STDOUT,"| $^X $xlate $flavour $output" || die "can't call $xlate: $!";
-
-my ($Xip,$Htbl,$inp,$len)=map("r$_",(3..6)); # argument block
-
-my ($Xl,$Xm,$Xh,$IN)=map("v$_",(0..3));
-my ($zero,$t0,$t1,$t2,$xC2,$H,$Hh,$Hl,$lemask)=map("v$_",(4..12));
-my $vrsave="r12";
-
-$code=<<___;
-.machine "any"
-
-.text
-
-.globl .gcm_init_p8
- lis r0,0xfff0
- li r8,0x10
- mfspr $vrsave,256
- li r9,0x20
- mtspr 256,r0
- li r10,0x30
- lvx_u $H,0,r4 # load H
- le?xor r7,r7,r7
- le?addi r7,r7,0x8 # need a vperm start with 08
- le?lvsr 5,0,r7
- le?vspltisb 6,0x0f
- le?vxor 5,5,6 # set a b-endian mask
- le?vperm $H,$H,$H,5
-
- vspltisb $xC2,-16 # 0xf0
- vspltisb $t0,1 # one
- vaddubm $xC2,$xC2,$xC2 # 0xe0
- vxor $zero,$zero,$zero
- vor $xC2,$xC2,$t0 # 0xe1
- vsldoi $xC2,$xC2,$zero,15 # 0xe1...
- vsldoi $t1,$zero,$t0,1 # ...1
- vaddubm $xC2,$xC2,$xC2 # 0xc2...
- vspltisb $t2,7
- vor $xC2,$xC2,$t1 # 0xc2....01
- vspltb $t1,$H,0 # most significant byte
- vsl $H,$H,$t0 # H<<=1
- vsrab $t1,$t1,$t2 # broadcast carry bit
- vand $t1,$t1,$xC2
- vxor $H,$H,$t1 # twisted H
-
- vsldoi $H,$H,$H,8 # twist even more ...
- vsldoi $xC2,$zero,$xC2,8 # 0xc2.0
- vsldoi $Hl,$zero,$H,8 # ... and split
- vsldoi $Hh,$H,$zero,8
-
- stvx_u $xC2,0,r3 # save pre-computed table
- stvx_u $Hl,r8,r3
- stvx_u $H, r9,r3
- stvx_u $Hh,r10,r3
-
- mtspr 256,$vrsave
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,2,0
- .long 0
-.size .gcm_init_p8,.-.gcm_init_p8
-
-.globl .gcm_gmult_p8
- lis r0,0xfff8
- li r8,0x10
- mfspr $vrsave,256
- li r9,0x20
- mtspr 256,r0
- li r10,0x30
- lvx_u $IN,0,$Xip # load Xi
-
- lvx_u $Hl,r8,$Htbl # load pre-computed table
- le?lvsl $lemask,r0,r0
- lvx_u $H, r9,$Htbl
- le?vspltisb $t0,0x07
- lvx_u $Hh,r10,$Htbl
- le?vxor $lemask,$lemask,$t0
- lvx_u $xC2,0,$Htbl
- le?vperm $IN,$IN,$IN,$lemask
- vxor $zero,$zero,$zero
-
- vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
- vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
- vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
-
- vpmsumd $t2,$Xl,$xC2 # 1st phase
-
- vsldoi $t0,$Xm,$zero,8
- vsldoi $t1,$zero,$Xm,8
- vxor $Xl,$Xl,$t0
- vxor $Xh,$Xh,$t1
-
- vsldoi $Xl,$Xl,$Xl,8
- vxor $Xl,$Xl,$t2
-
- vsldoi $t1,$Xl,$Xl,8 # 2nd phase
- vpmsumd $Xl,$Xl,$xC2
- vxor $t1,$t1,$Xh
- vxor $Xl,$Xl,$t1
-
- le?vperm $Xl,$Xl,$Xl,$lemask
- stvx_u $Xl,0,$Xip # write out Xi
-
- mtspr 256,$vrsave
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,2,0
- .long 0
-.size .gcm_gmult_p8,.-.gcm_gmult_p8
-
-.globl .gcm_ghash_p8
- lis r0,0xfff8
- li r8,0x10
- mfspr $vrsave,256
- li r9,0x20
- mtspr 256,r0
- li r10,0x30
- lvx_u $Xl,0,$Xip # load Xi
-
- lvx_u $Hl,r8,$Htbl # load pre-computed table
- le?lvsl $lemask,r0,r0
- lvx_u $H, r9,$Htbl
- le?vspltisb $t0,0x07
- lvx_u $Hh,r10,$Htbl
- le?vxor $lemask,$lemask,$t0
- lvx_u $xC2,0,$Htbl
- le?vperm $Xl,$Xl,$Xl,$lemask
- vxor $zero,$zero,$zero
-
- lvx_u $IN,0,$inp
- addi $inp,$inp,16
- subi $len,$len,16
- le?vperm $IN,$IN,$IN,$lemask
- vxor $IN,$IN,$Xl
- b Loop
-
-.align 5
-Loop:
- subic $len,$len,16
- vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
- subfe. r0,r0,r0 # borrow?-1:0
- vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
- and r0,r0,$len
- vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
- add $inp,$inp,r0
-
- vpmsumd $t2,$Xl,$xC2 # 1st phase
-
- vsldoi $t0,$Xm,$zero,8
- vsldoi $t1,$zero,$Xm,8
- vxor $Xl,$Xl,$t0
- vxor $Xh,$Xh,$t1
-
- vsldoi $Xl,$Xl,$Xl,8
- vxor $Xl,$Xl,$t2
- lvx_u $IN,0,$inp
- addi $inp,$inp,16
-
- vsldoi $t1,$Xl,$Xl,8 # 2nd phase
- vpmsumd $Xl,$Xl,$xC2
- le?vperm $IN,$IN,$IN,$lemask
- vxor $t1,$t1,$Xh
- vxor $IN,$IN,$t1
- vxor $IN,$IN,$Xl
- beq Loop # did $len-=16 borrow?
-
- vxor $Xl,$Xl,$t1
- le?vperm $Xl,$Xl,$Xl,$lemask
- stvx_u $Xl,0,$Xip # write out Xi
-
- mtspr 256,$vrsave
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,4,0
- .long 0
-.size .gcm_ghash_p8,.-.gcm_ghash_p8
-
-.asciz "GHASH for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
-.align 2
-___
-
-foreach (split("\n",$code)) {
- if ($flavour =~ /le$/o) { # little-endian
- s/le\?//o or
- s/be\?/#be#/o;
- } else {
- s/le\?/#le#/o or
- s/be\?//o;
- }
- print $_,"\n";
-}
-
-close STDOUT; # enforce flush
diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl
deleted file mode 100644
index b583898c11ae8..0000000000000
--- a/drivers/crypto/vmx/ppc-xlate.pl
+++ /dev/null
@@ -1,231 +0,0 @@
-#!/usr/bin/env perl
-# SPDX-License-Identifier: GPL-2.0
-
-# PowerPC assembler distiller by <appro>.
-
-my $flavour = shift;
-my $output = shift;
-open STDOUT,">$output" || die "can't open $output: $!";
-
-my %GLOBALS;
-my $dotinlocallabels=($flavour=~/linux/)?1:0;
-my $elfv2abi=(($flavour =~ /linux-ppc64le/) or ($flavour =~ /linux-ppc64-elfv2/))?1:0;
-my $dotfunctions=($elfv2abi=~1)?0:1;
-
-################################################################
-# directives which need special treatment on different platforms
-################################################################
-my $globl = sub {
- my $junk = shift;
- my $name = shift;
- my $global = \$GLOBALS{$name};
- my $ret;
-
- $name =~ s|^[\.\_]||;
-
- SWITCH: for ($flavour) {
- /aix/ && do { $name = ".$name";
- last;
- };
- /osx/ && do { $name = "_$name";
- last;
- };
- /linux/
- && do { $ret = "_GLOBAL($name)";
- last;
- };
- }
-
- $ret = ".globl $name\nalign 5\n$name:" if (!$ret);
- $$global = $name;
- $ret;
-};
-my $text = sub {
- my $ret = ($flavour =~ /aix/) ? ".csect\t.text[PR],7" : ".text";
- $ret = ".abiversion 2\n".$ret if ($elfv2abi);
- $ret;
-};
-my $machine = sub {
- my $junk = shift;
- my $arch = shift;
- if ($flavour =~ /osx/)
- { $arch =~ s/\"//g;
- $arch = ($flavour=~/64/) ? "ppc970-64" : "ppc970" if ($arch eq "any");
- }
- ".machine $arch";
-};
-my $size = sub {
- if ($flavour =~ /linux/)
- { shift;
- my $name = shift; $name =~ s|^[\.\_]||;
- my $ret = ".size $name,.-".($dotfunctions?".":"").$name;
- $ret .= "\n.size .$name,.-.$name" if ($dotfunctions);
- $ret;
- }
- else
- { ""; }
-};
-my $asciz = sub {
- shift;
- my $line = join(",",@_);
- if ($line =~ /^"(.*)"$/)
- { ".byte " . join(",",unpack("C*",$1),0) . "\n.align 2"; }
- else
- { ""; }
-};
-my $quad = sub {
- shift;
- my @ret;
- my ($hi,$lo);
- for (@_) {
- if (/^0x([0-9a-f]*?)([0-9a-f]{1,8})$/io)
- { $hi=$1?"0x$1":"0"; $lo="0x$2"; }
- elsif (/^([0-9]+)$/o)
- { $hi=$1>>32; $lo=$1&0xffffffff; } # error-prone with 32-bit perl
- else
- { $hi=undef; $lo=$_; }
-
- if (defined($hi))
- { push(@ret,$flavour=~/le$/o?".long\t$lo,$hi":".long\t$hi,$lo"); }
- else
- { push(@ret,".quad $lo"); }
- }
- join("\n",@ret);
-};
-
-################################################################
-# simplified mnemonics not handled by at least one assembler
-################################################################
-my $cmplw = sub {
- my $f = shift;
- my $cr = 0; $cr = shift if ($#_>1);
- # Some out-of-date 32-bit GNU assembler just can't handle cmplw...
- ($flavour =~ /linux.*32/) ?
- " .long ".sprintf "0x%x",31<<26|$cr<<23|$_[0]<<16|$_[1]<<11|64 :
- " cmplw ".join(',',$cr,@_);
-};
-my $bdnz = sub {
- my $f = shift;
- my $bo = $f=~/[\+\-]/ ? 16+9 : 16; # optional "to be taken" hint
- " bc $bo,0,".shift;
-} if ($flavour!~/linux/);
-my $bltlr = sub {
- my $f = shift;
- my $bo = $f=~/\-/ ? 12+2 : 12; # optional "not to be taken" hint
- ($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints
- " .long ".sprintf "0x%x",19<<26|$bo<<21|16<<1 :
- " bclr $bo,0";
-};
-my $bnelr = sub {
- my $f = shift;
- my $bo = $f=~/\-/ ? 4+2 : 4; # optional "not to be taken" hint
- ($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints
- " .long ".sprintf "0x%x",19<<26|$bo<<21|2<<16|16<<1 :
- " bclr $bo,2";
-};
-my $beqlr = sub {
- my $f = shift;
- my $bo = $f=~/-/ ? 12+2 : 12; # optional "not to be taken" hint
- ($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints
- " .long ".sprintf "0x%X",19<<26|$bo<<21|2<<16|16<<1 :
- " bclr $bo,2";
-};
-# GNU assembler can't handle extrdi rA,rS,16,48, or when sum of last two
-# arguments is 64, with "operand out of range" error.
-my $extrdi = sub {
- my ($f,$ra,$rs,$n,$b) = @_;
- $b = ($b+$n)&63; $n = 64-$n;
- " rldicl $ra,$rs,$b,$n";
-};
-my $vmr = sub {
- my ($f,$vx,$vy) = @_;
- " vor $vx,$vy,$vy";
-};
-
-# Some ABIs specify vrsave, special-purpose register #256, as reserved
-# for system use.
-my $no_vrsave = ($elfv2abi);
-my $mtspr = sub {
- my ($f,$idx,$ra) = @_;
- if ($idx == 256 && $no_vrsave) {
- " or $ra,$ra,$ra";
- } else {
- " mtspr $idx,$ra";
- }
-};
-my $mfspr = sub {
- my ($f,$rd,$idx) = @_;
- if ($idx == 256 && $no_vrsave) {
- " li $rd,-1";
- } else {
- " mfspr $rd,$idx";
- }
-};
-
-# PowerISA 2.06 stuff
-sub vsxmem_op {
- my ($f, $vrt, $ra, $rb, $op) = @_;
- " .long ".sprintf "0x%X",(31<<26)|($vrt<<21)|($ra<<16)|($rb<<11)|($op*2+1);
-}
-# made-up unaligned memory reference AltiVec/VMX instructions
-my $lvx_u = sub { vsxmem_op(@_, 844); }; # lxvd2x
-my $stvx_u = sub { vsxmem_op(@_, 972); }; # stxvd2x
-my $lvdx_u = sub { vsxmem_op(@_, 588); }; # lxsdx
-my $stvdx_u = sub { vsxmem_op(@_, 716); }; # stxsdx
-my $lvx_4w = sub { vsxmem_op(@_, 780); }; # lxvw4x
-my $stvx_4w = sub { vsxmem_op(@_, 908); }; # stxvw4x
-
-# PowerISA 2.07 stuff
-sub vcrypto_op {
- my ($f, $vrt, $vra, $vrb, $op) = @_;
- " .long ".sprintf "0x%X",(4<<26)|($vrt<<21)|($vra<<16)|($vrb<<11)|$op;
-}
-my $vcipher = sub { vcrypto_op(@_, 1288); };
-my $vcipherlast = sub { vcrypto_op(@_, 1289); };
-my $vncipher = sub { vcrypto_op(@_, 1352); };
-my $vncipherlast= sub { vcrypto_op(@_, 1353); };
-my $vsbox = sub { vcrypto_op(@_, 0, 1480); };
-my $vshasigmad = sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1730); };
-my $vshasigmaw = sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1666); };
-my $vpmsumb = sub { vcrypto_op(@_, 1032); };
-my $vpmsumd = sub { vcrypto_op(@_, 1224); };
-my $vpmsubh = sub { vcrypto_op(@_, 1096); };
-my $vpmsumw = sub { vcrypto_op(@_, 1160); };
-my $vaddudm = sub { vcrypto_op(@_, 192); };
-my $vadduqm = sub { vcrypto_op(@_, 256); };
-
-my $mtsle = sub {
- my ($f, $arg) = @_;
- " .long ".sprintf "0x%X",(31<<26)|($arg<<21)|(147*2);
-};
-
-print "#include <asm/ppc_asm.h>\n" if $flavour =~ /linux/;
-
-while($line=<>) {
-
- $line =~ s|[#!;].*$||; # get rid of asm-style comments...
- $line =~ s|/\*.*\*/||; # ... and C-style comments...
- $line =~ s|^\s+||; # ... and skip white spaces in beginning...
- $line =~ s|\s+$||; # ... and at the end
-
- {
- $line =~ s|\b\.L(\w+)|L$1|g; # common denominator for Locallabel
- $line =~ s|\bL(\w+)|\.L$1|g if ($dotinlocallabels);
- }
-
- {
- $line =~ s|^\s*(\.?)(\w+)([\.\+\-]?)\s*||;
- my $c = $1; $c = "\t" if ($c eq "");
- my $mnemonic = $2;
- my $f = $3;
- my $opcode = eval("\$$mnemonic");
- $line =~ s/\b(c?[rf]|v|vs)([0-9]+)\b/$2/g if ($c ne "." and $flavour !~ /osx/);
- if (ref($opcode) eq 'CODE') { $line = &$opcode($f,split(',',$line)); }
- elsif ($mnemonic) { $line = $c.$mnemonic.$f."\t".$line; }
- }
-
- print $line if ($line);
- print "\n";
-}
-
-close STDOUT;
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
deleted file mode 100644
index 7eb713cc87c8c..0000000000000
--- a/drivers/crypto/vmx/vmx.c
+++ /dev/null
@@ -1,77 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Routines supporting VMX instructions on the Power 8
- *
- * Copyright (C) 2015 International Business Machines Inc.
- *
- * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/cpufeature.h>
-#include <linux/crypto.h>
-#include <asm/cputable.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/skcipher.h>
-
-#include "aesp8-ppc.h"
-
-static int __init p8_init(void)
-{
- int ret;
-
- ret = crypto_register_shash(&p8_ghash_alg);
- if (ret)
- goto err;
-
- ret = crypto_register_alg(&p8_aes_alg);
- if (ret)
- goto err_unregister_ghash;
-
- ret = crypto_register_skcipher(&p8_aes_cbc_alg);
- if (ret)
- goto err_unregister_aes;
-
- ret = crypto_register_skcipher(&p8_aes_ctr_alg);
- if (ret)
- goto err_unregister_aes_cbc;
-
- ret = crypto_register_skcipher(&p8_aes_xts_alg);
- if (ret)
- goto err_unregister_aes_ctr;
-
- return 0;
-
-err_unregister_aes_ctr:
- crypto_unregister_skcipher(&p8_aes_ctr_alg);
-err_unregister_aes_cbc:
- crypto_unregister_skcipher(&p8_aes_cbc_alg);
-err_unregister_aes:
- crypto_unregister_alg(&p8_aes_alg);
-err_unregister_ghash:
- crypto_unregister_shash(&p8_ghash_alg);
-err:
- return ret;
-}
-
-static void __exit p8_exit(void)
-{
- crypto_unregister_skcipher(&p8_aes_xts_alg);
- crypto_unregister_skcipher(&p8_aes_ctr_alg);
- crypto_unregister_skcipher(&p8_aes_cbc_alg);
- crypto_unregister_alg(&p8_aes_alg);
- crypto_unregister_shash(&p8_ghash_alg);
-}
-
-module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, p8_init);
-module_exit(p8_exit);
-
-MODULE_AUTHOR("Marcelo Cerri<mhcerri@br.ibm.com>");
-MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions "
- "support on Power 8");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("1.0.0");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index 3c205324b22b6..e614057188409 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -231,7 +231,10 @@ static int zynqmp_handle_aes_req(struct crypto_engine *engine,
err = zynqmp_aes_aead_cipher(areq);
}
+ local_bh_disable();
crypto_finalize_aead_request(engine, areq, err);
+ local_bh_enable();
+
return 0;
}
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 67998dbd1d46b..5f3c9c5529b96 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -144,17 +144,4 @@ config CXL_REGION_INVALIDATION_TEST
If unsure, or if this kernel is meant for production environments,
say N.
-config CXL_PMU
- tristate "CXL Performance Monitoring Unit"
- default CXL_BUS
- depends on PERF_EVENTS
- help
- Support performance monitoring as defined in CXL rev 3.0
- section 13.2: Performance Monitoring. CXL components may have
- one or more CXL Performance Monitoring Units (CPMUs).
-
- Say 'y/m' to enable a driver that will attach to performance
- monitoring units and provide standard perf based interfaces.
-
- If unsure say 'm'.
endif
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index 1a3e6aafbdcc3..cb8c155a2c9b3 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -525,20 +525,11 @@ static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
{
struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
u32 uid;
- int rc;
if (kstrtou32(acpi_device_uid(hb), 0, &uid))
return -EINVAL;
- rc = acpi_get_genport_coordinates(uid, &dport->hb_coord);
- if (rc < 0)
- return rc;
-
- /* Adjust back to picoseconds from nanoseconds */
- dport->hb_coord.read_latency *= 1000;
- dport->hb_coord.write_latency *= 1000;
-
- return 0;
+ return acpi_get_genport_coordinates(uid, dport->coord);
}
static int add_host_bridge_dport(struct device *match, void *arg)
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index 08fd0baea7a0e..bb83867d9fec9 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -9,16 +9,47 @@
#include "cxlmem.h"
#include "core.h"
#include "cxl.h"
+#include "core.h"
struct dsmas_entry {
struct range dpa_range;
u8 handle;
- struct access_coordinate coord;
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
int entries;
int qos_class;
};
+static u32 cdat_normalize(u16 entry, u64 base, u8 type)
+{
+ u32 value;
+
+ /*
+ * Check for invalid and overflow values
+ */
+ if (entry == 0xffff || !entry)
+ return 0;
+ else if (base > (UINT_MAX / (entry)))
+ return 0;
+
+ /*
+ * CDAT fields follow the format of HMAT fields. See table 5 Device
+ * Scoped Latency and Bandwidth Information Structure in Coherent Device
+ * Attribute Table (CDAT) Specification v1.01.
+ */
+ value = entry * base;
+ switch (type) {
+ case ACPI_HMAT_ACCESS_LATENCY:
+ case ACPI_HMAT_READ_LATENCY:
+ case ACPI_HMAT_WRITE_LATENCY:
+ value = DIV_ROUND_UP(value, 1000);
+ break;
+ default:
+ break;
+ }
+ return value;
+}
+
static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
@@ -57,8 +88,8 @@ static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
return 0;
}
-static void cxl_access_coordinate_set(struct access_coordinate *coord,
- int access, unsigned int val)
+static void __cxl_access_coordinate_set(struct access_coordinate *coord,
+ int access, unsigned int val)
{
switch (access) {
case ACPI_HMAT_ACCESS_LATENCY:
@@ -84,6 +115,13 @@ static void cxl_access_coordinate_set(struct access_coordinate *coord,
}
}
+static void cxl_access_coordinate_set(struct access_coordinate *coord,
+ int access, unsigned int val)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+ __cxl_access_coordinate_set(&coord[i], access, val);
+}
+
static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
@@ -96,7 +134,6 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
__le16 le_val;
u64 val;
u16 len;
- int rc;
len = le16_to_cpu((__force __le16)hdr->length);
if (len != size || (unsigned long)hdr + len > end) {
@@ -123,12 +160,10 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
le_base = (__force __le64)dslbis->entry_base_unit;
le_val = (__force __le16)dslbis->entry[0];
- rc = check_mul_overflow(le64_to_cpu(le_base),
- le16_to_cpu(le_val), &val);
- if (rc)
- pr_warn("DSLBIS value overflowed.\n");
+ val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
+ dslbis->data_type);
- cxl_access_coordinate_set(&dent->coord, dslbis->data_type, val);
+ cxl_access_coordinate_set(dent->coord, dslbis->data_type, val);
return 0;
}
@@ -149,28 +184,28 @@ static int cxl_cdat_endpoint_process(struct cxl_port *port,
int rc;
rc = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, cdat_dsmas_handler,
- dsmas_xa, port->cdat.table);
+ dsmas_xa, port->cdat.table, port->cdat.length);
rc = cdat_table_parse_output(rc);
if (rc)
return rc;
rc = cdat_table_parse(ACPI_CDAT_TYPE_DSLBIS, cdat_dslbis_handler,
- dsmas_xa, port->cdat.table);
+ dsmas_xa, port->cdat.table, port->cdat.length);
return cdat_table_parse_output(rc);
}
static int cxl_port_perf_data_calculate(struct cxl_port *port,
struct xarray *dsmas_xa)
{
- struct access_coordinate c;
+ struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];
struct dsmas_entry *dent;
int valid_entries = 0;
unsigned long index;
int rc;
- rc = cxl_endpoint_get_perf_coordinates(port, &c);
+ rc = cxl_endpoint_get_perf_coordinates(port, ep_c);
if (rc) {
- dev_dbg(&port->dev, "Failed to retrieve perf coordinates.\n");
+ dev_dbg(&port->dev, "Failed to retrieve ep perf coordinates.\n");
return rc;
}
@@ -185,18 +220,11 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
xa_for_each(dsmas_xa, index, dent) {
int qos_class;
- dent->coord.read_latency = dent->coord.read_latency +
- c.read_latency;
- dent->coord.write_latency = dent->coord.write_latency +
- c.write_latency;
- dent->coord.read_bandwidth = min_t(int, c.read_bandwidth,
- dent->coord.read_bandwidth);
- dent->coord.write_bandwidth = min_t(int, c.write_bandwidth,
- dent->coord.write_bandwidth);
-
+ cxl_coordinates_combine(dent->coord, dent->coord, ep_c);
dent->entries = 1;
- rc = cxl_root->ops->qos_class(cxl_root, &dent->coord, 1,
- &qos_class);
+ rc = cxl_root->ops->qos_class(cxl_root,
+ &dent->coord[ACCESS_COORDINATE_CPU],
+ 1, &qos_class);
if (rc != 1)
continue;
@@ -213,14 +241,17 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
struct cxl_dpa_perf *dpa_perf)
{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+ dpa_perf->coord[i] = dent->coord[i];
dpa_perf->dpa_range = dent->dpa_range;
- dpa_perf->coord = dent->coord;
dpa_perf->qos_class = dent->qos_class;
dev_dbg(dev,
"DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
dent->dpa_range.start, dpa_perf->qos_class,
- dent->coord.read_bandwidth, dent->coord.write_bandwidth,
- dent->coord.read_latency, dent->coord.write_latency);
+ dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth,
+ dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth,
+ dent->coord[ACCESS_COORDINATE_CPU].read_latency,
+ dent->coord[ACCESS_COORDINATE_CPU].write_latency);
}
static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
@@ -389,36 +420,38 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
+ struct acpi_cdat_sslbis_table {
+ struct acpi_cdat_header header;
+ struct acpi_cdat_sslbis sslbis_header;
+ struct acpi_cdat_sslbe entries[];
+ } *tbl = (struct acpi_cdat_sslbis_table *)header;
+ int size = sizeof(header->cdat) + sizeof(tbl->sslbis_header);
struct acpi_cdat_sslbis *sslbis;
- int size = sizeof(header->cdat) + sizeof(*sslbis);
struct cxl_port *port = arg;
struct device *dev = &port->dev;
- struct acpi_cdat_sslbe *entry;
int remain, entries, i;
u16 len;
len = le16_to_cpu((__force __le16)header->cdat.length);
remain = len - size;
- if (!remain || remain % sizeof(*entry) ||
+ if (!remain || remain % sizeof(tbl->entries[0]) ||
(unsigned long)header + len > end) {
dev_warn(dev, "Malformed SSLBIS table length: (%u)\n", len);
return -EINVAL;
}
- /* Skip common header */
- sslbis = (struct acpi_cdat_sslbis *)((unsigned long)header +
- sizeof(header->cdat));
-
+ sslbis = &tbl->sslbis_header;
/* Unrecognized data type, we can skip */
if (sslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
return 0;
- entries = remain / sizeof(*entry);
- entry = (struct acpi_cdat_sslbe *)((unsigned long)header + sizeof(*sslbis));
+ entries = remain / sizeof(tbl->entries[0]);
+ if (struct_size(tbl, entries, entries) != len)
+ return -EINVAL;
for (i = 0; i < entries; i++) {
- u16 x = le16_to_cpu((__force __le16)entry->portx_id);
- u16 y = le16_to_cpu((__force __le16)entry->porty_id);
+ u16 x = le16_to_cpu((__force __le16)tbl->entries[i].portx_id);
+ u16 y = le16_to_cpu((__force __le16)tbl->entries[i].porty_id);
__le64 le_base;
__le16 le_val;
struct cxl_dport *dport;
@@ -448,22 +481,19 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
break;
}
- le_base = (__force __le64)sslbis->entry_base_unit;
- le_val = (__force __le16)entry->latency_or_bandwidth;
-
- if (check_mul_overflow(le64_to_cpu(le_base),
- le16_to_cpu(le_val), &val))
- dev_warn(dev, "SSLBIS value overflowed!\n");
+ le_base = (__force __le64)tbl->sslbis_header.entry_base_unit;
+ le_val = (__force __le16)tbl->entries[i].latency_or_bandwidth;
+ val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
+ sslbis->data_type);
xa_for_each(&port->dports, index, dport) {
if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
- dsp_id == dport->port_id)
- cxl_access_coordinate_set(&dport->sw_coord,
+ dsp_id == dport->port_id) {
+ cxl_access_coordinate_set(dport->coord,
sslbis->data_type,
val);
+ }
}
-
- entry++;
}
return 0;
@@ -477,11 +507,93 @@ void cxl_switch_parse_cdat(struct cxl_port *port)
return;
rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
- port, port->cdat.table);
+ port, port->cdat.table, port->cdat.length);
rc = cdat_table_parse_output(rc);
if (rc)
dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);
+static void __cxl_coordinates_combine(struct access_coordinate *out,
+ struct access_coordinate *c1,
+ struct access_coordinate *c2)
+{
+ if (c1->write_bandwidth && c2->write_bandwidth)
+ out->write_bandwidth = min(c1->write_bandwidth,
+ c2->write_bandwidth);
+ out->write_latency = c1->write_latency + c2->write_latency;
+
+ if (c1->read_bandwidth && c2->read_bandwidth)
+ out->read_bandwidth = min(c1->read_bandwidth,
+ c2->read_bandwidth);
+ out->read_latency = c1->read_latency + c2->read_latency;
+}
+
+/**
+ * cxl_coordinates_combine - Combine the two input coordinates
+ *
+ * @out: Output coordinate of c1 and c2 combined
+ * @c1: input coordinates
+ * @c2: input coordinates
+ */
+void cxl_coordinates_combine(struct access_coordinate *out,
+ struct access_coordinate *c1,
+ struct access_coordinate *c2)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+ __cxl_coordinates_combine(&out[i], &c1[i], &c2[i]);
+}
+
MODULE_IMPORT_NS(CXL);
+
+void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+ struct range dpa = {
+ .start = cxled->dpa_res->start,
+ .end = cxled->dpa_res->end,
+ };
+ struct cxl_dpa_perf *perf;
+
+ switch (cxlr->mode) {
+ case CXL_DECODER_RAM:
+ perf = &mds->ram_perf;
+ break;
+ case CXL_DECODER_PMEM:
+ perf = &mds->pmem_perf;
+ break;
+ default:
+ return;
+ }
+
+ lockdep_assert_held(&cxl_dpa_rwsem);
+
+ if (!range_contains(&perf->dpa_range, &dpa))
+ return;
+
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ /* Get total bandwidth and the worst latency for the cxl region */
+ cxlr->coord[i].read_latency = max_t(unsigned int,
+ cxlr->coord[i].read_latency,
+ perf->coord[i].read_latency);
+ cxlr->coord[i].write_latency = max_t(unsigned int,
+ cxlr->coord[i].write_latency,
+ perf->coord[i].write_latency);
+ cxlr->coord[i].read_bandwidth += perf->coord[i].read_bandwidth;
+ cxlr->coord[i].write_bandwidth += perf->coord[i].write_bandwidth;
+ }
+}
+
+int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
+ enum access_coordinate_class access)
+{
+ return hmat_update_target_coordinates(nid, &cxlr->coord[access], access);
+}
+
+bool cxl_need_node_perf_attrs_update(int nid)
+{
+ return !acpi_node_backed_by_real_pxm(nid);
+}
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 3b64fb1b9ed05..bc5a95665aa0a 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -90,4 +90,8 @@ enum cxl_poison_trace_type {
long cxl_pci_get_latency(struct pci_dev *pdev);
+int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
+ enum access_coordinate_class access);
+bool cxl_need_node_perf_attrs_update(int nid);
+
#endif /* __CXL_CORE_H__ */
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 9adda4795eb78..f0f54aeccc872 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -915,7 +915,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
payload->handles[i++] = gen->hdr.handle;
dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
- le16_to_cpu(payload->handles[i]));
+ le16_to_cpu(payload->handles[i - 1]));
if (i == max_handles) {
payload->nr_recs = i;
@@ -958,13 +958,14 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
.payload_in = &log_type,
.size_in = sizeof(log_type),
.payload_out = payload,
- .size_out = mds->payload_size,
.min_out = struct_size(payload, records, 0),
};
do {
int rc, i;
+ mbox_cmd.size_out = mds->payload_size;
+
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc) {
dev_err_ratelimited(dev,
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index e9e6c81ce034a..0df09bd794088 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -518,14 +518,14 @@ EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle)))
static int cxl_cdat_get_length(struct device *dev,
- struct pci_doe_mb *cdat_doe,
+ struct pci_doe_mb *doe_mb,
size_t *length)
{
__le32 request = CDAT_DOE_REQ(0);
__le32 response[2];
int rc;
- rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL,
+ rc = pci_doe(doe_mb, PCI_DVSEC_VENDOR_ID_CXL,
CXL_DOE_PROTOCOL_TABLE_ACCESS,
&request, sizeof(request),
&response, sizeof(response));
@@ -543,56 +543,58 @@ static int cxl_cdat_get_length(struct device *dev,
}
static int cxl_cdat_read_table(struct device *dev,
- struct pci_doe_mb *cdat_doe,
- void *cdat_table, size_t *cdat_length)
+ struct pci_doe_mb *doe_mb,
+ struct cdat_doe_rsp *rsp, size_t *length)
{
- size_t length = *cdat_length + sizeof(__le32);
- __le32 *data = cdat_table;
- int entry_handle = 0;
+ size_t received, remaining = *length;
+ unsigned int entry_handle = 0;
+ union cdat_data *data;
__le32 saved_dw = 0;
do {
__le32 request = CDAT_DOE_REQ(entry_handle);
- struct cdat_entry_header *entry;
- size_t entry_dw;
int rc;
- rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL,
+ rc = pci_doe(doe_mb, PCI_DVSEC_VENDOR_ID_CXL,
CXL_DOE_PROTOCOL_TABLE_ACCESS,
&request, sizeof(request),
- data, length);
+ rsp, sizeof(*rsp) + remaining);
if (rc < 0) {
dev_err(dev, "DOE failed: %d", rc);
return rc;
}
- /* 1 DW Table Access Response Header + CDAT entry */
- entry = (struct cdat_entry_header *)(data + 1);
- if ((entry_handle == 0 &&
- rc != sizeof(__le32) + sizeof(struct cdat_header)) ||
- (entry_handle > 0 &&
- (rc < sizeof(__le32) + sizeof(*entry) ||
- rc != sizeof(__le32) + le16_to_cpu(entry->length))))
+ if (rc < sizeof(*rsp))
return -EIO;
+ data = (union cdat_data *)rsp->data;
+ received = rc - sizeof(*rsp);
+
+ if (entry_handle == 0) {
+ if (received != sizeof(data->header))
+ return -EIO;
+ } else {
+ if (received < sizeof(data->entry) ||
+ received != le16_to_cpu(data->entry.length))
+ return -EIO;
+ }
+
/* Get the CXL table access header entry handle */
entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
- le32_to_cpu(data[0]));
- entry_dw = rc / sizeof(__le32);
- /* Skip Header */
- entry_dw -= 1;
+ le32_to_cpu(rsp->doe_header));
+
/*
* Table Access Response Header overwrote the last DW of
* previous entry, so restore that DW
*/
- *data = saved_dw;
- length -= entry_dw * sizeof(__le32);
- data += entry_dw;
- saved_dw = *data;
+ rsp->doe_header = saved_dw;
+ remaining -= received;
+ rsp = (void *)rsp + received;
+ saved_dw = rsp->doe_header;
} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);
/* Length in CDAT header may exceed concatenation of CDAT entries */
- *cdat_length -= length - sizeof(__le32);
+ *length -= remaining;
return 0;
}
@@ -617,11 +619,11 @@ void read_cdat_data(struct cxl_port *port)
{
struct device *uport = port->uport_dev;
struct device *dev = &port->dev;
- struct pci_doe_mb *cdat_doe;
+ struct pci_doe_mb *doe_mb;
struct pci_dev *pdev = NULL;
struct cxl_memdev *cxlmd;
- size_t cdat_length;
- void *cdat_table, *cdat_buf;
+ struct cdat_doe_rsp *buf;
+ size_t table_length, length;
int rc;
if (is_cxl_memdev(uport)) {
@@ -638,39 +640,48 @@ void read_cdat_data(struct cxl_port *port)
if (!pdev)
return;
- cdat_doe = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL,
- CXL_DOE_PROTOCOL_TABLE_ACCESS);
- if (!cdat_doe) {
+ doe_mb = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL,
+ CXL_DOE_PROTOCOL_TABLE_ACCESS);
+ if (!doe_mb) {
dev_dbg(dev, "No CDAT mailbox\n");
return;
}
port->cdat_available = true;
- if (cxl_cdat_get_length(dev, cdat_doe, &cdat_length)) {
+ if (cxl_cdat_get_length(dev, doe_mb, &length)) {
dev_dbg(dev, "No CDAT length\n");
return;
}
- cdat_buf = devm_kzalloc(dev, cdat_length + sizeof(__le32), GFP_KERNEL);
- if (!cdat_buf)
- return;
+ /*
+	 * The beginning of the CDAT buffer needs space for an additional
+	 * 4 bytes for the DOE header. Table data starts afterwards.
+ */
+ buf = devm_kzalloc(dev, sizeof(*buf) + length, GFP_KERNEL);
+ if (!buf)
+ goto err;
+
+ table_length = length;
- rc = cxl_cdat_read_table(dev, cdat_doe, cdat_buf, &cdat_length);
+ rc = cxl_cdat_read_table(dev, doe_mb, buf, &length);
if (rc)
goto err;
- cdat_table = cdat_buf + sizeof(__le32);
- if (cdat_checksum(cdat_table, cdat_length))
+ if (table_length != length)
+ dev_warn(dev, "Malformed CDAT table length (%zu:%zu), discarding trailing data\n",
+ table_length, length);
+
+ if (cdat_checksum(buf->data, length))
goto err;
- port->cdat.table = cdat_table;
- port->cdat.length = cdat_length;
- return;
+ port->cdat.table = buf->data;
+ port->cdat.length = length;
+ return;
err:
/* Don't leave table data allocated on error */
- devm_kfree(dev, cdat_buf);
+ devm_kfree(dev, buf);
dev_err(dev, "Failed to read/validate CDAT.\n");
}
EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index e59d9d37aa650..762783bb091af 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -3,6 +3,7 @@
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/workqueue.h>
+#include <linux/einj-cxl.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
@@ -793,6 +794,40 @@ static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
return rc;
}
+DEFINE_SHOW_ATTRIBUTE(einj_cxl_available_error_type);
+
+static int cxl_einj_inject(void *data, u64 type)
+{
+ struct cxl_dport *dport = data;
+
+ if (dport->rch)
+ return einj_cxl_inject_rch_error(dport->rcrb.base, type);
+
+ return einj_cxl_inject_error(to_pci_dev(dport->dport_dev), type);
+}
+DEFINE_DEBUGFS_ATTRIBUTE(cxl_einj_inject_fops, NULL, cxl_einj_inject,
+ "0x%llx\n");
+
+static void cxl_debugfs_create_dport_dir(struct cxl_dport *dport)
+{
+ struct dentry *dir;
+
+ if (!einj_cxl_is_initialized())
+ return;
+
+ /*
+ * dport_dev needs to be a PCIe port for CXL 2.0+ ports because
+ * EINJ expects a dport SBDF to be specified for 2.0 error injection.
+ */
+ if (!dport->rch && !dev_is_pci(dport->dport_dev))
+ return;
+
+ dir = cxl_debugfs_create_dir(dev_name(dport->dport_dev));
+
+ debugfs_create_file("einj_inject", 0200, dir, dport,
+ &cxl_einj_inject_fops);
+}
+
static struct cxl_port *__devm_cxl_add_port(struct device *host,
struct device *uport_dev,
resource_size_t component_reg_phys,
@@ -822,6 +857,7 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host,
*/
port->reg_map = cxlds->reg_map;
port->reg_map.host = &port->dev;
+ cxlmd->endpoint = port;
} else if (parent_dport) {
rc = dev_set_name(dev, "port%d", port->id);
if (rc)
@@ -1149,6 +1185,8 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
if (dev_is_pci(dport_dev))
dport->link_latency = cxl_pci_get_latency(to_pci_dev(dport_dev));
+ cxl_debugfs_create_dport_dir(dport);
+
return dport;
}
@@ -1374,7 +1412,6 @@ int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
get_device(host);
get_device(&endpoint->dev);
- cxlmd->endpoint = endpoint;
cxlmd->depth = endpoint->depth;
return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
@@ -2096,18 +2133,44 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
-static void combine_coordinates(struct access_coordinate *c1,
- struct access_coordinate *c2)
+static void add_latency(struct access_coordinate *c, long latency)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ c[i].write_latency += latency;
+ c[i].read_latency += latency;
+ }
+}
+
+static bool coordinates_valid(struct access_coordinate *c)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ if (c[i].read_bandwidth && c[i].write_bandwidth &&
+ c[i].read_latency && c[i].write_latency)
+ continue;
+ return false;
+ }
+
+ return true;
+}
+
+static void set_min_bandwidth(struct access_coordinate *c, unsigned int bw)
{
- if (c2->write_bandwidth)
- c1->write_bandwidth = min(c1->write_bandwidth,
- c2->write_bandwidth);
- c1->write_latency += c2->write_latency;
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ c[i].write_bandwidth = min(c[i].write_bandwidth, bw);
+ c[i].read_bandwidth = min(c[i].read_bandwidth, bw);
+ }
+}
- if (c2->read_bandwidth)
- c1->read_bandwidth = min(c1->read_bandwidth,
- c2->read_bandwidth);
- c1->read_latency += c2->read_latency;
+static void set_access_coordinates(struct access_coordinate *out,
+ struct access_coordinate *in)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+ out[i] = in[i];
+}
+
+static bool parent_port_is_cxl_root(struct cxl_port *port)
+{
+ return is_cxl_root(to_cxl_port(port->dev.parent));
}
/**
@@ -2121,38 +2184,53 @@ static void combine_coordinates(struct access_coordinate *c1,
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord)
{
- struct access_coordinate c = {
- .read_bandwidth = UINT_MAX,
- .write_bandwidth = UINT_MAX,
+ struct access_coordinate c[] = {
+ {
+ .read_bandwidth = UINT_MAX,
+ .write_bandwidth = UINT_MAX,
+ },
+ {
+ .read_bandwidth = UINT_MAX,
+ .write_bandwidth = UINT_MAX,
+ },
};
struct cxl_port *iter = port;
struct cxl_dport *dport;
struct pci_dev *pdev;
unsigned int bw;
+ bool is_cxl_root;
if (!is_cxl_endpoint(port))
return -EINVAL;
- dport = iter->parent_dport;
-
/*
- * Exit the loop when the parent port of the current port is cxl root.
- * The iterative loop starts at the endpoint and gathers the
- * latency of the CXL link from the current iter to the next downstream
- * port each iteration. If the parent is cxl root then there is
- * nothing to gather.
+ * Exit the loop when the parent port of the current iter port is cxl
+ * root. The iterative loop starts at the endpoint and gathers the
+ * latency of the CXL link from the current device/port to the connected
+ * downstream port each iteration.
*/
- while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
- combine_coordinates(&c, &dport->sw_coord);
- c.write_latency += dport->link_latency;
- c.read_latency += dport->link_latency;
-
- iter = to_cxl_port(iter->dev.parent);
+ do {
dport = iter->parent_dport;
- }
+ iter = to_cxl_port(iter->dev.parent);
+ is_cxl_root = parent_port_is_cxl_root(iter);
+
+ /*
+ * There's no valid access_coordinate for a root port since RPs do not
+ * have CDAT and therefore needs to be skipped.
+ */
+ if (!is_cxl_root) {
+ if (!coordinates_valid(dport->coord))
+ return -EINVAL;
+ cxl_coordinates_combine(c, c, dport->coord);
+ }
+ add_latency(c, dport->link_latency);
+ } while (!is_cxl_root);
- /* Augment with the generic port (host bridge) perf data */
- combine_coordinates(&c, &dport->hb_coord);
+ dport = iter->parent_dport;
+ /* Retrieve HB coords */
+ if (!coordinates_valid(dport->coord))
+ return -EINVAL;
+ cxl_coordinates_combine(c, c, dport->coord);
/* Get the calculated PCI paths bandwidth */
pdev = to_pci_dev(port->uport_dev->parent);
@@ -2161,10 +2239,8 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
return -ENXIO;
bw /= BITS_PER_BYTE;
- c.write_bandwidth = min(c.write_bandwidth, bw);
- c.read_bandwidth = min(c.read_bandwidth, bw);
-
- *coord = c;
+ set_min_bandwidth(c, bw);
+ set_access_coordinates(coord, c);
return 0;
}
@@ -2221,6 +2297,10 @@ static __init int cxl_core_init(void)
cxl_debugfs = debugfs_create_dir("cxl", NULL);
+ if (einj_cxl_is_initialized())
+ debugfs_create_file("einj_types", 0400, cxl_debugfs, NULL,
+ &einj_cxl_available_error_type_fops);
+
cxl_mbox_init();
rc = cxl_memdev_init();
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 4c7fd2d5cccb2..5c186e0a39b96 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -4,6 +4,7 @@
#include <linux/genalloc.h>
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/memory.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/sort.h>
@@ -30,6 +31,108 @@
static struct cxl_region *to_cxl_region(struct device *dev);
+#define __ACCESS_ATTR_RO(_level, _name) { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = _name##_access##_level##_show, \
+}
+
+#define ACCESS_DEVICE_ATTR_RO(level, name) \
+ struct device_attribute dev_attr_access##level##_##name = __ACCESS_ATTR_RO(level, name)
+
+#define ACCESS_ATTR_RO(level, attrib) \
+static ssize_t attrib##_access##level##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct cxl_region *cxlr = to_cxl_region(dev); \
+ \
+ if (cxlr->coord[level].attrib == 0) \
+ return -ENOENT; \
+ \
+ return sysfs_emit(buf, "%u\n", cxlr->coord[level].attrib); \
+} \
+static ACCESS_DEVICE_ATTR_RO(level, attrib)
+
+ACCESS_ATTR_RO(0, read_bandwidth);
+ACCESS_ATTR_RO(0, read_latency);
+ACCESS_ATTR_RO(0, write_bandwidth);
+ACCESS_ATTR_RO(0, write_latency);
+
+#define ACCESS_ATTR_DECLARE(level, attrib) \
+ (&dev_attr_access##level##_##attrib.attr)
+
+static struct attribute *access0_coordinate_attrs[] = {
+ ACCESS_ATTR_DECLARE(0, read_bandwidth),
+ ACCESS_ATTR_DECLARE(0, write_bandwidth),
+ ACCESS_ATTR_DECLARE(0, read_latency),
+ ACCESS_ATTR_DECLARE(0, write_latency),
+ NULL
+};
+
+ACCESS_ATTR_RO(1, read_bandwidth);
+ACCESS_ATTR_RO(1, read_latency);
+ACCESS_ATTR_RO(1, write_bandwidth);
+ACCESS_ATTR_RO(1, write_latency);
+
+static struct attribute *access1_coordinate_attrs[] = {
+ ACCESS_ATTR_DECLARE(1, read_bandwidth),
+ ACCESS_ATTR_DECLARE(1, write_bandwidth),
+ ACCESS_ATTR_DECLARE(1, read_latency),
+ ACCESS_ATTR_DECLARE(1, write_latency),
+ NULL
+};
+
+#define ACCESS_VISIBLE(level) \
+static umode_t cxl_region_access##level##_coordinate_visible( \
+ struct kobject *kobj, struct attribute *a, int n) \
+{ \
+ struct device *dev = kobj_to_dev(kobj); \
+ struct cxl_region *cxlr = to_cxl_region(dev); \
+ \
+ if (a == &dev_attr_access##level##_read_latency.attr && \
+ cxlr->coord[level].read_latency == 0) \
+ return 0; \
+ \
+ if (a == &dev_attr_access##level##_write_latency.attr && \
+ cxlr->coord[level].write_latency == 0) \
+ return 0; \
+ \
+ if (a == &dev_attr_access##level##_read_bandwidth.attr && \
+ cxlr->coord[level].read_bandwidth == 0) \
+ return 0; \
+ \
+ if (a == &dev_attr_access##level##_write_bandwidth.attr && \
+ cxlr->coord[level].write_bandwidth == 0) \
+ return 0; \
+ \
+ return a->mode; \
+}
+
+ACCESS_VISIBLE(0);
+ACCESS_VISIBLE(1);
+
+static const struct attribute_group cxl_region_access0_coordinate_group = {
+ .name = "access0",
+ .attrs = access0_coordinate_attrs,
+ .is_visible = cxl_region_access0_coordinate_visible,
+};
+
+static const struct attribute_group *get_cxl_region_access0_group(void)
+{
+ return &cxl_region_access0_coordinate_group;
+}
+
+static const struct attribute_group cxl_region_access1_coordinate_group = {
+ .name = "access1",
+ .attrs = access1_coordinate_attrs,
+ .is_visible = cxl_region_access1_coordinate_visible,
+};
+
+static const struct attribute_group *get_cxl_region_access1_group(void)
+{
+ return &cxl_region_access1_coordinate_group;
+}
+
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1752,6 +1855,8 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return -EINVAL;
}
+ cxl_region_perf_data_calculate(cxlr, cxled);
+
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
int i;
@@ -2067,6 +2172,8 @@ static const struct attribute_group *region_groups[] = {
&cxl_base_attribute_group,
&cxl_region_group,
&cxl_region_target_group,
+ &cxl_region_access0_coordinate_group,
+ &cxl_region_access1_coordinate_group,
NULL,
};
@@ -2120,6 +2227,7 @@ static void unregister_region(void *_cxlr)
struct cxl_region_params *p = &cxlr->params;
int i;
+ unregister_memory_notifier(&cxlr->memory_notifier);
device_del(&cxlr->dev);
/*
@@ -2164,6 +2272,63 @@ static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int i
return cxlr;
}
+static bool cxl_region_update_coordinates(struct cxl_region *cxlr, int nid)
+{
+ int cset = 0;
+ int rc;
+
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ if (cxlr->coord[i].read_bandwidth) {
+ rc = 0;
+ if (cxl_need_node_perf_attrs_update(nid))
+ node_set_perf_attrs(nid, &cxlr->coord[i], i);
+ else
+ rc = cxl_update_hmat_access_coordinates(nid, cxlr, i);
+
+ if (rc == 0)
+ cset++;
+ }
+ }
+
+ if (!cset)
+ return false;
+
+ rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_access0_group());
+ if (rc)
+ dev_dbg(&cxlr->dev, "Failed to update access0 group\n");
+
+ rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_access1_group());
+ if (rc)
+ dev_dbg(&cxlr->dev, "Failed to update access1 group\n");
+
+ return true;
+}
+
+static int cxl_region_perf_attrs_callback(struct notifier_block *nb,
+ unsigned long action, void *arg)
+{
+ struct cxl_region *cxlr = container_of(nb, struct cxl_region,
+ memory_notifier);
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled = p->targets[0];
+ struct cxl_decoder *cxld = &cxled->cxld;
+ struct memory_notify *mnb = arg;
+ int nid = mnb->status_change_nid;
+ int region_nid;
+
+ if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
+ return NOTIFY_DONE;
+
+ region_nid = phys_to_target_node(cxld->hpa_range.start);
+ if (nid != region_nid)
+ return NOTIFY_DONE;
+
+ if (!cxl_region_update_coordinates(cxlr, nid))
+ return NOTIFY_DONE;
+
+ return NOTIFY_OK;
+}
+
/**
* devm_cxl_add_region - Adds a region to a decoder
* @cxlrd: root decoder
@@ -2211,6 +2376,10 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
if (rc)
goto err;
+ cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback;
+ cxlr->memory_notifier.priority = CXL_CALLBACK_PRI;
+ register_memory_notifier(&cxlr->memory_notifier);
+
rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr);
if (rc)
return ERR_PTR(rc);
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index 372786f809555..3c42f984eeafa 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -271,6 +271,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL);
static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
struct cxl_register_map *map)
{
+ u8 reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
int bar = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK, reg_lo);
u64 offset = ((u64)reg_hi << 32) |
(reg_lo & CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK);
@@ -278,11 +279,11 @@ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
if (offset > pci_resource_len(pdev, bar)) {
dev_warn(&pdev->dev,
"BAR%d: %pr: too small (offset: %pa, type: %d)\n", bar,
- &pdev->resource[bar], &offset, map->reg_type);
+ &pdev->resource[bar], &offset, reg_type);
return false;
}
- map->reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
+ map->reg_type = reg_type;
map->resource = pci_resource_start(pdev, bar) + offset;
map->max_size = pci_resource_len(pdev, bar) - offset;
return true;
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index bdf117a33744b..e5f13260fc524 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -646,18 +646,18 @@ u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *memdev, u64 dpa);
TRACE_EVENT(cxl_poison,
- TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *region,
+ TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *cxlr,
const struct cxl_poison_record *record, u8 flags,
__le64 overflow_ts, enum cxl_poison_trace_type trace_type),
- TP_ARGS(cxlmd, region, record, flags, overflow_ts, trace_type),
+ TP_ARGS(cxlmd, cxlr, record, flags, overflow_ts, trace_type),
TP_STRUCT__entry(
__string(memdev, dev_name(&cxlmd->dev))
__string(host, dev_name(cxlmd->dev.parent))
__field(u64, serial)
__field(u8, trace_type)
- __string(region, region)
+ __string(region, cxlr ? dev_name(&cxlr->dev) : "")
__field(u64, overflow_ts)
__field(u64, hpa)
__field(u64, dpa)
@@ -677,10 +677,10 @@ TRACE_EVENT(cxl_poison,
__entry->source = cxl_poison_record_source(record);
__entry->trace_type = trace_type;
__entry->flags = flags;
- if (region) {
- __assign_str(region, dev_name(&region->dev));
- memcpy(__entry->uuid, &region->params.uuid, 16);
- __entry->hpa = cxl_trace_hpa(region, cxlmd,
+ if (cxlr) {
+ __assign_str(region, dev_name(&cxlr->dev));
+ memcpy(__entry->uuid, &cxlr->params.uuid, 16);
+ __entry->hpa = cxl_trace_hpa(cxlr, cxlmd,
__entry->dpa);
} else {
__assign_str(region, "");
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 003feebab79b5..036d17db68e00 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -6,6 +6,7 @@
#include <linux/libnvdimm.h>
#include <linux/bitfield.h>
+#include <linux/notifier.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/node.h>
@@ -517,6 +518,8 @@ struct cxl_region_params {
* @cxlr_pmem: (for pmem regions) cached copy of the nvdimm bridge
* @flags: Region state flags
* @params: active + config params for the region
+ * @coord: QoS access coordinates for the region
+ * @memory_notifier: notifier for setting the access coordinates to the node
*/
struct cxl_region {
struct device dev;
@@ -527,6 +530,8 @@ struct cxl_region {
struct cxl_pmem_region *cxlr_pmem;
unsigned long flags;
struct cxl_region_params params;
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+ struct notifier_block memory_notifier;
};
struct cxl_nvdimm_bridge {
@@ -658,8 +663,7 @@ struct cxl_rcrb_info {
* @rch: Indicate whether this dport was enumerated in RCH or VH mode
* @port: reference to cxl_port that contains this downstream port
* @regs: Dport parsed register blocks
- * @sw_coord: access coordinates (performance) for switch from CDAT
- * @hb_coord: access coordinates (performance) from ACPI generic port (host bridge)
+ * @coord: access coordinates (bandwidth and latency performance attributes)
* @link_latency: calculated PCIe downstream latency
*/
struct cxl_dport {
@@ -670,8 +674,7 @@ struct cxl_dport {
bool rch;
struct cxl_port *port;
struct cxl_regs regs;
- struct access_coordinate sw_coord;
- struct access_coordinate hb_coord;
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
long link_latency;
};
@@ -879,9 +882,15 @@ void cxl_switch_parse_cdat(struct cxl_port *port);
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord);
+void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled);
void cxl_memdev_update_perf(struct cxl_memdev *cxlmd);
+void cxl_coordinates_combine(struct access_coordinate *out,
+ struct access_coordinate *c1,
+ struct access_coordinate *c2);
+
/*
* Unit test builds overrides this to __weak, find the 'strong' version
* of these symbols in tools/testing/cxl/.
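cxl_coordinates_combine() is only declared in this hunk. A minimal sketch of the combining rule it is expected to implement, assuming latencies along a path accumulate while bandwidth is limited by the slowest hop (the real helper may add guards, e.g. for unreported zero bandwidth):

static void coordinates_combine_sketch(struct access_coordinate *out,
				       struct access_coordinate *c1,
				       struct access_coordinate *c2)
{
	out->read_latency = c1->read_latency + c2->read_latency;
	out->write_latency = c1->write_latency + c2->write_latency;
	out->read_bandwidth = min(c1->read_bandwidth, c2->read_bandwidth);
	out->write_bandwidth = min(c1->write_bandwidth, c2->write_bandwidth);
}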
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 20fb3b35e89e0..36cee9c30cebd 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -401,7 +401,7 @@ enum cxl_devtype {
*/
struct cxl_dpa_perf {
struct range dpa_range;
- struct access_coordinate coord;
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
int qos_class;
};
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 711b05d9a370e..93992a1c8eecf 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -71,6 +71,15 @@ enum cxl_regloc_type {
CXL_REGLOC_RBI_TYPES
};
+/*
+ * Table Access DOE, CDAT Read Entry Response
+ *
+ * Spec refs:
+ *
+ * CXL 3.1 8.1.11, Table 8-14: Read Entry Response
+ * CDAT Specification 1.03: 2 CDAT Data Structures
+ */
+
struct cdat_header {
__le32 length;
u8 revision;
@@ -86,6 +95,21 @@ struct cdat_entry_header {
} __packed;
/*
+ * The DOE CDAT read response contains a CDAT read entry (either the
+ * CDAT header or a structure).
+ */
+union cdat_data {
+ struct cdat_header header;
+ struct cdat_entry_header entry;
+} __packed;
+
+/* There is an additional CDAT response header of 4 bytes. */
+struct cdat_doe_rsp {
+ __le32 doe_header;
+ u8 data[];
+} __packed;
+
+/*
* CXL v3.0 6.2.3 Table 6-4
* The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits
* mode, otherwise it's 68B flits mode.
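A minimal sketch of how the new types compose when consuming a Table Access DOE response: the 4-byte DOE header precedes the payload, the first entry read from a device is the CDAT header, and subsequent reads return individual structures. The helper below is illustrative, assuming cdat_entry_header carries a 16-bit length field:

static size_t cdat_entry_length_sketch(struct cdat_doe_rsp *rsp, bool first_entry)
{
	union cdat_data *data = (union cdat_data *)rsp->data;

	/* table header: 32-bit total length; structure entry: 16-bit length */
	return first_entry ? le32_to_cpu(data->header.length)
			   : le16_to_cpu(data->entry.length);
}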
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 1ff1ab5fa105a..797e1ebff2997 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -12,6 +12,18 @@
static DEFINE_MUTEX(dax_bus_lock);
+/*
+ * All changes to the dax region configuration occur with this lock held
+ * for write.
+ */
+DECLARE_RWSEM(dax_region_rwsem);
+
+/*
+ * All changes to the dax device configuration occur with this lock held
+ * for write.
+ */
+DECLARE_RWSEM(dax_dev_rwsem);
+
#define DAX_NAME_LEN 30
struct dax_id {
struct list_head list;
@@ -180,7 +192,7 @@ static u64 dev_dax_size(struct dev_dax *dev_dax)
u64 size = 0;
int i;
- device_lock_assert(&dev_dax->dev);
+ WARN_ON_ONCE(!rwsem_is_locked(&dax_dev_rwsem));
for (i = 0; i < dev_dax->nr_range; i++)
size += range_len(&dev_dax->ranges[i].range);
@@ -194,8 +206,15 @@ static int dax_bus_probe(struct device *dev)
struct dev_dax *dev_dax = to_dev_dax(dev);
struct dax_region *dax_region = dev_dax->region;
int rc;
+ u64 size;
+
+ rc = down_read_interruptible(&dax_dev_rwsem);
+ if (rc)
+ return rc;
+ size = dev_dax_size(dev_dax);
+ up_read(&dax_dev_rwsem);
- if (dev_dax_size(dev_dax) == 0 || dev_dax->id < 0)
+ if (size == 0 || dev_dax->id < 0)
return -ENXIO;
rc = dax_drv->probe(dev_dax);
@@ -222,7 +241,7 @@ static void dax_bus_remove(struct device *dev)
dax_drv->remove(dev_dax);
}
-static struct bus_type dax_bus_type = {
+static const struct bus_type dax_bus_type = {
.name = "dax",
.uevent = dax_bus_uevent,
.match = dax_bus_match,
@@ -250,7 +269,7 @@ static ssize_t id_show(struct device *dev,
{
struct dax_region *dax_region = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", dax_region->id);
+ return sysfs_emit(buf, "%d\n", dax_region->id);
}
static DEVICE_ATTR_RO(id);
@@ -259,8 +278,8 @@ static ssize_t region_size_show(struct device *dev,
{
struct dax_region *dax_region = dev_get_drvdata(dev);
- return sprintf(buf, "%llu\n", (unsigned long long)
- resource_size(&dax_region->res));
+ return sysfs_emit(buf, "%llu\n",
+ (unsigned long long)resource_size(&dax_region->res));
}
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
region_size_show, NULL);
@@ -270,7 +289,7 @@ static ssize_t region_align_show(struct device *dev,
{
struct dax_region *dax_region = dev_get_drvdata(dev);
- return sprintf(buf, "%u\n", dax_region->align);
+ return sysfs_emit(buf, "%u\n", dax_region->align);
}
static struct device_attribute dev_attr_region_align =
__ATTR(align, 0400, region_align_show, NULL);
@@ -283,7 +302,7 @@ static unsigned long long dax_region_avail_size(struct dax_region *dax_region)
resource_size_t size = resource_size(&dax_region->res);
struct resource *res;
- device_lock_assert(dax_region->dev);
+ WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
for_each_dax_region_resource(dax_region, res)
size -= resource_size(res);
@@ -295,12 +314,15 @@ static ssize_t available_size_show(struct device *dev,
{
struct dax_region *dax_region = dev_get_drvdata(dev);
unsigned long long size;
+ int rc;
- device_lock(dev);
+ rc = down_read_interruptible(&dax_region_rwsem);
+ if (rc)
+ return rc;
size = dax_region_avail_size(dax_region);
- device_unlock(dev);
+ up_read(&dax_region_rwsem);
- return sprintf(buf, "%llu\n", size);
+ return sysfs_emit(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(available_size);
@@ -314,10 +336,12 @@ static ssize_t seed_show(struct device *dev,
if (is_static(dax_region))
return -EINVAL;
- device_lock(dev);
+ rc = down_read_interruptible(&dax_region_rwsem);
+ if (rc)
+ return rc;
seed = dax_region->seed;
- rc = sprintf(buf, "%s\n", seed ? dev_name(seed) : "");
- device_unlock(dev);
+ rc = sysfs_emit(buf, "%s\n", seed ? dev_name(seed) : "");
+ up_read(&dax_region_rwsem);
return rc;
}
@@ -333,14 +357,18 @@ static ssize_t create_show(struct device *dev,
if (is_static(dax_region))
return -EINVAL;
- device_lock(dev);
+ rc = down_read_interruptible(&dax_region_rwsem);
+ if (rc)
+ return rc;
youngest = dax_region->youngest;
- rc = sprintf(buf, "%s\n", youngest ? dev_name(youngest) : "");
- device_unlock(dev);
+ rc = sysfs_emit(buf, "%s\n", youngest ? dev_name(youngest) : "");
+ up_read(&dax_region_rwsem);
return rc;
}
+static struct dev_dax *__devm_create_dev_dax(struct dev_dax_data *data);
+
static ssize_t create_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
@@ -358,7 +386,9 @@ static ssize_t create_store(struct device *dev, struct device_attribute *attr,
if (val != 1)
return -EINVAL;
- device_lock(dev);
+ rc = down_write_killable(&dax_region_rwsem);
+ if (rc)
+ return rc;
avail = dax_region_avail_size(dax_region);
if (avail == 0)
rc = -ENOSPC;
@@ -369,7 +399,7 @@ static ssize_t create_store(struct device *dev, struct device_attribute *attr,
.id = -1,
.memmap_on_memory = false,
};
- struct dev_dax *dev_dax = devm_create_dev_dax(&data);
+ struct dev_dax *dev_dax = __devm_create_dev_dax(&data);
if (IS_ERR(dev_dax))
rc = PTR_ERR(dev_dax);
@@ -387,7 +417,7 @@ static ssize_t create_store(struct device *dev, struct device_attribute *attr,
rc = len;
}
}
- device_unlock(dev);
+ up_write(&dax_region_rwsem);
return rc;
}
@@ -417,7 +447,7 @@ static void trim_dev_dax_range(struct dev_dax *dev_dax)
struct range *range = &dev_dax->ranges[i].range;
struct dax_region *dax_region = dev_dax->region;
- device_lock_assert(dax_region->dev);
+ WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i,
(unsigned long long)range->start,
(unsigned long long)range->end);
@@ -435,7 +465,7 @@ static void free_dev_dax_ranges(struct dev_dax *dev_dax)
trim_dev_dax_range(dev_dax);
}
-static void unregister_dev_dax(void *dev)
+static void __unregister_dev_dax(void *dev)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
@@ -447,6 +477,17 @@ static void unregister_dev_dax(void *dev)
put_device(dev);
}
+static void unregister_dev_dax(void *dev)
+{
+ if (rwsem_is_locked(&dax_region_rwsem))
+ return __unregister_dev_dax(dev);
+
+ if (WARN_ON_ONCE(down_write_killable(&dax_region_rwsem) != 0))
+ return;
+ __unregister_dev_dax(dev);
+ up_write(&dax_region_rwsem);
+}
+
static void dax_region_free(struct kref *kref)
{
struct dax_region *dax_region;
@@ -463,11 +504,10 @@ static void dax_region_put(struct dax_region *dax_region)
/* a return value >= 0 indicates this invocation invalidated the id */
static int __free_dev_dax_id(struct dev_dax *dev_dax)
{
- struct device *dev = &dev_dax->dev;
struct dax_region *dax_region;
int rc = dev_dax->id;
- device_lock_assert(dev);
+ WARN_ON_ONCE(!rwsem_is_locked(&dax_dev_rwsem));
if (!dev_dax->dyn_id || dev_dax->id < 0)
return -1;
@@ -480,12 +520,13 @@ static int __free_dev_dax_id(struct dev_dax *dev_dax)
static int free_dev_dax_id(struct dev_dax *dev_dax)
{
- struct device *dev = &dev_dax->dev;
int rc;
- device_lock(dev);
+ rc = down_write_killable(&dax_dev_rwsem);
+ if (rc)
+ return rc;
rc = __free_dev_dax_id(dev_dax);
- device_unlock(dev);
+ up_write(&dax_dev_rwsem);
return rc;
}
@@ -519,8 +560,14 @@ static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
if (!victim)
return -ENXIO;
- device_lock(dev);
- device_lock(victim);
+ rc = down_write_killable(&dax_region_rwsem);
+ if (rc)
+ return rc;
+ rc = down_write_killable(&dax_dev_rwsem);
+ if (rc) {
+ up_write(&dax_region_rwsem);
+ return rc;
+ }
dev_dax = to_dev_dax(victim);
if (victim->driver || dev_dax_size(dev_dax))
rc = -EBUSY;
@@ -541,12 +588,12 @@ static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
} else
rc = -EBUSY;
}
- device_unlock(victim);
+ up_write(&dax_dev_rwsem);
/* won the race to invalidate the device, clean it up */
if (do_del)
devm_release_action(dev, unregister_dev_dax, victim);
- device_unlock(dev);
+ up_write(&dax_region_rwsem);
put_device(victim);
return rc;
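The store paths in this file now nest the two rwsems in a fixed order, region before device, and release them in reverse. A minimal sketch of that ordering as a standalone helper (illustrative only; the driver open-codes it at each call site):

static int dax_lock_region_then_dev_sketch(void)
{
	int rc;

	rc = down_write_killable(&dax_region_rwsem);
	if (rc)
		return rc;

	rc = down_write_killable(&dax_dev_rwsem);
	if (rc)
		up_write(&dax_region_rwsem);	/* preserve ordering on failure */

	return rc;
}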
@@ -658,16 +705,15 @@ static void dax_mapping_release(struct device *dev)
put_device(parent);
}
-static void unregister_dax_mapping(void *data)
+static void __unregister_dax_mapping(void *data)
{
struct device *dev = data;
struct dax_mapping *mapping = to_dax_mapping(dev);
struct dev_dax *dev_dax = to_dev_dax(dev->parent);
- struct dax_region *dax_region = dev_dax->region;
dev_dbg(dev, "%s\n", __func__);
- device_lock_assert(dax_region->dev);
+ WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
dev_dax->ranges[mapping->range_id].mapping = NULL;
mapping->range_id = -1;
@@ -675,28 +721,37 @@ static void unregister_dax_mapping(void *data)
device_unregister(dev);
}
+static void unregister_dax_mapping(void *data)
+{
+ if (rwsem_is_locked(&dax_region_rwsem))
+ return __unregister_dax_mapping(data);
+
+ if (WARN_ON_ONCE(down_write_killable(&dax_region_rwsem) != 0))
+ return;
+ __unregister_dax_mapping(data);
+ up_write(&dax_region_rwsem);
+}
+
static struct dev_dax_range *get_dax_range(struct device *dev)
{
struct dax_mapping *mapping = to_dax_mapping(dev);
struct dev_dax *dev_dax = to_dev_dax(dev->parent);
- struct dax_region *dax_region = dev_dax->region;
+ int rc;
- device_lock(dax_region->dev);
+ rc = down_write_killable(&dax_region_rwsem);
+ if (rc)
+ return NULL;
if (mapping->range_id < 0) {
- device_unlock(dax_region->dev);
+ up_write(&dax_region_rwsem);
return NULL;
}
return &dev_dax->ranges[mapping->range_id];
}
-static void put_dax_range(struct dev_dax_range *dax_range)
+static void put_dax_range(void)
{
- struct dax_mapping *mapping = dax_range->mapping;
- struct dev_dax *dev_dax = to_dev_dax(mapping->dev.parent);
- struct dax_region *dax_region = dev_dax->region;
-
- device_unlock(dax_region->dev);
+ up_write(&dax_region_rwsem);
}
static ssize_t start_show(struct device *dev,
@@ -708,8 +763,8 @@ static ssize_t start_show(struct device *dev,
dax_range = get_dax_range(dev);
if (!dax_range)
return -ENXIO;
- rc = sprintf(buf, "%#llx\n", dax_range->range.start);
- put_dax_range(dax_range);
+ rc = sysfs_emit(buf, "%#llx\n", dax_range->range.start);
+ put_dax_range();
return rc;
}
@@ -724,8 +779,8 @@ static ssize_t end_show(struct device *dev,
dax_range = get_dax_range(dev);
if (!dax_range)
return -ENXIO;
- rc = sprintf(buf, "%#llx\n", dax_range->range.end);
- put_dax_range(dax_range);
+ rc = sysfs_emit(buf, "%#llx\n", dax_range->range.end);
+ put_dax_range();
return rc;
}
@@ -740,8 +795,8 @@ static ssize_t pgoff_show(struct device *dev,
dax_range = get_dax_range(dev);
if (!dax_range)
return -ENXIO;
- rc = sprintf(buf, "%#lx\n", dax_range->pgoff);
- put_dax_range(dax_range);
+ rc = sysfs_emit(buf, "%#lx\n", dax_range->pgoff);
+ put_dax_range();
return rc;
}
@@ -775,7 +830,7 @@ static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
struct device *dev;
int rc;
- device_lock_assert(dax_region->dev);
+ WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver,
"region disabled\n"))
@@ -821,7 +876,7 @@ static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
struct resource *alloc;
int i, rc;
- device_lock_assert(dax_region->dev);
+ WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
/* handle the seed alloc special case */
if (!size) {
@@ -875,13 +930,12 @@ static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, r
{
int last_range = dev_dax->nr_range - 1;
struct dev_dax_range *dax_range = &dev_dax->ranges[last_range];
- struct dax_region *dax_region = dev_dax->region;
bool is_shrink = resource_size(res) > size;
struct range *range = &dax_range->range;
struct device *dev = &dev_dax->dev;
int rc;
- device_lock_assert(dax_region->dev);
+ WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
if (dev_WARN_ONCE(dev, !size, "deletion is handled by dev_dax_shrink\n"))
return -EINVAL;
@@ -907,12 +961,15 @@ static ssize_t size_show(struct device *dev,
{
struct dev_dax *dev_dax = to_dev_dax(dev);
unsigned long long size;
+ int rc;
- device_lock(dev);
+ rc = down_write_killable(&dax_dev_rwsem);
+ if (rc)
+ return rc;
size = dev_dax_size(dev_dax);
- device_unlock(dev);
+ up_write(&dax_dev_rwsem);
- return sprintf(buf, "%llu\n", size);
+ return sysfs_emit(buf, "%llu\n", size);
}
static bool alloc_is_aligned(struct dev_dax *dev_dax, resource_size_t size)
@@ -1080,17 +1137,27 @@ static ssize_t size_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- device_lock(dax_region->dev);
+ rc = down_write_killable(&dax_region_rwsem);
+ if (rc)
+ return rc;
if (!dax_region->dev->driver) {
- device_unlock(dax_region->dev);
- return -ENXIO;
+ rc = -ENXIO;
+ goto err_region;
}
- device_lock(dev);
+ rc = down_write_killable(&dax_dev_rwsem);
+ if (rc)
+ goto err_dev;
+
rc = dev_dax_resize(dax_region, dev_dax, val);
- device_unlock(dev);
- device_unlock(dax_region->dev);
- return rc == 0 ? len : rc;
+err_dev:
+ up_write(&dax_dev_rwsem);
+err_region:
+ up_write(&dax_region_rwsem);
+
+ if (rc == 0)
+ return len;
+ return rc;
}
static DEVICE_ATTR_RW(size);
@@ -1138,18 +1205,24 @@ static ssize_t mapping_store(struct device *dev, struct device_attribute *attr,
return rc;
rc = -ENXIO;
- device_lock(dax_region->dev);
+ rc = down_write_killable(&dax_region_rwsem);
+ if (rc)
+ return rc;
if (!dax_region->dev->driver) {
- device_unlock(dax_region->dev);
+ up_write(&dax_region_rwsem);
+ return rc;
+ }
+ rc = down_write_killable(&dax_dev_rwsem);
+ if (rc) {
+ up_write(&dax_region_rwsem);
return rc;
}
- device_lock(dev);
to_alloc = range_len(&r);
if (alloc_is_aligned(dev_dax, to_alloc))
rc = alloc_dev_dax_range(dev_dax, r.start, to_alloc);
- device_unlock(dev);
- device_unlock(dax_region->dev);
+ up_write(&dax_dev_rwsem);
+ up_write(&dax_region_rwsem);
return rc == 0 ? len : rc;
}
@@ -1160,7 +1233,7 @@ static ssize_t align_show(struct device *dev,
{
struct dev_dax *dev_dax = to_dev_dax(dev);
- return sprintf(buf, "%d\n", dev_dax->align);
+ return sysfs_emit(buf, "%d\n", dev_dax->align);
}
static ssize_t dev_dax_validate_align(struct dev_dax *dev_dax)
@@ -1196,13 +1269,19 @@ static ssize_t align_store(struct device *dev, struct device_attribute *attr,
if (!dax_align_valid(val))
return -EINVAL;
- device_lock(dax_region->dev);
+ rc = down_write_killable(&dax_region_rwsem);
+ if (rc)
+ return rc;
if (!dax_region->dev->driver) {
- device_unlock(dax_region->dev);
+ up_write(&dax_region_rwsem);
return -ENXIO;
}
- device_lock(dev);
+ rc = down_write_killable(&dax_dev_rwsem);
+ if (rc) {
+ up_write(&dax_region_rwsem);
+ return rc;
+ }
if (dev->driver) {
rc = -EBUSY;
goto out_unlock;
@@ -1214,8 +1293,8 @@ static ssize_t align_store(struct device *dev, struct device_attribute *attr,
if (rc)
dev_dax->align = align_save;
out_unlock:
- device_unlock(dev);
- device_unlock(dax_region->dev);
+ up_write(&dax_dev_rwsem);
+ up_write(&dax_region_rwsem);
return rc == 0 ? len : rc;
}
static DEVICE_ATTR_RW(align);
@@ -1232,7 +1311,7 @@ static ssize_t target_node_show(struct device *dev,
{
struct dev_dax *dev_dax = to_dev_dax(dev);
- return sprintf(buf, "%d\n", dev_dax_target_node(dev_dax));
+ return sysfs_emit(buf, "%d\n", dev_dax_target_node(dev_dax));
}
static DEVICE_ATTR_RO(target_node);
@@ -1248,7 +1327,7 @@ static ssize_t resource_show(struct device *dev,
else
start = dev_dax->ranges[0].range.start;
- return sprintf(buf, "%#llx\n", start);
+ return sysfs_emit(buf, "%#llx\n", start);
}
static DEVICE_ATTR(resource, 0400, resource_show, NULL);
@@ -1259,17 +1338,59 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
* We only ever expect to handle device-dax instances, i.e. the
* @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
*/
- return sprintf(buf, DAX_DEVICE_MODALIAS_FMT "\n", 0);
+ return sysfs_emit(buf, DAX_DEVICE_MODALIAS_FMT "\n", 0);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t numa_node_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", dev_to_node(dev));
+ return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);
+static ssize_t memmap_on_memory_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+
+ return sysfs_emit(buf, "%d\n", dev_dax->memmap_on_memory);
+}
+
+static ssize_t memmap_on_memory_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+ bool val;
+ int rc;
+
+ rc = kstrtobool(buf, &val);
+ if (rc)
+ return rc;
+
+ if (val == true && !mhp_supports_memmap_on_memory()) {
+ dev_dbg(dev, "memmap_on_memory is not available\n");
+ return -EOPNOTSUPP;
+ }
+
+ rc = down_write_killable(&dax_dev_rwsem);
+ if (rc)
+ return rc;
+
+ if (dev_dax->memmap_on_memory != val && dev->driver &&
+ to_dax_drv(dev->driver)->type == DAXDRV_KMEM_TYPE) {
+ up_write(&dax_dev_rwsem);
+ return -EBUSY;
+ }
+
+ dev_dax->memmap_on_memory = val;
+ up_write(&dax_dev_rwsem);
+
+ return len;
+}
+static DEVICE_ATTR_RW(memmap_on_memory);
+
static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -1296,6 +1417,7 @@ static struct attribute *dev_dax_attributes[] = {
&dev_attr_align.attr,
&dev_attr_resource.attr,
&dev_attr_numa_node.attr,
+ &dev_attr_memmap_on_memory.attr,
NULL,
};
@@ -1325,7 +1447,7 @@ static const struct device_type dev_dax_type = {
.groups = dax_attribute_groups,
};
-struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
+static struct dev_dax *__devm_create_dev_dax(struct dev_dax_data *data)
{
struct dax_region *dax_region = data->dax_region;
struct device *parent = dax_region->dev;
@@ -1440,6 +1562,21 @@ err_id:
return ERR_PTR(rc);
}
+
+struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
+{
+ struct dev_dax *dev_dax;
+ int rc;
+
+ rc = down_write_killable(&dax_region_rwsem);
+ if (rc)
+ return ERR_PTR(rc);
+
+ dev_dax = __devm_create_dev_dax(data);
+ up_write(&dax_region_rwsem);
+
+ return dev_dax;
+}
EXPORT_SYMBOL_GPL(devm_create_dev_dax);
int __dax_driver_register(struct dax_device_driver *dax_drv,
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index a0244f6bb44bd..aca71d7fccc1f 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -13,6 +13,7 @@
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>
+#include <linux/cacheinfo.h>
#include "dax-private.h"
/**
@@ -319,6 +320,11 @@ EXPORT_SYMBOL_GPL(dax_alive);
* that any fault handlers or operations that might have seen
* dax_alive(), have completed. Any operations that start after
* synchronize_srcu() has run will abort upon seeing !dax_alive().
+ *
+ * Note, because alloc_dax() returns an ERR_PTR() on error, callers
+ * typically store its result into a local variable in order to check
+ * the result. Therefore, care must be taken to populate the struct
+ * device dax_dev field to make sure the dax_dev is not leaked.
*/
void kill_dax(struct dax_device *dax_dev)
{
@@ -446,6 +452,14 @@ struct dax_device *alloc_dax(void *private, const struct dax_operations *ops)
dev_t devt;
int minor;
+ /*
+ * Unavailable on architectures with virtually aliased data caches,
+ * except for device-dax (NULL operations pointer), which does
+ * not use aliased mappings from the kernel.
+ */
+ if (ops && cpu_dcache_is_aliasing())
+ return ERR_PTR(-EOPNOTSUPP);
+
if (WARN_ON_ONCE(ops && !ops->zero_page_range))
return ERR_PTR(-EINVAL);
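A minimal sketch of the calling convention the kill_dax() comment above asks for, assuming a hypothetical driver context 'struct foo' with a dax_dev member and a hypothetical foo_dax_ops table: the ERR_PTR() result stays in a local until it has been checked, so an error value is never left behind in the long-lived structure.

static int foo_setup_dax_sketch(struct foo *foo)
{
	struct dax_device *dax_dev;

	dax_dev = alloc_dax(foo, &foo_dax_ops);
	if (IS_ERR(dax_dev))
		return PTR_ERR(dax_dev);

	foo->dax_dev = dax_dev;		/* publish only on success */
	return 0;
}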
diff --git a/drivers/dio/dio-driver.c b/drivers/dio/dio-driver.c
index 69c46935ffc78..2d9fa6011945d 100644
--- a/drivers/dio/dio-driver.c
+++ b/drivers/dio/dio-driver.c
@@ -123,7 +123,7 @@ static int dio_bus_match(struct device *dev, struct device_driver *drv)
}
-struct bus_type dio_bus_type = {
+const struct bus_type dio_bus_type = {
.name = "dio",
.match = dio_bus_match,
.probe = dio_device_probe,
diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c
index 9c2a0c082a768..ed4b323886e43 100644
--- a/drivers/dma-buf/st-dma-fence-chain.c
+++ b/drivers/dma-buf/st-dma-fence-chain.c
@@ -84,11 +84,11 @@ static int sanitycheck(void *arg)
return -ENOMEM;
chain = mock_chain(NULL, f, 1);
- if (!chain)
+ if (chain)
+ dma_fence_enable_sw_signaling(chain);
+ else
err = -ENOMEM;
- dma_fence_enable_sw_signaling(chain);
-
dma_fence_signal(f);
dma_fence_put(f);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e928f2ca0f1e9..002a5ec806207 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -643,16 +643,16 @@ config TEGRA20_APB_DMA
config TEGRA210_ADMA
tristate "NVIDIA Tegra210 ADMA support"
- depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST)
+ depends on (ARCH_TEGRA || COMPILE_TEST)
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
- Support for the NVIDIA Tegra210 ADMA controller driver. The
- DMA controller has multiple DMA channels and is used to service
- various audio clients in the Tegra210 audio processing engine
- (APE). This DMA controller transfers data from memory to
- peripheral and vice versa. It does not support memory to
- memory data transfer.
+ Support for the NVIDIA Tegra210/Tegra186/Tegra194/Tegra234 ADMA
+ controller driver. The DMA controller has multiple DMA channels
+ and is used to service various audio clients in the Tegra210
+ audio processing engine (APE). This DMA controller transfers
+ data from memory to peripheral and vice versa. It does not
+ support memory to memory data transfer.
config TIMB_DMA
tristate "Timberdale FPGA DMA support"
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index eea8bd33b4b73..fbf048f432bf8 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2239,7 +2239,7 @@ static int pl08x_resume(struct dma_chan *chan)
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
struct pl08x_dma_chan *plchan;
- char *name = chan_id;
+ const char *name = chan_id;
/* Reject channels for devices not bound to this driver */
if (chan->device->dev->driver != &pl08x_amba_driver.drv)
diff --git a/drivers/dma/bestcomm/sram.c b/drivers/dma/bestcomm/sram.c
index 0553956f74569..ad74d57cc3abe 100644
--- a/drivers/dma/bestcomm/sram.c
+++ b/drivers/dma/bestcomm/sram.c
@@ -90,13 +90,8 @@ int bcom_sram_init(struct device_node *sram_node, char *owner)
bcom_sram->rh = rh_create(4);
/* Attach the free zones */
-#if 0
- /* Currently disabled ... for future use only */
- reg_addr_p = of_get_property(sram_node, "available", &psize);
-#else
regaddr_p = NULL;
psize = 0;
-#endif
if (!regaddr_p || !psize) {
/* Attach the whole zone */
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 793f1a7ad5e34..b18faa7cfedb9 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -97,8 +97,8 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
* ch_mux: With the exception of 0, attempts to write a value
* already in use will be forced to 0.
*/
- if (!edma_readl_chreg(fsl_chan, ch_mux))
- edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
+ if (!edma_readl(fsl_chan->edma, fsl_chan->mux_addr))
+ edma_writel(fsl_chan->edma, fsl_chan->srcid, fsl_chan->mux_addr);
}
val = edma_readl_chreg(fsl_chan, ch_csr);
@@ -134,7 +134,7 @@ static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
flags = fsl_edma_drvflags(fsl_chan);
if (flags & FSL_EDMA_DRV_HAS_CHMUX)
- edma_writel_chreg(fsl_chan, 0, ch_mux);
+ edma_writel(fsl_chan->edma, 0, fsl_chan->mux_addr);
val &= ~EDMA_V3_CH_CSR_ERQ;
edma_writel_chreg(fsl_chan, val, ch_csr);
@@ -351,39 +351,45 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
{
struct fsl_edma_desc *edesc = fsl_chan->edesc;
enum dma_transfer_direction dir = edesc->dirn;
- dma_addr_t cur_addr, dma_addr;
+ dma_addr_t cur_addr, dma_addr, old_addr;
size_t len, size;
u32 nbytes = 0;
int i;
/* calculate the total size in this desc */
for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
- nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
+ nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
- len += nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ len += nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);
}
if (!in_progress)
return len;
- if (dir == DMA_MEM_TO_DEV)
- cur_addr = edma_read_tcdreg(fsl_chan, saddr);
- else
- cur_addr = edma_read_tcdreg(fsl_chan, daddr);
+ /* A 64-bit read is not atomic; retry when the high 32 bits change between reads */
+ do {
+ if (dir == DMA_MEM_TO_DEV) {
+ old_addr = edma_read_tcdreg(fsl_chan, saddr);
+ cur_addr = edma_read_tcdreg(fsl_chan, saddr);
+ } else {
+ old_addr = edma_read_tcdreg(fsl_chan, daddr);
+ cur_addr = edma_read_tcdreg(fsl_chan, daddr);
+ }
+ } while (upper_32_bits(cur_addr) != upper_32_bits(old_addr));
/* figure out the finished and calculate the residue */
for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
- nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
+ nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
- size = nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ size = nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);
if (dir == DMA_MEM_TO_DEV)
- dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
+ dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, saddr);
else
- dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
+ dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, daddr);
len -= size;
if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
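The loop above guards against torn reads of the 64-bit saddr/daddr registers, which the hardware exposes as two 32-bit halves. The same idea as a generic sketch, assuming <linux/io-64-nonatomic-lo-hi.h> provides the non-atomic 64-bit read:

static u64 read_stable_u64_sketch(void __iomem *reg)
{
	u64 prev, cur;

	do {
		prev = lo_hi_readq(reg);
		cur = lo_hi_readq(reg);
		/* retry while the high word moved between the two reads */
	} while (upper_32_bits(prev) != upper_32_bits(cur));

	return cur;
}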
@@ -426,8 +432,7 @@ enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
return fsl_chan->status;
}
-static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
- struct fsl_edma_hw_tcd *tcd)
+static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, void *tcd)
{
u16 csr = 0;
@@ -439,26 +444,26 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
*/
edma_write_tcdreg(fsl_chan, 0, csr);
- edma_write_tcdreg(fsl_chan, tcd->saddr, saddr);
- edma_write_tcdreg(fsl_chan, tcd->daddr, daddr);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, saddr);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, daddr);
- edma_write_tcdreg(fsl_chan, tcd->attr, attr);
- edma_write_tcdreg(fsl_chan, tcd->soff, soff);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, attr);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, soff);
- edma_write_tcdreg(fsl_chan, tcd->nbytes, nbytes);
- edma_write_tcdreg(fsl_chan, tcd->slast, slast);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, nbytes);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, slast);
- edma_write_tcdreg(fsl_chan, tcd->citer, citer);
- edma_write_tcdreg(fsl_chan, tcd->biter, biter);
- edma_write_tcdreg(fsl_chan, tcd->doff, doff);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, citer);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, biter);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, doff);
- edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, dlast_sga);
- csr = le16_to_cpu(tcd->csr);
+ csr = fsl_edma_get_tcd_to_cpu(fsl_chan, tcd, csr);
if (fsl_chan->is_sw) {
csr |= EDMA_TCD_CSR_START;
- tcd->csr = cpu_to_le16(csr);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
}
/*
@@ -473,14 +478,14 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr);
- edma_write_tcdreg(fsl_chan, tcd->csr, csr);
+ edma_cp_tcd_to_reg(fsl_chan, tcd, csr);
}
static inline
void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
- struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
- u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
- u16 biter, u16 doff, u32 dlast_sga, bool major_int,
+ struct fsl_edma_hw_tcd *tcd, dma_addr_t src, dma_addr_t dst,
+ u16 attr, u16 soff, u32 nbytes, dma_addr_t slast, u16 citer,
+ u16 biter, u16 doff, dma_addr_t dlast_sga, bool major_int,
bool disable_req, bool enable_sg)
{
struct dma_slave_config *cfg = &fsl_chan->cfg;
@@ -493,12 +498,12 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
* So we put the value in little endian in memory, waiting
* for fsl_edma_set_tcd_regs doing the swap.
*/
- tcd->saddr = cpu_to_le32(src);
- tcd->daddr = cpu_to_le32(dst);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, src, saddr);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, dst, daddr);
- tcd->attr = cpu_to_le16(attr);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, attr, attr);
- tcd->soff = cpu_to_le16(soff);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, soff, soff);
if (fsl_chan->is_multi_fifo) {
/* set mloff to support multiple fifo */
@@ -515,15 +520,16 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
}
}
- tcd->nbytes = cpu_to_le32(nbytes);
- tcd->slast = cpu_to_le32(slast);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, nbytes, nbytes);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, slast, slast);
+
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_CITER_CITER(citer), citer);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, doff, doff);
- tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
- tcd->doff = cpu_to_le16(doff);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, dlast_sga, dlast_sga);
- tcd->dlast_sga = cpu_to_le32(dlast_sga);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_BITER_BITER(biter), biter);
- tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
if (major_int)
csr |= EDMA_TCD_CSR_INT_MAJOR;
@@ -539,7 +545,7 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
if (fsl_chan->is_sw)
csr |= EDMA_TCD_CSR_START;
- tcd->csr = cpu_to_le16(csr);
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
}
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
@@ -580,8 +586,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
dma_addr_t dma_buf_next;
bool major_int = true;
int sg_len, i;
- u32 src_addr, dst_addr, last_sg, nbytes;
+ dma_addr_t src_addr, dst_addr, last_sg;
u16 soff, doff, iter;
+ u32 nbytes;
if (!is_slave_direction(direction))
return NULL;
@@ -653,8 +660,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
struct fsl_edma_desc *fsl_desc;
struct scatterlist *sg;
- u32 src_addr, dst_addr, last_sg, nbytes;
+ dma_addr_t src_addr, dst_addr, last_sg;
u16 soff, doff, iter;
+ u32 nbytes;
int i;
if (!is_slave_direction(direction))
@@ -803,7 +811,8 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
- sizeof(struct fsl_edma_hw_tcd),
+ fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ?
+ sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
32, 0);
return 0;
}
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index f5e216b157c75..7bf0aba471a8c 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -88,6 +88,20 @@ struct fsl_edma_hw_tcd {
__le16 biter;
};
+struct fsl_edma_hw_tcd64 {
+ __le64 saddr;
+ __le16 soff;
+ __le16 attr;
+ __le32 nbytes;
+ __le64 slast;
+ __le64 daddr;
+ __le64 dlast_sga;
+ __le16 doff;
+ __le16 citer;
+ __le16 csr;
+ __le16 biter;
+} __packed;
+
struct fsl_edma3_ch_reg {
__le32 ch_csr;
__le32 ch_es;
@@ -97,7 +111,10 @@ struct fsl_edma3_ch_reg {
__le32 ch_mux;
__le32 ch_mattr; /* edma4, reserved for edma3 */
__le32 ch_reserved;
- struct fsl_edma_hw_tcd tcd;
+ union {
+ struct fsl_edma_hw_tcd tcd;
+ struct fsl_edma_hw_tcd64 tcd64;
+ };
} __packed;
/*
@@ -126,7 +143,7 @@ struct edma_regs {
struct fsl_edma_sw_tcd {
dma_addr_t ptcd;
- struct fsl_edma_hw_tcd *vtcd;
+ void *vtcd;
};
struct fsl_edma_chan {
@@ -145,7 +162,8 @@ struct fsl_edma_chan {
u32 dma_dev_size;
enum dma_data_direction dma_dir;
char chan_name[32];
- struct fsl_edma_hw_tcd __iomem *tcd;
+ void __iomem *tcd;
+ void __iomem *mux_addr;
u32 real_count;
struct work_struct issue_worker;
struct platform_device *pdev;
@@ -188,6 +206,7 @@ struct fsl_edma_desc {
#define FSL_EDMA_DRV_CLEAR_DONE_E_SG BIT(13)
/* Need clean CHn_CSR DONE before enable TCD's MAJORELINK */
#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK BIT(14)
+#define FSL_EDMA_DRV_TCD64 BIT(15)
#define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \
FSL_EDMA_DRV_BUS_8BYTE | \
@@ -207,6 +226,8 @@ struct fsl_edma_drvdata {
u32 chreg_off;
u32 chreg_space_sz;
u32 flags;
+ u32 mux_off; /* channel mux register offset */
+ u32 mux_skip; /* how much skip for each channel */
int (*setup_irq)(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma);
};
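The two new fields describe where each channel's mux register lives relative to the controller base; the probe hunk further down consumes them as:

	fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off +
			     i * drvdata->mux_skip;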
@@ -229,23 +250,108 @@ struct fsl_edma_engine {
struct fsl_edma_chan chans[] __counted_by(n_chans);
};
-#define edma_read_tcdreg(chan, __name) \
-(sizeof(chan->tcd->__name) == sizeof(u32) ? \
- edma_readl(chan->edma, &chan->tcd->__name) : \
- edma_readw(chan->edma, &chan->tcd->__name))
-
-#define edma_write_tcdreg(chan, val, __name) \
-(sizeof(chan->tcd->__name) == sizeof(u32) ? \
- edma_writel(chan->edma, (u32 __force)val, &chan->tcd->__name) : \
- edma_writew(chan->edma, (u16 __force)val, &chan->tcd->__name))
+#define edma_read_tcdreg_c(chan, _tcd, __name) \
+(sizeof((_tcd)->__name) == sizeof(u64) ? \
+ edma_readq(chan->edma, &(_tcd)->__name) : \
+ ((sizeof((_tcd)->__name) == sizeof(u32)) ? \
+ edma_readl(chan->edma, &(_tcd)->__name) : \
+ edma_readw(chan->edma, &(_tcd)->__name) \
+ ))
+
+#define edma_read_tcdreg(chan, __name) \
+((fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) ? \
+ edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd64 __iomem *)chan->tcd), __name) : \
+ edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd __iomem *)chan->tcd), __name) \
+)
+
+#define edma_write_tcdreg_c(chan, _tcd, _val, __name) \
+do { \
+ switch (sizeof(_tcd->__name)) { \
+ case sizeof(u64): \
+ edma_writeq(chan->edma, (u64 __force)_val, &_tcd->__name); \
+ break; \
+ case sizeof(u32): \
+ edma_writel(chan->edma, (u32 __force)_val, &_tcd->__name); \
+ break; \
+ case sizeof(u16): \
+ edma_writew(chan->edma, (u16 __force)_val, &_tcd->__name); \
+ break; \
+ case sizeof(u8): \
+ edma_writeb(chan->edma, (u8 __force)_val, &_tcd->__name); \
+ break; \
+ } \
+} while (0)
+
+#define edma_write_tcdreg(chan, val, __name) \
+do { \
+ struct fsl_edma_hw_tcd64 __iomem *tcd64_r = (struct fsl_edma_hw_tcd64 __iomem *)chan->tcd; \
+ struct fsl_edma_hw_tcd __iomem *tcd_r = (struct fsl_edma_hw_tcd __iomem *)chan->tcd; \
+ \
+ if (fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) \
+ edma_write_tcdreg_c(chan, tcd64_r, val, __name); \
+ else \
+ edma_write_tcdreg_c(chan, tcd_r, val, __name); \
+} while (0)
+
+#define edma_cp_tcd_to_reg(chan, __tcd, __name) \
+do { \
+ struct fsl_edma_hw_tcd64 __iomem *tcd64_r = (struct fsl_edma_hw_tcd64 __iomem *)chan->tcd; \
+ struct fsl_edma_hw_tcd __iomem *tcd_r = (struct fsl_edma_hw_tcd __iomem *)chan->tcd; \
+ struct fsl_edma_hw_tcd64 *tcd64_m = (struct fsl_edma_hw_tcd64 *)__tcd; \
+ struct fsl_edma_hw_tcd *tcd_m = (struct fsl_edma_hw_tcd *)__tcd; \
+ \
+ if (fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) \
+ edma_write_tcdreg_c(chan, tcd64_r, tcd64_m->__name, __name); \
+ else \
+ edma_write_tcdreg_c(chan, tcd_r, tcd_m->__name, __name); \
+} while (0)
#define edma_readl_chreg(chan, __name) \
edma_readl(chan->edma, \
- (void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))
+ (void __iomem *)&(container_of(((__force void *)chan->tcd),\
+ struct fsl_edma3_ch_reg, tcd)->__name))
#define edma_writel_chreg(chan, val, __name) \
edma_writel(chan->edma, val, \
- (void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))
+ (void __iomem *)&(container_of(((__force void *)chan->tcd),\
+ struct fsl_edma3_ch_reg, tcd)->__name))
+
+#define fsl_edma_get_tcd(_chan, _tcd, _field) \
+(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? (((struct fsl_edma_hw_tcd64 *)_tcd)->_field) : \
+ (((struct fsl_edma_hw_tcd *)_tcd)->_field))
+
+#define fsl_edma_le_to_cpu(x) \
+(sizeof(x) == sizeof(u64) ? le64_to_cpu((__force __le64)(x)) : \
+ (sizeof(x) == sizeof(u32) ? le32_to_cpu((__force __le32)(x)) : \
+ le16_to_cpu((__force __le16)(x))))
+
+#define fsl_edma_get_tcd_to_cpu(_chan, _tcd, _field) \
+(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? \
+ fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd64 *)_tcd)->_field) : \
+ fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd *)_tcd)->_field))
+
+#define fsl_edma_set_tcd_to_le_c(_tcd, _val, _field) \
+do { \
+ switch (sizeof((_tcd)->_field)) { \
+ case sizeof(u64): \
+ *(__force __le64 *)(&((_tcd)->_field)) = cpu_to_le64(_val); \
+ break; \
+ case sizeof(u32): \
+ *(__force __le32 *)(&((_tcd)->_field)) = cpu_to_le32(_val); \
+ break; \
+ case sizeof(u16): \
+ *(__force __le16 *)(&((_tcd)->_field)) = cpu_to_le16(_val); \
+ break; \
+ } \
+} while (0)
+
+#define fsl_edma_set_tcd_to_le(_chan, _tcd, _val, _field) \
+do { \
+ if (fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64) \
+ fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd64 *)_tcd, _val, _field); \
+ else \
+ fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd *)_tcd, _val, _field); \
+} while (0)
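A minimal illustration of how the width dispatch above resolves for the legacy 32-bit TCD layout (a channel without FSL_EDMA_DRV_TCD64), using the nbytes field as the example:

	/* fsl_edma_set_tcd_to_le(fsl_chan, tcd, nbytes, nbytes) reduces to: */
	*(__force __le32 *)&((struct fsl_edma_hw_tcd *)tcd)->nbytes = cpu_to_le32(nbytes);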
/*
* R/W functions for big- or little-endian registers:
@@ -253,6 +359,21 @@ struct fsl_edma_engine {
* For the big-endian IP module, the offset for 8-bit or 16-bit registers
* should also be swapped opposite to that in little-endian IP.
*/
+static inline u64 edma_readq(struct fsl_edma_engine *edma, void __iomem *addr)
+{
+ u64 l, h;
+
+ if (edma->big_endian) {
+ l = ioread32be(addr);
+ h = ioread32be(addr + 4);
+ } else {
+ l = ioread32(addr);
+ h = ioread32(addr + 4);
+ }
+
+ return (h << 32) | l;
+}
+
static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
{
if (edma->big_endian)
@@ -298,6 +419,18 @@ static inline void edma_writel(struct fsl_edma_engine *edma,
iowrite32(val, addr);
}
+static inline void edma_writeq(struct fsl_edma_engine *edma,
+ u64 val, void __iomem *addr)
+{
+ if (edma->big_endian) {
+ iowrite32be(val & 0xFFFFFFFF, addr);
+ iowrite32be(val >> 32, addr + 4);
+ } else {
+ iowrite32(val & 0xFFFFFFFF, addr);
+ iowrite32(val >> 32, addr + 4);
+ }
+}
+
static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
{
return container_of(chan, struct fsl_edma_chan, vchan.chan);
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index d36e28b9c767a..402f0058a180c 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -360,6 +360,18 @@ static struct fsl_edma_drvdata imx93_data4 = {
.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4,
.chreg_space_sz = 0x8000,
.chreg_off = 0x10000,
+ .mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
+ .mux_skip = 0x8000,
+ .setup_irq = fsl_edma3_irq_init,
+};
+
+static struct fsl_edma_drvdata imx95_data5 = {
+ .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4 |
+ FSL_EDMA_DRV_TCD64,
+ .chreg_space_sz = 0x8000,
+ .chreg_off = 0x10000,
+ .mux_off = 0x200,
+ .mux_skip = sizeof(u32),
.setup_irq = fsl_edma3_irq_init,
};
@@ -371,6 +383,7 @@ static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data},
{ .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
{ .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
+ { .compatible = "fsl,imx95-edma5", .data = &imx95_data5},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
@@ -511,6 +524,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
return ret;
}
+ if (drvdata->flags & FSL_EDMA_DRV_TCD64)
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+
INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
for (i = 0; i < fsl_edma->n_chans; i++) {
struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
@@ -533,11 +549,12 @@ static int fsl_edma_probe(struct platform_device *pdev)
offsetof(struct fsl_edma3_ch_reg, tcd) : 0;
fsl_chan->tcd = fsl_edma->membase
+ i * drvdata->chreg_space_sz + drvdata->chreg_off + len;
+ fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip;
fsl_chan->pdev = pdev;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
- edma_write_tcdreg(fsl_chan, 0, csr);
+ edma_write_tcdreg(fsl_chan, cpu_to_le32(0), csr);
fsl_edma_chan_mux(fsl_chan, 0, false);
}
diff --git a/drivers/dma/idxd/bus.c b/drivers/dma/idxd/bus.c
index 0c9e689a2e77d..b83b27e04f2a4 100644
--- a/drivers/dma/idxd/bus.c
+++ b/drivers/dma/idxd/bus.c
@@ -72,7 +72,7 @@ static int idxd_bus_uevent(const struct device *dev, struct kobj_uevent_env *env
return add_uevent_var(env, "MODALIAS=" IDXD_DEVICES_MODALIAS_FMT, 0);
}
-struct bus_type dsa_bus_type = {
+const struct bus_type dsa_bus_type = {
.name = "dsa",
.match = idxd_config_bus_match,
.probe = idxd_config_bus_probe,
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index e5a94a93a3cc4..8078ab9acfbc3 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -152,7 +152,7 @@ static void idxd_file_dev_release(struct device *dev)
mutex_unlock(&wq->wq_lock);
}
-static struct device_type idxd_cdev_file_type = {
+static const struct device_type idxd_cdev_file_type = {
.name = "idxd_file",
.release = idxd_file_dev_release,
.groups = cdev_file_attribute_groups,
@@ -169,7 +169,7 @@ static void idxd_cdev_dev_release(struct device *dev)
kfree(idxd_cdev);
}
-static struct device_type idxd_cdev_device_type = {
+static const struct device_type idxd_cdev_device_type = {
.name = "idxd_cdev",
.release = idxd_cdev_dev_release,
};
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index d0f5db6cf1eda..a4099a1e2340f 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -282,7 +282,7 @@ typedef int (*load_device_defaults_fn_t) (struct idxd_device *idxd);
struct idxd_driver_data {
const char *name_prefix;
enum idxd_type type;
- struct device_type *dev_type;
+ const struct device_type *dev_type;
int compl_size;
int align;
int evl_cr_off;
@@ -515,15 +515,15 @@ static inline void idxd_set_user_intr(struct idxd_device *idxd, bool enable)
iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
}
-extern struct bus_type dsa_bus_type;
+extern const struct bus_type dsa_bus_type;
extern bool support_enqcmd;
extern struct ida idxd_ida;
-extern struct device_type dsa_device_type;
-extern struct device_type iax_device_type;
-extern struct device_type idxd_wq_device_type;
-extern struct device_type idxd_engine_device_type;
-extern struct device_type idxd_group_device_type;
+extern const struct device_type dsa_device_type;
+extern const struct device_type iax_device_type;
+extern const struct device_type idxd_wq_device_type;
+extern const struct device_type idxd_engine_device_type;
+extern const struct device_type idxd_group_device_type;
static inline bool is_dsa_dev(struct idxd_dev *idxd_dev)
{
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 523ae0dff7d4a..7f28f01be672b 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -91,7 +91,7 @@ static void idxd_conf_engine_release(struct device *dev)
kfree(engine);
}
-struct device_type idxd_engine_device_type = {
+const struct device_type idxd_engine_device_type = {
.name = "engine",
.release = idxd_conf_engine_release,
.groups = idxd_engine_attribute_groups,
@@ -577,7 +577,7 @@ static void idxd_conf_group_release(struct device *dev)
kfree(group);
}
-struct device_type idxd_group_device_type = {
+const struct device_type idxd_group_device_type = {
.name = "group",
.release = idxd_conf_group_release,
.groups = idxd_group_attribute_groups,
@@ -1369,7 +1369,7 @@ static void idxd_conf_wq_release(struct device *dev)
kfree(wq);
}
-struct device_type idxd_wq_device_type = {
+const struct device_type idxd_wq_device_type = {
.name = "wq",
.release = idxd_conf_wq_release,
.groups = idxd_wq_attribute_groups,
@@ -1798,13 +1798,13 @@ static void idxd_conf_device_release(struct device *dev)
kfree(idxd);
}
-struct device_type dsa_device_type = {
+const struct device_type dsa_device_type = {
.name = "dsa",
.release = idxd_conf_device_release,
.groups = idxd_attribute_groups,
};
-struct device_type iax_device_type = {
+const struct device_type iax_device_type = {
.name = "iax",
.release = idxd_conf_device_release,
.groups = idxd_attribute_groups,
diff --git a/drivers/dma/mcf-edma-main.c b/drivers/dma/mcf-edma-main.c
index ab21455d9c3a4..dba6317838767 100644
--- a/drivers/dma/mcf-edma-main.c
+++ b/drivers/dma/mcf-edma-main.c
@@ -202,7 +202,7 @@ static int mcf_edma_probe(struct platform_device *pdev)
vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
mcf_chan->tcd = mcf_edma->membase + EDMA_TCD
+ i * sizeof(struct fsl_edma_hw_tcd);
- iowrite32(0x0, &mcf_chan->tcd->csr);
+ edma_write_tcdreg(mcf_chan, cpu_to_le32(0), csr);
}
iowrite32(~0, regs->inth);
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 775a7f408b9a0..e588fff9f21d2 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -29,7 +29,7 @@ static DEFINE_MUTEX(of_dma_lock);
 * to the DMA data stored is returned. A NULL pointer is returned if no match is
* found.
*/
-static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec)
+static struct of_dma *of_dma_find_controller(const struct of_phandle_args *dma_spec)
{
struct of_dma *ofdma;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index c29744bfdf2c2..5f6d7f1e095f9 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2588,6 +2588,7 @@ static struct dma_pl330_desc *pluck_desc(struct list_head *pool,
desc->status = PREP;
desc->txd.callback = NULL;
+ desc->txd.callback_result = NULL;
}
spin_unlock_irqrestore(lock, flags);
diff --git a/drivers/dma/ti/k3-psil-j721s2.c b/drivers/dma/ti/k3-psil-j721s2.c
index 1d5430fc5724d..ba08bdcdcd2b6 100644
--- a/drivers/dma/ti/k3-psil-j721s2.c
+++ b/drivers/dma/ti/k3-psil-j721s2.c
@@ -57,6 +57,14 @@
}, \
}
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
static struct psil_ep j721s2_src_ep_map[] = {
/* PDMA_MCASP - McASP0-4 */
@@ -114,6 +122,71 @@ static struct psil_ep j721s2_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4707),
PSIL_PDMA_XY_PKT(0x4708),
PSIL_PDMA_XY_PKT(0x4709),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x4940),
+ PSIL_CSI2RX(0x4941),
+ PSIL_CSI2RX(0x4942),
+ PSIL_CSI2RX(0x4943),
+ PSIL_CSI2RX(0x4944),
+ PSIL_CSI2RX(0x4945),
+ PSIL_CSI2RX(0x4946),
+ PSIL_CSI2RX(0x4947),
+ PSIL_CSI2RX(0x4948),
+ PSIL_CSI2RX(0x4949),
+ PSIL_CSI2RX(0x494a),
+ PSIL_CSI2RX(0x494b),
+ PSIL_CSI2RX(0x494c),
+ PSIL_CSI2RX(0x494d),
+ PSIL_CSI2RX(0x494e),
+ PSIL_CSI2RX(0x494f),
+ PSIL_CSI2RX(0x4950),
+ PSIL_CSI2RX(0x4951),
+ PSIL_CSI2RX(0x4952),
+ PSIL_CSI2RX(0x4953),
+ PSIL_CSI2RX(0x4954),
+ PSIL_CSI2RX(0x4955),
+ PSIL_CSI2RX(0x4956),
+ PSIL_CSI2RX(0x4957),
+ PSIL_CSI2RX(0x4958),
+ PSIL_CSI2RX(0x4959),
+ PSIL_CSI2RX(0x495a),
+ PSIL_CSI2RX(0x495b),
+ PSIL_CSI2RX(0x495c),
+ PSIL_CSI2RX(0x495d),
+ PSIL_CSI2RX(0x495e),
+ PSIL_CSI2RX(0x495f),
+ PSIL_CSI2RX(0x4960),
+ PSIL_CSI2RX(0x4961),
+ PSIL_CSI2RX(0x4962),
+ PSIL_CSI2RX(0x4963),
+ PSIL_CSI2RX(0x4964),
+ PSIL_CSI2RX(0x4965),
+ PSIL_CSI2RX(0x4966),
+ PSIL_CSI2RX(0x4967),
+ PSIL_CSI2RX(0x4968),
+ PSIL_CSI2RX(0x4969),
+ PSIL_CSI2RX(0x496a),
+ PSIL_CSI2RX(0x496b),
+ PSIL_CSI2RX(0x496c),
+ PSIL_CSI2RX(0x496d),
+ PSIL_CSI2RX(0x496e),
+ PSIL_CSI2RX(0x496f),
+ PSIL_CSI2RX(0x4970),
+ PSIL_CSI2RX(0x4971),
+ PSIL_CSI2RX(0x4972),
+ PSIL_CSI2RX(0x4973),
+ PSIL_CSI2RX(0x4974),
+ PSIL_CSI2RX(0x4975),
+ PSIL_CSI2RX(0x4976),
+ PSIL_CSI2RX(0x4977),
+ PSIL_CSI2RX(0x4978),
+ PSIL_CSI2RX(0x4979),
+ PSIL_CSI2RX(0x497a),
+ PSIL_CSI2RX(0x497b),
+ PSIL_CSI2RX(0x497c),
+ PSIL_CSI2RX(0x497d),
+ PSIL_CSI2RX(0x497e),
+ PSIL_CSI2RX(0x497f),
/* MAIN SA2UL */
PSIL_SA2UL(0x4a40, 0),
PSIL_SA2UL(0x4a41, 0),
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index c278d5facf7d8..c9b93055dc9d3 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -111,6 +111,35 @@ static int of_k3_udma_glue_parse(struct device_node *udmax_np,
return 0;
}
+static int of_k3_udma_glue_parse_chn_common(struct k3_udma_glue_common *common, u32 thread_id,
+ bool tx_chn)
+{
+ if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
+ return -EINVAL;
+
+ if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
+ return -EINVAL;
+
+ /* get psil endpoint config */
+ common->ep_config = psil_get_ep_config(thread_id);
+ if (IS_ERR(common->ep_config)) {
+ dev_err(common->dev,
+ "No configuration for psi-l thread 0x%04x\n",
+ thread_id);
+ return PTR_ERR(common->ep_config);
+ }
+
+ common->epib = common->ep_config->needs_epib;
+ common->psdata_size = common->ep_config->psd_size;
+
+ if (tx_chn)
+ common->dst_thread = thread_id;
+ else
+ common->src_thread = thread_id;
+
+ return 0;
+}
+
static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
const char *name, struct k3_udma_glue_common *common,
bool tx_chn)
@@ -153,38 +182,32 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
common->atype_asel = dma_spec.args[1];
}
- if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
- ret = -EINVAL;
- goto out_put_spec;
- }
+ ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
- if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
- ret = -EINVAL;
- goto out_put_spec;
- }
+out_put_spec:
+ of_node_put(dma_spec.np);
+ return ret;
+}
- /* get psil endpoint config */
- common->ep_config = psil_get_ep_config(thread_id);
- if (IS_ERR(common->ep_config)) {
- dev_err(common->dev,
- "No configuration for psi-l thread 0x%04x\n",
- thread_id);
- ret = PTR_ERR(common->ep_config);
- goto out_put_spec;
- }
+static int
+of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glue_common *common,
+ bool tx_chn, u32 thread_id)
+{
+ int ret = 0;
- common->epib = common->ep_config->needs_epib;
- common->psdata_size = common->ep_config->psd_size;
+ if (unlikely(!udmax_np))
+ return -EINVAL;
- if (tx_chn)
- common->dst_thread = thread_id;
- else
- common->src_thread = thread_id;
+ ret = of_k3_udma_glue_parse(udmax_np, common);
+ if (ret)
+ goto out_put_spec;
+
+ ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
out_put_spec:
- of_node_put(dma_spec.np);
+ of_node_put(udmax_np);
return ret;
-};
+}
static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
@@ -251,29 +274,13 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}
-struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
- const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
+static int
+k3_udma_glue_request_tx_chn_common(struct device *dev,
+ struct k3_udma_glue_tx_channel *tx_chn,
+ struct k3_udma_glue_tx_channel_cfg *cfg)
{
- struct k3_udma_glue_tx_channel *tx_chn;
int ret;
- tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
- if (!tx_chn)
- return ERR_PTR(-ENOMEM);
-
- tx_chn->common.dev = dev;
- tx_chn->common.swdata_size = cfg->swdata_size;
- tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
- tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
- tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
- tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
-
- /* parse of udmap channel */
- ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
- &tx_chn->common, true);
- if (ret)
- goto err;
-
tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
tx_chn->common.psdata_size,
tx_chn->common.swdata_size);
@@ -289,7 +296,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
if (IS_ERR(tx_chn->udma_tchanx)) {
ret = PTR_ERR(tx_chn->udma_tchanx);
dev_err(dev, "UDMAX tchanx get err %d\n", ret);
- goto err;
+ return ret;
}
tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
@@ -302,7 +309,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
dev_err(dev, "Channel Device registration failed %d\n", ret);
put_device(&tx_chn->common.chan_dev);
tx_chn->common.chan_dev.parent = NULL;
- goto err;
+ return ret;
}
if (xudma_is_pktdma(tx_chn->common.udmax)) {
@@ -326,7 +333,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
&tx_chn->ringtxcq);
if (ret) {
dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
- goto err;
+ return ret;
}
/* Set the dma_dev for the rings to be configured */
@@ -342,13 +349,13 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
if (ret) {
dev_err(dev, "Failed to cfg ringtx %d\n", ret);
- goto err;
+ return ret;
}
ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
if (ret) {
dev_err(dev, "Failed to cfg ringtx %d\n", ret);
- goto err;
+ return ret;
}
/* request and cfg psi-l */
@@ -359,11 +366,42 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
ret = k3_udma_glue_cfg_tx_chn(tx_chn);
if (ret) {
dev_err(dev, "Failed to cfg tchan %d\n", ret);
- goto err;
+ return ret;
}
k3_udma_glue_dump_tx_chn(tx_chn);
+ return 0;
+}
+
+struct k3_udma_glue_tx_channel *
+k3_udma_glue_request_tx_chn(struct device *dev, const char *name,
+ struct k3_udma_glue_tx_channel_cfg *cfg)
+{
+ struct k3_udma_glue_tx_channel *tx_chn;
+ int ret;
+
+ tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
+ if (!tx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ tx_chn->common.dev = dev;
+ tx_chn->common.swdata_size = cfg->swdata_size;
+ tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
+ tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
+ tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
+ tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
+
+ /* parse of udmap channel */
+ ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+ &tx_chn->common, true);
+ if (ret)
+ goto err;
+
+ ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
+ if (ret)
+ goto err;
+
return tx_chn;
err:
@@ -372,6 +410,41 @@ err:
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
+struct k3_udma_glue_tx_channel *
+k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev,
+ struct k3_udma_glue_tx_channel_cfg *cfg,
+ struct device_node *udmax_np, u32 thread_id)
+{
+ struct k3_udma_glue_tx_channel *tx_chn;
+ int ret;
+
+ tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
+ if (!tx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ tx_chn->common.dev = dev;
+ tx_chn->common.swdata_size = cfg->swdata_size;
+ tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
+ tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
+ tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
+ tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
+
+ ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &tx_chn->common, true, thread_id);
+ if (ret)
+ goto err;
+
+ ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
+ if (ret)
+ goto err;
+
+ return tx_chn;
+
+err:
+ k3_udma_glue_release_tx_chn(tx_chn);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn_for_thread_id);
+
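A minimal sketch of how a client driver might use the new entry point, assuming it already holds the DMA controller's device_node and knows its destination PSI-L thread ID from SoC data; the 0xc940 value and the helper name are purely illustrative, and the ring configuration normally carried in the cfg is omitted:

static int client_request_tx_sketch(struct device *dev,
				    struct device_node *udmax_np)
{
	struct k3_udma_glue_tx_channel_cfg cfg = { };
	struct k3_udma_glue_tx_channel *tx_chn;

	cfg.swdata_size = 16;	/* illustrative */

	tx_chn = k3_udma_glue_request_tx_chn_for_thread_id(dev, &cfg,
							   udmax_np, 0xc940);
	if (IS_ERR(tx_chn))
		return PTR_ERR(tx_chn);

	return 0;
}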
void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
if (tx_chn->psil_paired) {
@@ -1000,12 +1073,59 @@ err:
return ERR_PTR(ret);
}
+static int
+k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn,
+ struct k3_udma_glue_rx_channel_cfg *cfg,
+ struct device *dev)
+{
+ int ret, i;
+
+ rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
+ rx_chn->common.psdata_size,
+ rx_chn->common.swdata_size);
+
+ rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
+ sizeof(*rx_chn->flows), GFP_KERNEL);
+ if (!rx_chn->flows)
+ return -ENOMEM;
+
+ rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
+ rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
+ dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x-0x%02x",
+ rx_chn->common.src_thread, rx_chn->flow_id_base);
+ ret = device_register(&rx_chn->common.chan_dev);
+ if (ret) {
+ dev_err(dev, "Channel Device registration failed %d\n", ret);
+ put_device(&rx_chn->common.chan_dev);
+ rx_chn->common.chan_dev.parent = NULL;
+ return ret;
+ }
+
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ /* prepare the channel device as coherent */
+ rx_chn->common.chan_dev.dma_coherent = true;
+ dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
+ DMA_BIT_MASK(48));
+ }
+
+ ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < rx_chn->flow_num; i++)
+ rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
+
+ k3_udma_glue_dump_rx_chn(rx_chn);
+
+ return 0;
+}
+
static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
struct k3_udma_glue_rx_channel_cfg *cfg)
{
struct k3_udma_glue_rx_channel *rx_chn;
- int ret, i;
+ int ret;
if (cfg->flow_id_num <= 0 ||
cfg->flow_id_use_rxchan_id ||
@@ -1036,44 +1156,55 @@ k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
if (ret)
goto err;
- rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
- rx_chn->common.psdata_size,
- rx_chn->common.swdata_size);
-
- rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
- sizeof(*rx_chn->flows), GFP_KERNEL);
- if (!rx_chn->flows) {
- ret = -ENOMEM;
+ ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
+ if (ret)
goto err;
- }
- rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
- rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
- dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x",
- rx_chn->common.src_thread);
- ret = device_register(&rx_chn->common.chan_dev);
- if (ret) {
- dev_err(dev, "Channel Device registration failed %d\n", ret);
- put_device(&rx_chn->common.chan_dev);
- rx_chn->common.chan_dev.parent = NULL;
- goto err;
- }
+ return rx_chn;
- if (xudma_is_pktdma(rx_chn->common.udmax)) {
- /* prepare the channel device as coherent */
- rx_chn->common.chan_dev.dma_coherent = true;
- dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
- DMA_BIT_MASK(48));
- }
+err:
+ k3_udma_glue_release_rx_chn(rx_chn);
+ return ERR_PTR(ret);
+}
- ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_remote_rx_chn_for_thread_id(struct device *dev,
+ struct k3_udma_glue_rx_channel_cfg *cfg,
+ struct device_node *udmax_np, u32 thread_id)
+{
+ struct k3_udma_glue_rx_channel *rx_chn;
+ int ret;
+
+ if (cfg->flow_id_num <= 0 ||
+ cfg->flow_id_use_rxchan_id ||
+ cfg->def_flow_cfg ||
+ cfg->flow_id_base < 0)
+ return ERR_PTR(-EINVAL);
+
+ /*
+	 * The remote RX channel is under control of the remote CPU core, so
+	 * Linux can only request and manipulate it via its dedicated RX flows.
+ */
+
+ rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
+ if (!rx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ rx_chn->common.dev = dev;
+ rx_chn->common.swdata_size = cfg->swdata_size;
+ rx_chn->remote = true;
+ rx_chn->udma_rchan_id = -1;
+ rx_chn->flow_num = cfg->flow_id_num;
+ rx_chn->flow_id_base = cfg->flow_id_base;
+ rx_chn->psil_paired = false;
+
+ ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &rx_chn->common, false, thread_id);
if (ret)
goto err;
- for (i = 0; i < rx_chn->flow_num; i++)
- rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
-
- k3_udma_glue_dump_rx_chn(rx_chn);
+ ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
+ if (ret)
+ goto err;
return rx_chn;
@@ -1081,6 +1212,7 @@ err:
k3_udma_glue_release_rx_chn(rx_chn);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_remote_rx_chn_for_thread_id);
struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
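For orientation, a minimal, hypothetical caller of the thread-id based TX request helper added above. The field and function names are taken from the hunks in this diff; the ring sizes, swdata size, and the way the DMA controller device-tree node and thread id are obtained are illustrative assumptions, not part of the patch.

/* Hypothetical client sketch, not part of the patch. */
#include <linux/dma/k3-udma-glue.h>
#include <linux/err.h>
#include <linux/of.h>

static int example_request_tx_by_thread(struct device *dev,
					 struct device_node *udmax_np,
					 u32 thread_id)
{
	struct k3_udma_glue_tx_channel_cfg cfg = { };
	struct k3_udma_glue_tx_channel *tx_chn;

	cfg.swdata_size = 16;		/* illustrative */
	cfg.tx_cfg.size = 128;		/* illustrative ring sizes */
	cfg.txcq_cfg.size = 128;

	tx_chn = k3_udma_glue_request_tx_chn_for_thread_id(dev, &cfg,
							    udmax_np, thread_id);
	if (IS_ERR(tx_chn))
		return PTR_ERR(tx_chn);

	/* ... set up descriptors and push work here ... */

	k3_udma_glue_release_tx_chn(tx_chn);
	return 0;
}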
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index e40696f6f8647..5eb51ae93e89d 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -112,7 +112,9 @@
/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE 0x0000
+#define XILINX_DMA_VSIZE_MASK GENMASK(12, 0)
#define XILINX_DMA_REG_HSIZE 0x0004
+#define XILINX_DMA_HSIZE_MASK GENMASK(15, 0)
#define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
@@ -2050,6 +2052,10 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
if (!xt->numf || !xt->sgl[0].size)
return NULL;
+ if (xt->numf & ~XILINX_DMA_VSIZE_MASK ||
+ xt->sgl[0].size & ~XILINX_DMA_HSIZE_MASK)
+ return NULL;
+
if (xt->frame_size != 1)
return NULL;
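As a hedged reading of the new bounds check: VSIZE is a 13-bit register field and HSIZE a 16-bit field, so preparing an interleaved template whose frame count or first-chunk size does not fit those fields now fails up front instead of being silently truncated when written to the hardware. A sketch of a template that would now be rejected (field names per <linux/dmaengine.h>; the sizes are illustrative):

/* Illustrative only, not part of the patch. */
#include <linux/dmaengine.h>
#include <linux/overflow.h>
#include <linux/slab.h>

static struct dma_interleaved_template *example_oversized_template(void)
{
	struct dma_interleaved_template *xt;

	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->numf = 8192;	/* exceeds the 13-bit VSIZE field (max 8191) */
	xt->frame_size = 1;
	xt->sgl[0].size = 4096;	/* fits the 16-bit HSIZE field */

	/* xilinx_vdma_dma_prep_interleaved() now returns NULL for this xt. */
	return xt;
}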
diff --git a/drivers/dpll/Kconfig b/drivers/dpll/Kconfig
index a4cae73f20d3d..20607ed542435 100644
--- a/drivers/dpll/Kconfig
+++ b/drivers/dpll/Kconfig
@@ -4,4 +4,4 @@
#
config DPLL
- bool
+ bool
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 7d3346b3a2bf3..e6cdb905eeaca 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -322,7 +322,8 @@ static ssize_t show_immediate(struct device *dev,
if (value < 0)
return -ENOENT;
- return snprintf(buf, buf ? PAGE_SIZE : 0, "0x%06x\n", value);
+	// Note that this function is also called by init_fw_attribute_group() with a NULL pointer.
+ return buf ? sysfs_emit(buf, "0x%06x\n", value) : 0;
}
#define IMMEDIATE_ATTR(name, key) \
@@ -357,6 +358,7 @@ static ssize_t show_text_leaf(struct device *dev,
}
}
+	// Note that this function is also called by init_fw_attribute_group() with a NULL pointer.
if (buf) {
bufsize = PAGE_SIZE - 1;
} else {
@@ -490,7 +492,7 @@ static ssize_t is_local_show(struct device *dev,
{
struct fw_device *device = fw_device(dev);
- return sprintf(buf, "%u\n", device->is_local);
+ return sysfs_emit(buf, "%u\n", device->is_local);
}
static int units_sprintf(char *buf, const u32 *directory)
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 7bc71f4be64a0..38d19410a2be6 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2060,6 +2060,8 @@ static void bus_reset_work(struct work_struct *work)
ohci->generation = generation;
reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
+ if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
+ reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
if (ohci->quirks & QUIRK_RESET_PACKET)
ohci->request_generation = generation;
@@ -2125,12 +2127,14 @@ static irqreturn_t irq_handler(int irq, void *data)
return IRQ_NONE;
/*
- * busReset and postedWriteErr must not be cleared yet
+ * busReset and postedWriteErr events must not be cleared yet
* (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
*/
reg_write(ohci, OHCI1394_IntEventClear,
event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
log_irqs(ohci, event);
+ if (event & OHCI1394_busReset)
+ reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
if (event & OHCI1394_selfIDComplete)
queue_work(selfid_workqueue, &ohci->bus_reset_work);
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index f2556a8e94015..9bc2e10381afd 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -790,7 +790,7 @@ static void ffa_notification_info_get(void)
part_id = packed_id_list[ids_processed++];
- if (!ids_count[list]) { /* Global Notification */
+ if (ids_count[list] == 1) { /* Global Notification */
__do_sched_recv_cb(part_id, 0, false);
continue;
}
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 8e832d1ad8251..345fff167b52f 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -871,6 +871,9 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
else
freq = dom->opp[idx].indicative_freq * dom->mult_factor;
+ /* All OPPs above the sustained frequency are treated as turbo */
+ data.turbo = freq > dom->sustained_freq_khz * 1000;
+
data.level = dom->opp[idx].perf;
data.freq = freq;
diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c
index ea9201e7044cb..1fa79bba492e8 100644
--- a/drivers/firmware/arm_scmi/powercap.c
+++ b/drivers/firmware/arm_scmi/powercap.c
@@ -736,7 +736,7 @@ static void scmi_powercap_domain_init_fc(const struct scmi_protocol_handle *ph,
ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
POWERCAP_PAI_GET, 4, domain,
&fc[POWERCAP_FC_PAI].get_addr, NULL,
- &fc[POWERCAP_PAI_GET].rate_limit);
+ &fc[POWERCAP_FC_PAI].rate_limit);
*p_fc = fc;
}
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index 3505735185033..130d13e9cd6be 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -921,7 +921,7 @@ static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
rd->raw = raw;
filp->private_data = rd;
- return 0;
+ return nonseekable_open(inode, filp);
}
static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
@@ -950,6 +950,7 @@ static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
.open = scmi_dbg_raw_mode_open,
.release = scmi_dbg_raw_mode_release,
.write = scmi_dbg_raw_mode_reset_write,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -959,6 +960,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_fops = {
.read = scmi_dbg_raw_mode_message_read,
.write = scmi_dbg_raw_mode_message_write,
.poll = scmi_dbg_raw_mode_message_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -975,6 +977,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
.read = scmi_dbg_raw_mode_message_read,
.write = scmi_dbg_raw_mode_message_async_write,
.poll = scmi_dbg_raw_mode_message_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -998,6 +1001,7 @@ static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
.release = scmi_dbg_raw_mode_release,
.read = scmi_test_dbg_raw_mode_notif_read,
.poll = scmi_test_dbg_raw_mode_notif_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -1021,6 +1025,7 @@ static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
.release = scmi_dbg_raw_mode_release,
.read = scmi_test_dbg_raw_mode_errors_read,
.poll = scmi_test_dbg_raw_mode_errors_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 79d4254d1f9bc..9f3d665cfdcf7 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -522,7 +522,7 @@ void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
{
cs_dsp_debugfs_clear(dsp);
debugfs_remove_recursive(dsp->debugfs_root);
- dsp->debugfs_root = NULL;
+ dsp->debugfs_root = ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, FW_CS_DSP);
#else
@@ -2246,6 +2246,11 @@ static int cs_dsp_common_init(struct cs_dsp *dsp)
mutex_init(&dsp->pwr_lock);
+#ifdef CONFIG_DEBUG_FS
+	/* Ensure this is invalid if the client never provides a debugfs root */
+ dsp->debugfs_root = ERR_PTR(-ENODEV);
+#endif
+
return 0;
}
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 9b3884ff81e69..7d2cdd9e22274 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -445,8 +445,8 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
printk("%saer_uncor_severity: 0x%08x\n",
pfx, aer->uncor_severity);
printk("%sTLP Header: %08x %08x %08x %08x\n", pfx,
- aer->header_log.dw0, aer->header_log.dw1,
- aer->header_log.dw2, aer->header_log.dw3);
+ aer->header_log.dw[0], aer->header_log.dw[1],
+ aer->header_log.dw[2], aer->header_log.dw[3]);
}
}
diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c
index f80a9af3d16e9..d18a1a5de1449 100644
--- a/drivers/firmware/efi/earlycon.c
+++ b/drivers/firmware/efi/earlycon.c
@@ -252,7 +252,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device,
if (si->lfb_depth != 32)
return -ENODEV;
- font = get_default_font(xres, yres, -1, -1);
+ font = get_default_font(xres, yres, NULL, NULL);
if (!font)
return -ENODEV;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 8859fb0b006d3..fdf07dd6f4591 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -203,6 +203,8 @@ static bool generic_ops_supported(void)
name_size = sizeof(name);
+ if (!efi.get_next_variable)
+ return false;
status = efi.get_next_variable(&name_size, &name, &guid);
if (status == EFI_UNSUPPORTED)
return false;
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 73f4810f6db38..31eb1e287ce16 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -105,7 +105,7 @@ lib-y := $(patsubst %.o,%.stub.o,$(lib-y))
# Even when -mbranch-protection=none is set, Clang will generate a
# .note.gnu.property for code-less object files (like lib/ctype.c),
# so work around this by explicitly removing the unwanted section.
-# https://bugs.llvm.org/show_bug.cgi?id=46480
+# https://llvm.org/pr46480
STUBCOPY_FLAGS-y += --remove-section=.note.gnu.property
STUBCOPY_RELOC-$(CONFIG_X86_32) := R_386_32
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 4e96a855fdf47..c41e7b2091cdd 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -120,7 +120,7 @@ efi_status_t efi_random_alloc(unsigned long size,
continue;
}
- target = round_up(md->phys_addr, align) + target_slot * align;
+ target = round_up(max_t(u64, md->phys_addr, alloc_min), align) + target_slot * align;
pages = size / EFI_PAGE_SIZE;
status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 4f448d4df7b82..d5a8182cf2e1c 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -21,6 +21,8 @@
#include "efistub.h"
#include "x86-stub.h"
+extern char _bss[], _ebss[];
+
const efi_system_table_t *efi_system_table;
const efi_dxe_services_table_t *efi_dxe_table;
static efi_loaded_image_t *image = NULL;
@@ -474,6 +476,9 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
efi_status_t status;
char *cmdline_ptr;
+ if (efi_is_native())
+ memset(_bss, 0, _ebss - _bss);
+
efi_system_table = sys_table_arg;
/* Check if we were booted by the EFI firmware */
@@ -491,6 +496,7 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
hdr->vid_mode = 0xffff;
hdr->type_of_loader = 0x21;
+ hdr->initrd_addr_max = INT_MAX;
/* Convert unicode cmdline to ascii */
cmdline_ptr = efi_convert_cmdline(image, &options_size);
@@ -970,8 +976,6 @@ fail:
void efi_handover_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
struct boot_params *boot_params)
{
- extern char _bss[], _ebss[];
-
memset(_bss, 0, _ebss - _bss);
efi_stub_entry(handle, sys_table_arg, boot_params);
}
diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
index 456d0e5eaf78b..cc807ed35aedf 100644
--- a/drivers/firmware/efi/sysfb_efi.c
+++ b/drivers/firmware/efi/sysfb_efi.c
@@ -336,7 +336,7 @@ static int efifb_add_links(struct fwnode_handle *fwnode)
if (!sup_np)
return 0;
- fwnode_link_add(fwnode, of_fwnode_handle(sup_np));
+ fwnode_link_add(fwnode, of_fwnode_handle(sup_np), 0);
of_node_put(sup_np);
return 0;
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 03da9a4354f88..5f43dfa22f799 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -37,7 +37,7 @@
#include <uapi/linux/qemu_fw_cfg.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
-#include <linux/crash_core.h>
+#include <linux/vmcore_info.h>
MODULE_AUTHOR("Gabriel L. Somlo <somlo@cmu.edu>");
MODULE_DESCRIPTION("QEMU fw_cfg sysfs support");
@@ -67,7 +67,7 @@ static void fw_cfg_sel_endianness(u16 key)
iowrite16(key, fw_cfg_reg_ctrl);
}
-#ifdef CONFIG_CRASH_CORE
+#ifdef CONFIG_VMCORE_INFO
static inline bool fw_cfg_dma_enabled(void)
{
return (fw_cfg_rev & FW_CFG_VERSION_DMA) && fw_cfg_reg_dma;
@@ -156,7 +156,7 @@ static ssize_t fw_cfg_read_blob(u16 key,
return count;
}
-#ifdef CONFIG_CRASH_CORE
+#ifdef CONFIG_VMCORE_INFO
/* write chunk of given fw_cfg blob (caller responsible for sanity-check) */
static ssize_t fw_cfg_write_blob(u16 key,
void *buf, loff_t pos, size_t count)
@@ -195,7 +195,7 @@ end:
return ret;
}
-#endif /* CONFIG_CRASH_CORE */
+#endif /* CONFIG_VMCORE_INFO */
/* clean up fw_cfg device i/o */
static void fw_cfg_io_cleanup(void)
@@ -319,7 +319,7 @@ struct fw_cfg_sysfs_entry {
struct list_head list;
};
-#ifdef CONFIG_CRASH_CORE
+#ifdef CONFIG_VMCORE_INFO
static ssize_t fw_cfg_write_vmcoreinfo(const struct fw_cfg_file *f)
{
static struct fw_cfg_vmcoreinfo *data;
@@ -343,7 +343,7 @@ static ssize_t fw_cfg_write_vmcoreinfo(const struct fw_cfg_file *f)
kfree(data);
return ret;
}
-#endif /* CONFIG_CRASH_CORE */
+#endif /* CONFIG_VMCORE_INFO */
/* get fw_cfg_sysfs_entry from kobject member */
static inline struct fw_cfg_sysfs_entry *to_entry(struct kobject *kobj)
@@ -583,7 +583,7 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
int err;
struct fw_cfg_sysfs_entry *entry;
-#ifdef CONFIG_CRASH_CORE
+#ifdef CONFIG_VMCORE_INFO
if (fw_cfg_dma_enabled() &&
strcmp(f->name, FW_CFG_VMCOREINFO_FILENAME) == 0 &&
!is_kdump_kernel()) {
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 79789f0563f6a..9bc45357e1a80 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -3,6 +3,7 @@
* Xilinx Zynq MPSoC Firmware layer
*
* Copyright (C) 2014-2022 Xilinx, Inc.
+ * Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc.
*
* Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -1385,6 +1386,30 @@ int zynqmp_pm_aes_engine(const u64 address, u32 *out)
EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine);
/**
+ * zynqmp_pm_efuse_access - Provides access to efuse memory.
+ * @address: Address of the efuse params structure
+ * @out: Returned output value
+ *
+ * Return: Returns status, either success or error code.
+ */
+int zynqmp_pm_efuse_access(const u64 address, u32 *out)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!out)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_EFUSE_ACCESS, ret_payload, 2,
+ upper_32_bits(address),
+ lower_32_bits(address));
+ *out = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_efuse_access);
+
+/**
* zynqmp_pm_sha_hash - Access the SHA engine to calculate the hash
* @address: Address of the data/ Address of output buffer where
* hash should be stored.
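A hedged sketch of a caller of the new EEMI wrapper above. The signature comes from this hunk; the declaration is assumed to land in <linux/firmware/xlnx-zynqmp.h> with the rest of the series, and the layout of the efuse parameter structure is defined by the PMU firmware rather than by this patch, so the buffer handling below is purely illustrative.

/* Hypothetical caller, not part of the patch. */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware/xlnx-zynqmp.h>

static int example_efuse_access(struct device *dev, void *params, size_t len)
{
	dma_addr_t dma_addr;
	u32 status;
	int ret;

	/* The params layout is firmware-defined; hand it to the PMU by address. */
	dma_addr = dma_map_single(dev, params, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	ret = zynqmp_pm_efuse_access(dma_addr, &status);
	dma_unmap_single(dev, dma_addr, len, DMA_BIDIRECTIONAL);
	if (ret)
		return ret;

	dev_dbg(dev, "efuse access status: %u\n", status);
	return 0;
}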
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index e6d12fbab653f..094ee97ea26cb 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -327,7 +327,7 @@ static struct attribute *dfl_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(dfl_dev);
-static struct bus_type dfl_bus_type = {
+static const struct bus_type dfl_bus_type = {
.name = "dfl",
.match = dfl_bus_match,
.probe = dfl_bus_probe,
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index a024be2b84e29..79c473b3c7c3d 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -30,7 +30,7 @@ int fpga_bridge_enable(struct fpga_bridge *bridge)
{
dev_dbg(&bridge->dev, "enable\n");
- if (bridge->br_ops && bridge->br_ops->enable_set)
+ if (bridge->br_ops->enable_set)
return bridge->br_ops->enable_set(bridge, 1);
return 0;
@@ -48,7 +48,7 @@ int fpga_bridge_disable(struct fpga_bridge *bridge)
{
dev_dbg(&bridge->dev, "disable\n");
- if (bridge->br_ops && bridge->br_ops->enable_set)
+ if (bridge->br_ops->enable_set)
return bridge->br_ops->enable_set(bridge, 0);
return 0;
@@ -296,7 +296,7 @@ static ssize_t state_show(struct device *dev,
struct fpga_bridge *bridge = to_fpga_bridge(dev);
int state = 1;
- if (bridge->br_ops && bridge->br_ops->enable_show) {
+ if (bridge->br_ops->enable_show) {
state = bridge->br_ops->enable_show(bridge);
if (state < 0)
return state;
@@ -401,7 +401,7 @@ void fpga_bridge_unregister(struct fpga_bridge *bridge)
* If the low level driver provides a method for putting bridge into
* a desired state upon unregister, do it.
*/
- if (bridge->br_ops && bridge->br_ops->fpga_bridge_remove)
+ if (bridge->br_ops->fpga_bridge_remove)
bridge->br_ops->fpga_bridge_remove(bridge);
device_unregister(&bridge->dev);
diff --git a/drivers/gnss/serial.c b/drivers/gnss/serial.c
index baa956494e79f..0e43bf6294f87 100644
--- a/drivers/gnss/serial.c
+++ b/drivers/gnss/serial.c
@@ -80,7 +80,7 @@ static const struct gnss_operations gnss_serial_gnss_ops = {
.write_raw = gnss_serial_write_raw,
};
-static ssize_t gnss_serial_receive_buf(struct serdev_device *serdev,
+static size_t gnss_serial_receive_buf(struct serdev_device *serdev,
const u8 *buf, size_t count)
{
struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index 6801a8fb20401..79375d14bbb67 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -160,7 +160,7 @@ static const struct gnss_operations sirf_gnss_ops = {
.write_raw = sirf_write_raw,
};
-static ssize_t sirf_receive_buf(struct serdev_device *serdev,
+static size_t sirf_receive_buf(struct serdev_device *serdev,
const u8 *buf, size_t count)
{
struct sirf_data *data = serdev_device_get_drvdata(serdev);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 3fbb0bdb15c14..b50d0b4708497 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -478,6 +478,18 @@ config GPIO_MXS
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
+config GPIO_NOMADIK
+ bool "Nomadik GPIO driver"
+ depends on ARCH_U8500 || ARCH_NOMADIK || MACH_EYEQ5 || COMPILE_TEST
+ select GPIOLIB_IRQCHIP
+ help
+ Say yes here to support the Nomadik SoC GPIO block. This block is also
+ used by the Mobileye EyeQ5 SoC.
+
+	  It handles up to 32 GPIOs per bank, all of which can be interrupt sources.
+ It is deeply interconnected with the associated pinctrl driver as GPIO
+ registers handle muxing ("alternate functions") as well.
+
config GPIO_NPCM_SGPIO
bool "Nuvoton SGPIO support"
depends on ARCH_NPCM || COMPILE_TEST
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 7ae4d81de1df7..fdd28c58d8904 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -117,6 +117,7 @@ obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o
obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
+obj-$(CONFIG_GPIO_NOMADIK) += gpio-nomadik.o
obj-$(CONFIG_GPIO_NPCM_SGPIO) += gpio-npcm-sgpio.o
obj-$(CONFIG_GPIO_OCTEON) += gpio-octeon.o
obj-$(CONFIG_GPIO_OMAP) += gpio-omap.o
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 1ee62cd58582b..25db014494a4d 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -92,7 +92,7 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
case 0x5e:
return GPIOPANELCTL;
default:
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
}
}
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 5ef8af8249806..c097e310c9e84 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -529,6 +529,7 @@ static const struct of_device_id lpc32xx_gpio_of_match[] = {
{ .compatible = "nxp,lpc3220-gpio", },
{ },
};
+MODULE_DEVICE_TABLE(of, lpc32xx_gpio_of_match);
static struct platform_driver lpc32xx_gpio_driver = {
.driver = {
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
new file mode 100644
index 0000000000000..836f1cc760c26
--- /dev/null
+++ b/drivers/gpio/gpio-nomadik.c
@@ -0,0 +1,730 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GPIO driver for the IP block found in the Nomadik SoC; it is an AMBA device,
+ * managing 32 pins with alternate functions. It can also handle the STA2X11
+ * block from ST.
+ *
+ * The GPIO chips are shared with pinctrl-nomadik, if used; the pin controller
+ * needs access to them for pinmuxing and other functionality.
+ *
+ * This driver also handles the mobileye,eyeq5-gpio compatible. It is an STA2X11
+ * but with only the data, direction and interrupt registers active. We want to
+ * avoid touching SLPM, RWIMSC, FWIMSC, AFSLA and AFSLB registers; that is,
+ * wake and alternate function registers. It is NOT compatible with
+ * pinctrl-nomadik.
+ *
+ * Copyright (C) 2008,2009 STMicroelectronics
+ * Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
+ * Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com>
+ * Copyright (C) 2011-2013 Linus Walleij <linus.walleij@linaro.org>
+ */
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/reset.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/gpio/gpio-nomadik.h>
+
+#ifndef CONFIG_PINCTRL_NOMADIK
+static DEFINE_SPINLOCK(nmk_gpio_slpm_lock);
+#endif
+
+void __nmk_gpio_set_slpm(struct nmk_gpio_chip *nmk_chip, unsigned int offset,
+ enum nmk_gpio_slpm mode)
+{
+ u32 slpm;
+
+ /* We should NOT have been called. */
+ if (WARN_ON(nmk_chip->is_mobileye_soc))
+ return;
+
+ slpm = readl(nmk_chip->addr + NMK_GPIO_SLPC);
+ if (mode == NMK_GPIO_SLPM_NOCHANGE)
+ slpm |= BIT(offset);
+ else
+ slpm &= ~BIT(offset);
+ writel(slpm, nmk_chip->addr + NMK_GPIO_SLPC);
+}
+
+static void __nmk_gpio_set_output(struct nmk_gpio_chip *nmk_chip,
+ unsigned int offset, int val)
+{
+ if (val)
+ writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DATS);
+ else
+ writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DATC);
+}
+
+void __nmk_gpio_make_output(struct nmk_gpio_chip *nmk_chip,
+ unsigned int offset, int val)
+{
+ writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DIRS);
+ __nmk_gpio_set_output(nmk_chip, offset, val);
+}
+
+/* IRQ functions */
+
+static void nmk_gpio_irq_ack(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+
+ clk_enable(nmk_chip->clk);
+ writel(BIT(d->hwirq), nmk_chip->addr + NMK_GPIO_IC);
+ clk_disable(nmk_chip->clk);
+}
+
+enum nmk_gpio_irq_type {
+ NORMAL,
+ WAKE,
+};
+
+static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
+ int offset, enum nmk_gpio_irq_type which,
+ bool enable)
+{
+ u32 *rimscval;
+ u32 *fimscval;
+ u32 rimscreg;
+ u32 fimscreg;
+
+ if (which == NORMAL) {
+ rimscreg = NMK_GPIO_RIMSC;
+ fimscreg = NMK_GPIO_FIMSC;
+ rimscval = &nmk_chip->rimsc;
+ fimscval = &nmk_chip->fimsc;
+ } else {
+ /* We should NOT have been called. */
+ if (WARN_ON(nmk_chip->is_mobileye_soc))
+ return;
+ rimscreg = NMK_GPIO_RWIMSC;
+ fimscreg = NMK_GPIO_FWIMSC;
+ rimscval = &nmk_chip->rwimsc;
+ fimscval = &nmk_chip->fwimsc;
+ }
+
+ /* we must individually set/clear the two edges */
+ if (nmk_chip->edge_rising & BIT(offset)) {
+ if (enable)
+ *rimscval |= BIT(offset);
+ else
+ *rimscval &= ~BIT(offset);
+ writel(*rimscval, nmk_chip->addr + rimscreg);
+ }
+ if (nmk_chip->edge_falling & BIT(offset)) {
+ if (enable)
+ *fimscval |= BIT(offset);
+ else
+ *fimscval &= ~BIT(offset);
+ writel(*fimscval, nmk_chip->addr + fimscreg);
+ }
+}
+
+static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
+ int offset, bool on)
+{
+ /* We should NOT have been called. */
+ if (WARN_ON(nmk_chip->is_mobileye_soc))
+ return;
+
+ /*
+ * Ensure WAKEUP_ENABLE is on. No need to disable it if wakeup is
+ * disabled, since setting SLPM to 1 increases power consumption, and
+ * wakeup is anyhow controlled by the RIMSC and FIMSC registers.
+ */
+ if (nmk_chip->sleepmode && on) {
+ __nmk_gpio_set_slpm(nmk_chip, offset,
+ NMK_GPIO_SLPM_WAKEUP_ENABLE);
+ }
+
+ __nmk_gpio_irq_modify(nmk_chip, offset, WAKE, on);
+}
+
+static void nmk_gpio_irq_maskunmask(struct nmk_gpio_chip *nmk_chip,
+ struct irq_data *d, bool enable)
+{
+ unsigned long flags;
+
+ clk_enable(nmk_chip->clk);
+ spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
+ spin_lock(&nmk_chip->lock);
+
+ __nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, enable);
+
+ if (!nmk_chip->is_mobileye_soc && !(nmk_chip->real_wake & BIT(d->hwirq)))
+ __nmk_gpio_set_wake(nmk_chip, d->hwirq, enable);
+
+ spin_unlock(&nmk_chip->lock);
+ spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
+ clk_disable(nmk_chip->clk);
+}
+
+static void nmk_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+
+ nmk_gpio_irq_maskunmask(nmk_chip, d, false);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
+}
+
+static void nmk_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
+ nmk_gpio_irq_maskunmask(nmk_chip, d, true);
+}
+
+static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+ unsigned long flags;
+
+ /* Handler is registered in all cases. */
+ if (nmk_chip->is_mobileye_soc)
+ return -ENXIO;
+
+ clk_enable(nmk_chip->clk);
+ spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
+ spin_lock(&nmk_chip->lock);
+
+ if (irqd_irq_disabled(d))
+ __nmk_gpio_set_wake(nmk_chip, d->hwirq, on);
+
+ if (on)
+ nmk_chip->real_wake |= BIT(d->hwirq);
+ else
+ nmk_chip->real_wake &= ~BIT(d->hwirq);
+
+ spin_unlock(&nmk_chip->lock);
+ spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
+ clk_disable(nmk_chip->clk);
+
+ return 0;
+}
+
+static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+ bool enabled = !irqd_irq_disabled(d);
+ bool wake = irqd_is_wakeup_set(d);
+ unsigned long flags;
+
+ if (type & IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ return -EINVAL;
+
+ clk_enable(nmk_chip->clk);
+ spin_lock_irqsave(&nmk_chip->lock, flags);
+
+ if (enabled)
+ __nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, false);
+
+ if (!nmk_chip->is_mobileye_soc && (enabled || wake))
+ __nmk_gpio_irq_modify(nmk_chip, d->hwirq, WAKE, false);
+
+ nmk_chip->edge_rising &= ~BIT(d->hwirq);
+ if (type & IRQ_TYPE_EDGE_RISING)
+ nmk_chip->edge_rising |= BIT(d->hwirq);
+
+ nmk_chip->edge_falling &= ~BIT(d->hwirq);
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ nmk_chip->edge_falling |= BIT(d->hwirq);
+
+ if (enabled)
+ __nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, true);
+
+ if (!nmk_chip->is_mobileye_soc && (enabled || wake))
+ __nmk_gpio_irq_modify(nmk_chip, d->hwirq, WAKE, true);
+
+ spin_unlock_irqrestore(&nmk_chip->lock, flags);
+ clk_disable(nmk_chip->clk);
+
+ return 0;
+}
+
+static unsigned int nmk_gpio_irq_startup(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+
+ clk_enable(nmk_chip->clk);
+ nmk_gpio_irq_unmask(d);
+ return 0;
+}
+
+static void nmk_gpio_irq_shutdown(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+
+ nmk_gpio_irq_mask(d);
+ clk_disable(nmk_chip->clk);
+}
+
+static irqreturn_t nmk_gpio_irq_handler(int irq, void *dev_id)
+{
+ struct nmk_gpio_chip *nmk_chip = dev_id;
+ struct gpio_chip *chip = &nmk_chip->chip;
+ unsigned long mask = GENMASK(chip->ngpio - 1, 0);
+ unsigned long status;
+ int bit;
+
+ clk_enable(nmk_chip->clk);
+
+ status = readl(nmk_chip->addr + NMK_GPIO_IS);
+
+ /* Ensure we cannot leave pending bits; this should never occur. */
+ if (unlikely(status & ~mask))
+ writel(status & ~mask, nmk_chip->addr + NMK_GPIO_IC);
+
+ clk_disable(nmk_chip->clk);
+
+ for_each_set_bit(bit, &status, chip->ngpio)
+ generic_handle_domain_irq_safe(chip->irq.domain, bit);
+
+ return IRQ_RETVAL((status & mask) != 0);
+}
+
+/* I/O Functions */
+
+static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned int offset)
+{
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+ int dir;
+
+ clk_enable(nmk_chip->clk);
+
+ dir = readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset);
+
+ clk_disable(nmk_chip->clk);
+
+ if (dir)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned int offset)
+{
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+
+ clk_enable(nmk_chip->clk);
+
+ writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DIRC);
+
+ clk_disable(nmk_chip->clk);
+
+ return 0;
+}
+
+static int nmk_gpio_get_input(struct gpio_chip *chip, unsigned int offset)
+{
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+ int value;
+
+ clk_enable(nmk_chip->clk);
+
+ value = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & BIT(offset));
+
+ clk_disable(nmk_chip->clk);
+
+ return value;
+}
+
+static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned int offset,
+ int val)
+{
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+
+ clk_enable(nmk_chip->clk);
+
+ __nmk_gpio_set_output(nmk_chip, offset, val);
+
+ clk_disable(nmk_chip->clk);
+}
+
+static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned int offset,
+ int val)
+{
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+
+ clk_enable(nmk_chip->clk);
+
+ __nmk_gpio_make_output(nmk_chip, offset, val);
+
+ clk_disable(nmk_chip->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int nmk_gpio_get_mode(struct nmk_gpio_chip *nmk_chip, int offset)
+{
+ u32 afunc, bfunc;
+
+ /* We don't support modes. */
+ if (nmk_chip->is_mobileye_soc)
+ return NMK_GPIO_ALT_GPIO;
+
+ clk_enable(nmk_chip->clk);
+
+ afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & BIT(offset);
+ bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & BIT(offset);
+
+ clk_disable(nmk_chip->clk);
+
+ return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? NMK_GPIO_ALT_B : 0);
+}
+
+void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
+ struct gpio_chip *chip, unsigned int offset,
+ unsigned int gpio)
+{
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+ int mode;
+ bool is_out;
+ bool data_out;
+ bool pull;
+ static const char * const modes[] = {
+ [NMK_GPIO_ALT_GPIO] = "gpio",
+ [NMK_GPIO_ALT_A] = "altA",
+ [NMK_GPIO_ALT_B] = "altB",
+ [NMK_GPIO_ALT_C] = "altC",
+ [NMK_GPIO_ALT_C + 1] = "altC1",
+ [NMK_GPIO_ALT_C + 2] = "altC2",
+ [NMK_GPIO_ALT_C + 3] = "altC3",
+ [NMK_GPIO_ALT_C + 4] = "altC4",
+ };
+
+ char *label = gpiochip_dup_line_label(chip, offset);
+ if (IS_ERR(label))
+ return;
+
+ clk_enable(nmk_chip->clk);
+ is_out = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
+ pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & BIT(offset));
+ data_out = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & BIT(offset));
+ mode = nmk_gpio_get_mode(nmk_chip, offset);
+#ifdef CONFIG_PINCTRL_NOMADIK
+ if (mode == NMK_GPIO_ALT_C && pctldev)
+ mode = nmk_prcm_gpiocr_get_mode(pctldev, gpio);
+#endif
+
+ if (is_out) {
+ seq_printf(s, " gpio-%-3d (%-20.20s) out %s %s",
+ gpio,
+ label ?: "(none)",
+ data_out ? "hi" : "lo",
+ (mode < 0) ? "unknown" : modes[mode]);
+ } else {
+ int irq = chip->to_irq(chip, offset);
+ const int pullidx = pull ? 1 : 0;
+ int val;
+ static const char * const pulls[] = {
+ "none ",
+ "pull enabled",
+ };
+
+ seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
+ gpio,
+ label ?: "(none)",
+ pulls[pullidx],
+ (mode < 0) ? "unknown" : modes[mode]);
+
+ val = nmk_gpio_get_input(chip, offset);
+ seq_printf(s, " VAL %d", val);
+
+ /*
+ * This races with request_irq(), set_irq_type(),
+ * and set_irq_wake() ... but those are "rare".
+ */
+ if (irq > 0 && irq_has_action(irq)) {
+ char *trigger;
+ bool wake;
+
+ if (nmk_chip->edge_rising & BIT(offset))
+ trigger = "edge-rising";
+ else if (nmk_chip->edge_falling & BIT(offset))
+ trigger = "edge-falling";
+ else
+ trigger = "edge-undefined";
+
+ wake = !!(nmk_chip->real_wake & BIT(offset));
+
+ seq_printf(s, " irq-%d %s%s",
+ irq, trigger, wake ? " wakeup" : "");
+ }
+ }
+ clk_disable(nmk_chip->clk);
+}
+
+static void nmk_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ unsigned int i, gpio = chip->base;
+
+ for (i = 0; i < chip->ngpio; i++, gpio++) {
+ nmk_gpio_dbg_show_one(s, NULL, chip, i, gpio);
+ seq_puts(s, "\n");
+ }
+}
+
+#else
+
+#define nmk_gpio_dbg_show NULL
+
+#endif
+
+/*
+ * We will allocate memory for the state container using devm* allocators,
+ * binding to the first device reaching this point; it doesn't matter whether
+ * it is the pin controller or the GPIO driver. However, we need to use the
+ * right platform device when looking up resources, so pay attention to pdev.
+ */
+struct nmk_gpio_chip *nmk_gpio_populate_chip(struct fwnode_handle *fwnode,
+ struct platform_device *pdev)
+{
+ struct nmk_gpio_chip *nmk_chip;
+ struct platform_device *gpio_pdev;
+ struct device *dev = &pdev->dev;
+ struct reset_control *reset;
+ struct device *gpio_dev;
+ struct gpio_chip *chip;
+ struct resource *res;
+ struct clk *clk;
+ void __iomem *base;
+ u32 id, ngpio;
+ int ret;
+
+ gpio_dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
+ if (!gpio_dev) {
+ dev_err(dev, "populate \"%pfwP\": device not found\n", fwnode);
+ return ERR_PTR(-ENODEV);
+ }
+ gpio_pdev = to_platform_device(gpio_dev);
+
+ if (device_property_read_u32(gpio_dev, "gpio-bank", &id)) {
+ dev_err(dev, "populate: gpio-bank property not found\n");
+ platform_device_put(gpio_pdev);
+ return ERR_PTR(-EINVAL);
+ }
+
+#ifdef CONFIG_PINCTRL_NOMADIK
+ if (id >= ARRAY_SIZE(nmk_gpio_chips)) {
+ dev_err(dev, "populate: invalid id: %u\n", id);
+ platform_device_put(gpio_pdev);
+ return ERR_PTR(-EINVAL);
+ }
+ /* Already populated? */
+ nmk_chip = nmk_gpio_chips[id];
+ if (nmk_chip) {
+ platform_device_put(gpio_pdev);
+ return nmk_chip;
+ }
+#endif
+
+ nmk_chip = devm_kzalloc(dev, sizeof(*nmk_chip), GFP_KERNEL);
+ if (!nmk_chip) {
+ platform_device_put(gpio_pdev);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (device_property_read_u32(gpio_dev, "ngpios", &ngpio)) {
+ ngpio = NMK_GPIO_PER_CHIP;
+ dev_dbg(dev, "populate: using default ngpio (%u)\n", ngpio);
+ }
+
+ nmk_chip->is_mobileye_soc = device_is_compatible(gpio_dev,
+ "mobileye,eyeq5-gpio");
+ nmk_chip->bank = id;
+ chip = &nmk_chip->chip;
+ chip->base = -1;
+ chip->ngpio = ngpio;
+ chip->label = dev_name(gpio_dev);
+ chip->parent = gpio_dev;
+
+ /* NOTE: different devices! No devm_platform_ioremap_resource() here! */
+ res = platform_get_resource(gpio_pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base)) {
+ platform_device_put(gpio_pdev);
+ return ERR_CAST(base);
+ }
+ nmk_chip->addr = base;
+
+ /* NOTE: do not use devm_ here! */
+ clk = clk_get_optional(gpio_dev, NULL);
+ if (IS_ERR(clk)) {
+ platform_device_put(gpio_pdev);
+ return ERR_CAST(clk);
+ }
+ clk_prepare(clk);
+ nmk_chip->clk = clk;
+
+ /* NOTE: do not use devm_ here! */
+ reset = reset_control_get_optional_shared(gpio_dev, NULL);
+ if (IS_ERR(reset)) {
+ clk_unprepare(clk);
+ clk_put(clk);
+ platform_device_put(gpio_pdev);
+ dev_err(dev, "failed getting reset control: %pe\n",
+ reset);
+ return ERR_CAST(reset);
+ }
+
+ /*
+	 * The reset might be shared, and assert/deassert calls can be unbalanced. We
+ * only support sharing this reset with other gpio-nomadik devices that
+ * use this reset to ensure deassertion at probe.
+ */
+ ret = reset_control_deassert(reset);
+ if (ret) {
+ reset_control_put(reset);
+ clk_unprepare(clk);
+ clk_put(clk);
+ platform_device_put(gpio_pdev);
+ dev_err(dev, "failed reset deassert: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+#ifdef CONFIG_PINCTRL_NOMADIK
+ nmk_gpio_chips[id] = nmk_chip;
+#endif
+ return nmk_chip;
+}
+
+static void nmk_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+
+ seq_printf(p, "nmk%u-%u-%u", nmk_chip->bank,
+ gc->base, gc->base + gc->ngpio - 1);
+}
+
+static const struct irq_chip nmk_irq_chip = {
+ .irq_ack = nmk_gpio_irq_ack,
+ .irq_mask = nmk_gpio_irq_mask,
+ .irq_unmask = nmk_gpio_irq_unmask,
+ .irq_set_type = nmk_gpio_irq_set_type,
+ .irq_set_wake = nmk_gpio_irq_set_wake,
+ .irq_startup = nmk_gpio_irq_startup,
+ .irq_shutdown = nmk_gpio_irq_shutdown,
+ .irq_print_chip = nmk_gpio_irq_print_chip,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static int nmk_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct nmk_gpio_chip *nmk_chip;
+ struct gpio_irq_chip *girq;
+ bool supports_sleepmode;
+ struct gpio_chip *chip;
+ int irq;
+ int ret;
+
+ nmk_chip = nmk_gpio_populate_chip(dev_fwnode(dev), pdev);
+ if (IS_ERR(nmk_chip)) {
+ dev_err(dev, "could not populate nmk chip struct\n");
+ return PTR_ERR(nmk_chip);
+ }
+
+ supports_sleepmode =
+ device_property_read_bool(dev, "st,supports-sleepmode");
+
+ /* Correct platform device ID */
+ pdev->id = nmk_chip->bank;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ /*
+ * The virt address in nmk_chip->addr is in the nomadik register space,
+ * so we can simply convert the resource address, without remapping
+ */
+ nmk_chip->sleepmode = supports_sleepmode;
+ spin_lock_init(&nmk_chip->lock);
+
+ chip = &nmk_chip->chip;
+ chip->parent = dev;
+ chip->request = gpiochip_generic_request;
+ chip->free = gpiochip_generic_free;
+ chip->get_direction = nmk_gpio_get_dir;
+ chip->direction_input = nmk_gpio_make_input;
+ chip->get = nmk_gpio_get_input;
+ chip->direction_output = nmk_gpio_make_output;
+ chip->set = nmk_gpio_set_output;
+ chip->dbg_show = nmk_gpio_dbg_show;
+ chip->can_sleep = false;
+ chip->owner = THIS_MODULE;
+
+ girq = &chip->irq;
+ gpio_irq_chip_set_chip(girq, &nmk_irq_chip);
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
+
+ ret = devm_request_irq(dev, irq, nmk_gpio_irq_handler, IRQF_SHARED,
+ dev_name(dev), nmk_chip);
+ if (ret) {
+ dev_err(dev, "failed requesting IRQ\n");
+ return ret;
+ }
+
+ if (!nmk_chip->is_mobileye_soc) {
+ clk_enable(nmk_chip->clk);
+ nmk_chip->lowemi = readl_relaxed(nmk_chip->addr + NMK_GPIO_LOWEMI);
+ clk_disable(nmk_chip->clk);
+ }
+
+ ret = gpiochip_add_data(chip, nmk_chip);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, nmk_chip);
+
+ dev_info(dev, "chip registered\n");
+
+ return 0;
+}
+
+static const struct of_device_id nmk_gpio_match[] = {
+ { .compatible = "st,nomadik-gpio", },
+ { .compatible = "mobileye,eyeq5-gpio", },
+ {}
+};
+
+static struct platform_driver nmk_gpio_driver = {
+ .driver = {
+ .name = "nomadik-gpio",
+ .of_match_table = nmk_gpio_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = nmk_gpio_probe,
+};
+
+static int __init nmk_gpio_init(void)
+{
+ return platform_driver_register(&nmk_gpio_driver);
+}
+subsys_initcall(nmk_gpio_init);
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index c18b6b47384f1..94ca9d03c0949 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -104,7 +104,7 @@ static inline int to_reg(int gpio, enum ctrl_register type)
unsigned int reg = type == CTRL_IN ? GPIO_IN_CTRL_BASE : GPIO_OUT_CTRL_BASE;
if (gpio >= WCOVE_GPIO_NUM)
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
return reg + gpio;
}
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index f384fa2787648..d09c7d7283655 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -728,6 +728,25 @@ static u32 line_event_id(int level)
GPIO_V2_LINE_EVENT_FALLING_EDGE;
}
+static inline char *make_irq_label(const char *orig)
+{
+ char *new;
+
+ if (!orig)
+ return NULL;
+
+ new = kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
+ if (!new)
+ return ERR_PTR(-ENOMEM);
+
+ return new;
+}
+
+static inline void free_irq_label(const char *label)
+{
+ kfree(label);
+}
+
#ifdef CONFIG_HTE
static enum hte_return process_hw_ts_thread(void *p)
@@ -1015,6 +1034,7 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
{
unsigned long irqflags;
int ret, level, irq;
+ char *label;
/* try hardware */
ret = gpiod_set_debounce(line->desc, debounce_period_us);
@@ -1037,11 +1057,17 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
if (irq < 0)
return -ENXIO;
+ label = make_irq_label(line->req->label);
+ if (IS_ERR(label))
+ return -ENOMEM;
+
irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
ret = request_irq(irq, debounce_irq_handler, irqflags,
- line->req->label, line);
- if (ret)
+ label, line);
+ if (ret) {
+ free_irq_label(label);
return ret;
+ }
line->irq = irq;
} else {
ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
@@ -1086,7 +1112,7 @@ static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
static void edge_detector_stop(struct line *line)
{
if (line->irq) {
- free_irq(line->irq, line);
+ free_irq_label(free_irq(line->irq, line));
line->irq = 0;
}
@@ -1110,6 +1136,7 @@ static int edge_detector_setup(struct line *line,
unsigned long irqflags = 0;
u64 eflags;
int irq, ret;
+ char *label;
eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
if (eflags && !kfifo_initialized(&line->req->events)) {
@@ -1146,11 +1173,17 @@ static int edge_detector_setup(struct line *line,
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
+ label = make_irq_label(line->req->label);
+ if (IS_ERR(label))
+ return PTR_ERR(label);
+
/* Request a thread to read the events */
ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
- irqflags, line->req->label, line);
- if (ret)
+ irqflags, label, line);
+ if (ret) {
+ free_irq_label(label);
return ret;
+ }
line->irq = irq;
return 0;
@@ -1973,7 +2006,7 @@ static void lineevent_free(struct lineevent_state *le)
blocking_notifier_chain_unregister(&le->gdev->device_notifier,
&le->device_unregistered_nb);
if (le->irq)
- free_irq(le->irq, le);
+ free_irq_label(free_irq(le->irq, le));
if (le->desc)
gpiod_free(le->desc);
kfree(le->label);
@@ -2114,6 +2147,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
int fd;
int ret;
int irq, irqflags = 0;
+ char *label;
if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
return -EFAULT;
@@ -2198,15 +2232,23 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_le;
+ label = make_irq_label(le->label);
+ if (IS_ERR(label)) {
+ ret = PTR_ERR(label);
+ goto out_free_le;
+ }
+
/* Request a thread to read the events */
ret = request_threaded_irq(irq,
lineevent_irq_handler,
lineevent_irq_thread,
irqflags,
- le->label,
+ label,
le);
- if (ret)
+ if (ret) {
+ free_irq_label(label);
goto out_free_le;
+ }
le->irq = irq;
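For orientation, a hedged summary of the helper pair introduced above: the consumer label is duplicated with '/' replaced by ':' before it is handed to request_irq() (the string becomes the interrupt's devname, where a '/' is problematic), and the duplicate is later freed via the pointer that free_irq() hands back, as the hunks above show. The replacement itself is just kstrdup_and_replace():

/* Illustrative only, not part of the patch. */
#include <linux/slab.h>
#include <linux/string_helpers.h>

static char *example_sanitize_label(const char *consumer_label)
{
	/* "gpio/button" becomes "gpio:button"; a NULL input stays NULL in the real helper. */
	if (!consumer_label)
		return NULL;

	return kstrdup_and_replace(consumer_label, '/', ':', GFP_KERNEL);
}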
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index ce94e37bcbee7..94903fc1c1459 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1175,6 +1175,9 @@ struct gpio_device *gpio_device_find(const void *data,
list_for_each_entry_srcu(gdev, &gpio_devices, list,
srcu_read_lock_held(&gpio_devices_srcu)) {
+ if (!device_is_registered(&gdev->dev))
+ continue;
+
guard(srcu)(&gdev->srcu);
gc = srcu_dereference(gdev->chip, &gdev->srcu);
@@ -2397,6 +2400,11 @@ char *gpiochip_dup_line_label(struct gpio_chip *gc, unsigned int offset)
}
EXPORT_SYMBOL_GPL(gpiochip_dup_line_label);
+static inline const char *function_name_or_default(const char *con_id)
+{
+ return con_id ?: "(default)";
+}
+
/**
* gpiochip_request_own_desc - Allow GPIO chip to request its own descriptor
* @gc: GPIO chip
@@ -2425,10 +2433,11 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
enum gpiod_flags dflags)
{
struct gpio_desc *desc = gpiochip_get_desc(gc, hwnum);
+ const char *name = function_name_or_default(label);
int ret;
if (IS_ERR(desc)) {
- chip_err(gc, "failed to get GPIO descriptor\n");
+ chip_err(gc, "failed to get GPIO %s descriptor\n", name);
return desc;
}
@@ -2438,8 +2447,8 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
ret = gpiod_configure_flags(desc, label, lflags, dflags);
if (ret) {
- chip_err(gc, "setup of own GPIO %s failed\n", label);
gpiod_free_commit(desc);
+ chip_err(gc, "setup of own GPIO %s failed\n", name);
return ERR_PTR(ret);
}
@@ -4153,19 +4162,17 @@ static struct gpio_desc *gpiod_find_by_fwnode(struct fwnode_handle *fwnode,
enum gpiod_flags *flags,
unsigned long *lookupflags)
{
+ const char *name = function_name_or_default(con_id);
struct gpio_desc *desc = ERR_PTR(-ENOENT);
if (is_of_node(fwnode)) {
- dev_dbg(consumer, "using DT '%pfw' for '%s' GPIO lookup\n",
- fwnode, con_id);
+ dev_dbg(consumer, "using DT '%pfw' for '%s' GPIO lookup\n", fwnode, name);
desc = of_find_gpio(to_of_node(fwnode), con_id, idx, lookupflags);
} else if (is_acpi_node(fwnode)) {
- dev_dbg(consumer, "using ACPI '%pfw' for '%s' GPIO lookup\n",
- fwnode, con_id);
+ dev_dbg(consumer, "using ACPI '%pfw' for '%s' GPIO lookup\n", fwnode, name);
desc = acpi_find_gpio(fwnode, con_id, idx, flags, lookupflags);
} else if (is_software_node(fwnode)) {
- dev_dbg(consumer, "using swnode '%pfw' for '%s' GPIO lookup\n",
- fwnode, con_id);
+ dev_dbg(consumer, "using swnode '%pfw' for '%s' GPIO lookup\n", fwnode, name);
desc = swnode_find_gpio(fwnode, con_id, idx, lookupflags);
}
@@ -4181,6 +4188,7 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
bool platform_lookup_allowed)
{
unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ const char *name = function_name_or_default(con_id);
/*
* scoped_guard() is implemented as a for loop, meaning static
* analyzers will complain about these two not being initialized.
@@ -4203,8 +4211,7 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
}
if (IS_ERR(desc)) {
- dev_dbg(consumer, "No GPIO consumer %s found\n",
- con_id);
+ dev_dbg(consumer, "No GPIO consumer %s found\n", name);
return desc;
}
@@ -4226,15 +4233,14 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
*
* FIXME: Make this more sane and safe.
*/
- dev_info(consumer,
- "nonexclusive access to GPIO for %s\n", con_id);
+ dev_info(consumer, "nonexclusive access to GPIO for %s\n", name);
return desc;
}
ret = gpiod_configure_flags(desc, con_id, lookupflags, flags);
if (ret < 0) {
- dev_dbg(consumer, "setup of GPIO %s failed\n", con_id);
gpiod_put(desc);
+ dev_dbg(consumer, "setup of GPIO %s failed\n", name);
return ERR_PTR(ret);
}
@@ -4350,6 +4356,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_optional);
int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
unsigned long lflags, enum gpiod_flags dflags)
{
+ const char *name = function_name_or_default(con_id);
int ret;
if (lflags & GPIO_ACTIVE_LOW)
@@ -4393,7 +4400,7 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
- gpiod_dbg(desc, "no flags found for %s\n", con_id);
+ gpiod_dbg(desc, "no flags found for GPIO %s\n", name);
return 0;
}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 182ed8f678500..5a0c476361c30 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -68,6 +68,7 @@ config DRM_USE_DYNAMIC_DEBUG
config DRM_KUNIT_TEST_HELPERS
tristate
depends on DRM && KUNIT
+ select DRM_KMS_HELPER
help
KUnit Helpers for KMS drivers.
@@ -80,7 +81,6 @@ config DRM_KUNIT_TEST
select DRM_EXEC
select DRM_EXPORT_FOR_TESTS if m
select DRM_GEM_SHMEM_HELPER
- select DRM_KMS_HELPER
select DRM_KUNIT_TEST_HELPERS
select DRM_LIB_RANDOM
select PRIME_NUMBERS
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9c62552bec344..b3b84647207ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -210,6 +210,7 @@ extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
+extern int amdgpu_mes_log_enable;
extern int amdgpu_mes_kiq;
extern int amdgpu_noretry;
extern int amdgpu_force_asic_type;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index f5f2945711be0..35dd6effa9a34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -146,7 +146,7 @@ int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev)
{
int ret;
- if (!adev->kfd.init_complete)
+ if (!adev->kfd.init_complete || adev->kfd.client.dev)
return 0;
ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 14dc9d2d8d53a..df58a6a1a67ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2869,14 +2869,16 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
mutex_lock(&process_info->lock);
- drm_exec_init(&exec, 0, 0);
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
drm_exec_retry_on_contention(&exec);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ pr_err("Locking VM PD failed, ret: %d\n", ret);
goto ttm_reserve_fail;
+ }
}
/* Reserve all BOs and page tables/directory. Add all BOs from
@@ -2889,8 +2891,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
gobj = &mem->bo->tbo.base;
ret = drm_exec_prepare_obj(&exec, gobj, 1);
drm_exec_retry_on_contention(&exec);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ pr_err("drm_exec_prepare_obj failed, ret: %d\n", ret);
goto ttm_reserve_fail;
+ }
}
}
@@ -2950,8 +2954,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
* validations above would invalidate DMABuf imports again.
*/
ret = process_validate_vms(process_info, &exec.ticket);
- if (ret)
+ if (ret) {
+ pr_debug("Validating VMs failed, ret: %d\n", ret);
goto validate_map_fail;
+ }
/* Update mappings not managed by KFD */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0a4b09709cfb1..ec888fc6ead8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -819,7 +819,7 @@ retry:
p->bytes_moved += ctx.bytes_moved;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- amdgpu_bo_in_cpu_visible_vram(bo))
+ amdgpu_res_cpu_visible(adev, bo->tbo.resource))
p->bytes_moved_vis += ctx.bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1e9454e6e4cb4..7753a2e64d411 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4040,10 +4040,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
* early on during init and before calling to RREG32.
*/
adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
- if (!adev->reset_domain) {
- r = -ENOMEM;
- goto unmap_memory;
- }
+ if (!adev->reset_domain)
+ return -ENOMEM;
/* detect hw virtualization here */
amdgpu_detect_virtualization(adev);
@@ -4053,7 +4051,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_device_get_job_timeout_settings(adev);
if (r) {
dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
- goto unmap_memory;
+ return r;
}
amdgpu_device_set_mcbp(adev);
@@ -4061,12 +4059,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* early init functions */
r = amdgpu_device_ip_early_init(adev);
if (r)
- goto unmap_memory;
+ return r;
/* Get rid of things like offb */
r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
if (r)
- goto unmap_memory;
+ return r;
/* Enable TMZ based on IP_VERSION */
amdgpu_gmc_tmz_set(adev);
@@ -4076,7 +4074,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (adev->gmc.xgmi.supported) {
r = adev->gfxhub.funcs->get_xgmi_info(adev);
if (r)
- goto unmap_memory;
+ return r;
}
/* enable PCIE atomic ops */
@@ -4137,18 +4135,22 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->ip_blocks[i].status.hw = true;
}
}
+ } else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ !amdgpu_device_has_display_hardware(adev)) {
+ r = psp_gpu_reset(adev);
} else {
- tmp = amdgpu_reset_method;
- /* It should do a default reset when loading or reloading the driver,
- * regardless of the module parameter reset_method.
- */
- amdgpu_reset_method = AMD_RESET_METHOD_NONE;
- r = amdgpu_asic_reset(adev);
- amdgpu_reset_method = tmp;
- if (r) {
- dev_err(adev->dev, "asic reset on init failed\n");
- goto failed;
- }
+ tmp = amdgpu_reset_method;
+ /* It should do a default reset when loading or reloading the driver,
+ * regardless of the module parameter reset_method.
+ */
+ amdgpu_reset_method = AMD_RESET_METHOD_NONE;
+ r = amdgpu_asic_reset(adev);
+ amdgpu_reset_method = tmp;
+ }
+
+ if (r) {
+ dev_err(adev->dev, "asic reset on init failed\n");
+ goto failed;
}
}
@@ -4345,8 +4347,6 @@ release_ras_con:
failed:
amdgpu_vf_error_trans_all(adev);
-unmap_memory:
- iounmap(adev->rmmio);
return r;
}
@@ -4543,6 +4543,8 @@ int amdgpu_device_prepare(struct drm_device *dev)
if (r)
goto unprepare;
+ flush_delayed_work(&adev->gfx.gfx_off_delay_work);
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index a07e4b87d4cae..ac5bf01fe8d2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1896,6 +1896,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break;
case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
break;
default:
@@ -2237,6 +2238,7 @@ static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
if (amdgpu_umsch_mm & 0x1) {
amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
adev->enable_umsch_mm = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 15b188aaf6818..e4277298cf1aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -195,6 +195,7 @@ int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp = -1;
int amdgpu_discovery = -1;
int amdgpu_mes;
+int amdgpu_mes_log_enable = 0;
int amdgpu_mes_kiq;
int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
@@ -668,6 +669,15 @@ MODULE_PARM_DESC(mes,
module_param_named(mes, amdgpu_mes, int, 0444);
/**
+ * DOC: mes_log_enable (int)
+ * Enable Micro Engine Scheduler log. This is used to enable/disable MES internal log.
+ * (0 = disabled (default), 1 = enabled)
+ */
+MODULE_PARM_DESC(mes_log_enable,
+ "Enable Micro Engine Scheduler log (0 = disabled (default), 1 = enabled)");
+module_param_named(mes_log_enable, amdgpu_mes_log_enable, int, 0444);
+
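(As a usage sketch, assuming only the standard module-parameter conventions: the parameter is registered with mode 0444, so it is read-only through sysfs at runtime; the MES log would typically be enabled when the module is loaded, e.g. "modprobe amdgpu mes_log_enable=1", or on the kernel command line as "amdgpu.mes_log_enable=1".)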
+/**
* DOC: mes_kiq (int)
* Enable Micro Engine Scheduler KIQ. This is a new engine pipe for kiq.
* (0 = disabled (default), 1 = enabled)
@@ -2479,8 +2489,11 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
}
for (i = 0; i < mgpu_info.num_dgpu; i++) {
adev = mgpu_info.gpu_ins[i].adev;
- if (!adev->kfd.init_complete)
+ if (!adev->kfd.init_complete) {
+ kgd2kfd_init_zone_device(adev);
amdgpu_amdkfd_device_init(adev);
+ amdgpu_amdkfd_drm_client_create(adev);
+ }
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index f8b48fd93108c..55d5508987ffe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -687,7 +687,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
- DRM_ERROR("KCQ enable failed\n");
+ DRM_ERROR("KGQ enable failed\n");
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index 55b65fc04b651..431ec72655ec8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -129,13 +129,25 @@ static const struct mmu_interval_notifier_ops amdgpu_hmm_hsa_ops = {
*/
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
{
+ int r;
+
if (bo->kfd_bo)
- return mmu_interval_notifier_insert(&bo->notifier, current->mm,
+ r = mmu_interval_notifier_insert(&bo->notifier, current->mm,
addr, amdgpu_bo_size(bo),
&amdgpu_hmm_hsa_ops);
- return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
- amdgpu_bo_size(bo),
- &amdgpu_hmm_gfx_ops);
+ else
+ r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+ amdgpu_bo_size(bo),
+ &amdgpu_hmm_gfx_ops);
+ if (r)
+ /*
+ * Make sure amdgpu_hmm_unregister() doesn't call
+ * mmu_interval_notifier_remove() when the notifier isn't properly
+ * initialized.
+ */
+ bo->notifier.mm = NULL;
+
+ return r;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4b3000c21ef2c..e4742b65032d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -304,12 +304,15 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
dma_fence_set_error(finished, -ECANCELED);
if (finished->error < 0) {
- DRM_INFO("Skip scheduling IBs!\n");
+ dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
+ ring->name);
} else {
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
&fence);
if (r)
- DRM_ERROR("Error scheduling IBs (%d)\n", r);
+ dev_err(adev->dev,
+ "Error scheduling IBs (%d) in ring(%s)", r,
+ ring->name);
}
job->job_run_counter++;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index a98e03e0a51f1..a00cf4756ad0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -102,7 +102,10 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
int r;
- r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
+ if (!amdgpu_mes_log_enable)
+ return 0;
+
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
@@ -1549,12 +1552,11 @@ static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
- mem, PAGE_SIZE, false);
+ mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);
return 0;
}
-
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);
#endif
@@ -1565,7 +1567,7 @@ void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *root = minor->debugfs_root;
- if (adev->enable_mes)
+ if (adev->enable_mes && amdgpu_mes_log_enable)
debugfs_create_file("amdgpu_mes_event_log", 0444, root,
adev, &amdgpu_debugfs_mes_event_log_fops);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 7d4f93fea937a..4c8fc3117ef89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -52,6 +52,7 @@ enum amdgpu_mes_priority_level {
#define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */
#define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */
+#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000 /* Maximum log buffer size for MES */
struct amdgpu_mes_funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 010b0cb7693c9..2099159a693fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -617,8 +617,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- bo->tbo.resource->mem_type == TTM_PL_VRAM &&
- amdgpu_bo_in_cpu_visible_vram(bo))
+ amdgpu_res_cpu_visible(adev, bo->tbo.resource))
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
@@ -1272,23 +1271,25 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_resource *res = bo->tbo.resource;
uint64_t size = amdgpu_bo_size(bo);
struct drm_gem_object *obj;
unsigned int domain;
bool shared;
/* Abort if the BO doesn't currently have a backing store */
- if (!bo->tbo.resource)
+ if (!res)
return;
obj = &bo->tbo.base;
shared = drm_gem_object_is_shared_for_memory_stats(obj);
- domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
+ domain = amdgpu_mem_type_to_domain(res->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
stats->vram += size;
- if (amdgpu_bo_in_cpu_visible_vram(bo))
+ if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
stats->visible_vram += size;
if (shared)
stats->vram_shared += size;
@@ -1389,10 +1390,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- if (bo->resource->mem_type != TTM_PL_VRAM)
- return 0;
-
- if (amdgpu_bo_in_cpu_visible_vram(abo))
+ if (amdgpu_res_cpu_visible(adev, bo->resource))
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -1415,7 +1413,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* this should never happen */
if (bo->resource->mem_type == TTM_PL_VRAM &&
- !amdgpu_bo_in_cpu_visible_vram(abo))
+ !amdgpu_res_cpu_visible(adev, bo->resource))
return VM_FAULT_SIGBUS;
ttm_bo_move_to_lru_tail_unlocked(bo);
@@ -1579,6 +1577,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
*/
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct dma_buf_attachment *attachment;
struct dma_buf *dma_buf;
const char *placement;
@@ -1587,10 +1586,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
if (dma_resv_trylock(bo->tbo.base.resv)) {
unsigned int domain;
+
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
- if (amdgpu_bo_in_cpu_visible_vram(bo))
+ if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
placement = "VRAM VISIBLE";
else
placement = "VRAM";
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index be679c42b0b8c..fa03d9e4874cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -251,28 +251,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
}
/**
- * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
- */
-static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_res_cursor cursor;
-
- if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
- return false;
-
- amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
- while (cursor.remaining) {
- if (cursor.start < adev->gmc.visible_vram_size)
- return true;
-
- amdgpu_res_next(&cursor, cursor.size);
- }
-
- return false;
-}
-
-/**
* amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
*/
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 3c2b1413058bb..94b310fdb719d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -1830,6 +1830,10 @@ static int psp_hdcp_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ /* bypass hdcp initialization if dmu is harvested */
+ if (!amdgpu_device_has_display_hardware(psp->adev))
+ return 0;
+
if (!psp->hdcp_context.context.bin_desc.size_bytes ||
!psp->hdcp_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
@@ -1862,6 +1866,9 @@ int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ if (!psp->hdcp_context.context.initialized)
+ return 0;
+
return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
}
@@ -1897,6 +1904,10 @@ static int psp_dtm_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ /* bypass dtm initialization if dmu is harvested */
+ if (!amdgpu_device_has_display_hardware(psp->adev))
+ return 0;
+
if (!psp->dtm_context.context.bin_desc.size_bytes ||
!psp->dtm_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
@@ -1929,6 +1940,9 @@ int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ if (!psp->dtm_context.context.initialized)
+ return 0;
+
return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
}
@@ -2063,6 +2077,10 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ /* bypass securedisplay initialization if dmu is harvested */
+ if (!amdgpu_device_has_display_hardware(psp->adev))
+ return 0;
+
if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
!psp->securedisplay_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 5505d646f43aa..06f0a6534a94f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -524,46 +524,58 @@ static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
{
struct amdgpu_ring *ring = file_inode(f)->i_private;
volatile u32 *mqd;
- int r;
+ u32 *kbuf;
+ int r, i;
uint32_t value, result;
if (*pos & 3 || size & 3)
return -EINVAL;
- result = 0;
+ kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
- return r;
+ goto err_free;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
- if (r) {
- amdgpu_bo_unreserve(ring->mqd_obj);
- return r;
- }
+ if (r)
+ goto err_unreserve;
+ /*
+ * Copy to local buffer to avoid put_user(), which might fault
+ * and acquire mmap_sem, under reservation_ww_class_mutex.
+ */
+ for (i = 0; i < ring->mqd_size/sizeof(u32); i++)
+ kbuf[i] = mqd[i];
+
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ amdgpu_bo_unreserve(ring->mqd_obj);
+
+ result = 0;
while (size) {
if (*pos >= ring->mqd_size)
- goto done;
+ break;
- value = mqd[*pos/4];
+ value = kbuf[*pos/4];
r = put_user(value, (uint32_t *)buf);
if (r)
- goto done;
+ goto err_free;
buf += 4;
result += 4;
size -= 4;
*pos += 4;
}
-done:
- amdgpu_bo_kunmap(ring->mqd_obj);
- mqd = NULL;
- amdgpu_bo_unreserve(ring->mqd_obj);
- if (r)
- return r;
-
+ kfree(kbuf);
return result;
+
+err_unreserve:
+ amdgpu_bo_unreserve(ring->mqd_obj);
+err_free:
+ kfree(kbuf);
+ return r;
}
static const struct file_operations amdgpu_debugfs_mqd_fops = {
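A minimal sketch of the snapshot-then-copy pattern the reworked read handler uses (the helper name and the locking comments below are hypothetical, not taken from the driver): the data is copied into a kmalloc'd buffer while the object is still reserved, and put_user() runs only after the reservation is dropped, since put_user() can fault and take mm locks.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical helper: snapshot 'nwords' u32s, then copy them to user space. */
static ssize_t copy_words_to_user(const u32 *src, size_t nwords,
				  char __user *buf, size_t size, loff_t *pos)
{
	size_t copied = 0;
	u32 *kbuf;
	int r = 0;

	kbuf = kmalloc_array(nwords, sizeof(u32), GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	/* take the lock protecting 'src' here, snapshot, then drop it */
	memcpy(kbuf, src, nwords * sizeof(u32));
	/* from this point on, a fault inside put_user() is harmless */

	while (size >= 4 && *pos / 4 < nwords) {
		r = put_user(kbuf[*pos / 4], (u32 __user *)(buf + copied));
		if (r)
			break;
		copied += 4;
		size -= 4;
		*pos += 4;
	}

	kfree(kbuf);
	return r ? r : copied;
}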
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8722beba494e5..1d71729e3f6bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -133,7 +133,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
- amdgpu_bo_in_cpu_visible_vram(abo)) {
+ amdgpu_res_cpu_visible(adev, bo->resource)) {
/* Try evicting to the CPU inaccessible part of VRAM
* first, but only set GTT as busy placement, so this
@@ -403,40 +403,55 @@ error:
return r;
}
-/*
- * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
+/**
+ * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
+ * @adev: amdgpu device
+ * @res: the resource to check
*
- * Called by amdgpu_bo_move()
+ * Returns: true if the full resource is CPU visible, false otherwise.
*/
-static bool amdgpu_mem_visible(struct amdgpu_device *adev,
- struct ttm_resource *mem)
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+ struct ttm_resource *res)
{
- u64 mem_size = (u64)mem->size;
struct amdgpu_res_cursor cursor;
- u64 end;
- if (mem->mem_type == TTM_PL_SYSTEM ||
- mem->mem_type == TTM_PL_TT)
+ if (!res)
+ return false;
+
+ if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
+ res->mem_type == AMDGPU_PL_PREEMPT)
return true;
- if (mem->mem_type != TTM_PL_VRAM)
+
+ if (res->mem_type != TTM_PL_VRAM)
return false;
- amdgpu_res_first(mem, 0, mem_size, &cursor);
- end = cursor.start + cursor.size;
+ amdgpu_res_first(res, 0, res->size, &cursor);
while (cursor.remaining) {
+ if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size)
+ return false;
amdgpu_res_next(&cursor, cursor.size);
+ }
- if (!cursor.remaining)
- break;
+ return true;
+}
- /* ttm_resource_ioremap only supports contiguous memory */
- if (end != cursor.start)
- return false;
+/*
+ * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
+ *
+ * Called by amdgpu_bo_move()
+ */
+static bool amdgpu_res_copyable(struct amdgpu_device *adev,
+ struct ttm_resource *mem)
+{
+ if (!amdgpu_res_cpu_visible(adev, mem))
+ return false;
- end = cursor.start + cursor.size;
- }
+ /* ttm_resource_ioremap only supports contiguous memory */
+ if (mem->mem_type == TTM_PL_VRAM &&
+ !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
+ return false;
- return end <= adev->gmc.visible_vram_size;
+ return true;
}
/*
@@ -529,8 +544,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
if (r) {
/* Check that all memory is CPU accessible */
- if (!amdgpu_mem_visible(adev, old_mem) ||
- !amdgpu_mem_visible(adev, new_mem)) {
+ if (!amdgpu_res_copyable(adev, old_mem) ||
+ !amdgpu_res_copyable(adev, new_mem)) {
pr_err("Move buffer fallback to memcpy unavailable\n");
return r;
}
@@ -557,7 +572,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- size_t bus_size = (size_t)mem->size;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
@@ -568,9 +582,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
- /* check if it's visible */
- if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
- return -EINVAL;
if (adev->mman.aper_base_kaddr &&
mem->placement & TTM_PL_FLAG_CONTIGUOUS)
@@ -864,6 +875,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
gtt->ttm.dma_address, flags);
}
+ gtt->bound = true;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 65ec82141a8e0..32cf6b6f6efd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start);
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+ struct ttm_resource *res);
+
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index ab820cf526683..0df97c3e3a700 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -189,10 +189,13 @@ static void setup_vpe_queue(struct amdgpu_device *adev,
mqd->rptr_val = 0;
mqd->unmapped = 1;
+ if (adev->vpe.collaborate_mode)
+ memcpy(++mqd, test->mqd_data_cpu_addr, sizeof(struct MQD_INFO));
+
qinfo->mqd_addr = test->mqd_data_gpu_addr;
qinfo->csa_addr = test->ctx_data_gpu_addr +
offsetof(struct umsch_mm_test_ctx_data, vpe_ctx_csa);
- qinfo->doorbell_offset_0 = (adev->doorbell_index.vpe_ring + 1) << 1;
+ qinfo->doorbell_offset_0 = 0;
qinfo->doorbell_offset_1 = 0;
}
@@ -287,7 +290,10 @@ static int submit_vpe_queue(struct amdgpu_device *adev, struct umsch_mm_test *te
ring[5] = 0;
mqd->wptr_val = (6 << 2);
- // WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val);
+ if (adev->vpe.collaborate_mode)
+ (++mqd)->wptr_val = (6 << 2);
+
+ WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val);
for (i = 0; i < adev->usec_timeout; i++) {
if (*fence == test_pattern)
@@ -571,6 +577,7 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
fw_name = "amdgpu/umsch_mm_4_0_0.bin";
break;
default:
@@ -750,6 +757,7 @@ static int umsch_mm_early_init(void *handle)
switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
umsch_mm_v4_0_set_funcs(&adev->umsch_mm);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h
index 8258a43a6236c..5014b5af95fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h
@@ -33,13 +33,6 @@ enum UMSCH_SWIP_ENGINE_TYPE {
UMSCH_SWIP_ENGINE_TYPE_MAX
};
-enum UMSCH_SWIP_AFFINITY_TYPE {
- UMSCH_SWIP_AFFINITY_TYPE_ANY = 0,
- UMSCH_SWIP_AFFINITY_TYPE_VCN0 = 1,
- UMSCH_SWIP_AFFINITY_TYPE_VCN1 = 2,
- UMSCH_SWIP_AFFINITY_TYPE_MAX
-};
-
enum UMSCH_CONTEXT_PRIORITY_LEVEL {
CONTEXT_PRIORITY_LEVEL_IDLE = 0,
CONTEXT_PRIORITY_LEVEL_NORMAL = 1,
@@ -51,13 +44,15 @@ enum UMSCH_CONTEXT_PRIORITY_LEVEL {
struct umsch_mm_set_resource_input {
uint32_t vmid_mask_mm_vcn;
uint32_t vmid_mask_mm_vpe;
+ uint32_t collaboration_mask_vpe;
uint32_t logging_vmid;
uint32_t engine_mask;
union {
struct {
uint32_t disable_reset : 1;
uint32_t disable_umsch_mm_log : 1;
- uint32_t reserved : 30;
+ uint32_t use_rs64mem_for_proc_ctx_csa : 1;
+ uint32_t reserved : 29;
};
uint32_t uint32_all;
};
@@ -78,15 +73,18 @@ struct umsch_mm_add_queue_input {
uint32_t doorbell_offset_1;
enum UMSCH_SWIP_ENGINE_TYPE engine_type;
uint32_t affinity;
- enum UMSCH_SWIP_AFFINITY_TYPE affinity_type;
uint64_t mqd_addr;
uint64_t h_context;
uint64_t h_queue;
uint32_t vm_context_cntl;
+ uint32_t process_csa_array_index;
+ uint32_t context_csa_array_index;
+
struct {
uint32_t is_context_suspended : 1;
- uint32_t reserved : 31;
+ uint32_t collaboration_mode : 1;
+ uint32_t reserved : 30;
};
};
@@ -94,6 +92,7 @@ struct umsch_mm_remove_queue_input {
uint32_t doorbell_offset_0;
uint32_t doorbell_offset_1;
uint64_t context_csa_addr;
+ uint32_t context_csa_array_index;
};
struct MQD_INFO {
@@ -103,6 +102,7 @@ struct MQD_INFO {
uint32_t wptr_val;
uint32_t rptr_val;
uint32_t unmapped;
+ uint32_t vmid;
};
struct amdgpu_umsch_mm;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index b2535023764f4..9c514a606a2f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -60,6 +60,7 @@
#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
#define FIRMWARE_VCN4_0_5 "amdgpu/vcn_4_0_5.bin"
#define FIRMWARE_VCN4_0_6 "amdgpu/vcn_4_0_6.bin"
+#define FIRMWARE_VCN4_0_6_1 "amdgpu/vcn_4_0_6_1.bin"
#define FIRMWARE_VCN5_0_0 "amdgpu/vcn_5_0_0.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
@@ -85,6 +86,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
@@ -93,14 +95,22 @@ int amdgpu_vcn_early_init(struct amdgpu_device *adev)
{
char ucode_prefix[30];
char fw_name[40];
- int r;
+ int r, i;
- amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
- r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name);
- if (r)
- amdgpu_ucode_release(&adev->vcn.fw);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+ if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6) &&
+ i == 1) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%d.bin", ucode_prefix, i);
+ }
+ r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], fw_name);
+ if (r) {
+ amdgpu_ucode_release(&adev->vcn.fw[i]);
+ return r;
+ }
+ }
return r;
}
@@ -141,7 +151,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
}
}
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
/* Bit 20-23, it is encode major and non-zero for new naming convention.
@@ -256,9 +266,10 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
+
+ amdgpu_ucode_release(&adev->vcn.fw[j]);
}
- amdgpu_ucode_release(&adev->vcn.fw);
mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
mutex_destroy(&adev->vcn.vcn_pg_lock);
@@ -354,11 +365,12 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
const struct common_firmware_header *hdr;
unsigned int offset;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
+ memcpy_toio(adev->vcn.inst[i].cpu_addr,
+ adev->vcn.fw[i]->data + offset,
le32_to_cpu(hdr->ucode_size_bytes));
drm_dev_exit(idx);
}
@@ -1043,11 +1055,11 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
-
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
continue;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
/* currently only support 2 FW instances */
if (i >= 2) {
dev_info(adev->dev, "More than 2 VCN FW instances!\n");
@@ -1055,7 +1067,7 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
}
idx = AMDGPU_UCODE_ID_VCN + i;
adev->firmware.ucode[idx].ucode_id = idx;
- adev->firmware.ucode[idx].fw = adev->vcn.fw;
+ adev->firmware.ucode[idx].fw = adev->vcn.fw[i];
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 1985f71b4373b..a418393d89ec9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -306,7 +306,7 @@ struct amdgpu_vcn_ras {
struct amdgpu_vcn {
unsigned fw_version;
struct delayed_work idle_work;
- const struct firmware *fw; /* VCN firmware */
+ const struct firmware *fw[AMDGPU_MAX_VCN_INSTANCES]; /* VCN firmware */
unsigned num_enc_rings;
enum amd_powergating_state cur_state;
bool indirect_sram;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 4299ce386322e..94089069c9ada 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1613,6 +1613,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
+/* Validate operation parameters to prevent potential abuse */
+static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo,
+ uint64_t saddr,
+ uint64_t offset,
+ uint64_t size)
+{
+ uint64_t tmp, lpfn;
+
+ if (saddr & AMDGPU_GPU_PAGE_MASK
+ || offset & AMDGPU_GPU_PAGE_MASK
+ || size & AMDGPU_GPU_PAGE_MASK)
+ return -EINVAL;
+
+ if (check_add_overflow(saddr, size, &tmp)
+ || check_add_overflow(offset, size, &tmp)
+ || size == 0 /* which also leads to end < begin */)
+ return -EINVAL;
+
+ /* make sure object fit at this offset */
+ if (bo && offset + size > amdgpu_bo_size(bo))
+ return -EINVAL;
+
+	/* Ensure the last pfn does not exceed max_pfn */
+ lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
+ if (lpfn >= adev->vm_manager.max_pfn)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
* amdgpu_vm_bo_map - map bo inside a vm
*
@@ -1639,21 +1670,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
+ int r;
- /* validate the parameters */
- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
- return -EINVAL;
- if (saddr + size <= saddr || offset + size <= offset)
- return -EINVAL;
-
- /* make sure object fit at this offset */
- eaddr = saddr + size - 1;
- if ((bo && offset + size > amdgpu_bo_size(bo)) ||
- (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
- return -EINVAL;
+ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+ if (r)
+ return r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
if (tmp) {
@@ -1706,17 +1730,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
uint64_t eaddr;
int r;
- /* validate the parameters */
- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
- return -EINVAL;
- if (saddr + size <= saddr || offset + size <= offset)
- return -EINVAL;
-
- /* make sure object fit at this offset */
- eaddr = saddr + size - 1;
- if ((bo && offset + size > amdgpu_bo_size(bo)) ||
- (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
- return -EINVAL;
+ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+ if (r)
+ return r;
/* Allocate all the needed memory */
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@@ -1730,7 +1746,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
}
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
mapping->start = saddr;
mapping->last = eaddr;
@@ -1817,10 +1833,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
LIST_HEAD(removed);
uint64_t eaddr;
+ int r;
+
+ r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
+ if (r)
+ return r;
- eaddr = saddr + size - 1;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
/* Allocate all the needed memory */
before = kzalloc(sizeof(*before), GFP_KERNEL);
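A minimal sketch of the overflow-safe range check that the new amdgpu_vm_verify_parameters() is built around (the helper and its 'limit' argument are hypothetical, assuming only the linux/overflow.h semantics):

#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/types.h>

/* Reject an empty range, a range whose end would wrap around, or a
 * range whose last byte lies at or beyond 'limit'.
 */
static int range_check(u64 start, u64 size, u64 limit)
{
	u64 end;

	if (size == 0 || check_add_overflow(start, size, &end))
		return -EINVAL;

	if (end - 1 >= limit)
		return -EINVAL;

	return 0;
}

check_add_overflow() evaluates to true when the addition wraps, which is what lets the callers drop the earlier hand-rolled "saddr + size <= saddr" style tests.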
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 70c5cc80ecdc0..6695481f870f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -396,6 +396,12 @@ static int vpe_hw_init(void *handle)
struct amdgpu_vpe *vpe = &adev->vpe;
int ret;
+ /* Power on VPE */
+ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_UNGATE);
+ if (ret)
+ return ret;
+
ret = vpe_load_microcode(vpe);
if (ret)
return ret;
@@ -575,9 +581,6 @@ static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring,
{
unsigned int ret;
- if (ring->adev->vpe.collaborate_mode)
- return ~0;
-
amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0));
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index d6f808acfb17b..fbb43ae7624f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -62,6 +62,11 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}
+static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
+{
+ return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
+}
+
static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
uint32_t inst_idx, struct amdgpu_ring *ring)
{
@@ -87,7 +92,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
case AMDGPU_RING_TYPE_VCN_ENC:
case AMDGPU_RING_TYPE_VCN_JPEG:
ip_blk = AMDGPU_XCP_VCN;
- if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
+ if (aqua_vanjaram_xcp_vcn_shared(adev))
inst_mask = 1 << (inst_idx * 2);
break;
default:
@@ -140,10 +145,12 @@ static int aqua_vanjaram_xcp_sched_list_update(
aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
- /* VCN is shared by two partitions under CPX MODE */
+ /* VCN may be shared by two partitions under CPX MODE in certain
+ * configs.
+ */
if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
- ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
- adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
+ aqua_vanjaram_xcp_vcn_shared(adev))
aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 904b9ff5ead2f..f90905ef32c76 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3657,6 +3657,9 @@ static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
+ if (amdgpu_sriov_vf(adev))
+ return;
+
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(10, 1, 10):
soc15_program_register_sequence(adev,
@@ -4982,7 +4985,8 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
u32 tmp;
int i;
- WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
gfx_v10_0_setup_rb(adev);
gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
@@ -7163,7 +7167,7 @@ static int gfx_v10_0_hw_init(void *handle)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))
gfx_v10_3_program_pbb_mode(adev);
- if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0) && !amdgpu_sriov_vf(adev))
gfx_v10_3_set_power_brake_sequence(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 1770e496c1b7c..f7325b02a191f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1635,7 +1635,7 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
}
- active_rb_bitmap |= global_active_rb_bitmap;
+ active_rb_bitmap &= global_active_rb_bitmap;
adev->gfx.config.backend_enable_mask = active_rb_bitmap;
adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
@@ -5465,6 +5465,7 @@ static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
/* Make sure that we can't skip the SET_Q_MODE packets when the VM
* changed in any way.
*/
+ ring->set_q_mode_offs = 0;
ring->set_q_mode_ptr = NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index cd0e8a321e460..17509f32f61a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -155,6 +155,9 @@ static void gfxhub_v2_1_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* Program the AGP BAR */
WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index 16fe428c0722d..7aed96fa10a9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -418,6 +418,12 @@ static u32 ih_v7_0_get_wptr(struct amdgpu_device *adev,
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 072c478665ade..63f281a9984d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -411,8 +411,11 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
mes_set_hw_res_pkt.oversubscription_timer = 50;
- mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
- mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
+ if (amdgpu_mes_log_enable) {
+ mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
+ mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr =
+ mes->event_log_gpu_addr;
+ }
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
index b3961968c10c4..238ea40c24500 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
@@ -99,16 +99,15 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 3, 0):
case IP_VERSION(3, 3, 1):
- mmhub_cid = mmhub_client_ids_v3_3[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ?
+ mmhub_client_ids_v3_3[cid][rw] :
+ cid == 0x140 ? "UMSCH" : NULL;
break;
default:
mmhub_cid = NULL;
break;
}
- if (!mmhub_cid && cid == 0x140)
- mmhub_cid = "UMSCH";
-
dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
mmhub_cid ? mmhub_cid : "unknown", cid);
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index eaa4f5f499491..82eab49be82bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -431,16 +431,11 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 doorbell_offset, doorbell;
u32 rb_cntl, ib_cntl;
- int i, unset = 0;
+ int i;
for_each_inst(i, inst_mask) {
sdma[i] = &adev->sdma.instance[i].ring;
- if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = 1;
- }
-
rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
@@ -487,20 +482,10 @@ static void sdma_v4_4_2_inst_rlc_stop(struct amdgpu_device *adev,
static void sdma_v4_4_2_inst_page_stop(struct amdgpu_device *adev,
uint32_t inst_mask)
{
- struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
int i;
- bool unset = false;
for_each_inst(i, inst_mask) {
- sdma[i] = &adev->sdma.instance[i].page;
-
- if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
- (!unset)) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = true;
- }
-
rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
RB_ENABLE, 0);
@@ -612,7 +597,7 @@ static uint32_t sdma_v4_4_2_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
/* Set ring buffer size in dwords */
uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);
- barrier(); /* work around https://bugs.llvm.org/show_bug.cgi?id=42576 */
+ barrier(); /* work around https://llvm.org/pr42576 */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
@@ -950,13 +935,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
r = amdgpu_ring_test_helper(page);
if (r)
return r;
-
- if (adev->mman.buffer_funcs_ring == page)
- amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
-
- if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return r;
@@ -1623,19 +1602,9 @@ static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL,
- DRAM_ECC_INT_ENABLE, 0);
- WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
- break;
- /* sdma ecc interrupt is enabled by default
- * driver doesn't need to do anything to
- * enable the interrupt */
- case AMDGPU_IRQ_STATE_ENABLE:
- default:
- break;
- }
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, DRAM_ECC_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 581a3bd11481c..43ca63fe85ac3 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -457,10 +457,8 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
- return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
- return false;
default:
return true;
}
@@ -722,7 +720,10 @@ static int soc21_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_GFX_PG;
- adev->external_rev_id = adev->rev_id + 0x1;
+ if (adev->rev_id == 0)
+ adev->external_rev_id = 0x1;
+ else
+ adev->external_rev_id = adev->rev_id + 0x10;
break;
case IP_VERSION(11, 5, 1):
adev->cg_flags =
@@ -869,10 +870,35 @@ static int soc21_common_suspend(void *handle)
return soc21_common_hw_fini(adev);
}
+static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
+{
+ u32 sol_reg1, sol_reg2;
+
+ /* Will reset for the following suspend abort cases.
+ * 1) Only reset dGPU side.
+ * 2) S3 suspend got aborted and TOS is active.
+ */
+ if (!(adev->flags & AMD_IS_APU) && adev->in_s3 &&
+ !adev->suspend_complete) {
+ sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
+ msleep(100);
+ sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
+
+ return (sol_reg1 != sol_reg2);
+ }
+
+ return false;
+}
+
static int soc21_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (soc21_need_reset_on_resume(adev)) {
+ dev_info(adev->dev, "S3 suspend aborted, resetting...");
+ soc21_asic_reset(adev);
+ }
+
return soc21_common_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
index 8e7b763cfdb7e..bd57896ab85d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
@@ -60,7 +60,7 @@ static int umsch_mm_v4_0_load_microcode(struct amdgpu_umsch_mm *umsch)
umsch->cmd_buf_curr_ptr = umsch->cmd_buf_ptr;
- if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5)) {
+ if (amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 5)) {
WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG,
1 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT);
SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS,
@@ -225,6 +225,8 @@ static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch)
WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size);
+ ring->wptr = 0;
+
data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE);
data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK);
WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data);
@@ -248,7 +250,7 @@ static int umsch_mm_v4_0_ring_stop(struct amdgpu_umsch_mm *umsch)
data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, EN, 0);
WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL, data);
- if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5)) {
+ if (amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 5)) {
WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG,
2 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT);
SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS,
@@ -271,6 +273,8 @@ static int umsch_mm_v4_0_set_hw_resources(struct amdgpu_umsch_mm *umsch)
set_hw_resources.vmid_mask_mm_vcn = umsch->vmid_mask_mm_vcn;
set_hw_resources.vmid_mask_mm_vpe = umsch->vmid_mask_mm_vpe;
+ set_hw_resources.collaboration_mask_vpe =
+ adev->vpe.collaborate_mode ? 0x3 : 0x0;
set_hw_resources.engine_mask = umsch->engine_mask;
set_hw_resources.vcn0_hqd_mask[0] = umsch->vcn0_hqd_mask;
@@ -346,6 +350,7 @@ static int umsch_mm_v4_0_add_queue(struct amdgpu_umsch_mm *umsch,
add_queue.h_queue = input_ptr->h_queue;
add_queue.vm_context_cntl = input_ptr->vm_context_cntl;
add_queue.is_context_suspended = input_ptr->is_context_suspended;
+ add_queue.collaboration_mode = adev->vpe.collaborate_mode ? 1 : 0;
add_queue.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr;
add_queue.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 25ba27151ac0f..aaceecd558cf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -304,7 +304,7 @@ static int vcn_v1_0_resume(void *handle)
*/
static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -371,7 +371,7 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
uint32_t offset;
/* cache window 0: fw */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 18794394c5a05..e357d8cf0c015 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -330,7 +330,7 @@ static int vcn_v2_0_resume(void *handle)
*/
static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
uint32_t offset;
if (amdgpu_sriov_vf(adev))
@@ -386,7 +386,7 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -1878,7 +1878,7 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
init_table += header->vcn_table_offset;
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index aba403d718065..1cd8a94b0fbc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -414,13 +414,15 @@ static int vcn_v2_5_resume(void *handle)
*/
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size;
uint32_t offset;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
+
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
@@ -469,7 +471,7 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -1240,7 +1242,7 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
/* mc resume*/
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V1_0_INSERT_DIRECT_WT(
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index e02af4de521c6..8f82fb887e9c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -449,7 +449,7 @@ static int vcn_v3_0_resume(void *handle)
*/
static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst]->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -499,7 +499,7 @@ static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -1332,7 +1332,7 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 8ab01ae919d2e..832d15f7b5f61 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -382,7 +382,7 @@ static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -442,7 +442,7 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
{
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -1289,7 +1289,7 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
regUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 810bbfccd6f2e..203fa988322bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -332,7 +332,7 @@ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
uint32_t offset, size, vcn_inst;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
vcn_inst = GET_INST(VCN, inst_idx);
@@ -407,7 +407,7 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -894,7 +894,7 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 0468955338b75..501e53e69f2a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -45,7 +45,7 @@
#define mmUVD_DPG_LMA_DATA_BASE_IDX regUVD_DPG_LMA_DATA_BASE_IDX
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
-#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN1_VID_SOC_ADDRESS_3_0 (0x48300 + 0x38000)
#define VCN_HARVEST_MMSCH 0
@@ -329,7 +329,7 @@ static void vcn_v4_0_5_mc_resume(struct amdgpu_device *adev, int inst)
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -390,7 +390,7 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -486,7 +486,8 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
- VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+ adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
@@ -911,7 +912,6 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
VCN, inst_idx, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
-
if (indirect)
amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index d6ee9958ba5fc..bc60c554eb329 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -290,7 +290,7 @@ static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -351,7 +351,7 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index dfa8c69532d47..55aa74cbc5325 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -779,8 +779,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
* nodes, but not more than args->num_of_nodes as that is
* the amount of memory allocated by user
*/
- pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
- args->num_of_nodes), GFP_KERNEL);
+ pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
+ GFP_KERNEL);
if (!pa)
return -ENOMEM;
@@ -1523,7 +1523,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
/* Find a KFD GPU device that supports the get_dmabuf_info query */
for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
- if (dev)
+ if (dev && !kfd_devcgroup_check_permission(dev))
break;
if (!dev)
return -EINVAL;
@@ -1545,7 +1545,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
if (xcp_id >= 0)
args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id;
else
- args->gpu_id = dmabuf_adev->kfd.dev->nodes[0]->id;
+ args->gpu_id = dev->id;
args->flags = flags;
/* Copy metadata buffer to user mode */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 041ec3de55e72..719d6d365e150 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -960,7 +960,6 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
struct kfd_node *node;
int i;
- int count;
if (!kfd->init_complete)
return;
@@ -968,12 +967,10 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
/* for runtime suspend, skip locking kfd */
if (!run_pm) {
mutex_lock(&kfd_processes_mutex);
- count = ++kfd_locked;
- mutex_unlock(&kfd_processes_mutex);
-
/* For first KFD device suspend all the KFD processes */
- if (count == 1)
+ if (++kfd_locked == 1)
kfd_suspend_all_processes();
+ mutex_unlock(&kfd_processes_mutex);
}
for (i = 0; i < kfd->num_nodes; i++) {
@@ -984,7 +981,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
- int ret, count, i;
+ int ret, i;
if (!kfd->init_complete)
return 0;
@@ -998,12 +995,10 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
/* for runtime resume, skip unlocking kfd */
if (!run_pm) {
mutex_lock(&kfd_processes_mutex);
- count = --kfd_locked;
- mutex_unlock(&kfd_processes_mutex);
-
- WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
- if (count == 0)
+ if (--kfd_locked == 0)
ret = kfd_resume_all_processes();
+ WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
+ mutex_unlock(&kfd_processes_mutex);
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index f4d395e38683d..0b655555e1678 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -2001,6 +2001,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
while (halt_if_hws_hang)
schedule();
+ kfd_hws_hang(dqm);
return -ETIME;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
index 9a06c6fb66058..40a21be6c07c9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
@@ -339,7 +339,8 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
break;
}
kfd_signal_event_interrupt(pasid, context_id0 & 0x7fffff, 23);
- } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
+ } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
+ KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) {
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index 7e2859736a558..fe2ad0c0de954 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -328,7 +328,8 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
/* CP */
if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
kfd_signal_event_interrupt(pasid, context_id0, 32);
- else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
+ else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
+ KFD_DBG_EC_TYPE_IS_PACKET(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)))
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_CTXID0_DOORBELL_ID(context_id0),
KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)),
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 91dd5e045b511..c4c6a29052ac8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -388,7 +388,8 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
break;
}
kfd_signal_event_interrupt(pasid, sq_int_data, 24);
- } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
+ } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
+ KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) {
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 42d40560cd30d..a81ef232fdef9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1473,7 +1473,7 @@ static inline void kfd_flush_tlb(struct kfd_process_device *pdd,
static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
{
- return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) ||
+ return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 2) ||
(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 717a60d7a4ea9..b79986412cd83 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -819,9 +819,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
mutex_lock(&kfd_processes_mutex);
if (kfd_is_locked()) {
- mutex_unlock(&kfd_processes_mutex);
pr_debug("KFD is locked! Cannot create process");
- return ERR_PTR(-EINVAL);
+ process = ERR_PTR(-EINVAL);
+ goto out;
}
/* A prior open of /dev/kfd could have already created the process. */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1c9c6096e28fb..6d2f60c61decc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -148,6 +148,9 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
+#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
+
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100
@@ -1767,6 +1770,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
adev->dm.dc->debug.force_subvp_mclk_switch = true;
+ if (amdgpu_dc_debug_mask & DC_ENABLE_DML2)
+ adev->dm.dc->debug.using_dml2 = true;
+
adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
@@ -3041,6 +3047,10 @@ static int dm_resume(void *handle)
/* Do mst topology probing after resuming cached state*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_root)
@@ -4817,9 +4827,11 @@ static int dm_init_microcode(struct amdgpu_device *adev)
fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
break;
case IP_VERSION(3, 5, 0):
- case IP_VERSION(3, 5, 1):
fw_name_dmub = FIRMWARE_DCN_35_DMUB;
break;
+ case IP_VERSION(3, 5, 1):
+ fw_name_dmub = FIRMWARE_DCN_351_DMUB;
+ break;
default:
/* ASIC doesn't support DMUB. */
return 0;
@@ -5918,6 +5930,9 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
&aconnector->base.probed_modes :
&aconnector->base.modes;
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return NULL;
+
if (aconnector->freesync_vid_base.clock != 0)
return &aconnector->freesync_vid_base;
@@ -6302,27 +6317,22 @@ create_stream_for_sink(struct drm_connector *connector,
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
- else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
- stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
- stream->signal == SIGNAL_TYPE_EDP) {
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ stream->signal == SIGNAL_TYPE_EDP) {
//
// should decide whether the stream supports VSC SDP colorimetry
// before building the VSC info packet
//
- stream->use_vsc_sdp_for_colorimetry = false;
- if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- stream->use_vsc_sdp_for_colorimetry =
- aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
- } else {
- if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
- stream->use_vsc_sdp_for_colorimetry = true;
- }
+ stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
+ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
+
if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
tf = TRANSFER_FUNC_GAMMA_22;
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
+ aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
- if (stream->link->psr_settings.psr_feature_enabled)
- aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
}
finish:
dc_sink_release(sink);
@@ -8761,10 +8771,10 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
+notify:
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
-notify:
aconnector = to_amdgpu_dm_connector(connector);
mutex_lock(&adev->dm.audio_lock);
@@ -11271,18 +11281,24 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (!adev->dm.freesync_module)
goto update;
- if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
- || sink->sink_signal == SIGNAL_TYPE_EDP) {
+ if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ sink->sink_signal == SIGNAL_TYPE_EDP)) {
bool edid_check_required = false;
- if (edid) {
- edid_check_required = is_dp_capable_without_timing_msa(
- adev->dm.dc,
- amdgpu_dm_connector);
+ if (is_dp_capable_without_timing_msa(adev->dm.dc,
+ amdgpu_dm_connector)) {
+ if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
+ freesync_capable = true;
+ amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+ } else {
+ edid_check_required = edid->version > 1 ||
+ (edid->version == 1 &&
+ edid->revision > 1);
+ }
}
- if (edid_check_required == true && (edid->version > 1 ||
- (edid->version == 1 && edid->revision > 1))) {
+ if (edid_check_required) {
for (i = 0; i < 4; i++) {
timing = &edid->detailed_timings[i];
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 1f08c6564c3bf..286ecd28cc6e6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -141,9 +141,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
* amdgpu_dm_psr_enable() - enable psr f/w
* @stream: stream state
*
- * Return: true if success
*/
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
+void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
struct dc_link *link = stream->link;
unsigned int vsync_rate_hz = 0;
@@ -190,7 +189,10 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
power_opt |= psr_power_opt_z10_static_screen;
- return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
+ dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
+
+ if (link->ctx->dc->caps.ips_support)
+ dc_allow_idle_optimizations(link->ctx->dc, true);
}
/*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index 6806b3c9c84ba..1fdfd183c0d91 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -32,7 +32,7 @@
#define AMDGPU_DM_PSR_ENTRY_DELAY 5
void amdgpu_dm_set_psr_caps(struct dc_link *link);
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
+void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
index 16e72d623630c..08c494a7a21ba 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
@@ -76,10 +76,8 @@ static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder,
static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
-
- return drm_add_modes_noedid(connector, dev->mode_config.max_width,
- dev->mode_config.max_height);
+ /* Maximum resolution supported by DWB */
+ return drm_add_modes_noedid(connector, 3840, 2160);
}
static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 12f3e8aa46d8d..6ad4f4efec5dd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa(
return display_count;
}
-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
+static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+ bool safe_to_lower, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe = safe_to_lower
+ ? &context->res_ctx.pipe_ctx[i]
+ : &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
- dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
+ !pipe->stream->link_enc)) {
if (disable) {
- pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+
reset_sync_context_for_pipe(dc, context, i);
} else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
@@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn316_disable_otg_wa(clk_mgr_base, context, true);
+ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn316_disable_otg_wa(clk_mgr_base, context, false);
+ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 668f05c8654ef..bec252e1dd27a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -216,6 +216,16 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950)
clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950;
+ /* DPPCLK */
+ dcn32_init_single_clock(clk_mgr, PPCLK_DPPCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
+ &num_entries_per_clk->num_dppclk_levels);
+ num_levels = num_entries_per_clk->num_dppclk_levels;
+ clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DPPCLK);
+ //HW recommends limit of 1950 MHz in display clock for all DCN3.2.x
+ if (clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz > 1950)
+ clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = 1950;
+
if (num_entries_per_clk->num_dcfclk_levels &&
num_entries_per_clk->num_dtbclk_levels &&
num_entries_per_clk->num_dispclk_levels)
@@ -240,6 +250,10 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
= khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz);
}
+ for (i = 0; i < num_levels; i++)
+ if (clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz > 1950)
+ clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz = 1950;
+
/* Get UCLK, update bounding box */
clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index c378b879c76d8..d9c5692c86c21 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -73,6 +73,14 @@
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
+#define regCLK5_0_CLK5_spll_field_8 0x464b
+#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0
+
+#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd
+#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+
+#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
+
#define REG(reg_name) \
(ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
@@ -409,11 +417,25 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
{
}
+static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc_context *ctx = clk_mgr->base.ctx;
+ uint32_t ssc_enable;
+
+ REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable);
+
+ return ssc_enable == 1;
+}
+
static void init_clk_states(struct clk_mgr *clk_mgr)
{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ if (clk_mgr_int->smu_ver >= SMU_VER_THRESHOLD)
+ clk_mgr->clks.dtbclk_en = true; // request DTBCLK disable on first commit
clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
@@ -423,7 +445,16 @@ static void init_clk_states(struct clk_mgr *clk_mgr)
void dcn35_init_clocks(struct clk_mgr *clk_mgr)
{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
init_clk_states(clk_mgr);
+
+ // adjust the dp_dto reference clock if SSC is enabled, otherwise apply dprefclk
+ if (dcn35_is_spll_ssc_enabled(clk_mgr))
+ clk_mgr->dp_dto_source_clock_in_khz =
+ dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
+ else
+ clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+
}
static struct clk_bw_params dcn35_bw_params = {
.vram_type = Ddr4MemType,
@@ -512,6 +543,28 @@ static DpmClocks_t_dcn35 dummy_clocks;
static struct dcn35_watermarks dummy_wms = { 0 };
+static struct dcn35_ss_info_table ss_info_table = {
+ .ss_divider = 1000,
+ .ss_percentage = {0, 0, 375, 375, 375}
+};
+
+static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
+{
+ struct dc_context *ctx = clk_mgr->base.ctx;
+ uint32_t clock_source;
+
+ REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
+ // If it's DFS mode, clock_source is 0.
+ if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
+ clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
+
+ if (clk_mgr->dprefclk_ss_percentage != 0) {
+ clk_mgr->ss_on_dprefclk = true;
+ clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
+ }
+ }
+}
+
static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
{
int i, num_valid_sets;
@@ -709,7 +762,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
clock_table->NumFclkLevelsEnabled;
max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk);
- num_dcfclk = (clock_table->NumFclkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
+ num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
clock_table->NumDcfClkLevelsEnabled;
for (i = 0; i < num_dcfclk; i++) {
int j;
@@ -1056,6 +1109,8 @@ void dcn35_clk_mgr_construct(
dce_clock_read_ss_info(&clk_mgr->base);
/*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/
+ dcn35_read_ss_info_from_lut(&clk_mgr->base);
+
clk_mgr->base.base.bw_params = &dcn35_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5211c1c0f3c0c..03b554e912a20 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1302,6 +1302,54 @@ static void disable_vbios_mode_if_required(
}
}
+/**
+ * wait_for_blank_complete - wait for all active OPPs to finish pending blank
+ * pattern updates
+ *
+ * @dc: [in] dc reference
+ * @context: [in] hardware context in use
+ */
+static void wait_for_blank_complete(struct dc *dc,
+ struct dc_state *context)
+{
+ struct pipe_ctx *opp_head;
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ if (!hws->funcs.wait_for_blank_complete)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ opp_head = &context->res_ctx.pipe_ctx[i];
+
+ if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
+ dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
+ continue;
+
+ hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
+ }
+}
+
+static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
+{
+ struct pipe_ctx *otg_master;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ otg_master = &context->res_ctx.pipe_ctx[i];
+ if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
+ dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
+ continue;
+ tg = otg_master->stream_res.tg;
+ if (tg->funcs->wait_odm_doublebuffer_pending_clear)
+ tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+ }
+
+ /* ODM update may require reprogramming the blank pattern for each OPP */
+ wait_for_blank_complete(dc, context);
+}
+
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
@@ -1993,6 +2041,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize bw */
wait_for_no_pipes_pending(dc, context);
+ /*
+ * optimized dispclk depends on ODM setup. Need to wait for ODM
+ * update pending complete before optimizing bandwidth.
+ */
+ wait_for_odm_update_pending_complete(dc, context);
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
/* Need to do otg sync again as otg could be out of sync due to otg
@@ -2971,7 +3024,8 @@ static void backup_planes_and_stream_state(
scratch->blend_tf[i] = *status->plane_states[i]->blend_tf;
}
scratch->stream_state = *stream;
- scratch->out_transfer_func = *stream->out_transfer_func;
+ if (stream->out_transfer_func)
+ scratch->out_transfer_func = *stream->out_transfer_func;
}
static void restore_planes_and_stream_state(
@@ -2993,7 +3047,8 @@ static void restore_planes_and_stream_state(
*status->plane_states[i]->blend_tf = scratch->blend_tf[i];
}
*stream = scratch->stream_state;
- *stream->out_transfer_func = scratch->out_transfer_func;
+ if (stream->out_transfer_func)
+ *stream->out_transfer_func = scratch->out_transfer_func;
}
static bool update_planes_and_stream_state(struct dc *dc,
@@ -3270,6 +3325,9 @@ static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_s
if (stream->link->replay_settings.config.replay_supported)
return true;
+ if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
+ return true;
+
return false;
}
@@ -3493,7 +3551,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
top_pipe_to_program->stream->update_flags.raw = 0;
}
-static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
+static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
{
/*
* This function calls HWSS to wait for any potentially double buffered
@@ -3531,6 +3589,7 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state
}
}
}
+ wait_for_odm_update_pending_complete(dc, dc_context);
}
static void commit_planes_for_stream(struct dc *dc,
@@ -4844,22 +4903,16 @@ void dc_exit_ips_for_hw_access(struct dc *dc)
bool dc_dmub_is_ips_idle_state(struct dc *dc)
{
- uint32_t idle_state = 0;
-
if (dc->debug.disable_idle_power_optimizations)
return false;
if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
return false;
- if (dc->hwss.get_idle_state)
- idle_state = dc->hwss.get_idle_state(dc);
-
- if (!(idle_state & DMUB_IPS1_ALLOW_MASK) ||
- !(idle_state & DMUB_IPS2_ALLOW_MASK))
- return true;
+ if (!dc->ctx->dmub_srv)
+ return false;
- return false;
+ return dc->ctx->dmub_srv->idle_allowed;
}
/* set min and max memory clock to lowest and highest DPM level, respectively */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 180ac47868c22..61986e5cb4919 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -334,7 +334,8 @@ static void dc_state_free(struct kref *kref)
void dc_state_release(struct dc_state *state)
{
- kref_put(&state->refcount, dc_state_free);
+ if (state != NULL)
+ kref_put(&state->refcount, dc_state_free);
}
/*
* dc_state_add_stream() - Add a new dc_stream_state to a dc_state.
@@ -435,6 +436,15 @@ bool dc_state_add_plane(
goto out;
}
+ if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
+ /* ODM combine could prevent us from supporting more planes.
+ * We will reset the ODM slice count back to 1 when all planes have
+ * been removed, to maximize the number of planes supported when
+ * new planes are added.
+ */
+ resource_update_pipes_for_stream_with_slice_count(
+ state, dc->current_state, dc->res_pool, stream, 1);
+
otg_master_pipe = resource_get_otg_master_for_stream(
&state->res_ctx, stream);
if (otg_master_pipe)
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 9900dda2eef5c..be2ac5c442a48 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1085,9 +1085,9 @@ struct replay_settings {
/* SMU optimization is enabled */
bool replay_smu_opt_enable;
/* Current Coasting vtotal */
- uint16_t coasting_vtotal;
+ uint32_t coasting_vtotal;
/* Coasting vtotal table */
- uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
+ uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
/* Maximum link off frame count */
enum replay_link_off_frame_count_level link_off_frame_count_level;
/* Replay pseudo vtotal for abm + ips on full screen video which can improve ips residency */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 970644b695cd4..b5e0289d2fe82 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -976,7 +976,10 @@ static bool dcn31_program_pix_clk(
struct bp_pixel_clock_parameters bp_pc_params = {0};
enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
- if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
+ // Apply the SS-adjusted (spread spectrum) dpref clock for eDP only.
+ if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0
+ && pix_clk_params->signal_type == SIGNAL_TYPE_EDP
+ && encoding == DP_8b_10b_ENCODING)
dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
// For these signal types Driver to program DP_DTO without calling VBIOS Command table
if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
@@ -1093,9 +1096,6 @@ static bool get_pixel_clk_frequency_100hz(
unsigned int modulo_hz = 0;
unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz;
- if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
- dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
-
if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) {
clock_hz = REG_READ(PHASE[inst]);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
index f0777d61c2cbb..c307f040e48fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = -Wno-override-init
DCE110 = dce110_timing_generator.o \
dce110_compressor.o dce110_opp_regamma_v.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
index 7e92effec8944..683866797709b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = -Wno-override-init
DCE112 = dce112_compressor.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
index 1e3ef68a452a5..8f508e6627480 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
@@ -24,7 +24,7 @@
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = -Wno-override-init
DCE120 = dce120_timing_generator.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
index fee331accc0e7..eede83ad91fa0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = -Wno-override-init
DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
dce60_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
index 7eefffbdc9253..fba189d26652d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = -Wno-override-init
DCE80 = dce80_timing_generator.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 48a40dcc7050b..5838a11efd00c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -384,6 +384,7 @@ static const struct opp_funcs dcn10_opp_funcs = {
.opp_set_disp_pattern_generator = NULL,
.opp_program_dpg_dimensions = NULL,
.dpg_is_blanked = NULL,
+ .dpg_is_pending = NULL,
.opp_destroy = opp1_destroy
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
index 0784d01986610..fbf1b6370eb23 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
@@ -337,6 +337,19 @@ bool opp2_dpg_is_blanked(struct output_pixel_processor *opp)
(double_buffer_pending == 0);
}
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+ uint32_t double_buffer_pending;
+ uint32_t dpg_en;
+
+ REG_GET(DPG_CONTROL, DPG_EN, &dpg_en);
+
+ REG_GET(DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending);
+
+ return (dpg_en == 1 && double_buffer_pending == 1);
+}
+
void opp2_program_left_edge_extra_pixel (
struct output_pixel_processor *opp,
bool count)
@@ -363,6 +376,7 @@ static struct opp_funcs dcn20_opp_funcs = {
.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
.dpg_is_blanked = opp2_dpg_is_blanked,
+ .dpg_is_pending = opp2_dpg_is_pending,
.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
index 3ab221bdd27dd..8f186abd558db 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
@@ -159,6 +159,8 @@ void opp2_program_dpg_dimensions(
bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp);
+
void opp2_dpg_set_blank_color(
struct output_pixel_processor *opp,
const struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
index 8e77db46a4090..6a71ba3dfc632 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
@@ -50,6 +50,7 @@ static struct opp_funcs dcn201_opp_funcs = {
.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
.dpg_is_blanked = opp2_dpg_is_blanked,
+ .dpg_is_pending = opp2_dpg_is_pending,
.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index bf3386cd444d6..5ebb573031304 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -44,6 +44,36 @@
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+void mpc3_mpc_init(struct mpc *mpc)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ int opp_id;
+
+ mpc1_mpc_init(mpc);
+
+ for (opp_id = 0; opp_id < MAX_OPP; opp_id++) {
+ if (REG(MUX[opp_id]))
+ /* disable mpc out rate and flow control */
+ REG_UPDATE_2(MUX[opp_id], MPC_OUT_RATE_CONTROL_DISABLE,
+ 1, MPC_OUT_FLOW_CONTROL_COUNT, 0);
+ }
+}
+
+void mpc3_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ mpc1_mpc_init_single_inst(mpc, mpcc_id);
+
+ /* assuming mpc out mux is connected to opp with the same index at this
+ * point in time (e.g. transitioning from vbios to driver)
+ */
+ if (mpcc_id < MAX_OPP && REG(MUX[mpcc_id]))
+ /* disable mpc out rate and flow control */
+ REG_UPDATE_2(MUX[mpcc_id], MPC_OUT_RATE_CONTROL_DISABLE,
+ 1, MPC_OUT_FLOW_CONTROL_COUNT, 0);
+}
+
bool mpc3_is_dwb_idle(
struct mpc *mpc,
int dwb_id)
@@ -80,25 +110,6 @@ void mpc3_disable_dwb_mux(
MPC_DWB0_MUX, 0xf);
}
-void mpc3_set_out_rate_control(
- struct mpc *mpc,
- int opp_id,
- bool enable,
- bool rate_2x_mode,
- struct mpc_dwb_flow_control *flow_control)
-{
- struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
-
- REG_UPDATE_2(MUX[opp_id],
- MPC_OUT_RATE_CONTROL_DISABLE, !enable,
- MPC_OUT_RATE_CONTROL, rate_2x_mode);
-
- if (flow_control)
- REG_UPDATE_2(MUX[opp_id],
- MPC_OUT_FLOW_CONTROL_MODE, flow_control->flow_ctrl_mode,
- MPC_OUT_FLOW_CONTROL_COUNT, flow_control->flow_ctrl_cnt1);
-}
-
enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id)
{
/*Contrary to DCN2 and DCN1 wherein a single status register field holds this info;
@@ -1490,8 +1501,8 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
.read_mpcc_state = mpc3_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
- .mpc_init = mpc1_mpc_init,
- .mpc_init_single_inst = mpc1_mpc_init_single_inst,
+ .mpc_init = mpc3_mpc_init,
+ .mpc_init_single_inst = mpc3_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
.cursor_lock = mpc1_cursor_lock,
.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
@@ -1508,7 +1519,6 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
.set_dwb_mux = mpc3_set_dwb_mux,
.disable_dwb_mux = mpc3_disable_dwb_mux,
.is_dwb_idle = mpc3_is_dwb_idle,
- .set_out_rate_control = mpc3_set_out_rate_control,
.set_gamut_remap = mpc3_set_gamut_remap,
.program_shaper = mpc3_program_shaper,
.acquire_rmu = mpcc3_acquire_rmu,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
index 9cb96ae95a2f7..ce93003dae011 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
@@ -1007,6 +1007,13 @@ void dcn30_mpc_construct(struct dcn30_mpc *mpc30,
int num_mpcc,
int num_rmu);
+void mpc3_mpc_init(
+ struct mpc *mpc);
+
+void mpc3_mpc_init_single_inst(
+ struct mpc *mpc,
+ unsigned int mpcc_id);
+
bool mpc3_program_shaper(
struct mpc *mpc,
const struct pwl_params *params,
@@ -1078,13 +1085,6 @@ bool mpc3_is_dwb_idle(
struct mpc *mpc,
int dwb_id);
-void mpc3_set_out_rate_control(
- struct mpc *mpc,
- int opp_id,
- bool enable,
- bool rate_2x_mode,
- struct mpc_dwb_flow_control *flow_control);
-
void mpc3_power_on_ogam_lut(
struct mpc *mpc, int mpcc_id,
bool power_on);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index e224a028d68ac..8a0460e863097 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -248,14 +248,12 @@ void dcn32_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
- enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
-
- enc10->base.features = *enc_features;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+ enc10->base.features = *enc_features;
enc10->base.transmitter = init_data->transmitter;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index e789e654c3870..e408e859b3556 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -47,7 +47,7 @@ void mpc32_mpc_init(struct mpc *mpc)
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
int mpcc_id;
- mpc1_mpc_init(mpc);
+ mpc3_mpc_init(mpc);
if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
if (mpc30->mpc_mask->MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE) {
@@ -991,7 +991,7 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
.mpc_init = mpc32_mpc_init,
- .mpc_init_single_inst = mpc1_mpc_init_single_inst,
+ .mpc_init_single_inst = mpc3_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
.cursor_lock = mpc1_cursor_lock,
.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
@@ -1008,7 +1008,6 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
.set_dwb_mux = mpc3_set_dwb_mux,
.disable_dwb_mux = mpc3_disable_dwb_mux,
.is_dwb_idle = mpc3_is_dwb_idle,
- .set_out_rate_control = mpc3_set_out_rate_control,
.set_gamut_remap = mpc3_set_gamut_remap,
.program_shaper = mpc32_program_shaper,
.program_3dlut = mpc32_program_3dlut,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 87760600e154d..f98def6c8c2d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -782,3 +782,9 @@ void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc
pipe_cnt++;
}
}
+
+void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context)
+{
+ if (dcn32_subvp_in_use(dc, context) && context->bw_ctx.bw.dcn.clk.dcfclk_khz <= MIN_SUBVP_DCFCLK_KHZ)
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = MIN_SUBVP_DCFCLK_KHZ;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
index 81e349d5835bb..da94e5309fbaf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
@@ -184,6 +184,8 @@ void dcn35_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
+ if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+ enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
@@ -238,8 +240,6 @@ void dcn35_link_encoder_construct(
}
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
if (bp_funcs->get_connector_speed_cap_info)
result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index b49e1dc9d8ba5..a0a65e0991041 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -623,6 +623,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* - Not TMZ surface
*/
if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
+ !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
(refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 80bebfc268db0..21e0eef3269b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -166,8 +166,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.num_states = 5,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
- .sr_exit_z8_time_us = 210.0,
- .sr_enter_plus_exit_z8_time_us = 320.0,
+ .sr_exit_z8_time_us = 250.0,
+ .sr_enter_plus_exit_z8_time_us = 350.0,
.fclk_change_latency_us = 24.0,
.usr_retraining_latency_us = 2,
.writeback_latency_us = 12.0,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index dc9e1b758ed6a..b3ffab77cf889 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -98,55 +98,114 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
.clock_limits = {
{
.state = 0,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
+ .dcfclk_mhz = 400.0,
+ .fabricclk_mhz = 400.0,
+ .socclk_mhz = 600.0,
+ .dram_speed_mts = 3200.0,
+ .dispclk_mhz = 600.0,
+ .dppclk_mhz = 600.0,
.phyclk_mhz = 600.0,
.phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 186.0,
+ .dscclk_mhz = 200.0,
.dtbclk_mhz = 600.0,
},
{
.state = 1,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
+ .dcfclk_mhz = 600.0,
+ .fabricclk_mhz = 1000.0,
+ .socclk_mhz = 733.0,
+ .dram_speed_mts = 6400.0,
+ .dispclk_mhz = 800.0,
+ .dppclk_mhz = 800.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
+ .dscclk_mhz = 266.7,
.dtbclk_mhz = 600.0,
},
{
.state = 2,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
+ .dcfclk_mhz = 738.0,
+ .fabricclk_mhz = 1200.0,
+ .socclk_mhz = 880.0,
+ .dram_speed_mts = 7500.0,
+ .dispclk_mhz = 800.0,
+ .dppclk_mhz = 800.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
+ .dscclk_mhz = 266.7,
.dtbclk_mhz = 600.0,
},
{
.state = 3,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
+ .dcfclk_mhz = 800.0,
+ .fabricclk_mhz = 1400.0,
+ .socclk_mhz = 978.0,
+ .dram_speed_mts = 7500.0,
+ .dispclk_mhz = 960.0,
+ .dppclk_mhz = 960.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 371.0,
+ .dscclk_mhz = 320.0,
.dtbclk_mhz = 600.0,
},
{
.state = 4,
+ .dcfclk_mhz = 873.0,
+ .fabricclk_mhz = 1600.0,
+ .socclk_mhz = 1100.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1066.7,
+ .dppclk_mhz = 1066.7,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 355.6,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 5,
+ .dcfclk_mhz = 960.0,
+ .fabricclk_mhz = 1700.0,
+ .socclk_mhz = 1257.0,
+ .dram_speed_mts = 8533.0,
.dispclk_mhz = 1200.0,
.dppclk_mhz = 1200.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
+ .dscclk_mhz = 400.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 6,
+ .dcfclk_mhz = 1067.0,
+ .fabricclk_mhz = 1850.0,
+ .socclk_mhz = 1257.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1371.4,
+ .dppclk_mhz = 1371.4,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 457.1,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 7,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 2000.0,
+ .socclk_mhz = 1467.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1600.0,
+ .dppclk_mhz = 1600.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 533.3,
.dtbclk_mhz = 600.0,
},
},
- .num_states = 5,
+ .num_states = 8,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
- .sr_exit_z8_time_us = 210.0,
- .sr_enter_plus_exit_z8_time_us = 320.0,
+ .sr_exit_z8_time_us = 250.0,
+ .sr_enter_plus_exit_z8_time_us = 350.0,
.fclk_change_latency_us = 24.0,
.usr_retraining_latency_us = 2,
.writeback_latency_us = 12.0,
@@ -177,6 +236,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
.do_urgent_latency_adjustment = 0,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .num_chans = 4,
+ .dram_clock_change_latency_us = 11.72,
+ .dispclk_dppclk_vco_speed_mhz = 2400.0,
};
/*
@@ -340,6 +402,8 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
clock_limits[i].socclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
+ clock_limits[i].dtbclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
@@ -352,6 +416,8 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
+ clk_table->num_entries;
}
}
@@ -551,6 +617,7 @@ void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context)
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
}
+
/*dcn351 does not support z9/z10*/
if (context->stream_count == 0 || plane_count == 0) {
support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
@@ -564,11 +631,9 @@ void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context)
dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
-
/*for psr1/psr-su, we allow z8 and z10 based on latency, for replay with IPS enabled, it will enter ips2*/
- if (is_pwrseq0 && (is_psr || is_replay))
+ if (is_pwrseq0 && (is_psr || is_replay))
support = allow_z8 ? allow_z8 : DCN_ZSTATE_SUPPORT_DISALLOW;
-
}
context->bw_ctx.bw.dcn.clk.zstate_support = support;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 1ba6933d2b361..a20f28a5d2e7b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -228,17 +228,13 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
break;
case dml_project_dcn35:
+ case dml_project_dcn351:
out->num_chans = 4;
out->round_trip_ping_latency_dcfclk_cycles = 106;
out->smn_latency_us = 2;
out->dispclk_dppclk_vco_speed_mhz = 3600;
break;
- case dml_project_dcn351:
- out->num_chans = 16;
- out->round_trip_ping_latency_dcfclk_cycles = 1100;
- out->smn_latency_us = 2;
- break;
}
/* ---Overrides if available--- */
if (dml2->config.bbox_overrides.dram_num_chan)
@@ -824,13 +820,25 @@ static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state
static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in)
{
+ dml_uint_t width, height;
+
+ if (in->timing.h_addressable > 3840)
+ width = 3840;
+ else
+ width = in->timing.h_addressable; // 4K max
+
+ if (in->timing.v_addressable > 2160)
+ height = 2160;
+ else
+ height = in->timing.v_addressable; // 4K max
+
out->CursorBPP[location] = dml_cur_32bit;
out->CursorWidth[location] = 256;
out->GPUVMMinPageSizeKBytes[location] = 256;
- out->ViewportWidth[location] = in->timing.h_addressable;
- out->ViewportHeight[location] = in->timing.v_addressable;
+ out->ViewportWidth[location] = width;
+ out->ViewportHeight[location] = height;
out->ViewportStationary[location] = false;
out->ViewportWidthChroma[location] = 0;
out->ViewportHeightChroma[location] = 0;
@@ -849,7 +857,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned
out->HTapsChroma[location] = 0;
out->VTapsChroma[location] = 0;
out->SourceScan[location] = dml_rotation_0;
- out->ScalerRecoutWidth[location] = in->timing.h_addressable;
+ out->ScalerRecoutWidth[location] = width;
out->LBBitPerPixel[location] = 57;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 2a58a7687bdb5..72cca367062e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -703,13 +703,8 @@ static inline struct dml2_context *dml2_allocate_memory(void)
return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
}
-bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
- // Allocate Mode Lib Ctx
- *dml2 = dml2_allocate_memory();
-
- if (!(*dml2))
- return false;
// Store config options
(*dml2)->config = *config;
@@ -737,9 +732,18 @@ bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options
initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);
initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
+}
+
+bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+{
+ // Allocate Mode Lib Ctx
+ *dml2 = dml2_allocate_memory();
+
+ if (!(*dml2))
+ return false;
+
+ dml2_init(in_dc, config, dml2);
- /*Initialize DML20 instance which calls dml2_core_create, and core_dcn3_populate_informative*/
- //dml2_initialize_instance(&(*dml_ctx)->v20.dml_init);
return true;
}
@@ -779,3 +783,11 @@ bool dml2_create_copy(struct dml2_context **dst_dml2,
return true;
}
+
+void dml2_reinit(const struct dc *in_dc,
+ const struct dml2_configuration_options *config,
+ struct dml2_context **dml2)
+{
+
+ dml2_init(in_dc, config, dml2);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index ee0eb184eb6d7..cc662d682fd4d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -214,6 +214,9 @@ void dml2_copy(struct dml2_context *dst_dml2,
struct dml2_context *src_dml2);
bool dml2_create_copy(struct dml2_context **dst_dml2,
struct dml2_context *src_dml2);
+void dml2_reinit(const struct dc *in_dc,
+ const struct dml2_configuration_options *config,
+ struct dml2_context **dml2);
/*
* dml2_validate - Determines if a display configuration is supported or not.
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 9d5df4c0da597..0ba1feaf96c0d 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1185,7 +1185,8 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
if (dccg) {
dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ if (dccg && dccg->funcs->set_dtbclk_dto)
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
}
} else if (dccg && dccg->funcs->disable_symclk_se) {
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index c55d5155ecb9c..8b3536c380b8d 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -1498,6 +1498,11 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
return;
}
+ if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
+ resource_is_odm_topology_changed(new_pipe, old_pipe))
+ /* Detect odm changes */
+ new_pipe->update_flags.bits.odm = 1;
+
/* Exit on unchanged, unused pipe */
if (!old_pipe->plane_state && !new_pipe->plane_state)
return;
@@ -1551,10 +1556,6 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
/* Detect top pipe only changes */
if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
- /* Detect odm changes */
- if (resource_is_odm_topology_changed(new_pipe, old_pipe))
- new_pipe->update_flags.bits.odm = 1;
-
/* Detect global sync changes */
if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
|| old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
@@ -1999,19 +2000,20 @@ void dcn20_program_front_end_for_ctx(
DC_LOGGER_INIT(dc->ctx->logger);
unsigned int prev_hubp_count = 0;
unsigned int hubp_count = 0;
+ struct pipe_ctx *pipe;
if (resource_is_pipe_topology_changed(dc->current_state, context))
resource_log_pipe_topology_update(dc, context);
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
- ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+ ASSERT(!pipe->plane_state->triplebuffer_flips);
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
- dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ dc, pipe, pipe->plane_state->triplebuffer_flips);
}
}
}
@@ -2085,12 +2087,22 @@ void dcn20_program_front_end_for_ctx(
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
+ /* update ODM for blanked OTG master pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+ !resource_is_pipe_type(pipe, DPP_PIPE) &&
+ pipe->update_flags.bits.odm &&
+ hws->funcs.update_odm)
+ hws->funcs.update_odm(dc, context, pipe);
+ }
+
/*
* Program all updated pipes, order matters for mpcc setup. Start with
* top pipe and program all pipes that follow in order
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->plane_state && !pipe->top_pipe) {
while (pipe) {
@@ -2129,17 +2141,6 @@ void dcn20_program_front_end_for_ctx(
context->stream_status[0].plane_count > 1) {
pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
}
-
- /* when dynamic ODM is active, pipes must be reconfigured when all planes are
- * disabled, as some transitions will leave software and hardware state
- * mismatched.
- */
- if (dc->debug.enable_single_display_2to1_odm_policy &&
- pipe->stream &&
- pipe->update_flags.bits.disable &&
- !pipe->prev_odm_pipe &&
- hws->funcs.update_odm)
- hws->funcs.update_odm(dc, context, pipe);
}
}
@@ -2451,7 +2452,7 @@ bool dcn20_wait_for_blank_complete(
int counter;
for (counter = 0; counter < 1000; counter++) {
- if (opp->funcs->dpg_is_blanked(opp))
+ if (!opp->funcs->dpg_is_pending(opp))
break;
udelay(100);
@@ -2462,7 +2463,7 @@ bool dcn20_wait_for_blank_complete(
return false;
}
- return true;
+ return opp->funcs->dpg_is_blanked(opp);
}
bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
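The rework above makes dcn20_wait_for_blank_complete() poll the new dpg_is_pending() hook until the double-buffered DPG update lands, and only then report the actual blanked state. A small userspace sketch of that two-step wait, with hypothetical stand-in types and usleep() standing in for udelay():

#include <stdbool.h>
#include <unistd.h>

struct opp;

struct opp_funcs {
	bool (*dpg_is_blanked)(struct opp *opp);
	bool (*dpg_is_pending)(struct opp *opp);
};

struct opp { const struct opp_funcs *funcs; };

static bool wait_for_blank_complete(struct opp *opp)
{
	int counter;

	/* First wait for the pending double-buffered update to land... */
	for (counter = 0; counter < 1000; counter++) {
		if (!opp->funcs->dpg_is_pending(opp))
			break;
		usleep(100);
	}
	if (counter == 1000)
		return false;	/* timed out while the update was still pending */

	/* ...then report whether the output really ended up blanked. */
	return opp->funcs->dpg_is_blanked(opp);
}

Checking pending before blanked avoids declaring success while the blank programming is still latched in the double buffer.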
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index 7e6b7f2a6dc9e..8bc3d01537bbd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -812,10 +812,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
if (pipe_ctx == NULL)
return;
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
pipe_ctx->stream_res.stream_enc,
enable);
+
+ /* Wait for two frames to make sure AV mute is sent out */
+ if (enable) {
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ }
+ }
}
void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
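The change above holds dcn30_set_avmute() for roughly two full frames after asserting mute, by alternating VACTIVE and VBLANK waits on the timing generator. A compact sketch of the same idea as a reusable helper, with hypothetical stand-in types for the timing generator:

enum crtc_state { CRTC_STATE_VACTIVE, CRTC_STATE_VBLANK };

struct timing_gen;

struct timing_gen_funcs {
	void (*wait_for_state)(struct timing_gen *tg, enum crtc_state state);
};

struct timing_gen { const struct timing_gen_funcs *funcs; };

/* Wait out 'frames' full frames by pairing one VACTIVE and one VBLANK wait
 * per frame, then land back in VACTIVE before returning to the caller. */
static void wait_frames(struct timing_gen *tg, int frames)
{
	int i;

	for (i = 0; i < frames; i++) {
		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
	}
	tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
}

With frames = 2 this reproduces the VACTIVE/VBLANK/VACTIVE/VBLANK/VACTIVE sequence added above, giving the AV mute packet time to go out before the stream is torn down.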
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index 3a9cc8ac0c079..093f4387553ce 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -69,29 +69,6 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
-static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
- int opp_cnt)
-{
- bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
- int flow_ctrl_cnt;
-
- if (opp_cnt >= 2)
- hblank_halved = true;
-
- flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
- stream->timing.h_border_left -
- stream->timing.h_border_right;
-
- if (hblank_halved)
- flow_ctrl_cnt /= 2;
-
- /* ODM combine 4:1 case */
- if (opp_cnt == 4)
- flow_ctrl_cnt /= 2;
-
- return flow_ctrl_cnt;
-}
-
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -183,10 +160,6 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
- bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
- struct mpc_dwb_flow_control flow_control;
- struct mpc *mpc = dc->res_pool->mpc;
- int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -199,20 +172,6 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
- flow_control.flow_ctrl_mode = 0;
- flow_control.flow_ctrl_cnt0 = 0x80;
- flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
- if (mpc->funcs->set_out_rate_control) {
- for (i = 0; i < opp_cnt; ++i) {
- mpc->funcs->set_out_rate_control(
- mpc, opp_inst[i],
- true,
- rate_control_2x_pclk,
- &flow_control);
- }
- }
-
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index aa36d7a56ca8c..7668229438da2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -966,29 +966,6 @@ void dcn32_init_hw(struct dc *dc)
}
}
-static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
- int opp_cnt)
-{
- bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
- int flow_ctrl_cnt;
-
- if (opp_cnt >= 2)
- hblank_halved = true;
-
- flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
- stream->timing.h_border_left -
- stream->timing.h_border_right;
-
- if (hblank_halved)
- flow_ctrl_cnt /= 2;
-
- /* ODM combine 4:1 case */
- if (opp_cnt == 4)
- flow_ctrl_cnt /= 2;
-
- return flow_ctrl_cnt;
-}
-
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -1103,10 +1080,6 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
- bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
- struct mpc_dwb_flow_control flow_control;
- struct mpc *mpc = dc->res_pool->mpc;
- int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -1119,20 +1092,6 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
- flow_control.flow_ctrl_mode = 0;
- flow_control.flow_ctrl_cnt0 = 0x80;
- flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
- if (mpc->funcs->set_out_rate_control) {
- for (i = 0; i < opp_cnt; ++i) {
- mpc->funcs->set_out_rate_control(
- mpc, opp_inst[i],
- true,
- rate_control_2x_pclk,
- &flow_control);
- }
- }
-
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
@@ -1156,6 +1115,13 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
dsc->funcs->dsc_disconnect(dsc);
}
}
+
+ if (!resource_is_pipe_type(pipe_ctx, DPP_PIPE))
+ /*
+ * blank pattern is generated by OPP, reprogram blank pattern
+ * due to OPP count change
+ */
+ dc->hwseq->funcs.blank_pixel_data(dc, pipe_ctx, true);
}
unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
@@ -1778,3 +1744,26 @@ void dcn32_prepare_bandwidth(struct dc *dc,
context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
}
}
+
+void dcn32_interdependent_update_lock(struct dc *dc,
+ struct dc_state *context, bool lock)
+{
+ unsigned int i;
+ struct pipe_ctx *pipe;
+ struct timing_generator *tg;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
+
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !tg->funcs->is_tg_enabled(tg) ||
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
+ continue;
+
+ if (lock)
+ dc->hwss.pipe_control_lock(dc, pipe, true);
+ else
+ dc->hwss.pipe_control_lock(dc, pipe, false);
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
index 069e20bc87c0a..f55c11fc56ec7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
@@ -129,4 +129,6 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
void dcn32_prepare_bandwidth(struct dc *dc,
struct dc_state *context);
+void dcn32_interdependent_update_lock(struct dc *dc,
+ struct dc_state *context, bool lock);
#endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index 2b073123d3ede..67d661dbd5b7c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -58,7 +58,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.disable_plane = dcn20_disable_plane,
.disable_pixel_data = dcn20_disable_pixel_data,
.pipe_control_lock = dcn20_pipe_control_lock,
- .interdependent_update_lock = dcn10_lock_all_pipes,
+ .interdependent_update_lock = dcn32_interdependent_update_lock,
.cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn32_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 4b92df23ff0db..a5560b3fc39ba 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -358,29 +358,6 @@ void dcn35_init_hw(struct dc *dc)
}
}
-static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
- int opp_cnt)
-{
- bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
- int flow_ctrl_cnt;
-
- if (opp_cnt >= 2)
- hblank_halved = true;
-
- flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
- stream->timing.h_border_left -
- stream->timing.h_border_right;
-
- if (hblank_halved)
- flow_ctrl_cnt /= 2;
-
- /* ODM combine 4:1 case */
- if (opp_cnt == 4)
- flow_ctrl_cnt /= 2;
-
- return flow_ctrl_cnt;
-}
-
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -474,10 +451,6 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
- bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
- struct mpc_dwb_flow_control flow_control;
- struct mpc *mpc = dc->res_pool->mpc;
- int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -490,20 +463,6 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
- flow_control.flow_ctrl_mode = 0;
- flow_control.flow_ctrl_cnt0 = 0x80;
- flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
- if (mpc->funcs->set_out_rate_control) {
- for (i = 0; i < opp_cnt; ++i) {
- mpc->funcs->set_out_rate_control(
- mpc, opp_inst[i],
- true,
- rate_control_2x_pclk,
- &flow_control);
- }
- }
-
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index ab17fa1c64e8c..670255c9bc822 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -67,7 +67,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.prepare_bandwidth = dcn35_prepare_bandwidth,
.optimize_bandwidth = dcn35_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
- .set_drr = dcn10_set_drr,
+ .set_drr = dcn35_set_drr,
.get_position = dcn10_get_position,
.set_static_screen_control = dcn35_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index aee5372e292c5..d89c92370d5b3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -337,6 +337,9 @@ struct opp_funcs {
bool (*dpg_is_blanked)(
struct output_pixel_processor *opp);
+ bool (*dpg_is_pending)(struct output_pixel_processor *opp);
+
+
void (*opp_dpg_set_blank_color)(
struct output_pixel_processor *opp,
const struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index d98d72f35be5b..ffad8fe16c54d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -331,6 +331,7 @@ struct timing_generator_funcs {
void (*init_odm)(struct timing_generator *tg);
void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
+ void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index 26fe81f213da5..bf29fc58ea6a6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -285,12 +285,12 @@ struct link_service {
enum replay_FW_Message_type msg,
union dmub_replay_cmd_set *cmd_data);
bool (*edp_set_coasting_vtotal)(
- struct dc_link *link, uint16_t coasting_vtotal);
+ struct dc_link *link, uint32_t coasting_vtotal);
bool (*edp_replay_residency)(const struct dc_link *link,
unsigned int *residency, const bool is_start,
const bool is_alpm);
bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link,
- const unsigned int *power_opts, uint16_t coasting_vtotal);
+ const unsigned int *power_opts, uint32_t coasting_vtotal);
bool (*edp_wait_for_t12)(struct dc_link *link);
bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index acfbbc638cc64..3baa2bdd6dd65 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -1034,7 +1034,7 @@ bool edp_send_replay_cmd(struct dc_link *link,
return true;
}
-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal)
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
@@ -1073,7 +1073,7 @@ bool edp_replay_residency(const struct dc_link *link,
}
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
- const unsigned int *power_opts, uint16_t coasting_vtotal)
+ const unsigned int *power_opts, uint32_t coasting_vtotal)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index 34e521af7bb48..a158c6234d422 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -59,12 +59,12 @@ bool edp_setup_replay(struct dc_link *link,
bool edp_send_replay_cmd(struct dc_link *link,
enum replay_FW_Message_type msg,
union dmub_replay_cmd_set *cmd_data);
-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal);
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal);
bool edp_replay_residency(const struct dc_link *link,
unsigned int *residency, const bool is_start, const bool is_alpm);
bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
- const unsigned int *power_opts, uint16_t coasting_vtotal);
+ const unsigned int *power_opts, uint32_t coasting_vtotal);
bool edp_wait_for_t12(struct dc_link *link);
bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index ab81594a7fadc..6c2e84d3967fc 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -557,7 +557,8 @@ struct dcn_optc_registers {
type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
type OTG_CRC_DATA_FORMAT;\
type OTG_V_TOTAL_LAST_USED_BY_DRR;\
- type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;
+ type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;\
+ type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING;
#define TG_REG_FIELD_LIST_DCN3_2(type) \
type OTG_H_TIMING_DIV_MODE_MANUAL;
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index 8234935433254..52eab8fccb7f1 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -122,6 +122,13 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi
}
}
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(tg);
+
+ REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING, 0, 2, 50000);
+}
+
void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -260,9 +267,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
OTG_V_TOTAL_MAX_SEL, 1,
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
-
- // Setup manual flow control for EOF via TRIG_A
- optc->funcs->setup_manual_trigger(optc);
}
}
@@ -345,6 +349,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.set_odm_bypass = optc32_set_odm_bypass,
.set_odm_combine = optc32_set_odm_combine,
.get_odm_combine_segments = optc32_get_odm_combine_segments,
+ .wait_odm_doublebuffer_pending_clear = optc32_wait_odm_doublebuffer_pending_clear,
.set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode,
.get_optc_source = optc2_get_optc_source,
.set_out_mux = optc3_set_out_mux,
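optc32_wait_odm_doublebuffer_pending_clear() above is a single REG_WAIT on the new OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING field. As a rough model of what such a wait boils down to (the real macro goes through the DC register helpers and warns on timeout; the read callback below is a hypothetical stand-in): poll the field until it reads the expected value, sleeping between attempts, and give up after a bounded number of retries.

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

/* Poll a register field via 'read_field' until it reads 0, waiting
 * 'delay_us' between attempts and giving up after 'max_tries' polls. */
static bool wait_field_clear(uint32_t (*read_field)(void),
			     unsigned int delay_us, unsigned int max_tries)
{
	unsigned int i;

	for (i = 0; i < max_tries; i++) {
		if (read_field() == 0)
			return true;	/* double-buffered update has landed */
		usleep(delay_us);
	}
	return false;			/* still pending after the full budget */
}

In the hunk above the trailing REG_WAIT arguments play the same roles: an expected value of 0, a short per-poll delay, and a large retry budget.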
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
index 8ce3b178cab06..0c2c146955619 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
@@ -183,5 +183,6 @@ void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool man
void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments);
void optc32_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg);
#endif /* __DC_OPTC_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 3f3951f3ba983..ce1754cc1f463 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1771,6 +1771,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
dcn32_override_min_req_memclk(dc, context);
+ dcn32_override_min_req_dcfclk(dc, context);
BW_VAL_TRACE_END_WATERMARKS();
@@ -1930,6 +1931,8 @@ static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw
{
DC_FP_START();
dcn32_update_bw_bounding_box_fpu(dc, bw_params);
+ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
DC_FP_END();
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 0c87b0fabba7d..2258c5c7212d8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -42,6 +42,7 @@
#define SUBVP_ACTIVE_MARGIN_LIST_LEN 2
#define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800
#define DCN3_2_VMIN_DISPCLK_HZ 717000000
+#define MIN_SUBVP_DCFCLK_KHZ 400000
#define TO_DCN32_RES_POOL(pool)\
container_of(pool, struct dcn32_resource_pool, base)
@@ -181,6 +182,8 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);
+void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index b356fed1726d9..296a0a8e71459 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1581,6 +1581,8 @@ static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
{
DC_FP_START();
dcn321_update_bw_bounding_box_fpu(dc, bw_params);
+ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
DC_FP_END();
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 5b486400dfdb5..909e14261f9b4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -700,6 +700,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.disable_dpp_power_gate = true,
.disable_hubp_power_gate = true,
+ .disable_optc_power_gate = true, /* should be the same as the two flags above */
+ .disable_hpo_power_gate = true, /* dmub fw forces domain25 on */
.disable_clock_gate = false,
.disable_dsc_power_gate = true,
.vsr_support = true,
@@ -742,12 +744,13 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
+ .minimum_z8_residency_time = 2100,
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
.enable_legacy_fast_update = true,
.enable_single_display_2to1_odm_policy = true,
- .disable_idle_power_optimizations = true,
+ .disable_idle_power_optimizations = false,
.dmcub_emulation = false,
.disable_boot_optimizations = false,
.disable_unbounded_requesting = false,
@@ -758,8 +761,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = true,
.ignore_pg = true,
.psp_disabled_wa = true,
- .ips2_eval_delay_us = 200,
- .ips2_entry_delay_us = 400
+ .ips2_eval_delay_us = 2000,
+ .ips2_entry_delay_us = 800,
+ .disable_dmub_reallow_idle = true,
+ .static_screen_wait_frames = 2,
};
static const struct dc_panel_config panel_config_defaults = {
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index a529e369b2ace..af3fe8bb0728b 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -3238,6 +3238,14 @@ struct dmub_cmd_replay_set_coasting_vtotal_data {
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
+ /**
+ * 16-bit value dictated by the driver that indicates the high 16 bits of the coasting vtotal.
+ */
+ uint16_t coasting_vtotal_high;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[2];
};
/**
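With edp_set_coasting_vtotal() and its callers widened to uint32_t elsewhere in this series, the DMUB payload keeps its original 16-bit field and gains coasting_vtotal_high for the upper half. A small sketch of how a 32-bit value would be split across the two halves before the command is sent; the struct below is a hypothetical mirror of the payload, and the pre-existing low field is assumed to be a 16-bit coasting_vtotal member:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the replay set-coasting-vtotal payload. */
struct replay_coasting_vtotal_data {
	uint16_t coasting_vtotal;	/* low 16 bits (pre-existing field) */
	uint8_t  panel_inst;
	uint16_t coasting_vtotal_high;	/* high 16 bits (field added above) */
	uint8_t  pad[2];
};

static void fill_coasting_vtotal(struct replay_coasting_vtotal_data *d,
				 uint32_t coasting_vtotal)
{
	d->coasting_vtotal      = (uint16_t)(coasting_vtotal & 0xFFFF);
	d->coasting_vtotal_high = (uint16_t)(coasting_vtotal >> 16);
}

int main(void)
{
	struct replay_coasting_vtotal_data d = { 0 };

	fill_coasting_vtotal(&d, 0x0001F2A4);
	printf("low=0x%04x high=0x%04x\n", d.coasting_vtotal, d.coasting_vtotal_high);
	return 0;
}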
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 8c137d7c032e1..7c9805705fd38 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -513,6 +513,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+ if (!display)
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 738ee763f24a5..84f9b412a4f11 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -147,15 +147,12 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
/* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */
- if (stream->link->psr_settings.psr_feature_enabled) {
- if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
- vsc_packet_revision = vsc_packet_rev4;
- else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
- vsc_packet_revision = vsc_packet_rev2;
- }
-
- if (stream->link->replay_settings.config.replay_supported)
+ if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+ vsc_packet_revision = vsc_packet_rev4;
+ else if (stream->link->replay_settings.config.replay_supported)
vsc_packet_revision = vsc_packet_rev4;
+ else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ vsc_packet_revision = vsc_packet_rev2;
/* Update to revision 5 for extended colorimetry support */
if (stream->use_vsc_sdp_for_colorimetry)
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index e304e8435fb8f..2a3698fd2dc24 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -975,7 +975,7 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
void set_replay_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
- uint16_t vtotal)
+ uint32_t vtotal)
{
link->replay_settings.coasting_vtotal_table[type] = vtotal;
}
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index bef4815e1703d..ff7e6f3cd6be2 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -56,7 +56,7 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
void set_replay_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
- uint16_t vtotal);
+ uint32_t vtotal);
void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
void calculate_replay_link_off_frame_count(struct dc_link *link,
uint16_t vtotal, uint16_t htotal);
diff --git a/drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h b/drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h
index beadb9e42850c..ca83e9e5c3ffb 100644
--- a/drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h
+++ b/drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h
@@ -234,7 +234,8 @@ union UMSCHAPI__SET_HW_RESOURCES {
uint32_t enable_level_process_quantum_check : 1;
uint32_t is_vcn0_enabled : 1;
uint32_t is_vcn1_enabled : 1;
- uint32_t reserved : 27;
+ uint32_t use_rs64mem_for_proc_ctx_csa : 1;
+ uint32_t reserved : 26;
};
uint32_t uint32_all;
};
@@ -297,9 +298,12 @@ union UMSCHAPI__ADD_QUEUE {
struct {
uint32_t is_context_suspended : 1;
- uint32_t reserved : 31;
+ uint32_t collaboration_mode : 1;
+ uint32_t reserved : 30;
};
struct UMSCH_API_STATUS api_status;
+ uint32_t process_csa_array_index;
+ uint32_t context_csa_array_index;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -314,6 +318,7 @@ union UMSCHAPI__REMOVE_QUEUE {
uint64_t context_csa_addr;
struct UMSCH_API_STATUS api_status;
+ uint32_t context_csa_array_index;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -337,6 +342,7 @@ union UMSCHAPI__SUSPEND {
uint32_t suspend_fence_value;
struct UMSCH_API_STATUS api_status;
+ uint32_t context_csa_array_index;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -356,6 +362,7 @@ union UMSCHAPI__RESUME {
enum UMSCH_ENGINE_TYPE engine_type;
struct UMSCH_API_STATUS api_status;
+ uint32_t context_csa_array_index;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -404,6 +411,7 @@ union UMSCHAPI__UPDATE_AFFINITY {
union UMSCH_AFFINITY affinity;
uint64_t context_csa_addr;
struct UMSCH_API_STATUS api_status;
+ uint32_t context_csa_array_index;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -417,6 +425,7 @@ union UMSCHAPI__CHANGE_CONTEXT_PRIORITY_LEVEL {
uint64_t context_quantum;
uint64_t context_csa_addr;
struct UMSCH_API_STATUS api_status;
+ uint32_t context_csa_array_index;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 246b211b1e85f..65333141b1c1b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -735,7 +735,7 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
- smu->smu_baco.state = SMU_BACO_STATE_EXIT;
+ smu->smu_baco.state = SMU_BACO_STATE_NONE;
smu->smu_baco.platform_support = false;
smu->user_dpm_profile.fan_mode = -1;
@@ -1966,10 +1966,25 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
return 0;
}
+static int smu_reset_mp1_state(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if ((!adev->in_runpm) && (!adev->in_suspend) &&
+ (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 10) &&
+ !amdgpu_device_has_display_hardware(adev))
+ ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
+
+ return ret;
+}
+
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@@ -1987,7 +2002,15 @@ static int smu_hw_fini(void *handle)
adev->pm.dpm_enabled = false;
- return smu_smc_hw_cleanup(smu);
+ ret = smu_smc_hw_cleanup(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_reset_mp1_state(smu);
+ if (ret)
+ return ret;
+
+ return 0;
}
static void smu_late_fini(void *handle)
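smu_hw_fini() above now finishes with smu_reset_mp1_state(), which asks the PMFW to prepare for unload only under a narrow combination of conditions: not in runtime PM, not suspending, not mid-reset, MP1 version 13.0.10, and no display hardware present. A stripped-down sketch of that guarded teardown step, with the device-state queries reduced to plain booleans and the firmware call replaced by a stub:

#include <stdbool.h>

struct dev_state {
	bool in_runpm;
	bool in_suspend;
	bool in_reset;
	bool is_mp1_13_0_10;
	bool has_display_hw;
};

/* Hypothetical stand-in for sending PP_MP1_STATE_UNLOAD to the firmware. */
static int send_mp1_unload(void)
{
	return 0;
}

static int reset_mp1_state(const struct dev_state *s)
{
	/* Only a headless 13.0.10 part doing a plain driver unload needs the
	 * extra UNLOAD message; every other path simply succeeds. */
	if (!s->in_runpm && !s->in_suspend && !s->in_reset &&
	    s->is_mp1_13_0_10 && !s->has_display_hw)
		return send_mp1_unload();

	return 0;
}

smu_hw_fini() then propagates any error from this step instead of ignoring it, as the reworked return path above shows.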
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index a870bdd49a4e3..1fa81575788c5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -424,6 +424,7 @@ enum smu_reset_mode {
enum smu_baco_state {
SMU_BACO_STATE_ENTER = 0,
SMU_BACO_STATE_EXIT,
+ SMU_BACO_STATE_NONE,
};
struct smu_baco_context {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
index 5bb7a63c0602b..97522c0852589 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
@@ -144,6 +144,37 @@ typedef struct {
uint32_t MaxGfxClk;
} DpmClocks_t;
+//Freq in MHz
+//Voltage in milli volts with 2 fractional bits
+typedef struct {
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks0[NUM_VCN_DPM_LEVELS];
+ uint32_t VClocks1[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks0[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks1[NUM_VCN_DPM_LEVELS];
+ uint32_t VPEClocks[NUM_VPE_DPM_LEVELS];
+ uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS];
+ uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS];
+
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t Vcn0ClkLevelsEnabled; //Applies to both Vclk0 and Dclk0
+ uint8_t Vcn1ClkLevelsEnabled; //Applies to both Vclk1 and Dclk1
+ uint8_t VpeClkLevelsEnabled;
+ uint8_t NumMemPstatesEnabled;
+ uint8_t NumFclkLevelsEnabled;
+ uint8_t spare;
+
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
+} DpmClocks_t_v14_0_1;
+
typedef struct {
uint16_t CoreFrequency[16]; //Target core frequency [MHz]
uint16_t CorePower[16]; //CAC calculated core power [mW]
@@ -224,7 +255,7 @@ typedef enum {
#define TABLE_CUSTOM_DPM 2 // Called by Driver
#define TABLE_BIOS_GPIO_CONFIG 3 // Called by BIOS
#define TABLE_DPMCLOCKS 4 // Called by Driver and VBIOS
-#define TABLE_SPARE0 5 // Unused
+#define TABLE_MOMENTARY_PM 5 // Called by Tools
#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log
#define TABLE_SMU_METRICS 7 // Called by Driver and SMF/PMF
#define TABLE_COUNT 8
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h
index 356e0f57a426f..ddb6258600831 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h
@@ -42,7 +42,7 @@
#define FEATURE_EDC_BIT 7
#define FEATURE_PLL_POWER_DOWN_BIT 8
#define FEATURE_VDDOFF_BIT 9
-#define FEATURE_VCN_DPM_BIT 10
+#define FEATURE_VCN_DPM_BIT 10 /* this is for both VCN0 and VCN1 */
#define FEATURE_DS_MPM_BIT 11
#define FEATURE_FCLK_DPM_BIT 12
#define FEATURE_SOCCLK_DPM_BIT 13
@@ -56,9 +56,9 @@
#define FEATURE_DS_GFXCLK_BIT 21
#define FEATURE_DS_SOCCLK_BIT 22
#define FEATURE_DS_LCLK_BIT 23
-#define FEATURE_LOW_POWER_DCNCLKS_BIT 24 // for all DISP clks
+#define FEATURE_LOW_POWER_DCNCLKS_BIT 24
#define FEATURE_DS_SHUBCLK_BIT 25
-#define FEATURE_SPARE0_BIT 26 //SPARE
+#define FEATURE_RESERVED0_BIT 26
#define FEATURE_ZSTATES_BIT 27
#define FEATURE_IOMMUL2_PG_BIT 28
#define FEATURE_DS_FCLK_BIT 29
@@ -66,8 +66,8 @@
#define FEATURE_DS_MP1CLK_BIT 31
#define FEATURE_WHISPER_MODE_BIT 32
#define FEATURE_SMU_LOW_POWER_BIT 33
-#define FEATURE_SMART_L3_RINSER_BIT 34
-#define FEATURE_SPARE1_BIT 35 //SPARE
+#define FEATURE_RESERVED1_BIT 34 /* v14_0_0 SMART_L3_RINSER; v14_0_1 RESERVED1 */
+#define FEATURE_GFX_DEM_BIT 35 /* v14_0_0 SPARE; v14_0_1 GFX_DEM */
#define FEATURE_PSI_BIT 36
#define FEATURE_PROCHOT_BIT 37
#define FEATURE_CPUOFF_BIT 38
@@ -77,11 +77,11 @@
#define FEATURE_PERF_LIMIT_BIT 42
#define FEATURE_CORE_DLDO_BIT 43
#define FEATURE_DVO_BIT 44
-#define FEATURE_DS_VCN_BIT 45
+#define FEATURE_DS_VCN_BIT 45 /* v14_0_1 this is for both VCN0 and VCN1 */
#define FEATURE_CPPC_BIT 46
#define FEATURE_CPPC_PREFERRED_CORES 47
#define FEATURE_DF_CSTATES_BIT 48
-#define FEATURE_SPARE2_BIT 49 //SPARE
+#define FEATURE_FAST_PSTATE_CLDO_BIT 49 /* v14_0_0 SPARE */
#define FEATURE_ATHUB_PG_BIT 50
#define FEATURE_VDDOFF_ECO_BIT 51
#define FEATURE_ZSTATES_ECO_BIT 52
@@ -93,8 +93,8 @@
#define FEATURE_DS_IPUCLK_BIT 58
#define FEATURE_DS_VPECLK_BIT 59
#define FEATURE_VPE_DPM_BIT 60
-#define FEATURE_SPARE_61 61
-#define FEATURE_FP_DIDT 62
+#define FEATURE_SMART_L3_RINSER_BIT 61 /* v14_0_0 SPARE*/
+#define FEATURE_PCC_BIT 62 /* v14_0_0 FP_DIDT v14_0_1 PCC_BIT */
#define NUM_FEATURES 63
// Firmware Header/Footer
@@ -151,6 +151,43 @@ typedef struct {
// MP1_EXT_SCRATCH7 = RTOS Current Job
} FwStatus_t;
+typedef struct {
+ // MP1_EXT_SCRATCH0
+ uint32_t DpmHandlerID : 8;
+ uint32_t ActivityMonitorID : 8;
+ uint32_t DpmTimerID : 8;
+ uint32_t DpmHubID : 4;
+ uint32_t DpmHubTask : 4;
+ // MP1_EXT_SCRATCH1
+ uint32_t CclkSyncStatus : 8;
+ uint32_t ZstateStatus : 4;
+ uint32_t Cpu1VddOff : 4;
+ uint32_t DstateFun : 4;
+ uint32_t DstateDev : 4;
+ uint32_t GfxOffStatus : 2;
+ uint32_t Cpu0Off : 2;
+ uint32_t Cpu1Off : 2;
+ uint32_t Cpu0VddOff : 2;
+ // MP1_EXT_SCRATCH2
+ uint32_t P2JobHandler :32;
+ // MP1_EXT_SCRATCH3
+ uint32_t PostCode :32;
+ // MP1_EXT_SCRATCH4
+ uint32_t MsgPortBusy :15;
+ uint32_t RsmuPmiP1Pending : 1;
+ uint32_t RsmuPmiP2PendingCnt : 8;
+ uint32_t DfCstateExitPending : 1;
+ uint32_t Pc6EntryPending : 1;
+ uint32_t Pc6ExitPending : 1;
+ uint32_t WarmResetPending : 1;
+ uint32_t Mp0ClkPending : 1;
+ uint32_t InWhisperMode : 1;
+ uint32_t spare2 : 2;
+ // MP1_EXT_SCRATCH5
+ uint32_t IdleMask :32;
+ // MP1_EXT_SCRATCH6 = RTOS threads' status
+ // MP1_EXT_SCRATCH7 = RTOS Current Job
+} FwStatus_t_v14_0_1;
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
index 8a8a57c56bc0c..c4dc5881d8df0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
@@ -54,14 +54,14 @@
#define PPSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
#define PPSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version
#define PPSMC_MSG_GetDriverIfVersion 0x03 ///< Get PMFW_DRIVER_IF version
-#define PPSMC_MSG_SPARE0 0x04 ///< SPARE
-#define PPSMC_MSG_SPARE1 0x05 ///< SPARE
-#define PPSMC_MSG_PowerDownVcn 0x06 ///< Power down VCN
-#define PPSMC_MSG_PowerUpVcn 0x07 ///< Power up VCN; VCN is power gated by default
-#define PPSMC_MSG_SetHardMinVcn 0x08 ///< For wireless display
+#define PPSMC_MSG_PowerDownVcn1 0x04 ///< Power down VCN1
+#define PPSMC_MSG_PowerUpVcn1 0x05 ///< Power up VCN1; VCN1 is power gated by default
+#define PPSMC_MSG_PowerDownVcn0 0x06 ///< Power down VCN0
+#define PPSMC_MSG_PowerUpVcn0 0x07 ///< Power up VCN0; VCN0 is power gated by default
+#define PPSMC_MSG_SetHardMinVcn0 0x08 ///< For wireless display
#define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz
-#define PPSMC_MSG_SPARE2 0x0A ///< SPARE
-#define PPSMC_MSG_SPARE3 0x0B ///< SPARE
+#define PPSMC_MSG_SetHardMinVcn1 0x0A ///< For wireless display
+#define PPSMC_MSG_SetSoftMinVcn1 0x0B ///< Set soft min for VCN1 clocks (VCLK1 and DCLK1)
#define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload
#define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer
#define PPSMC_MSG_SetDriverDramAddrLow 0x0E ///< Set low 32 bits of DRAM address for Driver table transfer
@@ -71,36 +71,32 @@
#define PPSMC_MSG_GetEnabledSmuFeatures 0x12 ///< Get enabled features in PMFW
#define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK
#define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK
-#define PPSMC_MSG_SetSoftMinVcn 0x15 ///< Set soft min for VCN clocks (VCLK and DCLK)
-
+#define PPSMC_MSG_SetSoftMinVcn0 0x15 ///< Set soft min for VCN0 clocks (VCLK0 and DCLK0)
#define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU
-
-#define PPSMC_MSG_spare_0x17 0x17
-#define PPSMC_MSG_spare_0x18 0x18
+#define PPSMC_MSG_spare_0x17 0x17 ///< Get GFX clock frequency
+#define PPSMC_MSG_spare_0x18 0x18 ///< Get FCLK frequency
#define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry
#define PPSMC_MSG_DisallowGfxOff 0x1A ///< Inform PMFW of disallowing GFXOFF entry
#define PPSMC_MSG_SetSoftMaxGfxClk 0x1B ///< Set soft max for GFX CLK
#define PPSMC_MSG_SetHardMinGfxClk 0x1C ///< Set hard min for GFX CLK
-
#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x1D ///< Set soft max for SOC CLK
#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x1E ///< Set soft max for FCLK
-#define PPSMC_MSG_SetSoftMaxVcn 0x1F ///< Set soft max for VCN clocks (VCLK and DCLK)
-#define PPSMC_MSG_spare_0x20 0x20
-#define PPSMC_MSG_PowerDownJpeg 0x21 ///< Power down Jpeg
-#define PPSMC_MSG_PowerUpJpeg 0x22 ///< Power up Jpeg; VCN is power gated by default
-
+#define PPSMC_MSG_SetSoftMaxVcn0 0x1F ///< Set soft max for VCN0 clocks (VCLK0 and DCLK0)
+#define PPSMC_MSG_spare_0x20 0x20 ///< Set power limit percentage
+#define PPSMC_MSG_PowerDownJpeg0 0x21 ///< Power down Jpeg of VCN0
+#define PPSMC_MSG_PowerUpJpeg0 0x22 ///< Power up Jpeg of VCN0; VCN0 is power gated by default
#define PPSMC_MSG_SetHardMinFclkByFreq 0x23 ///< Set hard min for FCLK
#define PPSMC_MSG_SetSoftMinSocclkByFreq 0x24 ///< Set soft min for SOC CLK
#define PPSMC_MSG_AllowZstates 0x25 ///< Inform PMFM of allowing Zstate entry, i.e. no Miracast activity
-#define PPSMC_MSG_Reserved 0x26 ///< Not used
-#define PPSMC_MSG_Reserved1 0x27 ///< Not used, previously PPSMC_MSG_RequestActiveWgp
-#define PPSMC_MSG_Reserved2 0x28 ///< Not used, previously PPSMC_MSG_QueryActiveWgp
+#define PPSMC_MSG_PowerDownJpeg1 0x26 ///< Power down Jpeg of VCN1
+#define PPSMC_MSG_PowerUpJpeg1 0x27 ///< Power up Jpeg of VCN1; VCN1 is power gated by default
+#define PPSMC_MSG_SetSoftMaxVcn1 0x28 ///< Set soft max for VCN1 clocks (VCLK1 and DCLK1)
#define PPSMC_MSG_PowerDownIspByTile 0x29 ///< ISP is power gated by default
#define PPSMC_MSG_PowerUpIspByTile 0x2A ///< This message is used to power up ISP tiles and enable the ISP DPM
#define PPSMC_MSG_SetHardMinIspiclkByFreq 0x2B ///< Set HardMin by frequency for ISPICLK
#define PPSMC_MSG_SetHardMinIspxclkByFreq 0x2C ///< Set HardMin by frequency for ISPXCLK
-#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN.UMSCH (aka VSCH) scheduler
-#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN.UMSCH (aka VSCH) scheduler
+#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN0.UMSCH (aka VSCH) scheduler
+#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN0.UMSCH (aka VSCH) scheduler
#define PPSMC_Message_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis
#define PPSMC_Message_IspStutterOff_MmhubPgEn 0x30 ///< ISP StufferOff mmHub PgEn
#define PPSMC_MSG_PowerUpVpe 0x31 ///< Power up VPE
@@ -110,7 +106,9 @@
#define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA
#define PPSMC_MSG_SetSoftMaxVpe 0x36 ///<
#define PPSMC_MSG_SetSoftMinVpe 0x37 ///<
-#define PPSMC_Message_Count 0x38 ///< Total number of PPSMC messages
+#define PPSMC_MSG_AllocMALLCache 0x38 ///< Allocating MALL Cache
+#define PPSMC_MSG_ReleaseMALLCache 0x39 ///< Releasing MALL Cache
+#define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages
/** @}*/
/**
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index a941fdbf78b6b..af427cc7dbb84 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -115,6 +115,10 @@
__SMU_DUMMY_MAP(PowerDownVcn), \
__SMU_DUMMY_MAP(PowerUpJpeg), \
__SMU_DUMMY_MAP(PowerDownJpeg), \
+ __SMU_DUMMY_MAP(PowerUpJpeg0), \
+ __SMU_DUMMY_MAP(PowerDownJpeg0), \
+ __SMU_DUMMY_MAP(PowerUpJpeg1), \
+ __SMU_DUMMY_MAP(PowerDownJpeg1), \
__SMU_DUMMY_MAP(BacoAudioD3PME), \
__SMU_DUMMY_MAP(ArmD3), \
__SMU_DUMMY_MAP(RunDcBtc), \
@@ -135,6 +139,8 @@
__SMU_DUMMY_MAP(PowerUpSdma), \
__SMU_DUMMY_MAP(SetHardMinIspclkByFreq), \
__SMU_DUMMY_MAP(SetHardMinVcn), \
+ __SMU_DUMMY_MAP(SetHardMinVcn0), \
+ __SMU_DUMMY_MAP(SetHardMinVcn1), \
__SMU_DUMMY_MAP(SetAllowFclkSwitch), \
__SMU_DUMMY_MAP(SetMinVideoGfxclkFreq), \
__SMU_DUMMY_MAP(ActiveProcessNotify), \
@@ -150,6 +156,8 @@
__SMU_DUMMY_MAP(SetPhyclkVoltageByFreq), \
__SMU_DUMMY_MAP(SetDppclkVoltageByFreq), \
__SMU_DUMMY_MAP(SetSoftMinVcn), \
+ __SMU_DUMMY_MAP(SetSoftMinVcn0), \
+ __SMU_DUMMY_MAP(SetSoftMinVcn1), \
__SMU_DUMMY_MAP(EnablePostCode), \
__SMU_DUMMY_MAP(GetGfxclkFrequency), \
__SMU_DUMMY_MAP(GetFclkFrequency), \
@@ -161,6 +169,8 @@
__SMU_DUMMY_MAP(SetSoftMaxSocclkByFreq), \
__SMU_DUMMY_MAP(SetSoftMaxFclkByFreq), \
__SMU_DUMMY_MAP(SetSoftMaxVcn), \
+ __SMU_DUMMY_MAP(SetSoftMaxVcn0), \
+ __SMU_DUMMY_MAP(SetSoftMaxVcn1), \
__SMU_DUMMY_MAP(PowerGateMmHub), \
__SMU_DUMMY_MAP(UpdatePmeRestore), \
__SMU_DUMMY_MAP(GpuChangeState), \
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
index 3f7463c1c1a91..4af1985ae4466 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
@@ -27,6 +27,7 @@
#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1
#define FEATURE_MASK(feature) (1ULL << feature)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 1d96eb274d72d..0c2d04f978ac9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1283,10 +1283,8 @@ static int arcturus_get_power_limit(struct smu_context *smu,
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
- struct smu_11_0_powerplay_table *powerplay_table =
- (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit;
if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
/* the last hope to figure out the ppt limit */
@@ -1302,26 +1300,10 @@ static int arcturus_get_power_limit(struct smu_context *smu,
*current_power_limit = power_limit;
if (default_power_limit)
*default_power_limit = power_limit;
-
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-
- dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
- od_percent_upper, od_percent_lower, power_limit);
-
- if (max_power_limit) {
- *max_power_limit = power_limit * (100 + od_percent_upper);
- *max_power_limit /= 100;
- }
-
- if (min_power_limit) {
- *min_power_limit = power_limit * (100 - od_percent_lower);
- *min_power_limit /= 100;
- }
+ if (max_power_limit)
+ *max_power_limit = power_limit;
+ if (min_power_limit)
+ *min_power_limit = power_limit;
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index ed189a3878ebe..836b1df799286 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2339,7 +2339,7 @@ static int navi10_get_power_limit(struct smu_context *smu,
(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
/* the last hope to figure out the ppt limit */
@@ -2356,13 +2356,16 @@ static int navi10_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled &&
- navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT))
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ } else if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
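The get_power_limit() reworks in this series initialize od_percent_upper/od_percent_lower to 0 and only read the overdrive percentages when the powerplay table exists and the PPT overdrive capability is supported. The min/max limits are then derived from the base limit as percentages (the arithmetic is visible in the arcturus code removed earlier in the diff). A worked sketch of that calculation with hypothetical inputs:

#include <stdint.h>
#include <stdio.h>

static void compute_limits(uint32_t power_limit,
			   uint32_t od_percent_upper,
			   uint32_t od_percent_lower,
			   uint32_t *max_limit, uint32_t *min_limit)
{
	/* max = limit * (100 + upper%) / 100, min = limit * (100 - lower%) / 100 */
	*max_limit = power_limit * (100 + od_percent_upper) / 100;
	*min_limit = power_limit * (100 - od_percent_lower) / 100;
}

int main(void)
{
	uint32_t max, min;

	/* e.g. a 220 W board limit with +15% / -10% overdrive headroom */
	compute_limits(220, 15, 10, &max, &min);
	printf("max=%u W min=%u W\n", max, min);	/* max=253 W min=198 W */

	/* with the new defaults (no powerplay table or no OD capability) both
	 * percentages stay 0, so min == max == the stock limit */
	compute_limits(220, 0, 0, &max, &min);
	printf("max=%u W min=%u W\n", max, min);	/* max=220 W min=220 W */
	return 0;
}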
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index e2ad2b972ab0b..1f18b61884f3f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -617,6 +617,12 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
return throttler_status;
}
+static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
+ enum SMU_11_0_7_ODFEATURE_CAP cap)
+{
+ return od_table->cap[cap];
+}
+
static int sienna_cichlid_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
@@ -625,7 +631,8 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
{
struct smu_11_0_7_powerplay_table *powerplay_table =
(struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint16_t *table_member;
GET_PPTABLE_MEMBER(SocketPowerLimitAc, &table_member);
@@ -640,12 +647,16 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT)) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ } else if ((sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT))) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
@@ -1250,12 +1261,6 @@ static bool sienna_cichlid_is_support_fine_grained_dpm(struct smu_context *smu,
return dpm_desc->SnapToDiscrete == 0;
}
-static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
- enum SMU_11_0_7_ODFEATURE_CAP cap)
-{
- return od_table->cap[cap];
-}
-
static void sienna_cichlid_get_od_setting_range(struct smu_11_0_7_overdrive_table *od_table,
enum SMU_11_0_7_ODSETTING_ID setting,
uint32_t *min, uint32_t *max)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 9b80f18ea6c35..67117ced7c6ae 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2356,7 +2356,7 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
(struct smu_13_0_0_powerplay_table *)table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
@@ -2369,12 +2369,16 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ } else if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
@@ -2747,7 +2751,13 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
- ret = smu_cmn_set_mp1_state(smu, mp1_state);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_PrepareMp1ForUnload,
+ 0x55, NULL);
+
+ if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
+ ret = smu_v13_0_disable_pmfw_state(smu);
+
break;
default:
/* Ignore others */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index bb98156b2fa1d..949131bd1ecb2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -226,8 +226,18 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!en && !adev->in_s0ix)
+ if (!en && !adev->in_s0ix) {
+ /* Add a GFX reset as a workaround just before sending the
+ * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
+ * an invalid state.
+ */
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_2, NULL);
+ if (ret)
+ return ret;
+
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 3dc7b60cb0754..7318964f1f148 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2320,7 +2320,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
(struct smu_13_0_7_powerplay_table *)table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
@@ -2333,12 +2333,16 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT))) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ } else if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index b06a3cc433054..07a65e005785d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -234,7 +234,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
break;
case IP_VERSION(14, 0, 1):
- smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1;
break;
default:
@@ -1402,9 +1402,22 @@ int smu_v14_0_set_vcn_enable(struct smu_context *smu,
if (adev->vcn.harvest_config & (1 << i))
continue;
- ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
- i << 16U, NULL);
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
+ if (i == 0)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
+ i << 16U, NULL);
+ else if (i == 1)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
+ i << 16U, NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
+ i << 16U, NULL);
+ }
+
if (ret)
return ret;
}
@@ -1415,9 +1428,34 @@ int smu_v14_0_set_vcn_enable(struct smu_context *smu,
int smu_v14_0_set_jpeg_enable(struct smu_context *smu,
bool enable)
{
- return smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
- 0, NULL);
+ struct amdgpu_device *adev = smu->adev;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
+ if (i == 0)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg0 : SMU_MSG_PowerDownJpeg0,
+ i << 16U, NULL);
+ else if (i == 1 && amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg1 : SMU_MSG_PowerDownJpeg1,
+ i << 16U, NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
+ i << 16U, NULL);
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return ret;
}
int smu_v14_0_run_btc(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 9310c4758e38c..63399c00cc28f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -70,9 +70,12 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
- MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
- MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1),
- MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1),
+ MSG_MAP(PowerDownVcn0, PPSMC_MSG_PowerDownVcn0, 1),
+ MSG_MAP(PowerUpVcn0, PPSMC_MSG_PowerUpVcn0, 1),
+ MSG_MAP(SetHardMinVcn0, PPSMC_MSG_SetHardMinVcn0, 1),
+ MSG_MAP(PowerDownVcn1, PPSMC_MSG_PowerDownVcn1, 1),
+ MSG_MAP(PowerUpVcn1, PPSMC_MSG_PowerUpVcn1, 1),
+ MSG_MAP(SetHardMinVcn1, PPSMC_MSG_SetHardMinVcn1, 1),
MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 1),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1),
MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
@@ -83,7 +86,8 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 1),
MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 1),
MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 1),
- MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 1),
+ MSG_MAP(SetSoftMinVcn0, PPSMC_MSG_SetSoftMinVcn0, 1),
+ MSG_MAP(SetSoftMinVcn1, PPSMC_MSG_SetSoftMinVcn1, 1),
MSG_MAP(EnableGfxImu, PPSMC_MSG_EnableGfxImu, 1),
MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
@@ -91,9 +95,12 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 1),
MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 1),
MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 1),
- MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 1),
- MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1),
- MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1),
+ MSG_MAP(SetSoftMaxVcn0, PPSMC_MSG_SetSoftMaxVcn0, 1),
+ MSG_MAP(SetSoftMaxVcn1, PPSMC_MSG_SetSoftMaxVcn1, 1),
+ MSG_MAP(PowerDownJpeg0, PPSMC_MSG_PowerDownJpeg0, 1),
+ MSG_MAP(PowerUpJpeg0, PPSMC_MSG_PowerUpJpeg0, 1),
+ MSG_MAP(PowerDownJpeg1, PPSMC_MSG_PowerDownJpeg1, 1),
+ MSG_MAP(PowerUpJpeg1, PPSMC_MSG_PowerUpJpeg1, 1),
MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 1),
MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 1),
MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 1),
@@ -154,7 +161,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
+ SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -164,7 +171,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
goto err0_out;
smu_table->metrics_time = 0;
- smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
+ smu_table->clocks_table = kzalloc(max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)), GFP_KERNEL);
if (!smu_table->clocks_table)
goto err1_out;
@@ -586,6 +593,60 @@ static int smu_v14_0_0_mode2_reset(struct smu_context *smu)
return ret;
}
+static int smu_v14_0_1_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t dpm_level,
+ uint32_t *freq)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
+ return -EINVAL;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->SocClocks[dpm_level];
+ break;
+ case SMU_VCLK:
+ if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->VClocks0[dpm_level];
+ break;
+ case SMU_DCLK:
+ if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->DClocks0[dpm_level];
+ break;
+ case SMU_VCLK1:
+ if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->VClocks1[dpm_level];
+ break;
+ case SMU_DCLK1:
+ if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->DClocks1[dpm_level];
+ break;
+ case SMU_UCLK:
+ case SMU_MCLK:
+ if (dpm_level >= clk_table->NumMemPstatesEnabled)
+ return -EINVAL;
+ *freq = clk_table->MemPstateTable[dpm_level].MemClk;
+ break;
+ case SMU_FCLK:
+ if (dpm_level >= clk_table->NumFclkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->FclkClocks_Freq[dpm_level];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t dpm_level,
@@ -630,6 +691,19 @@ static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu,
return 0;
}
+static int smu_v14_0_common_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t dpm_level,
+ uint32_t *freq)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ return smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq);
+ else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ return smu_v14_0_1_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq);
+
+ return 0;
+}
+
static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
enum smu_clk_type clk_type)
{
@@ -650,6 +724,8 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
break;
case SMU_VCLK:
case SMU_DCLK:
+ case SMU_VCLK1:
+ case SMU_DCLK1:
feature_id = SMU_FEATURE_VCN_DPM_BIT;
break;
default:
@@ -659,6 +735,126 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
return smu_cmn_feature_is_enabled(smu, feature_id);
}
+static int smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+ uint32_t clock_limit;
+ uint32_t max_dpm_level, min_dpm_level;
+ int ret = 0;
+
+ if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_FCLK:
+ clock_limit = smu->smu_table.boot_values.fclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ clock_limit = smu->smu_table.boot_values.vclk;
+ break;
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ clock_limit = smu->smu_table.boot_values.dclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+ /* clock in MHz units */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
+ return 0;
+ }
+
+ if (max) {
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ *max = clk_table->MaxGfxClk;
+ break;
+ case SMU_MCLK:
+ case SMU_UCLK:
+ case SMU_FCLK:
+ max_dpm_level = 0;
+ break;
+ case SMU_SOCCLK:
+ max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ max_dpm_level = clk_table->Vcn0ClkLevelsEnabled - 1;
+ break;
+ case SMU_VCLK1:
+ case SMU_DCLK1:
+ max_dpm_level = clk_table->Vcn1ClkLevelsEnabled - 1;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
+ if (ret)
+ goto failed;
+ }
+ }
+
+ if (min) {
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ *min = clk_table->MinGfxClk;
+ break;
+ case SMU_MCLK:
+ case SMU_UCLK:
+ min_dpm_level = clk_table->NumMemPstatesEnabled - 1;
+ break;
+ case SMU_FCLK:
+ min_dpm_level = clk_table->NumFclkLevelsEnabled - 1;
+ break;
+ case SMU_SOCCLK:
+ min_dpm_level = 0;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ case SMU_VCLK1:
+ case SMU_DCLK1:
+ min_dpm_level = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
+ if (ret)
+ goto failed;
+ }
+ }
+
+failed:
+ return ret;
+}
+
static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min,
@@ -729,7 +925,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
}
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
if (ret)
goto failed;
}
@@ -761,7 +957,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
}
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
if (ret)
goto failed;
}
@@ -771,6 +967,19 @@ failed:
return ret;
}
+static int smu_v14_0_common_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ return smu_v14_0_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
+ else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ return smu_v14_0_1_get_dpm_ultimate_freq(smu, clk_type, min, max);
+
+ return 0;
+}
+
static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value)
@@ -804,6 +1013,37 @@ static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
return smu_v14_0_0_get_smu_metrics_data(smu, member_type, value);
}
+static int smu_v14_0_1_get_dpm_level_count(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *count)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ *count = clk_table->NumSocClkLevelsEnabled;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ *count = clk_table->Vcn0ClkLevelsEnabled;
+ break;
+ case SMU_VCLK1:
+ case SMU_DCLK1:
+ *count = clk_table->Vcn1ClkLevelsEnabled;
+ break;
+ case SMU_MCLK:
+ *count = clk_table->NumMemPstatesEnabled;
+ break;
+ case SMU_FCLK:
+ *count = clk_table->NumFclkLevelsEnabled;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *count)
@@ -833,6 +1073,18 @@ static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
return 0;
}
+static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *count)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ return smu_v14_0_0_get_dpm_level_count(smu, clk_type, count);
+ else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ return smu_v14_0_1_get_dpm_level_count(smu, clk_type, count);
+
+ return 0;
+}
+
static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
@@ -859,18 +1111,20 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
case SMU_SOCCLK:
case SMU_VCLK:
case SMU_DCLK:
+ case SMU_VCLK1:
+ case SMU_DCLK1:
case SMU_MCLK:
case SMU_FCLK:
ret = smu_v14_0_0_get_current_clk_freq(smu, clk_type, &cur_value);
if (ret)
break;
- ret = smu_v14_0_0_get_dpm_level_count(smu, clk_type, &count);
+ ret = smu_v14_0_common_get_dpm_level_count(smu, clk_type, &count);
if (ret)
break;
for (i = 0; i < count; i++) {
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value);
if (ret)
break;
@@ -933,8 +1187,13 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
break;
case SMU_VCLK:
case SMU_DCLK:
- msg_set_min = SMU_MSG_SetHardMinVcn;
- msg_set_max = SMU_MSG_SetSoftMaxVcn;
+ msg_set_min = SMU_MSG_SetHardMinVcn0;
+ msg_set_max = SMU_MSG_SetSoftMaxVcn0;
+ break;
+ case SMU_VCLK1:
+ case SMU_DCLK1:
+ msg_set_min = SMU_MSG_SetHardMinVcn1;
+ msg_set_max = SMU_MSG_SetSoftMaxVcn1;
break;
default:
return -EINVAL;
@@ -964,11 +1223,11 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
case SMU_FCLK:
case SMU_VCLK:
case SMU_DCLK:
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
if (ret)
break;
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
if (ret)
break;
@@ -993,25 +1252,25 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
sclk_min = sclk_max;
fclk_min = fclk_max;
socclk_min = socclk_max;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
sclk_max = sclk_min;
fclk_max = fclk_min;
socclk_max = socclk_min;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
@@ -1060,6 +1319,18 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu,
return ret;
}
+static int smu_v14_0_1_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+
+ smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
+ smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
+ smu->gfx_actual_hard_min_freq = 0;
+ smu->gfx_actual_soft_max_freq = 0;
+
+ return 0;
+}
+
static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
@@ -1072,6 +1343,16 @@ static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
return 0;
}
+static int smu_v14_0_common_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ smu_v14_0_0_set_fine_grain_gfx_freq_parameters(smu);
+ else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ smu_v14_0_1_set_fine_grain_gfx_freq_parameters(smu);
+
+ return 0;
+}
+
static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu,
bool enable)
{
@@ -1088,6 +1369,25 @@ static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
0, NULL);
}
+static int smu_14_0_1_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+ uint8_t idx;
+
+ /* Only the SOC and VPE clock information is copied to provide the VPE DPM settings. */
+ for (idx = 0; idx < NUM_SOCCLK_DPM_LEVELS; idx++) {
+ clock_table->SocClocks[idx].Freq = (idx < clk_table->NumSocClkLevelsEnabled) ? clk_table->SocClocks[idx] : 0;
+ clock_table->SocClocks[idx].Vol = 0;
+ }
+
+ for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++) {
+ clock_table->VPEClocks[idx].Freq = (idx < clk_table->VpeClkLevelsEnabled) ? clk_table->VPEClocks[idx] : 0;
+ clock_table->VPEClocks[idx].Vol = 0;
+ }
+
+ return 0;
+}
+
static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
@@ -1107,6 +1407,16 @@ static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *
return 0;
}
+static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ return smu_14_0_0_get_dpm_table(smu, clock_table);
+ else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ return smu_14_0_1_get_dpm_table(smu, clock_table);
+
+ return 0;
+}
+
static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
.check_fw_version = smu_v14_0_check_fw_version,
@@ -1128,16 +1438,16 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.set_driver_table_location = smu_v14_0_set_driver_table_location,
.gfx_off_control = smu_v14_0_gfx_off_control,
.mode2_reset = smu_v14_0_0_mode2_reset,
- .get_dpm_ultimate_freq = smu_v14_0_0_get_dpm_ultimate_freq,
+ .get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq,
.od_edit_dpm_table = smu_v14_0_od_edit_dpm_table,
.print_clk_levels = smu_v14_0_0_print_clk_levels,
.force_clk_levels = smu_v14_0_0_force_clk_levels,
.set_performance_level = smu_v14_0_0_set_performance_level,
- .set_fine_grain_gfx_freq_parameters = smu_v14_0_0_set_fine_grain_gfx_freq_parameters,
+ .set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters,
.set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
- .get_dpm_clock_table = smu_14_0_0_get_dpm_table,
+ .get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
};
static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)
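
The smu_v14_0_common_*() helpers added above route each call to the 14.0.0 or 14.0.1 implementation based on the MP1 IP version. A minimal sketch of that dispatch-by-version pattern in plain C; the enum and the two handlers are hypothetical stand-ins, not driver symbols.

#include <errno.h>
#include <stdio.h>

enum ip_version { IP_14_0_0, IP_14_0_1 };

static int get_count_v14_0_0(unsigned int *count) { *count = 4; return 0; }
static int get_count_v14_0_1(unsigned int *count) { *count = 8; return 0; }

/* route to the per-version implementation and propagate its result */
static int common_get_count(enum ip_version ver, unsigned int *count)
{
        switch (ver) {
        case IP_14_0_0:
                return get_count_v14_0_0(count);
        case IP_14_0_1:
                return get_count_v14_0_1(count);
        default:
                return -EINVAL;
        }
}

int main(void)
{
        unsigned int count;

        if (!common_get_count(IP_14_0_1, &count))
                printf("levels: %u\n", count);
        return 0;
}
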
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index ebb6d8ebd44eb..1e9259416980e 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -180,6 +180,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
{
struct ast_device *ast = to_ast_device(dev);
u8 video_on_off = on;
+ u32 i = 0;
// Video On/Off
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
@@ -192,6 +193,8 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
// wait 1 ms
mdelay(1);
+ if (++i > 200)
+ break;
}
}
}
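
The ast_dp hunk above bounds the sink-status poll instead of spinning indefinitely on mdelay(). A standalone sketch of the same bounded-poll pattern, assuming hypothetical read_status()/sleep_ms() helpers in place of the register read and mdelay():

#include <stdbool.h>
#include <stdio.h>

static bool read_status(void) { return true; } /* pretend the state matched */
static void sleep_ms(int ms) { (void)ms; }

/* poll at 1 ms intervals, but never more than max_iters times */
static bool wait_for_status(bool want, int max_iters)
{
        int i;

        for (i = 0; i < max_iters; i++) {
                if (read_status() == want)
                        return true;
                sleep_ms(1);
        }
        return false; /* timed out; the caller simply carries on */
}

int main(void)
{
        printf("matched: %d\n", wait_for_status(true, 200));
        return 0;
}
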
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index e7c4bef74aa46..4b2ae27f0a57f 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -441,23 +441,21 @@ lt8912_connector_mode_valid(struct drm_connector *connector,
static int lt8912_connector_get_modes(struct drm_connector *connector)
{
const struct drm_edid *drm_edid;
- int ret = -1;
- int num = 0;
struct lt8912 *lt = connector_to_lt8912(connector);
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ int ret, num;
drm_edid = drm_bridge_edid_read(lt->hdmi_port, connector);
drm_edid_connector_update(connector, drm_edid);
- if (drm_edid) {
- num = drm_edid_connector_add_modes(connector);
- } else {
- return ret;
- }
+ if (!drm_edid)
+ return 0;
+
+ num = drm_edid_connector_add_modes(connector);
ret = drm_display_info_set_bus_formats(&connector->display_info,
&bus_format, 1);
- if (ret)
- num = ret;
+ if (ret < 0)
+ num = 0;
drm_edid_free(drm_edid);
return num;
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index bcf8bccd86d6c..f4f593ad8f795 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -294,8 +294,8 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
static int lt9611uxc_connector_get_modes(struct drm_connector *connector)
{
struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector);
- unsigned int count;
const struct drm_edid *drm_edid;
+ int count;
drm_edid = drm_bridge_edid_read(&lt9611uxc->bridge, connector);
drm_edid_connector_update(connector, drm_edid);
diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
index bd61e20770a5b..14a2a8473682b 100644
--- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
@@ -52,7 +52,7 @@
* @adapter: I2C adapter for the DDC bus
* @offset: register offset
* @buffer: buffer for return data
- * @size: sizo of the buffer
+ * @size: size of the buffer
*
* Reads @size bytes from the DP dual mode adaptor registers
* starting at @offset.
@@ -116,7 +116,7 @@ EXPORT_SYMBOL(drm_dp_dual_mode_read);
* @adapter: I2C adapter for the DDC bus
* @offset: register offset
* @buffer: buffer for write data
- * @size: sizo of the buffer
+ * @size: size of the buffer
*
* Writes @size bytes to the DP dual mode adaptor registers
* starting at @offset.
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 266826eac4a75..f5d4be8978660 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -4111,6 +4111,13 @@ int drm_dp_bw_overhead(int lane_count, int hactive,
u32 overhead = 1000000;
int symbol_cycles;
+ if (lane_count == 0 || hactive == 0 || bpp_x16 == 0) {
+ DRM_DEBUG_KMS("Invalid BW overhead params: lane_count %d, hactive %d, bpp_x16 %d.%04d\n",
+ lane_count, hactive,
+ bpp_x16 >> 4, (bpp_x16 & 0xf) * 625);
+ return 0;
+ }
+
/*
* DP Standard v2.1 2.6.4.1
* SSC downspread and ref clock variation margin:
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 871e4e2129d6d..0683a129b3628 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -777,6 +777,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
unsigned int total_modes_count = 0;
struct drm_client_offset *offsets;
unsigned int connector_count = 0;
+ /* points to modes protected by mode_config.mutex */
struct drm_display_mode **modes;
struct drm_crtc **crtcs;
int i, ret = 0;
@@ -845,7 +846,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
drm_client_pick_crtcs(client, connectors, connector_count,
crtcs, modes, 0, width, height);
}
- mutex_unlock(&dev->mode_config.mutex);
drm_client_modeset_release(client);
@@ -875,6 +875,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
modeset->y = offset->y;
}
}
+ mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&client->modeset_mutex);
out:
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index e814020bbcd3b..cfbe020de54e0 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -274,19 +274,24 @@ EXPORT_SYMBOL(drm_panel_disable);
* The modes probed from the panel are automatically added to the connector
* that the panel is attached to.
*
- * Return: The number of modes available from the panel on success or a
- * negative error code on failure.
+ * Return: The number of modes available from the panel on success, or 0 on
+ * failure (no modes).
*/
int drm_panel_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
if (!panel)
- return -EINVAL;
+ return 0;
- if (panel->funcs && panel->funcs->get_modes)
- return panel->funcs->get_modes(panel, connector);
+ if (panel->funcs && panel->funcs->get_modes) {
+ int num;
- return -EOPNOTSUPP;
+ num = panel->funcs->get_modes(panel, connector);
+ if (num > 0)
+ return num;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(drm_panel_get_modes);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7352bde299d54..03bd3c7bd0dc2 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -582,7 +582,12 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
{
struct drm_gem_object *obj = dma_buf->priv;
- if (!obj->funcs->get_sg_table)
+ /*
+ * drm_gem_map_dma_buf() requires obj->funcs->get_sg_table(), but drivers
+ * that implement their own ->map_dma_buf() do not need it.
+ */
+ if (dma_buf->ops->map_dma_buf == drm_gem_map_dma_buf &&
+ !obj->funcs->get_sg_table)
return -ENOSYS;
return drm_gem_pin(obj);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 4d60cc810b577..bf2dd1f46b6c4 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -422,6 +422,13 @@ static int drm_helper_probe_get_modes(struct drm_connector *connector)
count = connector_funcs->get_modes(connector);
+ /* The .get_modes() callback should not return negative values. */
+ if (count < 0) {
+ drm_err(connector->dev, ".get_modes() returned %pe\n",
+ ERR_PTR(count));
+ count = 0;
+ }
+
/*
* Fallback for when DDC probe failed in drm_get_edid() and thus skipped
* override/firmware EDID.
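
The probe-helper change above logs a negative .get_modes() return and treats it as zero, since the callback contract is a non-negative mode count. A standalone sketch of that clamping, with broken_get_modes() as a hypothetical callback:

#include <stdio.h>

typedef int (*get_modes_cb)(void);

static int broken_get_modes(void) { return -19; /* an -ENODEV-style error */ }

/* the mode-count contract is >= 0, so log and clamp anything negative */
static int probe_modes(get_modes_cb cb)
{
        int count = cb();

        if (count < 0) {
                fprintf(stderr, ".get_modes() returned %d, treating as 0\n", count);
                count = 0;
        }
        return count;
}

int main(void)
{
        printf("modes: %d\n", probe_modes(broken_get_modes));
        return 0;
}
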
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index ca31bad6c5760..f48c4343f4690 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -74,16 +74,15 @@ static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
{
struct exynos_dp_device *dp = to_dp(plat_data);
struct drm_display_mode *mode;
- int num_modes = 0;
if (dp->plat_data.panel)
- return num_modes;
+ return 0;
mode = drm_mode_create(connector->dev);
if (!mode) {
DRM_DEV_ERROR(dp->dev,
"failed to create a new display mode.\n");
- return num_modes;
+ return 0;
}
drm_display_mode_from_videomode(&dp->vm, mode);
@@ -94,7 +93,7 @@ static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
- return num_modes + 1;
+ return 1;
}
static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 00382f28748ac..f5bbba9ad2252 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -316,14 +316,14 @@ static int vidi_get_modes(struct drm_connector *connector)
*/
if (!ctx->raw_edid) {
DRM_DEV_DEBUG_KMS(ctx->dev, "raw_edid is null.\n");
- return -EFAULT;
+ return 0;
}
edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
if (!edid) {
DRM_DEV_DEBUG_KMS(ctx->dev, "failed to allocate edid\n");
- return -ENOMEM;
+ return 0;
}
drm_connector_update_edid_property(connector, edid);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 43bed6cbaaea0..b1d02dec3774d 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
int ret;
if (!hdata->ddc_adpt)
- return -ENODEV;
+ return 0;
edid = drm_get_edid(connector, hdata->ddc_adpt);
if (!edid)
- return -ENODEV;
+ return 0;
hdata->dvi_mode = !connector->display_info.is_hdmi;
DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 3ef6ed41e62b4..fba73c38e2356 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -33,9 +33,9 @@ endif
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
# Fine grained warnings disable
-CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init)
-CFLAGS_display/intel_display_device.o = $(call cc-disable-warning, override-init)
-CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init)
+CFLAGS_i915_pci.o = -Wno-override-init
+CFLAGS_display/intel_display_device.o = -Wno-override-init
+CFLAGS_display/intel_fbdev.o = -Wno-override-init
# Support compiling the display code separately for both i915 and xe
# drivers. Define I915 when building i915.
@@ -118,6 +118,7 @@ gt-y += \
gt/intel_ggtt_fencing.o \
gt/intel_gt.o \
gt/intel_gt_buffer_pool.o \
+ gt/intel_gt_ccs_mode.o \
gt/intel_gt_clock_utils.o \
gt/intel_gt_debugfs.o \
gt/intel_gt_engines_debugfs.o \
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index dfe0b07a122d1..06ec04e667e32 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -717,7 +717,6 @@ static void g4x_enable_dp(struct intel_atomic_state *state,
{
intel_enable_dp(state, encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
- encoder->audio_enable(encoder, pipe_config, conn_state);
}
static void vlv_enable_dp(struct intel_atomic_state *state,
@@ -726,7 +725,6 @@ static void vlv_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
intel_edp_backlight_on(pipe_config, conn_state);
- encoder->audio_enable(encoder, pipe_config, conn_state);
}
static void g4x_pre_enable_dp(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index eda4a8b885904..ac456a2275dba 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1155,7 +1155,6 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
}
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
/* ensure all panel commands dispatched before enabling transcoder */
wait_for_cmds_dispatched_to_panel(encoder);
@@ -1256,6 +1255,8 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
/* step7: enable backlight */
intel_backlight_enable(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index fe52c06271ef0..52bd3576835b6 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1955,16 +1955,12 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
* these devices we split the init OTP sequence into a deassert sequence and
* the actual init OTP part.
*/
-static void fixup_mipi_sequences(struct drm_i915_private *i915,
- struct intel_panel *panel)
+static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
u8 *init_otp;
int len;
- /* Limit this to VLV for now. */
- if (!IS_VALLEYVIEW(i915))
- return;
-
/* Limit this to v1 vid-mode sequences */
if (panel->vbt.dsi.config->is_cmd_mode ||
panel->vbt.dsi.seq_version != 1)
@@ -2000,6 +1996,41 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915,
panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
}
+/*
+ * Some machines (e.g. Lenovo 82TQ) appear to have broken
+ * VBT sequences:
+ * - INIT_OTP is not present at all
+ * - what should be in INIT_OTP is in DISPLAY_ON
+ * - what should be in DISPLAY_ON is in BACKLIGHT_ON
+ * (along with the actual backlight stuff)
+ *
+ * To make those work we simply swap DISPLAY_ON and INIT_OTP.
+ *
+ * TODO: Do we need to limit this to specific machines,
+ * or examine the contents of the sequences to
+ * avoid false positives?
+ */
+static void icl_fixup_mipi_sequences(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] &&
+ panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]) {
+ drm_dbg_kms(&i915->drm, "Broken VBT: Swapping INIT_OTP and DISPLAY_ON sequences\n");
+
+ swap(panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP],
+ panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]);
+ }
+}
+
+static void fixup_mipi_sequences(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ if (DISPLAY_VER(i915) >= 11)
+ icl_fixup_mipi_sequences(i915, panel);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_fixup_mipi_sequences(i915, panel);
+}
+
static void
parse_mipi_sequence(struct drm_i915_private *i915,
struct intel_panel *panel)
@@ -3351,6 +3382,9 @@ bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_da
{
const struct child_device_config *child = &devdata->child;
+ if (!devdata)
+ return false;
+
if (!intel_bios_encoder_supports_dp(devdata) ||
!intel_bios_encoder_supports_hdmi(devdata))
return false;
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index ed89b86ea625a..f672bfd70d455 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2534,7 +2534,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
- enum pipe pipe = new_cdclk_state->pipe;
+ struct intel_cdclk_config cdclk_config;
+ enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@@ -2543,12 +2544,25 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_pre_notify(state);
- if (pipe == INVALID_PIPE ||
- old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+ if (new_cdclk_state->disable_pipes) {
+ cdclk_config = new_cdclk_state->actual;
+ pipe = INVALID_PIPE;
+ } else {
+ if (new_cdclk_state->actual.cdclk >= old_cdclk_state->actual.cdclk) {
+ cdclk_config = new_cdclk_state->actual;
+ pipe = new_cdclk_state->pipe;
+ } else {
+ cdclk_config = old_cdclk_state->actual;
+ pipe = INVALID_PIPE;
+ }
- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
+ cdclk_config.voltage_level = max(new_cdclk_state->actual.voltage_level,
+ old_cdclk_state->actual.voltage_level);
}
+
+ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(i915, &cdclk_config, pipe);
}
/**
@@ -2566,7 +2580,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
- enum pipe pipe = new_cdclk_state->pipe;
+ enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@@ -2575,12 +2589,15 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_post_notify(state);
- if (pipe != INVALID_PIPE &&
- old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+ if (!new_cdclk_state->disable_pipes &&
+ new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk)
+ pipe = new_cdclk_state->pipe;
+ else
+ pipe = INVALID_PIPE;
+
+ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
- }
+ intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
}
static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
@@ -3058,6 +3075,7 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa
return NULL;
cdclk_state->pipe = INVALID_PIPE;
+ cdclk_state->disable_pipes = false;
return &cdclk_state->base;
}
@@ -3236,6 +3254,8 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
if (ret)
return ret;
+ new_cdclk_state->disable_pipes = true;
+
drm_dbg_kms(&dev_priv->drm,
"Modeset required for cdclk change\n");
}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 48fd7d39e0cd9..71bc032bfef16 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -51,6 +51,9 @@ struct intel_cdclk_state {
/* bitmask of active pipes */
u8 active_pipes;
+
+ /* update cdclk with pipes disabled */
+ bool disable_pipes;
};
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index f8b33999d43fc..0d3da55e1c24d 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -36,12 +36,10 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 base;
if (DISPLAY_INFO(dev_priv)->cursor_needs_physical)
- base = i915_gem_object_get_dma_address(obj, 0);
+ base = plane_state->phys_dma_addr;
else
base = intel_plane_ggtt_offset(plane_state);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index c587a8efeafcf..c17462b4c2ac1 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -4256,7 +4256,12 @@ static bool m_n_equal(const struct intel_link_m_n *m_n_1,
static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
const struct intel_crtc_state *crtc_state2)
{
+ /*
+ * FIXME the modeset sequence is currently wrong and
+ * can't deal with bigjoiner + port sync at the same time.
+ */
return crtc_state1->hw.active && crtc_state2->hw.active &&
+ !crtc_state1->bigjoiner_pipes && !crtc_state2->bigjoiner_pipes &&
crtc_state1->output_types == crtc_state2->output_types &&
crtc_state1->output_format == crtc_state2->output_format &&
crtc_state1->lane_count == crtc_state2->lane_count &&
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index ab2f52d21bad8..8af9e6128277a 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -2709,15 +2709,6 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
*/
intel_de_write(dev_priv, PIPESRC(pipe),
PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
-
- if (!crtc_state->enable_psr2_su_region_et)
- return;
-
- width = drm_rect_width(&crtc_state->psr2_su_area);
- height = drm_rect_height(&crtc_state->psr2_su_area);
-
- intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(pipe),
- PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
}
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index fe42688137863..9b1bce2624b9e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -47,6 +47,7 @@ struct drm_printer;
#define HAS_DPT(i915) (DISPLAY_VER(i915) >= 13)
#define HAS_DSB(i915) (DISPLAY_INFO(i915)->has_dsb)
#define HAS_DSC(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dsc)
+#define HAS_DSC_MST(__i915) (DISPLAY_VER(__i915) >= 12 && HAS_DSC(__i915))
#define HAS_FBC(i915) (DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0)
#define HAS_FPGA_DBG_UNCLAIMED(i915) (DISPLAY_INFO(i915)->has_fpga_dbg)
#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) >= 3)
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h
index 99bdb833591ce..7862e7cefe027 100644
--- a/drivers/gpu/drm/i915/display/intel_display_trace.h
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
@@ -411,7 +411,7 @@ TRACE_EVENT(intel_fbc_activate,
struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
plane->pipe);
__assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name)
+ __assign_str(name, plane->base.name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -438,7 +438,7 @@ TRACE_EVENT(intel_fbc_deactivate,
struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
plane->pipe);
__assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name)
+ __assign_str(name, plane->base.name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -465,7 +465,7 @@ TRACE_EVENT(intel_fbc_nuke,
struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
plane->pipe);
__assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name)
+ __assign_str(name, plane->base.name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index e67cd5b02e84f..bf3f942e19c3d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -727,6 +727,7 @@ struct intel_plane_state {
#define PLANE_HAS_FENCE BIT(0)
struct intel_fb_view view;
+ u32 phys_dma_addr; /* for cursor_needs_physical */
/* Plane pxp decryption state */
bool decrypt;
@@ -1422,6 +1423,8 @@ struct intel_crtc_state {
u32 psr2_man_track_ctl;
+ u32 pipe_srcsz_early_tpt;
+
struct drm_rect psr2_su_area;
/* Variable Refresh Rate state */
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index f0c3ed37b350b..e583515f9b25a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -67,6 +67,7 @@
#include "intel_dp_tunnel.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
+#include "intel_drrs.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
@@ -498,7 +499,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
/* The values must be in increasing order */
static const int mtl_rates[] = {
162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
- 810000, 1000000, 1350000, 2000000,
+ 810000, 1000000, 2000000,
};
static const int icl_rates[] = {
162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
@@ -1421,7 +1422,8 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
if (DISPLAY_VER(dev_priv) >= 12)
return true;
- if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A)
+ if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
+ !intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
return true;
return false;
@@ -1916,8 +1918,9 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
- if (valid_dsc_bpp[i] < dsc_min_bpp ||
- valid_dsc_bpp[i] > dsc_max_bpp)
+ if (valid_dsc_bpp[i] < dsc_min_bpp)
+ continue;
+ if (valid_dsc_bpp[i] > dsc_max_bpp)
break;
ret = dsc_compute_link_config(intel_dp,
@@ -2683,15 +2686,6 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}
-static bool cpu_transcoder_has_drrs(struct drm_i915_private *i915,
- enum transcoder cpu_transcoder)
-{
- if (HAS_DOUBLE_BUFFERED_M_N(i915))
- return true;
-
- return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
-}
-
static bool can_enable_drrs(struct intel_connector *connector,
const struct intel_crtc_state *pipe_config,
const struct drm_display_mode *downclock_mode)
@@ -2714,7 +2708,7 @@ static bool can_enable_drrs(struct intel_connector *connector,
if (pipe_config->has_pch_encoder)
return false;
- if (!cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
+ if (!intel_cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
return false;
return downclock_mode &&
@@ -2731,7 +2725,11 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
int pixel_clock;
- if (has_seamless_m_n(connector))
+ /*
+ * FIXME all joined pipes share the same transcoder.
+ * Need to account for that when updating M/N live.
+ */
+ if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
pipe_config->update_m_n = true;
if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
@@ -6565,6 +6563,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_connector->sync_state = intel_dp_connector_sync_state;
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
intel_dp_aux_fini(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index b98a87883fefb..9db43bd81ce2f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -691,12 +691,15 @@ int intel_dp_hdcp_get_remote_capability(struct intel_connector *connector,
u8 bcaps;
int ret;
+ *hdcp_capable = false;
+ *hdcp2_capable = false;
if (!intel_encoder_is_mst(connector->encoder))
return -EINVAL;
ret = _intel_dp_hdcp2_get_capability(aux, hdcp2_capable);
if (ret)
- return ret;
+ drm_dbg_kms(&i915->drm,
+ "HDCP2 DPCD capability read failed err: %d\n", ret);
ret = intel_dp_hdcp_read_bcaps(aux, i915, &bcaps);
if (ret)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 53aec023ce92f..b651c990af85f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -1355,7 +1355,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
- if (DISPLAY_VER(dev_priv) >= 10 &&
+ if (HAS_DSC_MST(dev_priv) &&
drm_dp_sink_supports_dsc(intel_connector->dp.dsc_dpcd)) {
/*
* TBD pass the connector BPC,
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index ff480f171f75a..b6d24410740f8 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -2554,7 +2554,7 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
static bool
ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
{
- return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
+ return ((IS_ELKHARTLAKE(i915) &&
IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
i915->display.dpll.ref_clks.nssc == 38400;
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 169ef38ff1883..597f8bd6aa1a0 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -63,6 +63,15 @@ const char *intel_drrs_type_str(enum drrs_type drrs_type)
return str[drrs_type];
}
+bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
+ enum transcoder cpu_transcoder)
+{
+ if (HAS_DOUBLE_BUFFERED_M_N(i915))
+ return true;
+
+ return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
+}
+
static void
intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
enum drrs_refresh_rate refresh_rate)
@@ -312,9 +321,8 @@ static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
mutex_lock(&crtc->drrs.mutex);
seq_printf(m, "DRRS capable: %s\n",
- str_yes_no(crtc_state->has_drrs ||
- HAS_DOUBLE_BUFFERED_M_N(i915) ||
- intel_cpu_transcoder_has_m2_n2(i915, crtc_state->cpu_transcoder)));
+ str_yes_no(intel_cpu_transcoder_has_drrs(i915,
+ crtc_state->cpu_transcoder)));
seq_printf(m, "DRRS enabled: %s\n",
str_yes_no(crtc_state->has_drrs));
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.h b/drivers/gpu/drm/i915/display/intel_drrs.h
index 8ef5f93a80ffd..0982f95eab727 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.h
+++ b/drivers/gpu/drm/i915/display/intel_drrs.h
@@ -9,12 +9,15 @@
#include <linux/types.h>
enum drrs_type;
+enum transcoder;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_connector;
+bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
+ enum transcoder cpu_transcoder);
const char *intel_drrs_type_str(enum drrs_type drrs_type);
bool intel_drrs_is_active(struct intel_crtc *crtc);
void intel_drrs_activate(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index d62e050185e7c..e4515bf920388 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -340,6 +340,17 @@ static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
return max(0, vblank_start - intel_usecs_to_scanlines(adjusted_mode, latency));
}
+static u32 dsb_chicken(struct intel_crtc *crtc)
+{
+ if (crtc->mode_flags & I915_MODE_FLAG_VRR)
+ return DSB_CTRL_WAIT_SAFE_WINDOW |
+ DSB_CTRL_NO_WAIT_VBLANK |
+ DSB_INST_WAIT_SAFE_WINDOW |
+ DSB_INST_NO_WAIT_VBLANK;
+ else
+ return 0;
+}
+
static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
int dewake_scanline)
{
@@ -361,6 +372,9 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
intel_de_write_fw(dev_priv, DSB_CTRL(pipe, dsb->id),
ctrl | DSB_ENABLE);
+ intel_de_write_fw(dev_priv, DSB_CHICKEN(pipe, dsb->id),
+ dsb_chicken(crtc));
+
intel_de_write_fw(dev_priv, DSB_HEAD(pipe, dsb->id),
intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 7b42aef37d2f7..b6df9baf481b6 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -255,6 +255,16 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
return PTR_ERR(vma);
plane_state->ggtt_vma = vma;
+
+ /*
+ * Pre-populate the dma address before we enter the vblank
+ * evade critical section as i915_gem_object_get_dma_address()
+ * will trigger might_sleep() even if it won't actually sleep,
+ * which is the case when the fb has already been pinned.
+ */
+ if (phys_cursor)
+ plane_state->phys_dma_addr =
+ i915_gem_object_get_dma_address(intel_fb_obj(fb), 0);
} else {
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 6927785fd6ff2..aabd018bd7374 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -1422,6 +1422,17 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
+ /*
+ * FIXME figure out what is wrong with PSR+bigjoiner and
+ * fix it. Presumably something related to the fact that
+ * PSR is a transcoder level feature.
+ */
+ if (crtc_state->bigjoiner_pipes) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR disabled due to bigjoiner\n");
+ return;
+ }
+
if (CAN_PANEL_REPLAY(intel_dp))
crtc_state->has_panel_replay = true;
else
@@ -1994,6 +2005,7 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct intel_encoder *encoder;
@@ -2013,6 +2025,12 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st
intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
crtc_state->psr2_man_track_ctl);
+
+ if (!crtc_state->enable_psr2_su_region_et)
+ return;
+
+ intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
+ crtc_state->pipe_srcsz_early_tpt);
}
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
@@ -2051,6 +2069,20 @@ exit:
crtc_state->psr2_man_track_ctl = val;
}
+static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
+ bool full_update)
+{
+ int width, height;
+
+ if (!crtc_state->enable_psr2_su_region_et || full_update)
+ return 0;
+
+ width = drm_rect_width(&crtc_state->psr2_su_area);
+ height = drm_rect_height(&crtc_state->psr2_su_area);
+
+ return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
+}
+
static void clip_area_update(struct drm_rect *overlap_damage_area,
struct drm_rect *damage_area,
struct drm_rect *pipe_src)
@@ -2095,21 +2127,36 @@ static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_st
* cursor fully when cursor is in SU area.
*/
static void
-intel_psr2_sel_fetch_et_alignment(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *cursor_state)
+intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_rect inter;
+ struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ int i;
- if (!crtc_state->enable_psr2_su_region_et ||
- !cursor_state->uapi.visible)
+ if (!crtc_state->enable_psr2_su_region_et)
return;
- inter = crtc_state->psr2_su_area;
- if (!drm_rect_intersect(&inter, &cursor_state->uapi.dst))
- return;
+ for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
+ struct drm_rect inter;
+
+ if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
+ continue;
+
+ if (plane->id != PLANE_CURSOR)
+ continue;
+
+ if (!new_plane_state->uapi.visible)
+ continue;
- clip_area_update(&crtc_state->psr2_su_area, &cursor_state->uapi.dst,
- &crtc_state->pipe_src);
+ inter = crtc_state->psr2_su_area;
+ if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
+ continue;
+
+ clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
+ &crtc_state->pipe_src);
+ }
}
/*
@@ -2152,8 +2199,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_plane_state *new_plane_state, *old_plane_state,
- *cursor_plane_state = NULL;
+ struct intel_plane_state *new_plane_state, *old_plane_state;
struct intel_plane *plane;
bool full_update = false;
int i, ret;
@@ -2238,13 +2284,6 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
-
- /*
- * Cursor plane new state is stored to adjust su area to cover
- * cursor are fully.
- */
- if (plane->id == PLANE_CURSOR)
- cursor_plane_state = new_plane_state;
}
/*
@@ -2273,9 +2312,13 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (ret)
return ret;
- /* Adjust su area to cover cursor fully as necessary */
- if (cursor_plane_state)
- intel_psr2_sel_fetch_et_alignment(crtc_state, cursor_plane_state);
+ /*
+ * Adjust su area to cover cursor fully as necessary (early
+ * transport). This needs to be done after
+	 * drm_atomic_add_affected_planes to ensure a visible cursor is added to the
+	 * affected planes even when the cursor itself is not updated.
+ */
+ intel_psr2_sel_fetch_et_alignment(state, crtc);
intel_psr2_sel_fetch_pipe_alignment(crtc_state);
@@ -2338,6 +2381,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
skip_sel_fetch_set_loop:
psr2_man_trk_ctl_calc(crtc_state, full_update);
+ crtc_state->pipe_srcsz_early_tpt =
+ psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 5f9e748adc89e..0cd9c183f6212 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -1842,8 +1842,6 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
- encoder->audio_disable(encoder, old_crtc_state, conn_state);
-
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo,
@@ -1935,8 +1933,6 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
intel_sdvo_set_encoder_power_state(intel_sdvo,
DRM_MODE_DPMS_ON);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo_connector->output_flag);
-
- encoder->audio_enable(encoder, pipe_config, conn_state);
}
static enum drm_mode_status
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 5d905f932cb4b..f542ee1db1d97 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -117,6 +117,13 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
const struct drm_display_info *info = &connector->base.display_info;
int vmin, vmax;
+ /*
+ * FIXME all joined pipes share the same transcoder.
+ * Need to account for that during VRR toggle/push/etc.
+ */
+ if (crtc_state->bigjoiner_pipes)
+ return;
+
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return;
@@ -187,10 +194,11 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/*
- * TRANS_SET_CONTEXT_LATENCY with VRR enabled
- * requires this chicken bit on ADL/DG2.
+ * This bit seems to have two meanings depending on the platform:
+ * TGL: generate VRR "safe window" for DSB vblank waits
+ * ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR
*/
- if (DISPLAY_VER(dev_priv) == 13)
+ if (IS_DISPLAY_VER(dev_priv, 12, 13))
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
0, PIPE_VBLANK_WITH_DELAY);
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index e941e2e4fd14c..860574d04f881 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -2295,6 +2295,9 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915,
if (HAS_4TILE(i915))
caps |= INTEL_PLANE_CAP_TILING_4;
+ if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(i915))
+ return caps;
+
if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {
caps |= INTEL_PLANE_CAP_CCS_RC;
if (DISPLAY_VER(i915) >= 12)
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index fa46d2308b0ed..81bf2216371be 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -961,6 +961,9 @@ static int gen8_init_rsvd(struct i915_address_space *vm)
struct i915_vma *vma;
int ret;
+ if (!intel_gt_needs_wa_16018031267(vm->gt))
+ return 0;
+
/* The memory will be used only by GPU. */
obj = i915_gem_object_create_lmem(i915, PAGE_SIZE,
I915_BO_ALLOC_VOLATILE |
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 1ade568ffbfa4..7a6dc371c384e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -908,6 +908,23 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
info->engine_mask &= ~BIT(GSC0);
}
+ /*
+ * Do not create the command streamer for CCS slices beyond the first.
+	 * All the workloads submitted to the first engine will be shared among
+	 * all the slices.
+	 *
+	 * Once the user is allowed to customize the CCS mode, this check
+	 * needs to be removed.
+ */
+ if (IS_DG2(gt->i915)) {
+ u8 first_ccs = __ffs(CCS_MASK(gt));
+
+		/* Mask off all the CCS engines */
+ info->engine_mask &= ~GENMASK(CCS3, CCS0);
+ /* Put back in the first CCS engine */
+ info->engine_mask |= BIT(_CCS(first_ccs));
+ }
+
return info->engine_mask;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 96bdb93a948d1..fb7bff27b45a3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -279,9 +279,6 @@ static int __engine_park(struct intel_wakeref *wf)
intel_engine_park_heartbeat(engine);
intel_breadcrumbs_park(engine->breadcrumbs);
- /* Must be reset upon idling, or we may miss the busy wakeup. */
- GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);
-
if (engine->park)
engine->park(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 42aade0faf2d1..b061a0a0d6b08 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -3272,6 +3272,9 @@ static void execlists_park(struct intel_engine_cs *engine)
{
cancel_timer(&engine->execlists.timer);
cancel_timer(&engine->execlists.preempt);
+
+ /* Reset upon idling, or we may delay the busy wakeup. */
+ WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN);
}
static void add_to_engine(struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index a425db5ed3a22..6a2c2718bcc38 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -1024,6 +1024,12 @@ enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
return I915_MAP_WC;
}
+bool intel_gt_needs_wa_16018031267(struct intel_gt *gt)
+{
+ /* Wa_16018031267, Wa_16018063123 */
+ return IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 55), IP_VER(12, 71));
+}
+
bool intel_gt_needs_wa_22016122933(struct intel_gt *gt)
{
return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 608f5c8729285..003eb93b826fd 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -82,17 +82,18 @@ struct drm_printer;
##__VA_ARGS__); \
} while (0)
-#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
- IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 55), IP_VER(12, 71)) && \
- engine->class == COPY_ENGINE_CLASS && engine->instance == 0)
-
static inline bool gt_is_root(struct intel_gt *gt)
{
return !gt->info.id;
}
+bool intel_gt_needs_wa_16018031267(struct intel_gt *gt);
bool intel_gt_needs_wa_22016122933(struct intel_gt *gt);
+#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
+ intel_gt_needs_wa_16018031267(engine->gt) && \
+ engine->class == COPY_ENGINE_CLASS && engine->instance == 0)
+
static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
return container_of(uc, struct intel_gt, uc);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
new file mode 100644
index 0000000000000..044219c5960a5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_ccs_mode.h"
+#include "intel_gt_regs.h"
+
+void intel_gt_apply_ccs_mode(struct intel_gt *gt)
+{
+ int cslice;
+ u32 mode = 0;
+ int first_ccs = __ffs(CCS_MASK(gt));
+
+ if (!IS_DG2(gt->i915))
+ return;
+
+ /* Build the value for the fixed CCS load balancing */
+ for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
+ if (CCS_MASK(gt) & BIT(cslice))
+ /*
+ * If available, assign the cslice
+ * to the first available engine...
+ */
+ mode |= XEHP_CCS_MODE_CSLICE(cslice, first_ccs);
+
+ else
+ /*
+ * ... otherwise, mark the cslice as
+ * unavailable if no CCS dispatches here
+ */
+ mode |= XEHP_CCS_MODE_CSLICE(cslice,
+ XEHP_CCS_MODE_CSLICE_MASK);
+ }
+
+ intel_uncore_write(gt->uncore, XEHP_CCS_MODE, mode);
+}
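
For illustration, assuming a hypothetical DG2 part with compute slices 0 and 2 fused in (CCS_MASK = 0b0101), the loop above assembles the fixed load-balancing value as follows; the userspace mock below mirrors the same arithmetic using the XEHP_CCS_MODE_CSLICE() encoding (3 bits per cslice, 0x7 meaning "no CCS dispatches here") and is not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define CSLICE_MASK	0x7	/* XEHP_CCS_MODE_CSLICE_MASK */
#define CSLICE_WIDTH	3	/* ilog2(CSLICE_MASK + 1) */
#define CSLICE(cslice, ccs)	((uint32_t)(ccs) << ((cslice) * CSLICE_WIDTH))

int main(void)
{
	uint32_t ccs_mask = 0x5;	/* hypothetical fuse value: CCS0 + CCS2 present */
	uint32_t first_ccs = 0;		/* __ffs(ccs_mask) */
	uint32_t mode = 0;
	int cslice;

	for (cslice = 0; cslice < 4; cslice++) {
		if (ccs_mask & (1u << cslice))
			mode |= CSLICE(cslice, first_ccs);	/* route this cslice to CCS0 */
		else
			mode |= CSLICE(cslice, CSLICE_MASK);	/* mark this cslice unavailable */
	}

	printf("XEHP_CCS_MODE = 0x%x\n", mode);	/* prints 0xe38 */
	return 0;
}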
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
new file mode 100644
index 0000000000000..9e5549caeb269
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_CCS_MODE_H__
+#define __INTEL_GT_CCS_MODE_H__
+
+struct intel_gt;
+
+void intel_gt_apply_ccs_mode(struct intel_gt *gt);
+
+#endif /* __INTEL_GT_CCS_MODE_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 50962cfd1353a..743fe35667227 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -1477,8 +1477,14 @@
#define ECOBITS_PPGTT_CACHE4B (0 << 8)
#define GEN12_RCU_MODE _MMIO(0x14800)
+#define XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE REG_BIT(1)
#define GEN12_RCU_MODE_CCS_ENABLE REG_BIT(0)
+#define XEHP_CCS_MODE _MMIO(0x14804)
+#define XEHP_CCS_MODE_CSLICE_MASK REG_GENMASK(2, 0) /* CCS0-3 + rsvd */
+#define XEHP_CCS_MODE_CSLICE_WIDTH ilog2(XEHP_CCS_MODE_CSLICE_MASK + 1)
+#define XEHP_CCS_MODE_CSLICE(cslice, ccs) (ccs << (cslice * XEHP_CCS_MODE_CSLICE_WIDTH))
+
#define CHV_FUSE_GT _MMIO(VLV_GUNIT_BASE + 0x2168)
#define CHV_FGT_DISABLE_SS0 (1 << 10)
#define CHV_FGT_DISABLE_SS1 (1 << 11)
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index d67d44611c283..6ec3582c97357 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -10,6 +10,7 @@
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
+#include "intel_gt_ccs_mode.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
@@ -51,7 +52,8 @@
* registers belonging to BCS, VCS or VECS should be implemented in
* xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
 * engine's MMIO range but that are part of the common RCS/CCS reset domain
- * should be implemented in general_render_compute_wa_init().
+ * should be implemented in general_render_compute_wa_init(). Settings
+ * for CCS load balancing should be added in ccs_engine_wa_mode().
*
* - GT workarounds: the list of these WAs is applied whenever these registers
* revert to their default values: on GPU reset, suspend/resume [1]_, etc.
@@ -1653,6 +1655,7 @@ static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
/* Wa_14018575942 / Wa_18018781329 */
+ wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
/* Wa_22016670082 */
@@ -2853,6 +2856,28 @@ add_render_compute_tuning_settings(struct intel_gt *gt,
wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
}
+static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ struct intel_gt *gt = engine->gt;
+
+ if (!IS_DG2(gt->i915))
+ return;
+
+ /*
+ * Wa_14019159160: This workaround, along with others, leads to
+ * significant challenges in utilizing load balancing among the
+ * CCS slices. Consequently, an architectural decision has been
+ * made to completely disable automatic CCS load balancing.
+ */
+ wa_masked_en(wal, GEN12_RCU_MODE, XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE);
+
+ /*
+ * After having disabled automatic load balancing we need to
+ * assign all slices to a single CCS. We will call it CCS mode 1
+ */
+ intel_gt_apply_ccs_mode(gt);
+}
+
/*
* The workarounds in this function apply to shared registers in
* the general render reset domain that aren't tied to a
@@ -3003,8 +3028,10 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
* to a single RCS/CCS engine's workaround list since
* they're reset as part of the general render domain reset.
*/
- if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
+ if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) {
general_render_compute_wa_init(engine, wal);
+ ccs_engine_wa_mode(engine, wal);
+ }
if (engine->class == COMPUTE_CLASS)
ccs_engine_wa_init(engine, wal);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index f3dcae4b9d455..0f83c6d4376ff 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1403,14 +1403,17 @@ static void guc_cancel_busyness_worker(struct intel_guc *guc)
* Trying to pass a 'need_sync' or 'in_reset' flag all the way down through
* every possible call stack is unfeasible. It would be too intrusive to many
* areas that really don't care about the GuC backend. However, there is the
- * 'reset_in_progress' flag available, so just use that.
+ * I915_RESET_BACKOFF flag, and the gt->reset.mutex can be checked with
+ * mutex_is_locked(). So just use those. Note that testing both is required
+ * due to the hideously
+ * complex nature of the i915 driver's reset code paths.
*
* And note that in the case of a reset occurring during driver unload
- * (wedge_on_fini), skipping the cancel in _prepare (when the reset flag is set
- * is fine because there is another cancel in _finish (when the reset flag is
- * not).
+ * (wedged_on_fini), skipping the cancel in reset_prepare/reset_fini (when the
+ * reset flag/mutex are set) is fine because there is another explicit cancel in
+ * intel_guc_submission_fini (when the reset flag/mutex are not).
*/
- if (guc_to_gt(guc)->uc.reset_in_progress)
+ if (mutex_is_locked(&guc_to_gt(guc)->reset.mutex) ||
+ test_bit(I915_RESET_BACKOFF, &guc_to_gt(guc)->reset.flags))
cancel_delayed_work(&guc->timestamp.work);
else
cancel_delayed_work_sync(&guc->timestamp.work);
@@ -1424,8 +1427,6 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
unsigned long flags;
ktime_t unused;
- guc_cancel_busyness_worker(guc);
-
spin_lock_irqsave(&guc->timestamp.lock, flags);
guc_update_pm_timestamp(guc, &unused);
@@ -2004,13 +2005,6 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
void intel_guc_submission_reset_finish(struct intel_guc *guc)
{
- /*
- * Ensure the busyness worker gets cancelled even on a fatal wedge.
- * Note that reset_prepare is not allowed to because it confuses lockdep.
- */
- if (guc_submission_initialized(guc))
- guc_cancel_busyness_worker(guc);
-
/* Reset called during driver load or during wedge? */
if (unlikely(!guc_submission_initialized(guc) ||
!intel_guc_is_fw_running(guc) ||
@@ -2136,6 +2130,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
if (!guc->submission_initialized)
return;
+ guc_fini_engine_stats(guc);
guc_flush_destroyed_contexts(guc);
guc_lrc_desc_pool_destroy_v69(guc);
i915_sched_engine_put(guc->sched_engine);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 6dfe5d9456c69..399bc319180b0 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -637,6 +637,10 @@ void intel_uc_reset_finish(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
+ /*
+ * NB: The wedge code path results in prepare -> prepare -> finish -> finish.
+ * So this function is sometimes called with the in-progress flag not set.
+ */
uc->reset_in_progress = false;
/* Firmware expected to be running when this function is called */
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 9ee902d5b72c4..4b9233c07a22c 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -800,7 +800,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_cleanup_modeset2;
ret = intel_pxp_init(i915);
- if (ret != -ENODEV)
+ if (ret && ret != -ENODEV)
drm_dbg(&i915->drm, "pxp init failed with %d\n", ret);
ret = intel_display_driver_probe(i915);
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index 8c3f443c8347e..b758fd110c204 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -72,12 +72,13 @@ hwm_locked_with_pm_intel_uncore_rmw(struct hwm_drvdata *ddat,
struct intel_uncore *uncore = ddat->uncore;
intel_wakeref_t wakeref;
- mutex_lock(&hwmon->hwmon_lock);
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ mutex_lock(&hwmon->hwmon_lock);
- with_intel_runtime_pm(uncore->rpm, wakeref)
intel_uncore_rmw(uncore, reg, clear, set);
- mutex_unlock(&hwmon->hwmon_lock);
+ mutex_unlock(&hwmon->hwmon_lock);
+ }
}
/*
@@ -136,20 +137,21 @@ hwm_energy(struct hwm_drvdata *ddat, long *energy)
else
rgaddr = hwmon->rg.energy_status_all;
- mutex_lock(&hwmon->hwmon_lock);
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ mutex_lock(&hwmon->hwmon_lock);
- with_intel_runtime_pm(uncore->rpm, wakeref)
reg_val = intel_uncore_read(uncore, rgaddr);
- if (reg_val >= ei->reg_val_prev)
- ei->accum_energy += reg_val - ei->reg_val_prev;
- else
- ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
- ei->reg_val_prev = reg_val;
+ if (reg_val >= ei->reg_val_prev)
+ ei->accum_energy += reg_val - ei->reg_val_prev;
+ else
+ ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
+ ei->reg_val_prev = reg_val;
- *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
- hwmon->scl_shift_energy);
- mutex_unlock(&hwmon->hwmon_lock);
+ *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
+ hwmon->scl_shift_energy);
+ mutex_unlock(&hwmon->hwmon_lock);
+ }
}
static ssize_t
@@ -404,6 +406,7 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
/* Block waiting for GuC reset to complete when needed */
for (;;) {
+ wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
mutex_lock(&hwmon->hwmon_lock);
prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);
@@ -417,14 +420,13 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
}
mutex_unlock(&hwmon->hwmon_lock);
+ intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
schedule();
}
finish_wait(&ddat->waitq, &wait);
if (ret)
- goto unlock;
-
- wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
+ goto exit;
/* Disable PL1 limit and verify, because the limit cannot be disabled on all platforms */
if (val == PL1_DISABLE) {
@@ -444,9 +446,8 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit,
PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
exit:
- intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
-unlock:
mutex_unlock(&hwmon->hwmon_lock);
+ intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
return ret;
}
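
As a side note on the hwm_energy() hunk above: the accumulation handles the 32-bit hardware energy counter wrapping between two reads. A minimal userspace sketch of the same arithmetic, with hypothetical register values (not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t accum = 0;
	uint32_t prev = 0xfffffff0;	/* value sampled on the previous read */
	uint32_t now  = 0x00000010;	/* register wrapped past zero since then */

	if (now >= prev)
		accum += now - prev;
	else
		accum += UINT32_MAX - prev + now;	/* 32-bit rollover case */

	printf("accumulated: %llu\n", (unsigned long long)accum);	/* prints 31 */
	return 0;
}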
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index ba82277254b76..cc41974cee746 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -25,6 +25,8 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/cpufeature.h>
+#include <linux/bug.h>
+#include <linux/build_bug.h>
#include <asm/fpu/api.h>
#include "i915_memcpy.h"
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e00557e1a57f0..3b2e49ce29ba0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -4599,7 +4599,7 @@
#define MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
_MTL_CHICKEN_TRANS_A, \
_MTL_CHICKEN_TRANS_B)
-#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* ADL/DG2 */
+#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* tgl+ */
#define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */
#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index d09aad34ba37f..b70715b1411d6 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -34,6 +34,7 @@
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_tlb.h"
@@ -103,12 +104,42 @@ static inline struct i915_vma *active_to_vma(struct i915_active *ref)
static int __i915_vma_active(struct i915_active *ref)
{
- return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
+ struct i915_vma *vma = active_to_vma(ref);
+
+ if (!i915_vma_tryget(vma))
+ return -ENOENT;
+
+ /*
+ * Exclude global GTT VMA from holding a GT wakeref
+ * while active, otherwise GPU never goes idle.
+ */
+ if (!i915_vma_is_ggtt(vma)) {
+ /*
+ * Since we and our _retire() counterpart can be
+ * called asynchronously, storing a wakeref tracking
+ * handle inside struct i915_vma is not safe, and
+ * there is no other good place for that. Hence,
+ * use untracked variants of intel_gt_pm_get/put().
+ */
+ intel_gt_pm_get_untracked(vma->vm->gt);
+ }
+
+ return 0;
}
static void __i915_vma_retire(struct i915_active *ref)
{
- i915_vma_put(active_to_vma(ref));
+ struct i915_vma *vma = active_to_vma(ref);
+
+ if (!i915_vma_is_ggtt(vma)) {
+ /*
+ * Since we can be called from atomic contexts,
+ * use an async variant of intel_gt_pm_put().
+ */
+ intel_gt_pm_put_async_untracked(vma->vm->gt);
+ }
+
+ i915_vma_put(vma);
}
static struct i915_vma *
@@ -1404,7 +1435,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
struct i915_vma_work *work = NULL;
struct dma_fence *moving = NULL;
struct i915_vma_resource *vma_res = NULL;
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref;
unsigned int bound;
int err;
@@ -1424,8 +1455,14 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (err)
return err;
- if (flags & PIN_GLOBAL)
- wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+ /*
+ * In case of a global GTT, we must hold a runtime-pm wakeref
+ * while global PTEs are updated. In other cases, we hold
+ * the rpm reference while the VMA is active. Since runtime
+ * resume may require allocations, which are forbidden inside
+ * vm->mutex, get the first rpm wakeref outside of the mutex.
+ */
+ wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
if (flags & vma->vm->bind_async_flags) {
/* lock VM */
@@ -1561,8 +1598,7 @@ err_fence:
if (work)
dma_fence_work_commit_imm(&work->base);
err_rpm:
- if (wakeref)
- intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
if (moving)
dma_fence_put(moving);
diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 70349739dd89b..55dedd73f528c 100644
--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
@@ -72,14 +72,14 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
int ret;
if (!mode)
- return -EINVAL;
+ return 0;
ret = of_get_drm_display_mode(np, &imxpd->mode,
&imxpd->bus_flags,
OF_USE_NATIVE_MODE);
if (ret) {
drm_mode_destroy(connector->dev, mode);
- return ret;
+ return 0;
}
drm_mode_copy(mode, &imxpd->mode);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 0674aca0f8a3f..cf0b1de1c0712 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1377,6 +1377,10 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
if (adreno_is_a618(gpu))
gpu->ubwc_config.highest_bank_bit = 14;
+ if (adreno_is_a619(gpu))
+ /* TODO: Should be 14 but causes corruption at e.g. 1920x1200 on DP */
+ gpu->ubwc_config.highest_bank_bit = 13;
+
if (adreno_is_a619_holi(gpu))
gpu->ubwc_config.highest_bank_bit = 13;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 1f5245fc2cdc6..a847a0f7a73c9 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -852,7 +852,7 @@ static void a6xx_get_shader_block(struct msm_gpu *gpu,
(block->type << 8) | i);
in += CRASHDUMP_READ(in, REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE,
- block->size, dumper->iova + A6XX_CD_DATA_OFFSET);
+ block->size, out);
out += block->size * sizeof(u32);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
index 9a9f7092c526a..a3e60ac70689e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -324,6 +324,7 @@ static const struct dpu_wb_cfg x1e80100_wb[] = {
},
};
+/* TODO: INTF 3, 8 and 7 are used for MST, marked as INTF_NONE for now */
static const struct dpu_intf_cfg x1e80100_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -358,8 +359,8 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
.features = INTF_SC7280_MASK,
- .type = INTF_DP,
- .controller_id = MSM_DP_CONTROLLER_1,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
@@ -368,7 +369,7 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
.base = 0x38000, .len = 0x280,
.features = INTF_SC7280_MASK,
.type = INTF_DP,
- .controller_id = MSM_DP_CONTROLLER_2,
+ .controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21),
@@ -381,6 +382,33 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23),
+ }, {
+ .name = "intf_6", .id = INTF_6,
+ .base = 0x3A000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_2,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
+ }, {
+ .name = "intf_7", .id = INTF_7,
+ .base = 0x3b000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_2, /* pair with intf_6 for DP MST */
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 19),
+ }, {
+ .name = "intf_8", .id = INTF_8,
+ .base = 0x3c000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index ef871239adb2a..68fae048a9a83 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -459,15 +459,15 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
&perf->core_clk_rate);
debugfs_create_u32("enable_bw_release", 0600, entry,
(u32 *)&perf->enable_bw_release);
- debugfs_create_u32("threshold_low", 0600, entry,
+ debugfs_create_u32("threshold_low", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_low);
- debugfs_create_u32("threshold_high", 0600, entry,
+ debugfs_create_u32("threshold_high", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_high);
- debugfs_create_u32("min_core_ib", 0600, entry,
+ debugfs_create_u32("min_core_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_core_ib);
- debugfs_create_u32("min_llcc_ib", 0600, entry,
+ debugfs_create_u32("min_llcc_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_llcc_ib);
- debugfs_create_u32("min_dram_ib", 0600, entry,
+ debugfs_create_u32("min_dram_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_dram_ib);
debugfs_create_file("perf_mode", 0600, entry,
(u32 *)perf, &dpu_core_perf_mode_fops);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index 946dd0135dffc..6a0a74832fb64 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -525,14 +525,14 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
int ret;
if (!irq_cb) {
- DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
- DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
+ DPU_ERROR("IRQ=[%d, %d] NULL callback\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return -EINVAL;
}
if (!dpu_core_irq_is_valid(irq_idx)) {
- DPU_ERROR("invalid IRQ=[%d, %d]\n",
- DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+ DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index c4cb82af5c2f2..ffbfde9225898 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -484,7 +484,7 @@ static void dp_display_handle_video_request(struct dp_display_private *dp)
}
}
-static int dp_display_handle_port_ststus_changed(struct dp_display_private *dp)
+static int dp_display_handle_port_status_changed(struct dp_display_private *dp)
{
int rc = 0;
@@ -541,7 +541,7 @@ static int dp_display_usbpd_attention_cb(struct device *dev)
drm_dbg_dp(dp->drm_dev, "hpd_state=%d sink_request=%d\n",
dp->hpd_state, sink_request);
if (sink_request & DS_PORT_STATUS_CHANGED)
- rc = dp_display_handle_port_ststus_changed(dp);
+ rc = dp_display_handle_port_status_changed(dp);
else
rc = dp_display_handle_irq_hpd(dp);
}
@@ -588,6 +588,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
ret = dp_display_usbpd_configure_cb(&pdev->dev);
if (ret) { /* link train failed */
dp->hpd_state = ST_DISCONNECTED;
+ pm_runtime_put_sync(&pdev->dev);
} else {
dp->hpd_state = ST_MAINLINK_READY;
}
@@ -645,6 +646,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp_display_host_phy_exit(dp);
dp->hpd_state = ST_DISCONNECTED;
dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index e3f61c39df69b..80166f702a0db 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -89,7 +89,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
for (i = 0; i < n; i++) {
ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &msm_fb->iova[i]);
- drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)",
+ drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)\n",
fb->base.id, i, msm_fb->iova[i], ret);
if (ret)
return ret;
@@ -176,7 +176,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct msm_format *format;
int ret, i, n;
- drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)",
+ drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)\n",
mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
@@ -232,7 +232,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
refcount_set(&msm_fb->dirtyfb, 1);
- drm_dbg_state(dev, "create: FB ID: %d (%p)", fb->base.id, fb);
+ drm_dbg_state(dev, "create: FB ID: %d (%p)\n", fb->base.id, fb);
return fb;
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 84c21ec2ceeae..af6a6fcb11736 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -149,7 +149,7 @@ int msm_crtc_enable_vblank(struct drm_crtc *crtc)
struct msm_kms *kms = priv->kms;
if (!kms)
return -ENXIO;
- drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
+ drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
return vblank_ctrl_queue_work(priv, crtc, true);
}
@@ -160,7 +160,7 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc)
struct msm_kms *kms = priv->kms;
if (!kms)
return;
- drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
+ drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
vblank_ctrl_queue_work(priv, crtc, false);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 479effcf607e2..79cfab53f80e2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -23,6 +23,7 @@
*/
#include "nouveau_drv.h"
+#include "nouveau_bios.h"
#include "nouveau_reg.h"
#include "dispnv04/hw.h"
#include "nouveau_encoder.h"
@@ -1677,7 +1678,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
*/
if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
if (*conn == 0xf2005014 && *conf == 0xffffffff) {
- fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
return false;
}
}
@@ -1763,26 +1764,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
#ifdef __powerpc__
/* Apple iMac G4 NV17 */
if (of_machine_is_compatible("PowerMac4,5")) {
- fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
- fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
return;
}
#endif
/* Make up some sane defaults */
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
- bios->legacy.i2c_indices.crt, 1, 1);
+ bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);
if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
bios->legacy.i2c_indices.tv,
- all_heads, 0);
+ all_heads, DCB_OUTPUT_A);
else if (bios->tmds.output0_script_ptr ||
bios->tmds.output1_script_ptr)
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
bios->legacy.i2c_indices.panel,
- all_heads, 1);
+ all_heads, DCB_OUTPUT_B);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 56dcd25db1ce2..db8cbf6151129 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1256,6 +1256,8 @@ out:
drm_vma_node_unmap(&nvbo->bo.base.vma_node,
bdev->dev_mapping);
nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
+ nvbo->bo.resource->bus.offset = 0;
+ nvbo->bo.resource->bus.addr = NULL;
goto retry;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 12feecf71e752..6fb65b01d7780 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -378,9 +378,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
dma_addr_t *dma_addrs;
struct nouveau_fence *fence;
- src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
- dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
- dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
+ src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);
migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
npages);
@@ -406,11 +406,11 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
migrate_device_pages(src_pfns, dst_pfns, npages);
nouveau_dmem_fence_done(&fence);
migrate_device_finalize(src_pfns, dst_pfns, npages);
- kfree(src_pfns);
- kfree(dst_pfns);
+ kvfree(src_pfns);
+ kvfree(dst_pfns);
for (i = 0; i < npages; i++)
dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
- kfree(dma_addrs);
+ kvfree(dma_addrs);
}
void
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 7de7707ec6a89..a72c45809484a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -225,12 +225,18 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
u8 *dpcd = nv_encoder->dp.dpcd;
int ret = NOUVEAU_DP_NONE, hpd;
- /* If we've already read the DPCD on an eDP device, we don't need to
- * reread it as it won't change
+	/* eDP ports don't support hotplugging - so there's no point in probing an eDP
+	 * port more than once.
*/
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
- dpcd[DP_DPCD_REV] != 0)
- return NOUVEAU_DP_SST;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (connector->status == connector_status_connected)
+ return NOUVEAU_DP_SST;
+ else if (connector->status == connector_status_disconnected)
+ return NOUVEAU_DP_NONE;
+ }
+
+ // Ensure that the aux bus is enabled for probing
+ drm_dp_dpcd_set_powered(&nv_connector->aux, true);
mutex_lock(&nv_encoder->dp.hpd_irq_lock);
if (mstm) {
@@ -293,6 +299,13 @@ out:
if (mstm && !mstm->suspended && ret != NOUVEAU_DP_MST)
nv50_mstm_remove(mstm);
+	/* GSP doesn't like it when we try to do aux transactions on a port it considers
+	 * disconnected, and since we don't really have a use case for that anyway - just
+	 * disable the aux bus here if we've decided the connector is disconnected.
+ */
+ if (ret == NOUVEAU_DP_NONE)
+ drm_dp_dpcd_set_powered(&nv_connector->aux, false);
+
mutex_unlock(&nv_encoder->dp.hpd_irq_lock);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 0a0a11dc9ec03..ee02cd833c5e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -812,15 +812,15 @@ op_remap(struct drm_gpuva_op_remap *r,
struct drm_gpuva_op_unmap *u = r->unmap;
struct nouveau_uvma *uvma = uvma_from_va(u->va);
u64 addr = uvma->va.va.addr;
- u64 range = uvma->va.va.range;
+ u64 end = uvma->va.va.addr + uvma->va.va.range;
if (r->prev)
addr = r->prev->va.addr + r->prev->va.range;
if (r->next)
- range = r->next->va.addr - addr;
+ end = r->next->va.addr;
- op_unmap_range(u, addr, range);
+ op_unmap_range(u, addr, end - addr);
}
static int
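
For illustration (not part of the patch), suppose the original mapping covers [0x1000, 0x5000) and a remap keeps a prev chunk [0x1000, 0x2000) with no next chunk: the old code advanced addr to 0x2000 but left range at the full 0x4000, so it would unmap [0x2000, 0x6000) and overshoot the mapping; computing end = 0x5000 first and passing end - addr = 0x3000 unmaps exactly [0x2000, 0x5000).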
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 986e8d547c942..060c74a80eb14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -420,7 +420,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
return ret;
} else {
ret = nvkm_memory_map(gr->attrib_cb, 0, chan->vmm, chan->attrib_cb,
- &args, sizeof(args));;
+ &args, sizeof(args));
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
index 4bf486b571013..cb05f7f48a98b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
@@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name)
return ERR_PTR(-EINVAL);
}
+static void of_fini(void *p)
+{
+ kfree(p);
+}
+
const struct nvbios_source
nvbios_of = {
.name = "OpenFirmware",
.init = of_init,
- .fini = (void(*)(void *))kfree,
+ .fini = of_fini,
.read = of_read,
.size = of_size,
.rw = false,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
index 7bcbc4895ec22..271bfa038f5bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
@@ -25,6 +25,7 @@
#include <subdev/bios.h>
#include <subdev/bios/init.h>
+#include <subdev/gsp.h>
void
gm107_devinit_disable(struct nvkm_devinit *init)
@@ -33,10 +34,13 @@ gm107_devinit_disable(struct nvkm_devinit *init)
u32 r021c00 = nvkm_rd32(device, 0x021c00);
u32 r021c04 = nvkm_rd32(device, 0x021c04);
- if (r021c00 & 0x00000001)
- nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
- if (r021c00 & 0x00000004)
- nvkm_subdev_disable(device, NVKM_ENGINE_CE, 2);
+ /* gsp only wants to enable/disable display */
+ if (!nvkm_gsp_rm(device->gsp)) {
+ if (r021c00 & 0x00000001)
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
+ if (r021c00 & 0x00000004)
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 2);
+ }
if (r021c04 & 0x00000001)
nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index a73a5b5897904..9858c1438aa7f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -1112,7 +1112,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
rpc->numEntries = NV_GSP_REG_NUM_ENTRIES;
str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]);
- strings = (char *)&rpc->entries[NV_GSP_REG_NUM_ENTRIES];
+ strings = (char *)rpc + str_offset;
for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) {
int name_len = strlen(r535_registry_entries[i].name) + 1;
@@ -1430,6 +1430,10 @@ r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc)
/**
* r535_gsp_msg_run_cpu_sequencer() -- process I/O commands from the GSP
+ * @priv: gsp pointer
+ * @fn: function number (ignored)
+ * @repv: pointer to libos print RPC
+ * @repc: message size
*
* The GSP sequencer is a list of I/O commands that the GSP can send to
* the driver to perform for various purposes. The most common usage is to
@@ -1781,6 +1785,7 @@ static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
/**
* r535_gsp_libos_init() -- create the libos arguments structure
+ * @gsp: gsp pointer
*
* The logging buffers are byte queues that contain encoded printf-like
* messages from GSP-RM. They need to be decoded by a special application
@@ -1920,6 +1925,10 @@ nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
/**
* nvkm_gsp_radix3_sg - build a radix3 table from a S/G list
+ * @gsp: gsp pointer
+ * @sgt: S/G list to traverse
+ * @size: size of the image, in bytes
+ * @rx3: radix3 array to update
*
* The GSP uses a three-level page table, called radix3, to map the firmware.
* Each 64-bit "pointer" in the table is either the bus address of an entry in
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index a7f3fc342d87e..dd5b5a17ece0b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -222,8 +222,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
void __iomem *map = NULL;
/* Already mapped? */
- if (refcount_inc_not_zero(&iobj->maps))
+ if (refcount_inc_not_zero(&iobj->maps)) {
+		/* read barrier matches the wmb on refcount set */
+ smp_rmb();
return iobj->map;
+ }
/* Take the lock, and re-check that another thread hasn't
* already mapped the object in the meantime.
@@ -250,6 +253,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
iobj->base.memory.ptrs = &nv50_instobj_fast;
else
iobj->base.memory.ptrs = &nv50_instobj_slow;
+ /* barrier to ensure the ptrs are written before refcount is set */
+ smp_wmb();
refcount_set(&iobj->maps, 1);
}
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
index cb7406d744669..c39fe0fc5d69c 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
@@ -614,8 +614,6 @@ static void nt36672e_panel_remove(struct mipi_dsi_device *dsi)
struct nt36672e_panel *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(ctx->dsi);
- mipi_dsi_device_unregister(ctx->dsi);
-
drm_panel_remove(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index 775144695283f..b15ca56a09a74 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -253,8 +253,6 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(ctx->dsi);
- mipi_dsi_device_unregister(ctx->dsi);
-
drm_panel_remove(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 9063ce2546422..fd8e44992184f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -441,19 +441,19 @@ void panfrost_gpu_power_off(struct panfrost_device *pfdev)
gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO,
- val, !val, 1, 1000);
+ val, !val, 1, 2000);
if (ret)
dev_err(pfdev->dev, "shader power transition timeout");
gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO,
- val, !val, 1, 1000);
+ val, !val, 1, 2000);
if (ret)
dev_err(pfdev->dev, "tiler power transition timeout");
gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present);
ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO,
- val, !val, 0, 1000);
+ val, !val, 0, 2000);
if (ret)
dev_err(pfdev->dev, "l2 power transition timeout");
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index f38385fe76bbb..b91019cd5acb1 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -502,11 +502,18 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
mapping_set_unevictable(mapping);
for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
+ /* Can happen if the last fault only partially filled this
+ * section of the pages array before failing. In that case
+ * we skip already filled pages.
+ */
+ if (pages[i])
+ continue;
+
pages[i] = shmem_read_mapping_page(mapping, i);
if (IS_ERR(pages[i])) {
ret = PTR_ERR(pages[i]);
pages[i] = NULL;
- goto err_pages;
+ goto err_unlock;
}
}
@@ -514,7 +521,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
if (ret)
- goto err_pages;
+ goto err_unlock;
ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
if (ret)
@@ -537,8 +544,6 @@ out:
err_map:
sg_free_table(sgt);
-err_pages:
- drm_gem_shmem_put_pages(&bo->base);
err_unlock:
dma_resv_unlock(obj->resv);
err_bo:
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 281edab518cdd..d6ea01f3797be 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -421,7 +421,6 @@ int qxl_surface_id_alloc(struct qxl_device *qdev,
{
uint32_t handle;
int idr_ret;
- int count = 0;
again:
idr_preload(GFP_ATOMIC);
spin_lock(&qdev->surf_id_idr_lock);
@@ -433,7 +432,6 @@ again:
handle = idr_ret;
if (handle >= qdev->rom->n_surfaces) {
- count++;
spin_lock(&qdev->surf_id_idr_lock);
idr_remove(&qdev->surf_id_idr, handle);
spin_unlock(&qdev->surf_id_idr_lock);
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index dd0f834d881ce..506ae1f5e099f 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -145,7 +145,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
struct qxl_release *release;
struct qxl_bo *cmd_bo;
void *fb_cmd;
- int i, ret, num_relocs;
+ int i, ret;
int unwritten;
switch (cmd->type) {
@@ -200,7 +200,6 @@ static int qxl_process_single_command(struct qxl_device *qdev,
}
/* fill out reloc info structs */
- num_relocs = 0;
for (i = 0; i < cmd->relocs_num; ++i) {
struct drm_qxl_reloc reloc;
struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);
@@ -230,7 +229,6 @@ static int qxl_process_single_command(struct qxl_device *qdev,
reloc_info[i].dst_bo = cmd_bo;
reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
}
- num_relocs++;
/* reserve and validate the reloc dst bo */
if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 368d26da0d6a2..9febc8b73f09e 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -58,16 +58,56 @@ static long qxl_fence_wait(struct dma_fence *fence, bool intr,
signed long timeout)
{
struct qxl_device *qdev;
+ struct qxl_release *release;
+ int count = 0, sc = 0;
+ bool have_drawable_releases;
unsigned long cur, end = jiffies + timeout;
qdev = container_of(fence->lock, struct qxl_device, release_lock);
+ release = container_of(fence, struct qxl_release, base);
+ have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;
- if (!wait_event_timeout(qdev->release_event,
- (dma_fence_is_signaled(fence) ||
- (qxl_io_notify_oom(qdev), 0)),
- timeout))
- return 0;
+retry:
+ sc++;
+
+ if (dma_fence_is_signaled(fence))
+ goto signaled;
+
+ qxl_io_notify_oom(qdev);
+
+ for (count = 0; count < 11; count++) {
+ if (!qxl_queue_garbage_collect(qdev, true))
+ break;
+
+ if (dma_fence_is_signaled(fence))
+ goto signaled;
+ }
+
+ if (dma_fence_is_signaled(fence))
+ goto signaled;
+
+ if (have_drawable_releases || sc < 4) {
+ if (sc > 2)
+ /* back off */
+ usleep_range(500, 1000);
+
+ if (time_after(jiffies, end))
+ return 0;
+
+ if (have_drawable_releases && sc > 300) {
+ DMA_FENCE_WARN(fence,
+ "failed to wait on release %llu after spincount %d\n",
+ fence->context & ~0xf0000000, sc);
+ goto signaled;
+ }
+ goto retry;
+ }
+ /*
+	 * Note: the original sync_obj_wait gave up after 3 spins when
+ * have_drawable_releases is not set.
+ */
+signaled:
cur = jiffies;
if (time_after(cur, end))
return 0;
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
index 94947229888ba..b7f22597ee95e 100644
--- a/drivers/gpu/drm/radeon/pptable.h
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -424,7 +424,7 @@ typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
typedef struct _ATOM_PPLIB_STATE_V2
{
//number of valid dpm levels in this state; Driver uses it to calculate the whole
- //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+ //size of the state: struct_size(ATOM_PPLIB_STATE_V2, clockInfoIndex, ucNumDPMLevels)
UCHAR ucNumDPMLevels;
//a index to the array of nonClockInfos
@@ -432,14 +432,14 @@ typedef struct _ATOM_PPLIB_STATE_V2
/**
* Driver will read the first ucNumDPMLevels in this array
*/
- UCHAR clockInfoIndex[1];
+ UCHAR clockInfoIndex[] __counted_by(ucNumDPMLevels);
} ATOM_PPLIB_STATE_V2;
typedef struct _StateArray{
//how many states we have
UCHAR ucNumEntries;
- ATOM_PPLIB_STATE_V2 states[1];
+ ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries);
}StateArray;
@@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{
//sizeof(ATOM_PPLIB_CLOCK_INFO)
UCHAR ucEntrySize;
- UCHAR clockInfo[1];
+ UCHAR clockInfo[] __counted_by(ucNumEntries);
}ClockInfoArray;
typedef struct _NonClockInfoArray{
@@ -460,7 +460,7 @@ typedef struct _NonClockInfoArray{
//sizeof(ATOM_PPLIB_NONCLOCK_INFO)
UCHAR ucEntrySize;
- ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries);
}NonClockInfoArray;
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
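
For illustration (not part of the patch), the struct_size() form referenced in the comment above reduces to offsetof(ATOM_PPLIB_STATE_V2, clockInfoIndex) + ucNumDPMLevels * sizeof(UCHAR) once clockInfoIndex is a flexible array member, replacing the old sizeof(struct) + (n - 1) * sizeof(UCHAR) idiom. A userspace mock of the same arithmetic, with a hypothetical level count:

#include <stddef.h>
#include <stdio.h>

struct state_v2 {
	unsigned char ucNumDPMLevels;
	unsigned char nonClockInfoIndex;
	unsigned char clockInfoIndex[];	/* flexible array member */
};

int main(void)
{
	size_t n = 4;	/* hypothetical number of DPM levels */
	size_t sz = offsetof(struct state_v2, clockInfoIndex) + n * sizeof(unsigned char);

	printf("state size for %zu levels: %zu bytes\n", n, sz);	/* prints 6 */
	return 0;
}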
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index bb1f0a3371ab5..10793a433bf58 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -923,8 +923,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
for (i = 0; i < max_device; i++) {
- ATOM_CONNECTOR_INFO_I2C ci =
- supported_devices->info.asConnInfo[i];
+ ATOM_CONNECTOR_INFO_I2C ci;
+
+ if (frev > 1)
+ ci = supported_devices->info_2d1.asConnInfo[i];
+ else
+ ci = supported_devices->info.asConnInfo[i];
bios_connectors[i].valid = false;
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 48170694ac6b8..18efb3fe1c000 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -17,9 +17,7 @@
static const uint32_t formats_cluster[] = {
DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_ARGB2101010,
DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ABGR2101010,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XBGR8888,
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 3c4f5a392b064..58c8161289fea 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -71,13 +71,19 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
entity->guilty = guilty;
entity->num_sched_list = num_sched_list;
entity->priority = priority;
+ /*
+ * It's perfectly valid to initialize an entity without having a valid
+ * scheduler attached. It's just not valid to use the scheduler before it
+ * is initialized itself.
+ */
entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
RCU_INIT_POINTER(entity->last_scheduled, NULL);
RB_CLEAR_NODE(&entity->rb_tree_node);
- if (!sched_list[0]->sched_rq) {
- /* Warn drivers not to do this and to fix their DRM
- * calling order.
+ if (num_sched_list && !sched_list[0]->sched_rq) {
+	if (num_sched_list && !sched_list[0]->sched_rq) {
+		/* Every entry covered by num_sched_list
+		 * should be non-NULL, so warn drivers
+		 * not to do this and to fix their DRM calling order.
*/
pr_warn("%s: called with uninitialized scheduler\n", __func__);
} else if (num_sched_list) {
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 69001a3dc0df2..2d1880c61b50d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -166,7 +166,7 @@ sun4i_hdmi_connector_clock_valid(const struct drm_connector *connector,
unsigned long long clock)
{
const struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
- unsigned long diff = clock / 200; /* +-0.5% allowed by HDMI spec */
+ unsigned long diff = div_u64(clock, 200); /* +-0.5% allowed by HDMI spec */
long rounded_rate;
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 112438d965ffb..6e1fd6985ffcb 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -288,17 +288,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
enum ttm_caching caching,
unsigned int order)
{
- if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
+ if (pool->use_dma_alloc)
return &pool->caching[caching].orders[order];
#ifdef CONFIG_X86
switch (caching) {
case ttm_write_combined:
+ if (pool->nid != NUMA_NO_NODE)
+ return &pool->caching[caching].orders[order];
+
if (pool->use_dma32)
return &global_dma32_write_combined[order];
return &global_write_combined[order];
case ttm_uncached:
+ if (pool->nid != NUMA_NO_NODE)
+ return &pool->caching[caching].orders[order];
+
if (pool->use_dma32)
return &global_dma32_uncached[order];
@@ -566,11 +572,17 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
pool->use_dma_alloc = use_dma_alloc;
pool->use_dma32 = use_dma32;
- if (use_dma_alloc || nid != NUMA_NO_NODE) {
- for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j < NR_PAGE_ORDERS; ++j)
- ttm_pool_type_init(&pool->caching[i].orders[j],
- pool, i, j);
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+ struct ttm_pool_type *pt;
+
+ /* Initialize only pool types which are actually used */
+ pt = ttm_pool_select_type(pool, i, j);
+ if (pt != &pool->caching[i].orders[j])
+ continue;
+
+ ttm_pool_type_init(pt, pool, i, j);
+ }
}
}
EXPORT_SYMBOL(ttm_pool_init);
@@ -599,10 +611,16 @@ void ttm_pool_fini(struct ttm_pool *pool)
{
unsigned int i, j;
- if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
- for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j < NR_PAGE_ORDERS; ++j)
- ttm_pool_type_fini(&pool->caching[i].orders[j]);
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+ struct ttm_pool_type *pt;
+
+ pt = ttm_pool_select_type(pool, i, j);
+ if (pt != &pool->caching[i].orders[j])
+ continue;
+
+ ttm_pool_type_fini(pt);
+ }
}
/* We removed the pool types from the LRU, but we need to also make sure
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 2e04f6cb661e4..ce6b2fb341d1f 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -105,7 +105,6 @@ v3d_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->bin_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_BIN];
- file->enabled_ns[V3D_BIN] += local_clock() - file->start_ns[V3D_BIN];
file->jobs_sent[V3D_BIN]++;
v3d->queue[V3D_BIN].jobs_sent++;
@@ -126,7 +125,6 @@ v3d_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->render_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_RENDER];
- file->enabled_ns[V3D_RENDER] += local_clock() - file->start_ns[V3D_RENDER];
file->jobs_sent[V3D_RENDER]++;
v3d->queue[V3D_RENDER].jobs_sent++;
@@ -147,7 +145,6 @@ v3d_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->csd_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_CSD];
- file->enabled_ns[V3D_CSD] += local_clock() - file->start_ns[V3D_CSD];
file->jobs_sent[V3D_CSD]++;
v3d->queue[V3D_CSD].jobs_sent++;
@@ -195,7 +192,6 @@ v3d_hub_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->tfu_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_TFU];
- file->enabled_ns[V3D_TFU] += local_clock() - file->start_ns[V3D_TFU];
file->jobs_sent[V3D_TFU]++;
v3d->queue[V3D_TFU].jobs_sent++;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 34f807ed1c315..d8751ea203032 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -509,7 +509,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, vc4_hdmi->ddc);
cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
if (!edid)
- return -ENODEV;
+ return 0;
drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
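
For context, a connector's .get_modes() hook returns the number of modes it added, so returning 0 instead of -ENODEV simply leaves the connector without modes when no EDID is available. A hedged sketch of that contract (the explicit ddc parameter is only for illustration; the real hook takes just the connector):

#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <linux/slab.h>

static int example_connector_get_modes(struct drm_connector *connector,
				       struct i2c_adapter *ddc)
{
	struct edid *edid = drm_get_edid(connector, ddc);
	int count;

	if (!edid)
		return 0;	/* no EDID: expose zero modes, not an error */

	drm_connector_update_edid_property(connector, edid);
	count = drm_add_edid_modes(connector, edid);
	kfree(edid);
	return count;
}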
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index c52c7bf1485b1..717d624e9a052 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -456,8 +456,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
.no_wait_gpu = false
};
u32 j, initial_line = dst_offset / dst_stride;
- struct vmw_bo_blit_line_data d;
+ struct vmw_bo_blit_line_data d = {0};
int ret = 0;
+ struct page **dst_pages = NULL;
+ struct page **src_pages = NULL;
/* Buffer objects need to be either pinned or reserved: */
if (!(dst->pin_count))
@@ -477,12 +479,35 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
return ret;
}
+ if (!src->ttm->pages && src->ttm->sg) {
+ src_pages = kvmalloc_array(src->ttm->num_pages,
+ sizeof(struct page *), GFP_KERNEL);
+ if (!src_pages)
+ return -ENOMEM;
+ ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
+ src->ttm->num_pages);
+ if (ret)
+ goto out;
+ }
+ if (!dst->ttm->pages && dst->ttm->sg) {
+ dst_pages = kvmalloc_array(dst->ttm->num_pages,
+ sizeof(struct page *), GFP_KERNEL);
+ if (!dst_pages) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = drm_prime_sg_to_page_array(dst->ttm->sg, dst_pages,
+ dst->ttm->num_pages);
+ if (ret)
+ goto out;
+ }
+
d.mapped_dst = 0;
d.mapped_src = 0;
d.dst_addr = NULL;
d.src_addr = NULL;
- d.dst_pages = dst->ttm->pages;
- d.src_pages = src->ttm->pages;
+ d.dst_pages = dst->ttm->pages ? dst->ttm->pages : dst_pages;
+ d.src_pages = src->ttm->pages ? src->ttm->pages : src_pages;
d.dst_num_pages = PFN_UP(dst->resource->size);
d.src_num_pages = PFN_UP(src->resource->size);
d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
@@ -504,6 +529,10 @@ out:
kunmap_atomic(d.src_addr);
if (d.dst_addr)
kunmap_atomic(d.dst_addr);
+ if (src_pages)
+ kvfree(src_pages);
+ if (dst_pages)
+ kvfree(dst_pages);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index bfd41ce3c8f4f..e5eb21a471a60 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -377,7 +377,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
{
struct ttm_operation_ctx ctx = {
.interruptible = params->bo_type != ttm_bo_type_kernel,
- .no_wait_gpu = false
+ .no_wait_gpu = false,
+ .resv = params->resv,
};
struct ttm_device *bdev = &dev_priv->bdev;
struct drm_device *vdev = &dev_priv->drm;
@@ -394,8 +395,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
- &vmw_bo->placement, 0, &ctx, NULL,
- NULL, destroy);
+ &vmw_bo->placement, 0, &ctx,
+ params->sg, params->resv, destroy);
if (unlikely(ret))
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 0d496dc9c6af7..f349642e6190d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -55,6 +55,8 @@ struct vmw_bo_params {
enum ttm_bo_type bo_type;
size_t size;
bool pin;
+ struct dma_resv *resv;
+ struct sg_table *sg;
};
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index d3e308fdfd5be..58fb40c93100a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -666,11 +666,12 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
[vmw_dma_map_populate] = "Caching DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
- /* TTM currently doesn't fully support SEV encryption. */
- if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
- return -EINVAL;
-
- if (vmw_force_coherent)
+ /*
+ * When running with SEV we always want dma mappings, because
+ * otherwise ttm tt pool pages will bounce through swiotlb and run
+ * it out of available space.
+ */
+ if (vmw_force_coherent || cc_platform_has(CC_ATTR_MEM_ENCRYPT))
dev_priv->map_mode = vmw_dma_alloc_coherent;
else if (vmw_restrict_iommu)
dev_priv->map_mode = vmw_dma_map_bind;
@@ -1444,12 +1445,15 @@ static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw)
root, "system_ttm");
ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_VRAM),
root, "vram_ttm");
- ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
- root, "gmr_ttm");
- ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
- root, "mob_ttm");
- ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
- root, "system_mob_ttm");
+ if (vmw->has_gmr)
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
+ root, "gmr_ttm");
+ if (vmw->has_mob) {
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
+ root, "mob_ttm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
+ root, "system_mob_ttm");
+ }
}
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
@@ -1624,6 +1628,7 @@ static const struct drm_driver driver = {
.prime_fd_to_handle = vmw_prime_fd_to_handle,
.prime_handle_to_fd = vmw_prime_handle_to_fd,
+ .gem_prime_import_sg_table = vmw_prime_import_sg_table,
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 12efecc17df66..b019a1a1787af 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1130,6 +1130,9 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t handle, uint32_t flags,
int *prime_fd);
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *table);
/*
* MemoryOBject management - vmwgfx_mob.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index 12787bb9c111d..d6bcaf078b1f4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -149,6 +149,38 @@ out_no_bo:
return ret;
}
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *table)
+{
+ int ret;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_gem_object *gem = NULL;
+ struct vmw_bo *vbo;
+ struct vmw_bo_params params = {
+ .domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
+ .bo_type = ttm_bo_type_sg,
+ .size = attach->dmabuf->size,
+ .pin = false,
+ .resv = attach->dmabuf->resv,
+ .sg = table,
+
+ };
+
+ dma_resv_lock(params.resv, NULL);
+
+ ret = vmw_bo_create(dev_priv, &params, &vbo);
+ if (ret != 0)
+ goto out_no_bo;
+
+ vbo->tbo.base.funcs = &vmw_gem_object_funcs;
+
+ gem = &vbo->tbo.base;
+out_no_bo:
+ dma_resv_unlock(params.resv);
+ return gem;
+}
int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index cd4925346ed45..84ae4e10a2ebe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -933,6 +933,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
@@ -940,9 +941,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
bool has_primary = new_state->plane_mask &
drm_plane_mask(crtc->primary);
- /* We always want to have an active plane with an active CRTC */
- if (has_primary != new_state->enable)
- return -EINVAL;
+ /*
+ * This is fine in general, but broken userspace might expect
+ * some actual rendering, so give a clue as to why it's blank.
+ */
+ if (new_state->enable && !has_primary)
+ drm_dbg_driver(&vmw->drm,
+ "CRTC without a primary plane will be blank.\n");
if (new_state->connector_mask != connector_mask &&
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index a94947b588e85..19a843da87b78 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -243,10 +243,10 @@ struct vmw_framebuffer_bo {
static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB1555,
};
static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index 2d72a5ee7c0c7..c99cad4449915 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -75,8 +75,12 @@ int vmw_prime_fd_to_handle(struct drm_device *dev,
int fd, u32 *handle)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret = ttm_prime_fd_to_handle(tfile, fd, handle);
- return ttm_prime_fd_to_handle(tfile, fd, handle);
+ if (ret)
+ ret = drm_gem_prime_fd_to_handle(dev, file_priv, fd, handle);
+
+ return ret;
}
int vmw_prime_handle_to_fd(struct drm_device *dev,
@@ -85,5 +89,12 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
int *prime_fd)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+ int ret;
+
+ if (handle > VMWGFX_NUM_MOB)
+ ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+ else
+ ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
+
+ return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 4d23d0a70bcb7..621d98b376bbb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -188,13 +188,18 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
switch (dev_priv->map_mode) {
case vmw_dma_map_bind:
case vmw_dma_map_populate:
- vsgt->sgt = &vmw_tt->sgt;
- ret = sg_alloc_table_from_pages_segment(
- &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
- (unsigned long)vsgt->num_pages << PAGE_SHIFT,
- dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
- if (ret)
- goto out_sg_alloc_fail;
+ if (vmw_tt->dma_ttm.page_flags & TTM_TT_FLAG_EXTERNAL) {
+ vsgt->sgt = vmw_tt->dma_ttm.sg;
+ } else {
+ vsgt->sgt = &vmw_tt->sgt;
+ ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
+ vsgt->pages, vsgt->num_pages, 0,
+ (unsigned long)vsgt->num_pages << PAGE_SHIFT,
+ dma_get_max_seg_size(dev_priv->drm.dev),
+ GFP_KERNEL);
+ if (ret)
+ goto out_sg_alloc_fail;
+ }
ret = vmw_ttm_map_for_dma(vmw_tt);
if (unlikely(ret != 0))
@@ -209,8 +214,9 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
return 0;
out_map_fail:
- sg_free_table(vmw_tt->vsgt.sgt);
- vmw_tt->vsgt.sgt = NULL;
+ drm_warn(&dev_priv->drm, "VSG table map failed!");
+ sg_free_table(vsgt->sgt);
+ vsgt->sgt = NULL;
out_sg_alloc_fail:
return ret;
}
@@ -356,15 +362,17 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
static int vmw_ttm_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- int ret;
+ bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
- /* TODO: maybe completely drop this ? */
if (ttm_tt_is_populated(ttm))
return 0;
- ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
+ if (external && ttm->sg)
+ return drm_prime_sg_to_dma_addr_array(ttm->sg,
+ ttm->dma_address,
+ ttm->num_pages);
- return ret;
+ return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}
static void vmw_ttm_unpopulate(struct ttm_device *bdev,
@@ -372,6 +380,10 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
dma_ttm);
+ bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+
+ if (external)
+ return;
vmw_ttm_unbind(bdev, ttm);
@@ -390,6 +402,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
{
struct vmw_ttm_tt *vmw_be;
int ret;
+ bool external = bo->type == ttm_bo_type_sg;
vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be)
@@ -398,7 +411,10 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
vmw_be->mob = NULL;
- if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+ if (external)
+ page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+
+ if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
ttm_cached);
else
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 5a428ca00f10f..c29a850859ad5 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -172,8 +172,8 @@ subdir-ccflags-$(CONFIG_DRM_XE_DISPLAY) += \
-Ddrm_i915_gem_object=xe_bo \
-Ddrm_i915_private=xe_device
-CFLAGS_i915-display/intel_fbdev.o = $(call cc-disable-warning, override-init)
-CFLAGS_i915-display/intel_display_device.o = $(call cc-disable-warning, override-init)
+CFLAGS_i915-display/intel_fbdev.o = -Wno-override-init
+CFLAGS_i915-display/intel_display_device.o = -Wno-override-init
# Rule to build SOC code shared with i915
$(obj)/i915-soc/%.o: $(srctree)/drivers/gpu/drm/i915/soc/%.c FORCE
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c
index b21da7b745a5e..a9c1f9885c6bb 100644
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c
@@ -31,7 +31,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);
if (ret)
- return ret;
+ goto err;
if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
/*
@@ -42,12 +42,16 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
*/
if (XE_IOCTL_DBG(i915, !list_empty(&bo->ttm.base.gpuva.list))) {
ttm_bo_unreserve(&bo->ttm);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
bo->flags |= XE_BO_SCANOUT_BIT;
}
ttm_bo_unreserve(&bo->ttm);
+ return 0;
+err:
+ xe_bo_put(bo);
return ret;
}
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index e4db069f0db3f..6ec375c1c4b6c 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -108,11 +108,6 @@ int xe_display_create(struct xe_device *xe)
xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
drmm_mutex_init(&xe->drm, &xe->sb_lock);
- drmm_mutex_init(&xe->drm, &xe->display.backlight.lock);
- drmm_mutex_init(&xe->drm, &xe->display.audio.mutex);
- drmm_mutex_init(&xe->drm, &xe->display.wm.wm_mutex);
- drmm_mutex_init(&xe->drm, &xe->display.pps.mutex);
- drmm_mutex_init(&xe->drm, &xe->display.hdcp.hdcp_mutex);
xe->enabled_irq_mask = ~0;
err = drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index 0b1266c88a6af..deddc8be48c0a 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -125,7 +125,7 @@
#define RING_EXECLIST_STATUS_LO(base) XE_REG((base) + 0x234)
#define RING_EXECLIST_STATUS_HI(base) XE_REG((base) + 0x234 + 4)
-#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244)
+#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244, XE_REG_OPTION_MASKED)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 6603a0ea79c5a..9c0837b6fdfc8 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -144,9 +144,6 @@ static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
.mem_type = XE_PL_TT,
};
*c += 1;
-
- if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
- bo->props.preferred_mem_type = XE_PL_TT;
}
}
@@ -181,25 +178,15 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
}
places[*c] = place;
*c += 1;
-
- if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
- bo->props.preferred_mem_type = mem_type;
}
static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
u32 bo_flags, u32 *c)
{
- if (bo->props.preferred_gt == XE_GT1) {
- if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
- if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
- } else {
- if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
- if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
- }
+ if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
+ add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
+ if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
+ add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
}
static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
@@ -223,17 +210,8 @@ static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
{
u32 c = 0;
- bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
-
- /* The order of placements should indicate preferred location */
-
- if (bo->props.preferred_mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM) {
- try_add_system(xe, bo, bo_flags, &c);
- try_add_vram(xe, bo, bo_flags, &c);
- } else {
- try_add_vram(xe, bo, bo_flags, &c);
- try_add_system(xe, bo, bo_flags, &c);
- }
+ try_add_vram(xe, bo, bo_flags, &c);
+ try_add_system(xe, bo, bo_flags, &c);
try_add_stolen(xe, bo, bo_flags, &c);
if (!c)
@@ -1126,13 +1104,6 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
}
}
-static bool should_migrate_to_system(struct xe_bo *bo)
-{
- struct xe_device *xe = xe_bo_device(bo);
-
- return xe_device_in_fault_mode(xe) && bo->props.cpu_atomic;
-}
-
static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
@@ -1141,7 +1112,7 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
struct xe_bo *bo = ttm_to_xe_bo(tbo);
bool needs_rpm = bo->flags & XE_BO_CREATE_VRAM_MASK;
vm_fault_t ret;
- int idx, r = 0;
+ int idx;
if (needs_rpm)
xe_device_mem_access_get(xe);
@@ -1153,17 +1124,8 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
if (drm_dev_enter(ddev, &idx)) {
trace_xe_bo_cpu_fault(bo);
- if (should_migrate_to_system(bo)) {
- r = xe_bo_migrate(bo, XE_PL_TT);
- if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
- ret = VM_FAULT_NOPAGE;
- else if (r)
- ret = VM_FAULT_SIGBUS;
- }
- if (!ret)
- ret = ttm_bo_vm_fault_reserved(vmf,
- vmf->vma->vm_page_prot,
- TTM_BO_VM_NUM_PREFAULT);
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT);
drm_dev_exit(idx);
} else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
@@ -1291,9 +1253,6 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
bo->flags = flags;
bo->cpu_caching = cpu_caching;
bo->ttm.base.funcs = &xe_gem_object_funcs;
- bo->props.preferred_mem_class = XE_BO_PROPS_INVALID;
- bo->props.preferred_gt = XE_BO_PROPS_INVALID;
- bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
bo->ttm.priority = XE_BO_PRIORITY_NORMAL;
INIT_LIST_HEAD(&bo->pinned_link);
#ifdef CONFIG_PROC_FS
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 14ef13b7b421f..86422e113d396 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -56,25 +56,6 @@ struct xe_bo {
*/
struct list_head client_link;
#endif
- /** @props: BO user controlled properties */
- struct {
- /** @preferred_mem: preferred memory class for this BO */
- s16 preferred_mem_class;
- /** @prefered_gt: preferred GT for this BO */
- s16 preferred_gt;
- /** @preferred_mem_type: preferred memory type */
- s32 preferred_mem_type;
- /**
- * @cpu_atomic: the CPU expects to do atomics operations to
- * this BO
- */
- bool cpu_atomic;
- /**
- * @device_atomic: the device expects to do atomics operations
- * to this BO
- */
- bool device_atomic;
- } props;
/** @freed: List node for delayed put. */
struct llist_node freed;
/** @created: Whether the bo has passed initial creation */
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index ca85e81fdb443..d32ff3857e658 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -193,6 +193,9 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
{
struct xe_device *xe = to_xe_device(dev);
+ if (xe->preempt_fence_wq)
+ destroy_workqueue(xe->preempt_fence_wq);
+
if (xe->ordered_wq)
destroy_workqueue(xe->ordered_wq);
@@ -258,9 +261,15 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
INIT_LIST_HEAD(&xe->pinned.external_vram);
INIT_LIST_HEAD(&xe->pinned.evicted);
+ xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", 0);
xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
- if (!xe->ordered_wq || !xe->unordered_wq) {
+ if (!xe->ordered_wq || !xe->unordered_wq ||
+ !xe->preempt_fence_wq) {
+ /*
+ * Cleanup done in xe_device_destroy via
+ * drmm_add_action_or_reset registered above
+ */
drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
err = -ENOMEM;
goto err;
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 14be34d9f5434..d413bc2c6be5a 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -58,7 +58,7 @@ static inline struct xe_tile *xe_device_get_root_tile(struct xe_device *xe)
static inline struct xe_gt *xe_tile_get_gt(struct xe_tile *tile, u8 gt_id)
{
- if (drm_WARN_ON(&tile_to_xe(tile)->drm, gt_id > XE_MAX_GT_PER_TILE))
+ if (drm_WARN_ON(&tile_to_xe(tile)->drm, gt_id >= XE_MAX_GT_PER_TILE))
gt_id = 0;
return gt_id ? tile->media_gt : tile->primary_gt;
@@ -79,7 +79,7 @@ static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
if (MEDIA_VER(xe) >= 13) {
gt = xe_tile_get_gt(root_tile, gt_id);
} else {
- if (drm_WARN_ON(&xe->drm, gt_id > XE_MAX_TILES_PER_DEVICE))
+ if (drm_WARN_ON(&xe->drm, gt_id >= XE_MAX_TILES_PER_DEVICE))
gt_id = 0;
gt = xe->tiles[gt_id].primary_gt;
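
Both hunks above are off-by-one fixes: for a table of N entries the valid indices are 0..N-1, so the reject test has to use >= rather than >. A tiny self-contained illustration with made-up names:

struct xe_gt;

#define EXAMPLE_MAX_GT_PER_TILE 2

static struct xe_gt *example_gt_lookup(struct xe_gt *gts[EXAMPLE_MAX_GT_PER_TILE],
				       unsigned int gt_id)
{
	/* gt_id == EXAMPLE_MAX_GT_PER_TILE is already out of range,
	 * hence the >= comparison. */
	if (gt_id >= EXAMPLE_MAX_GT_PER_TILE)
		return NULL;

	return gts[gt_id];
}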
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 9785eef2e5a4e..8e3a222b41cf0 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -363,6 +363,9 @@ struct xe_device {
/** @ufence_wq: user fence wait queue */
wait_queue_head_t ufence_wq;
+ /** @preempt_fence_wq: used to serialize preempt fences */
+ struct workqueue_struct *preempt_fence_wq;
+
/** @ordered_wq: used to serialize compute mode resume */
struct workqueue_struct *ordered_wq;
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 952496c6260df..cc5e0f75de3c7 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -94,48 +94,16 @@
* Unlock all
*/
+/*
+ * Add validation and rebinding to the drm_exec locking loop, since both can
+ * trigger eviction which may require sleeping dma_resv locks.
+ */
static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec)
{
struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm);
- struct drm_gem_object *obj;
- unsigned long index;
- int num_fences;
- int ret;
-
- ret = drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
- if (ret)
- return ret;
-
- /*
- * 1 fence slot for the final submit, and 1 more for every per-tile for
- * GPU bind and 1 extra for CPU bind. Note that there are potentially
- * many vma per object/dma-resv, however the fence slot will just be
- * re-used, since they are largely the same timeline and the seqno
- * should be in order. In the case of CPU bind there is dummy fence used
- * for all CPU binds, so no need to have a per-tile slot for that.
- */
- num_fences = 1 + 1 + vm->xe->info.tile_count;
- /*
- * We don't know upfront exactly how many fence slots we will need at
- * the start of the exec, since the TTM bo_validate above can consume
- * numerous fence slots. Also due to how the dma_resv_reserve_fences()
- * works it only ensures that at least that many fence slots are
- * available i.e if there are already 10 slots available and we reserve
- * two more, it can just noop without reserving anything. With this it
- * is quite possible that TTM steals some of the fence slots and then
- * when it comes time to do the vma binding and final exec stage we are
- * lacking enough fence slots, leading to some nasty BUG_ON() when
- * adding the fences. Hence just add our own fences here, after the
- * validate stage.
- */
- drm_exec_for_each_locked_object(&vm_exec->exec, index, obj) {
- ret = dma_resv_reserve_fences(obj->resv, num_fences);
- if (ret)
- return ret;
- }
-
- return 0;
+ /* The fence slot added here is intended for the exec sched job. */
+ return xe_vm_validate_rebind(vm, &vm_exec->exec, 1);
}
int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -152,7 +120,6 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct drm_exec *exec = &vm_exec.exec;
u32 i, num_syncs = 0, num_ufence = 0;
struct xe_sched_job *job;
- struct dma_fence *rebind_fence;
struct xe_vm *vm;
bool write_locked, skip_retry = false;
ktime_t end = 0;
@@ -235,6 +202,29 @@ retry:
goto err_unlock_list;
}
+ if (!args->num_batch_buffer) {
+ err = xe_vm_lock(vm, true);
+ if (err)
+ goto err_unlock_list;
+
+ if (!xe_vm_in_lr_mode(vm)) {
+ struct dma_fence *fence;
+
+ fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto err_unlock_list;
+ }
+ for (i = 0; i < num_syncs; i++)
+ xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_exec_queue_last_fence_set(q, vm, fence);
+ dma_fence_put(fence);
+ }
+
+ xe_vm_unlock(vm);
+ goto err_unlock_list;
+ }
+
vm_exec.vm = &vm->gpuvm;
vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
if (xe_vm_in_lr_mode(vm)) {
@@ -254,24 +244,6 @@ retry:
goto err_exec;
}
- if (!args->num_batch_buffer) {
- if (!xe_vm_in_lr_mode(vm)) {
- struct dma_fence *fence;
-
- fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
- if (IS_ERR(fence)) {
- err = PTR_ERR(fence);
- goto err_exec;
- }
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, fence);
- xe_exec_queue_last_fence_set(q, vm, fence);
- dma_fence_put(fence);
- }
-
- goto err_exec;
- }
-
if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
err = -EWOULDBLOCK; /* Aliased to -EAGAIN */
skip_retry = true;
@@ -285,39 +257,7 @@ retry:
goto err_exec;
}
- /*
- * Rebind any invalidated userptr or evicted BOs in the VM, non-compute
- * VM mode only.
- */
- rebind_fence = xe_vm_rebind(vm, false);
- if (IS_ERR(rebind_fence)) {
- err = PTR_ERR(rebind_fence);
- goto err_put_job;
- }
-
- /*
- * We store the rebind_fence in the VM so subsequent execs don't get
- * scheduled before the rebinds of userptrs / evicted BOs is complete.
- */
- if (rebind_fence) {
- dma_fence_put(vm->rebind_fence);
- vm->rebind_fence = rebind_fence;
- }
- if (vm->rebind_fence) {
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &vm->rebind_fence->flags)) {
- dma_fence_put(vm->rebind_fence);
- vm->rebind_fence = NULL;
- } else {
- dma_fence_get(vm->rebind_fence);
- err = drm_sched_job_add_dependency(&job->drm,
- vm->rebind_fence);
- if (err)
- goto err_put_job;
- }
- }
-
- /* Wait behind munmap style rebinds */
+ /* Wait behind rebinds */
if (!xe_vm_in_lr_mode(vm)) {
err = drm_sched_job_add_resv_dependencies(&job->drm,
xe_vm_resv(vm),
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 11e150f4c0c1f..ead25d5e723ea 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -448,7 +448,7 @@ find_hw_engine(struct xe_device *xe,
{
u32 idx;
- if (eci.engine_class > ARRAY_SIZE(user_to_xe_engine_class))
+ if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
return NULL;
if (eci.gt_id >= xe->info.gt_count)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 62b3d9d1d7cdd..462b331950320 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -148,6 +148,11 @@ struct xe_exec_queue {
const struct xe_ring_ops *ring_ops;
/** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
struct drm_sched_entity *entity;
+ /**
+ * @tlb_flush_seqno: The seqno of the last rebind tlb flush performed.
+ * Protected by @vm's resv. Unused if @vm == NULL.
+ */
+ u64 tlb_flush_seqno;
/** @lrc: logical ring context for this exec queue */
struct xe_lrc lrc[];
};
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 73c535193a984..fa9e9853c53ba 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -69,7 +69,7 @@ static bool access_is_atomic(enum access_type access_type)
static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
{
return BIT(tile->id) & vma->tile_present &&
- !(BIT(tile->id) & vma->usm.tile_invalidated);
+ !(BIT(tile->id) & vma->tile_invalidated);
}
static bool vma_matches(struct xe_vma *vma, u64 page_addr)
@@ -100,10 +100,9 @@ static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
{
struct xe_bo *bo = xe_vma_bo(vma);
struct xe_vm *vm = xe_vma_vm(vma);
- unsigned int num_shared = 2; /* slots for bind + move */
int err;
- err = xe_vm_prepare_vma(exec, vma, num_shared);
+ err = xe_vm_lock_vma(exec, vma);
if (err)
return err;
@@ -226,7 +225,7 @@ retry_userptr:
if (xe_vma_is_userptr(vma))
ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
- vma->usm.tile_invalidated &= ~BIT(tile->id);
+ vma->tile_invalidated &= ~BIT(tile->id);
unlock_dma_resv:
drm_exec_fini(&exec);
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index f03e077f81a04..e598a4363d019 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -61,7 +61,6 @@ int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
spin_lock_init(&gt->tlb_invalidation.pending_lock);
spin_lock_init(&gt->tlb_invalidation.lock);
- gt->tlb_invalidation.fence_context = dma_fence_context_alloc(1);
INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
xe_gt_tlb_fence_timeout);
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 70c615dd14986..07b2f724ec456 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -177,13 +177,6 @@ struct xe_gt {
 * xe_gt_tlb_fence_timeout after the timeout interval is over.
*/
struct delayed_work fence_tdr;
- /** @tlb_invalidation.fence_context: context for TLB invalidation fences */
- u64 fence_context;
- /**
- * @tlb_invalidation.fence_seqno: seqno to TLB invalidation fences, protected by
- * tlb_invalidation.lock
- */
- u32 fence_seqno;
/** @tlb_invalidation.lock: protects TLB invalidation fences */
spinlock_t lock;
} tlb_invalidation;
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index ff77bc8da1b27..e2a4c3b5e9ff8 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1220,7 +1220,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
init_waitqueue_head(&ge->suspend_wait);
timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
- q->sched_props.job_timeout_ms;
+ msecs_to_jiffies(q->sched_props.job_timeout_ms);
err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
get_submit_wq(guc),
q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES, 64,
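
The msecs_to_jiffies() fix above addresses a unit mismatch: drm_sched timeouts are specified in jiffies while sched_props.job_timeout_ms stores milliseconds. A minimal sketch of the conversion, with an illustrative helper name:

#include <linux/jiffies.h>
#include <linux/sched.h>	/* MAX_SCHEDULE_TIMEOUT */
#include <linux/types.h>

static long example_queue_timeout(unsigned int job_timeout_ms, bool long_running)
{
	/* Long-running queues never time out; everything else converts the
	 * millisecond policy value into jiffies for the scheduler. */
	return long_running ? MAX_SCHEDULE_TIMEOUT
			    : msecs_to_jiffies(job_timeout_ms);
}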
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index b82233a416062..9ac7fbe201b3c 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -290,7 +290,7 @@ xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *a
* As y can be < 2, we compute tau4 = (4 | x) << y
* and then add 2 when doing the final right shift to account for units
*/
- tau4 = ((1 << x_w) | x) << y;
+ tau4 = (u64)((1 << x_w) | x) << y;
/* val in hwmon interface units (millisec) */
out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
@@ -330,7 +330,7 @@ xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *
r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
- tau4 = ((1 << x_w) | x) << y;
+ tau4 = (u64)((1 << x_w) | x) << y;
max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
if (val > max_win)
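
The added (u64) casts above keep the shift from being evaluated in 32-bit int arithmetic, where large y values would lose bits (and overflow signed int) before the result is widened. A small illustration with made-up values:

#include <linux/types.h>

static void example_shift_width(void)
{
	u32 x = 0x3, x_w = 2, y = 31;

	/* Evaluated as a 32-bit 'int' shift: high bits are lost and signed
	 * overflow is undefined behaviour. */
	u64 truncated = ((1 << x_w) | x) << y;

	/* Cast first so the whole expression is computed in 64 bits. */
	u64 widened = (u64)((1 << x_w) | x) << y;

	(void)truncated;
	(void)widened;
}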
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 7ad853b0788af..57066faf575ee 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -97,7 +97,6 @@ static void set_offsets(u32 *regs,
#define REG16(x) \
(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
(((x) >> 2) & 0x7f)
-#define END 0
{
const u32 base = hwe->mmio_base;
@@ -168,7 +167,7 @@ static const u8 gen12_xcs_offsets[] = {
REG16(0x274),
REG16(0x270),
- END
+ 0
};
static const u8 dg2_xcs_offsets[] = {
@@ -202,7 +201,7 @@ static const u8 dg2_xcs_offsets[] = {
REG16(0x274),
REG16(0x270),
- END
+ 0
};
static const u8 gen12_rcs_offsets[] = {
@@ -298,7 +297,7 @@ static const u8 gen12_rcs_offsets[] = {
REG(0x084),
NOP(1),
- END
+ 0
};
static const u8 xehp_rcs_offsets[] = {
@@ -339,7 +338,7 @@ static const u8 xehp_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END
+ 0
};
static const u8 dg2_rcs_offsets[] = {
@@ -382,7 +381,7 @@ static const u8 dg2_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END
+ 0
};
static const u8 mtl_rcs_offsets[] = {
@@ -425,7 +424,7 @@ static const u8 mtl_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END
+ 0
};
#define XE2_CTX_COMMON \
@@ -471,7 +470,7 @@ static const u8 xe2_rcs_offsets[] = {
LRI(1, 0), /* [0x47] */
REG(0x0c8), /* [0x48] R_PWR_CLK_STATE */
- END
+ 0
};
static const u8 xe2_bcs_offsets[] = {
@@ -482,16 +481,15 @@ static const u8 xe2_bcs_offsets[] = {
REG16(0x200), /* [0x42] BCS_SWCTRL */
REG16(0x204), /* [0x44] BLIT_CCTL */
- END
+ 0
};
static const u8 xe2_xcs_offsets[] = {
XE2_CTX_COMMON,
- END
+ 0
};
-#undef END
#undef REG16
#undef REG
#undef LRI
@@ -527,9 +525,8 @@ static const u8 *reg_offsets(struct xe_device *xe, enum xe_engine_class class)
static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
{
- regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH) |
- _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
- CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
+ regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
+ CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
/* TODO: Timestamp */
}
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index ee1bb938c4934..2ba4fb9511f63 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -227,7 +227,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (vm->flags & XE_VM_FLAG_64K && level == 1)
flags = XE_PDE_64K;
- entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (level - 1) *
+ entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
XE_PAGE_SIZE, pat_index);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
entry | flags);
@@ -235,7 +235,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
/* Write PDE's that point to our BO. */
for (i = 0; i < num_entries - num_level; i++) {
- entry = vm->pt_ops->pde_encode_bo(bo, i * XE_PAGE_SIZE,
+ entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE,
pat_index);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
@@ -291,7 +291,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
#define VM_SA_UPDATE_UNIT_SIZE (XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
#define NUM_VMUSA_WRITES_PER_UNIT (VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
drm_suballoc_manager_init(&m->vm_update_sa,
- (map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
+ (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
NUM_VMUSA_UNIT_PER_PAGE, 0);
m->pt_bo = bo;
@@ -490,7 +490,7 @@ static void emit_pte(struct xe_migrate *m,
struct xe_vm *vm = m->q->vm;
u16 pat_index;
u32 ptes;
- u64 ofs = at_pt * XE_PAGE_SIZE;
+ u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
u64 cur_ofs;
/* Indirect access needs compression enabled uncached PAT index */
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
index 7bce2a332603c..7d50c6e89d8e7 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -49,7 +49,7 @@ static bool preempt_fence_enable_signaling(struct dma_fence *fence)
struct xe_exec_queue *q = pfence->q;
pfence->error = q->ops->suspend(q);
- queue_work(system_unbound_wq, &pfence->preempt_work);
+ queue_work(q->vm->xe->preempt_fence_wq, &pfence->preempt_work);
return true;
}
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 7f54bc3e389d5..4efc8c1a3d7a9 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -1135,8 +1135,7 @@ static int invalidation_fence_init(struct xe_gt *gt,
spin_lock_irq(&gt->tlb_invalidation.lock);
dma_fence_init(&ifence->base.base, &invalidation_fence_ops,
&gt->tlb_invalidation.lock,
- gt->tlb_invalidation.fence_context,
- ++gt->tlb_invalidation.fence_seqno);
+ dma_fence_context_alloc(1), 1);
spin_unlock_irq(&gt->tlb_invalidation.lock);
INIT_LIST_HEAD(&ifence->base.link);
@@ -1236,6 +1235,13 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
err = xe_pt_prepare_bind(tile, vma, entries, &num_entries);
if (err)
goto err;
+
+ err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
+ if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+ err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1);
+ if (err)
+ goto err;
+
xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
@@ -1254,11 +1260,13 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
* non-faulting LR, in particular on user-space batch buffer chaining,
* it needs to be done here.
*/
- if ((rebind && !xe_vm_in_lr_mode(vm) && !vm->batch_invalidate_tlb) ||
- (!rebind && xe_vm_has_scratch(vm) && xe_vm_in_preempt_fence_mode(vm))) {
+ if ((!rebind && xe_vm_has_scratch(vm) && xe_vm_in_preempt_fence_mode(vm))) {
ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
if (!ifence)
return ERR_PTR(-ENOMEM);
+ } else if (rebind && !xe_vm_in_lr_mode(vm)) {
+ /* We also bump if batch_invalidate_tlb is true */
+ vm->tlb_flush_seqno++;
}
rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
@@ -1297,7 +1305,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
}
/* add shared fence now for pagetable delayed destroy */
- dma_resv_add_fence(xe_vm_resv(vm), fence, !rebind &&
+ dma_resv_add_fence(xe_vm_resv(vm), fence, rebind ||
last_munmap_rebind ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
@@ -1576,6 +1584,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu
struct dma_fence *fence = NULL;
struct invalidation_fence *ifence;
struct xe_range_fence *rfence;
+ int err;
LLIST_HEAD(deferred);
@@ -1593,6 +1602,12 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu
xe_pt_calc_rfence_interval(vma, &unbind_pt_update, entries,
num_entries);
+ err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
+ if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+ err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1);
+ if (err)
+ return ERR_PTR(err);
+
ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
if (!ifence)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 92bb06c0586eb..075f9eaef0312 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -132,7 +132,7 @@ query_engine_cycles(struct xe_device *xe,
return -EINVAL;
eci = &resp.eci;
- if (eci->gt_id > XE_MAX_GT_PER_TILE)
+ if (eci->gt_id >= XE_MAX_GT_PER_TILE)
return -EINVAL;
gt = xe_device_get_gt(xe, eci->gt_id);
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index c4edffcd4a320..5b2b37b598130 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -219,10 +219,9 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
{
u32 dw[MAX_JOB_SIZE_DW], i = 0;
u32 ppgtt_flag = get_ppgtt_flag(job);
- struct xe_vm *vm = job->q->vm;
struct xe_gt *gt = job->q->gt;
- if (vm && vm->batch_invalidate_tlb) {
+ if (job->ring_ops_flush_tlb) {
dw[i++] = preparser_disable(true);
i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
seqno, true, dw, i);
@@ -270,7 +269,6 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
struct xe_gt *gt = job->q->gt;
struct xe_device *xe = gt_to_xe(gt);
bool decode = job->q->class == XE_ENGINE_CLASS_VIDEO_DECODE;
- struct xe_vm *vm = job->q->vm;
dw[i++] = preparser_disable(true);
@@ -282,13 +280,13 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
i = emit_aux_table_inv(gt, VE0_AUX_INV, dw, i);
}
- if (vm && vm->batch_invalidate_tlb)
+ if (job->ring_ops_flush_tlb)
i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
seqno, true, dw, i);
dw[i++] = preparser_disable(false);
- if (!vm || !vm->batch_invalidate_tlb)
+ if (!job->ring_ops_flush_tlb)
i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
seqno, dw, i);
@@ -317,7 +315,6 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
struct xe_gt *gt = job->q->gt;
struct xe_device *xe = gt_to_xe(gt);
bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
- struct xe_vm *vm = job->q->vm;
u32 mask_flags = 0;
dw[i++] = preparser_disable(true);
@@ -327,7 +324,7 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
mask_flags = PIPE_CONTROL_3D_ENGINE_FLAGS;
/* See __xe_pt_bind_vma() for a discussion on TLB invalidations. */
- i = emit_pipe_invalidate(mask_flags, vm && vm->batch_invalidate_tlb, dw, i);
+ i = emit_pipe_invalidate(mask_flags, job->ring_ops_flush_tlb, dw, i);
/* hsdes: 1809175790 */
if (has_aux_ccs(xe))
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 8151ddafb9407..b0c7fa4693cfe 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -250,6 +250,16 @@ bool xe_sched_job_completed(struct xe_sched_job *job)
void xe_sched_job_arm(struct xe_sched_job *job)
{
+ struct xe_exec_queue *q = job->q;
+ struct xe_vm *vm = q->vm;
+
+ if (vm && !xe_sched_job_is_migration(q) && !xe_vm_in_lr_mode(vm) &&
+ (vm->batch_invalidate_tlb || vm->tlb_flush_seqno != q->tlb_flush_seqno)) {
+ xe_vm_assert_held(vm);
+ q->tlb_flush_seqno = vm->tlb_flush_seqno;
+ job->ring_ops_flush_tlb = true;
+ }
+
drm_sched_job_arm(&job->drm);
}
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
index b1d83da50a53d..5e12724219fdd 100644
--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -39,6 +39,8 @@ struct xe_sched_job {
} user_fence;
/** @migrate_flush_flags: Additional flush flags for migration jobs */
u32 migrate_flush_flags;
+ /** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
+ bool ring_ops_flush_tlb;
/** @batch_addr: batch buffer address of job */
u64 batch_addr[];
};
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 4ddc55527f9ab..846f14507d5ff 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -468,7 +468,7 @@ DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
TP_ARGS(vma)
);
-DEFINE_EVENT(xe_vma, xe_vma_usm_invalidate,
+DEFINE_EVENT(xe_vma, xe_vma_invalidate,
TP_PROTO(struct xe_vma *vma),
TP_ARGS(vma)
);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index d28260351af2e..3d4c8f342e215 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -482,17 +482,53 @@ static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
return 0;
}
+/**
+ * xe_vm_validate_rebind() - Validate buffer objects and rebind vmas
+ * @vm: The vm for which we are rebinding.
+ * @exec: The struct drm_exec with the locked GEM objects.
+ * @num_fences: The number of fences to reserve for the operation, not
+ * including rebinds and validations.
+ *
+ * Validates all evicted gem objects and rebinds their vmas. Note that
+ * rebindings may cause evictions and hence the validation-rebind
+ * sequence is rerun until there are no more objects to validate.
+ *
+ * Return: 0 on success, negative error code on error. In particular,
+ * may return -EINTR or -ERESTARTSYS if interrupted, and -EDEADLK if
+ * the drm_exec transaction needs to be restarted.
+ */
+int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct drm_gem_object *obj;
+ unsigned long index;
+ int ret;
+
+ do {
+ ret = drm_gpuvm_validate(&vm->gpuvm, exec);
+ if (ret)
+ return ret;
+
+ ret = xe_vm_rebind(vm, false);
+ if (ret)
+ return ret;
+ } while (!list_empty(&vm->gpuvm.evict.list));
+
+ drm_exec_for_each_locked_object(exec, index, obj) {
+ ret = dma_resv_reserve_fences(obj->resv, num_fences);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
bool *done)
{
int err;
- /*
- * 1 fence for each preempt fence plus a fence for each tile from a
- * possible rebind
- */
- err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, vm->preempt.num_exec_queues +
- vm->xe->info.tile_count);
+ err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0);
if (err)
return err;
@@ -507,7 +543,7 @@ static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
return 0;
}
- err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, vm->preempt.num_exec_queues);
+ err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0);
if (err)
return err;
@@ -515,14 +551,19 @@ static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
if (err)
return err;
- return drm_gpuvm_validate(&vm->gpuvm, exec);
+ /*
+ * Add validation and rebinding to the locking loop since both can
+ * cause evictions which may require blocking dma_resv locks.
+ * The fence reservation here is intended for the new preempt fences
+ * we attach at the end of the rebind work.
+ */
+ return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues);
}
static void preempt_rebind_work_func(struct work_struct *w)
{
struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
struct drm_exec exec;
- struct dma_fence *rebind_fence;
unsigned int fence_count = 0;
LIST_HEAD(preempt_fences);
ktime_t end = 0;
@@ -568,18 +609,11 @@ retry:
if (err)
goto out_unlock;
- rebind_fence = xe_vm_rebind(vm, true);
- if (IS_ERR(rebind_fence)) {
- err = PTR_ERR(rebind_fence);
+ err = xe_vm_rebind(vm, true);
+ if (err)
goto out_unlock;
- }
-
- if (rebind_fence) {
- dma_fence_wait(rebind_fence, false);
- dma_fence_put(rebind_fence);
- }
- /* Wait on munmap style VM unbinds */
+ /* Wait on rebinds and munmap style VM unbinds */
wait = dma_resv_wait_timeout(xe_vm_resv(vm),
DMA_RESV_USAGE_KERNEL,
false, MAX_SCHEDULE_TIMEOUT);
@@ -708,6 +742,7 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
int err = 0;
LIST_HEAD(tmp_evict);
+ xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
lockdep_assert_held_write(&vm->lock);
/* Collect invalidated userptrs */
@@ -724,11 +759,27 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
userptr.repin_link) {
err = xe_vma_userptr_pin_pages(uvma);
- if (err < 0)
- return err;
+ if (err == -EFAULT) {
+ list_del_init(&uvma->userptr.repin_link);
+
+ /* Wait for pending binds */
+ xe_vm_lock(vm, false);
+ dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
- list_del_init(&uvma->userptr.repin_link);
- list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
+ err = xe_vm_invalidate_vma(&uvma->vma);
+ xe_vm_unlock(vm);
+ if (err)
+ return err;
+ } else {
+ if (err < 0)
+ return err;
+
+ list_del_init(&uvma->userptr.repin_link);
+ list_move_tail(&uvma->vma.combined_links.rebind,
+ &vm->rebind_list);
+ }
}
return 0;
@@ -756,14 +807,14 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs,
bool first_op, bool last_op);
-struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
+int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
{
- struct dma_fence *fence = NULL;
+ struct dma_fence *fence;
struct xe_vma *vma, *next;
lockdep_assert_held(&vm->lock);
if (xe_vm_in_lr_mode(vm) && !rebind_worker)
- return NULL;
+ return 0;
xe_vm_assert_held(vm);
list_for_each_entry_safe(vma, next, &vm->rebind_list,
@@ -771,17 +822,17 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
xe_assert(vm->xe, vma->tile_present);
list_del_init(&vma->combined_links.rebind);
- dma_fence_put(fence);
if (rebind_worker)
trace_xe_vma_rebind_worker(vma);
else
trace_xe_vma_rebind_exec(vma);
fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
if (IS_ERR(fence))
- return fence;
+ return PTR_ERR(fence);
+ dma_fence_put(fence);
}
- return fence;
+ return 0;
}
static void xe_vma_free(struct xe_vma *vma)
@@ -987,35 +1038,26 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
}
/**
- * xe_vm_prepare_vma() - drm_exec utility to lock a vma
+ * xe_vm_lock_vma() - drm_exec utility to lock a vma
* @exec: The drm_exec object we're currently locking for.
 * @vma: The vma for which we want to lock the vm resv and any attached
* object's resv.
- * @num_shared: The number of dma-fence slots to pre-allocate in the
- * objects' reservation objects.
*
* Return: 0 on success, negative error code on error. In particular
* may return -EDEADLK on WW transaction contention and -EINTR if
* an interruptible wait is terminated by a signal.
*/
-int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
- unsigned int num_shared)
+int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
{
struct xe_vm *vm = xe_vma_vm(vma);
struct xe_bo *bo = xe_vma_bo(vma);
int err;
XE_WARN_ON(!vm);
- if (num_shared)
- err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
- else
- err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
- if (!err && bo && !bo->vm) {
- if (num_shared)
- err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
- else
- err = drm_exec_lock_obj(exec, &bo->ttm.base);
- }
+
+ err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
+ if (!err && bo && !bo->vm)
+ err = drm_exec_lock_obj(exec, &bo->ttm.base);
return err;
}
@@ -1027,7 +1069,7 @@ static void xe_vma_destroy_unlocked(struct xe_vma *vma)
drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
- err = xe_vm_prepare_vma(&exec, vma, 0);
+ err = xe_vm_lock_vma(&exec, vma);
drm_exec_retry_on_contention(&exec);
if (XE_WARN_ON(err))
break;
@@ -1535,6 +1577,16 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe->usm.num_vm_in_fault_mode--;
else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
xe->usm.num_vm_in_non_fault_mode--;
+
+ if (vm->usm.asid) {
+ void *lookup;
+
+ xe_assert(xe, xe->info.has_asid);
+ xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
+
+ lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
+ xe_assert(xe, lookup == vm);
+ }
mutex_unlock(&xe->usm.lock);
for_each_tile(tile, xe, id)
@@ -1550,29 +1602,19 @@ static void vm_destroy_work_func(struct work_struct *w)
struct xe_device *xe = vm->xe;
struct xe_tile *tile;
u8 id;
- void *lookup;
/* xe_vm_close_and_put was not called? */
xe_assert(xe, !vm->size);
mutex_destroy(&vm->snap_mutex);
- if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
+ if (!(vm->flags & XE_VM_FLAG_MIGRATION))
xe_device_mem_access_put(xe);
- if (xe->info.has_asid && vm->usm.asid) {
- mutex_lock(&xe->usm.lock);
- lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
- xe_assert(xe, lookup == vm);
- mutex_unlock(&xe->usm.lock);
- }
- }
-
for_each_tile(tile, xe, id)
XE_WARN_ON(vm->pt_root[id]);
trace_xe_vm_free(vm);
- dma_fence_put(vm->rebind_fence);
kfree(vm);
}
@@ -2024,7 +2066,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
return err;
}
- if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
+ if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
true, first_op, last_op);
} else {
@@ -2495,7 +2537,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
lockdep_assert_held_write(&vm->lock);
- err = xe_vm_prepare_vma(exec, vma, 1);
+ err = xe_vm_lock_vma(exec, vma);
if (err)
return err;
@@ -3214,9 +3256,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
u8 id;
int ret;
- xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
xe_assert(xe, !xe_vma_is_null(vma));
- trace_xe_vma_usm_invalidate(vma);
+ trace_xe_vma_invalidate(vma);
/* Check that we don't race with page-table updates */
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
@@ -3254,7 +3295,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
}
}
- vma->usm.tile_invalidated = vma->tile_mask;
+ vma->tile_invalidated = vma->tile_mask;
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 6df1f1c7f85d9..306cd0934a190 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -207,7 +207,7 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
int xe_vm_userptr_check_repin(struct xe_vm *vm);
-struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
+int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
int xe_vm_invalidate_vma(struct xe_vma *vma);
@@ -242,8 +242,10 @@ bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id);
-int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
- unsigned int num_shared);
+int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);
+
+int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences);
/**
* xe_vm_resv() - Return's the vm's reservation object
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 79b5cab577119..badf3945083d5 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -84,11 +84,8 @@ struct xe_vma {
struct work_struct destroy_work;
};
- /** @usm: unified shared memory state */
- struct {
- /** @tile_invalidated: VMA has been invalidated */
- u8 tile_invalidated;
- } usm;
+ /** @tile_invalidated: VMA has been invalidated */
+ u8 tile_invalidated;
/** @tile_mask: Tile mask of where to create binding for this VMA */
u8 tile_mask;
@@ -180,9 +177,6 @@ struct xe_vm {
*/
struct list_head rebind_list;
- /** @rebind_fence: rebind fence from execbuf */
- struct dma_fence *rebind_fence;
-
/**
* @destroy_work: worker to destroy VM, needed as a dma_fence signaling
* from an irq context can be last put and the destroy needs to be able
@@ -267,6 +261,11 @@ struct xe_vm {
bool capture_once;
} error_capture;
+ /**
+ * @tlb_flush_seqno: Required TLB flush seqno for the next exec.
+ * protected by the vm resv.
+ */
+ u64 tlb_flush_seqno;
/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
bool batch_invalidate_tlb;
/** @xef: XE file handle for tracking this VM's drm client */
diff --git a/drivers/gpu/drm/xe/xe_vram_freq.c b/drivers/gpu/drm/xe/xe_vram_freq.c
index 079cc283a1866..c5f6b5a5d1176 100644
--- a/drivers/gpu/drm/xe/xe_vram_freq.c
+++ b/drivers/gpu/drm/xe/xe_vram_freq.c
@@ -111,8 +111,10 @@ void xe_vram_freq_sysfs_init(struct xe_tile *tile)
return;
kobj = kobject_create_and_add("memory", tile->sysfs);
- if (!kobj)
+ if (!kobj) {
drm_warn(&xe->drm, "failed to add memory directory, err: %d\n", -ENOMEM);
+ return;
+ }
err = sysfs_create_group(kobj, &freq_group_attrs);
if (err) {
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 783975d1384fc..7c52757a89db9 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -351,11 +351,6 @@ static int host1x_device_uevent(const struct device *dev,
return 0;
}
-static int host1x_dma_configure(struct device *dev)
-{
- return of_dma_configure(dev, dev->of_node, true);
-}
-
static const struct dev_pm_ops host1x_device_pm_ops = {
.suspend = pm_generic_suspend,
.resume = pm_generic_resume,
@@ -369,7 +364,6 @@ const struct bus_type host1x_bus_type = {
.name = "host1x",
.match = host1x_device_match,
.uevent = host1x_device_uevent,
- .dma_configure = host1x_dma_configure,
.pm = &host1x_device_pm_ops,
};
@@ -458,8 +452,6 @@ static int host1x_device_add(struct host1x *host1x,
device->dev.bus = &host1x_bus_type;
device->dev.parent = host1x->dev;
- of_dma_configure(&device->dev, host1x->dev->of_node, true);
-
device->dev.dma_parms = &device->dma_parms;
dma_set_max_seg_size(&device->dev, UINT_MAX);
diff --git a/drivers/greybus/bundle.c b/drivers/greybus/bundle.c
index 84660729538b9..a6e1cca06172f 100644
--- a/drivers/greybus/bundle.c
+++ b/drivers/greybus/bundle.c
@@ -166,7 +166,7 @@ static const struct dev_pm_ops gb_bundle_pm_ops = {
SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle)
};
-struct device_type greybus_bundle_type = {
+const struct device_type greybus_bundle_type = {
.name = "greybus_bundle",
.release = gb_bundle_release,
.pm = &gb_bundle_pm_ops,
diff --git a/drivers/greybus/control.c b/drivers/greybus/control.c
index 359a258419739..b5cf49d09df22 100644
--- a/drivers/greybus/control.c
+++ b/drivers/greybus/control.c
@@ -436,7 +436,7 @@ static void gb_control_release(struct device *dev)
kfree(control);
}
-struct device_type greybus_control_type = {
+const struct device_type greybus_control_type = {
.name = "greybus_control",
.release = gb_control_release,
};
diff --git a/drivers/greybus/core.c b/drivers/greybus/core.c
index 5714be7404707..95c09d4f3a869 100644
--- a/drivers/greybus/core.c
+++ b/drivers/greybus/core.c
@@ -27,6 +27,36 @@ int greybus_disabled(void)
}
EXPORT_SYMBOL_GPL(greybus_disabled);
+static int is_gb_host_device(const struct device *dev)
+{
+ return dev->type == &greybus_hd_type;
+}
+
+static int is_gb_module(const struct device *dev)
+{
+ return dev->type == &greybus_module_type;
+}
+
+static int is_gb_interface(const struct device *dev)
+{
+ return dev->type == &greybus_interface_type;
+}
+
+static int is_gb_control(const struct device *dev)
+{
+ return dev->type == &greybus_control_type;
+}
+
+static int is_gb_bundle(const struct device *dev)
+{
+ return dev->type == &greybus_bundle_type;
+}
+
+static int is_gb_svc(const struct device *dev)
+{
+ return dev->type == &greybus_svc_type;
+}
+
static bool greybus_match_one_id(struct gb_bundle *bundle,
const struct greybus_bundle_id *id)
{
@@ -155,7 +185,7 @@ static void greybus_shutdown(struct device *dev)
}
}
-struct bus_type greybus_bus_type = {
+const struct bus_type greybus_bus_type = {
.name = "greybus",
.match = greybus_match_device,
.uevent = greybus_uevent,
diff --git a/drivers/greybus/es2.c b/drivers/greybus/es2.c
index e89cca0150955..1ee78d0d90b4d 100644
--- a/drivers/greybus/es2.c
+++ b/drivers/greybus/es2.c
@@ -513,16 +513,16 @@ static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
if (cport_id < 0) {
ida_start = 0;
- ida_end = hd->num_cports;
+ ida_end = hd->num_cports - 1;
} else if (cport_id < hd->num_cports) {
ida_start = cport_id;
- ida_end = cport_id + 1;
+ ida_end = cport_id;
} else {
dev_err(&hd->dev, "cport %d not available\n", cport_id);
return -EINVAL;
}
- return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
+ return ida_alloc_range(id_map, ida_start, ida_end, GFP_KERNEL);
}
static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
@@ -535,7 +535,7 @@ static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
return;
}
- ida_simple_remove(&hd->cport_id_map, cport_id);
+ ida_free(&hd->cport_id_map, cport_id);
}
static int cport_enable(struct gb_host_device *hd, u16 cport_id,
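
The ida_simple_get() → ida_alloc_range() conversions above hinge on a range-convention change: ida_simple_get() takes an exclusive end, while ida_alloc_range() takes an inclusive max — hence "hd->num_cports - 1" and dropping the "+ 1". A small sketch of the converted allocator shape, modeled on es2_cport_allocate(); the wrapper name is illustrative.

#include <linux/idr.h>

/* Illustrative helper mirroring the converted es2/hd cport allocators. */
static int cport_id_alloc_sketch(struct ida *id_map, int cport_id,
				 int num_cports)
{
	unsigned int min, max;

	if (cport_id < 0) {
		/* Any free cport; max is inclusive, so num_cports - 1 */
		min = 0;
		max = num_cports - 1;
	} else if (cport_id < num_cports) {
		/* Exactly this cport: the single-id range [cport_id, cport_id] */
		min = max = cport_id;
	} else {
		return -EINVAL;
	}

	/* Returns the allocated id, or a negative errno such as -ENOSPC */
	return ida_alloc_range(id_map, min, max, GFP_KERNEL);
}
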
diff --git a/drivers/greybus/gb-beagleplay.c b/drivers/greybus/gb-beagleplay.c
index c3e90025064bd..33f8fad70260a 100644
--- a/drivers/greybus/gb-beagleplay.c
+++ b/drivers/greybus/gb-beagleplay.c
@@ -271,7 +271,7 @@ static void hdlc_rx_frame(struct gb_beagleplay *bg)
}
}
-static ssize_t hdlc_rx(struct gb_beagleplay *bg, const u8 *data, size_t count)
+static size_t hdlc_rx(struct gb_beagleplay *bg, const u8 *data, size_t count)
{
size_t i;
u8 c;
@@ -331,8 +331,8 @@ static void hdlc_deinit(struct gb_beagleplay *bg)
flush_work(&bg->tx_work);
}
-static ssize_t gb_tty_receive(struct serdev_device *sd, const u8 *data,
- size_t count)
+static size_t gb_tty_receive(struct serdev_device *sd, const u8 *data,
+ size_t count)
{
struct gb_beagleplay *bg = serdev_device_get_drvdata(sd);
diff --git a/drivers/greybus/hd.c b/drivers/greybus/hd.c
index 72b21bf2d7d36..5de98d9177f15 100644
--- a/drivers/greybus/hd.c
+++ b/drivers/greybus/hd.c
@@ -50,7 +50,7 @@ int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id)
struct ida *id_map = &hd->cport_id_map;
int ret;
- ret = ida_simple_get(id_map, cport_id, cport_id + 1, GFP_KERNEL);
+ ret = ida_alloc_range(id_map, cport_id, cport_id, GFP_KERNEL);
if (ret < 0) {
dev_err(&hd->dev, "failed to reserve cport %u\n", cport_id);
return ret;
@@ -64,7 +64,7 @@ void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id)
{
struct ida *id_map = &hd->cport_id_map;
- ida_simple_remove(id_map, cport_id);
+ ida_free(id_map, cport_id);
}
EXPORT_SYMBOL_GPL(gb_hd_cport_release_reserved);
@@ -80,16 +80,16 @@ int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
if (cport_id < 0) {
ida_start = 0;
- ida_end = hd->num_cports;
+ ida_end = hd->num_cports - 1;
} else if (cport_id < hd->num_cports) {
ida_start = cport_id;
- ida_end = cport_id + 1;
+ ida_end = cport_id;
} else {
dev_err(&hd->dev, "cport %d not available\n", cport_id);
return -EINVAL;
}
- return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
+ return ida_alloc_range(id_map, ida_start, ida_end, GFP_KERNEL);
}
/* Locking: Caller guarantees serialisation */
@@ -100,7 +100,7 @@ void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id)
return;
}
- ida_simple_remove(&hd->cport_id_map, cport_id);
+ ida_free(&hd->cport_id_map, cport_id);
}
static void gb_hd_release(struct device *dev)
@@ -111,12 +111,12 @@ static void gb_hd_release(struct device *dev)
if (hd->svc)
gb_svc_put(hd->svc);
- ida_simple_remove(&gb_hd_bus_id_map, hd->bus_id);
+ ida_free(&gb_hd_bus_id_map, hd->bus_id);
ida_destroy(&hd->cport_id_map);
kfree(hd);
}
-struct device_type greybus_hd_type = {
+const struct device_type greybus_hd_type = {
.name = "greybus_host_device",
.release = gb_hd_release,
};
@@ -162,7 +162,7 @@ struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
if (!hd)
return ERR_PTR(-ENOMEM);
- ret = ida_simple_get(&gb_hd_bus_id_map, 1, 0, GFP_KERNEL);
+ ret = ida_alloc_min(&gb_hd_bus_id_map, 1, GFP_KERNEL);
if (ret < 0) {
kfree(hd);
return ERR_PTR(ret);
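
Bus-id allocation in gb_hd_create() moves from ida_simple_get(..., 1, 0, ...) — where an end of 0 meant "no upper limit" — to ida_alloc_min(), which hands back the lowest free id greater than or equal to min. A one-line sketch; the ida name is an illustrative stand-in for gb_hd_bus_id_map, and ids obtained this way are still released with ida_free() as in gb_hd_release().

#include <linux/idr.h>

static DEFINE_IDA(bus_id_map_sketch);	/* stand-in for gb_hd_bus_id_map */

static int bus_id_get_sketch(void)
{
	/* Lowest free id >= 1; negative errno on failure */
	return ida_alloc_min(&bus_id_map_sketch, 1, GFP_KERNEL);
}
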
diff --git a/drivers/greybus/interface.c b/drivers/greybus/interface.c
index 9ec949a438ef6..fd58a86b0888d 100644
--- a/drivers/greybus/interface.c
+++ b/drivers/greybus/interface.c
@@ -131,9 +131,8 @@ static int gb_interface_route_create(struct gb_interface *intf)
int ret;
/* Allocate an interface device id. */
- ret = ida_simple_get(&svc->device_id_map,
- GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1,
- GFP_KERNEL);
+ ret = ida_alloc_range(&svc->device_id_map, GB_SVC_DEVICE_ID_MIN,
+ GB_SVC_DEVICE_ID_MAX, GFP_KERNEL);
if (ret < 0) {
dev_err(&intf->dev, "failed to allocate device id: %d\n", ret);
return ret;
@@ -165,7 +164,7 @@ err_svc_id_free:
* XXX anymore.
*/
err_ida_remove:
- ida_simple_remove(&svc->device_id_map, device_id);
+ ida_free(&svc->device_id_map, device_id);
return ret;
}
@@ -178,7 +177,7 @@ static void gb_interface_route_destroy(struct gb_interface *intf)
return;
gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id);
- ida_simple_remove(&svc->device_id_map, intf->device_id);
+ ida_free(&svc->device_id_map, intf->device_id);
intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
}
@@ -765,7 +764,7 @@ static const struct dev_pm_ops gb_interface_pm_ops = {
gb_interface_runtime_idle)
};
-struct device_type greybus_interface_type = {
+const struct device_type greybus_interface_type = {
.name = "greybus_interface",
.release = gb_interface_release,
.pm = &gb_interface_pm_ops,
diff --git a/drivers/greybus/module.c b/drivers/greybus/module.c
index 36f77f9e1d743..7f7153a1dd602 100644
--- a/drivers/greybus/module.c
+++ b/drivers/greybus/module.c
@@ -81,7 +81,7 @@ static void gb_module_release(struct device *dev)
kfree(module);
}
-struct device_type greybus_module_type = {
+const struct device_type greybus_module_type = {
.name = "greybus_module",
.release = gb_module_release,
};
diff --git a/drivers/greybus/svc.c b/drivers/greybus/svc.c
index 0d7e749174a48..4256467fcd359 100644
--- a/drivers/greybus/svc.c
+++ b/drivers/greybus/svc.c
@@ -1305,7 +1305,7 @@ static void gb_svc_release(struct device *dev)
kfree(svc);
}
-struct device_type greybus_svc_type = {
+const struct device_type greybus_svc_type = {
.name = "greybus_svc",
.release = gb_svc_release,
};
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index da6a7abd584f7..10926359e6d21 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -114,9 +114,10 @@ enum {
* @netdev: Phonet network device
* @txqueue: TX data queue
* @cmdqueue: Queue of free commands
+ * @work: &struct work_struct for scheduled work
* @cl: HSI client own reference
* @link: Link for ssip_list
- * @tx_usecount: Refcount to keep track the slaves that use the wake line
+ * @tx_usecnt: Refcount to keep track the slaves that use the wake line
* @channel_id_cmd: HSI channel id for command stream
* @channel_id_data: HSI channel id for data stream
*/
diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c
index acbf82f755a8e..e3beeac8aee57 100644
--- a/drivers/hsi/hsi_core.c
+++ b/drivers/hsi/hsi_core.c
@@ -48,7 +48,7 @@ static int hsi_bus_match(struct device *dev, struct device_driver *driver)
return false;
}
-static struct bus_type hsi_bus_type = {
+static const struct bus_type hsi_bus_type = {
.name = "hsi",
.dev_groups = hsi_bus_dev_groups,
.match = hsi_bus_match,
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 00242107d62e0..862c47b191afe 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -16,6 +16,7 @@ config HYPERV
config HYPERV_VTL_MODE
bool "Enable Linux to boot in VTL context"
depends on X86_64 && HYPERV
+ depends on SMP
default n
help
Virtual Secure Mode (VSM) is a set of hypervisor capabilities and
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index adbf674355b2b..fb8cd8469328e 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -153,7 +153,9 @@ void vmbus_free_ring(struct vmbus_channel *channel)
hv_ringbuffer_cleanup(&channel->inbound);
if (channel->ringbuffer_page) {
- __free_pages(channel->ringbuffer_page,
+ /* In a CoCo VM leak the memory if it didn't get re-encrypted */
+ if (!channel->ringbuffer_gpadlhandle.decrypted)
+ __free_pages(channel->ringbuffer_page,
get_order(channel->ringbuffer_pagecount
<< PAGE_SHIFT));
channel->ringbuffer_page = NULL;
@@ -436,9 +438,18 @@ static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
- if (ret)
+ if (ret) {
+ gpadl->decrypted = false;
return ret;
+ }
+ /*
+ * Set the "decrypted" flag to true for the set_memory_decrypted()
+ * success case. In the failure case, the encryption state of the
+ * memory is unknown. Leave "decrypted" as true to ensure the
+ * memory will be leaked instead of going back on the free list.
+ */
+ gpadl->decrypted = true;
ret = set_memory_decrypted((unsigned long)kbuffer,
PFN_UP(size));
if (ret) {
@@ -527,9 +538,15 @@ cleanup:
kfree(msginfo);
- if (ret)
- set_memory_encrypted((unsigned long)kbuffer,
- PFN_UP(size));
+ if (ret) {
+ /*
+ * If set_memory_encrypted() fails, the decrypted flag is
+ * left as true so the memory is leaked instead of being
+ * put back on the free list.
+ */
+ if (!set_memory_encrypted((unsigned long)kbuffer, PFN_UP(size)))
+ gpadl->decrypted = false;
+ }
return ret;
}
@@ -850,6 +867,8 @@ post_msg_err:
if (ret)
pr_warn("Fail to set mem host visibility in GPADL teardown %d.\n", ret);
+ gpadl->decrypted = ret;
+
return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
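
The GPADL changes above add a "decrypted" flag so that, in a confidential (CoCo) VM, memory whose encryption state is uncertain is deliberately leaked rather than handed back to the page allocator, where the host could still read or corrupt it. A hedged sketch of that rule in isolation — the struct and helper names are illustrative, while set_memory_decrypted()/set_memory_encrypted() are the real primitives used in the hunks.

#include <linux/set_memory.h>
#include <linux/mm.h>

struct host_shared_buf_sketch {
	void *addr;
	int pages;
	bool decrypted;	/* true while the host may still see this memory */
};

/* Share a buffer with the host (decrypt it). */
static int share_with_host_sketch(struct host_shared_buf_sketch *b)
{
	/* If this fails the state is unknown, so leave "decrypted" set */
	b->decrypted = true;
	return set_memory_decrypted((unsigned long)b->addr, b->pages);
}

/* Re-encrypt and free, or deliberately leak if re-encryption failed. */
static void unshare_and_free_sketch(struct host_shared_buf_sketch *b)
{
	if (b->decrypted &&
	    set_memory_encrypted((unsigned long)b->addr, b->pages))
		return;		/* possibly still decrypted: leak it */

	b->decrypted = false;
	free_pages_exact(b->addr, (size_t)b->pages << PAGE_SHIFT);
}
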
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 3cabeeabb1cac..f001ae880e1db 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -237,8 +237,17 @@ int vmbus_connect(void)
vmbus_connection.monitor_pages[0], 1);
ret |= set_memory_decrypted((unsigned long)
vmbus_connection.monitor_pages[1], 1);
- if (ret)
+ if (ret) {
+ /*
+ * If set_memory_decrypted() fails, the encryption state
+ * of the memory is unknown. So leak the memory instead
+ * of risking returning decrypted memory to the free list.
+ * For simplicity, always handle both pages the same.
+ */
+ vmbus_connection.monitor_pages[0] = NULL;
+ vmbus_connection.monitor_pages[1] = NULL;
goto cleanup;
+ }
/*
* Set_memory_decrypted() will change the memory contents if
@@ -337,13 +346,19 @@ void vmbus_disconnect(void)
vmbus_connection.int_page = NULL;
}
- set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[0], 1);
- set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[1], 1);
+ if (vmbus_connection.monitor_pages[0]) {
+ if (!set_memory_encrypted(
+ (unsigned long)vmbus_connection.monitor_pages[0], 1))
+ hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
+ vmbus_connection.monitor_pages[0] = NULL;
+ }
- hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
- hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
- vmbus_connection.monitor_pages[0] = NULL;
- vmbus_connection.monitor_pages[1] = NULL;
+ if (vmbus_connection.monitor_pages[1]) {
+ if (!set_memory_encrypted(
+ (unsigned long)vmbus_connection.monitor_pages[1], 1))
+ hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
+ vmbus_connection.monitor_pages[1] = NULL;
+ }
}
/*
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 51e5018ac9b26..a8ad728354cb0 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -270,7 +270,7 @@ void hv_synic_enable_regs(unsigned int cpu)
union hv_synic_scontrol sctrl;
/* Setup the Synic's message page */
- simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
+ simp.as_uint64 = hv_get_msr(HV_MSR_SIMP);
simp.simp_enabled = 1;
if (ms_hyperv.paravisor_present || hv_root_partition) {
@@ -286,10 +286,10 @@ void hv_synic_enable_regs(unsigned int cpu)
>> HV_HYP_PAGE_SHIFT;
}
- hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
+ hv_set_msr(HV_MSR_SIMP, simp.as_uint64);
/* Setup the Synic's event page */
- siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
+ siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
siefp.siefp_enabled = 1;
if (ms_hyperv.paravisor_present || hv_root_partition) {
@@ -305,13 +305,12 @@ void hv_synic_enable_regs(unsigned int cpu)
>> HV_HYP_PAGE_SHIFT;
}
- hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
+ hv_set_msr(HV_MSR_SIEFP, siefp.as_uint64);
/* Setup the shared SINT. */
if (vmbus_irq != -1)
enable_percpu_irq(vmbus_irq, 0);
- shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
- VMBUS_MESSAGE_SINT);
+ shared_sint.as_uint64 = hv_get_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT);
shared_sint.vector = vmbus_interrupt;
shared_sint.masked = false;
@@ -326,14 +325,13 @@ void hv_synic_enable_regs(unsigned int cpu)
#else
shared_sint.auto_eoi = 0;
#endif
- hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
- shared_sint.as_uint64);
+ hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
/* Enable the global synic bit */
- sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
+ sctrl.as_uint64 = hv_get_msr(HV_MSR_SCONTROL);
sctrl.enable = 1;
- hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
+ hv_set_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
}
int hv_synic_init(unsigned int cpu)
@@ -357,17 +355,15 @@ void hv_synic_disable_regs(unsigned int cpu)
union hv_synic_siefp siefp;
union hv_synic_scontrol sctrl;
- shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
- VMBUS_MESSAGE_SINT);
+ shared_sint.as_uint64 = hv_get_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT);
shared_sint.masked = 1;
/* Need to correctly cleanup in the case of SMP!!! */
/* Disable the interrupt */
- hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
- shared_sint.as_uint64);
+ hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
- simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
+ simp.as_uint64 = hv_get_msr(HV_MSR_SIMP);
/*
* In Isolation VM, sim and sief pages are allocated by
* paravisor. These pages also will be used by kdump
@@ -382,9 +378,9 @@ void hv_synic_disable_regs(unsigned int cpu)
simp.base_simp_gpa = 0;
}
- hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
+ hv_set_msr(HV_MSR_SIMP, simp.as_uint64);
- siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
+ siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
siefp.siefp_enabled = 0;
if (ms_hyperv.paravisor_present || hv_root_partition) {
@@ -394,12 +390,12 @@ void hv_synic_disable_regs(unsigned int cpu)
siefp.base_siefp_gpa = 0;
}
- hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
+ hv_set_msr(HV_MSR_SIEFP, siefp.as_uint64);
/* Disable the global synic bit */
- sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
+ sctrl.as_uint64 = hv_get_msr(HV_MSR_SCONTROL);
sctrl.enable = 0;
- hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
+ hv_set_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
if (vmbus_irq != -1)
disable_percpu_irq(vmbus_irq);
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 0285a74363b3d..dde3f9b6871af 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -20,8 +20,11 @@
#include <linux/sched/task_stack.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
+#include <linux/random.h>
+#include <linux/efi.h>
#include <linux/kdebug.h>
#include <linux/kmsg_dump.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/set_memory.h>
@@ -227,19 +230,19 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
* contain the size of the panic data in that page. Rest of the
* registers are no-op when the NOTIFY_MSG flag is set.
*/
- hv_set_register(HV_REGISTER_CRASH_P0, 0);
- hv_set_register(HV_REGISTER_CRASH_P1, 0);
- hv_set_register(HV_REGISTER_CRASH_P2, 0);
- hv_set_register(HV_REGISTER_CRASH_P3, virt_to_phys(hv_panic_page));
- hv_set_register(HV_REGISTER_CRASH_P4, bytes_written);
+ hv_set_msr(HV_MSR_CRASH_P0, 0);
+ hv_set_msr(HV_MSR_CRASH_P1, 0);
+ hv_set_msr(HV_MSR_CRASH_P2, 0);
+ hv_set_msr(HV_MSR_CRASH_P3, virt_to_phys(hv_panic_page));
+ hv_set_msr(HV_MSR_CRASH_P4, bytes_written);
/*
* Let Hyper-V know there is crash data available along with
* the panic message.
*/
- hv_set_register(HV_REGISTER_CRASH_CTL,
- (HV_CRASH_CTL_CRASH_NOTIFY |
- HV_CRASH_CTL_CRASH_NOTIFY_MSG));
+ hv_set_msr(HV_MSR_CRASH_CTL,
+ (HV_CRASH_CTL_CRASH_NOTIFY |
+ HV_CRASH_CTL_CRASH_NOTIFY_MSG));
}
static struct kmsg_dumper hv_kmsg_dumper = {
@@ -278,6 +281,14 @@ static void hv_kmsg_dump_register(void)
int __init hv_common_init(void)
{
int i;
+ union hv_hypervisor_version_info version;
+
+ /* Get information about the Hyper-V host version */
+ if (!hv_get_hypervisor_version(&version))
+ pr_info("Hyper-V: Host Build %d.%d.%d.%d-%d-%d\n",
+ version.major_version, version.minor_version,
+ version.build_number, version.service_number,
+ version.service_pack, version.service_branch);
if (hv_is_isolation_supported())
sysctl_record_panic_msg = 0;
@@ -310,7 +321,7 @@ int __init hv_common_init(void)
* Register for panic kmsg callback only if the right
* capability is supported by the hypervisor.
*/
- hyperv_crash_ctl = hv_get_register(HV_REGISTER_CRASH_CTL);
+ hyperv_crash_ctl = hv_get_msr(HV_MSR_CRASH_CTL);
if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
hv_kmsg_dump_register();
@@ -347,6 +358,72 @@ int __init hv_common_init(void)
return 0;
}
+void __init ms_hyperv_late_init(void)
+{
+ struct acpi_table_header *header;
+ acpi_status status;
+ u8 *randomdata;
+ u32 length, i;
+
+ /*
+ * Seed the Linux random number generator with entropy provided by
+ * the Hyper-V host in ACPI table OEM0.
+ */
+ if (!IS_ENABLED(CONFIG_ACPI))
+ return;
+
+ status = acpi_get_table("OEM0", 0, &header);
+ if (ACPI_FAILURE(status) || !header)
+ return;
+
+ /*
+ * Since the "OEM0" table name is for OEM specific usage, verify
+ * that what we're seeing purports to be from Microsoft.
+ */
+ if (strncmp(header->oem_table_id, "MICROSFT", 8))
+ goto error;
+
+ /*
+ * Ensure the length is reasonable. Requiring at least 8 bytes and
+ * no more than 4K bytes is somewhat arbitrary and just protects
+ * against a malformed table. Hyper-V currently provides 64 bytes,
+ * but allow for a change in a later version.
+ */
+ if (header->length < sizeof(*header) + 8 ||
+ header->length > sizeof(*header) + SZ_4K)
+ goto error;
+
+ length = header->length - sizeof(*header);
+ randomdata = (u8 *)(header + 1);
+
+ pr_debug("Hyper-V: Seeding rng with %d random bytes from ACPI table OEM0\n",
+ length);
+
+ add_bootloader_randomness(randomdata, length);
+
+ /*
+ * To prevent the seed data from being visible in /sys/firmware/acpi,
+ * zero out the random data in the ACPI table and fixup the checksum.
+ * The zero'ing is done out of an abundance of caution in avoiding
+ * potential security risks to the rng. Similarly, reset the table
+ * length to just the header size so that a subsequent kexec doesn't
+ * try to use the zero'ed out random data.
+ */
+ for (i = 0; i < length; i++) {
+ header->checksum += randomdata[i];
+ randomdata[i] = 0;
+ }
+
+ for (i = 0; i < sizeof(header->length); i++)
+ header->checksum += ((u8 *)&header->length)[i];
+ header->length = sizeof(*header);
+ for (i = 0; i < sizeof(header->length); i++)
+ header->checksum -= ((u8 *)&header->length)[i];
+
+error:
+ acpi_put_table(header);
+}
+
/*
* Hyper-V specific initialization and die code for
* individual CPUs that is common across all architectures.
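
The zero-and-fixup loop in ms_hyperv_late_init() above relies on the ACPI rule that all bytes of a table, checksum byte included, sum to 0 modulo 256: clearing a byte of value v is balanced by adding v back into the checksum, and shrinking the length field is balanced the same way, byte by byte, so both the original-length table and the shortened one still checksum to zero (the cleared tail contributes nothing). A small standalone C check of that invariant, using a toy table layout rather than a real ACPI header.

#include <stdint.h>
#include <stdio.h>

/* Sum of every byte, checksum included, must be 0 mod 256. */
static uint8_t table_sum(const uint8_t *t, size_t len)
{
	uint8_t s = 0;

	while (len--)
		s += *t++;
	return s;
}

int main(void)
{
	/* toy "table": [0] = checksum, [1] = length, [2..] payload */
	uint8_t tbl[8] = { 0, 8, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	size_t i;

	tbl[0] = (uint8_t)(0 - table_sum(tbl, sizeof(tbl)));	/* make it valid */
	printf("before: sum=%u\n", table_sum(tbl, sizeof(tbl)));

	/* zero the payload, compensating in the checksum as the kernel does */
	for (i = 2; i < sizeof(tbl); i++) {
		tbl[0] += tbl[i];
		tbl[i] = 0;
	}

	/* shrink the recorded length, again compensating byte for byte */
	tbl[0] += tbl[1];
	tbl[1] = 2;
	tbl[0] -= tbl[1];

	printf("after:  full sum=%u, header-only sum=%u\n",
	       table_sum(tbl, sizeof(tbl)), table_sum(tbl, tbl[1]));
	return 0;
}
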
@@ -409,7 +486,7 @@ int hv_common_cpu_init(unsigned int cpu)
*inputarg = mem;
}
- msr_vp_index = hv_get_register(HV_REGISTER_VP_INDEX);
+ msr_vp_index = hv_get_msr(HV_MSR_VP_INDEX);
hv_vp_index[cpu] = msr_vp_index;
@@ -506,7 +583,7 @@ EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);
*/
static u64 __hv_read_ref_counter(void)
{
- return hv_get_register(HV_REGISTER_TIME_REF_COUNT);
+ return hv_get_msr(HV_MSR_TIME_REF_COUNT);
}
u64 (*hv_read_reference_counter)(void) = __hv_read_ref_counter;
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 7f7965f3d1878..12a707ab73f85 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -131,7 +131,7 @@ static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
+ return sysfs_emit(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);
@@ -142,7 +142,7 @@ static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n", hv_dev->channel->state);
+ return sysfs_emit(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);
@@ -153,7 +153,7 @@ static ssize_t monitor_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
+ return sysfs_emit(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);
@@ -164,8 +164,8 @@ static ssize_t class_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "{%pUl}\n",
- &hv_dev->channel->offermsg.offer.if_type);
+ return sysfs_emit(buf, "{%pUl}\n",
+ &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);
@@ -176,8 +176,8 @@ static ssize_t device_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "{%pUl}\n",
- &hv_dev->channel->offermsg.offer.if_instance);
+ return sysfs_emit(buf, "{%pUl}\n",
+ &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);
@@ -186,7 +186,7 @@ static ssize_t modalias_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
- return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
+ return sysfs_emit(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);
@@ -199,7 +199,7 @@ static ssize_t numa_node_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
+ return sysfs_emit(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
}
static DEVICE_ATTR_RO(numa_node);
#endif
@@ -212,9 +212,8 @@ static ssize_t server_monitor_pending_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_pending(hv_dev->channel,
- vmbus_connection.monitor_pages[0]));
+ return sysfs_emit(buf, "%d\n", channel_pending(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);
@@ -226,9 +225,8 @@ static ssize_t client_monitor_pending_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_pending(hv_dev->channel,
- vmbus_connection.monitor_pages[1]));
+ return sysfs_emit(buf, "%d\n", channel_pending(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);
@@ -240,9 +238,8 @@ static ssize_t server_monitor_latency_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_latency(hv_dev->channel,
- vmbus_connection.monitor_pages[0]));
+ return sysfs_emit(buf, "%d\n", channel_latency(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);
@@ -254,9 +251,8 @@ static ssize_t client_monitor_latency_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_latency(hv_dev->channel,
- vmbus_connection.monitor_pages[1]));
+ return sysfs_emit(buf, "%d\n", channel_latency(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);
@@ -268,9 +264,8 @@ static ssize_t server_monitor_conn_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_conn_id(hv_dev->channel,
- vmbus_connection.monitor_pages[0]));
+ return sysfs_emit(buf, "%d\n", channel_conn_id(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);
@@ -282,9 +277,8 @@ static ssize_t client_monitor_conn_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_conn_id(hv_dev->channel,
- vmbus_connection.monitor_pages[1]));
+ return sysfs_emit(buf, "%d\n", channel_conn_id(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);
@@ -303,7 +297,7 @@ static ssize_t out_intr_mask_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
+ return sysfs_emit(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);
@@ -321,7 +315,7 @@ static ssize_t out_read_index_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.current_read_index);
+ return sysfs_emit(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);
@@ -340,7 +334,7 @@ static ssize_t out_write_index_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.current_write_index);
+ return sysfs_emit(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);
@@ -359,7 +353,7 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
+ return sysfs_emit(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);
@@ -378,7 +372,7 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
+ return sysfs_emit(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);
@@ -396,7 +390,7 @@ static ssize_t in_intr_mask_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
+ return sysfs_emit(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);
@@ -414,7 +408,7 @@ static ssize_t in_read_index_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.current_read_index);
+ return sysfs_emit(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);
@@ -432,7 +426,7 @@ static ssize_t in_write_index_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.current_write_index);
+ return sysfs_emit(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);
@@ -451,7 +445,7 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
+ return sysfs_emit(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);
@@ -470,7 +464,7 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
+ return sysfs_emit(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
@@ -480,7 +474,7 @@ static ssize_t channel_vp_mapping_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
- int buf_size = PAGE_SIZE, n_written, tot_written;
+ int n_written;
struct list_head *cur;
if (!channel)
@@ -488,25 +482,21 @@ static ssize_t channel_vp_mapping_show(struct device *dev,
mutex_lock(&vmbus_connection.channel_mutex);
- tot_written = snprintf(buf, buf_size, "%u:%u\n",
- channel->offermsg.child_relid, channel->target_cpu);
+ n_written = sysfs_emit(buf, "%u:%u\n",
+ channel->offermsg.child_relid,
+ channel->target_cpu);
list_for_each(cur, &channel->sc_list) {
- if (tot_written >= buf_size - 1)
- break;
cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
- n_written = scnprintf(buf + tot_written,
- buf_size - tot_written,
- "%u:%u\n",
- cur_sc->offermsg.child_relid,
- cur_sc->target_cpu);
- tot_written += n_written;
+ n_written += sysfs_emit_at(buf, n_written, "%u:%u\n",
+ cur_sc->offermsg.child_relid,
+ cur_sc->target_cpu);
}
mutex_unlock(&vmbus_connection.channel_mutex);
- return tot_written;
+ return n_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
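
The sprintf()/scnprintf() → sysfs_emit()/sysfs_emit_at() conversions above drop the manual PAGE_SIZE accounting because both helpers clamp output to one page themselves (and warn if the buffer is not the page-aligned one sysfs hands in). A hedged sketch of a multi-record show() callback in the new style; the attribute name and the values printed are illustrative.

#include <linux/device.h>
#include <linux/sysfs.h>

/* Illustrative multi-line show() using sysfs_emit()/sysfs_emit_at(). */
static ssize_t mappings_show_sketch(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	int n, i;

	/* First record: sysfs_emit() writes at offset 0, bounded to PAGE_SIZE */
	n = sysfs_emit(buf, "%u:%u\n", 0u, 0u);

	/* Subsequent records append at the running offset */
	for (i = 1; i < 4; i++)
		n += sysfs_emit_at(buf, n, "%u:%u\n", i, i);

	return n;
}
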
@@ -516,7 +506,7 @@ static ssize_t vendor_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
- return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
+ return sysfs_emit(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);
@@ -526,7 +516,7 @@ static ssize_t device_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
- return sprintf(buf, "0x%x\n", hv_dev->device_id);
+ return sysfs_emit(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);
@@ -551,7 +541,7 @@ static ssize_t driver_override_show(struct device *dev,
ssize_t len;
device_lock(dev);
- len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
+ len = sysfs_emit(buf, "%s\n", hv_dev->driver_override);
device_unlock(dev);
return len;
@@ -2359,10 +2349,9 @@ static int vmbus_platform_driver_probe(struct platform_device *pdev)
return vmbus_acpi_add(pdev);
}
-static int vmbus_platform_driver_remove(struct platform_device *pdev)
+static void vmbus_platform_driver_remove(struct platform_device *pdev)
{
vmbus_mmio_remove();
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -2542,7 +2531,7 @@ static const struct dev_pm_ops vmbus_bus_pm = {
static struct platform_driver vmbus_platform_driver = {
.probe = vmbus_platform_driver_probe,
- .remove = vmbus_platform_driver_remove,
+ .remove_new = vmbus_platform_driver_remove,
.driver = {
.name = "vmbus",
.acpi_match_table = ACPI_PTR(vmbus_acpi_device_ids),
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 4fa837e65a617..efcf78673e747 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -108,7 +108,7 @@ struct dell_smm_cooling_data {
struct dell_smm_data *data;
};
-MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
+MODULE_AUTHOR("Massimo Dal Zotto <dz@debian.org>");
MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_DESCRIPTION("Dell laptop SMM BIOS hwmon driver");
MODULE_LICENSE("GPL");
@@ -1600,6 +1600,7 @@ static struct wmi_driver dell_smm_wmi_driver = {
},
.id_table = dell_smm_wmi_id_table,
.probe = dell_smm_wmi_probe,
+ .no_singleton = true,
};
/*
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 9823afb0675a0..2765d5f1b7f05 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -18,7 +18,7 @@
#define DRV_MODULE_VERSION "0.1"
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_DESCRIPTION("Ultra45 environmental monitor driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index a9fd9ca45f2a8..27b47b8623c09 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -74,17 +74,12 @@ static const struct hwspinlock_ops omap_hwspinlock_ops = {
static int omap_hwspinlock_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
struct hwspinlock_device *bank;
- struct hwspinlock *hwlock;
void __iomem *io_base;
int num_locks, i, ret;
/* Only a single hwspinlock block device is supported */
int base_id = 0;
- if (!node)
- return -ENODEV;
-
io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
@@ -93,10 +88,10 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
* make sure the module is enabled and clocked before reading
* the module SYSSTATUS register
*/
- pm_runtime_enable(&pdev->dev);
+ devm_pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
- goto runtime_err;
+ return ret;
/* Determine number of locks */
i = readl(io_base + SYSSTATUS_OFFSET);
@@ -108,55 +103,24 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
*/
ret = pm_runtime_put(&pdev->dev);
if (ret < 0)
- goto runtime_err;
+ return ret;
/* one of the four lsb's must be set, and nothing else */
- if (hweight_long(i & 0xf) != 1 || i > 8) {
- ret = -EINVAL;
- goto runtime_err;
- }
+ if (hweight_long(i & 0xf) != 1 || i > 8)
+ return -EINVAL;
num_locks = i * 32; /* actual number of locks in this device */
bank = devm_kzalloc(&pdev->dev, struct_size(bank, lock, num_locks),
GFP_KERNEL);
- if (!bank) {
- ret = -ENOMEM;
- goto runtime_err;
- }
-
- platform_set_drvdata(pdev, bank);
+ if (!bank)
+ return -ENOMEM;
- for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++)
- hwlock->priv = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
+ for (i = 0; i < num_locks; i++)
+ bank->lock[i].priv = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
- ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops,
+ return devm_hwspin_lock_register(&pdev->dev, bank, &omap_hwspinlock_ops,
base_id, num_locks);
- if (ret)
- goto runtime_err;
-
- dev_dbg(&pdev->dev, "Registered %d locks with HwSpinlock core\n",
- num_locks);
-
- return 0;
-
-runtime_err:
- pm_runtime_disable(&pdev->dev);
- return ret;
-}
-
-static void omap_hwspinlock_remove(struct platform_device *pdev)
-{
- struct hwspinlock_device *bank = platform_get_drvdata(pdev);
- int ret;
-
- ret = hwspin_lock_unregister(bank);
- if (ret) {
- dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
- return;
- }
-
- pm_runtime_disable(&pdev->dev);
}
static const struct of_device_id omap_hwspinlock_of_match[] = {
@@ -169,7 +133,6 @@ MODULE_DEVICE_TABLE(of, omap_hwspinlock_of_match);
static struct platform_driver omap_hwspinlock_driver = {
.probe = omap_hwspinlock_probe,
- .remove_new = omap_hwspinlock_remove,
.driver = {
.name = "omap_hwspinlock",
.of_match_table = omap_hwspinlock_of_match,
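
The omap_hwspinlock rework above leans on devres: devm_pm_runtime_enable() arranges for pm_runtime_disable() at driver detach, and devm_hwspin_lock_register() unregisters the bank automatically, which is why the error labels and the remove callback disappear. A reduced, hedged sketch of the resulting probe flow — not a standalone build (it assumes the driver's context, including the internal hwspinlock_device layout), and the lock count, empty ops table and function name are placeholders rather than the driver's real values.

static const struct hwspinlock_ops sketch_ops;	/* trylock/unlock elided */

/* Reduced probe sketch: every resource is cleaned up by devres. */
static int hwspinlock_probe_sketch(struct platform_device *pdev)
{
	struct hwspinlock_device *bank;
	int num_locks = 32;	/* placeholder for the SYSSTATUS-derived count */
	int ret;

	/* pm_runtime_disable() is queued for unbind automatically */
	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	bank = devm_kzalloc(&pdev->dev, struct_size(bank, lock, num_locks),
			    GFP_KERNEL);
	if (!bank)
		return -ENOMEM;

	/* Unregistered automatically as well; no remove callback needed */
	return devm_hwspin_lock_register(&pdev->dev, bank, &sketch_ops,
					 0, num_locks);
}
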
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 995d3b2c76df2..4ba478211b318 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -2,6 +2,26 @@
#
# Makefile for CoreSight drivers.
#
+
+# Current W=1 warnings
+subdir-ccflags-y += -Wextra -Wunused -Wno-unused-parameter
+subdir-ccflags-y += -Wmissing-declarations
+subdir-ccflags-y += -Wmissing-format-attribute
+subdir-ccflags-y += -Wmissing-prototypes
+subdir-ccflags-y += -Wold-style-definition
+subdir-ccflags-y += -Wmissing-include-dirs
+subdir-ccflags-y += -Wno-sign-compare
+condflags := \
+ $(call cc-option, -Wrestrict) \
+ $(call cc-option, -Wunused-but-set-variable) \
+ $(call cc-option, -Wunused-const-variable) \
+ $(call cc-option, -Wpacked-not-aligned) \
+ $(call cc-option, -Wformat-overflow) \
+ $(call cc-option, -Wformat-truncation) \
+ $(call cc-option, -Wstringop-overflow) \
+ $(call cc-option, -Wstringop-truncation)
+subdir-ccflags-y += $(condflags)
+
obj-$(CONFIG_CORESIGHT) += coresight.o
coresight-y := coresight-core.o coresight-etm-perf.o coresight-platform.o \
coresight-sysfs.o coresight-syscfg.o coresight-config.o \
diff --git a/drivers/hwtracing/coresight/coresight-cfg-afdo.c b/drivers/hwtracing/coresight/coresight-cfg-afdo.c
index 84b31184252bb..e794f2e145fad 100644
--- a/drivers/hwtracing/coresight/coresight-cfg-afdo.c
+++ b/drivers/hwtracing/coresight/coresight-cfg-afdo.c
@@ -9,6 +9,7 @@
/* ETMv4 includes and features */
#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
#include "coresight-etm4x-cfg.h"
+#include "coresight-cfg-preload.h"
/* preload configurations and features */
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index d7f0e231feb99..b83613e342891 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -9,7 +9,6 @@
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
-#include <linux/idr.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
@@ -25,15 +24,12 @@
#include "coresight-priv.h"
#include "coresight-syscfg.h"
-static DEFINE_MUTEX(coresight_mutex);
-static DEFINE_PER_CPU(struct coresight_device *, csdev_sink);
-
/*
- * Use IDR to map the hash of the source's device name
- * to the pointer of path for the source. The idr is for
- * the sources which aren't associated with CPU.
+ * Mutex used to lock all sysfs enable and disable actions and loading and
+ * unloading devices by the Coresight core.
*/
-static DEFINE_IDR(path_idr);
+DEFINE_MUTEX(coresight_mutex);
+static DEFINE_PER_CPU(struct coresight_device *, csdev_sink);
/**
* struct coresight_node - elements of a path, from source to sink
@@ -46,12 +42,6 @@ struct coresight_node {
};
/*
- * When operating Coresight drivers from the sysFS interface, only a single
- * path can exist from a tracer (associated to a CPU) to a sink.
- */
-static DEFINE_PER_CPU(struct list_head *, tracer_path);
-
-/*
* When losing synchronisation a new barrier packet needs to be inserted at the
* beginning of the data collected in a buffer. That way the decoder knows that
* it needs to look for another sync sequence.
@@ -61,34 +51,6 @@ EXPORT_SYMBOL_GPL(coresight_barrier_pkt);
static const struct cti_assoc_op *cti_assoc_ops;
-ssize_t coresight_simple_show_pair(struct device *_dev,
- struct device_attribute *attr, char *buf)
-{
- struct coresight_device *csdev = container_of(_dev, struct coresight_device, dev);
- struct cs_pair_attribute *cs_attr = container_of(attr, struct cs_pair_attribute, attr);
- u64 val;
-
- pm_runtime_get_sync(_dev->parent);
- val = csdev_access_relaxed_read_pair(&csdev->access, cs_attr->lo_off, cs_attr->hi_off);
- pm_runtime_put_sync(_dev->parent);
- return sysfs_emit(buf, "0x%llx\n", val);
-}
-EXPORT_SYMBOL_GPL(coresight_simple_show_pair);
-
-ssize_t coresight_simple_show32(struct device *_dev,
- struct device_attribute *attr, char *buf)
-{
- struct coresight_device *csdev = container_of(_dev, struct coresight_device, dev);
- struct cs_off_attribute *cs_attr = container_of(attr, struct cs_off_attribute, attr);
- u64 val;
-
- pm_runtime_get_sync(_dev->parent);
- val = csdev_access_relaxed_read32(&csdev->access, cs_attr->off);
- pm_runtime_put_sync(_dev->parent);
- return sysfs_emit(buf, "0x%llx\n", val);
-}
-EXPORT_SYMBOL_GPL(coresight_simple_show32);
-
void coresight_set_cti_ops(const struct cti_assoc_op *cti_op)
{
cti_assoc_ops = cti_op;
@@ -279,42 +241,18 @@ EXPORT_SYMBOL_GPL(coresight_add_helper);
static int coresight_enable_sink(struct coresight_device *csdev,
enum cs_mode mode, void *data)
{
- int ret;
-
- /*
- * We need to make sure the "new" session is compatible with the
- * existing "mode" of operation.
- */
- if (!sink_ops(csdev)->enable)
- return -EINVAL;
-
- ret = sink_ops(csdev)->enable(csdev, mode, data);
- if (ret)
- return ret;
-
- csdev->enable = true;
-
- return 0;
+ return sink_ops(csdev)->enable(csdev, mode, data);
}
static void coresight_disable_sink(struct coresight_device *csdev)
{
- int ret;
-
- if (!sink_ops(csdev)->disable)
- return;
-
- ret = sink_ops(csdev)->disable(csdev);
- if (ret)
- return;
- csdev->enable = false;
+ sink_ops(csdev)->disable(csdev);
}
static int coresight_enable_link(struct coresight_device *csdev,
struct coresight_device *parent,
struct coresight_device *child)
{
- int ret = 0;
int link_subtype;
struct coresight_connection *inconn, *outconn;
@@ -330,21 +268,13 @@ static int coresight_enable_link(struct coresight_device *csdev,
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT && IS_ERR(outconn))
return PTR_ERR(outconn);
- if (link_ops(csdev)->enable) {
- ret = link_ops(csdev)->enable(csdev, inconn, outconn);
- if (!ret)
- csdev->enable = true;
- }
-
- return ret;
+ return link_ops(csdev)->enable(csdev, inconn, outconn);
}
static void coresight_disable_link(struct coresight_device *csdev,
struct coresight_device *parent,
struct coresight_device *child)
{
- int i;
- int link_subtype;
struct coresight_connection *inconn, *outconn;
if (!parent || !child)
@@ -352,49 +282,9 @@ static void coresight_disable_link(struct coresight_device *csdev,
inconn = coresight_find_out_connection(parent, csdev);
outconn = coresight_find_out_connection(csdev, child);
- link_subtype = csdev->subtype.link_subtype;
- if (link_ops(csdev)->disable) {
- link_ops(csdev)->disable(csdev, inconn, outconn);
- }
-
- if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
- for (i = 0; i < csdev->pdata->nr_inconns; i++)
- if (atomic_read(&csdev->pdata->in_conns[i]->dest_refcnt) !=
- 0)
- return;
- } else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) {
- for (i = 0; i < csdev->pdata->nr_outconns; i++)
- if (atomic_read(&csdev->pdata->out_conns[i]->src_refcnt) !=
- 0)
- return;
- } else {
- if (atomic_read(&csdev->refcnt) != 0)
- return;
- }
-
- csdev->enable = false;
-}
-
-int coresight_enable_source(struct coresight_device *csdev, enum cs_mode mode,
- void *data)
-{
- int ret;
-
- if (!csdev->enable) {
- if (source_ops(csdev)->enable) {
- ret = source_ops(csdev)->enable(csdev, data, mode);
- if (ret)
- return ret;
- }
- csdev->enable = true;
- }
-
- atomic_inc(&csdev->refcnt);
-
- return 0;
+ link_ops(csdev)->disable(csdev, inconn, outconn);
}
-EXPORT_SYMBOL_GPL(coresight_enable_source);
static bool coresight_is_helper(struct coresight_device *csdev)
{
@@ -404,29 +294,12 @@ static bool coresight_is_helper(struct coresight_device *csdev)
static int coresight_enable_helper(struct coresight_device *csdev,
enum cs_mode mode, void *data)
{
- int ret;
-
- if (!helper_ops(csdev)->enable)
- return 0;
- ret = helper_ops(csdev)->enable(csdev, mode, data);
- if (ret)
- return ret;
-
- csdev->enable = true;
- return 0;
+ return helper_ops(csdev)->enable(csdev, mode, data);
}
static void coresight_disable_helper(struct coresight_device *csdev)
{
- int ret;
-
- if (!helper_ops(csdev)->disable)
- return;
-
- ret = helper_ops(csdev)->disable(csdev, NULL);
- if (ret)
- return;
- csdev->enable = false;
+ helper_ops(csdev)->disable(csdev, NULL);
}
static void coresight_disable_helpers(struct coresight_device *csdev)
@@ -441,25 +314,20 @@ static void coresight_disable_helpers(struct coresight_device *csdev)
}
}
-/**
- * coresight_disable_source - Drop the reference count by 1 and disable
- * the device if there are no users left.
- *
- * @csdev: The coresight device to disable
- * @data: Opaque data to pass on to the disable function of the source device.
- * For example in perf mode this is a pointer to the struct perf_event.
+/*
+ * Helper function to call source_ops(csdev)->disable and also disable the
+ * helpers.
*
- * Returns true if the device has been disabled.
+ * There is an imbalance between coresight_enable_path() and
+ * coresight_disable_path(). Enabling also enables the source's helpers as part
+ * of the path, but disabling always skips the first item in the path (which is
+ * the source), so sources and their helpers don't get disabled as part of that
+ * function and we need the extra step here.
*/
-bool coresight_disable_source(struct coresight_device *csdev, void *data)
+void coresight_disable_source(struct coresight_device *csdev, void *data)
{
- if (atomic_dec_return(&csdev->refcnt) == 0) {
- if (source_ops(csdev)->disable)
- source_ops(csdev)->disable(csdev, data);
- coresight_disable_helpers(csdev);
- csdev->enable = false;
- }
- return !csdev->enable;
+ source_ops(csdev)->disable(csdev, data);
+ coresight_disable_helpers(csdev);
}
EXPORT_SYMBOL_GPL(coresight_disable_source);
@@ -484,7 +352,7 @@ static void coresight_disable_path_from(struct list_head *path,
/*
* ETF devices are tricky... They can be a link or a sink,
* depending on how they are configured. If an ETF has been
- * "activated" it will be configured as a sink, otherwise
+ * selected as a sink it will be configured as a sink, otherwise
* go ahead with the link configuration.
*/
if (type == CORESIGHT_DEV_TYPE_LINKSINK)
@@ -562,7 +430,7 @@ int coresight_enable_path(struct list_head *path, enum cs_mode mode,
/*
* ETF devices are tricky... They can be a link or a sink,
* depending on how they are configured. If an ETF has been
- * "activated" it will be configured as a sink, otherwise
+ * selected as a sink it will be configured as a sink, otherwise
* go ahead with the link configuration.
*/
if (type == CORESIGHT_DEV_TYPE_LINKSINK)
@@ -619,48 +487,6 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
return csdev;
}
-static struct coresight_device *
-coresight_find_enabled_sink(struct coresight_device *csdev)
-{
- int i;
- struct coresight_device *sink = NULL;
-
- if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
- csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
- csdev->activated)
- return csdev;
-
- /*
- * Recursively explore each port found on this element.
- */
- for (i = 0; i < csdev->pdata->nr_outconns; i++) {
- struct coresight_device *child_dev;
-
- child_dev = csdev->pdata->out_conns[i]->dest_dev;
- if (child_dev)
- sink = coresight_find_enabled_sink(child_dev);
- if (sink)
- return sink;
- }
-
- return NULL;
-}
-
-/**
- * coresight_get_enabled_sink - returns the first enabled sink using
- * connection based search starting from the source reference
- *
- * @source: Coresight source device reference
- */
-struct coresight_device *
-coresight_get_enabled_sink(struct coresight_device *source)
-{
- if (!source)
- return NULL;
-
- return coresight_find_enabled_sink(source);
-}
-
static int coresight_sink_by_id(struct device *dev, const void *data)
{
struct coresight_device *csdev = to_coresight_device(dev);
@@ -794,11 +620,10 @@ static void coresight_drop_device(struct coresight_device *csdev)
* @sink: The final sink we want in this path.
* @path: The list to add devices to.
*
- * The tree of Coresight device is traversed until an activated sink is
- * found. From there the sink is added to the list along with all the
- * devices that led to that point - the end result is a list from source
- * to sink. In that list the source is the first device and the sink the
- * last one.
+ * The tree of Coresight device is traversed until @sink is found.
+ * From there the sink is added to the list along with all the devices that led
+ * to that point - the end result is a list from source to sink. In that list
+ * the source is the first device and the sink the last one.
*/
static int _coresight_build_path(struct coresight_device *csdev,
struct coresight_device *sink,
@@ -808,7 +633,7 @@ static int _coresight_build_path(struct coresight_device *csdev,
bool found = false;
struct coresight_node *node;
- /* An activated sink has been found. Enqueue the element */
+ /* The sink has been found. Enqueue the element */
if (csdev == sink)
goto out;
@@ -1072,269 +897,6 @@ static void coresight_clear_default_sink(struct coresight_device *csdev)
}
}
-/** coresight_validate_source - make sure a source has the right credentials
- * @csdev: the device structure for a source.
- * @function: the function this was called from.
- *
- * Assumes the coresight_mutex is held.
- */
-static int coresight_validate_source(struct coresight_device *csdev,
- const char *function)
-{
- u32 type, subtype;
-
- type = csdev->type;
- subtype = csdev->subtype.source_subtype;
-
- if (type != CORESIGHT_DEV_TYPE_SOURCE) {
- dev_err(&csdev->dev, "wrong device type in %s\n", function);
- return -EINVAL;
- }
-
- if (subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_PROC &&
- subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE &&
- subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM &&
- subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS) {
- dev_err(&csdev->dev, "wrong device subtype in %s\n", function);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int coresight_enable(struct coresight_device *csdev)
-{
- int cpu, ret = 0;
- struct coresight_device *sink;
- struct list_head *path;
- enum coresight_dev_subtype_source subtype;
- u32 hash;
-
- subtype = csdev->subtype.source_subtype;
-
- mutex_lock(&coresight_mutex);
-
- ret = coresight_validate_source(csdev, __func__);
- if (ret)
- goto out;
-
- if (csdev->enable) {
- /*
- * There could be multiple applications driving the software
- * source. So keep the refcount for each such user when the
- * source is already enabled.
- */
- if (subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE)
- atomic_inc(&csdev->refcnt);
- goto out;
- }
-
- sink = coresight_get_enabled_sink(csdev);
- if (!sink) {
- ret = -EINVAL;
- goto out;
- }
-
- path = coresight_build_path(csdev, sink);
- if (IS_ERR(path)) {
- pr_err("building path(s) failed\n");
- ret = PTR_ERR(path);
- goto out;
- }
-
- ret = coresight_enable_path(path, CS_MODE_SYSFS, NULL);
- if (ret)
- goto err_path;
-
- ret = coresight_enable_source(csdev, CS_MODE_SYSFS, NULL);
- if (ret)
- goto err_source;
-
- switch (subtype) {
- case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
- /*
- * When working from sysFS it is important to keep track
- * of the paths that were created so that they can be
- * undone in 'coresight_disable()'. Since there can only
- * be a single session per tracer (when working from sysFS)
- * a per-cpu variable will do just fine.
- */
- cpu = source_ops(csdev)->cpu_id(csdev);
- per_cpu(tracer_path, cpu) = path;
- break;
- case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
- case CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM:
- case CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS:
- /*
- * Use the hash of source's device name as ID
- * and map the ID to the pointer of the path.
- */
- hash = hashlen_hash(hashlen_string(NULL, dev_name(&csdev->dev)));
- ret = idr_alloc_u32(&path_idr, path, &hash, hash, GFP_KERNEL);
- if (ret)
- goto err_source;
- break;
- default:
- /* We can't be here */
- break;
- }
-
-out:
- mutex_unlock(&coresight_mutex);
- return ret;
-
-err_source:
- coresight_disable_path(path);
-
-err_path:
- coresight_release_path(path);
- goto out;
-}
-EXPORT_SYMBOL_GPL(coresight_enable);
-
-void coresight_disable(struct coresight_device *csdev)
-{
- int cpu, ret;
- struct list_head *path = NULL;
- u32 hash;
-
- mutex_lock(&coresight_mutex);
-
- ret = coresight_validate_source(csdev, __func__);
- if (ret)
- goto out;
-
- if (!csdev->enable || !coresight_disable_source(csdev, NULL))
- goto out;
-
- switch (csdev->subtype.source_subtype) {
- case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
- cpu = source_ops(csdev)->cpu_id(csdev);
- path = per_cpu(tracer_path, cpu);
- per_cpu(tracer_path, cpu) = NULL;
- break;
- case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
- case CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM:
- case CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS:
- hash = hashlen_hash(hashlen_string(NULL, dev_name(&csdev->dev)));
- /* Find the path by the hash. */
- path = idr_find(&path_idr, hash);
- if (path == NULL) {
- pr_err("Path is not found for %s\n", dev_name(&csdev->dev));
- goto out;
- }
- idr_remove(&path_idr, hash);
- break;
- default:
- /* We can't be here */
- break;
- }
-
- coresight_disable_path(path);
- coresight_release_path(path);
-
-out:
- mutex_unlock(&coresight_mutex);
-}
-EXPORT_SYMBOL_GPL(coresight_disable);
-
-static ssize_t enable_sink_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct coresight_device *csdev = to_coresight_device(dev);
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->activated);
-}
-
-static ssize_t enable_sink_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct coresight_device *csdev = to_coresight_device(dev);
-
- ret = kstrtoul(buf, 10, &val);
- if (ret)
- return ret;
-
- if (val)
- csdev->activated = true;
- else
- csdev->activated = false;
-
- return size;
-
-}
-static DEVICE_ATTR_RW(enable_sink);
-
-static ssize_t enable_source_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct coresight_device *csdev = to_coresight_device(dev);
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->enable);
-}
-
-static ssize_t enable_source_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret = 0;
- unsigned long val;
- struct coresight_device *csdev = to_coresight_device(dev);
-
- ret = kstrtoul(buf, 10, &val);
- if (ret)
- return ret;
-
- if (val) {
- ret = coresight_enable(csdev);
- if (ret)
- return ret;
- } else {
- coresight_disable(csdev);
- }
-
- return size;
-}
-static DEVICE_ATTR_RW(enable_source);
-
-static struct attribute *coresight_sink_attrs[] = {
- &dev_attr_enable_sink.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(coresight_sink);
-
-static struct attribute *coresight_source_attrs[] = {
- &dev_attr_enable_source.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(coresight_source);
-
-static struct device_type coresight_dev_type[] = {
- {
- .name = "sink",
- .groups = coresight_sink_groups,
- },
- {
- .name = "link",
- },
- {
- .name = "linksink",
- .groups = coresight_sink_groups,
- },
- {
- .name = "source",
- .groups = coresight_source_groups,
- },
- {
- .name = "helper",
- }
-};
-/* Ensure the enum matches the names and groups */
-static_assert(ARRAY_SIZE(coresight_dev_type) == CORESIGHT_DEV_TYPE_MAX);
-
static void coresight_device_release(struct device *dev)
{
struct coresight_device *csdev = to_coresight_device(dev);
@@ -1799,7 +1361,7 @@ done:
}
EXPORT_SYMBOL_GPL(coresight_alloc_device_name);
-struct bus_type coresight_bustype = {
+const struct bus_type coresight_bustype = {
.name = "coresight",
};
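
The hunk above constifies coresight_bustype. A minimal sketch of that pattern (illustrative only, not part of the patch; the example_* names are hypothetical), assuming a kernel where bus_register() accepts a const struct bus_type pointer, which is what allows this change:

#include <linux/device.h>
#include <linux/init.h>

/* Hypothetical bus descriptor: never written to, so it can live in rodata. */
static const struct bus_type example_bustype = {
	.name = "example",
};

static int __init example_bus_init(void)
{
	return bus_register(&example_bustype);
}
subsys_initcall(example_bus_init);
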
diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
index 3999d0a2cb602..e805617020d06 100644
--- a/drivers/hwtracing/coresight/coresight-cti-core.c
+++ b/drivers/hwtracing/coresight/coresight-cti-core.c
@@ -974,7 +974,7 @@ static const struct amba_id cti_ids[] = {
CS_AMBA_ID(0x000bb9aa), /* CTI - C-A73 */
CS_AMBA_UCI_ID(0x000bb9da, uci_id_cti), /* CTI - C-A35 */
CS_AMBA_UCI_ID(0x000bb9ed, uci_id_cti), /* Coresight CTI (SoC 600) */
- { 0, 0},
+ { 0, 0, NULL },
};
MODULE_DEVICE_TABLE(amba, cti_ids);
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index fa80039e0821f..3aab182b562f1 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -76,7 +76,6 @@ DEFINE_CORESIGHT_DEVLIST(etb_devs, "etb");
* @pid: Process ID of the process being monitored by the session
* that is using this component.
* @buf: area of memory where ETB buffer content gets sent.
- * @mode: this ETB is being used.
* @buffer_depth: size of @buf.
* @trigger_cntr: amount of words to store after a trigger.
*/
@@ -89,7 +88,6 @@ struct etb_drvdata {
local_t reading;
pid_t pid;
u8 *buf;
- u32 mode;
u32 buffer_depth;
u32 trigger_cntr;
};
@@ -150,20 +148,20 @@ static int etb_enable_sysfs(struct coresight_device *csdev)
spin_lock_irqsave(&drvdata->spinlock, flags);
	/* Don't mess with perf sessions. */
- if (drvdata->mode == CS_MODE_PERF) {
+ if (coresight_get_mode(csdev) == CS_MODE_PERF) {
ret = -EBUSY;
goto out;
}
- if (drvdata->mode == CS_MODE_DISABLED) {
+ if (coresight_get_mode(csdev) == CS_MODE_DISABLED) {
ret = etb_enable_hw(drvdata);
if (ret)
goto out;
- drvdata->mode = CS_MODE_SYSFS;
+ coresight_set_mode(csdev, CS_MODE_SYSFS);
}
- atomic_inc(&csdev->refcnt);
+ csdev->refcnt++;
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
@@ -181,7 +179,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
spin_lock_irqsave(&drvdata->spinlock, flags);
	/* No need to continue if the component is already in use by sysFS. */
- if (drvdata->mode == CS_MODE_SYSFS) {
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
ret = -EBUSY;
goto out;
}
@@ -199,7 +197,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
* use for this session.
*/
if (drvdata->pid == pid) {
- atomic_inc(&csdev->refcnt);
+ csdev->refcnt++;
goto out;
}
@@ -216,8 +214,8 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
if (!ret) {
/* Associate with monitored process. */
drvdata->pid = pid;
- drvdata->mode = CS_MODE_PERF;
- atomic_inc(&csdev->refcnt);
+ coresight_set_mode(drvdata->csdev, CS_MODE_PERF);
+ csdev->refcnt++;
}
out:
@@ -356,17 +354,18 @@ static int etb_disable(struct coresight_device *csdev)
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_dec_return(&csdev->refcnt)) {
+ csdev->refcnt--;
+ if (csdev->refcnt) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
/* Complain if we (somehow) got out of sync */
- WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
+ WARN_ON_ONCE(coresight_get_mode(csdev) == CS_MODE_DISABLED);
etb_disable_hw(drvdata);
/* Dissociate from monitored process. */
drvdata->pid = -1;
- drvdata->mode = CS_MODE_DISABLED;
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(&csdev->dev, "ETB disabled\n");
@@ -447,7 +446,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't do anything if another tracer is using this sink */
- if (atomic_read(&csdev->refcnt) != 1)
+ if (csdev->refcnt != 1)
goto out;
__etb_disable_hw(drvdata);
@@ -589,7 +588,7 @@ static void etb_dump(struct etb_drvdata *drvdata)
unsigned long flags;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (drvdata->mode == CS_MODE_SYSFS) {
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
__etb_disable_hw(drvdata);
etb_dump_hw(drvdata);
__etb_enable_hw(drvdata);
@@ -837,7 +836,7 @@ static const struct amba_id etb_ids[] = {
.id = 0x000bb907,
.mask = 0x000fffff,
},
- { 0, 0},
+ { 0, 0, NULL },
};
MODULE_DEVICE_TABLE(amba, etb_ids);
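
The hunks above replace each driver's private drvdata->mode and the atomic csdev->refcnt with the coresight_get_mode()/coresight_set_mode()/coresight_take_mode() helpers and a plain refcount. A rough sketch of how such mode accessors can be built around a local_t (illustrative only; the example_* names and struct are hypothetical stand-ins, not the helpers added elsewhere in this series):

#include <linux/coresight.h>	/* enum cs_mode, CS_MODE_DISABLED */
#include <linux/types.h>
#include <asm/local.h>

/* Hypothetical device carrying its mode in a local_t. */
struct example_csdev {
	local_t mode;
};

static enum cs_mode example_get_mode(struct example_csdev *dev)
{
	return local_read(&dev->mode);
}

static void example_set_mode(struct example_csdev *dev, enum cs_mode mode)
{
	local_set(&dev->mode, mode);
}

/* Atomically claim the device for @mode; succeeds only if it was disabled. */
static bool example_take_mode(struct example_csdev *dev, enum cs_mode mode)
{
	return local_cmpxchg(&dev->mode, CS_MODE_DISABLED, mode) ==
	       CS_MODE_DISABLED;
}
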
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index a52cfcce25d6d..c0c60e6a1703e 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -589,7 +589,7 @@ static void etm_event_stop(struct perf_event *event, int mode)
return;
/* stop tracer */
- source_ops(csdev)->disable(csdev, event);
+ coresight_disable_source(csdev, event);
/* tell the core */
event->hw.state = PERF_HES_STOPPED;
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index 9a0d08b092ae7..e02c3ea972c92 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -215,7 +215,6 @@ struct etm_config {
* @port_size: port size as reported by ETMCR bit 4-6 and 21.
* @arch: ETM/PTM version number.
 * @use_cp14: true if management registers need to be accessed via CP14.
- * @mode: this tracer's mode, i.e sysFS, Perf or disabled.
* @sticky_enable: true if ETM base configuration has been done.
* @boot_enable:true if we should start tracing at boot time.
* @os_unlock: true if access to management registers is allowed.
@@ -238,7 +237,6 @@ struct etm_drvdata {
int port_size;
u8 arch;
bool use_cp14;
- local_t mode;
bool sticky_enable;
bool boot_enable;
bool os_unlock;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index 116a91d90ac20..9d5c1391ffb12 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -115,7 +115,7 @@ static void etm_clr_pwrup(struct etm_drvdata *drvdata)
*
* Basically the same as @coresight_timeout except for the register access
* method where we have to account for CP14 configurations.
-
+ *
* Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
 * TIMEOUT_US has elapsed, whichever happens first.
*/
@@ -556,14 +556,12 @@ static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode)
{
int ret;
- u32 val;
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
-
- /* Someone is already using the tracer */
- if (val)
+ if (!coresight_take_mode(csdev, mode)) {
+ /* Someone is already using the tracer */
return -EBUSY;
+ }
switch (mode) {
case CS_MODE_SYSFS:
@@ -578,7 +576,7 @@ static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
/* The tracer didn't start */
if (ret)
- local_set(&drvdata->mode, CS_MODE_DISABLED);
+ coresight_set_mode(drvdata->csdev, CS_MODE_DISABLED);
return ret;
}
@@ -672,14 +670,13 @@ static void etm_disable(struct coresight_device *csdev,
struct perf_event *event)
{
enum cs_mode mode;
- struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
/*
* For as long as the tracer isn't disabled another entity can't
* change its status. As such we can read the status here without
* fearing it will change under us.
*/
- mode = local_read(&drvdata->mode);
+ mode = coresight_get_mode(csdev);
switch (mode) {
case CS_MODE_DISABLED:
@@ -696,7 +693,7 @@ static void etm_disable(struct coresight_device *csdev,
}
if (mode)
- local_set(&drvdata->mode, CS_MODE_DISABLED);
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
}
static const struct coresight_ops_source etm_source_ops = {
@@ -715,7 +712,7 @@ static int etm_online_cpu(unsigned int cpu)
return 0;
if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
- coresight_enable(etmdrvdata[cpu]->csdev);
+ coresight_enable_sysfs(etmdrvdata[cpu]->csdev);
return 0;
}
@@ -730,7 +727,7 @@ static int etm_starting_cpu(unsigned int cpu)
etmdrvdata[cpu]->os_unlock = true;
}
- if (local_read(&etmdrvdata[cpu]->mode))
+ if (coresight_get_mode(etmdrvdata[cpu]->csdev))
etm_enable_hw(etmdrvdata[cpu]);
spin_unlock(&etmdrvdata[cpu]->spinlock);
return 0;
@@ -742,7 +739,7 @@ static int etm_dying_cpu(unsigned int cpu)
return 0;
spin_lock(&etmdrvdata[cpu]->spinlock);
- if (local_read(&etmdrvdata[cpu]->mode))
+ if (coresight_get_mode(etmdrvdata[cpu]->csdev))
etm_disable_hw(etmdrvdata[cpu]);
spin_unlock(&etmdrvdata[cpu]->spinlock);
return 0;
@@ -925,7 +922,7 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
dev_info(&drvdata->csdev->dev,
"%s initialized\n", (char *)coresight_get_uci_data(id));
if (boot_enable) {
- coresight_enable(drvdata->csdev);
+ coresight_enable_sysfs(drvdata->csdev);
drvdata->boot_enable = true;
}
@@ -1003,7 +1000,7 @@ static const struct amba_id etm_ids[] = {
CS_AMBA_ID_DATA(0x000bb95f, "PTM 1.1"),
/* PTM 1.1 Qualcomm */
CS_AMBA_ID_DATA(0x000b006f, "PTM 1.1"),
- { 0, 0},
+ { 0, 0, NULL},
};
MODULE_DEVICE_TABLE(amba, etm_ids);
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index 2f271b7fb048c..68c644be9813b 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -722,7 +722,7 @@ static ssize_t cntr_val_show(struct device *dev,
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
- if (!local_read(&drvdata->mode)) {
+ if (!coresight_get_mode(drvdata->csdev)) {
spin_lock(&drvdata->spinlock);
for (i = 0; i < drvdata->nr_cntr; i++)
ret += sprintf(buf, "counter %d: %x\n",
@@ -941,7 +941,7 @@ static ssize_t seq_curr_state_show(struct device *dev,
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
- if (!local_read(&drvdata->mode)) {
+ if (!coresight_get_mode(drvdata->csdev)) {
val = config->seq_curr_state;
goto out;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index ce1995a2827f0..c2ca4a02dfce1 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -840,14 +840,11 @@ static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode)
{
int ret;
- u32 val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
- /* Someone is already using the tracer */
- if (val)
+ if (!coresight_take_mode(csdev, mode)) {
+ /* Someone is already using the tracer */
return -EBUSY;
+ }
switch (mode) {
case CS_MODE_SYSFS:
@@ -862,7 +859,7 @@ static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
/* The tracer didn't start */
if (ret)
- local_set(&drvdata->mode, CS_MODE_DISABLED);
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
return ret;
}
@@ -1004,14 +1001,13 @@ static void etm4_disable(struct coresight_device *csdev,
struct perf_event *event)
{
enum cs_mode mode;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
/*
* For as long as the tracer isn't disabled another entity can't
* change its status. As such we can read the status here without
* fearing it will change under us.
*/
- mode = local_read(&drvdata->mode);
+ mode = coresight_get_mode(csdev);
switch (mode) {
case CS_MODE_DISABLED:
@@ -1025,7 +1021,7 @@ static void etm4_disable(struct coresight_device *csdev,
}
if (mode)
- local_set(&drvdata->mode, CS_MODE_DISABLED);
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
}
static const struct coresight_ops_source etm4_source_ops = {
@@ -1200,6 +1196,7 @@ static void etm4_init_arch_data(void *info)
struct etm4_init_arg *init_arg = info;
struct etmv4_drvdata *drvdata;
struct csdev_access *csa;
+ struct device *dev = init_arg->dev;
int i;
drvdata = dev_get_drvdata(init_arg->dev);
@@ -1213,6 +1210,10 @@ static void etm4_init_arch_data(void *info)
if (!etm4_init_csdev_access(drvdata, csa))
return;
+ if (!csa->io_mem ||
+ fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
+ drvdata->skip_power_up = true;
+
/* Detect the support for OS Lock before we actually use it */
etm_detect_os_lock(drvdata, csa);
@@ -1650,7 +1651,7 @@ static int etm4_online_cpu(unsigned int cpu)
return etm4_probe_cpu(cpu);
if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
- coresight_enable(etmdrvdata[cpu]->csdev);
+ coresight_enable_sysfs(etmdrvdata[cpu]->csdev);
return 0;
}
@@ -1663,7 +1664,7 @@ static int etm4_starting_cpu(unsigned int cpu)
if (!etmdrvdata[cpu]->os_unlock)
etm4_os_unlock(etmdrvdata[cpu]);
- if (local_read(&etmdrvdata[cpu]->mode))
+ if (coresight_get_mode(etmdrvdata[cpu]->csdev))
etm4_enable_hw(etmdrvdata[cpu]);
spin_unlock(&etmdrvdata[cpu]->spinlock);
return 0;
@@ -1675,7 +1676,7 @@ static int etm4_dying_cpu(unsigned int cpu)
return 0;
spin_lock(&etmdrvdata[cpu]->spinlock);
- if (local_read(&etmdrvdata[cpu]->mode))
+ if (coresight_get_mode(etmdrvdata[cpu]->csdev))
etm4_disable_hw(etmdrvdata[cpu]);
spin_unlock(&etmdrvdata[cpu]->spinlock);
return 0;
@@ -1833,7 +1834,7 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
* Save and restore the ETM Trace registers only if
* the ETM is active.
*/
- if (local_read(&drvdata->mode) && drvdata->save_state)
+ if (coresight_get_mode(drvdata->csdev) && drvdata->save_state)
ret = __etm4_cpu_save(drvdata);
return ret;
}
@@ -2040,11 +2041,6 @@ static int etm4_add_coresight_dev(struct etm4_init_arg *init_arg)
if (!drvdata->arch)
return -EINVAL;
- /* TRCPDCR is not accessible with system instructions. */
- if (!desc.access.io_mem ||
- fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
- drvdata->skip_power_up = true;
-
major = ETM_ARCH_MAJOR_VERSION(drvdata->arch);
minor = ETM_ARCH_MINOR_VERSION(drvdata->arch);
@@ -2098,7 +2094,7 @@ static int etm4_add_coresight_dev(struct etm4_init_arg *init_arg)
drvdata->cpu, type_name, major, minor);
if (boot_enable) {
- coresight_enable(drvdata->csdev);
+ coresight_enable_sysfs(drvdata->csdev);
drvdata->boot_enable = true;
}
@@ -2390,7 +2386,7 @@ static const struct of_device_id etm4_sysreg_match[] = {
#ifdef CONFIG_ACPI
static const struct acpi_device_id etm4x_acpi_ids[] = {
- {"ARMHC500", 0}, /* ARM CoreSight ETM4x */
+ {"ARMHC500", 0, 0, 0}, /* ARM CoreSight ETM4x */
{}
};
MODULE_DEVICE_TABLE(acpi, etm4x_acpi_ids);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index da17b6c49b0f1..9ea678bc2e8e5 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -1016,7 +1016,6 @@ struct etmv4_drvdata {
void __iomem *base;
struct coresight_device *csdev;
spinlock_t spinlock;
- local_t mode;
int cpu;
u8 arch;
u8 nr_pe;
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index a5b1fc787766a..ef1a0abfee4e9 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -350,7 +350,7 @@ MODULE_DEVICE_TABLE(of, static_funnel_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id static_funnel_ids[] = {
- {"ARMHC9FE", 0},
+ {"ARMHC9FE", 0, 0, 0},
{},
};
@@ -391,7 +391,7 @@ static const struct amba_id dynamic_funnel_ids[] = {
.id = 0x000bb9eb,
.mask = 0x000fffff,
},
- { 0, 0},
+ { 0, 0, NULL },
};
MODULE_DEVICE_TABLE(amba, dynamic_funnel_ids);
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 767076e079701..eb365236f9a97 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -12,6 +12,9 @@
#include <linux/coresight.h>
#include <linux/pm_runtime.h>
+extern struct mutex coresight_mutex;
+extern struct device_type coresight_dev_type[];
+
/*
* Coresight management registers (0xf00-0xfcc)
* 0xfa0 - 0xfa4: Management registers in PFTv1.0
@@ -130,8 +133,6 @@ void coresight_disable_path(struct list_head *path);
int coresight_enable_path(struct list_head *path, enum cs_mode mode,
void *sink_data);
struct coresight_device *coresight_get_sink(struct list_head *path);
-struct coresight_device *
-coresight_get_enabled_sink(struct coresight_device *source);
struct coresight_device *coresight_get_sink_by_id(u32 id);
struct coresight_device *
coresight_find_default_sink(struct coresight_device *csdev);
@@ -231,8 +232,6 @@ void coresight_add_helper(struct coresight_device *csdev,
void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev);
struct coresight_device *coresight_get_percpu_sink(int cpu);
-int coresight_enable_source(struct coresight_device *csdev, enum cs_mode mode,
- void *data);
-bool coresight_disable_source(struct coresight_device *csdev, void *data);
+void coresight_disable_source(struct coresight_device *csdev, void *data);
#endif
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 91d93060dda53..73452d9dc13b2 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -363,7 +363,7 @@ MODULE_DEVICE_TABLE(of, static_replicator_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id static_replicator_acpi_ids[] = {
- {"ARMHC985", 0}, /* ARM CoreSight Static Replicator */
+ {"ARMHC985", 0, 0, 0}, /* ARM CoreSight Static Replicator */
{}
};
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index a1c27c901ad17..974d37e5f94c0 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -119,7 +119,6 @@ DEFINE_CORESIGHT_DEVLIST(stm_devs, "stm");
* @spinlock: only one at a time pls.
 * @chs: the channels associated with this STM.
* @stm: structure associated to the generic STM interface.
- * @mode: this tracer's mode (enum cs_mode), i.e sysFS, or disabled.
* @traceid: value of the current ID for this component.
 * @write_bytes: Maximum bytes this STM can write at a time.
* @stmsper: settings for register STMSPER.
@@ -136,7 +135,6 @@ struct stm_drvdata {
spinlock_t spinlock;
struct channel_space chs;
struct stm_data stm;
- local_t mode;
u8 traceid;
u32 write_bytes;
u32 stmsper;
@@ -195,17 +193,15 @@ static void stm_enable_hw(struct stm_drvdata *drvdata)
static int stm_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode)
{
- u32 val;
struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
if (mode != CS_MODE_SYSFS)
return -EINVAL;
- val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
-
- /* Someone is already using the tracer */
- if (val)
+ if (!coresight_take_mode(csdev, mode)) {
+ /* Someone is already using the tracer */
return -EBUSY;
+ }
pm_runtime_get_sync(csdev->dev.parent);
@@ -266,7 +262,7 @@ static void stm_disable(struct coresight_device *csdev,
* change its status. As such we can read the status here without
* fearing it will change under us.
*/
- if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+ if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
spin_lock(&drvdata->spinlock);
stm_disable_hw(drvdata);
spin_unlock(&drvdata->spinlock);
@@ -276,7 +272,7 @@ static void stm_disable(struct coresight_device *csdev,
pm_runtime_put(csdev->dev.parent);
- local_set(&drvdata->mode, CS_MODE_DISABLED);
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
dev_dbg(&csdev->dev, "STM tracing disabled\n");
}
}
@@ -334,7 +330,7 @@ static int stm_generic_link(struct stm_data *stm_data,
if (!drvdata || !drvdata->csdev)
return -EINVAL;
- return coresight_enable(drvdata->csdev);
+ return coresight_enable_sysfs(drvdata->csdev);
}
static void stm_generic_unlink(struct stm_data *stm_data,
@@ -345,7 +341,7 @@ static void stm_generic_unlink(struct stm_data *stm_data,
if (!drvdata || !drvdata->csdev)
return;
- coresight_disable(drvdata->csdev);
+ coresight_disable_sysfs(drvdata->csdev);
}
static phys_addr_t
@@ -373,7 +369,7 @@ static long stm_generic_set_options(struct stm_data *stm_data,
{
struct stm_drvdata *drvdata = container_of(stm_data,
struct stm_drvdata, stm);
- if (!(drvdata && local_read(&drvdata->mode)))
+ if (!(drvdata && coresight_get_mode(drvdata->csdev)))
return -EINVAL;
if (channel >= drvdata->numsp)
@@ -408,7 +404,7 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
struct stm_drvdata, stm);
unsigned int stm_flags;
- if (!(drvdata && local_read(&drvdata->mode)))
+ if (!(drvdata && coresight_get_mode(drvdata->csdev)))
return -EACCES;
if (channel >= drvdata->numsp)
@@ -515,7 +511,7 @@ static ssize_t port_select_show(struct device *dev,
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
- if (!local_read(&drvdata->mode)) {
+ if (!coresight_get_mode(drvdata->csdev)) {
val = drvdata->stmspscr;
} else {
spin_lock(&drvdata->spinlock);
@@ -541,7 +537,7 @@ static ssize_t port_select_store(struct device *dev,
spin_lock(&drvdata->spinlock);
drvdata->stmspscr = val;
- if (local_read(&drvdata->mode)) {
+ if (coresight_get_mode(drvdata->csdev)) {
CS_UNLOCK(drvdata->base);
/* Process as per ARM's TRM recommendation */
stmsper = readl_relaxed(drvdata->base + STMSPER);
@@ -562,7 +558,7 @@ static ssize_t port_enable_show(struct device *dev,
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
- if (!local_read(&drvdata->mode)) {
+ if (!coresight_get_mode(drvdata->csdev)) {
val = drvdata->stmsper;
} else {
spin_lock(&drvdata->spinlock);
@@ -588,7 +584,7 @@ static ssize_t port_enable_store(struct device *dev,
spin_lock(&drvdata->spinlock);
drvdata->stmsper = val;
- if (local_read(&drvdata->mode)) {
+ if (coresight_get_mode(drvdata->csdev)) {
CS_UNLOCK(drvdata->base);
writel_relaxed(drvdata->stmsper, drvdata->base + STMSPER);
CS_LOCK(drvdata->base);
@@ -950,7 +946,7 @@ static const struct dev_pm_ops stm_dev_pm_ops = {
static const struct amba_id stm_ids[] = {
CS_AMBA_ID_DATA(0x000bb962, "STM32"),
CS_AMBA_ID_DATA(0x000bb963, "STM500"),
- { 0, 0},
+ { 0, 0, NULL },
};
MODULE_DEVICE_TABLE(amba, stm_ids);
diff --git a/drivers/hwtracing/coresight/coresight-sysfs.c b/drivers/hwtracing/coresight/coresight-sysfs.c
index dd78e9fcfc4dc..f9444e2cb1d9f 100644
--- a/drivers/hwtracing/coresight/coresight-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-sysfs.c
@@ -5,11 +5,402 @@
*/
#include <linux/device.h>
+#include <linux/idr.h>
#include <linux/kernel.h>
#include "coresight-priv.h"
/*
+ * Use an IDR to map the hash of a source's device name to the path
+ * built for that source. The IDR is used for sources that aren't
+ * associated with a CPU.
+ */
+static DEFINE_IDR(path_idr);
+
+/*
+ * When operating Coresight drivers from the sysFS interface, only a single
+ * path can exist from a tracer (associated with a CPU) to a sink.
+ */
+static DEFINE_PER_CPU(struct list_head *, tracer_path);
+
+ssize_t coresight_simple_show_pair(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct coresight_device *csdev = container_of(_dev, struct coresight_device, dev);
+ struct cs_pair_attribute *cs_attr = container_of(attr, struct cs_pair_attribute, attr);
+ u64 val;
+
+ pm_runtime_get_sync(_dev->parent);
+ val = csdev_access_relaxed_read_pair(&csdev->access, cs_attr->lo_off, cs_attr->hi_off);
+ pm_runtime_put_sync(_dev->parent);
+ return sysfs_emit(buf, "0x%llx\n", val);
+}
+EXPORT_SYMBOL_GPL(coresight_simple_show_pair);
+
+ssize_t coresight_simple_show32(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct coresight_device *csdev = container_of(_dev, struct coresight_device, dev);
+ struct cs_off_attribute *cs_attr = container_of(attr, struct cs_off_attribute, attr);
+ u64 val;
+
+ pm_runtime_get_sync(_dev->parent);
+ val = csdev_access_relaxed_read32(&csdev->access, cs_attr->off);
+ pm_runtime_put_sync(_dev->parent);
+ return sysfs_emit(buf, "0x%llx\n", val);
+}
+EXPORT_SYMBOL_GPL(coresight_simple_show32);
+
+static int coresight_enable_source_sysfs(struct coresight_device *csdev,
+ enum cs_mode mode, void *data)
+{
+ int ret;
+
+ /*
+ * Comparison with CS_MODE_SYSFS works without taking any device
+ * specific spinlock because the truthiness of that comparison can only
+ * change with coresight_mutex held, which we already have here.
+ */
+ lockdep_assert_held(&coresight_mutex);
+ if (coresight_get_mode(csdev) != CS_MODE_SYSFS) {
+ ret = source_ops(csdev)->enable(csdev, data, mode);
+ if (ret)
+ return ret;
+ }
+
+ csdev->refcnt++;
+
+ return 0;
+}
+
+/**
+ * coresight_disable_source_sysfs - Drop the reference count by 1 and disable
+ * the device if there are no users left.
+ *
+ * @csdev: The coresight device to disable
+ * @data: Opaque data to pass on to the disable function of the source device.
+ * For example in perf mode this is a pointer to the struct perf_event.
+ *
+ * Returns true if the device has been disabled.
+ */
+static bool coresight_disable_source_sysfs(struct coresight_device *csdev,
+ void *data)
+{
+ lockdep_assert_held(&coresight_mutex);
+ if (coresight_get_mode(csdev) != CS_MODE_SYSFS)
+ return false;
+
+ csdev->refcnt--;
+ if (csdev->refcnt == 0) {
+ coresight_disable_source(csdev, data);
+ return true;
+ }
+ return false;
+}
+
+/**
+ * coresight_find_activated_sysfs_sink - returns the first sink activated via
+ * sysfs, using a connection-based search starting from the source reference.
+ *
+ * @csdev: Coresight source device reference
+ */
+static struct coresight_device *
+coresight_find_activated_sysfs_sink(struct coresight_device *csdev)
+{
+ int i;
+ struct coresight_device *sink = NULL;
+
+ if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
+ csdev->sysfs_sink_activated)
+ return csdev;
+
+ /*
+ * Recursively explore each port found on this element.
+ */
+ for (i = 0; i < csdev->pdata->nr_outconns; i++) {
+ struct coresight_device *child_dev;
+
+ child_dev = csdev->pdata->out_conns[i]->dest_dev;
+ if (child_dev)
+ sink = coresight_find_activated_sysfs_sink(child_dev);
+ if (sink)
+ return sink;
+ }
+
+ return NULL;
+}
+
+/** coresight_validate_source_sysfs - make sure a source has the right
+ * credentials to be used via sysfs.
+ * @csdev: the device structure for a source.
+ * @function: the function this was called from.
+ *
+ * Assumes the coresight_mutex is held.
+ */
+static int coresight_validate_source_sysfs(struct coresight_device *csdev,
+ const char *function)
+{
+ u32 type, subtype;
+
+ type = csdev->type;
+ subtype = csdev->subtype.source_subtype;
+
+ if (type != CORESIGHT_DEV_TYPE_SOURCE) {
+ dev_err(&csdev->dev, "wrong device type in %s\n", function);
+ return -EINVAL;
+ }
+
+ if (subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_PROC &&
+ subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE &&
+ subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM &&
+ subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS) {
+ dev_err(&csdev->dev, "wrong device subtype in %s\n", function);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int coresight_enable_sysfs(struct coresight_device *csdev)
+{
+ int cpu, ret = 0;
+ struct coresight_device *sink;
+ struct list_head *path;
+ enum coresight_dev_subtype_source subtype;
+ u32 hash;
+
+ subtype = csdev->subtype.source_subtype;
+
+ mutex_lock(&coresight_mutex);
+
+ ret = coresight_validate_source_sysfs(csdev, __func__);
+ if (ret)
+ goto out;
+
+ /*
+ * mode == SYSFS implies that it's already enabled. Don't look at the
+ * refcount to determine this because we don't claim the source until
+ * coresight_enable_source(), so it can still race with Perf mode, which
+ * doesn't hold coresight_mutex.
+ */
+ if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
+ /*
+ * There could be multiple applications driving the software
+ * source, so take a reference for each such user when the
+ * source is already enabled.
+ */
+ if (subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE)
+ csdev->refcnt++;
+ goto out;
+ }
+
+ sink = coresight_find_activated_sysfs_sink(csdev);
+ if (!sink) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ path = coresight_build_path(csdev, sink);
+ if (IS_ERR(path)) {
+ pr_err("building path(s) failed\n");
+ ret = PTR_ERR(path);
+ goto out;
+ }
+
+ ret = coresight_enable_path(path, CS_MODE_SYSFS, NULL);
+ if (ret)
+ goto err_path;
+
+ ret = coresight_enable_source_sysfs(csdev, CS_MODE_SYSFS, NULL);
+ if (ret)
+ goto err_source;
+
+ switch (subtype) {
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
+ /*
+ * When working from sysFS it is important to keep track
+ * of the paths that were created so that they can be
+ * undone in 'coresight_disable_sysfs()'. Since there can only
+ * be a single session per tracer (when working from sysFS),
+ * a per-cpu variable will do just fine.
+ */
+ cpu = source_ops(csdev)->cpu_id(csdev);
+ per_cpu(tracer_path, cpu) = path;
+ break;
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM:
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS:
+ /*
+ * Use the hash of the source's device name as the ID
+ * and map that ID to the pointer of the path.
+ */
+ hash = hashlen_hash(hashlen_string(NULL, dev_name(&csdev->dev)));
+ ret = idr_alloc_u32(&path_idr, path, &hash, hash, GFP_KERNEL);
+ if (ret)
+ goto err_source;
+ break;
+ default:
+ /* We can't be here */
+ break;
+ }
+
+out:
+ mutex_unlock(&coresight_mutex);
+ return ret;
+
+err_source:
+ coresight_disable_path(path);
+
+err_path:
+ coresight_release_path(path);
+ goto out;
+}
+EXPORT_SYMBOL_GPL(coresight_enable_sysfs);
+
+void coresight_disable_sysfs(struct coresight_device *csdev)
+{
+ int cpu, ret;
+ struct list_head *path = NULL;
+ u32 hash;
+
+ mutex_lock(&coresight_mutex);
+
+ ret = coresight_validate_source_sysfs(csdev, __func__);
+ if (ret)
+ goto out;
+
+ if (!coresight_disable_source_sysfs(csdev, NULL))
+ goto out;
+
+ switch (csdev->subtype.source_subtype) {
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
+ cpu = source_ops(csdev)->cpu_id(csdev);
+ path = per_cpu(tracer_path, cpu);
+ per_cpu(tracer_path, cpu) = NULL;
+ break;
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM:
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS:
+ hash = hashlen_hash(hashlen_string(NULL, dev_name(&csdev->dev)));
+ /* Find the path by the hash. */
+ path = idr_find(&path_idr, hash);
+ if (path == NULL) {
+ pr_err("Path is not found for %s\n", dev_name(&csdev->dev));
+ goto out;
+ }
+ idr_remove(&path_idr, hash);
+ break;
+ default:
+ /* We can't be here */
+ break;
+ }
+
+ coresight_disable_path(path);
+ coresight_release_path(path);
+
+out:
+ mutex_unlock(&coresight_mutex);
+}
+EXPORT_SYMBOL_GPL(coresight_disable_sysfs);
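
coresight_enable_sysfs() and coresight_disable_sysfs() above key non-CPU sources by a hash of the device name and keep the built path in an IDR. A standalone sketch of that store/lookup pattern (illustrative only; the example_* names are hypothetical), using the same hashlen_string()/idr_alloc_u32() calls:

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/stringhash.h>

static DEFINE_IDR(example_path_idr);

/* Remember @path under the hash of @name, as the enable side does. */
static int example_store_path(const char *name, struct list_head *path)
{
	u32 hash = hashlen_hash(hashlen_string(NULL, name));

	/* Lower and upper bound are both @hash, so exactly this ID is used. */
	return idr_alloc_u32(&example_path_idr, path, &hash, hash, GFP_KERNEL);
}

/* Look the path up again and drop the mapping, as the disable side does. */
static struct list_head *example_take_path(const char *name)
{
	u32 hash = hashlen_hash(hashlen_string(NULL, name));
	struct list_head *path = idr_find(&example_path_idr, hash);

	if (path)
		idr_remove(&example_path_idr, hash);
	return path;
}
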
+
+static ssize_t enable_sink_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->sysfs_sink_activated);
+}
+
+static ssize_t enable_sink_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ csdev->sysfs_sink_activated = !!val;
+
+ return size;
+}
+static DEVICE_ATTR_RW(enable_sink);
+
+static ssize_t enable_source_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ guard(mutex)(&coresight_mutex);
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ coresight_get_mode(csdev) == CS_MODE_SYSFS);
+}
+
+static ssize_t enable_source_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret = 0;
+ unsigned long val;
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val) {
+ ret = coresight_enable_sysfs(csdev);
+ if (ret)
+ return ret;
+ } else {
+ coresight_disable_sysfs(csdev);
+ }
+
+ return size;
+}
+static DEVICE_ATTR_RW(enable_source);
+
+static struct attribute *coresight_sink_attrs[] = {
+ &dev_attr_enable_sink.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(coresight_sink);
+
+static struct attribute *coresight_source_attrs[] = {
+ &dev_attr_enable_source.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(coresight_source);
+
+struct device_type coresight_dev_type[] = {
+ [CORESIGHT_DEV_TYPE_SINK] = {
+ .name = "sink",
+ .groups = coresight_sink_groups,
+ },
+ [CORESIGHT_DEV_TYPE_LINK] = {
+ .name = "link",
+ },
+ [CORESIGHT_DEV_TYPE_LINKSINK] = {
+ .name = "linksink",
+ .groups = coresight_sink_groups,
+ },
+ [CORESIGHT_DEV_TYPE_SOURCE] = {
+ .name = "source",
+ .groups = coresight_source_groups,
+ },
+ [CORESIGHT_DEV_TYPE_HELPER] = {
+ .name = "helper",
+ }
+};
+/* Ensure the enum matches the names and groups */
+static_assert(ARRAY_SIZE(coresight_dev_type) == CORESIGHT_DEV_TYPE_MAX);
+
+/*
* Connections group - links attribute.
* Count of created links between coresight components in the group.
*/
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index 7ec5365e2b642..72005b0c633e5 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -558,7 +558,7 @@ static void tmc_shutdown(struct amba_device *adev)
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (drvdata->mode == CS_MODE_DISABLED)
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_DISABLED)
goto out;
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
@@ -594,7 +594,7 @@ static const struct amba_id tmc_ids[] = {
CS_AMBA_ID(0x000bb9e9),
/* Coresight SoC 600 TMC-ETF */
CS_AMBA_ID(0x000bb9ea),
- { 0, 0},
+ { 0, 0, NULL },
};
MODULE_DEVICE_TABLE(amba, tmc_ids);
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 7406b65e2cdda..d4f641cd9de69 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -89,7 +89,7 @@ static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
* When operating in sysFS mode the content of the buffer needs to be
* read before the TMC is disabled.
*/
- if (drvdata->mode == CS_MODE_SYSFS)
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS)
tmc_etb_dump_hw(drvdata);
tmc_disable_hw(drvdata);
@@ -205,8 +205,8 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
* sink is already enabled no memory is needed and the HW need not be
* touched.
*/
- if (drvdata->mode == CS_MODE_SYSFS) {
- atomic_inc(&csdev->refcnt);
+ if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
+ csdev->refcnt++;
goto out;
}
@@ -228,8 +228,8 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
ret = tmc_etb_enable_hw(drvdata);
if (!ret) {
- drvdata->mode = CS_MODE_SYSFS;
- atomic_inc(&csdev->refcnt);
+ coresight_set_mode(csdev, CS_MODE_SYSFS);
+ csdev->refcnt++;
} else {
/* Free up the buffer if we failed to enable */
used = false;
@@ -262,7 +262,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
* No need to continue if the ETB/ETF is already operated
* from sysFS.
*/
- if (drvdata->mode == CS_MODE_SYSFS) {
+ if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
ret = -EBUSY;
break;
}
@@ -284,7 +284,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
* use for this session.
*/
if (drvdata->pid == pid) {
- atomic_inc(&csdev->refcnt);
+ csdev->refcnt++;
break;
}
@@ -292,8 +292,8 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
if (!ret) {
/* Associate with monitored process. */
drvdata->pid = pid;
- drvdata->mode = CS_MODE_PERF;
- atomic_inc(&csdev->refcnt);
+ coresight_set_mode(csdev, CS_MODE_PERF);
+ csdev->refcnt++;
}
} while (0);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -338,17 +338,18 @@ static int tmc_disable_etf_sink(struct coresight_device *csdev)
return -EBUSY;
}
- if (atomic_dec_return(&csdev->refcnt)) {
+ csdev->refcnt--;
+ if (csdev->refcnt) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
/* Complain if we (somehow) got out of sync */
- WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
+ WARN_ON_ONCE(coresight_get_mode(csdev) == CS_MODE_DISABLED);
tmc_etb_disable_hw(drvdata);
/* Dissociate from monitored process. */
drvdata->pid = -1;
- drvdata->mode = CS_MODE_DISABLED;
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -371,15 +372,15 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
return -EBUSY;
}
- if (atomic_read(&csdev->refcnt) == 0) {
+ if (csdev->refcnt == 0) {
ret = tmc_etf_enable_hw(drvdata);
if (!ret) {
- drvdata->mode = CS_MODE_SYSFS;
+ coresight_set_mode(csdev, CS_MODE_SYSFS);
first_enable = true;
}
}
if (!ret)
- atomic_inc(&csdev->refcnt);
+ csdev->refcnt++;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (first_enable)
@@ -401,9 +402,10 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
return;
}
- if (atomic_dec_return(&csdev->refcnt) == 0) {
+ csdev->refcnt--;
+ if (csdev->refcnt == 0) {
tmc_etf_disable_hw(drvdata);
- drvdata->mode = CS_MODE_DISABLED;
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
last_disable = true;
}
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -483,13 +485,13 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
return 0;
/* This shouldn't happen */
- if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
+ if (WARN_ON_ONCE(coresight_get_mode(csdev) != CS_MODE_PERF))
return 0;
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't do anything if another tracer is using this sink */
- if (atomic_read(&csdev->refcnt) != 1)
+ if (csdev->refcnt != 1)
goto out;
CS_UNLOCK(drvdata->base);
@@ -629,7 +631,7 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
}
/* Don't interfere if operated from Perf */
- if (drvdata->mode == CS_MODE_PERF) {
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_PERF) {
ret = -EINVAL;
goto out;
}
@@ -641,7 +643,7 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
}
/* Disable the TMC if need be */
- if (drvdata->mode == CS_MODE_SYSFS) {
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
/* There is no point in reading a TMC in HW FIFO mode */
mode = readl_relaxed(drvdata->base + TMC_MODE);
if (mode != TMC_MODE_CIRCULAR_BUFFER) {
@@ -673,7 +675,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Re-enable the TMC if need be */
- if (drvdata->mode == CS_MODE_SYSFS) {
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
/* There is no point in reading a TMC in HW FIFO mode */
mode = readl_relaxed(drvdata->base + TMC_MODE);
if (mode != TMC_MODE_CIRCULAR_BUFFER) {
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index af02ba5d5f15d..e75428fa1592a 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -1143,7 +1143,7 @@ static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
* When operating in sysFS mode the content of the buffer needs to be
* read before the TMC is disabled.
*/
- if (drvdata->mode == CS_MODE_SYSFS)
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS)
tmc_etr_sync_sysfs_buf(drvdata);
tmc_disable_hw(drvdata);
@@ -1189,7 +1189,7 @@ static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev)
spin_lock_irqsave(&drvdata->spinlock, flags);
}
- if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
+ if (drvdata->reading || coresight_get_mode(csdev) == CS_MODE_PERF) {
ret = -EBUSY;
goto out;
}
@@ -1230,15 +1230,15 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
* sink is already enabled no memory is needed and the HW need not be
* touched, even if the buffer size has changed.
*/
- if (drvdata->mode == CS_MODE_SYSFS) {
- atomic_inc(&csdev->refcnt);
+ if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
+ csdev->refcnt++;
goto out;
}
ret = tmc_etr_enable_hw(drvdata, sysfs_buf);
if (!ret) {
- drvdata->mode = CS_MODE_SYSFS;
- atomic_inc(&csdev->refcnt);
+ coresight_set_mode(csdev, CS_MODE_SYSFS);
+ csdev->refcnt++;
}
out:
@@ -1564,7 +1564,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't do anything if another tracer is using this sink */
- if (atomic_read(&csdev->refcnt) != 1) {
+ if (csdev->refcnt != 1) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
goto out;
}
@@ -1652,7 +1652,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't use this sink if it is already claimed by sysFS */
- if (drvdata->mode == CS_MODE_SYSFS) {
+ if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
rc = -EBUSY;
goto unlock_out;
}
@@ -1676,7 +1676,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
* use for this session.
*/
if (drvdata->pid == pid) {
- atomic_inc(&csdev->refcnt);
+ csdev->refcnt++;
goto unlock_out;
}
@@ -1684,9 +1684,9 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
if (!rc) {
/* Associate with monitored process. */
drvdata->pid = pid;
- drvdata->mode = CS_MODE_PERF;
+ coresight_set_mode(csdev, CS_MODE_PERF);
drvdata->perf_buf = etr_perf->etr_buf;
- atomic_inc(&csdev->refcnt);
+ csdev->refcnt++;
}
unlock_out:
@@ -1719,17 +1719,18 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
return -EBUSY;
}
- if (atomic_dec_return(&csdev->refcnt)) {
+ csdev->refcnt--;
+ if (csdev->refcnt) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
/* Complain if we (somehow) got out of sync */
- WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
+ WARN_ON_ONCE(coresight_get_mode(csdev) == CS_MODE_DISABLED);
tmc_etr_disable_hw(drvdata);
/* Dissociate from monitored process. */
drvdata->pid = -1;
- drvdata->mode = CS_MODE_DISABLED;
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
/* Reset perf specific data */
drvdata->perf_buf = NULL;
@@ -1777,7 +1778,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
}
/* Disable the TMC if we are trying to read from a running session. */
- if (drvdata->mode == CS_MODE_SYSFS)
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS)
__tmc_etr_disable_hw(drvdata);
drvdata->reading = true;
@@ -1799,7 +1800,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
spin_lock_irqsave(&drvdata->spinlock, flags);
/* RE-enable the TMC if need be */
- if (drvdata->mode == CS_MODE_SYSFS) {
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
/*
* The trace run will continue with the same allocated trace
* buffer. Since the tracer is still enabled drvdata::buf can't
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 8dcb426ac3e7a..cef979c897e62 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -178,7 +178,6 @@ struct etr_buf {
* @size: trace buffer size for this TMC (common for all modes).
* @max_burst_size: The maximum burst size that can be initiated by
* TMC-ETR on AXI bus.
- * @mode: how this TMC is being used.
* @config_type: TMC variant, must be of type @tmc_config_type.
* @memwidth: width of the memory interface databus, in bytes.
* @trigger_cntr: amount of words to store after a trigger.
@@ -203,7 +202,6 @@ struct tmc_drvdata {
u32 len;
u32 size;
u32 max_burst_size;
- u32 mode;
enum tmc_config_type config_type;
enum tmc_mem_intf_width memwidth;
u32 trigger_cntr;
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index 5f82737c37bba..7739bc7adc449 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -18,6 +18,7 @@
#include "coresight-priv.h"
#include "coresight-tpda.h"
#include "coresight-trace-id.h"
+#include "coresight-tpdm.h"
DEFINE_CORESIGHT_DEVLIST(tpda_devs, "tpda");
@@ -28,24 +29,59 @@ static bool coresight_device_is_tpdm(struct coresight_device *csdev)
CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM);
}
+static void tpda_clear_element_size(struct coresight_device *csdev)
+{
+ struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ drvdata->dsb_esize = 0;
+ drvdata->cmb_esize = 0;
+}
+
+static void tpda_set_element_size(struct tpda_drvdata *drvdata, u32 *val)
+{
+ /* Clear all relevant fields */
+ *val &= ~(TPDA_Pn_CR_DSBSIZE | TPDA_Pn_CR_CMBSIZE);
+
+ if (drvdata->dsb_esize == 64)
+ *val |= TPDA_Pn_CR_DSBSIZE;
+ else if (drvdata->dsb_esize == 32)
+ *val &= ~TPDA_Pn_CR_DSBSIZE;
+
+ if (drvdata->cmb_esize == 64)
+ *val |= FIELD_PREP(TPDA_Pn_CR_CMBSIZE, 0x2);
+ else if (drvdata->cmb_esize == 32)
+ *val |= FIELD_PREP(TPDA_Pn_CR_CMBSIZE, 0x1);
+ else if (drvdata->cmb_esize == 8)
+ *val &= ~TPDA_Pn_CR_CMBSIZE;
+}
+
/*
- * Read the DSB element size from the TPDM device
+ * Read the element size from the TPDM device. A TPDM must have at least
+ * one of the element size properties.
* Returns
- * The dsb element size read from the devicetree if available.
- * 0 - Otherwise, with a warning once.
+ * 0 - An element size property was read successfully
+ * Others - The element size property could not be read
*/
-static int tpdm_read_dsb_element_size(struct coresight_device *csdev)
+static int tpdm_read_element_size(struct tpda_drvdata *drvdata,
+ struct coresight_device *csdev)
{
- int rc = 0;
- u8 size = 0;
+ int rc = -EINVAL;
+ struct tpdm_drvdata *tpdm_data = dev_get_drvdata(csdev->dev.parent);
+
+ if (tpdm_has_dsb_dataset(tpdm_data)) {
+ rc = fwnode_property_read_u32(dev_fwnode(csdev->dev.parent),
+ "qcom,dsb-element-bits", &drvdata->dsb_esize);
+ }
+ if (tpdm_has_cmb_dataset(tpdm_data)) {
+ rc = fwnode_property_read_u32(dev_fwnode(csdev->dev.parent),
+ "qcom,cmb-element-bits", &drvdata->cmb_esize);
+ }
- rc = fwnode_property_read_u8(dev_fwnode(csdev->dev.parent),
- "qcom,dsb-element-size", &size);
if (rc)
dev_warn_once(&csdev->dev,
- "Failed to read TPDM DSB Element size: %d\n", rc);
+ "Failed to read TPDM Element size: %d\n", rc);
- return size;
+ return rc;
}
/*
@@ -56,11 +92,12 @@ static int tpdm_read_dsb_element_size(struct coresight_device *csdev)
* Parameter "inport" is used to pass in the input port number
 * of TPDA, and it is set to -1 in the recursive call.
*/
-static int tpda_get_element_size(struct coresight_device *csdev,
+static int tpda_get_element_size(struct tpda_drvdata *drvdata,
+ struct coresight_device *csdev,
int inport)
{
- int dsb_size = -ENOENT;
- int i, size;
+ int rc = 0;
+ int i;
struct coresight_device *in;
for (i = 0; i < csdev->pdata->nr_inconns; i++) {
@@ -69,30 +106,26 @@ static int tpda_get_element_size(struct coresight_device *csdev,
continue;
/* Ignore the paths that do not match port */
- if (inport > 0 &&
+ if (inport >= 0 &&
csdev->pdata->in_conns[i]->dest_port != inport)
continue;
if (coresight_device_is_tpdm(in)) {
- size = tpdm_read_dsb_element_size(in);
+ if (drvdata->dsb_esize || drvdata->cmb_esize)
+ return -EEXIST;
+ rc = tpdm_read_element_size(drvdata, in);
+ if (rc)
+ return rc;
} else {
/* Recurse down the path */
- size = tpda_get_element_size(in, -1);
- }
-
- if (size < 0)
- return size;
-
- if (dsb_size < 0) {
- /* Found a size, save it. */
- dsb_size = size;
- } else {
- /* Found duplicate TPDMs */
- return -EEXIST;
+ rc = tpda_get_element_size(drvdata, in, -1);
+ if (rc)
+ return rc;
}
}
- return dsb_size;
+
+ return rc;
}
/* Settings pre enabling port control register */
@@ -109,37 +142,24 @@ static void tpda_enable_pre_port(struct tpda_drvdata *drvdata)
static int tpda_enable_port(struct tpda_drvdata *drvdata, int port)
{
u32 val;
- int size;
+ int rc;
val = readl_relaxed(drvdata->base + TPDA_Pn_CR(port));
- /*
- * Configure aggregator port n DSB data set element size
- * Set the bit to 0 if the size is 32
- * Set the bit to 1 if the size is 64
- */
- size = tpda_get_element_size(drvdata->csdev, port);
- switch (size) {
- case 32:
- val &= ~TPDA_Pn_CR_DSBSIZE;
- break;
- case 64:
- val |= TPDA_Pn_CR_DSBSIZE;
- break;
- case 0:
- return -EEXIST;
- case -EEXIST:
+ tpda_clear_element_size(drvdata->csdev);
+ rc = tpda_get_element_size(drvdata, drvdata->csdev, port);
+ if (!rc && (drvdata->dsb_esize || drvdata->cmb_esize)) {
+ tpda_set_element_size(drvdata, &val);
+ /* Enable the port */
+ val |= TPDA_Pn_CR_ENA;
+ writel_relaxed(val, drvdata->base + TPDA_Pn_CR(port));
+ } else if (rc == -EEXIST)
dev_warn_once(&drvdata->csdev->dev,
- "Detected multiple TPDMs on port %d", -EEXIST);
- return -EEXIST;
- default:
- return -EINVAL;
- }
-
- /* Enable the port */
- val |= TPDA_Pn_CR_ENA;
- writel_relaxed(val, drvdata->base + TPDA_Pn_CR(port));
+ "Detected multiple TPDMs on port %d", port);
+ else
+ dev_warn_once(&drvdata->csdev->dev,
+ "Didn't find TPDM element size");
- return 0;
+ return rc;
}
static int __tpda_enable(struct tpda_drvdata *drvdata, int port)
@@ -148,7 +168,12 @@ static int __tpda_enable(struct tpda_drvdata *drvdata, int port)
CS_UNLOCK(drvdata->base);
- if (!drvdata->csdev->enable)
+ /*
+ * Only do the pre-port enable for the first port that calls enable,
+ * i.e. while the device's main refcount is still 0.
+ */
+ lockdep_assert_held(&drvdata->spinlock);
+ if (!drvdata->csdev->refcnt)
tpda_enable_pre_port(drvdata);
ret = tpda_enable_port(drvdata, port);
@@ -169,6 +194,7 @@ static int tpda_enable(struct coresight_device *csdev,
ret = __tpda_enable(drvdata, in->dest_port);
if (!ret) {
atomic_inc(&in->dest_refcnt);
+ csdev->refcnt++;
dev_dbg(drvdata->dev, "TPDA inport %d enabled.\n", in->dest_port);
}
}
@@ -197,9 +223,10 @@ static void tpda_disable(struct coresight_device *csdev,
struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
- if (atomic_dec_return(&in->dest_refcnt) == 0)
+ if (atomic_dec_return(&in->dest_refcnt) == 0) {
__tpda_disable(drvdata, in->dest_port);
-
+ csdev->refcnt--;
+ }
spin_unlock(&drvdata->spinlock);
dev_dbg(drvdata->dev, "TPDA inport %d disabled\n", in->dest_port);
@@ -300,7 +327,7 @@ static struct amba_id tpda_ids[] = {
.id = 0x000f0f00,
.mask = 0x000fff00,
},
- { 0, 0},
+ { 0, 0, NULL },
};
static struct amba_driver tpda_driver = {
diff --git a/drivers/hwtracing/coresight/coresight-tpda.h b/drivers/hwtracing/coresight/coresight-tpda.h
index b3b38fd41b64b..c6af3d2da3efe 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.h
+++ b/drivers/hwtracing/coresight/coresight-tpda.h
@@ -10,6 +10,8 @@
#define TPDA_Pn_CR(n) (0x004 + (n * 4))
/* Aggregator port enable bit */
#define TPDA_Pn_CR_ENA BIT(0)
+/* Aggregator port CMB data set element size field */
+#define TPDA_Pn_CR_CMBSIZE GENMASK(7, 6)
/* Aggregator port DSB data set element size bit */
#define TPDA_Pn_CR_DSBSIZE BIT(8)
@@ -25,6 +27,8 @@
* @csdev: component vitals needed by the framework.
* @spinlock: lock for the drvdata value.
* @enable: enable status of the component.
+ * @dsb_esize: Record the DSB element size.
+ * @cmb_esize: Record the CMB element size.
*/
struct tpda_drvdata {
void __iomem *base;
@@ -32,6 +36,8 @@ struct tpda_drvdata {
struct coresight_device *csdev;
spinlock_t spinlock;
u8 atid;
+ u32 dsb_esize;
+ u32 cmb_esize;
};
#endif /* _CORESIGHT_CORESIGHT_TPDA_H */
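
The DSBSIZE bit and the new two-bit CMBSIZE field above carry the element widths that tpda_set_element_size() programs into TPDA_Pn_CR. A small worked example of that encoding (illustrative only; the EXAMPLE_* names are hypothetical and the 8/32/64-bit to 0/1/2 mapping is taken from the driver code in this patch, not from a reference manual):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_Pn_CR_ENA	BIT(0)
#define EXAMPLE_Pn_CR_CMBSIZE	GENMASK(7, 6)	/* 0: 8-bit, 1: 32-bit, 2: 64-bit */
#define EXAMPLE_Pn_CR_DSBSIZE	BIT(8)		/* 0: 32-bit, 1: 64-bit */

/* Compute a port control value for the given DSB/CMB element widths. */
static u32 example_port_cr(u32 dsb_bits, u32 cmb_bits)
{
	u32 val = EXAMPLE_Pn_CR_ENA;

	if (dsb_bits == 64)
		val |= EXAMPLE_Pn_CR_DSBSIZE;

	if (cmb_bits == 32)
		val |= FIELD_PREP(EXAMPLE_Pn_CR_CMBSIZE, 0x1);
	else if (cmb_bits == 64)
		val |= FIELD_PREP(EXAMPLE_Pn_CR_CMBSIZE, 0x2);

	/* e.g. dsb_bits = 64, cmb_bits = 32 gives BIT(8) | BIT(6) | BIT(0). */
	return val;
}
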
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index 97654aa4b772a..a9708ab0d4886 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -66,6 +66,31 @@ static ssize_t tpdm_simple_dataset_show(struct device *dev,
return -EINVAL;
return sysfs_emit(buf, "0x%x\n",
drvdata->dsb->msr[tpdm_attr->idx]);
+ case CMB_TRIG_PATT:
+ if (tpdm_attr->idx >= TPDM_CMB_MAX_PATT)
+ return -EINVAL;
+ return sysfs_emit(buf, "0x%x\n",
+ drvdata->cmb->trig_patt[tpdm_attr->idx]);
+ case CMB_TRIG_PATT_MASK:
+ if (tpdm_attr->idx >= TPDM_CMB_MAX_PATT)
+ return -EINVAL;
+ return sysfs_emit(buf, "0x%x\n",
+ drvdata->cmb->trig_patt_mask[tpdm_attr->idx]);
+ case CMB_PATT:
+ if (tpdm_attr->idx >= TPDM_CMB_MAX_PATT)
+ return -EINVAL;
+ return sysfs_emit(buf, "0x%x\n",
+ drvdata->cmb->patt_val[tpdm_attr->idx]);
+ case CMB_PATT_MASK:
+ if (tpdm_attr->idx >= TPDM_CMB_MAX_PATT)
+ return -EINVAL;
+ return sysfs_emit(buf, "0x%x\n",
+ drvdata->cmb->patt_mask[tpdm_attr->idx]);
+ case CMB_MSR:
+ if (tpdm_attr->idx >= drvdata->cmb_msr_num)
+ return -EINVAL;
+ return sysfs_emit(buf, "0x%x\n",
+ drvdata->cmb->msr[tpdm_attr->idx]);
}
return -EINVAL;
}
@@ -77,67 +102,103 @@ static ssize_t tpdm_simple_dataset_store(struct device *dev,
size_t size)
{
unsigned long val;
- ssize_t ret = size;
+ ssize_t ret = -EINVAL;
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct tpdm_dataset_attribute *tpdm_attr =
container_of(attr, struct tpdm_dataset_attribute, attr);
if (kstrtoul(buf, 0, &val))
- return -EINVAL;
+ return ret;
- spin_lock(&drvdata->spinlock);
+ guard(spinlock)(&drvdata->spinlock);
switch (tpdm_attr->mem) {
case DSB_TRIG_PATT:
- if (tpdm_attr->idx < TPDM_DSB_MAX_PATT)
+ if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) {
drvdata->dsb->trig_patt[tpdm_attr->idx] = val;
- else
- ret = -EINVAL;
+ ret = size;
+ }
break;
case DSB_TRIG_PATT_MASK:
- if (tpdm_attr->idx < TPDM_DSB_MAX_PATT)
+ if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) {
drvdata->dsb->trig_patt_mask[tpdm_attr->idx] = val;
- else
- ret = -EINVAL;
+ ret = size;
+ }
break;
case DSB_PATT:
- if (tpdm_attr->idx < TPDM_DSB_MAX_PATT)
+ if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) {
drvdata->dsb->patt_val[tpdm_attr->idx] = val;
- else
- ret = -EINVAL;
+ ret = size;
+ }
break;
case DSB_PATT_MASK:
- if (tpdm_attr->idx < TPDM_DSB_MAX_PATT)
+ if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) {
drvdata->dsb->patt_mask[tpdm_attr->idx] = val;
- else
- ret = -EINVAL;
+ ret = size;
+ }
break;
case DSB_MSR:
- if (tpdm_attr->idx < drvdata->dsb_msr_num)
+ if (tpdm_attr->idx < drvdata->dsb_msr_num) {
drvdata->dsb->msr[tpdm_attr->idx] = val;
- else
- ret = -EINVAL;
+ ret = size;
+ }
+ break;
+ case CMB_TRIG_PATT:
+ if (tpdm_attr->idx < TPDM_CMB_MAX_PATT) {
+ drvdata->cmb->trig_patt[tpdm_attr->idx] = val;
+ ret = size;
+ }
+ break;
+ case CMB_TRIG_PATT_MASK:
+ if (tpdm_attr->idx < TPDM_CMB_MAX_PATT) {
+ drvdata->cmb->trig_patt_mask[tpdm_attr->idx] = val;
+ ret = size;
+ }
+ break;
+ case CMB_PATT:
+ if (tpdm_attr->idx < TPDM_CMB_MAX_PATT) {
+ drvdata->cmb->patt_val[tpdm_attr->idx] = val;
+ ret = size;
+ }
+ break;
+ case CMB_PATT_MASK:
+ if (tpdm_attr->idx < TPDM_CMB_MAX_PATT) {
+ drvdata->cmb->patt_mask[tpdm_attr->idx] = val;
+ ret = size;
+ }
+ break;
+ case CMB_MSR:
+ if (tpdm_attr->idx < drvdata->cmb_msr_num) {
+ drvdata->cmb->msr[tpdm_attr->idx] = val;
+ ret = size;
+ }
break;
default:
- ret = -EINVAL;
+ break;
}
- spin_unlock(&drvdata->spinlock);
return ret;
}
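
tpdm_simple_dataset_store() above now relies on scope-based locking: guard(spinlock)() from <linux/cleanup.h> takes the lock and releases it automatically when the enclosing scope is left, so every early return stays correct. A minimal sketch of the idiom (illustrative only; the example_* names are hypothetical):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_value;

static int example_store(unsigned long val)
{
	/* Lock is taken here and dropped when the function returns. */
	guard(spinlock)(&example_lock);

	if (val > 16)
		return -EINVAL;	/* the guard unlocks on this early return too */

	example_value = val;
	return 0;
}
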
-static bool tpdm_has_dsb_dataset(struct tpdm_drvdata *drvdata)
+static umode_t tpdm_dsb_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
{
- return (drvdata->datasets & TPDM_PIDR0_DS_DSB);
+ struct device *dev = kobj_to_dev(kobj);
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (drvdata && tpdm_has_dsb_dataset(drvdata))
+ return attr->mode;
+
+ return 0;
}
-static umode_t tpdm_dsb_is_visible(struct kobject *kobj,
+static umode_t tpdm_cmb_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
- if (drvdata && tpdm_has_dsb_dataset(drvdata))
+ if (drvdata && tpdm_has_cmb_dataset(drvdata))
return attr->mode;
return 0;
@@ -159,6 +220,23 @@ static umode_t tpdm_dsb_msr_is_visible(struct kobject *kobj,
return 0;
}
+static umode_t tpdm_cmb_msr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ struct device_attribute *dev_attr =
+ container_of(attr, struct device_attribute, attr);
+ struct tpdm_dataset_attribute *tpdm_attr =
+ container_of(dev_attr, struct tpdm_dataset_attribute, attr);
+
+ if (tpdm_attr->idx < drvdata->cmb_msr_num)
+ return attr->mode;
+
+ return 0;
+}
+
static void tpdm_reset_datasets(struct tpdm_drvdata *drvdata)
{
if (tpdm_has_dsb_dataset(drvdata)) {
@@ -167,6 +245,9 @@ static void tpdm_reset_datasets(struct tpdm_drvdata *drvdata)
drvdata->dsb->trig_ts = true;
drvdata->dsb->trig_type = false;
}
+
+ if (drvdata->cmb)
+ memset(drvdata->cmb, 0, sizeof(struct cmb_dataset));
}
static void set_dsb_mode(struct tpdm_drvdata *drvdata, u32 *val)
@@ -233,25 +314,27 @@ static void tpdm_enable_dsb(struct tpdm_drvdata *drvdata)
{
u32 val, i;
+ if (!tpdm_has_dsb_dataset(drvdata))
+ return;
+
for (i = 0; i < TPDM_DSB_MAX_EDCR; i++)
writel_relaxed(drvdata->dsb->edge_ctrl[i],
- drvdata->base + TPDM_DSB_EDCR(i));
+ drvdata->base + TPDM_DSB_EDCR(i));
for (i = 0; i < TPDM_DSB_MAX_EDCMR; i++)
writel_relaxed(drvdata->dsb->edge_ctrl_mask[i],
- drvdata->base + TPDM_DSB_EDCMR(i));
+ drvdata->base + TPDM_DSB_EDCMR(i));
for (i = 0; i < TPDM_DSB_MAX_PATT; i++) {
writel_relaxed(drvdata->dsb->patt_val[i],
- drvdata->base + TPDM_DSB_TPR(i));
+ drvdata->base + TPDM_DSB_TPR(i));
writel_relaxed(drvdata->dsb->patt_mask[i],
- drvdata->base + TPDM_DSB_TPMR(i));
+ drvdata->base + TPDM_DSB_TPMR(i));
writel_relaxed(drvdata->dsb->trig_patt[i],
- drvdata->base + TPDM_DSB_XPR(i));
+ drvdata->base + TPDM_DSB_XPR(i));
writel_relaxed(drvdata->dsb->trig_patt_mask[i],
- drvdata->base + TPDM_DSB_XPMR(i));
+ drvdata->base + TPDM_DSB_XPMR(i));
}
set_dsb_tier(drvdata);
-
set_dsb_msr(drvdata);
val = readl_relaxed(drvdata->base + TPDM_DSB_CR);
@@ -267,6 +350,76 @@ static void tpdm_enable_dsb(struct tpdm_drvdata *drvdata)
writel_relaxed(val, drvdata->base + TPDM_DSB_CR);
}
+static void set_cmb_tier(struct tpdm_drvdata *drvdata)
+{
+ u32 val;
+
+ val = readl_relaxed(drvdata->base + TPDM_CMB_TIER);
+
+ /* Clear all relevant fields */
+ val &= ~(TPDM_CMB_TIER_PATT_TSENAB | TPDM_CMB_TIER_TS_ALL |
+ TPDM_CMB_TIER_XTRIG_TSENAB);
+
+ /* Set pattern timestamp type and enablement */
+ if (drvdata->cmb->patt_ts)
+ val |= TPDM_CMB_TIER_PATT_TSENAB;
+
+ /* Set trigger timestamp */
+ if (drvdata->cmb->trig_ts)
+ val |= TPDM_CMB_TIER_XTRIG_TSENAB;
+
+	/* Set all timestamp enablement */
+ if (drvdata->cmb->ts_all)
+ val |= TPDM_CMB_TIER_TS_ALL;
+
+ writel_relaxed(val, drvdata->base + TPDM_CMB_TIER);
+}
+
+static void set_cmb_msr(struct tpdm_drvdata *drvdata)
+{
+ int i;
+
+ for (i = 0; i < drvdata->cmb_msr_num; i++)
+ writel_relaxed(drvdata->cmb->msr[i],
+ drvdata->base + TPDM_CMB_MSR(i));
+}
+
+static void tpdm_enable_cmb(struct tpdm_drvdata *drvdata)
+{
+ u32 val, i;
+
+ if (!tpdm_has_cmb_dataset(drvdata))
+ return;
+
+ /* Configure pattern registers */
+ for (i = 0; i < TPDM_CMB_MAX_PATT; i++) {
+ writel_relaxed(drvdata->cmb->patt_val[i],
+ drvdata->base + TPDM_CMB_TPR(i));
+ writel_relaxed(drvdata->cmb->patt_mask[i],
+ drvdata->base + TPDM_CMB_TPMR(i));
+ writel_relaxed(drvdata->cmb->trig_patt[i],
+ drvdata->base + TPDM_CMB_XPR(i));
+ writel_relaxed(drvdata->cmb->trig_patt_mask[i],
+ drvdata->base + TPDM_CMB_XPMR(i));
+ }
+
+ set_cmb_tier(drvdata);
+ set_cmb_msr(drvdata);
+
+ val = readl_relaxed(drvdata->base + TPDM_CMB_CR);
+ /*
+ * Set to 0 for continuous CMB collection mode,
+ * 1 for trace-on-change CMB collection mode.
+ */
+ if (drvdata->cmb->trace_mode)
+ val |= TPDM_CMB_CR_MODE;
+ else
+ val &= ~TPDM_CMB_CR_MODE;
+ /* Set the enable bit of CMB control register to 1 */
+ val |= TPDM_CMB_CR_ENA;
+ writel_relaxed(val, drvdata->base + TPDM_CMB_CR);
+}
+
/*
* TPDM enable operations
* The TPDM or Monitor serves as data collection component for various
@@ -279,8 +432,8 @@ static void __tpdm_enable(struct tpdm_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
- if (tpdm_has_dsb_dataset(drvdata))
- tpdm_enable_dsb(drvdata);
+ tpdm_enable_dsb(drvdata);
+ tpdm_enable_cmb(drvdata);
CS_LOCK(drvdata->base);
}
@@ -308,19 +461,35 @@ static void tpdm_disable_dsb(struct tpdm_drvdata *drvdata)
{
u32 val;
+ if (!tpdm_has_dsb_dataset(drvdata))
+ return;
+
/* Set the enable bit of DSB control register to 0 */
val = readl_relaxed(drvdata->base + TPDM_DSB_CR);
val &= ~TPDM_DSB_CR_ENA;
writel_relaxed(val, drvdata->base + TPDM_DSB_CR);
}
+static void tpdm_disable_cmb(struct tpdm_drvdata *drvdata)
+{
+ u32 val;
+
+ if (!tpdm_has_cmb_dataset(drvdata))
+ return;
+
+ val = readl_relaxed(drvdata->base + TPDM_CMB_CR);
+ /* Set the enable bit of CMB control register to 0 */
+ val &= ~TPDM_CMB_CR_ENA;
+ writel_relaxed(val, drvdata->base + TPDM_CMB_CR);
+}
+
/* TPDM disable operations */
static void __tpdm_disable(struct tpdm_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
- if (tpdm_has_dsb_dataset(drvdata))
- tpdm_disable_dsb(drvdata);
+ tpdm_disable_dsb(drvdata);
+ tpdm_disable_cmb(drvdata);
CS_LOCK(drvdata->base);
}
@@ -366,6 +535,12 @@ static int tpdm_datasets_setup(struct tpdm_drvdata *drvdata)
if (!drvdata->dsb)
return -ENOMEM;
}
+ if (tpdm_has_cmb_dataset(drvdata) && (!drvdata->cmb)) {
+ drvdata->cmb = devm_kzalloc(drvdata->dev,
+ sizeof(*drvdata->cmb), GFP_KERNEL);
+ if (!drvdata->cmb)
+ return -ENOMEM;
+ }
tpdm_reset_datasets(drvdata);
return 0;
@@ -577,9 +752,18 @@ static ssize_t enable_ts_show(struct device *dev,
char *buf)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct tpdm_dataset_attribute *tpdm_attr =
+ container_of(attr, struct tpdm_dataset_attribute, attr);
+ ssize_t size = -EINVAL;
- return sysfs_emit(buf, "%u\n",
- (unsigned int)drvdata->dsb->patt_ts);
+ if (tpdm_attr->mem == DSB_PATT)
+ size = sysfs_emit(buf, "%u\n",
+ (unsigned int)drvdata->dsb->patt_ts);
+ else if (tpdm_attr->mem == CMB_PATT)
+ size = sysfs_emit(buf, "%u\n",
+ (unsigned int)drvdata->cmb->patt_ts);
+
+ return size;
}
/*
@@ -591,17 +775,23 @@ static ssize_t enable_ts_store(struct device *dev,
size_t size)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct tpdm_dataset_attribute *tpdm_attr =
+ container_of(attr, struct tpdm_dataset_attribute, attr);
unsigned long val;
if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
- drvdata->dsb->patt_ts = !!val;
- spin_unlock(&drvdata->spinlock);
+ guard(spinlock)(&drvdata->spinlock);
+ if (tpdm_attr->mem == DSB_PATT)
+ drvdata->dsb->patt_ts = !!val;
+ else if (tpdm_attr->mem == CMB_PATT)
+ drvdata->cmb->patt_ts = !!val;
+ else
+ return -EINVAL;
+
return size;
}
-static DEVICE_ATTR_RW(enable_ts);
static ssize_t set_type_show(struct device *dev,
struct device_attribute *attr,
@@ -704,6 +894,96 @@ static ssize_t dsb_trig_ts_store(struct device *dev,
}
static DEVICE_ATTR_RW(dsb_trig_ts);
+static ssize_t cmb_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sysfs_emit(buf, "%x\n", drvdata->cmb->trace_mode);
+
+}
+
+static ssize_t cmb_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long trace_mode;
+
+ if (kstrtoul(buf, 0, &trace_mode) || (trace_mode & ~1UL))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->cmb->trace_mode = trace_mode;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(cmb_mode);
+
+static ssize_t cmb_ts_all_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sysfs_emit(buf, "%u\n",
+ (unsigned int)drvdata->cmb->ts_all);
+}
+
+static ssize_t cmb_ts_all_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+
+ if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
+ return -EINVAL;
+
+ guard(spinlock)(&drvdata->spinlock);
+ if (val)
+ drvdata->cmb->ts_all = true;
+ else
+ drvdata->cmb->ts_all = false;
+
+ return size;
+}
+static DEVICE_ATTR_RW(cmb_ts_all);
+
+static ssize_t cmb_trig_ts_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sysfs_emit(buf, "%u\n",
+ (unsigned int)drvdata->cmb->trig_ts);
+}
+
+static ssize_t cmb_trig_ts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+
+ if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
+ return -EINVAL;
+
+ guard(spinlock)(&drvdata->spinlock);
+ if (val)
+ drvdata->cmb->trig_ts = true;
+ else
+ drvdata->cmb->trig_ts = false;
+
+ return size;
+}
+static DEVICE_ATTR_RW(cmb_trig_ts);
+
static struct attribute *tpdm_dsb_edge_attrs[] = {
&dev_attr_ctrl_idx.attr,
&dev_attr_ctrl_val.attr,
@@ -772,7 +1052,7 @@ static struct attribute *tpdm_dsb_patt_attrs[] = {
DSB_PATT_MASK_ATTR(5),
DSB_PATT_MASK_ATTR(6),
DSB_PATT_MASK_ATTR(7),
- &dev_attr_enable_ts.attr,
+ DSB_PATT_ENABLE_TS,
&dev_attr_set_type.attr,
NULL,
};
@@ -813,6 +1093,59 @@ static struct attribute *tpdm_dsb_msr_attrs[] = {
NULL,
};
+static struct attribute *tpdm_cmb_trig_patt_attrs[] = {
+ CMB_TRIG_PATT_ATTR(0),
+ CMB_TRIG_PATT_ATTR(1),
+ CMB_TRIG_PATT_MASK_ATTR(0),
+ CMB_TRIG_PATT_MASK_ATTR(1),
+ NULL,
+};
+
+static struct attribute *tpdm_cmb_patt_attrs[] = {
+ CMB_PATT_ATTR(0),
+ CMB_PATT_ATTR(1),
+ CMB_PATT_MASK_ATTR(0),
+ CMB_PATT_MASK_ATTR(1),
+ CMB_PATT_ENABLE_TS,
+ NULL,
+};
+
+static struct attribute *tpdm_cmb_msr_attrs[] = {
+ CMB_MSR_ATTR(0),
+ CMB_MSR_ATTR(1),
+ CMB_MSR_ATTR(2),
+ CMB_MSR_ATTR(3),
+ CMB_MSR_ATTR(4),
+ CMB_MSR_ATTR(5),
+ CMB_MSR_ATTR(6),
+ CMB_MSR_ATTR(7),
+ CMB_MSR_ATTR(8),
+ CMB_MSR_ATTR(9),
+ CMB_MSR_ATTR(10),
+ CMB_MSR_ATTR(11),
+ CMB_MSR_ATTR(12),
+ CMB_MSR_ATTR(13),
+ CMB_MSR_ATTR(14),
+ CMB_MSR_ATTR(15),
+ CMB_MSR_ATTR(16),
+ CMB_MSR_ATTR(17),
+ CMB_MSR_ATTR(18),
+ CMB_MSR_ATTR(19),
+ CMB_MSR_ATTR(20),
+ CMB_MSR_ATTR(21),
+ CMB_MSR_ATTR(22),
+ CMB_MSR_ATTR(23),
+ CMB_MSR_ATTR(24),
+ CMB_MSR_ATTR(25),
+ CMB_MSR_ATTR(26),
+ CMB_MSR_ATTR(27),
+ CMB_MSR_ATTR(28),
+ CMB_MSR_ATTR(29),
+ CMB_MSR_ATTR(30),
+ CMB_MSR_ATTR(31),
+ NULL,
+};
+
static struct attribute *tpdm_dsb_attrs[] = {
&dev_attr_dsb_mode.attr,
&dev_attr_dsb_trig_ts.attr,
@@ -820,6 +1153,13 @@ static struct attribute *tpdm_dsb_attrs[] = {
NULL,
};
+static struct attribute *tpdm_cmb_attrs[] = {
+ &dev_attr_cmb_mode.attr,
+ &dev_attr_cmb_ts_all.attr,
+ &dev_attr_cmb_trig_ts.attr,
+ NULL,
+};
+
static struct attribute_group tpdm_dsb_attr_grp = {
.attrs = tpdm_dsb_attrs,
.is_visible = tpdm_dsb_is_visible,
@@ -849,6 +1189,29 @@ static struct attribute_group tpdm_dsb_msr_grp = {
.name = "dsb_msr",
};
+static struct attribute_group tpdm_cmb_attr_grp = {
+ .attrs = tpdm_cmb_attrs,
+ .is_visible = tpdm_cmb_is_visible,
+};
+
+static struct attribute_group tpdm_cmb_trig_patt_grp = {
+ .attrs = tpdm_cmb_trig_patt_attrs,
+ .is_visible = tpdm_cmb_is_visible,
+ .name = "cmb_trig_patt",
+};
+
+static struct attribute_group tpdm_cmb_patt_grp = {
+ .attrs = tpdm_cmb_patt_attrs,
+ .is_visible = tpdm_cmb_is_visible,
+ .name = "cmb_patt",
+};
+
+static struct attribute_group tpdm_cmb_msr_grp = {
+ .attrs = tpdm_cmb_msr_attrs,
+ .is_visible = tpdm_cmb_msr_is_visible,
+ .name = "cmb_msr",
+};
+
static const struct attribute_group *tpdm_attr_grps[] = {
&tpdm_attr_grp,
&tpdm_dsb_attr_grp,
@@ -856,6 +1219,10 @@ static const struct attribute_group *tpdm_attr_grps[] = {
&tpdm_dsb_trig_patt_grp,
&tpdm_dsb_patt_grp,
&tpdm_dsb_msr_grp,
+ &tpdm_cmb_attr_grp,
+ &tpdm_cmb_trig_patt_grp,
+ &tpdm_cmb_patt_grp,
+ &tpdm_cmb_msr_grp,
NULL,
};
@@ -894,6 +1261,10 @@ static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
of_property_read_u32(drvdata->dev->of_node,
"qcom,dsb-msrs-num", &drvdata->dsb_msr_num);
+ if (drvdata && tpdm_has_cmb_dataset(drvdata))
+ of_property_read_u32(drvdata->dev->of_node,
+ "qcom,cmb-msrs-num", &drvdata->cmb_msr_num);
+
/* Set up coresight component description */
desc.name = coresight_alloc_device_name(&tpdm_devs, dev);
if (!desc.name)
@@ -933,7 +1304,7 @@ static struct amba_id tpdm_ids[] = {
.id = 0x000f0e00,
.mask = 0x000fff00,
},
- { 0, 0},
+ { 0, 0, NULL },
};
static struct amba_driver tpdm_driver = {
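Editorial note on the locking change in the sysfs store paths above: the explicit spin_lock()/spin_unlock() pairs are replaced with guard(spinlock)(), the scope-based lock guard built on <linux/cleanup.h>. The lock is released automatically when the guard variable goes out of scope, which is why the early "return -EINVAL" and "return size" paths no longer need an unlock. A minimal sketch of the idiom follows; the struct and function names are illustrative only, not taken from the driver:

#include <linux/cleanup.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_state {
	spinlock_t lock;
	u32 value;
};

static ssize_t demo_store(struct demo_state *s, unsigned long val)
{
	/* Lock taken here; dropped automatically on every return below */
	guard(spinlock)(&s->lock);

	if (val > 1)
		return -EINVAL;		/* no explicit spin_unlock() needed */

	s->value = val;
	return 0;
}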
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.h b/drivers/hwtracing/coresight/coresight-tpdm.h
index 4115b2a17b8d8..e08d212642e35 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.h
+++ b/drivers/hwtracing/coresight/coresight-tpdm.h
@@ -9,6 +9,38 @@
/* The max number of the datasets that TPDM supports */
#define TPDM_DATASETS 7
+/* CMB Subunit Registers */
+#define TPDM_CMB_CR (0xA00)
+/* CMB subunit timestamp insertion enable register */
+#define TPDM_CMB_TIER (0xA04)
+/* CMB subunit timestamp pattern registers */
+#define TPDM_CMB_TPR(n) (0xA08 + (n * 4))
+/* CMB subunit timestamp pattern mask registers */
+#define TPDM_CMB_TPMR(n) (0xA10 + (n * 4))
+/* CMB subunit trigger pattern registers */
+#define TPDM_CMB_XPR(n) (0xA18 + (n * 4))
+/* CMB subunit trigger pattern mask registers */
+#define TPDM_CMB_XPMR(n) (0xA20 + (n * 4))
+/* CMB MSR register */
+#define TPDM_CMB_MSR(n) (0xA80 + (n * 4))
+
+/* Enable bit for CMB subunit */
+#define TPDM_CMB_CR_ENA BIT(0)
+/* Trace collection mode for CMB subunit */
+#define TPDM_CMB_CR_MODE BIT(1)
+/* Timestamp control for pattern match */
+#define TPDM_CMB_TIER_PATT_TSENAB BIT(0)
+/* CMB CTI timestamp request */
+#define TPDM_CMB_TIER_XTRIG_TSENAB BIT(1)
+/* Timestamp for all trace packets */
+#define TPDM_CMB_TIER_TS_ALL BIT(2)
+
+/* Pattern register number */
+#define TPDM_CMB_MAX_PATT 2
+
+/* MAX number of CMB MSR */
+#define TPDM_CMB_MAX_MSR 32
+
/* DSB Subunit Registers */
#define TPDM_DSB_CR (0x780)
#define TPDM_DSB_TIER (0x784)
@@ -79,10 +111,12 @@
*
* PERIPHIDR0[0] : Fix to 1 if ImplDef subunit present, else 0
* PERIPHIDR0[1] : Fix to 1 if DSB subunit present, else 0
+ * PERIPHIDR0[2] : Fix to 1 if CMB subunit present, else 0
*/
#define TPDM_PIDR0_DS_IMPDEF BIT(0)
#define TPDM_PIDR0_DS_DSB BIT(1)
+#define TPDM_PIDR0_DS_CMB BIT(2)
#define TPDM_DSB_MAX_LINES 256
/* MAX number of EDCR registers */
@@ -113,6 +147,16 @@
} \
})[0].attr.attr)
+#define tpdm_patt_enable_ts(name, mem) \
+ (&((struct tpdm_dataset_attribute[]) { \
+ { \
+ __ATTR(name, 0644, enable_ts_show, \
+ enable_ts_store), \
+ mem, \
+ 0, \
+ } \
+ })[0].attr.attr)
+
#define DSB_EDGE_CTRL_ATTR(nr) \
tpdm_simple_dataset_ro(edcr##nr, \
DSB_EDGE_CTRL, nr)
@@ -137,10 +181,38 @@
tpdm_simple_dataset_rw(tpmr##nr, \
DSB_PATT_MASK, nr)
+#define DSB_PATT_ENABLE_TS \
+ tpdm_patt_enable_ts(enable_ts, \
+ DSB_PATT)
+
#define DSB_MSR_ATTR(nr) \
tpdm_simple_dataset_rw(msr##nr, \
DSB_MSR, nr)
+#define CMB_TRIG_PATT_ATTR(nr) \
+ tpdm_simple_dataset_rw(xpr##nr, \
+ CMB_TRIG_PATT, nr)
+
+#define CMB_TRIG_PATT_MASK_ATTR(nr) \
+ tpdm_simple_dataset_rw(xpmr##nr, \
+ CMB_TRIG_PATT_MASK, nr)
+
+#define CMB_PATT_ATTR(nr) \
+ tpdm_simple_dataset_rw(tpr##nr, \
+ CMB_PATT, nr)
+
+#define CMB_PATT_MASK_ATTR(nr) \
+ tpdm_simple_dataset_rw(tpmr##nr, \
+ CMB_PATT_MASK, nr)
+
+#define CMB_PATT_ENABLE_TS \
+ tpdm_patt_enable_ts(enable_ts, \
+ CMB_PATT)
+
+#define CMB_MSR_ATTR(nr) \
+ tpdm_simple_dataset_rw(msr##nr, \
+ CMB_MSR, nr)
+
/**
* struct dsb_dataset - specifics associated to dsb dataset
* @mode: DSB programming mode
@@ -174,6 +246,30 @@ struct dsb_dataset {
};
/**
+ * struct cmb_dataset - specifics associated to cmb dataset
+ * @trace_mode: Dataset collection mode
+ * @patt_val: Save value for pattern
+ * @patt_mask: Save value for pattern mask
+ * @trig_patt: Save value for trigger pattern
+ * @trig_patt_mask: Save value for trigger pattern mask
+ * @msr:		Save value for MSR
+ * @patt_ts: Indicates if pattern match for timestamp is enabled.
+ * @trig_ts: Indicates if CTI trigger for timestamp is enabled.
+ * @ts_all: Indicates if timestamp is enabled for all packets.
+ */
+struct cmb_dataset {
+ u32 trace_mode;
+ u32 patt_val[TPDM_CMB_MAX_PATT];
+ u32 patt_mask[TPDM_CMB_MAX_PATT];
+ u32 trig_patt[TPDM_CMB_MAX_PATT];
+ u32 trig_patt_mask[TPDM_CMB_MAX_PATT];
+ u32 msr[TPDM_CMB_MAX_MSR];
+ bool patt_ts;
+ bool trig_ts;
+ bool ts_all;
+};
+
+/**
* struct tpdm_drvdata - specifics associated to an TPDM component
* @base: memory mapped base address for this component.
* @dev: The device entity associated to this component.
@@ -182,7 +278,9 @@ struct dsb_dataset {
* @enable: enable status of the component.
* @datasets: The datasets types present of the TPDM.
* @dsb Specifics associated to TPDM DSB.
+ * @cmb Specifics associated to TPDM CMB.
* @dsb_msr_num Number of MSR supported by DSB TPDM
+ * @cmb_msr_num Number of MSR supported by CMB TPDM
*/
struct tpdm_drvdata {
@@ -193,7 +291,9 @@ struct tpdm_drvdata {
bool enable;
unsigned long datasets;
struct dsb_dataset *dsb;
+ struct cmb_dataset *cmb;
u32 dsb_msr_num;
+ u32 cmb_msr_num;
};
/* Enumerate members of various datasets */
@@ -205,6 +305,11 @@ enum dataset_mem {
DSB_PATT,
DSB_PATT_MASK,
DSB_MSR,
+ CMB_TRIG_PATT,
+ CMB_TRIG_PATT_MASK,
+ CMB_PATT,
+ CMB_PATT_MASK,
+ CMB_MSR
};
/**
@@ -220,4 +325,13 @@ struct tpdm_dataset_attribute {
u32 idx;
};
+static bool tpdm_has_dsb_dataset(struct tpdm_drvdata *drvdata)
+{
+ return (drvdata->datasets & TPDM_PIDR0_DS_DSB);
+}
+
+static bool tpdm_has_cmb_dataset(struct tpdm_drvdata *drvdata)
+{
+ return (drvdata->datasets & TPDM_PIDR0_DS_CMB);
+}
#endif /* _CORESIGHT_CORESIGHT_TPDM_H */
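For reference, the indexed CMB register macros introduced above stride by 4 bytes from their base offsets, so with TPDM_CMB_MAX_PATT == 2 the pattern, mask and trigger banks occupy 0xA08-0xA27 and the 32 MSRs occupy 0xA80-0xAFC. A small stand-alone sketch (user-space, for illustration only; it just reproduces the macro arithmetic) that prints the resulting layout:

#include <stdio.h>

#define TPDM_CMB_TPR(n)		(0xA08 + (n) * 4)
#define TPDM_CMB_TPMR(n)	(0xA10 + (n) * 4)
#define TPDM_CMB_XPR(n)		(0xA18 + (n) * 4)
#define TPDM_CMB_XPMR(n)	(0xA20 + (n) * 4)
#define TPDM_CMB_MSR(n)		(0xA80 + (n) * 4)

int main(void)
{
	for (int n = 0; n < 2; n++)	/* TPDM_CMB_MAX_PATT == 2 */
		printf("TPR%d=0x%03X TPMR%d=0x%03X XPR%d=0x%03X XPMR%d=0x%03X\n",
		       n, TPDM_CMB_TPR(n), n, TPDM_CMB_TPMR(n),
		       n, TPDM_CMB_XPR(n), n, TPDM_CMB_XPMR(n));

	/* 32 MSRs: 0xA80, 0xA84, ..., 0xAFC */
	printf("MSR0=0x%03X MSR31=0x%03X\n", TPDM_CMB_MSR(0), TPDM_CMB_MSR(31));
	return 0;
}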
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 59eac93fd6bb9..29024f880fda7 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -58,6 +58,7 @@ struct tpiu_drvdata {
void __iomem *base;
struct clk *atclk;
struct coresight_device *csdev;
+ spinlock_t spinlock;
};
static void tpiu_enable_hw(struct csdev_access *csa)
@@ -72,8 +73,11 @@ static void tpiu_enable_hw(struct csdev_access *csa)
static int tpiu_enable(struct coresight_device *csdev, enum cs_mode mode,
void *__unused)
{
+ struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ guard(spinlock)(&drvdata->spinlock);
tpiu_enable_hw(&csdev->access);
- atomic_inc(&csdev->refcnt);
+ csdev->refcnt++;
dev_dbg(&csdev->dev, "TPIU enabled\n");
return 0;
}
@@ -96,7 +100,11 @@ static void tpiu_disable_hw(struct csdev_access *csa)
static int tpiu_disable(struct coresight_device *csdev)
{
- if (atomic_dec_return(&csdev->refcnt))
+ struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ guard(spinlock)(&drvdata->spinlock);
+ csdev->refcnt--;
+ if (csdev->refcnt)
return -EBUSY;
tpiu_disable_hw(&csdev->access);
@@ -132,6 +140,8 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
if (!drvdata)
return -ENOMEM;
+ spin_lock_init(&drvdata->spinlock);
+
drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
@@ -218,7 +228,7 @@ static const struct amba_id tpiu_ids[] = {
.id = 0x000bb9e7,
.mask = 0x000fffff,
},
- { 0, 0},
+ { 0, 0, NULL },
};
MODULE_DEVICE_TABLE(amba, tpiu_ids);
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.c b/drivers/hwtracing/coresight/ultrasoc-smb.c
index 10e886455b8b7..f9ebf20c91e63 100644
--- a/drivers/hwtracing/coresight/ultrasoc-smb.c
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.c
@@ -103,7 +103,7 @@ static int smb_open(struct inode *inode, struct file *file)
if (drvdata->reading)
return -EBUSY;
- if (atomic_read(&drvdata->csdev->refcnt))
+ if (drvdata->csdev->refcnt)
return -EBUSY;
smb_update_data_size(drvdata);
@@ -207,11 +207,11 @@ static void smb_enable_sysfs(struct coresight_device *csdev)
{
struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
- if (drvdata->mode != CS_MODE_DISABLED)
+ if (coresight_get_mode(csdev) != CS_MODE_DISABLED)
return;
smb_enable_hw(drvdata);
- drvdata->mode = CS_MODE_SYSFS;
+ coresight_set_mode(csdev, CS_MODE_SYSFS);
}
static int smb_enable_perf(struct coresight_device *csdev, void *data)
@@ -234,7 +234,7 @@ static int smb_enable_perf(struct coresight_device *csdev, void *data)
if (drvdata->pid == -1) {
smb_enable_hw(drvdata);
drvdata->pid = pid;
- drvdata->mode = CS_MODE_PERF;
+ coresight_set_mode(csdev, CS_MODE_PERF);
}
return 0;
@@ -253,7 +253,8 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
return -EBUSY;
/* Do nothing, the SMB is already enabled as other mode */
- if (drvdata->mode != CS_MODE_DISABLED && drvdata->mode != mode)
+ if (coresight_get_mode(csdev) != CS_MODE_DISABLED &&
+ coresight_get_mode(csdev) != mode)
return -EBUSY;
switch (mode) {
@@ -270,7 +271,7 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
if (ret)
return ret;
- atomic_inc(&csdev->refcnt);
+ csdev->refcnt++;
dev_dbg(&csdev->dev, "Ultrasoc SMB enabled\n");
return ret;
@@ -285,17 +286,18 @@ static int smb_disable(struct coresight_device *csdev)
if (drvdata->reading)
return -EBUSY;
- if (atomic_dec_return(&csdev->refcnt))
+ csdev->refcnt--;
+ if (csdev->refcnt)
return -EBUSY;
/* Complain if we (somehow) got out of sync */
- WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
+ WARN_ON_ONCE(coresight_get_mode(csdev) == CS_MODE_DISABLED);
smb_disable_hw(drvdata);
/* Dissociate from the target process. */
drvdata->pid = -1;
- drvdata->mode = CS_MODE_DISABLED;
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
dev_dbg(&csdev->dev, "Ultrasoc SMB disabled\n");
return 0;
@@ -380,7 +382,7 @@ static unsigned long smb_update_buffer(struct coresight_device *csdev,
guard(spinlock)(&drvdata->spinlock);
/* Don't do anything if another tracer is using this sink. */
- if (atomic_read(&csdev->refcnt) != 1)
+ if (csdev->refcnt != 1)
return 0;
smb_disable_hw(drvdata);
@@ -586,7 +588,7 @@ static void smb_remove(struct platform_device *pdev)
#ifdef CONFIG_ACPI
static const struct acpi_device_id ultrasoc_smb_acpi_match[] = {
- {"HISI03A1", 0},
+ {"HISI03A1", 0, 0, 0},
{}
};
MODULE_DEVICE_TABLE(acpi, ultrasoc_smb_acpi_match);
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.h b/drivers/hwtracing/coresight/ultrasoc-smb.h
index 82a44c14a8829..a91d39cfccb8f 100644
--- a/drivers/hwtracing/coresight/ultrasoc-smb.h
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.h
@@ -109,7 +109,6 @@ struct smb_data_buffer {
* @reading: Synchronise user space access to SMB buffer.
* @pid: Process ID of the process being monitored by the
* session that is using this component.
- * @mode: How this SMB is being used, perf mode or sysfs mode.
*/
struct smb_drv_data {
void __iomem *base;
@@ -119,7 +118,6 @@ struct smb_drv_data {
spinlock_t spinlock;
bool reading;
pid_t pid;
- enum cs_mode mode;
};
#endif
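In the ultrasoc-smb changes above, the sink stops tracking its own mode field: the operating mode now lives in the coresight core and is accessed through coresight_get_mode()/coresight_set_mode(), and csdev->refcnt becomes a plain integer because accesses are serialized by the sink's spinlock (taken with guard(spinlock)). A hedged sketch of the resulting enable-path idiom follows; the coresight helpers are the ones used in the diff, everything else (struct, function names) is illustrative:

#include <linux/cleanup.h>
#include <linux/coresight.h>
#include <linux/device.h>
#include <linux/spinlock.h>

struct demo_drvdata {
	spinlock_t spinlock;
};

static void demo_enable_hw(struct demo_drvdata *drvdata)
{
	/* program the sink hardware here; omitted in this sketch */
}

static int demo_sink_enable(struct coresight_device *csdev, enum cs_mode mode)
{
	struct demo_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	guard(spinlock)(&drvdata->spinlock);

	/* Busy if already enabled in a different mode */
	if (coresight_get_mode(csdev) != CS_MODE_DISABLED &&
	    coresight_get_mode(csdev) != mode)
		return -EBUSY;

	if (coresight_get_mode(csdev) == CS_MODE_DISABLED) {
		demo_enable_hw(drvdata);
		coresight_set_mode(csdev, mode);
	}

	csdev->refcnt++;	/* plain int, protected by the spinlock */
	return 0;
}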
diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
index c1b5fd2b89741..4bf04a9778407 100644
--- a/drivers/hwtracing/ptt/hisi_ptt.c
+++ b/drivers/hwtracing/ptt/hisi_ptt.c
@@ -998,6 +998,9 @@ static int hisi_ptt_pmu_event_init(struct perf_event *event)
int ret;
u32 val;
+ if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
+ return -ENOENT;
+
if (event->cpu < 0) {
dev_dbg(event->pmu->dev, "Per-task mode not supported\n");
return -EOPNOTSUPP;
@@ -1006,9 +1009,6 @@ static int hisi_ptt_pmu_event_init(struct perf_event *event)
if (event->attach_state & PERF_ATTACH_TASK)
return -EOPNOTSUPP;
- if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
- return -ENOENT;
-
ret = hisi_ptt_trace_valid_filter(hisi_ptt, event->attr.config);
if (ret < 0)
return ret;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 28eb48dd5b326..97989c914260f 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -1235,7 +1235,7 @@ config I2C_RCAR
depends on ARCH_RENESAS || COMPILE_TEST
select I2C_SLAVE
select I2C_SMBUS
- select RESET_CONTROLLER if ARCH_RCAR_GEN3
+ select RESET_CONTROLLER if ARCH_RCAR_GEN3 || ARCH_RCAR_GEN4
help
If you say yes to this option, support will be included for the
R-Car I2C controller.
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index de3f58b60dce5..4bb7d6756947c 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -1176,6 +1176,18 @@ static int __maybe_unused cdns_i2c_runtime_suspend(struct device *dev)
return 0;
}
+static int __maybe_unused cdns_i2c_suspend(struct device *dev)
+{
+ struct cdns_i2c *xi2c = dev_get_drvdata(dev);
+
+ i2c_mark_adapter_suspended(&xi2c->adap);
+
+ if (!pm_runtime_status_suspended(dev))
+ return cdns_i2c_runtime_suspend(dev);
+
+ return 0;
+}
+
/**
* cdns_i2c_init - Controller initialisation
* @id: Device private data structure
@@ -1219,7 +1231,28 @@ static int __maybe_unused cdns_i2c_runtime_resume(struct device *dev)
return 0;
}
+static int __maybe_unused cdns_i2c_resume(struct device *dev)
+{
+ struct cdns_i2c *xi2c = dev_get_drvdata(dev);
+ int err;
+
+ err = cdns_i2c_runtime_resume(dev);
+ if (err)
+ return err;
+
+ if (pm_runtime_status_suspended(dev)) {
+ err = cdns_i2c_runtime_suspend(dev);
+ if (err)
+ return err;
+ }
+
+ i2c_mark_adapter_resumed(&xi2c->adap);
+
+ return 0;
+}
+
static const struct dev_pm_ops cdns_i2c_dev_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_i2c_suspend, cdns_i2c_resume)
SET_RUNTIME_PM_OPS(cdns_i2c_runtime_suspend,
cdns_i2c_runtime_resume, NULL)
};
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 35f762872b8a5..e8a688d04aee0 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -648,7 +648,7 @@ void i2c_dw_disable(struct dw_i2c_dev *dev)
__i2c_dw_disable(dev);
/* Disable all interrupts */
- regmap_write(dev->map, DW_IC_INTR_MASK, 0);
+ __i2c_dw_write_intr_mask(dev, 0);
regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
i2c_dw_release_lock(dev);
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index a7f6f3eafad7d..e9606c00b8d10 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -212,6 +212,7 @@ struct reset_control;
* @msg_err: error status of the current transfer
* @status: i2c master status, one of STATUS_*
* @abort_source: copy of the TX_ABRT_SOURCE register
+ * @sw_mask: SW mask of DW_IC_INTR_MASK used in polling mode
* @irq: interrupt number for the i2c master
* @flags: platform specific flags like type of IO accessors or model
* @adapter: i2c subsystem adapter node
@@ -270,6 +271,7 @@ struct dw_i2c_dev {
int msg_err;
unsigned int status;
unsigned int abort_source;
+ unsigned int sw_mask;
int irq;
u32 flags;
struct i2c_adapter adapter;
@@ -303,6 +305,7 @@ struct dw_i2c_dev {
#define ACCESS_INTR_MASK BIT(0)
#define ACCESS_NO_IRQ_SUSPEND BIT(1)
#define ARBITRATION_SEMAPHORE BIT(2)
+#define ACCESS_POLLING BIT(3)
#define MODEL_MSCC_OCELOT BIT(8)
#define MODEL_BAIKAL_BT1 BIT(9)
@@ -318,7 +321,7 @@ struct dw_i2c_dev {
#define AMD_UCSI_INTR_EN 0xd
#define TXGBE_TX_FIFO_DEPTH 4
-#define TXGBE_RX_FIFO_DEPTH 0
+#define TXGBE_RX_FIFO_DEPTH 1
struct i2c_dw_semaphore_callbacks {
int (*probe)(struct dw_i2c_dev *dev);
@@ -351,6 +354,24 @@ static inline void __i2c_dw_disable_nowait(struct dw_i2c_dev *dev)
dev->status &= ~STATUS_ACTIVE;
}
+static inline void __i2c_dw_write_intr_mask(struct dw_i2c_dev *dev,
+ unsigned int intr_mask)
+{
+ unsigned int val = dev->flags & ACCESS_POLLING ? 0 : intr_mask;
+
+ regmap_write(dev->map, DW_IC_INTR_MASK, val);
+ dev->sw_mask = intr_mask;
+}
+
+static inline void __i2c_dw_read_intr_mask(struct dw_i2c_dev *dev,
+ unsigned int *intr_mask)
+{
+ if (!(dev->flags & ACCESS_POLLING))
+ regmap_read(dev->map, DW_IC_INTR_MASK, intr_mask);
+ else
+ *intr_mask = dev->sw_mask;
+}
+
void __i2c_dw_disable(struct dw_i2c_dev *dev);
extern void i2c_dw_configure_master(struct dw_i2c_dev *dev);
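The __i2c_dw_write_intr_mask()/__i2c_dw_read_intr_mask() helpers above carry the polling-mode design: when ACCESS_POLLING is set, DW_IC_INTR_MASK stays at 0 so the controller never raises an interrupt, while the intended mask is remembered in dev->sw_mask so the transfer code can later filter DW_IC_RAW_INTR_STAT the way the hardware would have filtered IC_INTR_STAT. A compilable sketch of that shadow-mask idea, with simplified illustrative names rather than the driver structures:

#include <stdbool.h>

struct fake_dev {
	unsigned int hw_mask;	/* what would land in DW_IC_INTR_MASK */
	unsigned int sw_mask;	/* shadow mask used in polling mode */
	bool polling;
};

static void write_intr_mask(struct fake_dev *d, unsigned int mask)
{
	/* Keep the hardware masked while polling, but remember the intent */
	d->hw_mask = d->polling ? 0 : mask;
	d->sw_mask = mask;
}

static unsigned int effective_stat(struct fake_dev *d, unsigned int raw_stat)
{
	/*
	 * Polling mode: RAW_INTR_STAT filtered by the shadow mask mirrors
	 * what IC_INTR_STAT would report with interrupts enabled. In
	 * interrupt mode the real driver reads IC_INTR_STAT directly;
	 * raw_stat stands in for that here.
	 */
	return d->polling ? (raw_stat & d->sw_mask) : raw_stat;
}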
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 85dbd0eb5392c..c7e56002809ac 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -240,7 +240,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
msgs[dev->msg_write_idx].addr | ic_tar);
/* Enforce disabled interrupts (due to HW issues) */
- regmap_write(dev->map, DW_IC_INTR_MASK, 0);
+ __i2c_dw_write_intr_mask(dev, 0);
/* Enable the adapter */
__i2c_dw_enable(dev);
@@ -250,7 +250,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
/* Clear and enable interrupts */
regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
- regmap_write(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_MASTER_MASK);
+ __i2c_dw_write_intr_mask(dev, DW_IC_INTR_MASTER_MASK);
}
static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
@@ -300,7 +300,6 @@ static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs,
dev->msgs = msgs;
dev->msgs_num = num_msgs;
i2c_dw_xfer_init(dev);
- regmap_write(dev->map, DW_IC_INTR_MASK, 0);
/* Initiate messages read/write transaction */
for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
@@ -355,68 +354,6 @@ static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs,
return 0;
}
-static int i2c_dw_poll_tx_empty(struct dw_i2c_dev *dev)
-{
- u32 val;
-
- return regmap_read_poll_timeout(dev->map, DW_IC_RAW_INTR_STAT, val,
- val & DW_IC_INTR_TX_EMPTY,
- 100, 1000);
-}
-
-static int i2c_dw_poll_rx_full(struct dw_i2c_dev *dev)
-{
- u32 val;
-
- return regmap_read_poll_timeout(dev->map, DW_IC_RAW_INTR_STAT, val,
- val & DW_IC_INTR_RX_FULL,
- 100, 1000);
-}
-
-static int txgbe_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num_msgs)
-{
- struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
- int msg_idx, buf_len, data_idx, ret;
- unsigned int val, stop = 0;
- u8 *buf;
-
- dev->msgs = msgs;
- dev->msgs_num = num_msgs;
- i2c_dw_xfer_init(dev);
- regmap_write(dev->map, DW_IC_INTR_MASK, 0);
-
- for (msg_idx = 0; msg_idx < num_msgs; msg_idx++) {
- buf = msgs[msg_idx].buf;
- buf_len = msgs[msg_idx].len;
-
- for (data_idx = 0; data_idx < buf_len; data_idx++) {
- if (msg_idx == num_msgs - 1 && data_idx == buf_len - 1)
- stop |= BIT(9);
-
- if (msgs[msg_idx].flags & I2C_M_RD) {
- regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | stop);
-
- ret = i2c_dw_poll_rx_full(dev);
- if (ret)
- return ret;
-
- regmap_read(dev->map, DW_IC_DATA_CMD, &val);
- buf[data_idx] = val;
- } else {
- ret = i2c_dw_poll_tx_empty(dev);
- if (ret)
- return ret;
-
- regmap_write(dev->map, DW_IC_DATA_CMD,
- buf[data_idx] | stop);
- }
- }
- }
-
- return num_msgs;
-}
-
/*
* Initiate (and continue) low level master read/write transaction.
* This function is only called from i2c_dw_isr, and pumping i2c_msg
@@ -546,7 +483,7 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
if (dev->msg_err)
intr_mask = 0;
- regmap_write(dev->map, DW_IC_INTR_MASK, intr_mask);
+ __i2c_dw_write_intr_mask(dev, intr_mask);
}
static u8
@@ -554,6 +491,7 @@ i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
{
struct i2c_msg *msgs = dev->msgs;
u32 flags = msgs[dev->msg_read_idx].flags;
+ unsigned int intr_mask;
/*
* Adjust the buffer length and mask the flag
@@ -568,8 +506,9 @@ i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
* Received buffer length, re-enable TX_EMPTY interrupt
* to resume the SMBUS transaction.
*/
- regmap_update_bits(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_TX_EMPTY,
- DW_IC_INTR_TX_EMPTY);
+ __i2c_dw_read_intr_mask(dev, &intr_mask);
+ intr_mask |= DW_IC_INTR_TX_EMPTY;
+ __i2c_dw_write_intr_mask(dev, intr_mask);
return len;
}
@@ -633,6 +572,169 @@ i2c_dw_read(struct dw_i2c_dev *dev)
}
}
+static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
+{
+ unsigned int stat, dummy;
+
+ /*
+ * The IC_INTR_STAT register just indicates "enabled" interrupts.
+ * The unmasked raw version of interrupt status bits is available
+ * in the IC_RAW_INTR_STAT register.
+ *
+ * That is,
+ * stat = readl(IC_INTR_STAT);
+ * equals to,
+ * stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
+ *
+ * The raw version might be useful for debugging purposes.
+ */
+ if (!(dev->flags & ACCESS_POLLING)) {
+ regmap_read(dev->map, DW_IC_INTR_STAT, &stat);
+ } else {
+ regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
+ stat &= dev->sw_mask;
+ }
+
+ /*
+ * Do not use the IC_CLR_INTR register to clear interrupts, or
+ * you'll miss some interrupts, triggered during the period from
+ * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
+ *
+ * Instead, use the separately-prepared IC_CLR_* registers.
+ */
+ if (stat & DW_IC_INTR_RX_UNDER)
+ regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
+ if (stat & DW_IC_INTR_RX_OVER)
+ regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
+ if (stat & DW_IC_INTR_TX_OVER)
+ regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
+ if (stat & DW_IC_INTR_RD_REQ)
+ regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
+ if (stat & DW_IC_INTR_TX_ABRT) {
+ /*
+ * The IC_TX_ABRT_SOURCE register is cleared whenever
+ * the IC_CLR_TX_ABRT is read. Preserve it beforehand.
+ */
+ regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
+ regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
+ }
+ if (stat & DW_IC_INTR_RX_DONE)
+ regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
+ if (stat & DW_IC_INTR_ACTIVITY)
+ regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
+ if ((stat & DW_IC_INTR_STOP_DET) &&
+ ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
+ regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
+ if (stat & DW_IC_INTR_START_DET)
+ regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
+ if (stat & DW_IC_INTR_GEN_CALL)
+ regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);
+
+ return stat;
+}
+
+static void i2c_dw_process_transfer(struct dw_i2c_dev *dev, unsigned int stat)
+{
+ if (stat & DW_IC_INTR_TX_ABRT) {
+ dev->cmd_err |= DW_IC_ERR_TX_ABRT;
+ dev->status &= ~STATUS_MASK;
+ dev->rx_outstanding = 0;
+
+ /*
+ * Anytime TX_ABRT is set, the contents of the tx/rx
+ * buffers are flushed. Make sure to skip them.
+ */
+ __i2c_dw_write_intr_mask(dev, 0);
+ goto tx_aborted;
+ }
+
+ if (stat & DW_IC_INTR_RX_FULL)
+ i2c_dw_read(dev);
+
+ if (stat & DW_IC_INTR_TX_EMPTY)
+ i2c_dw_xfer_msg(dev);
+
+ /*
+ * No need to modify or disable the interrupt mask here.
+ * i2c_dw_xfer_msg() will take care of it according to
+ * the current transmit status.
+ */
+
+tx_aborted:
+ if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
+ (dev->rx_outstanding == 0))
+ complete(&dev->cmd_complete);
+ else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
+ /* Workaround to trigger pending interrupt */
+ __i2c_dw_read_intr_mask(dev, &stat);
+ __i2c_dw_write_intr_mask(dev, 0);
+ __i2c_dw_write_intr_mask(dev, stat);
+ }
+}
+
+/*
+ * Interrupt service routine. This gets called whenever an I2C master interrupt
+ * occurs.
+ */
+static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
+{
+ struct dw_i2c_dev *dev = dev_id;
+ unsigned int stat, enabled;
+
+ regmap_read(dev->map, DW_IC_ENABLE, &enabled);
+ regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
+ if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
+ return IRQ_NONE;
+ if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0))
+ return IRQ_NONE;
+ dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);
+
+ stat = i2c_dw_read_clear_intrbits(dev);
+
+ if (!(dev->status & STATUS_ACTIVE)) {
+ /*
+ * Unexpected interrupt in driver point of view. State
+ * variables are either unset or stale so acknowledge and
+ * disable interrupts for suppressing further interrupts if
+ * interrupt really came from this HW (E.g. firmware has left
+ * the HW active).
+ */
+ __i2c_dw_write_intr_mask(dev, 0);
+ return IRQ_HANDLED;
+ }
+
+ i2c_dw_process_transfer(dev, stat);
+
+ return IRQ_HANDLED;
+}
+
+static int i2c_dw_wait_transfer(struct dw_i2c_dev *dev)
+{
+ unsigned long timeout = dev->adapter.timeout;
+ unsigned int stat;
+ int ret;
+
+ if (!(dev->flags & ACCESS_POLLING)) {
+ ret = wait_for_completion_timeout(&dev->cmd_complete, timeout);
+ } else {
+ timeout += jiffies;
+ do {
+ ret = try_wait_for_completion(&dev->cmd_complete);
+ if (ret)
+ break;
+
+ stat = i2c_dw_read_clear_intrbits(dev);
+ if (stat)
+ i2c_dw_process_transfer(dev, stat);
+ else
+				/* Try to save some power */
+ usleep_range(3, 25);
+ } while (time_before(jiffies, timeout));
+ }
+
+ return ret ? 0 : -ETIMEDOUT;
+}
+
/*
* Prepare controller for a transaction and call i2c_dw_xfer_msg.
*/
@@ -646,18 +748,10 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
pm_runtime_get_sync(dev->dev);
- /*
- * Initiate I2C message transfer when polling mode is enabled,
- * As it is polling based transfer mechanism, which does not support
- * interrupt based functionalities of existing DesignWare driver.
- */
switch (dev->flags & MODEL_MASK) {
case MODEL_AMD_NAVI_GPU:
ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
goto done_nolock;
- case MODEL_WANGXUN_SP:
- ret = txgbe_i2c_dw_xfer_quirk(adap, msgs, num);
- goto done_nolock;
default:
break;
}
@@ -685,12 +779,12 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
i2c_dw_xfer_init(dev);
/* Wait for tx to complete */
- if (!wait_for_completion_timeout(&dev->cmd_complete, adap->timeout)) {
+ ret = i2c_dw_wait_transfer(dev);
+ if (ret) {
dev_err(dev->dev, "controller timed out\n");
- /* i2c_dw_init implicitly disables the adapter */
+ /* i2c_dw_init_master() implicitly disables the adapter */
i2c_recover_bus(&dev->adapter);
i2c_dw_init_master(dev);
- ret = -ETIMEDOUT;
goto done;
}
@@ -698,7 +792,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
* We must disable the adapter before returning and signaling the end
* of the current transfer. Otherwise the hardware might continue
* generating interrupts which in turn causes a race condition with
- * the following transfer. Needs some more investigation if the
+ * the following transfer. Needs some more investigation if the
* additional interrupts are a hardware bug or this driver doesn't
* handle them correctly yet.
*/
@@ -746,132 +840,6 @@ static const struct i2c_adapter_quirks i2c_dw_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN,
};
-static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
-{
- unsigned int stat, dummy;
-
- /*
- * The IC_INTR_STAT register just indicates "enabled" interrupts.
- * The unmasked raw version of interrupt status bits is available
- * in the IC_RAW_INTR_STAT register.
- *
- * That is,
- * stat = readl(IC_INTR_STAT);
- * equals to,
- * stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
- *
- * The raw version might be useful for debugging purposes.
- */
- regmap_read(dev->map, DW_IC_INTR_STAT, &stat);
-
- /*
- * Do not use the IC_CLR_INTR register to clear interrupts, or
- * you'll miss some interrupts, triggered during the period from
- * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
- *
- * Instead, use the separately-prepared IC_CLR_* registers.
- */
- if (stat & DW_IC_INTR_RX_UNDER)
- regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
- if (stat & DW_IC_INTR_RX_OVER)
- regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
- if (stat & DW_IC_INTR_TX_OVER)
- regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
- if (stat & DW_IC_INTR_RD_REQ)
- regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
- if (stat & DW_IC_INTR_TX_ABRT) {
- /*
- * The IC_TX_ABRT_SOURCE register is cleared whenever
- * the IC_CLR_TX_ABRT is read. Preserve it beforehand.
- */
- regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
- regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
- }
- if (stat & DW_IC_INTR_RX_DONE)
- regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
- if (stat & DW_IC_INTR_ACTIVITY)
- regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
- if ((stat & DW_IC_INTR_STOP_DET) &&
- ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
- regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
- if (stat & DW_IC_INTR_START_DET)
- regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
- if (stat & DW_IC_INTR_GEN_CALL)
- regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);
-
- return stat;
-}
-
-/*
- * Interrupt service routine. This gets called whenever an I2C master interrupt
- * occurs.
- */
-static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
-{
- struct dw_i2c_dev *dev = dev_id;
- unsigned int stat, enabled;
-
- regmap_read(dev->map, DW_IC_ENABLE, &enabled);
- regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
- if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
- return IRQ_NONE;
- if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0))
- return IRQ_NONE;
- dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);
-
- stat = i2c_dw_read_clear_intrbits(dev);
-
- if (!(dev->status & STATUS_ACTIVE)) {
- /*
- * Unexpected interrupt in driver point of view. State
- * variables are either unset or stale so acknowledge and
- * disable interrupts for suppressing further interrupts if
- * interrupt really came from this HW (E.g. firmware has left
- * the HW active).
- */
- regmap_write(dev->map, DW_IC_INTR_MASK, 0);
- return IRQ_HANDLED;
- }
-
- if (stat & DW_IC_INTR_TX_ABRT) {
- dev->cmd_err |= DW_IC_ERR_TX_ABRT;
- dev->status &= ~STATUS_MASK;
- dev->rx_outstanding = 0;
-
- /*
- * Anytime TX_ABRT is set, the contents of the tx/rx
- * buffers are flushed. Make sure to skip them.
- */
- regmap_write(dev->map, DW_IC_INTR_MASK, 0);
- goto tx_aborted;
- }
-
- if (stat & DW_IC_INTR_RX_FULL)
- i2c_dw_read(dev);
-
- if (stat & DW_IC_INTR_TX_EMPTY)
- i2c_dw_xfer_msg(dev);
-
- /*
- * No need to modify or disable the interrupt mask here.
- * i2c_dw_xfer_msg() will take care of it according to
- * the current transmit status.
- */
-
-tx_aborted:
- if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
- (dev->rx_outstanding == 0))
- complete(&dev->cmd_complete);
- else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
- /* Workaround to trigger pending interrupt */
- regmap_read(dev->map, DW_IC_INTR_MASK, &stat);
- regmap_write(dev->map, DW_IC_INTR_MASK, 0);
- regmap_write(dev->map, DW_IC_INTR_MASK, stat);
- }
-
- return IRQ_HANDLED;
-}
-
void i2c_dw_configure_master(struct dw_i2c_dev *dev)
{
struct i2c_timings *t = &dev->timings;
@@ -953,31 +921,6 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
return 0;
}
-static int i2c_dw_poll_adap_quirk(struct dw_i2c_dev *dev)
-{
- struct i2c_adapter *adap = &dev->adapter;
- int ret;
-
- pm_runtime_get_noresume(dev->dev);
- ret = i2c_add_numbered_adapter(adap);
- if (ret)
- dev_err(dev->dev, "Failed to add adapter: %d\n", ret);
- pm_runtime_put_noidle(dev->dev);
-
- return ret;
-}
-
-static bool i2c_dw_is_model_poll(struct dw_i2c_dev *dev)
-{
- switch (dev->flags & MODEL_MASK) {
- case MODEL_AMD_NAVI_GPU:
- case MODEL_WANGXUN_SP:
- return true;
- default:
- return false;
- }
-}
-
int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
struct i2c_adapter *adap = &dev->adapter;
@@ -1033,9 +976,6 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev)
adap->dev.parent = dev->dev;
i2c_set_adapdata(adap, dev);
- if (i2c_dw_is_model_poll(dev))
- return i2c_dw_poll_adap_quirk(dev);
-
if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
irq_flags = IRQF_NO_SUSPEND;
} else {
@@ -1046,15 +986,17 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev)
if (ret)
return ret;
- regmap_write(dev->map, DW_IC_INTR_MASK, 0);
+ __i2c_dw_write_intr_mask(dev, 0);
i2c_dw_release_lock(dev);
- ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, irq_flags,
- dev_name(dev->dev), dev);
- if (ret) {
- dev_err(dev->dev, "failure requesting irq %i: %d\n",
- dev->irq, ret);
- return ret;
+ if (!(dev->flags & ACCESS_POLLING)) {
+ ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr,
+ irq_flags, dev_name(dev->dev), dev);
+ if (ret) {
+ dev_err(dev->dev, "failure requesting irq %i: %d\n",
+ dev->irq, ret);
+ return ret;
+ }
}
ret = i2c_dw_init_recovery_info(dev);
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 61d7a27aa0701..9be9a2658e1f6 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -154,7 +154,7 @@ static int navi_amd_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
{
struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev);
- dev->flags |= MODEL_AMD_NAVI_GPU;
+ dev->flags |= MODEL_AMD_NAVI_GPU | ACCESS_POLLING;
dev->timings.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ;
return 0;
}
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 855b698e99c08..4ab41ba39d55f 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -290,7 +290,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
dev->flags = (uintptr_t)device_get_match_data(&pdev->dev);
if (device_property_present(&pdev->dev, "wx,i2c-snps-model"))
- dev->flags = MODEL_WANGXUN_SP;
+ dev->flags = MODEL_WANGXUN_SP | ACCESS_POLLING;
dev->dev = &pdev->dev;
dev->irq = irq;
diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c
index dfad5bad50755..975c0b1c44deb 100644
--- a/drivers/i2c/busses/i2c-hisi.c
+++ b/drivers/i2c/busses/i2c-hisi.c
@@ -57,6 +57,8 @@
#define HISI_I2C_FS_SPK_LEN_CNT GENMASK(7, 0)
#define HISI_I2C_HS_SPK_LEN 0x003c
#define HISI_I2C_HS_SPK_LEN_CNT GENMASK(7, 0)
+#define HISI_I2C_TX_INT_CLR 0x0040
+#define HISI_I2C_TX_AEMPTY_INT BIT(0)
#define HISI_I2C_INT_MSTAT 0x0044
#define HISI_I2C_INT_CLR 0x0048
#define HISI_I2C_INT_MASK 0x004C
@@ -124,6 +126,11 @@ static void hisi_i2c_clear_int(struct hisi_i2c_controller *ctlr, u32 mask)
writel_relaxed(mask, ctlr->iobase + HISI_I2C_INT_CLR);
}
+static void hisi_i2c_clear_tx_int(struct hisi_i2c_controller *ctlr, u32 mask)
+{
+ writel_relaxed(mask, ctlr->iobase + HISI_I2C_TX_INT_CLR);
+}
+
static void hisi_i2c_handle_errors(struct hisi_i2c_controller *ctlr)
{
u32 int_err = ctlr->xfer_err, reg;
@@ -168,6 +175,7 @@ static int hisi_i2c_start_xfer(struct hisi_i2c_controller *ctlr)
writel(reg, ctlr->iobase + HISI_I2C_FIFO_CTRL);
hisi_i2c_clear_int(ctlr, HISI_I2C_INT_ALL);
+ hisi_i2c_clear_tx_int(ctlr, HISI_I2C_TX_AEMPTY_INT);
hisi_i2c_enable_int(ctlr, HISI_I2C_INT_ALL);
return 0;
@@ -266,7 +274,7 @@ static int hisi_i2c_read_rx_fifo(struct hisi_i2c_controller *ctlr)
static void hisi_i2c_xfer_msg(struct hisi_i2c_controller *ctlr)
{
- int max_write = HISI_I2C_TX_FIFO_DEPTH;
+ int max_write = HISI_I2C_TX_FIFO_DEPTH - HISI_I2C_TX_F_AE_THRESH;
bool need_restart = false, last_msg;
struct i2c_msg *cur_msg;
u32 cmd, fifo_state;
@@ -323,6 +331,8 @@ static void hisi_i2c_xfer_msg(struct hisi_i2c_controller *ctlr)
*/
if (ctlr->msg_tx_idx == ctlr->msg_num)
hisi_i2c_disable_int(ctlr, HISI_I2C_INT_TX_EMPTY);
+
+ hisi_i2c_clear_tx_int(ctlr, HISI_I2C_TX_AEMPTY_INT);
}
static irqreturn_t hisi_i2c_irq(int irq, void *context)
@@ -363,6 +373,7 @@ out:
if (int_stat & HISI_I2C_INT_TRANS_CPLT) {
hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL);
hisi_i2c_clear_int(ctlr, HISI_I2C_INT_ALL);
+ hisi_i2c_clear_tx_int(ctlr, HISI_I2C_TX_AEMPTY_INT);
complete(ctlr->completion);
}
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 274e987e4cfa0..79870dd7a0146 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -205,6 +205,8 @@
#define STATUS_FLAGS (SMBHSTSTS_BYTE_DONE | SMBHSTSTS_INTR | \
STATUS_ERROR_FLAGS)
+#define SMBUS_LEN_SENTINEL (I2C_SMBUS_BLOCK_MAX + 1)
+
/* Older devices have their ID defined in <linux/pci_ids.h> */
#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS 0x02a3
#define PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS 0x06a3
@@ -328,11 +330,39 @@ MODULE_PARM_DESC(disable_features, "Disable selected driver features:\n"
"\t\t 0x10 don't use interrupts\n"
"\t\t 0x20 disable SMBus Host Notify ");
+static int i801_get_block_len(struct i801_priv *priv)
+{
+ u8 len = inb_p(SMBHSTDAT0(priv));
+
+ if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) {
+ pci_err(priv->pci_dev, "Illegal SMBus block read size %u\n", len);
+ return -EPROTO;
+ }
+
+ return len;
+}
+
+static int i801_check_and_clear_pec_error(struct i801_priv *priv)
+{
+ u8 status;
+
+ if (!(priv->features & FEATURE_SMBUS_PEC))
+ return 0;
+
+ status = inb_p(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE;
+ if (status) {
+ outb_p(status, SMBAUXSTS(priv));
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
/* Make sure the SMBus host is ready to start transmitting.
Return 0 if it is, -EBUSY if it is not. */
static int i801_check_pre(struct i801_priv *priv)
{
- int status;
+ int status, result;
status = inb_p(SMBHSTSTS(priv));
if (status & SMBHSTSTS_HOST_BUSY) {
@@ -353,13 +383,9 @@ static int i801_check_pre(struct i801_priv *priv)
* the hardware was already in this state when the driver
* started.
*/
- if (priv->features & FEATURE_SMBUS_PEC) {
- status = inb_p(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE;
- if (status) {
- pci_dbg(priv->pci_dev, "Clearing aux status flags (%02x)\n", status);
- outb_p(status, SMBAUXSTS(priv));
- }
- }
+ result = i801_check_and_clear_pec_error(priv);
+ if (result)
+ pci_dbg(priv->pci_dev, "Clearing aux status flag CRCE\n");
return 0;
}
@@ -408,14 +434,12 @@ static int i801_check_post(struct i801_priv *priv, int status)
* bit is harmless as long as it's cleared before
* the next operation.
*/
- if ((priv->features & FEATURE_SMBUS_PEC) &&
- (inb_p(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE)) {
- outb_p(SMBAUXSTS_CRCE, SMBAUXSTS(priv));
- result = -EBADMSG;
- dev_dbg(&priv->pci_dev->dev, "PEC error\n");
+ result = i801_check_and_clear_pec_error(priv);
+ if (result) {
+ pci_dbg(priv->pci_dev, "PEC error\n");
} else {
result = -ENXIO;
- dev_dbg(&priv->pci_dev->dev, "No response\n");
+ pci_dbg(priv->pci_dev, "No response\n");
}
}
if (status & SMBHSTSTS_BUS_ERR) {
@@ -512,9 +536,9 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
if (read_write == I2C_SMBUS_READ ||
command == I2C_SMBUS_BLOCK_PROC_CALL) {
- len = inb_p(SMBHSTDAT0(priv));
- if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) {
- status = -EPROTO;
+ len = i801_get_block_len(priv);
+ if (len < 0) {
+ status = len;
goto out;
}
@@ -531,17 +555,17 @@ out:
static void i801_isr_byte_done(struct i801_priv *priv)
{
if (priv->is_read) {
- /* For SMBus block reads, length is received with first byte */
- if (((priv->cmd & 0x1c) == I801_BLOCK_DATA) &&
- (priv->count == 0)) {
- priv->len = inb_p(SMBHSTDAT0(priv));
- if (priv->len < 1 || priv->len > I2C_SMBUS_BLOCK_MAX) {
- dev_err(&priv->pci_dev->dev,
- "Illegal SMBus block read size %d\n",
- priv->len);
+ /*
+ * At transfer start i801_smbus_block_transaction() marks
+ * the block length as invalid. Check for this sentinel value
+ * and read the block length from SMBHSTDAT0.
+ */
+ if (priv->len == SMBUS_LEN_SENTINEL) {
+ priv->len = i801_get_block_len(priv);
+ if (priv->len < 0)
/* FIXME: Recover */
priv->len = I2C_SMBUS_BLOCK_MAX;
- }
+
priv->data[-1] = priv->len;
}
@@ -688,13 +712,14 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
if (status)
return status;
- if (i == 1 && read_write == I2C_SMBUS_READ
- && command != I2C_SMBUS_I2C_BLOCK_DATA) {
- len = inb_p(SMBHSTDAT0(priv));
- if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) {
- dev_err(&priv->pci_dev->dev,
- "Illegal SMBus block read size %d\n",
- len);
+ /*
+ * At transfer start i801_smbus_block_transaction() marks
+ * the block length as invalid. Check for this sentinel value
+ * and read the block length from SMBHSTDAT0.
+ */
+ if (len == SMBUS_LEN_SENTINEL) {
+ len = i801_get_block_len(priv);
+ if (len < 0) {
/* Recover */
while (inb_p(SMBHSTSTS(priv)) &
SMBHSTSTS_HOST_BUSY)
@@ -792,77 +817,66 @@ static int i801_simple_transaction(struct i801_priv *priv, union i2c_smbus_data
return 0;
}
-/* Block transaction function */
-static int i801_block_transaction(struct i801_priv *priv, union i2c_smbus_data *data,
- u8 addr, u8 hstcmd, char read_write, int command)
+static int i801_smbus_block_transaction(struct i801_priv *priv, union i2c_smbus_data *data,
+ u8 addr, u8 hstcmd, char read_write, int command)
{
- int result = 0;
- unsigned char hostc;
-
if (read_write == I2C_SMBUS_READ && command == I2C_SMBUS_BLOCK_DATA)
- data->block[0] = I2C_SMBUS_BLOCK_MAX;
+ /* Mark block length as invalid */
+ data->block[0] = SMBUS_LEN_SENTINEL;
else if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
return -EPROTO;
- switch (command) {
- case I2C_SMBUS_BLOCK_DATA:
- i801_set_hstadd(priv, addr, read_write);
- outb_p(hstcmd, SMBHSTCMD(priv));
- break;
- case I2C_SMBUS_I2C_BLOCK_DATA:
- /*
- * NB: page 240 of ICH5 datasheet shows that the R/#W
- * bit should be cleared here, even when reading.
- * However if SPD Write Disable is set (Lynx Point and later),
- * the read will fail if we don't set the R/#W bit.
- */
- i801_set_hstadd(priv, addr,
- priv->original_hstcfg & SMBHSTCFG_SPD_WD ?
- read_write : I2C_SMBUS_WRITE);
- if (read_write == I2C_SMBUS_READ) {
- /* NB: page 240 of ICH5 datasheet also shows
- * that DATA1 is the cmd field when reading
- */
- outb_p(hstcmd, SMBHSTDAT1(priv));
- } else
- outb_p(hstcmd, SMBHSTCMD(priv));
-
- if (read_write == I2C_SMBUS_WRITE) {
- /* set I2C_EN bit in configuration register */
- pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &hostc);
- pci_write_config_byte(priv->pci_dev, SMBHSTCFG,
- hostc | SMBHSTCFG_I2C_EN);
- } else if (!(priv->features & FEATURE_I2C_BLOCK_READ)) {
- dev_err(&priv->pci_dev->dev,
- "I2C block read is unsupported!\n");
- return -EOPNOTSUPP;
- }
- break;
- case I2C_SMBUS_BLOCK_PROC_CALL:
+ if (command == I2C_SMBUS_BLOCK_PROC_CALL)
/* Needs to be flagged as write transaction */
i801_set_hstadd(priv, addr, I2C_SMBUS_WRITE);
+ else
+ i801_set_hstadd(priv, addr, read_write);
+ outb_p(hstcmd, SMBHSTCMD(priv));
+
+ if (priv->features & FEATURE_BLOCK_BUFFER)
+ return i801_block_transaction_by_block(priv, data, read_write, command);
+ else
+ return i801_block_transaction_byte_by_byte(priv, data, read_write, command);
+}
+
+static int i801_i2c_block_transaction(struct i801_priv *priv, union i2c_smbus_data *data,
+ u8 addr, u8 hstcmd, char read_write, int command)
+{
+ int result;
+ u8 hostc;
+
+ if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
+ return -EPROTO;
+ /*
+ * NB: page 240 of ICH5 datasheet shows that the R/#W bit should be cleared here,
+ * even when reading. However if SPD Write Disable is set (Lynx Point and later),
+ * the read will fail if we don't set the R/#W bit.
+ */
+ i801_set_hstadd(priv, addr,
+ priv->original_hstcfg & SMBHSTCFG_SPD_WD ? read_write : I2C_SMBUS_WRITE);
+
+ /* NB: page 240 of ICH5 datasheet shows that DATA1 is the cmd field when reading */
+ if (read_write == I2C_SMBUS_READ)
+ outb_p(hstcmd, SMBHSTDAT1(priv));
+ else
outb_p(hstcmd, SMBHSTCMD(priv));
- break;
+
+ if (read_write == I2C_SMBUS_WRITE) {
+ /* set I2C_EN bit in configuration register */
+ pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &hostc);
+ pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc | SMBHSTCFG_I2C_EN);
+ } else if (!(priv->features & FEATURE_I2C_BLOCK_READ)) {
+ pci_err(priv->pci_dev, "I2C block read is unsupported!\n");
+ return -EOPNOTSUPP;
}
- /* Experience has shown that the block buffer can only be used for
- SMBus (not I2C) block transactions, even though the datasheet
- doesn't mention this limitation. */
- if ((priv->features & FEATURE_BLOCK_BUFFER) &&
- command != I2C_SMBUS_I2C_BLOCK_DATA)
- result = i801_block_transaction_by_block(priv, data,
- read_write,
- command);
- else
- result = i801_block_transaction_byte_by_byte(priv, data,
- read_write,
- command);
+ /* Block buffer isn't supported for I2C block transactions */
+ result = i801_block_transaction_byte_by_byte(priv, data, read_write, command);
- if (command == I2C_SMBUS_I2C_BLOCK_DATA
- && read_write == I2C_SMBUS_WRITE) {
- /* restore saved configuration register value */
+ /* restore saved configuration register value */
+ if (read_write == I2C_SMBUS_WRITE)
pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc);
- }
+
return result;
}
@@ -893,10 +907,10 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
outb_p(inb_p(SMBAUXCTL(priv)) & (~SMBAUXCTL_CRC),
SMBAUXCTL(priv));
- if (size == I2C_SMBUS_BLOCK_DATA ||
- size == I2C_SMBUS_I2C_BLOCK_DATA ||
- size == I2C_SMBUS_BLOCK_PROC_CALL)
- ret = i801_block_transaction(priv, data, addr, command, read_write, size);
+ if (size == I2C_SMBUS_BLOCK_DATA || size == I2C_SMBUS_BLOCK_PROC_CALL)
+ ret = i801_smbus_block_transaction(priv, data, addr, command, read_write, size);
+ else if (size == I2C_SMBUS_I2C_BLOCK_DATA)
+ ret = i801_i2c_block_transaction(priv, data, addr, command, read_write, size);
else
ret = i801_simple_transaction(priv, data, addr, command, read_write, size);
@@ -969,11 +983,10 @@ static const struct i2c_algorithm smbus_algorithm = {
.functionality = i801_func,
};
-#define FEATURES_ICH5 (FEATURE_BLOCK_PROC | FEATURE_I2C_BLOCK_READ | \
- FEATURE_IRQ | FEATURE_SMBUS_PEC | \
- FEATURE_BLOCK_BUFFER | FEATURE_HOST_NOTIFY)
#define FEATURES_ICH4 (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER | \
FEATURE_HOST_NOTIFY)
+#define FEATURES_ICH5 (FEATURES_ICH4 | FEATURE_BLOCK_PROC | \
+ FEATURE_I2C_BLOCK_READ | FEATURE_IRQ)
static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE_DATA(INTEL, 82801AA_3, 0) },
@@ -1117,7 +1130,7 @@ static void dmi_check_onboard_devices(const struct dmi_header *dm, void *adap)
{
int i, count;
- if (dm->type != 10)
+ if (dm->type != DMI_ENTRY_ONBOARD_DEVICE)
return;
count = (dm->length - sizeof(struct dmi_header)) / 2;
@@ -1447,8 +1460,7 @@ static inline void i801_del_mux(struct i801_priv *priv) { }
#endif
static struct platform_device *
-i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
- struct resource *tco_res)
+i801_add_tco_spt(struct pci_dev *pci_dev, struct resource *tco_res)
{
static const struct itco_wdt_platform_data pldata = {
.name = "Intel PCH",
@@ -1479,8 +1491,7 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
}
static struct platform_device *
-i801_add_tco_cnl(struct i801_priv *priv, struct pci_dev *pci_dev,
- struct resource *tco_res)
+i801_add_tco_cnl(struct pci_dev *pci_dev, struct resource *tco_res)
{
static const struct itco_wdt_platform_data pldata = {
.name = "Intel PCH",
@@ -1520,9 +1531,9 @@ static void i801_add_tco(struct i801_priv *priv)
res->flags = IORESOURCE_IO;
if (priv->features & FEATURE_TCO_CNL)
- priv->tco_pdev = i801_add_tco_cnl(priv, pci_dev, tco_res);
+ priv->tco_pdev = i801_add_tco_cnl(pci_dev, tco_res);
else
- priv->tco_pdev = i801_add_tco_spt(priv, pci_dev, tco_res);
+ priv->tco_pdev = i801_add_tco_spt(pci_dev, tco_res);
if (IS_ERR(priv->tco_pdev))
dev_warn(&pci_dev->dev, "failed to create iTCO device\n");
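For reference, the two adapter-side paths split out above correspond to the two block-read helpers a client driver calls. A minimal, purely illustrative sketch (placeholder command byte, no real device; not code from this patch):

	#include <linux/i2c.h>

	static int example_block_reads(struct i2c_client *client)
	{
		u8 buf[I2C_SMBUS_BLOCK_MAX];
		int len;

		/* SMBus block read: i801 may use its hardware block buffer */
		len = i2c_smbus_read_block_data(client, 0x00, buf);
		if (len < 0)
			return len;

		/* I2C block read: i801 always services this byte by byte */
		len = i2c_smbus_read_i2c_block_data(client, 0x00, sizeof(buf), buf);
		return len < 0 ? len : 0;
	}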
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 678b30e90492a..6d72e4e126dde 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -106,6 +106,7 @@ struct lpi2c_imx_struct {
unsigned int txfifosize;
unsigned int rxfifosize;
enum lpi2c_imx_mode mode;
+ struct i2c_bus_recovery_info rinfo;
};
static void lpi2c_imx_intctrl(struct lpi2c_imx_struct *lpi2c_imx,
@@ -133,6 +134,8 @@ static int lpi2c_imx_bus_busy(struct lpi2c_imx_struct *lpi2c_imx)
if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
dev_dbg(&lpi2c_imx->adapter.dev, "bus not work\n");
+ if (lpi2c_imx->adapter.bus_recovery_info)
+ i2c_recover_bus(&lpi2c_imx->adapter);
return -ETIMEDOUT;
}
schedule();
@@ -190,6 +193,8 @@ static void lpi2c_imx_stop(struct lpi2c_imx_struct *lpi2c_imx)
if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
dev_dbg(&lpi2c_imx->adapter.dev, "stop timeout\n");
+ if (lpi2c_imx->adapter.bus_recovery_info)
+ i2c_recover_bus(&lpi2c_imx->adapter);
break;
}
schedule();
@@ -325,6 +330,8 @@ static int lpi2c_imx_txfifo_empty(struct lpi2c_imx_struct *lpi2c_imx)
if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
dev_dbg(&lpi2c_imx->adapter.dev, "txfifo empty timeout\n");
+ if (lpi2c_imx->adapter.bus_recovery_info)
+ i2c_recover_bus(&lpi2c_imx->adapter);
return -ETIMEDOUT;
}
schedule();
@@ -526,6 +533,20 @@ static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int lpi2c_imx_init_recovery_info(struct lpi2c_imx_struct *lpi2c_imx,
+ struct platform_device *pdev)
+{
+ struct i2c_bus_recovery_info *bri = &lpi2c_imx->rinfo;
+
+ bri->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(bri->pinctrl))
+ return PTR_ERR(bri->pinctrl);
+
+ lpi2c_imx->adapter.bus_recovery_info = bri;
+
+ return 0;
+}
+
static u32 lpi2c_imx_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
@@ -600,6 +621,12 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
lpi2c_imx->txfifosize = 1 << (temp & 0x0f);
lpi2c_imx->rxfifosize = 1 << ((temp >> 8) & 0x0f);
+ /* Init optional bus recovery function */
+ ret = lpi2c_imx_init_recovery_info(lpi2c_imx, pdev);
+ /* Give it another chance if the pinctrl in use is not ready yet */
+ if (ret == -EPROBE_DEFER)
+ goto rpm_disable;
+
ret = i2c_add_adapter(&lpi2c_imx->adapter);
if (ret)
goto rpm_disable;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 60e813137f844..3842e527116b7 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -212,10 +212,6 @@ struct imx_i2c_struct {
const struct imx_i2c_hwdata *hwdata;
struct i2c_bus_recovery_info rinfo;
- struct pinctrl *pinctrl;
- struct pinctrl_state *pinctrl_pins_default;
- struct pinctrl_state *pinctrl_pins_gpio;
-
struct imx_i2c_dma *dma;
struct i2c_client *slave;
enum i2c_slave_event last_slave_event;
@@ -1362,24 +1358,6 @@ static int i2c_imx_xfer_atomic(struct i2c_adapter *adapter,
return result;
}
-static void i2c_imx_prepare_recovery(struct i2c_adapter *adap)
-{
- struct imx_i2c_struct *i2c_imx;
-
- i2c_imx = container_of(adap, struct imx_i2c_struct, adapter);
-
- pinctrl_select_state(i2c_imx->pinctrl, i2c_imx->pinctrl_pins_gpio);
-}
-
-static void i2c_imx_unprepare_recovery(struct i2c_adapter *adap)
-{
- struct imx_i2c_struct *i2c_imx;
-
- i2c_imx = container_of(adap, struct imx_i2c_struct, adapter);
-
- pinctrl_select_state(i2c_imx->pinctrl, i2c_imx->pinctrl_pins_default);
-}
-
/*
* We switch SCL and SDA to their GPIO function and do some bitbanging
* for bus recovery. These alternative pinmux settings can be
@@ -1390,43 +1368,13 @@ static void i2c_imx_unprepare_recovery(struct i2c_adapter *adap)
static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
struct platform_device *pdev)
{
- struct i2c_bus_recovery_info *rinfo = &i2c_imx->rinfo;
-
- i2c_imx->pinctrl = devm_pinctrl_get(&pdev->dev);
- if (!i2c_imx->pinctrl) {
- dev_info(&pdev->dev, "pinctrl unavailable, bus recovery not supported\n");
- return 0;
- }
- if (IS_ERR(i2c_imx->pinctrl)) {
- dev_info(&pdev->dev, "can't get pinctrl, bus recovery not supported\n");
- return PTR_ERR(i2c_imx->pinctrl);
- }
-
- i2c_imx->pinctrl_pins_default = pinctrl_lookup_state(i2c_imx->pinctrl,
- PINCTRL_STATE_DEFAULT);
- i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl,
- "gpio");
- rinfo->sda_gpiod = devm_gpiod_get_optional(&pdev->dev, "sda", GPIOD_IN);
- rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
-
- if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER ||
- PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (IS_ERR(rinfo->sda_gpiod) ||
- IS_ERR(rinfo->scl_gpiod) ||
- IS_ERR(i2c_imx->pinctrl_pins_default) ||
- IS_ERR(i2c_imx->pinctrl_pins_gpio)) {
- dev_dbg(&pdev->dev, "recovery information incomplete\n");
- return 0;
- }
+ struct i2c_bus_recovery_info *bri = &i2c_imx->rinfo;
- dev_dbg(&pdev->dev, "using scl%s for recovery\n",
- rinfo->sda_gpiod ? ",sda" : "");
+ bri->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(bri->pinctrl))
+ return PTR_ERR(bri->pinctrl);
- rinfo->prepare_recovery = i2c_imx_prepare_recovery;
- rinfo->unprepare_recovery = i2c_imx_unprepare_recovery;
- rinfo->recover_bus = i2c_generic_scl_recovery;
- i2c_imx->adapter.bus_recovery_info = rinfo;
+ i2c_imx->adapter.bus_recovery_info = bri;
return 0;
}
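Both the LPI2C and legacy i.MX drivers now converge on the same minimal recovery setup: populate the pinctrl handle in struct i2c_bus_recovery_info and let the I2C core's generic recovery code look up the pin states and GPIOs itself. A stripped-down sketch of that pattern, assuming the core fills in the rest as described (function name is illustrative):

	#include <linux/err.h>
	#include <linux/i2c.h>
	#include <linux/pinctrl/consumer.h>

	static int example_init_recovery(struct device *dev, struct i2c_adapter *adap,
					 struct i2c_bus_recovery_info *bri)
	{
		/* Only the pinctrl handle is provided; states/GPIOs are left to the core */
		bri->pinctrl = devm_pinctrl_get(dev);
		if (IS_ERR(bri->pinctrl))
			return PTR_ERR(bri->pinctrl);

		adap->bus_recovery_info = bri;
		return 0;
	}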
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index e4e4995ab2243..8d73c0f405ed5 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -30,8 +30,6 @@
#include <asm/mpc85xx.h>
#include <sysdev/fsl_soc.h>
-#define DRV_NAME "mpc-i2c"
-
#define MPC_I2C_CLOCK_LEGACY 0
#define MPC_I2C_CLOCK_PRESERVE (~0U)
@@ -844,16 +842,16 @@ static int fsl_i2c_probe(struct platform_device *op)
mpc_i2c_setup_8xxx(op->dev.of_node, i2c, clock);
}
- /*
- * "fsl,timeout" has been marked as deprecated and, to maintain
- * backward compatibility, we will only look for it if
- * "i2c-scl-clk-low-timeout-us" is not present.
- */
+ /* Sadly, we have to support two deprecated bindings here */
result = of_property_read_u32(op->dev.of_node,
- "i2c-scl-clk-low-timeout-us",
+ "i2c-transfer-timeout-us",
&mpc_ops.timeout);
if (result == -EINVAL)
result = of_property_read_u32(op->dev.of_node,
+ "i2c-scl-clk-low-timeout-us",
+ &mpc_ops.timeout);
+ if (result == -EINVAL)
+ result = of_property_read_u32(op->dev.of_node,
"fsl,timeout", &mpc_ops.timeout);
if (!result) {
@@ -960,7 +958,7 @@ static struct platform_driver mpc_i2c_driver = {
.probe = fsl_i2c_probe,
.remove_new = fsl_i2c_remove,
.driver = {
- .name = DRV_NAME,
+ .name = "mpc-i2c",
.of_match_table = mpc_i2c_of_match,
.pm = &mpc_i2c_pm_ops,
},
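The fallback chain above works because of_property_read_u32() returns -EINVAL when a property is simply absent, so the deprecated names are only consulted when the newer one is missing. A distilled sketch of the same idea (function name and default handling are illustrative):

	#include <linux/of.h>

	static u32 example_read_timeout(struct device_node *np, u32 def)
	{
		u32 val;

		/* Preferred binding first, then the two deprecated ones */
		if (!of_property_read_u32(np, "i2c-transfer-timeout-us", &val))
			return val;
		if (!of_property_read_u32(np, "i2c-scl-clk-low-timeout-us", &val))
			return val;
		if (!of_property_read_u32(np, "fsl,timeout", &val))
			return val;

		return def;
	}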
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index b10574d42b7ac..4f41a3c7824d0 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -6,21 +6,30 @@
* I2C master mode controller driver, used in Nomadik 8815
* and Ux500 platforms.
*
+ * The Mobileye EyeQ5 platform is also supported; it uses
+ * the same Ux500/DB8500 IP block with two quirks:
+ * - The memory bus only supports 32-bit accesses.
+ * - A register must be configured for the I2C speed mode;
+ * it is located in a shared register region called OLB.
+ *
* Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
* Author: Sachin Verma <sachin.verma@st.com>
*/
-#include <linux/init.h>
-#include <linux/module.h>
#include <linux/amba/bus.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/i2c.h>
-#include <linux/err.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/pm_runtime.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
#define DRIVER_NAME "nmk-i2c"
@@ -42,61 +51,63 @@
#define I2C_ICR (0x038)
/* Control registers */
-#define I2C_CR_PE (0x1 << 0) /* Peripheral Enable */
-#define I2C_CR_OM (0x3 << 1) /* Operating mode */
-#define I2C_CR_SAM (0x1 << 3) /* Slave addressing mode */
-#define I2C_CR_SM (0x3 << 4) /* Speed mode */
-#define I2C_CR_SGCM (0x1 << 6) /* Slave general call mode */
-#define I2C_CR_FTX (0x1 << 7) /* Flush Transmit */
-#define I2C_CR_FRX (0x1 << 8) /* Flush Receive */
-#define I2C_CR_DMA_TX_EN (0x1 << 9) /* DMA Tx enable */
-#define I2C_CR_DMA_RX_EN (0x1 << 10) /* DMA Rx Enable */
-#define I2C_CR_DMA_SLE (0x1 << 11) /* DMA sync. logic enable */
-#define I2C_CR_LM (0x1 << 12) /* Loopback mode */
-#define I2C_CR_FON (0x3 << 13) /* Filtering on */
-#define I2C_CR_FS (0x3 << 15) /* Force stop enable */
+#define I2C_CR_PE BIT(0) /* Peripheral Enable */
+#define I2C_CR_OM GENMASK(2, 1) /* Operating mode */
+#define I2C_CR_SAM BIT(3) /* Slave addressing mode */
+#define I2C_CR_SM GENMASK(5, 4) /* Speed mode */
+#define I2C_CR_SGCM BIT(6) /* Slave general call mode */
+#define I2C_CR_FTX BIT(7) /* Flush Transmit */
+#define I2C_CR_FRX BIT(8) /* Flush Receive */
+#define I2C_CR_DMA_TX_EN BIT(9) /* DMA Tx enable */
+#define I2C_CR_DMA_RX_EN BIT(10) /* DMA Rx Enable */
+#define I2C_CR_DMA_SLE BIT(11) /* DMA sync. logic enable */
+#define I2C_CR_LM BIT(12) /* Loopback mode */
+#define I2C_CR_FON GENMASK(14, 13) /* Filtering on */
+#define I2C_CR_FS GENMASK(16, 15) /* Force stop enable */
+
+/* Slave control register (SCR) */
+#define I2C_SCR_SLSU GENMASK(31, 16) /* Slave data setup time */
/* Master controller (MCR) register */
-#define I2C_MCR_OP (0x1 << 0) /* Operation */
-#define I2C_MCR_A7 (0x7f << 1) /* 7-bit address */
-#define I2C_MCR_EA10 (0x7 << 8) /* 10-bit Extended address */
-#define I2C_MCR_SB (0x1 << 11) /* Extended address */
-#define I2C_MCR_AM (0x3 << 12) /* Address type */
-#define I2C_MCR_STOP (0x1 << 14) /* Stop condition */
-#define I2C_MCR_LENGTH (0x7ff << 15) /* Transaction length */
+#define I2C_MCR_OP BIT(0) /* Operation */
+#define I2C_MCR_A7 GENMASK(7, 1) /* 7-bit address */
+#define I2C_MCR_EA10 GENMASK(10, 8) /* 10-bit Extended address */
+#define I2C_MCR_SB BIT(11) /* Extended address */
+#define I2C_MCR_AM GENMASK(13, 12) /* Address type */
+#define I2C_MCR_STOP BIT(14) /* Stop condition */
+#define I2C_MCR_LENGTH GENMASK(25, 15) /* Transaction length */
/* Status register (SR) */
-#define I2C_SR_OP (0x3 << 0) /* Operation */
-#define I2C_SR_STATUS (0x3 << 2) /* controller status */
-#define I2C_SR_CAUSE (0x7 << 4) /* Abort cause */
-#define I2C_SR_TYPE (0x3 << 7) /* Receive type */
-#define I2C_SR_LENGTH (0x7ff << 9) /* Transfer length */
+#define I2C_SR_OP GENMASK(1, 0) /* Operation */
+#define I2C_SR_STATUS GENMASK(3, 2) /* controller status */
+#define I2C_SR_CAUSE GENMASK(6, 4) /* Abort cause */
+#define I2C_SR_TYPE GENMASK(8, 7) /* Receive type */
+#define I2C_SR_LENGTH GENMASK(19, 9) /* Transfer length */
+
+/* Baud-rate counter register (BRCR) */
+#define I2C_BRCR_BRCNT1 GENMASK(31, 16) /* Baud-rate counter 1 */
+#define I2C_BRCR_BRCNT2 GENMASK(15, 0) /* Baud-rate counter 2 */
/* Interrupt mask set/clear (IMSCR) bits */
-#define I2C_IT_TXFE (0x1 << 0)
-#define I2C_IT_TXFNE (0x1 << 1)
-#define I2C_IT_TXFF (0x1 << 2)
-#define I2C_IT_TXFOVR (0x1 << 3)
-#define I2C_IT_RXFE (0x1 << 4)
-#define I2C_IT_RXFNF (0x1 << 5)
-#define I2C_IT_RXFF (0x1 << 6)
-#define I2C_IT_RFSR (0x1 << 16)
-#define I2C_IT_RFSE (0x1 << 17)
-#define I2C_IT_WTSR (0x1 << 18)
-#define I2C_IT_MTD (0x1 << 19)
-#define I2C_IT_STD (0x1 << 20)
-#define I2C_IT_MAL (0x1 << 24)
-#define I2C_IT_BERR (0x1 << 25)
-#define I2C_IT_MTDWS (0x1 << 28)
-
-#define GEN_MASK(val, mask, sb) (((val) << (sb)) & (mask))
+#define I2C_IT_TXFE BIT(0)
+#define I2C_IT_TXFNE BIT(1)
+#define I2C_IT_TXFF BIT(2)
+#define I2C_IT_TXFOVR BIT(3)
+#define I2C_IT_RXFE BIT(4)
+#define I2C_IT_RXFNF BIT(5)
+#define I2C_IT_RXFF BIT(6)
+#define I2C_IT_RFSR BIT(16)
+#define I2C_IT_RFSE BIT(17)
+#define I2C_IT_WTSR BIT(18)
+#define I2C_IT_MTD BIT(19)
+#define I2C_IT_STD BIT(20)
+#define I2C_IT_MAL BIT(24)
+#define I2C_IT_BERR BIT(25)
+#define I2C_IT_MTDWS BIT(28)
/* some bits in ICR are reserved */
#define I2C_CLEAR_ALL_INTS 0x131f007f
-/* first three msb bits are reserved */
-#define IRQ_MASK(mask) (mask & 0x1fffffff)
-
/* maximum threshold value */
#define MAX_I2C_FIFO_THRESHOLD 15
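The register-bit rework above is the usual conversion from hand-rolled shift/mask constants to BIT()/GENMASK() with FIELD_PREP()/FIELD_GET(). A small self-contained sketch of the equivalence; the EXAMPLE_* names are illustrative, not taken from the driver:

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	/* Old style: shifted mask constant plus a local shift-and-mask helper */
	#define EXAMPLE_A7_OLD			(0x7f << 1)
	#define EXAMPLE_GEN_MASK(val, mask, sb)	(((val) << (sb)) & (mask))

	/* New style: GENMASK() names the bit range, FIELD_PREP() does the shifting */
	#define EXAMPLE_A7_NEW			GENMASK(7, 1)

	static bool example_encodings_match(u32 addr)
	{
		/* Both place a 7-bit slave address into bits 7:1 of the MCR value */
		return EXAMPLE_GEN_MASK(addr, EXAMPLE_A7_OLD, 1) ==
		       FIELD_PREP(EXAMPLE_A7_NEW, addr);
	}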
@@ -107,6 +118,15 @@ enum i2c_freq_mode {
I2C_FREQ_MODE_FAST_PLUS, /* up to 1 Mb/s */
};
+/* Mobileye EyeQ5 offset into a shared register region (called OLB) */
+#define NMK_I2C_EYEQ5_OLB_IOCR2 0x0B8
+
+enum i2c_eyeq5_speed {
+ I2C_EYEQ5_SPEED_FAST,
+ I2C_EYEQ5_SPEED_FAST_PLUS,
+ I2C_EYEQ5_SPEED_HIGH_SPEED,
+};
+
/**
* struct i2c_vendor_data - per-vendor variations
* @has_mtdws: variant has the MTDWS bit
@@ -131,6 +151,12 @@ enum i2c_operation {
I2C_READ = 0x01
};
+enum i2c_operating_mode {
+ I2C_OM_SLAVE,
+ I2C_OM_MASTER,
+ I2C_OM_MASTER_OR_SLAVE,
+};
+
/**
* struct i2c_nmk_client - client specific data
* @slave_adr: 7-bit slave address
@@ -159,11 +185,13 @@ struct i2c_nmk_client {
* @clk_freq: clock frequency for the operation mode
* @tft: Tx FIFO Threshold in bytes
* @rft: Rx FIFO Threshold in bytes
- * @timeout: Slave response timeout (ms)
+ * @timeout_usecs: Slave response timeout
* @sm: speed mode
* @stop: stop condition.
- * @xfer_complete: acknowledge completion for a I2C message.
+ * @xfer_wq: xfer done wait queue.
+ * @xfer_done: xfer done boolean.
* @result: controller propogated result.
+ * @has_32b_bus: controller is on a bus that only supports 32-bit accesses.
*/
struct nmk_i2c_dev {
struct i2c_vendor_data *vendor;
@@ -176,11 +204,13 @@ struct nmk_i2c_dev {
u32 clk_freq;
unsigned char tft;
unsigned char rft;
- int timeout;
+ u32 timeout_usecs;
enum i2c_freq_mode sm;
int stop;
- struct completion xfer_complete;
+ struct wait_queue_head xfer_wq;
+ bool xfer_done;
int result;
+ bool has_32b_bus;
};
/* controller's abort causes */
@@ -204,18 +234,36 @@ static inline void i2c_clr_bit(void __iomem *reg, u32 mask)
writel(readl(reg) & ~mask, reg);
}
+static inline u8 nmk_i2c_readb(const struct nmk_i2c_dev *priv,
+ unsigned long reg)
+{
+ if (priv->has_32b_bus)
+ return readl(priv->virtbase + reg);
+ else
+ return readb(priv->virtbase + reg);
+}
+
+static inline void nmk_i2c_writeb(const struct nmk_i2c_dev *priv, u32 val,
+ unsigned long reg)
+{
+ if (priv->has_32b_bus)
+ writel(val, priv->virtbase + reg);
+ else
+ writeb(val, priv->virtbase + reg);
+}
+
/**
* flush_i2c_fifo() - This function flushes the I2C FIFO
- * @dev: private data of I2C Driver
+ * @priv: private data of I2C Driver
*
* This function flushes the I2C Tx and Rx FIFOs. It returns
* 0 on successful flushing of FIFO
*/
-static int flush_i2c_fifo(struct nmk_i2c_dev *dev)
+static int flush_i2c_fifo(struct nmk_i2c_dev *priv)
{
#define LOOP_ATTEMPTS 10
+ ktime_t timeout;
int i;
- unsigned long timeout;
/*
* flush the transmit and receive FIFO. The flushing
@@ -224,19 +272,19 @@ static int flush_i2c_fifo(struct nmk_i2c_dev *dev)
* bits, until then no one must access Tx, Rx FIFO and
* should poll on these bits waiting for the completion.
*/
- writel((I2C_CR_FTX | I2C_CR_FRX), dev->virtbase + I2C_CR);
+ writel((I2C_CR_FTX | I2C_CR_FRX), priv->virtbase + I2C_CR);
for (i = 0; i < LOOP_ATTEMPTS; i++) {
- timeout = jiffies + dev->adap.timeout;
+ timeout = ktime_add_us(ktime_get(), priv->timeout_usecs);
- while (!time_after(jiffies, timeout)) {
- if ((readl(dev->virtbase + I2C_CR) &
+ while (ktime_after(timeout, ktime_get())) {
+ if ((readl(priv->virtbase + I2C_CR) &
(I2C_CR_FTX | I2C_CR_FRX)) == 0)
- return 0;
+ return 0;
}
}
- dev_err(&dev->adev->dev,
+ dev_err(&priv->adev->dev,
"flushing operation timed out giving up after %d attempts",
LOOP_ATTEMPTS);
@@ -245,120 +293,121 @@ static int flush_i2c_fifo(struct nmk_i2c_dev *dev)
/**
* disable_all_interrupts() - Disable all interrupts of this I2c Bus
- * @dev: private data of I2C Driver
+ * @priv: private data of I2C Driver
*/
-static void disable_all_interrupts(struct nmk_i2c_dev *dev)
+static void disable_all_interrupts(struct nmk_i2c_dev *priv)
{
- u32 mask = IRQ_MASK(0);
- writel(mask, dev->virtbase + I2C_IMSCR);
+ writel(0, priv->virtbase + I2C_IMSCR);
}
/**
* clear_all_interrupts() - Clear all interrupts of I2C Controller
- * @dev: private data of I2C Driver
+ * @priv: private data of I2C Driver
*/
-static void clear_all_interrupts(struct nmk_i2c_dev *dev)
+static void clear_all_interrupts(struct nmk_i2c_dev *priv)
{
- u32 mask;
- mask = IRQ_MASK(I2C_CLEAR_ALL_INTS);
- writel(mask, dev->virtbase + I2C_ICR);
+ writel(I2C_CLEAR_ALL_INTS, priv->virtbase + I2C_ICR);
}
/**
* init_hw() - initialize the I2C hardware
- * @dev: private data of I2C Driver
+ * @priv: private data of I2C Driver
*/
-static int init_hw(struct nmk_i2c_dev *dev)
+static int init_hw(struct nmk_i2c_dev *priv)
{
int stat;
- stat = flush_i2c_fifo(dev);
+ stat = flush_i2c_fifo(priv);
if (stat)
goto exit;
/* disable the controller */
- i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
+ i2c_clr_bit(priv->virtbase + I2C_CR, I2C_CR_PE);
- disable_all_interrupts(dev);
+ disable_all_interrupts(priv);
- clear_all_interrupts(dev);
+ clear_all_interrupts(priv);
- dev->cli.operation = I2C_NO_OPERATION;
+ priv->cli.operation = I2C_NO_OPERATION;
exit:
return stat;
}
/* enable peripheral, master mode operation */
-#define DEFAULT_I2C_REG_CR ((1 << 1) | I2C_CR_PE)
+#define DEFAULT_I2C_REG_CR (FIELD_PREP(I2C_CR_OM, I2C_OM_MASTER) | I2C_CR_PE)
+
+/* grab top three bits from extended I2C addresses */
+#define ADR_3MSB_BITS GENMASK(9, 7)
/**
* load_i2c_mcr_reg() - load the MCR register
- * @dev: private data of controller
+ * @priv: private data of controller
* @flags: message flags
*/
-static u32 load_i2c_mcr_reg(struct nmk_i2c_dev *dev, u16 flags)
+static u32 load_i2c_mcr_reg(struct nmk_i2c_dev *priv, u16 flags)
{
u32 mcr = 0;
unsigned short slave_adr_3msb_bits;
- mcr |= GEN_MASK(dev->cli.slave_adr, I2C_MCR_A7, 1);
+ mcr |= FIELD_PREP(I2C_MCR_A7, priv->cli.slave_adr);
if (unlikely(flags & I2C_M_TEN)) {
/* 10-bit address transaction */
- mcr |= GEN_MASK(2, I2C_MCR_AM, 12);
+ mcr |= FIELD_PREP(I2C_MCR_AM, 2);
/*
* Get the top 3 bits.
* EA10 represents extended address in MCR. This includes
* the extension (MSB bits) of the 7 bit address loaded
* in A7
*/
- slave_adr_3msb_bits = (dev->cli.slave_adr >> 7) & 0x7;
+ slave_adr_3msb_bits = FIELD_GET(ADR_3MSB_BITS,
+ priv->cli.slave_adr);
- mcr |= GEN_MASK(slave_adr_3msb_bits, I2C_MCR_EA10, 8);
+ mcr |= FIELD_PREP(I2C_MCR_EA10, slave_adr_3msb_bits);
} else {
/* 7-bit address transaction */
- mcr |= GEN_MASK(1, I2C_MCR_AM, 12);
+ mcr |= FIELD_PREP(I2C_MCR_AM, 1);
}
/* start byte procedure not applied */
- mcr |= GEN_MASK(0, I2C_MCR_SB, 11);
+ mcr |= FIELD_PREP(I2C_MCR_SB, 0);
/* check the operation, master read/write? */
- if (dev->cli.operation == I2C_WRITE)
- mcr |= GEN_MASK(I2C_WRITE, I2C_MCR_OP, 0);
+ if (priv->cli.operation == I2C_WRITE)
+ mcr |= FIELD_PREP(I2C_MCR_OP, I2C_WRITE);
else
- mcr |= GEN_MASK(I2C_READ, I2C_MCR_OP, 0);
+ mcr |= FIELD_PREP(I2C_MCR_OP, I2C_READ);
/* stop or repeated start? */
- if (dev->stop)
- mcr |= GEN_MASK(1, I2C_MCR_STOP, 14);
+ if (priv->stop)
+ mcr |= FIELD_PREP(I2C_MCR_STOP, 1);
else
- mcr &= ~(GEN_MASK(1, I2C_MCR_STOP, 14));
+ mcr &= ~FIELD_PREP(I2C_MCR_STOP, 1);
- mcr |= GEN_MASK(dev->cli.count, I2C_MCR_LENGTH, 15);
+ mcr |= FIELD_PREP(I2C_MCR_LENGTH, priv->cli.count);
return mcr;
}
/**
* setup_i2c_controller() - setup the controller
- * @dev: private data of controller
+ * @priv: private data of controller
*/
-static void setup_i2c_controller(struct nmk_i2c_dev *dev)
+static void setup_i2c_controller(struct nmk_i2c_dev *priv)
{
u32 brcr1, brcr2;
u32 i2c_clk, div;
u32 ns;
u16 slsu;
- writel(0x0, dev->virtbase + I2C_CR);
- writel(0x0, dev->virtbase + I2C_HSMCR);
- writel(0x0, dev->virtbase + I2C_TFTR);
- writel(0x0, dev->virtbase + I2C_RFTR);
- writel(0x0, dev->virtbase + I2C_DMAR);
+ writel(0x0, priv->virtbase + I2C_CR);
+ writel(0x0, priv->virtbase + I2C_HSMCR);
+ writel(0x0, priv->virtbase + I2C_TFTR);
+ writel(0x0, priv->virtbase + I2C_RFTR);
+ writel(0x0, priv->virtbase + I2C_DMAR);
- i2c_clk = clk_get_rate(dev->clk);
+ i2c_clk = clk_get_rate(priv->clk);
/*
* set the slsu:
@@ -373,7 +422,7 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
* slsu = cycles / (1000000000 / f) + 1
*/
ns = DIV_ROUND_UP_ULL(1000000000ULL, i2c_clk);
- switch (dev->sm) {
+ switch (priv->sm) {
case I2C_FREQ_MODE_FAST:
case I2C_FREQ_MODE_FAST_PLUS:
slsu = DIV_ROUND_UP(100, ns); /* Fast */
@@ -388,15 +437,15 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
}
slsu += 1;
- dev_dbg(&dev->adev->dev, "calculated SLSU = %04x\n", slsu);
- writel(slsu << 16, dev->virtbase + I2C_SCR);
+ dev_dbg(&priv->adev->dev, "calculated SLSU = %04x\n", slsu);
+ writel(FIELD_PREP(I2C_SCR_SLSU, slsu), priv->virtbase + I2C_SCR);
/*
* The spec says, in case of std. mode the divider is
* 2 whereas it is 3 for fast and fastplus mode of
* operation. TODO - high speed support.
*/
- div = (dev->clk_freq > I2C_MAX_STANDARD_MODE_FREQ) ? 3 : 2;
+ div = (priv->clk_freq > I2C_MAX_STANDARD_MODE_FREQ) ? 3 : 2;
/*
* generate the mask for baud rate counters. The controller
@@ -405,11 +454,11 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
* plus operation. Currently we do not supprt high speed mode
* so set brcr1 to 0.
*/
- brcr1 = 0 << 16;
- brcr2 = (i2c_clk/(dev->clk_freq * div)) & 0xffff;
+ brcr1 = FIELD_PREP(I2C_BRCR_BRCNT1, 0);
+ brcr2 = FIELD_PREP(I2C_BRCR_BRCNT2, i2c_clk / (priv->clk_freq * div));
/* set the baud rate counter register */
- writel((brcr1 | brcr2), dev->virtbase + I2C_BRCR);
+ writel((brcr1 | brcr2), priv->virtbase + I2C_BRCR);
/*
* set the speed mode. Currently we support
@@ -417,125 +466,142 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
* TODO - support for fast mode plus (up to 1Mb/s)
* and high speed (up to 3.4 Mb/s)
*/
- if (dev->sm > I2C_FREQ_MODE_FAST) {
- dev_err(&dev->adev->dev,
+ if (priv->sm > I2C_FREQ_MODE_FAST) {
+ dev_err(&priv->adev->dev,
"do not support this mode defaulting to std. mode\n");
- brcr2 = i2c_clk / (I2C_MAX_STANDARD_MODE_FREQ * 2) & 0xffff;
- writel((brcr1 | brcr2), dev->virtbase + I2C_BRCR);
- writel(I2C_FREQ_MODE_STANDARD << 4,
- dev->virtbase + I2C_CR);
+ brcr2 = FIELD_PREP(I2C_BRCR_BRCNT2,
+ i2c_clk / (I2C_MAX_STANDARD_MODE_FREQ * 2));
+ writel((brcr1 | brcr2), priv->virtbase + I2C_BRCR);
+ writel(FIELD_PREP(I2C_CR_SM, I2C_FREQ_MODE_STANDARD),
+ priv->virtbase + I2C_CR);
}
- writel(dev->sm << 4, dev->virtbase + I2C_CR);
+ writel(FIELD_PREP(I2C_CR_SM, priv->sm), priv->virtbase + I2C_CR);
/* set the Tx and Rx FIFO threshold */
- writel(dev->tft, dev->virtbase + I2C_TFTR);
- writel(dev->rft, dev->virtbase + I2C_RFTR);
+ writel(priv->tft, priv->virtbase + I2C_TFTR);
+ writel(priv->rft, priv->virtbase + I2C_RFTR);
+}
+
+static bool nmk_i2c_wait_xfer_done(struct nmk_i2c_dev *priv)
+{
+ if (priv->timeout_usecs < jiffies_to_usecs(1)) {
+ unsigned long timeout_usecs = priv->timeout_usecs;
+ ktime_t timeout = ktime_set(0, timeout_usecs * NSEC_PER_USEC);
+
+ wait_event_hrtimeout(priv->xfer_wq, priv->xfer_done, timeout);
+ } else {
+ unsigned long timeout = usecs_to_jiffies(priv->timeout_usecs);
+
+ wait_event_timeout(priv->xfer_wq, priv->xfer_done, timeout);
+ }
+
+ return priv->xfer_done;
}
/**
* read_i2c() - Read from I2C client device
- * @dev: private data of I2C Driver
+ * @priv: private data of I2C Driver
* @flags: message flags
*
* This function reads from i2c client device when controller is in
* master mode. There is a completion timeout. If there is no transfer
* before timeout error is returned.
*/
-static int read_i2c(struct nmk_i2c_dev *dev, u16 flags)
+static int read_i2c(struct nmk_i2c_dev *priv, u16 flags)
{
- int status = 0;
u32 mcr, irq_mask;
- unsigned long timeout;
+ int status = 0;
+ bool xfer_done;
- mcr = load_i2c_mcr_reg(dev, flags);
- writel(mcr, dev->virtbase + I2C_MCR);
+ mcr = load_i2c_mcr_reg(priv, flags);
+ writel(mcr, priv->virtbase + I2C_MCR);
/* load the current CR value */
- writel(readl(dev->virtbase + I2C_CR) | DEFAULT_I2C_REG_CR,
- dev->virtbase + I2C_CR);
+ writel(readl(priv->virtbase + I2C_CR) | DEFAULT_I2C_REG_CR,
+ priv->virtbase + I2C_CR);
/* enable the controller */
- i2c_set_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
+ i2c_set_bit(priv->virtbase + I2C_CR, I2C_CR_PE);
- init_completion(&dev->xfer_complete);
+ init_waitqueue_head(&priv->xfer_wq);
+ priv->xfer_done = false;
/* enable interrupts by setting the mask */
irq_mask = (I2C_IT_RXFNF | I2C_IT_RXFF |
I2C_IT_MAL | I2C_IT_BERR);
- if (dev->stop || !dev->vendor->has_mtdws)
+ if (priv->stop || !priv->vendor->has_mtdws)
irq_mask |= I2C_IT_MTD;
else
irq_mask |= I2C_IT_MTDWS;
- irq_mask = I2C_CLEAR_ALL_INTS & IRQ_MASK(irq_mask);
+ irq_mask &= I2C_CLEAR_ALL_INTS;
- writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask,
- dev->virtbase + I2C_IMSCR);
+ writel(readl(priv->virtbase + I2C_IMSCR) | irq_mask,
+ priv->virtbase + I2C_IMSCR);
- timeout = wait_for_completion_timeout(
- &dev->xfer_complete, dev->adap.timeout);
+ xfer_done = nmk_i2c_wait_xfer_done(priv);
- if (timeout == 0) {
+ if (!xfer_done) {
/* Controller timed out */
- dev_err(&dev->adev->dev, "read from slave 0x%x timed out\n",
- dev->cli.slave_adr);
+ dev_err(&priv->adev->dev, "read from slave 0x%x timed out\n",
+ priv->cli.slave_adr);
status = -ETIMEDOUT;
}
return status;
}
-static void fill_tx_fifo(struct nmk_i2c_dev *dev, int no_bytes)
+static void fill_tx_fifo(struct nmk_i2c_dev *priv, int no_bytes)
{
int count;
for (count = (no_bytes - 2);
(count > 0) &&
- (dev->cli.count != 0);
+ (priv->cli.count != 0);
count--) {
/* write to the Tx FIFO */
- writeb(*dev->cli.buffer,
- dev->virtbase + I2C_TFR);
- dev->cli.buffer++;
- dev->cli.count--;
- dev->cli.xfer_bytes++;
+ nmk_i2c_writeb(priv, *priv->cli.buffer, I2C_TFR);
+ priv->cli.buffer++;
+ priv->cli.count--;
+ priv->cli.xfer_bytes++;
}
}
/**
* write_i2c() - Write data to I2C client.
- * @dev: private data of I2C Driver
+ * @priv: private data of I2C Driver
* @flags: message flags
*
* This function writes data to I2C client
*/
-static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
+static int write_i2c(struct nmk_i2c_dev *priv, u16 flags)
{
- u32 status = 0;
u32 mcr, irq_mask;
- unsigned long timeout;
+ u32 status = 0;
+ bool xfer_done;
- mcr = load_i2c_mcr_reg(dev, flags);
+ mcr = load_i2c_mcr_reg(priv, flags);
- writel(mcr, dev->virtbase + I2C_MCR);
+ writel(mcr, priv->virtbase + I2C_MCR);
/* load the current CR value */
- writel(readl(dev->virtbase + I2C_CR) | DEFAULT_I2C_REG_CR,
- dev->virtbase + I2C_CR);
+ writel(readl(priv->virtbase + I2C_CR) | DEFAULT_I2C_REG_CR,
+ priv->virtbase + I2C_CR);
/* enable the controller */
- i2c_set_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
+ i2c_set_bit(priv->virtbase + I2C_CR, I2C_CR_PE);
- init_completion(&dev->xfer_complete);
+ init_waitqueue_head(&priv->xfer_wq);
+ priv->xfer_done = false;
/* enable interrupts by settings the masks */
irq_mask = (I2C_IT_TXFOVR | I2C_IT_MAL | I2C_IT_BERR);
/* Fill the TX FIFO with transmit data */
- fill_tx_fifo(dev, MAX_I2C_FIFO_THRESHOLD);
+ fill_tx_fifo(priv, MAX_I2C_FIFO_THRESHOLD);
- if (dev->cli.count != 0)
+ if (priv->cli.count != 0)
irq_mask |= I2C_IT_TXFNE;
/*
@@ -543,23 +609,22 @@ static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
* set the MTDWS bit (Master Transaction Done Without Stop)
* to start repeated start operation
*/
- if (dev->stop || !dev->vendor->has_mtdws)
+ if (priv->stop || !priv->vendor->has_mtdws)
irq_mask |= I2C_IT_MTD;
else
irq_mask |= I2C_IT_MTDWS;
- irq_mask = I2C_CLEAR_ALL_INTS & IRQ_MASK(irq_mask);
+ irq_mask &= I2C_CLEAR_ALL_INTS;
- writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask,
- dev->virtbase + I2C_IMSCR);
+ writel(readl(priv->virtbase + I2C_IMSCR) | irq_mask,
+ priv->virtbase + I2C_IMSCR);
- timeout = wait_for_completion_timeout(
- &dev->xfer_complete, dev->adap.timeout);
+ xfer_done = nmk_i2c_wait_xfer_done(priv);
- if (timeout == 0) {
+ if (!xfer_done) {
/* Controller timed out */
- dev_err(&dev->adev->dev, "write to slave 0x%x timed out\n",
- dev->cli.slave_adr);
+ dev_err(&priv->adev->dev, "write to slave 0x%x timed out\n",
+ priv->cli.slave_adr);
status = -ETIMEDOUT;
}
@@ -568,44 +633,39 @@ static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
/**
* nmk_i2c_xfer_one() - transmit a single I2C message
- * @dev: device with a message encoded into it
+ * @priv: device with a message encoded into it
* @flags: message flags
*/
-static int nmk_i2c_xfer_one(struct nmk_i2c_dev *dev, u16 flags)
+static int nmk_i2c_xfer_one(struct nmk_i2c_dev *priv, u16 flags)
{
int status;
if (flags & I2C_M_RD) {
/* read operation */
- dev->cli.operation = I2C_READ;
- status = read_i2c(dev, flags);
+ priv->cli.operation = I2C_READ;
+ status = read_i2c(priv, flags);
} else {
/* write operation */
- dev->cli.operation = I2C_WRITE;
- status = write_i2c(dev, flags);
+ priv->cli.operation = I2C_WRITE;
+ status = write_i2c(priv, flags);
}
- if (status || (dev->result)) {
+ if (status || priv->result) {
u32 i2c_sr;
u32 cause;
- i2c_sr = readl(dev->virtbase + I2C_SR);
- /*
- * Check if the controller I2C operation status
- * is set to ABORT(11b).
- */
- if (((i2c_sr >> 2) & 0x3) == 0x3) {
- /* get the abort cause */
- cause = (i2c_sr >> 4) & 0x7;
- dev_err(&dev->adev->dev, "%s\n",
+ i2c_sr = readl(priv->virtbase + I2C_SR);
+ if (FIELD_GET(I2C_SR_STATUS, i2c_sr) == I2C_ABORT) {
+ cause = FIELD_GET(I2C_SR_CAUSE, i2c_sr);
+ dev_err(&priv->adev->dev, "%s\n",
cause >= ARRAY_SIZE(abort_causes) ?
"unknown reason" :
abort_causes[cause]);
}
- (void) init_hw(dev);
+ init_hw(priv);
- status = status ? status : dev->result;
+ status = status ? status : priv->result;
}
return status;
@@ -663,24 +723,24 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
{
int status = 0;
int i;
- struct nmk_i2c_dev *dev = i2c_get_adapdata(i2c_adap);
+ struct nmk_i2c_dev *priv = i2c_get_adapdata(i2c_adap);
int j;
- pm_runtime_get_sync(&dev->adev->dev);
+ pm_runtime_get_sync(&priv->adev->dev);
/* Attempt three times to send the message queue */
for (j = 0; j < 3; j++) {
/* setup the i2c controller */
- setup_i2c_controller(dev);
+ setup_i2c_controller(priv);
for (i = 0; i < num_msgs; i++) {
- dev->cli.slave_adr = msgs[i].addr;
- dev->cli.buffer = msgs[i].buf;
- dev->cli.count = msgs[i].len;
- dev->stop = (i < (num_msgs - 1)) ? 0 : 1;
- dev->result = 0;
+ priv->cli.slave_adr = msgs[i].addr;
+ priv->cli.buffer = msgs[i].buf;
+ priv->cli.count = msgs[i].len;
+ priv->stop = (i < (num_msgs - 1)) ? 0 : 1;
+ priv->result = 0;
- status = nmk_i2c_xfer_one(dev, msgs[i].flags);
+ status = nmk_i2c_xfer_one(priv, msgs[i].flags);
if (status != 0)
break;
}
@@ -688,7 +748,7 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
break;
}
- pm_runtime_put_sync(&dev->adev->dev);
+ pm_runtime_put_sync(&priv->adev->dev);
/* return the no. messages processed */
if (status)
@@ -699,14 +759,14 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
/**
* disable_interrupts() - disable the interrupts
- * @dev: private data of controller
+ * @priv: private data of controller
* @irq: interrupt number
*/
-static int disable_interrupts(struct nmk_i2c_dev *dev, u32 irq)
+static int disable_interrupts(struct nmk_i2c_dev *priv, u32 irq)
{
- irq = IRQ_MASK(irq);
- writel(readl(dev->virtbase + I2C_IMSCR) & ~(I2C_CLEAR_ALL_INTS & irq),
- dev->virtbase + I2C_IMSCR);
+ irq &= I2C_CLEAR_ALL_INTS;
+ writel(readl(priv->virtbase + I2C_IMSCR) & ~irq,
+ priv->virtbase + I2C_IMSCR);
return 0;
}
@@ -723,38 +783,39 @@ static int disable_interrupts(struct nmk_i2c_dev *dev, u32 irq)
*/
static irqreturn_t i2c_irq_handler(int irq, void *arg)
{
- struct nmk_i2c_dev *dev = arg;
+ struct nmk_i2c_dev *priv = arg;
+ struct device *dev = &priv->adev->dev;
u32 tft, rft;
u32 count;
u32 misr, src;
/* load Tx FIFO and Rx FIFO threshold values */
- tft = readl(dev->virtbase + I2C_TFTR);
- rft = readl(dev->virtbase + I2C_RFTR);
+ tft = readl(priv->virtbase + I2C_TFTR);
+ rft = readl(priv->virtbase + I2C_RFTR);
/* read interrupt status register */
- misr = readl(dev->virtbase + I2C_MISR);
+ misr = readl(priv->virtbase + I2C_MISR);
src = __ffs(misr);
- switch ((1 << src)) {
+ switch (BIT(src)) {
/* Transmit FIFO nearly empty interrupt */
case I2C_IT_TXFNE:
{
- if (dev->cli.operation == I2C_READ) {
+ if (priv->cli.operation == I2C_READ) {
/*
* in read operation why do we care for writing?
* so disable the Transmit FIFO interrupt
*/
- disable_interrupts(dev, I2C_IT_TXFNE);
+ disable_interrupts(priv, I2C_IT_TXFNE);
} else {
- fill_tx_fifo(dev, (MAX_I2C_FIFO_THRESHOLD - tft));
+ fill_tx_fifo(priv, (MAX_I2C_FIFO_THRESHOLD - tft));
/*
* if done, close the transfer by disabling the
* corresponding TXFNE interrupt
*/
- if (dev->cli.count == 0)
- disable_interrupts(dev, I2C_IT_TXFNE);
+ if (priv->cli.count == 0)
+ disable_interrupts(priv, I2C_IT_TXFNE);
}
}
break;
@@ -768,60 +829,63 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
case I2C_IT_RXFNF:
for (count = rft; count > 0; count--) {
/* Read the Rx FIFO */
- *dev->cli.buffer = readb(dev->virtbase + I2C_RFR);
- dev->cli.buffer++;
+ *priv->cli.buffer = nmk_i2c_readb(priv, I2C_RFR);
+ priv->cli.buffer++;
}
- dev->cli.count -= rft;
- dev->cli.xfer_bytes += rft;
+ priv->cli.count -= rft;
+ priv->cli.xfer_bytes += rft;
break;
/* Rx FIFO full */
case I2C_IT_RXFF:
for (count = MAX_I2C_FIFO_THRESHOLD; count > 0; count--) {
- *dev->cli.buffer = readb(dev->virtbase + I2C_RFR);
- dev->cli.buffer++;
+ *priv->cli.buffer = nmk_i2c_readb(priv, I2C_RFR);
+ priv->cli.buffer++;
}
- dev->cli.count -= MAX_I2C_FIFO_THRESHOLD;
- dev->cli.xfer_bytes += MAX_I2C_FIFO_THRESHOLD;
+ priv->cli.count -= MAX_I2C_FIFO_THRESHOLD;
+ priv->cli.xfer_bytes += MAX_I2C_FIFO_THRESHOLD;
break;
/* Master Transaction Done with/without stop */
case I2C_IT_MTD:
case I2C_IT_MTDWS:
- if (dev->cli.operation == I2C_READ) {
- while (!(readl(dev->virtbase + I2C_RISR)
+ if (priv->cli.operation == I2C_READ) {
+ while (!(readl(priv->virtbase + I2C_RISR)
& I2C_IT_RXFE)) {
- if (dev->cli.count == 0)
+ if (priv->cli.count == 0)
break;
- *dev->cli.buffer =
- readb(dev->virtbase + I2C_RFR);
- dev->cli.buffer++;
- dev->cli.count--;
- dev->cli.xfer_bytes++;
+ *priv->cli.buffer =
+ nmk_i2c_readb(priv, I2C_RFR);
+ priv->cli.buffer++;
+ priv->cli.count--;
+ priv->cli.xfer_bytes++;
}
}
- disable_all_interrupts(dev);
- clear_all_interrupts(dev);
+ disable_all_interrupts(priv);
+ clear_all_interrupts(priv);
- if (dev->cli.count) {
- dev->result = -EIO;
- dev_err(&dev->adev->dev,
- "%lu bytes still remain to be xfered\n",
- dev->cli.count);
- (void) init_hw(dev);
+ if (priv->cli.count) {
+ priv->result = -EIO;
+ dev_err(dev, "%lu bytes still remain to be xfered\n",
+ priv->cli.count);
+ init_hw(priv);
}
- complete(&dev->xfer_complete);
+ priv->xfer_done = true;
+ wake_up(&priv->xfer_wq);
+
break;
/* Master Arbitration lost interrupt */
case I2C_IT_MAL:
- dev->result = -EIO;
- (void) init_hw(dev);
+ priv->result = -EIO;
+ init_hw(priv);
+
+ i2c_set_bit(priv->virtbase + I2C_ICR, I2C_IT_MAL);
+ priv->xfer_done = true;
+ wake_up(&priv->xfer_wq);
- i2c_set_bit(dev->virtbase + I2C_ICR, I2C_IT_MAL);
- complete(&dev->xfer_complete);
break;
@@ -831,15 +895,20 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
* during the transaction.
*/
case I2C_IT_BERR:
- dev->result = -EIO;
- /* get the status */
- if (((readl(dev->virtbase + I2C_SR) >> 2) & 0x3) == I2C_ABORT)
- (void) init_hw(dev);
+ {
+ u32 sr;
- i2c_set_bit(dev->virtbase + I2C_ICR, I2C_IT_BERR);
- complete(&dev->xfer_complete);
+ sr = readl(priv->virtbase + I2C_SR);
+ priv->result = -EIO;
+ if (FIELD_GET(I2C_SR_STATUS, sr) == I2C_ABORT)
+ init_hw(priv);
- break;
+ i2c_set_bit(priv->virtbase + I2C_ICR, I2C_IT_BERR);
+ priv->xfer_done = true;
+ wake_up(&priv->xfer_wq);
+
+ }
+ break;
/*
* Tx FIFO overrun interrupt.
@@ -847,11 +916,13 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
* the Tx FIFO is full.
*/
case I2C_IT_TXFOVR:
- dev->result = -EIO;
- (void) init_hw(dev);
+ priv->result = -EIO;
+ init_hw(priv);
+
+ dev_err(dev, "Tx Fifo Over run\n");
+ priv->xfer_done = true;
+ wake_up(&priv->xfer_wq);
- dev_err(&dev->adev->dev, "Tx Fifo Over run\n");
- complete(&dev->xfer_complete);
break;
@@ -863,10 +934,10 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
case I2C_IT_RFSE:
case I2C_IT_WTSR:
case I2C_IT_STD:
- dev_err(&dev->adev->dev, "unhandled Interrupt\n");
+ dev_err(dev, "unhandled Interrupt\n");
break;
default:
- dev_err(&dev->adev->dev, "spurious Interrupt..\n");
+ dev_err(dev, "spurious Interrupt..\n");
break;
}
@@ -893,9 +964,9 @@ static int nmk_i2c_resume_early(struct device *dev)
static int nmk_i2c_runtime_suspend(struct device *dev)
{
struct amba_device *adev = to_amba_device(dev);
- struct nmk_i2c_dev *nmk_i2c = amba_get_drvdata(adev);
+ struct nmk_i2c_dev *priv = amba_get_drvdata(adev);
- clk_disable_unprepare(nmk_i2c->clk);
+ clk_disable_unprepare(priv->clk);
pinctrl_pm_select_idle_state(dev);
return 0;
}
@@ -903,10 +974,10 @@ static int nmk_i2c_runtime_suspend(struct device *dev)
static int nmk_i2c_runtime_resume(struct device *dev)
{
struct amba_device *adev = to_amba_device(dev);
- struct nmk_i2c_dev *nmk_i2c = amba_get_drvdata(adev);
+ struct nmk_i2c_dev *priv = amba_get_drvdata(adev);
int ret;
- ret = clk_prepare_enable(nmk_i2c->clk);
+ ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(dev, "can't prepare_enable clock\n");
return ret;
@@ -914,9 +985,9 @@ static int nmk_i2c_runtime_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
- ret = init_hw(nmk_i2c);
+ ret = init_hw(priv);
if (ret) {
- clk_disable_unprepare(nmk_i2c->clk);
+ clk_disable_unprepare(priv->clk);
pinctrl_pm_select_idle_state(dev);
}
@@ -939,107 +1010,160 @@ static const struct i2c_algorithm nmk_i2c_algo = {
};
static void nmk_i2c_of_probe(struct device_node *np,
- struct nmk_i2c_dev *nmk)
+ struct nmk_i2c_dev *priv)
{
+ u32 timeout_usecs;
+
/* Default to 100 kHz if no frequency is given in the node */
- if (of_property_read_u32(np, "clock-frequency", &nmk->clk_freq))
- nmk->clk_freq = I2C_MAX_STANDARD_MODE_FREQ;
+ if (of_property_read_u32(np, "clock-frequency", &priv->clk_freq))
+ priv->clk_freq = I2C_MAX_STANDARD_MODE_FREQ;
/* This driver only supports 'standard' and 'fast' modes of operation. */
- if (nmk->clk_freq <= I2C_MAX_STANDARD_MODE_FREQ)
- nmk->sm = I2C_FREQ_MODE_STANDARD;
+ if (priv->clk_freq <= I2C_MAX_STANDARD_MODE_FREQ)
+ priv->sm = I2C_FREQ_MODE_STANDARD;
+ else
+ priv->sm = I2C_FREQ_MODE_FAST;
+ priv->tft = 1; /* Tx FIFO threshold */
+ priv->rft = 8; /* Rx FIFO threshold */
+
+ /* Slave response timeout */
+ if (!of_property_read_u32(np, "i2c-transfer-timeout-us", &timeout_usecs))
+ priv->timeout_usecs = timeout_usecs;
+ else
+ priv->timeout_usecs = 200 * USEC_PER_MSEC;
+}
+
+static const unsigned int nmk_i2c_eyeq5_masks[] = {
+ GENMASK(5, 4),
+ GENMASK(7, 6),
+ GENMASK(9, 8),
+ GENMASK(11, 10),
+ GENMASK(13, 12),
+};
+
+static int nmk_i2c_eyeq5_probe(struct nmk_i2c_dev *priv)
+{
+ struct device *dev = &priv->adev->dev;
+ struct device_node *np = dev->of_node;
+ unsigned int mask, speed_mode;
+ struct regmap *olb;
+ unsigned int id;
+
+ priv->has_32b_bus = true;
+
+ olb = syscon_regmap_lookup_by_phandle_args(np, "mobileye,olb", 1, &id);
+ if (IS_ERR(olb))
+ return PTR_ERR(olb);
+ if (id >= ARRAY_SIZE(nmk_i2c_eyeq5_masks))
+ return -ENOENT;
+
+ if (priv->clk_freq <= 400000)
+ speed_mode = I2C_EYEQ5_SPEED_FAST;
+ else if (priv->clk_freq <= 1000000)
+ speed_mode = I2C_EYEQ5_SPEED_FAST_PLUS;
else
- nmk->sm = I2C_FREQ_MODE_FAST;
- nmk->tft = 1; /* Tx FIFO threshold */
- nmk->rft = 8; /* Rx FIFO threshold */
- nmk->timeout = 200; /* Slave response timeout(ms) */
+ speed_mode = I2C_EYEQ5_SPEED_HIGH_SPEED;
+
+ mask = nmk_i2c_eyeq5_masks[id];
+ regmap_update_bits(olb, NMK_I2C_EYEQ5_OLB_IOCR2,
+ mask, speed_mode << __fls(mask));
+
+ return 0;
}
static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
+ struct nmk_i2c_dev *priv;
struct device_node *np = adev->dev.of_node;
- struct nmk_i2c_dev *dev;
+ struct device *dev = &adev->dev;
struct i2c_adapter *adap;
struct i2c_vendor_data *vendor = id->data;
u32 max_fifo_threshold = (vendor->fifodepth / 2) - 1;
- dev = devm_kzalloc(&adev->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- dev->vendor = vendor;
- dev->adev = adev;
- nmk_i2c_of_probe(np, dev);
+ priv->vendor = vendor;
+ priv->adev = adev;
+ priv->has_32b_bus = false;
+ nmk_i2c_of_probe(np, priv);
+
+ if (of_device_is_compatible(np, "mobileye,eyeq5-i2c")) {
+ ret = nmk_i2c_eyeq5_probe(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed OLB lookup\n");
+ }
- if (dev->tft > max_fifo_threshold) {
- dev_warn(&adev->dev, "requested TX FIFO threshold %u, adjusted down to %u\n",
- dev->tft, max_fifo_threshold);
- dev->tft = max_fifo_threshold;
+ if (priv->tft > max_fifo_threshold) {
+ dev_warn(dev, "requested TX FIFO threshold %u, adjusted down to %u\n",
+ priv->tft, max_fifo_threshold);
+ priv->tft = max_fifo_threshold;
}
- if (dev->rft > max_fifo_threshold) {
- dev_warn(&adev->dev, "requested RX FIFO threshold %u, adjusted down to %u\n",
- dev->rft, max_fifo_threshold);
- dev->rft = max_fifo_threshold;
+ if (priv->rft > max_fifo_threshold) {
+ dev_warn(dev, "requested RX FIFO threshold %u, adjusted down to %u\n",
+ priv->rft, max_fifo_threshold);
+ priv->rft = max_fifo_threshold;
}
- amba_set_drvdata(adev, dev);
+ amba_set_drvdata(adev, priv);
- dev->virtbase = devm_ioremap(&adev->dev, adev->res.start,
- resource_size(&adev->res));
- if (!dev->virtbase)
+ priv->virtbase = devm_ioremap(dev, adev->res.start,
+ resource_size(&adev->res));
+ if (!priv->virtbase)
return -ENOMEM;
- dev->irq = adev->irq[0];
- ret = devm_request_irq(&adev->dev, dev->irq, i2c_irq_handler, 0,
- DRIVER_NAME, dev);
+ priv->irq = adev->irq[0];
+ ret = devm_request_irq(dev, priv->irq, i2c_irq_handler, 0,
+ DRIVER_NAME, priv);
if (ret)
- return dev_err_probe(&adev->dev, ret,
- "cannot claim the irq %d\n", dev->irq);
+ return dev_err_probe(dev, ret,
+ "cannot claim the irq %d\n", priv->irq);
- dev->clk = devm_clk_get_enabled(&adev->dev, NULL);
- if (IS_ERR(dev->clk))
- return dev_err_probe(&adev->dev, PTR_ERR(dev->clk),
+ priv->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk),
"could enable i2c clock\n");
- init_hw(dev);
+ init_hw(priv);
- adap = &dev->adap;
+ adap = &priv->adap;
adap->dev.of_node = np;
- adap->dev.parent = &adev->dev;
+ adap->dev.parent = dev;
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DEPRECATED;
adap->algo = &nmk_i2c_algo;
- adap->timeout = msecs_to_jiffies(dev->timeout);
+ adap->timeout = usecs_to_jiffies(priv->timeout_usecs);
snprintf(adap->name, sizeof(adap->name),
"Nomadik I2C at %pR", &adev->res);
- i2c_set_adapdata(adap, dev);
+ i2c_set_adapdata(adap, priv);
- dev_info(&adev->dev,
+ dev_info(dev,
"initialize %s on virtual base %p\n",
- adap->name, dev->virtbase);
+ adap->name, priv->virtbase);
ret = i2c_add_adapter(adap);
if (ret)
return ret;
- pm_runtime_put(&adev->dev);
+ pm_runtime_put(dev);
return 0;
}
static void nmk_i2c_remove(struct amba_device *adev)
{
- struct nmk_i2c_dev *dev = amba_get_drvdata(adev);
+ struct nmk_i2c_dev *priv = amba_get_drvdata(adev);
- i2c_del_adapter(&dev->adap);
- flush_i2c_fifo(dev);
- disable_all_interrupts(dev);
- clear_all_interrupts(dev);
+ i2c_del_adapter(&priv->adap);
+ flush_i2c_fifo(priv);
+ disable_all_interrupts(priv);
+ clear_all_interrupts(priv);
/* disable the controller */
- i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
+ i2c_clr_bit(priv->virtbase + I2C_CR, I2C_CR_PE);
}
static struct i2c_vendor_data vendor_stn8815 = {
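The completion-to-waitqueue change in this driver exists mainly so that timeouts shorter than one jiffy, now that they are expressed in microseconds, are not silently rounded up. A reduced sketch of the pattern with the driver-specific pieces removed (struct and function names are illustrative):

	#include <linux/jiffies.h>
	#include <linux/ktime.h>
	#include <linux/wait.h>

	struct example_xfer_ctx {
		struct wait_queue_head	xfer_wq;
		bool			xfer_done;
		u32			timeout_usecs;
	};

	static void example_xfer_prepare(struct example_xfer_ctx *ctx)
	{
		init_waitqueue_head(&ctx->xfer_wq);
		ctx->xfer_done = false;
	}

	static bool example_wait_xfer_done(struct example_xfer_ctx *ctx)
	{
		if (ctx->timeout_usecs < jiffies_to_usecs(1)) {
			/* Sub-jiffy timeout: use the hrtimer-backed wait */
			ktime_t t = ktime_set(0, ctx->timeout_usecs * NSEC_PER_USEC);

			wait_event_hrtimeout(ctx->xfer_wq, ctx->xfer_done, t);
		} else {
			wait_event_timeout(ctx->xfer_wq, ctx->xfer_done,
					   usecs_to_jiffies(ctx->timeout_usecs));
		}

		return ctx->xfer_done;
	}

	/* Completion side, e.g. called from the interrupt handler */
	static void example_xfer_complete(struct example_xfer_ctx *ctx)
	{
		ctx->xfer_done = true;
		wake_up(&ctx->xfer_wq);
	}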
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 54181b3f19196..2fe68615942ef 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -1264,9 +1264,6 @@ static int npcm_i2c_reg_slave(struct i2c_client *client)
bus->slave = client;
- if (!bus->slave)
- return -EINVAL;
-
if (client->flags & I2C_CLIENT_TEN)
return -EAFNOSUPPORT;
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 76f79b68cef84..888ca636f3f3b 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -324,6 +324,7 @@ static void decode_ISR(unsigned int val)
decode_bits(KERN_DEBUG "ISR", isr_bits, ARRAY_SIZE(isr_bits), val);
}
+#ifdef CONFIG_I2C_PXA_SLAVE
static const struct bits icr_bits[] = {
PXA_BIT(ICR_START, "START", NULL),
PXA_BIT(ICR_STOP, "STOP", NULL),
@@ -342,7 +343,6 @@ static const struct bits icr_bits[] = {
PXA_BIT(ICR_UR, "UR", "ur"),
};
-#ifdef CONFIG_I2C_PXA_SLAVE
static void decode_ICR(unsigned int val)
{
decode_bits(KERN_DEBUG "ICR", icr_bits, ARRAY_SIZE(icr_bits), val);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 5adbe62cf6212..c65ac3d7eadc5 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -773,7 +773,7 @@ static int sh_mobile_i2c_r8a7740_workaround(struct sh_mobile_i2c_data *pd)
iic_wr(pd, ICCR, ICCR_TRS);
udelay(10);
- return sh_mobile_i2c_init(pd);
+ return sh_mobile_i2c_v2_init(pd);
}
static const struct sh_mobile_dt_config default_dt_config = {
@@ -783,11 +783,6 @@ static const struct sh_mobile_dt_config default_dt_config = {
static const struct sh_mobile_dt_config fast_clock_dt_config = {
.clks_per_count = 2,
- .setup = sh_mobile_i2c_init,
-};
-
-static const struct sh_mobile_dt_config v2_freq_calc_dt_config = {
- .clks_per_count = 2,
.setup = sh_mobile_i2c_v2_init,
};
@@ -799,17 +794,17 @@ static const struct sh_mobile_dt_config r8a7740_dt_config = {
static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
{ .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config },
- { .compatible = "renesas,iic-r8a774c0", .data = &v2_freq_calc_dt_config },
- { .compatible = "renesas,iic-r8a7790", .data = &v2_freq_calc_dt_config },
- { .compatible = "renesas,iic-r8a7791", .data = &v2_freq_calc_dt_config },
- { .compatible = "renesas,iic-r8a7792", .data = &v2_freq_calc_dt_config },
- { .compatible = "renesas,iic-r8a7793", .data = &v2_freq_calc_dt_config },
- { .compatible = "renesas,iic-r8a7794", .data = &v2_freq_calc_dt_config },
- { .compatible = "renesas,iic-r8a7795", .data = &v2_freq_calc_dt_config },
- { .compatible = "renesas,iic-r8a77990", .data = &v2_freq_calc_dt_config },
+ { .compatible = "renesas,iic-r8a774c0", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a7790", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a7791", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a7792", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a7793", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a7794", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a7795", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a77990", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-sh73a0", .data = &fast_clock_dt_config },
- { .compatible = "renesas,rcar-gen2-iic", .data = &v2_freq_calc_dt_config },
- { .compatible = "renesas,rcar-gen3-iic", .data = &v2_freq_calc_dt_config },
+ { .compatible = "renesas,rcar-gen2-iic", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,rcar-gen3-iic", .data = &fast_clock_dt_config },
{ .compatible = "renesas,rmobile-iic", .data = &default_dt_config },
{},
};
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index c52d1bec60b4c..28c88901d9bcd 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -570,7 +570,7 @@ err_rpm_put:
return ret;
}
-static int sprd_i2c_remove(struct platform_device *pdev)
+static void sprd_i2c_remove(struct platform_device *pdev)
{
struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev);
int ret;
@@ -586,8 +586,6 @@ static int sprd_i2c_remove(struct platform_device *pdev)
pm_runtime_put_noidle(i2c_dev->dev);
pm_runtime_disable(i2c_dev->dev);
-
- return 0;
}
static int __maybe_unused sprd_i2c_suspend_noirq(struct device *dev)
@@ -645,7 +643,7 @@ MODULE_DEVICE_TABLE(of, sprd_i2c_of_match);
static struct platform_driver sprd_i2c_driver = {
.probe = sprd_i2c_probe,
- .remove = sprd_i2c_remove,
+ .remove_new = sprd_i2c_remove,
.driver = {
.name = "sprd-i2c",
.of_match_table = sprd_i2c_of_match,
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 3bd48d4b6318f..ff5c486a1dbb1 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -701,7 +701,7 @@ const struct bus_type i2c_bus_type = {
};
EXPORT_SYMBOL_GPL(i2c_bus_type);
-struct device_type i2c_client_type = {
+const struct device_type i2c_client_type = {
.groups = i2c_dev_groups,
.uevent = i2c_device_uevent,
.release = i2c_client_dev_release,
@@ -1343,7 +1343,7 @@ static struct attribute *i2c_adapter_attrs[] = {
};
ATTRIBUTE_GROUPS(i2c_adapter);
-struct device_type i2c_adapter_type = {
+const struct device_type i2c_adapter_type = {
.groups = i2c_adapter_groups,
.release = i2c_adapter_dev_release,
};
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 74807c6db596d..97f338b123b11 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -351,13 +351,18 @@ void i2c_register_spd(struct i2c_adapter *adap)
if (!dimm_count)
return;
- dev_info(&adap->dev, "%d/%d memory slots populated (from DMI)\n",
- dimm_count, slot_count);
-
- if (slot_count > 8) {
- dev_warn(&adap->dev,
- "Systems with more than 8 memory slots not supported yet, not instantiating SPD\n");
- return;
+ /*
+ * If we're a child adapter on a muxed segment, then limit slots to 8,
+ * as this is the max number of SPD EEPROMs that can be addressed per bus.
+ */
+ if (i2c_parent_is_i2c_adapter(adap)) {
+ slot_count = 8;
+ } else {
+ if (slot_count > 8) {
+ dev_warn(&adap->dev,
+ "More than 8 memory slots on a single bus, contact i801 maintainer to add missing mux config\n");
+ return;
+ }
}
/*
diff --git a/drivers/i2c/muxes/i2c-mux-mlxcpld.c b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
index 3dda00f1df78d..4c6ed1d58c79a 100644
--- a/drivers/i2c/muxes/i2c-mux-mlxcpld.c
+++ b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
@@ -187,7 +187,7 @@ static struct platform_driver mlxcpld_mux_driver = {
module_platform_driver(mlxcpld_mux_driver);
-MODULE_AUTHOR("Michael Shych (michaels@mellanox.com)");
+MODULE_AUTHOR("Michael Shych <michaels@mellanox.com>");
MODULE_DESCRIPTION("Mellanox I2C-CPLD-MUX driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:i2c-mux-mlxcpld");
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 2219062104fbc..c3f4ff08ac385 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -49,6 +49,7 @@
#include <linux/pm.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <dt-bindings/mux/mux.h>
@@ -57,6 +58,20 @@
#define PCA954X_IRQ_OFFSET 4
+/*
+ * MAX7357's configuration register is writeable after POR, but
+ * can be locked by setting the basic mode bit. MAX7358 configuration
+ * register is locked by default and needs to be unlocked first.
+ * The configuration register holds the following settings:
+ */
+#define MAX7357_CONF_INT_ENABLE BIT(0)
+#define MAX7357_CONF_FLUSH_OUT BIT(1)
+#define MAX7357_CONF_RELEASE_INT BIT(2)
+#define MAX7357_CONF_DISCON_SINGLE_CHAN BIT(4)
+#define MAX7357_CONF_PRECONNECT_TEST BIT(7)
+
+#define MAX7357_POR_DEFAULT_CONF MAX7357_CONF_INT_ENABLE
+
enum pca_type {
max_7356,
max_7357,
@@ -102,6 +117,9 @@ struct pca954x {
unsigned int irq_mask;
raw_spinlock_t lock;
struct regulator *supply;
+
+ struct gpio_desc *reset_gpio;
+ struct reset_control *reset_cont;
};
/* Provide specs for the MAX735x, PCA954x and PCA984x types we know about */
@@ -470,13 +488,69 @@ static int pca954x_init(struct i2c_client *client, struct pca954x *data)
else
data->last_chan = 0; /* Disconnect multiplexer */
- ret = i2c_smbus_write_byte(client, data->last_chan);
+ if (device_is_compatible(&client->dev, "maxim,max7357")) {
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) {
+ u8 conf = MAX7357_POR_DEFAULT_CONF;
+ /*
+ * The interrupt signal is shared with the reset pin. Release the
+ * interrupt after 1.6 seconds to allow using the pin as reset.
+ */
+ conf |= MAX7357_CONF_RELEASE_INT;
+
+ if (device_property_read_bool(&client->dev, "maxim,isolate-stuck-channel"))
+ conf |= MAX7357_CONF_DISCON_SINGLE_CHAN;
+ if (device_property_read_bool(&client->dev,
+ "maxim,send-flush-out-sequence"))
+ conf |= MAX7357_CONF_FLUSH_OUT;
+ if (device_property_read_bool(&client->dev,
+ "maxim,preconnection-wiggle-test-enable"))
+ conf |= MAX7357_CONF_PRECONNECT_TEST;
+
+ ret = i2c_smbus_write_byte_data(client, data->last_chan, conf);
+ } else {
+ dev_warn(&client->dev, "Write byte data not supported. "
+ "Cannot enable enhanced mode features\n");
+ ret = i2c_smbus_write_byte(client, data->last_chan);
+ }
+ } else {
+ ret = i2c_smbus_write_byte(client, data->last_chan);
+ }
+
if (ret < 0)
data->last_chan = 0;
return ret;
}
+static int pca954x_get_reset(struct device *dev, struct pca954x *data)
+{
+ data->reset_cont = devm_reset_control_get_optional_shared(dev, NULL);
+ if (IS_ERR(data->reset_cont))
+ return dev_err_probe(dev, PTR_ERR(data->reset_cont),
+ "Failed to get reset\n");
+ else if (data->reset_cont)
+ return 0;
+
+ /*
+ * fallback to legacy reset-gpios
+ */
+ data->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(data->reset_gpio)) {
+ return dev_err_probe(dev, PTR_ERR(data->reset_gpio),
+ "Failed to get reset gpio");
+ }
+
+ return 0;
+}
+
+static void pca954x_reset_deassert(struct pca954x *data)
+{
+ if (data->reset_cont)
+ reset_control_deassert(data->reset_cont);
+ else
+ gpiod_set_value_cansleep(data->reset_gpio, 0);
+}
+
/*
* I2C init/probing/exit functions
*/
@@ -485,7 +559,6 @@ static int pca954x_probe(struct i2c_client *client)
const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct i2c_adapter *adap = client->adapter;
struct device *dev = &client->dev;
- struct gpio_desc *gpio;
struct i2c_mux_core *muxc;
struct pca954x *data;
int num;
@@ -513,15 +586,13 @@ static int pca954x_probe(struct i2c_client *client)
return dev_err_probe(dev, ret,
"Failed to enable vdd supply\n");
- /* Reset the mux if a reset GPIO is specified. */
- gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(gpio)) {
- ret = PTR_ERR(gpio);
+ ret = pca954x_get_reset(dev, data);
+ if (ret)
goto fail_cleanup;
- }
- if (gpio) {
+
+ if (data->reset_cont || data->reset_gpio) {
udelay(1);
- gpiod_set_value_cansleep(gpio, 0);
+ pca954x_reset_deassert(data);
/* Give the chip some time to recover. */
udelay(1);
}
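The reset handling added above reduces to a common "optional reset controller first, legacy reset-gpios second" pattern: the _optional_ getters return NULL, not an error, when the property is simply absent, which is what lets the fallback chain work. A distilled sketch (names are illustrative, error reporting trimmed):

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>
	#include <linux/reset.h>

	static int example_get_reset(struct device *dev, struct reset_control **rst,
				     struct gpio_desc **gpio)
	{
		*rst = devm_reset_control_get_optional_shared(dev, NULL);
		if (IS_ERR(*rst))
			return PTR_ERR(*rst);
		if (*rst)
			return 0;	/* reset controller found, no GPIO fallback needed */

		/* Fall back to the legacy reset-gpios property */
		*gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
		return PTR_ERR_OR_ZERO(*gpio);
	}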
diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h
index 908a807badaf9..4d99a3524171b 100644
--- a/drivers/i3c/internals.h
+++ b/drivers/i3c/internals.h
@@ -10,7 +10,7 @@
#include <linux/i3c/master.h>
-extern struct bus_type i3c_bus_type;
+extern const struct bus_type i3c_bus_type;
void i3c_bus_normaluse_lock(struct i3c_bus *bus);
void i3c_bus_normaluse_unlock(struct i3c_bus *bus);
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 3afa530c5e322..f32c591ae325c 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -335,7 +335,7 @@ static void i3c_device_remove(struct device *dev)
i3c_device_free_ibi(i3cdev);
}
-struct bus_type i3c_bus_type = {
+const struct bus_type i3c_bus_type = {
.name = "i3c",
.match = i3c_device_match,
.probe = i3c_device_probe,
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index ef5751e91cc9e..276153e10f5a4 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -1163,8 +1163,10 @@ static void dw_i3c_master_set_sir_enabled(struct dw_i3c_master *master,
global = reg == 0xffffffff;
reg &= ~BIT(idx);
} else {
- global = reg == 0;
+ bool hj_rejected = !!(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_HOT_JOIN_NACK);
+
reg |= BIT(idx);
+ global = (reg == 0xffffffff) && hj_rejected;
}
writel(reg, master->regs + IBI_SIR_REQ_REJECT);
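
The rework above reads best as a predicate: the controller may only fall back to rejecting all in-band interrupts once every SIR source is rejected and hot-join requests are NACKed as well. A hedged sketch of that rule; the helper name is hypothetical, DEV_CTRL_HOT_JOIN_NACK is the driver's existing define:

#include <linux/types.h>

/*
 * Hypothetical helper restating the condition computed above: "global"
 * IBI rejection is only legal when all 32 SIR sources are rejected and
 * the hot-join NACK bit is set in DEVICE_CTRL.
 */
static bool dw_i3c_may_reject_all_ibis(u32 sir_reject_mask, u32 dev_ctrl)
{
        return sir_reject_mask == 0xffffffff &&
               (dev_ctrl & DEV_CTRL_HOT_JOIN_NACK);
}
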
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 52eb46ef84c1b..9c351ffc7bed6 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -71,6 +71,15 @@ config IIO_TRIGGERED_EVENT
help
Provides helper functions for setting up triggered events.
+config IIO_BACKEND
+ tristate
+ help
+ Framework to handle complex IIO aggregate devices. The typical
+ architecture that can make use of this framework is to have one
+ device as the frontend device which can be "linked" against one or
+ multiple backend devices. The framework then makes it easy to get
+ and control such backend devices.
+
source "drivers/iio/accel/Kconfig"
source "drivers/iio/adc/Kconfig"
source "drivers/iio/addac/Kconfig"
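
The help text describes the frontend/backend split in the abstract; on the consumer side it boils down to three devm calls, as the ad9467 conversion further down in this diff shows. A minimal sketch under that assumption, with a hypothetical frontend:

#include <linux/err.h>
#include <linux/iio/backend.h>
#include <linux/iio/iio.h>

/*
 * Hypothetical frontend probe fragment: look up the backend described by
 * firmware, attach its buffer to this IIO device and enable it. Mirrors
 * the ad9467 changes later in this series.
 */
static int example_frontend_attach(struct device *dev,
                                   struct iio_dev *indio_dev,
                                   struct iio_backend **back)
{
        int ret;

        *back = devm_iio_backend_get(dev, NULL);
        if (IS_ERR(*back))
                return PTR_ERR(*back);

        ret = devm_iio_backend_request_buffer(dev, *back, indio_dev);
        if (ret)
                return ret;

        return devm_iio_backend_enable(dev, *back);
}
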
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index 9622347a1c1be..0ba0e1521ba4f 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_IIO_GTS_HELPER) += industrialio-gts-helper.o
obj-$(CONFIG_IIO_SW_DEVICE) += industrialio-sw-device.o
obj-$(CONFIG_IIO_SW_TRIGGER) += industrialio-sw-trigger.o
obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o
+obj-$(CONFIG_IIO_BACKEND) += industrialio-backend.o
obj-y += accel/
obj-y += adc/
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index c9d7afe489e83..c2da5066e9a7b 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -256,11 +256,11 @@ config BMC150_ACCEL_SPI
config BMI088_ACCEL
tristate "Bosch BMI088 Accelerometer Driver"
- depends on SPI
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select REGMAP
- select BMI088_ACCEL_SPI
+ select BMI088_ACCEL_SPI if SPI
+ select BMI088_ACCEL_I2C if I2C
help
Say yes here to build support for the following Bosch accelerometers:
BMI088, BMI085, BMI090L. Note that all of these are combo module that
@@ -269,6 +269,10 @@ config BMI088_ACCEL
This driver only implements the accelerometer part, which has its own
address and register map. BMG160 provides the gyroscope driver.
+config BMI088_ACCEL_I2C
+ tristate
+ select REGMAP_I2C
+
config BMI088_ACCEL_SPI
tristate
select REGMAP_SPI
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 311ead9c3ef18..db90532ba24aa 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_BMC150_ACCEL) += bmc150-accel-core.o
obj-$(CONFIG_BMC150_ACCEL_I2C) += bmc150-accel-i2c.o
obj-$(CONFIG_BMC150_ACCEL_SPI) += bmc150-accel-spi.o
obj-$(CONFIG_BMI088_ACCEL) += bmi088-accel-core.o
+obj-$(CONFIG_BMI088_ACCEL_I2C) += bmi088-accel-i2c.o
obj-$(CONFIG_BMI088_ACCEL_SPI) += bmi088-accel-spi.o
obj-$(CONFIG_DA280) += da280.o
obj-$(CONFIG_DA311) += da311.o
diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
index 484fe2e9fb174..210228affb80d 100644
--- a/drivers/iio/accel/adxl367.c
+++ b/drivers/iio/accel/adxl367.c
@@ -339,22 +339,17 @@ static int adxl367_set_act_threshold(struct adxl367_state *st,
{
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = adxl367_set_measure_en(st, false);
if (ret)
- goto out;
+ return ret;
ret = _adxl367_set_act_threshold(st, act, threshold);
if (ret)
- goto out;
-
- ret = adxl367_set_measure_en(st, true);
-
-out:
- mutex_unlock(&st->lock);
+ return ret;
- return ret;
+ return adxl367_set_measure_en(st, true);
}
static int adxl367_set_act_proc_mode(struct adxl367_state *st,
@@ -482,51 +477,45 @@ static int adxl367_set_fifo_watermark(struct adxl367_state *st,
static int adxl367_set_range(struct iio_dev *indio_dev,
enum adxl367_range range)
{
- struct adxl367_state *st = iio_priv(indio_dev);
- int ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ struct adxl367_state *st = iio_priv(indio_dev);
+ int ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- mutex_lock(&st->lock);
-
- ret = adxl367_set_measure_en(st, false);
- if (ret)
- goto out;
+ guard(mutex)(&st->lock);
- ret = regmap_update_bits(st->regmap, ADXL367_REG_FILTER_CTL,
- ADXL367_FILTER_CTL_RANGE_MASK,
- FIELD_PREP(ADXL367_FILTER_CTL_RANGE_MASK,
- range));
- if (ret)
- goto out;
+ ret = adxl367_set_measure_en(st, false);
+ if (ret)
+ return ret;
- adxl367_scale_act_thresholds(st, st->range, range);
+ ret = regmap_update_bits(st->regmap, ADXL367_REG_FILTER_CTL,
+ ADXL367_FILTER_CTL_RANGE_MASK,
+ FIELD_PREP(ADXL367_FILTER_CTL_RANGE_MASK,
+ range));
+ if (ret)
+ return ret;
- /* Activity thresholds depend on range */
- ret = _adxl367_set_act_threshold(st, ADXL367_ACTIVITY,
- st->act_threshold);
- if (ret)
- goto out;
+ adxl367_scale_act_thresholds(st, st->range, range);
- ret = _adxl367_set_act_threshold(st, ADXL367_INACTIVITY,
- st->inact_threshold);
- if (ret)
- goto out;
-
- ret = adxl367_set_measure_en(st, true);
- if (ret)
- goto out;
+ /* Activity thresholds depend on range */
+ ret = _adxl367_set_act_threshold(st, ADXL367_ACTIVITY,
+ st->act_threshold);
+ if (ret)
+ return ret;
- st->range = range;
+ ret = _adxl367_set_act_threshold(st, ADXL367_INACTIVITY,
+ st->inact_threshold);
+ if (ret)
+ return ret;
-out:
- mutex_unlock(&st->lock);
+ ret = adxl367_set_measure_en(st, true);
+ if (ret)
+ return ret;
- iio_device_release_direct_mode(indio_dev);
+ st->range = range;
- return ret;
+ return 0;
+ }
+ unreachable();
}
static int adxl367_time_ms_to_samples(struct adxl367_state *st, unsigned int ms)
@@ -587,11 +576,11 @@ static int adxl367_set_act_time_ms(struct adxl367_state *st,
{
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = adxl367_set_measure_en(st, false);
if (ret)
- goto out;
+ return ret;
if (act == ADXL367_ACTIVITY)
ret = _adxl367_set_act_time_ms(st, ms);
@@ -599,14 +588,9 @@ static int adxl367_set_act_time_ms(struct adxl367_state *st,
ret = _adxl367_set_inact_time_ms(st, ms);
if (ret)
- goto out;
-
- ret = adxl367_set_measure_en(st, true);
-
-out:
- mutex_unlock(&st->lock);
+ return ret;
- return ret;
+ return adxl367_set_measure_en(st, true);
}
static int _adxl367_set_odr(struct adxl367_state *st, enum adxl367_odr odr)
@@ -636,31 +620,23 @@ static int _adxl367_set_odr(struct adxl367_state *st, enum adxl367_odr odr)
static int adxl367_set_odr(struct iio_dev *indio_dev, enum adxl367_odr odr)
{
- struct adxl367_state *st = iio_priv(indio_dev);
- int ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ struct adxl367_state *st = iio_priv(indio_dev);
+ int ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ guard(mutex)(&st->lock);
- mutex_lock(&st->lock);
-
- ret = adxl367_set_measure_en(st, false);
- if (ret)
- goto out;
-
- ret = _adxl367_set_odr(st, odr);
- if (ret)
- goto out;
-
- ret = adxl367_set_measure_en(st, true);
-
-out:
- mutex_unlock(&st->lock);
+ ret = adxl367_set_measure_en(st, false);
+ if (ret)
+ return ret;
- iio_device_release_direct_mode(indio_dev);
+ ret = _adxl367_set_odr(st, odr);
+ if (ret)
+ return ret;
- return ret;
+ return adxl367_set_measure_en(st, true);
+ }
+ unreachable();
}
static int adxl367_set_temp_adc_en(struct adxl367_state *st, unsigned int reg,
@@ -749,36 +725,32 @@ static int adxl367_read_sample(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val)
{
- struct adxl367_state *st = iio_priv(indio_dev);
- u16 sample;
- int ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ struct adxl367_state *st = iio_priv(indio_dev);
+ u16 sample;
+ int ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ guard(mutex)(&st->lock);
- mutex_lock(&st->lock);
-
- ret = adxl367_set_temp_adc_reg_en(st, chan->address, true);
- if (ret)
- goto out;
-
- ret = regmap_bulk_read(st->regmap, chan->address, &st->sample_buf,
- sizeof(st->sample_buf));
- if (ret)
- goto out;
-
- sample = FIELD_GET(ADXL367_DATA_MASK, be16_to_cpu(st->sample_buf));
- *val = sign_extend32(sample, chan->scan_type.realbits - 1);
+ ret = adxl367_set_temp_adc_reg_en(st, chan->address, true);
+ if (ret)
+ return ret;
- ret = adxl367_set_temp_adc_reg_en(st, chan->address, false);
+ ret = regmap_bulk_read(st->regmap, chan->address, &st->sample_buf,
+ sizeof(st->sample_buf));
+ if (ret)
+ return ret;
-out:
- mutex_unlock(&st->lock);
+ sample = FIELD_GET(ADXL367_DATA_MASK, be16_to_cpu(st->sample_buf));
+ *val = sign_extend32(sample, chan->scan_type.realbits - 1);
- iio_device_release_direct_mode(indio_dev);
+ ret = adxl367_set_temp_adc_reg_en(st, chan->address, false);
+ if (ret)
+ return ret;
- return ret ?: IIO_VAL_INT;
+ return IIO_VAL_INT;
+ }
+ unreachable();
}
static int adxl367_get_status(struct adxl367_state *st, u8 *status,
@@ -886,12 +858,12 @@ static int adxl367_read_raw(struct iio_dev *indio_dev,
return adxl367_read_sample(indio_dev, chan, val);
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
- case IIO_ACCEL:
- mutex_lock(&st->lock);
+ case IIO_ACCEL: {
+ guard(mutex)(&st->lock);
*val = adxl367_range_scale_tbl[st->range][0];
*val2 = adxl367_range_scale_tbl[st->range][1];
- mutex_unlock(&st->lock);
return IIO_VAL_INT_PLUS_NANO;
+ }
case IIO_TEMP:
*val = 1000;
*val2 = ADXL367_TEMP_PER_C;
@@ -914,12 +886,12 @@ static int adxl367_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
- case IIO_CHAN_INFO_SAMP_FREQ:
- mutex_lock(&st->lock);
+ case IIO_CHAN_INFO_SAMP_FREQ: {
+ guard(mutex)(&st->lock);
*val = adxl367_samp_freq_tbl[st->odr][0];
*val2 = adxl367_samp_freq_tbl[st->odr][1];
- mutex_unlock(&st->lock);
return IIO_VAL_INT_PLUS_MICRO;
+ }
default:
return -EINVAL;
}
@@ -1004,18 +976,15 @@ static int adxl367_read_event_value(struct iio_dev *indio_dev,
{
struct adxl367_state *st = iio_priv(indio_dev);
+ guard(mutex)(&st->lock);
switch (info) {
case IIO_EV_INFO_VALUE: {
switch (dir) {
case IIO_EV_DIR_RISING:
- mutex_lock(&st->lock);
*val = st->act_threshold;
- mutex_unlock(&st->lock);
return IIO_VAL_INT;
case IIO_EV_DIR_FALLING:
- mutex_lock(&st->lock);
*val = st->inact_threshold;
- mutex_unlock(&st->lock);
return IIO_VAL_INT;
default:
return -EINVAL;
@@ -1024,15 +993,11 @@ static int adxl367_read_event_value(struct iio_dev *indio_dev,
case IIO_EV_INFO_PERIOD:
switch (dir) {
case IIO_EV_DIR_RISING:
- mutex_lock(&st->lock);
*val = st->act_time_ms;
- mutex_unlock(&st->lock);
*val2 = 1000;
return IIO_VAL_FRACTIONAL;
case IIO_EV_DIR_FALLING:
- mutex_lock(&st->lock);
*val = st->inact_time_ms;
- mutex_unlock(&st->lock);
*val2 = 1000;
return IIO_VAL_FRACTIONAL;
default:
@@ -1110,9 +1075,7 @@ static int adxl367_write_event_config(struct iio_dev *indio_dev,
enum iio_event_direction dir,
int state)
{
- struct adxl367_state *st = iio_priv(indio_dev);
enum adxl367_activity_type act;
- int ret;
switch (dir) {
case IIO_EV_DIR_RISING:
@@ -1125,33 +1088,28 @@ static int adxl367_write_event_config(struct iio_dev *indio_dev,
return -EINVAL;
}
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- mutex_lock(&st->lock);
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ struct adxl367_state *st = iio_priv(indio_dev);
+ int ret;
- ret = adxl367_set_measure_en(st, false);
- if (ret)
- goto out;
+ guard(mutex)(&st->lock);
- ret = adxl367_set_act_interrupt_en(st, act, state);
- if (ret)
- goto out;
-
- ret = adxl367_set_act_en(st, act, state ? ADCL367_ACT_REF_ENABLED
- : ADXL367_ACT_DISABLED);
- if (ret)
- goto out;
-
- ret = adxl367_set_measure_en(st, true);
+ ret = adxl367_set_measure_en(st, false);
+ if (ret)
+ return ret;
-out:
- mutex_unlock(&st->lock);
+ ret = adxl367_set_act_interrupt_en(st, act, state);
+ if (ret)
+ return ret;
- iio_device_release_direct_mode(indio_dev);
+ ret = adxl367_set_act_en(st, act, state ? ADCL367_ACT_REF_ENABLED
+ : ADXL367_ACT_DISABLED);
+ if (ret)
+ return ret;
- return ret;
+ return adxl367_set_measure_en(st, true);
+ }
+ unreachable();
}
static ssize_t adxl367_get_fifo_enabled(struct device *dev,
@@ -1176,9 +1134,8 @@ static ssize_t adxl367_get_fifo_watermark(struct device *dev,
struct adxl367_state *st = iio_priv(dev_to_iio_dev(dev));
unsigned int fifo_watermark;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
fifo_watermark = st->fifo_watermark;
- mutex_unlock(&st->lock);
return sysfs_emit(buf, "%d\n", fifo_watermark);
}
@@ -1207,22 +1164,17 @@ static int adxl367_set_watermark(struct iio_dev *indio_dev, unsigned int val)
if (val > ADXL367_FIFO_MAX_WATERMARK)
return -EINVAL;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = adxl367_set_measure_en(st, false);
if (ret)
- goto out;
+ return ret;
ret = adxl367_set_fifo_watermark(st, val);
if (ret)
- goto out;
-
- ret = adxl367_set_measure_en(st, true);
-
-out:
- mutex_unlock(&st->lock);
+ return ret;
- return ret;
+ return adxl367_set_measure_en(st, true);
}
static bool adxl367_find_mask_fifo_format(const unsigned long *scan_mask,
@@ -1253,27 +1205,24 @@ static int adxl367_update_scan_mode(struct iio_dev *indio_dev,
if (!adxl367_find_mask_fifo_format(active_scan_mask, &fifo_format))
return -EINVAL;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = adxl367_set_measure_en(st, false);
if (ret)
- goto out;
+ return ret;
ret = adxl367_set_fifo_format(st, fifo_format);
if (ret)
- goto out;
+ return ret;
ret = adxl367_set_measure_en(st, true);
if (ret)
- goto out;
+ return ret;
st->fifo_set_size = bitmap_weight(active_scan_mask,
indio_dev->masklength);
-out:
- mutex_unlock(&st->lock);
-
- return ret;
+ return 0;
}
static int adxl367_buffer_postenable(struct iio_dev *indio_dev)
@@ -1281,31 +1230,26 @@ static int adxl367_buffer_postenable(struct iio_dev *indio_dev)
struct adxl367_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = adxl367_set_temp_adc_mask_en(st, indio_dev->active_scan_mask,
true);
if (ret)
- goto out;
+ return ret;
ret = adxl367_set_measure_en(st, false);
if (ret)
- goto out;
+ return ret;
ret = adxl367_set_fifo_watermark_interrupt_en(st, true);
if (ret)
- goto out;
+ return ret;
ret = adxl367_set_fifo_mode(st, ADXL367_FIFO_MODE_STREAM);
if (ret)
- goto out;
-
- ret = adxl367_set_measure_en(st, true);
-
-out:
- mutex_unlock(&st->lock);
+ return ret;
- return ret;
+ return adxl367_set_measure_en(st, true);
}
static int adxl367_buffer_predisable(struct iio_dev *indio_dev)
@@ -1313,31 +1257,26 @@ static int adxl367_buffer_predisable(struct iio_dev *indio_dev)
struct adxl367_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = adxl367_set_measure_en(st, false);
if (ret)
- goto out;
+ return ret;
ret = adxl367_set_fifo_mode(st, ADXL367_FIFO_MODE_DISABLED);
if (ret)
- goto out;
+ return ret;
ret = adxl367_set_fifo_watermark_interrupt_en(st, false);
if (ret)
- goto out;
+ return ret;
ret = adxl367_set_measure_en(st, true);
if (ret)
- goto out;
-
- ret = adxl367_set_temp_adc_mask_en(st, indio_dev->active_scan_mask,
- false);
-
-out:
- mutex_unlock(&st->lock);
+ return ret;
- return ret;
+ return adxl367_set_temp_adc_mask_en(st, indio_dev->active_scan_mask,
+ false);
}
static const struct iio_buffer_setup_ops adxl367_buffer_ops = {
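
The adxl367 changes above all follow the same shape: scope-based cleanup from <linux/cleanup.h> replaces the lock/goto-out/unlock dance, and iio_device_claim_direct_scoped() releases direct mode when the block is left. A condensed, hypothetical example of the pattern (function and field names invented):

#include <linux/cleanup.h>
#include <linux/iio/iio.h>
#include <linux/mutex.h>

/*
 * guard() drops the mutex on every return path and the scoped claim drops
 * direct mode when the braced block is exited, so each error check becomes
 * a plain return. unreachable() only documents that every path inside the
 * scoped block returns.
 */
static int example_locked_write(struct iio_dev *indio_dev,
                                struct mutex *lock, int *shadow, int val)
{
        iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
                guard(mutex)(lock);

                *shadow = val;
                return 0;
        }
        unreachable();
}
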
diff --git a/drivers/iio/accel/adxl372_spi.c b/drivers/iio/accel/adxl372_spi.c
index 75a88f16c6c90..787699773f96f 100644
--- a/drivers/iio/accel/adxl372_spi.c
+++ b/drivers/iio/accel/adxl372_spi.c
@@ -6,8 +6,8 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
-#include <linux/of.h>
#include <linux/spi/spi.h>
#include "adxl372.h"
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index ab4fccb24b6c0..6581772cb0c46 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -13,10 +13,10 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/of.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
diff --git a/drivers/iio/accel/bmc150-accel-i2c.c b/drivers/iio/accel/bmc150-accel-i2c.c
index ee1ba134ad423..1c2e40369839a 100644
--- a/drivers/iio/accel/bmc150-accel-i2c.c
+++ b/drivers/iio/accel/bmc150-accel-i2c.c
@@ -224,6 +224,19 @@ static const struct acpi_device_id bmc150_accel_acpi_match[] = {
{"BMA250E"},
{"BMC150A"},
{"BMI055A"},
+ /*
+ * The "BOSC0200" identifier used here is not unique to devices using
+ * bmc150. The same "BOSC0200" identifier is found in the ACPI tables
+ * of the ASUS ROG ALLY and Ayaneo AIR Plus which both use a Bosch
+ * BMI323 chip. This creates a conflict with duplicate ACPI identifiers
+ * which multiple drivers want to use. Fortunately, when the bmc150
+ * driver starts to load on the ASUS ROG ALLY, the chip ID check
+ * portion fails (correctly) because the chip IDs received (via i2c)
+ * are unique between bmc150 and bmi323 and a dmesg output similar to
+ * this: "bmc150_accel_i2c i2c-BOSC0200:00: Invalid chip 0" can be
+ * seen. This allows the bmi323 driver to take over for ASUS ROG ALLY,
+ * and other devices using the bmi323 chip.
+ */
{"BOSC0200"},
{"BSBA0150"},
{"DUAL250E"},
@@ -266,7 +279,7 @@ static struct i2c_driver bmc150_accel_driver = {
.driver = {
.name = "bmc150_accel_i2c",
.of_match_table = bmc150_accel_of_match,
- .acpi_match_table = ACPI_PTR(bmc150_accel_acpi_match),
+ .acpi_match_table = bmc150_accel_acpi_match,
.pm = &bmc150_accel_pm_ops,
},
.probe = bmc150_accel_probe,
diff --git a/drivers/iio/accel/bmc150-accel-spi.c b/drivers/iio/accel/bmc150-accel-spi.c
index 921fb46be0b8f..a6b9f599eb7bd 100644
--- a/drivers/iio/accel/bmc150-accel-spi.c
+++ b/drivers/iio/accel/bmc150-accel-spi.c
@@ -7,7 +7,6 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/acpi.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
@@ -70,7 +69,7 @@ MODULE_DEVICE_TABLE(spi, bmc150_accel_id);
static struct spi_driver bmc150_accel_driver = {
.driver = {
.name = "bmc150_accel_spi",
- .acpi_match_table = ACPI_PTR(bmc150_accel_acpi_match),
+ .acpi_match_table = bmc150_accel_acpi_match,
.pm = &bmc150_accel_pm_ops,
},
.probe = bmc150_accel_probe,
diff --git a/drivers/iio/accel/bmi088-accel-i2c.c b/drivers/iio/accel/bmi088-accel-i2c.c
new file mode 100644
index 0000000000000..17e9156bbe89e
--- /dev/null
+++ b/drivers/iio/accel/bmi088-accel-i2c.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * 3-axis accelerometer driver supporting following Bosch-Sensortec chips:
+ * - BMI088
+ * - BMI085
+ * - BMI090L
+ *
+ * Copyright 2023 Jun Yan <jerrysteve1101@gmail.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "bmi088-accel.h"
+
+static int bmi088_accel_probe(struct i2c_client *i2c)
+{
+ struct regmap *regmap;
+ const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
+
+ regmap = devm_regmap_init_i2c(i2c, &bmi088_regmap_conf);
+ if (IS_ERR(regmap)) {
+ dev_err(&i2c->dev, "Failed to initialize i2c regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ return bmi088_accel_core_probe(&i2c->dev, regmap, i2c->irq,
+ id->driver_data);
+}
+
+static void bmi088_accel_remove(struct i2c_client *i2c)
+{
+ bmi088_accel_core_remove(&i2c->dev);
+}
+
+static const struct of_device_id bmi088_of_match[] = {
+ { .compatible = "bosch,bmi085-accel" },
+ { .compatible = "bosch,bmi088-accel" },
+ { .compatible = "bosch,bmi090l-accel" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bmi088_of_match);
+
+static const struct i2c_device_id bmi088_accel_id[] = {
+ { "bmi085-accel", BOSCH_BMI085 },
+ { "bmi088-accel", BOSCH_BMI088 },
+ { "bmi090l-accel", BOSCH_BMI090L },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, bmi088_accel_id);
+
+static struct i2c_driver bmi088_accel_driver = {
+ .driver = {
+ .name = "bmi088_accel_i2c",
+ .pm = pm_ptr(&bmi088_accel_pm_ops),
+ .of_match_table = bmi088_of_match,
+ },
+ .probe = bmi088_accel_probe,
+ .remove = bmi088_accel_remove,
+ .id_table = bmi088_accel_id,
+};
+module_i2c_driver(bmi088_accel_driver);
+
+MODULE_AUTHOR("Jun Yan <jerrysteve1101@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("BMI088 accelerometer driver (I2C)");
+MODULE_IMPORT_NS(IIO_BMI088);
diff --git a/drivers/iio/accel/da280.c b/drivers/iio/accel/da280.c
index 572bfe9694b07..9922868288449 100644
--- a/drivers/iio/accel/da280.c
+++ b/drivers/iio/accel/da280.c
@@ -23,8 +23,6 @@
#define DA280_MODE_ENABLE 0x1e
#define DA280_MODE_DISABLE 0x9e
-enum da280_chipset { da217, da226, da280 };
-
/*
* a value of + or -4096 corresponds to + or - 1G
* scale = 9.81 / 4096 = 0.002395019
@@ -47,6 +45,11 @@ static const struct iio_chan_spec da280_channels[] = {
DA280_CHANNEL(DA280_REG_ACC_Z_LSB, Z),
};
+struct da280_match_data {
+ const char *name;
+ int num_channels;
+};
+
struct da280_data {
struct i2c_client *client;
};
@@ -89,17 +92,6 @@ static const struct iio_info da280_info = {
.read_raw = da280_read_raw,
};
-static enum da280_chipset da280_match_acpi_device(struct device *dev)
-{
- const struct acpi_device_id *id;
-
- id = acpi_match_device(dev->driver->acpi_match_table, dev);
- if (!id)
- return -EINVAL;
-
- return (enum da280_chipset) id->driver_data;
-}
-
static void da280_disable(void *client)
{
da280_enable(client, false);
@@ -107,16 +99,21 @@ static void da280_disable(void *client)
static int da280_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
- int ret;
+ const struct da280_match_data *match_data;
struct iio_dev *indio_dev;
struct da280_data *data;
- enum da280_chipset chip;
+ int ret;
ret = i2c_smbus_read_byte_data(client, DA280_REG_CHIP_ID);
if (ret != DA280_CHIP_ID)
return (ret < 0) ? ret : -ENODEV;
+ match_data = i2c_get_match_data(client);
+ if (!match_data) {
+ dev_err(&client->dev, "Error match-data not set\n");
+ return -EINVAL;
+ }
+
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
@@ -127,23 +124,8 @@ static int da280_probe(struct i2c_client *client)
indio_dev->info = &da280_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = da280_channels;
-
- if (ACPI_HANDLE(&client->dev)) {
- chip = da280_match_acpi_device(&client->dev);
- } else {
- chip = id->driver_data;
- }
-
- if (chip == da217) {
- indio_dev->name = "da217";
- indio_dev->num_channels = 3;
- } else if (chip == da226) {
- indio_dev->name = "da226";
- indio_dev->num_channels = 2;
- } else {
- indio_dev->name = "da280";
- indio_dev->num_channels = 3;
- }
+ indio_dev->num_channels = match_data->num_channels;
+ indio_dev->name = match_data->name;
ret = da280_enable(client, true);
if (ret < 0)
@@ -168,17 +150,21 @@ static int da280_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(da280_pm_ops, da280_suspend, da280_resume);
+static const struct da280_match_data da217_match_data = { "da217", 3 };
+static const struct da280_match_data da226_match_data = { "da226", 2 };
+static const struct da280_match_data da280_match_data = { "da280", 3 };
+
static const struct acpi_device_id da280_acpi_match[] = {
- {"NSA2513", da217},
- {"MIRAACC", da280},
- {},
+ { "NSA2513", (kernel_ulong_t)&da217_match_data },
+ { "MIRAACC", (kernel_ulong_t)&da280_match_data },
+ {}
};
MODULE_DEVICE_TABLE(acpi, da280_acpi_match);
static const struct i2c_device_id da280_i2c_id[] = {
- { "da217", da217 },
- { "da226", da226 },
- { "da280", da280 },
+ { "da217", (kernel_ulong_t)&da217_match_data },
+ { "da226", (kernel_ulong_t)&da226_match_data },
+ { "da280", (kernel_ulong_t)&da280_match_data },
{}
};
MODULE_DEVICE_TABLE(i2c, da280_i2c_id);
@@ -186,7 +172,7 @@ MODULE_DEVICE_TABLE(i2c, da280_i2c_id);
static struct i2c_driver da280_driver = {
.driver = {
.name = "da280",
- .acpi_match_table = ACPI_PTR(da280_acpi_match),
+ .acpi_match_table = da280_acpi_match,
.pm = pm_sleep_ptr(&da280_pm_ops),
},
.probe = da280_probe,
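
The da280 rework replaces the hand-rolled ACPI/I2C-table dispatch with i2c_get_match_data(), which works because every match table entry now stores a pointer to a per-variant description. A small, self-contained sketch of that scheme with invented chip names; only i2c_get_match_data() and the kernel_ulong_t casts are taken from the patch:

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

/* Invented per-variant description (the patch uses struct da280_match_data). */
struct example_match_data {
        const char *name;
        int num_channels;
};

static const struct example_match_data example_chip_a = { "chip-a", 3 };
static const struct example_match_data example_chip_b = { "chip-b", 2 };

/* ACPI and OF tables would carry the same pointers as driver_data. */
static const struct i2c_device_id example_i2c_ids[] = {
        { "chip-a", (kernel_ulong_t)&example_chip_a },
        { "chip-b", (kernel_ulong_t)&example_chip_b },
        { }
};
MODULE_DEVICE_TABLE(i2c, example_i2c_ids);

static int example_probe(struct i2c_client *client)
{
        /* Resolves the entry regardless of how the device was enumerated. */
        const struct example_match_data *data = i2c_get_match_data(client);

        if (!data)
                return dev_err_probe(&client->dev, -ENODEV, "no match data\n");

        dev_info(&client->dev, "%s with %d channels\n",
                 data->name, data->num_channels);
        return 0;
}

static struct i2c_driver example_driver = {
        .driver = {
                .name = "example-accel",
        },
        .probe = example_probe,
        .id_table = example_i2c_ids,
};
module_i2c_driver(example_driver);

MODULE_LICENSE("GPL");
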
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 894709286b0cc..126e8bdd6d0ed 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -422,6 +422,23 @@ static int kiox010a_dsm(struct device *dev, int fn_index)
ACPI_FREE(obj);
return 0;
}
+
+static const struct acpi_device_id kx_acpi_match[] = {
+ {"KXCJ1013", KXCJK1013},
+ {"KXCJ1008", KXCJ91008},
+ {"KXCJ9000", KXCJ91008},
+ {"KIOX0008", KXCJ91008},
+ {"KIOX0009", KXTJ21009},
+ {"KIOX000A", KXCJ91008},
+ {"KIOX010A", KXCJ91008}, /* KXCJ91008 in the display of a yoga 2-in-1 */
+ {"KIOX020A", KXCJ91008}, /* KXCJ91008 in the base of a yoga 2-in-1 */
+ {"KXTJ1009", KXTJ21009},
+ {"KXJ2109", KXTJ21009},
+ {"SMO8500", KXCJ91008},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, kx_acpi_match);
+
#endif
static int kxcjk1013_set_mode(struct kxcjk1013_data *data,
@@ -619,6 +636,84 @@ static int kxcjk1013_set_power_state(struct kxcjk1013_data *data, bool on)
return 0;
}
+#ifdef CONFIG_ACPI
+static bool kxj_acpi_orientation(struct device *dev,
+ struct iio_mount_matrix *orientation)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ char *str;
+ union acpi_object *obj, *elements;
+ acpi_status status;
+ int i, j, val[3];
+ bool ret = false;
+
+ if (!acpi_has_method(adev->handle, "ROTM"))
+ return false;
+
+ status = acpi_evaluate_object(adev->handle, "ROTM", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "Failed to get ACPI mount matrix: %d\n", status);
+ return false;
+ }
+
+ obj = buffer.pointer;
+ if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3) {
+ dev_err(dev, "Unknown ACPI mount matrix package format\n");
+ goto out_free_buffer;
+ }
+
+ elements = obj->package.elements;
+ for (i = 0; i < 3; i++) {
+ if (elements[i].type != ACPI_TYPE_STRING) {
+ dev_err(dev, "Unknown ACPI mount matrix element format\n");
+ goto out_free_buffer;
+ }
+
+ str = elements[i].string.pointer;
+ if (sscanf(str, "%d %d %d", &val[0], &val[1], &val[2]) != 3) {
+ dev_err(dev, "Incorrect ACPI mount matrix string format\n");
+ goto out_free_buffer;
+ }
+
+ for (j = 0; j < 3; j++) {
+ switch (val[j]) {
+ case -1: str = "-1"; break;
+ case 0: str = "0"; break;
+ case 1: str = "1"; break;
+ default:
+ dev_err(dev, "Invalid value in ACPI mount matrix: %d\n", val[j]);
+ goto out_free_buffer;
+ }
+ orientation->rotation[i * 3 + j] = str;
+ }
+ }
+
+ ret = true;
+
+out_free_buffer:
+ kfree(buffer.pointer);
+ return ret;
+}
+
+static bool kxj1009_apply_acpi_orientation(struct device *dev,
+ struct iio_mount_matrix *orientation)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (adev && acpi_dev_hid_uid_match(adev, "KIOX000A", NULL))
+ return kxj_acpi_orientation(dev, orientation);
+
+ return false;
+}
+#else
+static bool kxj1009_apply_acpi_orientation(struct device *dev,
+ struct iio_mount_matrix *orientation)
+{
+ return false;
+}
+#endif
+
static int kxcjk1013_chip_update_thresholds(struct kxcjk1013_data *data)
{
int ret;
@@ -1449,9 +1544,12 @@ static int kxcjk1013_probe(struct i2c_client *client)
} else {
data->active_high_intr = true; /* default polarity */
- ret = iio_read_mount_matrix(&client->dev, &data->orientation);
- if (ret)
- return ret;
+ if (!kxj1009_apply_acpi_orientation(&client->dev, &data->orientation)) {
+ ret = iio_read_mount_matrix(&client->dev, &data->orientation);
+ if (ret)
+ return ret;
+ }
+
}
ret = devm_regulator_bulk_get_enable(&client->dev,
@@ -1687,22 +1785,6 @@ static const struct dev_pm_ops kxcjk1013_pm_ops = {
kxcjk1013_runtime_resume, NULL)
};
-static const struct acpi_device_id kx_acpi_match[] = {
- {"KXCJ1013", KXCJK1013},
- {"KXCJ1008", KXCJ91008},
- {"KXCJ9000", KXCJ91008},
- {"KIOX0008", KXCJ91008},
- {"KIOX0009", KXTJ21009},
- {"KIOX000A", KXCJ91008},
- {"KIOX010A", KXCJ91008}, /* KXCJ91008 in the display of a yoga 2-in-1 */
- {"KIOX020A", KXCJ91008}, /* KXCJ91008 in the base of a yoga 2-in-1 */
- {"KXTJ1009", KXTJ21009},
- {"KXJ2109", KXTJ21009},
- {"SMO8500", KXCJ91008},
- { },
-};
-MODULE_DEVICE_TABLE(acpi, kx_acpi_match);
-
static const struct i2c_device_id kxcjk1013_id[] = {
{"kxcjk1013", KXCJK1013},
{"kxcj91008", KXCJ91008},
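
To make the ROTM parsing above concrete: the ACPI method returns a package of three strings, one rotation-matrix row each, and every integer is converted to the string form IIO expects. A hypothetical result, assuming the firmware returned "0 1 0", "-1 0 0" and "0 0 1"; only the struct iio_mount_matrix layout is real:

#include <linux/iio/iio.h>

/*
 * What kxj_acpi_orientation() would produce for a ROTM package of
 * { "0 1 0", "-1 0 0", "0 0 1" }: one string per matrix entry, row by row.
 */
static const struct iio_mount_matrix example_rotm_result = {
        .rotation = {
                 "0", "1", "0",
                "-1", "0", "0",
                 "0", "0", "1",
        },
};
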
diff --git a/drivers/iio/accel/kxsd9-spi.c b/drivers/iio/accel/kxsd9-spi.c
index 1719a9f1d90ad..4414670dfb436 100644
--- a/drivers/iio/accel/kxsd9-spi.c
+++ b/drivers/iio/accel/kxsd9-spi.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/of.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/regmap.h>
diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
index d823f2edc6d42..083c08f65bafe 100644
--- a/drivers/iio/accel/mma9551.c
+++ b/drivers/iio/accel/mma9551.c
@@ -604,9 +604,9 @@ MODULE_DEVICE_TABLE(i2c, mma9551_id);
static struct i2c_driver mma9551_driver = {
.driver = {
.name = MMA9551_DRV_NAME,
- .acpi_match_table = ACPI_PTR(mma9551_acpi_match),
+ .acpi_match_table = mma9551_acpi_match,
.pm = pm_ptr(&mma9551_pm_ops),
- },
+ },
.probe = mma9551_probe,
.remove = mma9551_remove,
.id_table = mma9551_id,
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index d01aba4aecba5..3cbd0fd4e6240 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -1243,9 +1243,9 @@ MODULE_DEVICE_TABLE(i2c, mma9553_id);
static struct i2c_driver mma9553_driver = {
.driver = {
.name = MMA9553_DRV_NAME,
- .acpi_match_table = ACPI_PTR(mma9553_acpi_match),
+ .acpi_match_table = mma9553_acpi_match,
.pm = pm_ptr(&mma9553_pm_ops),
- },
+ },
.probe = mma9553_probe,
.remove = mma9553_remove,
.id_table = mma9553_id,
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index 82e8d0b390495..61839be501c21 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
@@ -472,6 +472,7 @@ static int mxc4005_probe(struct i2c_client *client)
static const struct acpi_device_id mxc4005_acpi_match[] = {
{"MXC4005", 0},
{"MXC6655", 0},
+ {"MDA6655", 0},
{ },
};
MODULE_DEVICE_TABLE(acpi, mxc4005_acpi_match);
@@ -493,7 +494,7 @@ MODULE_DEVICE_TABLE(i2c, mxc4005_id);
static struct i2c_driver mxc4005_driver = {
.driver = {
.name = MXC4005_DRV_NAME,
- .acpi_match_table = ACPI_PTR(mxc4005_acpi_match),
+ .acpi_match_table = mxc4005_acpi_match,
.of_match_table = mxc4005_of_match,
},
.probe = mxc4005_probe,
diff --git a/drivers/iio/accel/mxc6255.c b/drivers/iio/accel/mxc6255.c
index 33c2253561e6b..ac228128c4f9e 100644
--- a/drivers/iio/accel/mxc6255.c
+++ b/drivers/iio/accel/mxc6255.c
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <linux/iio/iio.h>
#include <linux/delay.h>
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
#include <linux/iio/sysfs.h>
@@ -181,7 +181,7 @@ MODULE_DEVICE_TABLE(i2c, mxc6255_id);
static struct i2c_driver mxc6255_driver = {
.driver = {
.name = MXC6255_DRV_NAME,
- .acpi_match_table = ACPI_PTR(mxc6255_acpi_match),
+ .acpi_match_table = mxc6255_acpi_match,
},
.probe = mxc6255_probe,
.id_table = mxc6255_id,
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 71ee861b29808..fd37498711215 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
@@ -127,14 +126,12 @@ static const struct of_device_id st_accel_of_match[] = {
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
-#ifdef CONFIG_ACPI
static const struct acpi_device_id st_accel_acpi_match[] = {
{"SMO8840", (kernel_ulong_t)LIS2DH12_ACCEL_DEV_NAME},
{"SMO8A90", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
{ },
};
MODULE_DEVICE_TABLE(acpi, st_accel_acpi_match);
-#endif
static const struct i2c_device_id st_accel_id_table[] = {
{ LSM303DLH_ACCEL_DEV_NAME },
@@ -204,7 +201,7 @@ static struct i2c_driver st_accel_driver = {
.driver = {
.name = "st-accel-i2c",
.of_match_table = st_accel_of_match,
- .acpi_match_table = ACPI_PTR(st_accel_acpi_match),
+ .acpi_match_table = st_accel_acpi_match,
},
.probe = st_accel_i2c_probe,
.id_table = st_accel_id_table,
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index 3415ac1b44953..668edc88c89dc 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -7,11 +7,11 @@
* STK8BA50 7-bit I2C address: 0x18.
*/
-#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -541,7 +541,7 @@ static struct i2c_driver stk8ba50_driver = {
.driver = {
.name = "stk8ba50",
.pm = pm_sleep_ptr(&stk8ba50_pm_ops),
- .acpi_match_table = ACPI_PTR(stk8ba50_acpi_id),
+ .acpi_match_table = stk8ba50_acpi_id,
},
.probe = stk8ba50_probe,
.remove = stk8ba50_remove,
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 3b73c509bd68e..0d9282fa67f59 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -291,7 +291,7 @@ config AD799X
config AD9467
tristate "Analog Devices AD9467 High Speed ADC driver"
depends on SPI
- depends on ADI_AXI_ADC
+ select IIO_BACKEND
help
Say yes here to build support for Analog Devices:
* AD9467 16-Bit, 200 MSPS/250 MSPS Analog-to-Digital Converter
@@ -309,7 +309,7 @@ config ADI_AXI_ADC
select IIO_BUFFER_HW_CONSUMER
select IIO_BUFFER_DMAENGINE
select REGMAP_MMIO
- depends on OF
+ select IIO_BACKEND
help
Say yes here to build support for Analog Devices Generic
AXI ADC IP core. The IP core is used for interfacing with
@@ -930,6 +930,17 @@ config NPCM_ADC
This driver can also be built as a module. If so, the module
will be called npcm_adc.
+config PAC1934
+ tristate "Microchip Technology PAC1934 driver"
+ depends on I2C
+ help
+ Say yes here to build support for Microchip Technology's PAC1931,
+ PAC1932, PAC1933, PAC1934 Single/Multi-Channel Power Monitor with
+ Accumulator.
+
+ This driver can also be built as a module. If so, the module
+ will be called pac1934.
+
config PALMAS_GPADC
tristate "TI Palmas General Purpose ADC"
depends on MFD_PALMAS
@@ -1312,6 +1323,17 @@ config TI_ADS1100
This driver can also be built as a module. If so, the module will be
called ti-ads1100.
+config TI_ADS1298
+ tristate "Texas Instruments ADS1298"
+ depends on SPI
+ select IIO_BUFFER
+ help
+ If you say yes here you get support for Texas Instruments ADS1298
+ medical ADC chips
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-ads1298.
+
config TI_ADS7950
tristate "Texas Instruments ADS7950 ADC driver"
depends on SPI && GPIOLIB
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index d2fda54a3259c..b3c434722364a 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_MP2629_ADC) += mp2629_adc.o
obj-$(CONFIG_MXS_LRADC_ADC) += mxs-lradc-adc.o
obj-$(CONFIG_NAU7802) += nau7802.o
obj-$(CONFIG_NPCM_ADC) += npcm_adc.o
+obj-$(CONFIG_PAC1934) += pac1934.o
obj-$(CONFIG_PALMAS_GPADC) += palmas_gpadc.o
obj-$(CONFIG_QCOM_SPMI_ADC5) += qcom-spmi-adc5.o
obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
@@ -116,6 +117,7 @@ obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o
obj-$(CONFIG_TI_ADC161S626) += ti-adc161s626.o
obj-$(CONFIG_TI_ADS1015) += ti-ads1015.o
obj-$(CONFIG_TI_ADS1100) += ti-ads1100.o
+obj-$(CONFIG_TI_ADS1298) += ti-ads1298.o
obj-$(CONFIG_TI_ADS7924) += ti-ads7924.o
obj-$(CONFIG_TI_ADS7950) += ti-ads7950.o
obj-$(CONFIG_TI_ADS8344) += ti-ads8344.o
diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c
index 62490424b6aed..febb64e67955e 100644
--- a/drivers/iio/adc/ad4130.c
+++ b/drivers/iio/adc/ad4130.c
@@ -887,9 +887,9 @@ static int ad4130_set_filter_mode(struct iio_dev *indio_dev,
unsigned int old_fs;
int ret = 0;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
if (setup_info->filter_mode == val)
- goto out;
+ return 0;
old_fs = setup_info->fs;
old_filter_mode = setup_info->filter_mode;
@@ -911,12 +911,10 @@ static int ad4130_set_filter_mode(struct iio_dev *indio_dev,
if (ret) {
setup_info->fs = old_fs;
setup_info->filter_mode = old_filter_mode;
+ return ret;
}
- out:
- mutex_unlock(&st->lock);
-
- return ret;
+ return 0;
}
static int ad4130_get_filter_mode(struct iio_dev *indio_dev,
@@ -927,9 +925,8 @@ static int ad4130_get_filter_mode(struct iio_dev *indio_dev,
struct ad4130_setup_info *setup_info = &st->chans_info[channel].setup;
enum ad4130_filter_mode filter_mode;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
filter_mode = setup_info->filter_mode;
- mutex_unlock(&st->lock);
return filter_mode;
}
@@ -971,7 +968,7 @@ static int ad4130_set_channel_pga(struct ad4130_state *st, unsigned int channel,
struct ad4130_chan_info *chan_info = &st->chans_info[channel];
struct ad4130_setup_info *setup_info = &chan_info->setup;
unsigned int pga, old_pga;
- int ret = 0;
+ int ret;
for (pga = 0; pga < AD4130_MAX_PGA; pga++)
if (val == st->scale_tbls[setup_info->ref_sel][pga][0] &&
@@ -981,21 +978,20 @@ static int ad4130_set_channel_pga(struct ad4130_state *st, unsigned int channel,
if (pga == AD4130_MAX_PGA)
return -EINVAL;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
if (pga == setup_info->pga)
- goto out;
+ return 0;
old_pga = setup_info->pga;
setup_info->pga = pga;
ret = ad4130_write_channel_setup(st, channel, false);
- if (ret)
+ if (ret) {
setup_info->pga = old_pga;
+ return ret;
+ }
-out:
- mutex_unlock(&st->lock);
-
- return ret;
+ return 0;
}
static int ad4130_set_channel_freq(struct ad4130_state *st,
@@ -1004,26 +1000,25 @@ static int ad4130_set_channel_freq(struct ad4130_state *st,
struct ad4130_chan_info *chan_info = &st->chans_info[channel];
struct ad4130_setup_info *setup_info = &chan_info->setup;
unsigned int fs, old_fs;
- int ret = 0;
+ int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
old_fs = setup_info->fs;
ad4130_freq_to_fs(setup_info->filter_mode, val, val2, &fs);
if (fs == setup_info->fs)
- goto out;
+ return 0;
setup_info->fs = fs;
ret = ad4130_write_channel_setup(st, channel, false);
- if (ret)
+ if (ret) {
setup_info->fs = old_fs;
+ return ret;
+ }
-out:
- mutex_unlock(&st->lock);
-
- return ret;
+ return 0;
}
static int _ad4130_read_sample(struct iio_dev *indio_dev, unsigned int channel,
@@ -1065,20 +1060,13 @@ static int _ad4130_read_sample(struct iio_dev *indio_dev, unsigned int channel,
static int ad4130_read_sample(struct iio_dev *indio_dev, unsigned int channel,
int *val)
{
- struct ad4130_state *st = iio_priv(indio_dev);
- int ret;
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ struct ad4130_state *st = iio_priv(indio_dev);
- mutex_lock(&st->lock);
- ret = _ad4130_read_sample(indio_dev, channel, val);
- mutex_unlock(&st->lock);
-
- iio_device_release_direct_mode(indio_dev);
-
- return ret;
+ guard(mutex)(&st->lock);
+ return _ad4130_read_sample(indio_dev, channel, val);
+ }
+ unreachable();
}
static int ad4130_read_raw(struct iio_dev *indio_dev,
@@ -1092,24 +1080,24 @@ static int ad4130_read_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_RAW:
return ad4130_read_sample(indio_dev, channel, val);
- case IIO_CHAN_INFO_SCALE:
- mutex_lock(&st->lock);
+ case IIO_CHAN_INFO_SCALE: {
+ guard(mutex)(&st->lock);
*val = st->scale_tbls[setup_info->ref_sel][setup_info->pga][0];
*val2 = st->scale_tbls[setup_info->ref_sel][setup_info->pga][1];
- mutex_unlock(&st->lock);
return IIO_VAL_INT_PLUS_NANO;
+ }
case IIO_CHAN_INFO_OFFSET:
*val = st->bipolar ? -BIT(chan->scan_type.realbits - 1) : 0;
return IIO_VAL_INT;
- case IIO_CHAN_INFO_SAMP_FREQ:
- mutex_lock(&st->lock);
+ case IIO_CHAN_INFO_SAMP_FREQ: {
+ guard(mutex)(&st->lock);
ad4130_fs_to_freq(setup_info->filter_mode, setup_info->fs,
val, val2);
- mutex_unlock(&st->lock);
return IIO_VAL_INT_PLUS_NANO;
+ }
default:
return -EINVAL;
}
@@ -1134,9 +1122,9 @@ static int ad4130_read_avail(struct iio_dev *indio_dev,
return IIO_AVAIL_LIST;
case IIO_CHAN_INFO_SAMP_FREQ:
- mutex_lock(&st->lock);
- filter_config = &ad4130_filter_configs[setup_info->filter_mode];
- mutex_unlock(&st->lock);
+ scoped_guard(mutex, &st->lock) {
+ filter_config = &ad4130_filter_configs[setup_info->filter_mode];
+ }
*vals = (int *)filter_config->samp_freq_avail;
*length = filter_config->samp_freq_avail_len * 2;
@@ -1197,21 +1185,18 @@ static int ad4130_update_scan_mode(struct iio_dev *indio_dev,
unsigned int val = 0;
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
for_each_set_bit(channel, scan_mask, indio_dev->num_channels) {
ret = ad4130_set_channel_enable(st, channel, true);
if (ret)
- goto out;
+ return ret;
val++;
}
st->num_enabled_channels = val;
-out:
- mutex_unlock(&st->lock);
-
return 0;
}
@@ -1232,22 +1217,19 @@ static int ad4130_set_fifo_watermark(struct iio_dev *indio_dev, unsigned int val
*/
eff = rounddown(AD4130_FIFO_SIZE, st->num_enabled_channels);
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = regmap_update_bits(st->regmap, AD4130_FIFO_CONTROL_REG,
AD4130_FIFO_CONTROL_WM_MASK,
FIELD_PREP(AD4130_FIFO_CONTROL_WM_MASK,
ad4130_watermark_reg_val(eff)));
if (ret)
- goto out;
+ return ret;
st->effective_watermark = eff;
st->watermark = val;
-out:
- mutex_unlock(&st->lock);
-
- return ret;
+ return 0;
}
static const struct iio_info ad4130_info = {
@@ -1265,26 +1247,21 @@ static int ad4130_buffer_postenable(struct iio_dev *indio_dev)
struct ad4130_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = ad4130_set_watermark_interrupt_en(st, true);
if (ret)
- goto out;
+ return ret;
ret = irq_set_irq_type(st->spi->irq, st->inv_irq_trigger);
if (ret)
- goto out;
+ return ret;
ret = ad4130_set_fifo_mode(st, AD4130_FIFO_MODE_WM);
if (ret)
- goto out;
-
- ret = ad4130_set_mode(st, AD4130_MODE_CONTINUOUS);
-
-out:
- mutex_unlock(&st->lock);
+ return ret;
- return ret;
+ return ad4130_set_mode(st, AD4130_MODE_CONTINUOUS);
}
static int ad4130_buffer_predisable(struct iio_dev *indio_dev)
@@ -1293,23 +1270,23 @@ static int ad4130_buffer_predisable(struct iio_dev *indio_dev)
unsigned int i;
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = ad4130_set_mode(st, AD4130_MODE_IDLE);
if (ret)
- goto out;
+ return ret;
ret = irq_set_irq_type(st->spi->irq, st->irq_trigger);
if (ret)
- goto out;
+ return ret;
ret = ad4130_set_fifo_mode(st, AD4130_FIFO_MODE_DISABLED);
if (ret)
- goto out;
+ return ret;
ret = ad4130_set_watermark_interrupt_en(st, false);
if (ret)
- goto out;
+ return ret;
/*
* update_scan_mode() is not called in the disable path, disable all
@@ -1318,13 +1295,10 @@ static int ad4130_buffer_predisable(struct iio_dev *indio_dev)
for (i = 0; i < indio_dev->num_channels; i++) {
ret = ad4130_set_channel_enable(st, i, false);
if (ret)
- goto out;
+ return ret;
}
-out:
- mutex_unlock(&st->lock);
-
- return ret;
+ return 0;
}
static const struct iio_buffer_setup_ops ad4130_buffer_ops = {
@@ -1338,9 +1312,8 @@ static ssize_t hwfifo_watermark_show(struct device *dev,
struct ad4130_state *st = iio_priv(dev_to_iio_dev(dev));
unsigned int val;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
val = st->watermark;
- mutex_unlock(&st->lock);
return sysfs_emit(buf, "%d\n", val);
}
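
The ad4130 hunks use both cleanup.h locking forms: guard() holds the mutex until the enclosing function returns, while scoped_guard() only covers the braced block that follows it (as in the samp_freq_avail lookup above). A tiny hypothetical illustration of the difference:

#include <linux/cleanup.h>
#include <linux/mutex.h>

/*
 * scoped_guard() drops the lock at the end of its block; guard() keeps
 * it until the function returns. Names here are invented.
 */
static int example_guard_forms(struct mutex *lock, int *cache)
{
        int snapshot;

        scoped_guard(mutex, lock) {
                snapshot = *cache;      /* lock released right after this block */
        }

        guard(mutex)(lock);             /* held until the function returns */
        *cache = snapshot + 1;

        return snapshot;
}
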
diff --git a/drivers/iio/adc/ad7091r-base.c b/drivers/iio/adc/ad7091r-base.c
index f4255b91acfc9..d6876259ad144 100644
--- a/drivers/iio/adc/ad7091r-base.c
+++ b/drivers/iio/adc/ad7091r-base.c
@@ -86,28 +86,25 @@ static int ad7091r_read_raw(struct iio_dev *iio_dev,
unsigned int read_val;
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
switch (m) {
case IIO_CHAN_INFO_RAW:
- if (st->mode != AD7091R_MODE_COMMAND) {
- ret = -EBUSY;
- goto unlock;
- }
+ if (st->mode != AD7091R_MODE_COMMAND)
+ return -EBUSY;
ret = ad7091r_read_one(iio_dev, chan->channel, &read_val);
if (ret)
- goto unlock;
+ return ret;
*val = read_val;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
if (st->vref) {
ret = regulator_get_voltage(st->vref);
if (ret < 0)
- goto unlock;
+ return ret;
*val = ret / 1000;
} else {
@@ -115,17 +112,11 @@ static int ad7091r_read_raw(struct iio_dev *iio_dev,
}
*val2 = chan->scan_type.realbits;
- ret = IIO_VAL_FRACTIONAL_LOG2;
- break;
+ return IIO_VAL_FRACTIONAL_LOG2;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-
-unlock:
- mutex_unlock(&st->lock);
- return ret;
}
static int ad7091r_read_event_config(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index 6581fce4ba959..7475ec2a56c72 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -17,13 +17,12 @@
#include <linux/of.h>
+#include <linux/iio/backend.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/clk.h>
-#include <linux/iio/adc/adi-axi-adc.h>
-
/*
* ADI High-Speed ADC common spi interface registers
* See Application-Note AN-877:
@@ -102,15 +101,20 @@
#define AD9467_REG_VREF_MASK 0x0F
struct ad9467_chip_info {
- struct adi_axi_adc_chip_info axi_adc_info;
- unsigned int default_output_mode;
- unsigned int vref_mask;
+ const char *name;
+ unsigned int id;
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+ const unsigned int (*scale_table)[2];
+ int num_scales;
+ unsigned long max_rate;
+ unsigned int default_output_mode;
+ unsigned int vref_mask;
};
-#define to_ad9467_chip_info(_info) \
- container_of(_info, struct ad9467_chip_info, axi_adc_info)
-
struct ad9467_state {
+ const struct ad9467_chip_info *info;
+ struct iio_backend *back;
struct spi_device *spi;
struct clk *clk;
unsigned int output_mode;
@@ -151,10 +155,10 @@ static int ad9467_spi_write(struct spi_device *spi, unsigned int reg,
return spi_write(spi, buf, ARRAY_SIZE(buf));
}
-static int ad9467_reg_access(struct adi_axi_adc_conv *conv, unsigned int reg,
+static int ad9467_reg_access(struct iio_dev *indio_dev, unsigned int reg,
unsigned int writeval, unsigned int *readval)
{
- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ struct ad9467_state *st = iio_priv(indio_dev);
struct spi_device *spi = st->spi;
int ret;
@@ -191,10 +195,10 @@ static const unsigned int ad9467_scale_table[][2] = {
{2300, 8}, {2400, 9}, {2500, 10},
};
-static void __ad9467_get_scale(struct adi_axi_adc_conv *conv, int index,
+static void __ad9467_get_scale(struct ad9467_state *st, int index,
unsigned int *val, unsigned int *val2)
{
- const struct adi_axi_adc_chip_info *info = conv->chip_info;
+ const struct ad9467_chip_info *info = st->info;
const struct iio_chan_spec *chan = &info->channels[0];
unsigned int tmp;
@@ -229,52 +233,44 @@ static const struct iio_chan_spec ad9467_channels[] = {
};
static const struct ad9467_chip_info ad9467_chip_tbl = {
- .axi_adc_info = {
- .name = "ad9467",
- .id = CHIPID_AD9467,
- .max_rate = 250000000UL,
- .scale_table = ad9467_scale_table,
- .num_scales = ARRAY_SIZE(ad9467_scale_table),
- .channels = ad9467_channels,
- .num_channels = ARRAY_SIZE(ad9467_channels),
- },
+ .name = "ad9467",
+ .id = CHIPID_AD9467,
+ .max_rate = 250000000UL,
+ .scale_table = ad9467_scale_table,
+ .num_scales = ARRAY_SIZE(ad9467_scale_table),
+ .channels = ad9467_channels,
+ .num_channels = ARRAY_SIZE(ad9467_channels),
.default_output_mode = AD9467_DEF_OUTPUT_MODE,
.vref_mask = AD9467_REG_VREF_MASK,
};
static const struct ad9467_chip_info ad9434_chip_tbl = {
- .axi_adc_info = {
- .name = "ad9434",
- .id = CHIPID_AD9434,
- .max_rate = 500000000UL,
- .scale_table = ad9434_scale_table,
- .num_scales = ARRAY_SIZE(ad9434_scale_table),
- .channels = ad9434_channels,
- .num_channels = ARRAY_SIZE(ad9434_channels),
- },
+ .name = "ad9434",
+ .id = CHIPID_AD9434,
+ .max_rate = 500000000UL,
+ .scale_table = ad9434_scale_table,
+ .num_scales = ARRAY_SIZE(ad9434_scale_table),
+ .channels = ad9434_channels,
+ .num_channels = ARRAY_SIZE(ad9434_channels),
.default_output_mode = AD9434_DEF_OUTPUT_MODE,
.vref_mask = AD9434_REG_VREF_MASK,
};
static const struct ad9467_chip_info ad9265_chip_tbl = {
- .axi_adc_info = {
- .name = "ad9265",
- .id = CHIPID_AD9265,
- .max_rate = 125000000UL,
- .scale_table = ad9265_scale_table,
- .num_scales = ARRAY_SIZE(ad9265_scale_table),
- .channels = ad9467_channels,
- .num_channels = ARRAY_SIZE(ad9467_channels),
- },
+ .name = "ad9265",
+ .id = CHIPID_AD9265,
+ .max_rate = 125000000UL,
+ .scale_table = ad9265_scale_table,
+ .num_scales = ARRAY_SIZE(ad9265_scale_table),
+ .channels = ad9467_channels,
+ .num_channels = ARRAY_SIZE(ad9467_channels),
.default_output_mode = AD9265_DEF_OUTPUT_MODE,
.vref_mask = AD9265_REG_VREF_MASK,
};
-static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2)
+static int ad9467_get_scale(struct ad9467_state *st, int *val, int *val2)
{
- const struct adi_axi_adc_chip_info *info = conv->chip_info;
- const struct ad9467_chip_info *info1 = to_ad9467_chip_info(info);
- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ const struct ad9467_chip_info *info = st->info;
unsigned int i, vref_val;
int ret;
@@ -282,7 +278,7 @@ static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2)
if (ret < 0)
return ret;
- vref_val = ret & info1->vref_mask;
+ vref_val = ret & info->vref_mask;
for (i = 0; i < info->num_scales; i++) {
if (vref_val == info->scale_table[i][1])
@@ -292,15 +288,14 @@ static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2)
if (i == info->num_scales)
return -ERANGE;
- __ad9467_get_scale(conv, i, val, val2);
+ __ad9467_get_scale(st, i, val, val2);
return IIO_VAL_INT_PLUS_MICRO;
}
-static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2)
+static int ad9467_set_scale(struct ad9467_state *st, int val, int val2)
{
- const struct adi_axi_adc_chip_info *info = conv->chip_info;
- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ const struct ad9467_chip_info *info = st->info;
unsigned int scale_val[2];
unsigned int i;
int ret;
@@ -309,7 +304,7 @@ static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2)
return -EINVAL;
for (i = 0; i < info->num_scales; i++) {
- __ad9467_get_scale(conv, i, &scale_val[0], &scale_val[1]);
+ __ad9467_get_scale(st, i, &scale_val[0], &scale_val[1]);
if (scale_val[0] != val || scale_val[1] != val2)
continue;
@@ -326,15 +321,15 @@ static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2)
return -EINVAL;
}
-static int ad9467_read_raw(struct adi_axi_adc_conv *conv,
+static int ad9467_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long m)
{
- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ struct ad9467_state *st = iio_priv(indio_dev);
switch (m) {
case IIO_CHAN_INFO_SCALE:
- return ad9467_get_scale(conv, val, val2);
+ return ad9467_get_scale(st, val, val2);
case IIO_CHAN_INFO_SAMP_FREQ:
*val = clk_get_rate(st->clk);
@@ -344,17 +339,17 @@ static int ad9467_read_raw(struct adi_axi_adc_conv *conv,
}
}
-static int ad9467_write_raw(struct adi_axi_adc_conv *conv,
+static int ad9467_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
- const struct adi_axi_adc_chip_info *info = conv->chip_info;
- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ struct ad9467_state *st = iio_priv(indio_dev);
+ const struct ad9467_chip_info *info = st->info;
long r_clk;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- return ad9467_set_scale(conv, val, val2);
+ return ad9467_set_scale(st, val, val2);
case IIO_CHAN_INFO_SAMP_FREQ:
r_clk = clk_round_rate(st->clk, val);
if (r_clk < 0 || r_clk > info->max_rate) {
@@ -369,13 +364,13 @@ static int ad9467_write_raw(struct adi_axi_adc_conv *conv,
}
}
-static int ad9467_read_avail(struct adi_axi_adc_conv *conv,
+static int ad9467_read_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
const int **vals, int *type, int *length,
long mask)
{
- const struct adi_axi_adc_chip_info *info = conv->chip_info;
- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ struct ad9467_state *st = iio_priv(indio_dev);
+ const struct ad9467_chip_info *info = st->info;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -389,6 +384,33 @@ static int ad9467_read_avail(struct adi_axi_adc_conv *conv,
}
}
+static int ad9467_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct ad9467_state *st = iio_priv(indio_dev);
+ unsigned int c;
+ int ret;
+
+ for (c = 0; c < st->info->num_channels; c++) {
+ if (test_bit(c, scan_mask))
+ ret = iio_backend_chan_enable(st->back, c);
+ else
+ ret = iio_backend_chan_disable(st->back, c);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct iio_info ad9467_info = {
+ .read_raw = ad9467_read_raw,
+ .write_raw = ad9467_write_raw,
+ .update_scan_mode = ad9467_update_scan_mode,
+ .debugfs_reg_access = ad9467_reg_access,
+ .read_avail = ad9467_read_avail,
+};
+
static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode)
{
int ret;
@@ -401,10 +423,9 @@ static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode)
AN877_ADC_TRANSFER_SYNC);
}
-static int ad9467_scale_fill(struct adi_axi_adc_conv *conv)
+static int ad9467_scale_fill(struct ad9467_state *st)
{
- const struct adi_axi_adc_chip_info *info = conv->chip_info;
- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ const struct ad9467_chip_info *info = st->info;
unsigned int i, val1, val2;
st->scales = devm_kmalloc_array(&st->spi->dev, info->num_scales,
@@ -413,7 +434,7 @@ static int ad9467_scale_fill(struct adi_axi_adc_conv *conv)
return -ENOMEM;
for (i = 0; i < info->num_scales; i++) {
- __ad9467_get_scale(conv, i, &val1, &val2);
+ __ad9467_get_scale(st, i, &val1, &val2);
st->scales[i][0] = val1;
st->scales[i][1] = val2;
}
@@ -421,11 +442,27 @@ static int ad9467_scale_fill(struct adi_axi_adc_conv *conv)
return 0;
}
-static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv)
+static int ad9467_setup(struct ad9467_state *st)
{
- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ struct iio_backend_data_fmt data = {
+ .sign_extend = true,
+ .enable = true,
+ };
+ unsigned int c, mode;
+ int ret;
+
+ mode = st->info->default_output_mode | AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT;
+ ret = ad9467_outputmode_set(st->spi, mode);
+ if (ret)
+ return ret;
- return ad9467_outputmode_set(st->spi, st->output_mode);
+ for (c = 0; c < st->info->num_channels; c++) {
+ ret = iio_backend_data_format_set(st->back, c, &data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int ad9467_reset(struct device *dev)
@@ -443,25 +480,65 @@ static int ad9467_reset(struct device *dev)
return 0;
}
+static int ad9467_iio_backend_get(struct ad9467_state *st)
+{
+ struct device *dev = &st->spi->dev;
+ struct device_node *__back;
+
+ st->back = devm_iio_backend_get(dev, NULL);
+ if (!IS_ERR(st->back))
+ return 0;
+ /* If not found, don't error out as we might have legacy DT property */
+ if (PTR_ERR(st->back) != -ENOENT)
+ return PTR_ERR(st->back);
+
+ /*
+ * if we don't get the backend using the normal APIs, use the legacy
+ * 'adi,adc-dev' property. So we get all nodes with that property, and
+ * look for the one pointing at us. Then we directly lookup that fwnode
+ * on the backend list of registered devices. This is done so we don't
+ * make io-backends mandatory which would break DT ABI.
+ */
+ for_each_node_with_property(__back, "adi,adc-dev") {
+ struct device_node *__me;
+
+ __me = of_parse_phandle(__back, "adi,adc-dev", 0);
+ if (!__me)
+ continue;
+
+ if (!device_match_of_node(dev, __me)) {
+ of_node_put(__me);
+ continue;
+ }
+
+ of_node_put(__me);
+ st->back = __devm_iio_backend_get_from_fwnode_lookup(dev,
+ of_fwnode_handle(__back));
+ of_node_put(__back);
+ return PTR_ERR_OR_ZERO(st->back);
+ }
+
+ return -ENODEV;
+}
+
static int ad9467_probe(struct spi_device *spi)
{
- const struct ad9467_chip_info *info;
- struct adi_axi_adc_conv *conv;
+ struct iio_dev *indio_dev;
struct ad9467_state *st;
unsigned int id;
int ret;
- info = spi_get_device_match_data(spi);
- if (!info)
- return -ENODEV;
-
- conv = devm_adi_axi_adc_conv_register(&spi->dev, sizeof(*st));
- if (IS_ERR(conv))
- return PTR_ERR(conv);
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
- st = adi_axi_adc_conv_priv(conv);
+ st = iio_priv(indio_dev);
st->spi = spi;
+ st->info = spi_get_device_match_data(spi);
+ if (!st->info)
+ return -ENODEV;
+
st->clk = devm_clk_get_enabled(&spi->dev, "adc-clk");
if (IS_ERR(st->clk))
return PTR_ERR(st->clk);
@@ -475,29 +552,39 @@ static int ad9467_probe(struct spi_device *spi)
if (ret)
return ret;
- conv->chip_info = &info->axi_adc_info;
-
- ret = ad9467_scale_fill(conv);
+ ret = ad9467_scale_fill(st);
if (ret)
return ret;
id = ad9467_spi_read(spi, AN877_ADC_REG_CHIP_ID);
- if (id != conv->chip_info->id) {
+ if (id != st->info->id) {
dev_err(&spi->dev, "Mismatch CHIP_ID, got 0x%X, expected 0x%X\n",
- id, conv->chip_info->id);
+ id, st->info->id);
return -ENODEV;
}
- conv->reg_access = ad9467_reg_access;
- conv->write_raw = ad9467_write_raw;
- conv->read_raw = ad9467_read_raw;
- conv->read_avail = ad9467_read_avail;
- conv->preenable_setup = ad9467_preenable_setup;
+ indio_dev->name = st->info->name;
+ indio_dev->channels = st->info->channels;
+ indio_dev->num_channels = st->info->num_channels;
+ indio_dev->info = &ad9467_info;
- st->output_mode = info->default_output_mode |
- AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT;
+ ret = ad9467_iio_backend_get(st);
+ if (ret)
+ return ret;
- return 0;
+ ret = devm_iio_backend_request_buffer(&spi->dev, st->back, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_backend_enable(&spi->dev, st->back);
+ if (ret)
+ return ret;
+
+ ret = ad9467_setup(st);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct of_device_id ad9467_of_match[] = {
@@ -529,4 +616,4 @@ module_spi_driver(ad9467_driver);
MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD9467 ADC driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(IIO_ADI_AXI);
+MODULE_IMPORT_NS(IIO_BACKEND);
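A condensed sketch of the frontend flow the conversion above adopts. Only the IIO core and iio_backend_*() calls are taken from the patch; the my_adc_* names and the bare-bones error handling are illustrative placeholders, not the actual driver.

struct my_adc_state {
	struct iio_backend *back;
};

static int my_adc_probe(struct spi_device *spi)
{
	struct iio_dev *indio_dev;
	struct my_adc_state *st;
	int ret;

	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;
	st = iio_priv(indio_dev);

	/* resolve the "io-backends" phandle (or a legacy lookup, as above) */
	st->back = devm_iio_backend_get(&spi->dev, NULL);
	if (IS_ERR(st->back))
		return PTR_ERR(st->back);

	/* the backend supplies the DMA buffer and is enabled by the frontend */
	ret = devm_iio_backend_request_buffer(&spi->dev, st->back, indio_dev);
	if (ret)
		return ret;
	ret = devm_iio_backend_enable(&spi->dev, st->back);
	if (ret)
		return ret;

	return devm_iio_device_register(&spi->dev, indio_dev);
}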
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 55442eddf57ce..a602429cdde4e 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -568,6 +568,7 @@ EXPORT_SYMBOL_NS_GPL(ad_sd_validate_trigger, IIO_AD_SIGMA_DELTA);
static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_dev)
{
struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
+ unsigned long irq_flags = irq_get_trigger_type(sigma_delta->spi->irq);
int ret;
if (dev != &sigma_delta->spi->dev) {
@@ -588,9 +589,13 @@ static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_de
/* the IRQ core clears IRQ_DISABLE_UNLAZY flag when freeing an IRQ */
irq_set_status_flags(sigma_delta->spi->irq, IRQ_DISABLE_UNLAZY);
+ /* Prefer the firmware-described trigger type; fall back to the driver's irq_flags */
+ if (!irq_flags)
+ irq_flags = sigma_delta->info->irq_flags;
+
ret = devm_request_irq(dev, sigma_delta->spi->irq,
ad_sd_data_rdy_trig_poll,
- sigma_delta->info->irq_flags | IRQF_NO_AUTOEN,
+ irq_flags | IRQF_NO_AUTOEN,
indio_dev->name,
sigma_delta);
if (ret)
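A minimal sketch of the precedence established by the hunk above: the trigger type described by firmware wins and the driver default is only a fallback. irq_get_trigger_type() returns IRQ_TYPE_NONE (0) when the firmware did not describe a trigger; dev, irq, handler, name, priv and default_irq_flags are placeholders.

	unsigned long irq_flags = irq_get_trigger_type(irq); /* e.g. IRQ_TYPE_EDGE_FALLING from DT */

	if (!irq_flags)				/* IRQ_TYPE_NONE: firmware was silent */
		irq_flags = default_irq_flags;	/* hypothetical driver-provided default */

	ret = devm_request_irq(dev, irq, handler, irq_flags | IRQF_NO_AUTOEN, name, priv);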
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index c247ff1541d28..4156639b3c8bd 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -17,13 +18,12 @@
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/buffer-dmaengine.h>
-
#include <linux/fpga/adi-axi-common.h>
-#include <linux/iio/adc/adi-axi-adc.h>
+
+#include <linux/iio/backend.h>
+#include <linux/iio/buffer-dmaengine.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
/*
* Register definitions:
@@ -44,6 +44,7 @@
#define ADI_AXI_REG_CHAN_CTRL_PN_SEL_OWR BIT(10)
#define ADI_AXI_REG_CHAN_CTRL_IQCOR_EN BIT(9)
#define ADI_AXI_REG_CHAN_CTRL_DCFILT_EN BIT(8)
+#define ADI_AXI_REG_CHAN_CTRL_FMT_MASK GENMASK(6, 4)
#define ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT BIT(6)
#define ADI_AXI_REG_CHAN_CTRL_FMT_TYPE BIT(5)
#define ADI_AXI_REG_CHAN_CTRL_FMT_EN BIT(4)
@@ -55,286 +56,100 @@
ADI_AXI_REG_CHAN_CTRL_FMT_EN | \
ADI_AXI_REG_CHAN_CTRL_ENABLE)
-struct adi_axi_adc_core_info {
- unsigned int version;
-};
-
struct adi_axi_adc_state {
- struct mutex lock;
-
- struct adi_axi_adc_client *client;
struct regmap *regmap;
-};
-
-struct adi_axi_adc_client {
- struct list_head entry;
- struct adi_axi_adc_conv conv;
- struct adi_axi_adc_state *state;
struct device *dev;
- const struct adi_axi_adc_core_info *info;
};
-static LIST_HEAD(registered_clients);
-static DEFINE_MUTEX(registered_clients_lock);
-
-static struct adi_axi_adc_client *conv_to_client(struct adi_axi_adc_conv *conv)
-{
- return container_of(conv, struct adi_axi_adc_client, conv);
-}
-
-void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv)
-{
- struct adi_axi_adc_client *cl = conv_to_client(conv);
-
- return (char *)cl + ALIGN(sizeof(struct adi_axi_adc_client),
- IIO_DMA_MINALIGN);
-}
-EXPORT_SYMBOL_NS_GPL(adi_axi_adc_conv_priv, IIO_ADI_AXI);
-
-static int adi_axi_adc_config_dma_buffer(struct device *dev,
- struct iio_dev *indio_dev)
-{
- const char *dma_name;
-
- if (!device_property_present(dev, "dmas"))
- return 0;
-
- if (device_property_read_string(dev, "dma-names", &dma_name))
- dma_name = "rx";
-
- return devm_iio_dmaengine_buffer_setup(indio_dev->dev.parent,
- indio_dev, dma_name);
-}
-
-static int adi_axi_adc_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2, long mask)
-{
- struct adi_axi_adc_state *st = iio_priv(indio_dev);
- struct adi_axi_adc_conv *conv = &st->client->conv;
-
- if (!conv->read_raw)
- return -EOPNOTSUPP;
-
- return conv->read_raw(conv, chan, val, val2, mask);
-}
-
-static int adi_axi_adc_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
-{
- struct adi_axi_adc_state *st = iio_priv(indio_dev);
- struct adi_axi_adc_conv *conv = &st->client->conv;
-
- if (!conv->write_raw)
- return -EOPNOTSUPP;
-
- return conv->write_raw(conv, chan, val, val2, mask);
-}
-
-static int adi_axi_adc_read_avail(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- const int **vals, int *type, int *length,
- long mask)
-{
- struct adi_axi_adc_state *st = iio_priv(indio_dev);
- struct adi_axi_adc_conv *conv = &st->client->conv;
-
- if (!conv->read_avail)
- return -EOPNOTSUPP;
-
- return conv->read_avail(conv, chan, vals, type, length, mask);
-}
-
-static int adi_axi_adc_update_scan_mode(struct iio_dev *indio_dev,
- const unsigned long *scan_mask)
+static int axi_adc_enable(struct iio_backend *back)
{
- struct adi_axi_adc_state *st = iio_priv(indio_dev);
- struct adi_axi_adc_conv *conv = &st->client->conv;
- unsigned int i;
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
int ret;
- for (i = 0; i < conv->chip_info->num_channels; i++) {
- if (test_bit(i, scan_mask))
- ret = regmap_set_bits(st->regmap,
- ADI_AXI_REG_CHAN_CTRL(i),
- ADI_AXI_REG_CHAN_CTRL_ENABLE);
- else
- ret = regmap_clear_bits(st->regmap,
- ADI_AXI_REG_CHAN_CTRL(i),
- ADI_AXI_REG_CHAN_CTRL_ENABLE);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static struct adi_axi_adc_conv *adi_axi_adc_conv_register(struct device *dev,
- size_t sizeof_priv)
-{
- struct adi_axi_adc_client *cl;
- size_t alloc_size;
-
- alloc_size = ALIGN(sizeof(struct adi_axi_adc_client), IIO_DMA_MINALIGN);
- if (sizeof_priv)
- alloc_size += ALIGN(sizeof_priv, IIO_DMA_MINALIGN);
-
- cl = kzalloc(alloc_size, GFP_KERNEL);
- if (!cl)
- return ERR_PTR(-ENOMEM);
-
- mutex_lock(&registered_clients_lock);
-
- cl->dev = get_device(dev);
-
- list_add_tail(&cl->entry, &registered_clients);
-
- mutex_unlock(&registered_clients_lock);
+ ret = regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN,
+ ADI_AXI_REG_RSTN_MMCM_RSTN);
+ if (ret)
+ return ret;
- return &cl->conv;
+ fsleep(10000);
+ return regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN,
+ ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
}
-static void adi_axi_adc_conv_unregister(struct adi_axi_adc_conv *conv)
+static void axi_adc_disable(struct iio_backend *back)
{
- struct adi_axi_adc_client *cl = conv_to_client(conv);
-
- mutex_lock(&registered_clients_lock);
-
- list_del(&cl->entry);
- put_device(cl->dev);
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
- mutex_unlock(&registered_clients_lock);
-
- kfree(cl);
+ regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
}
-static void devm_adi_axi_adc_conv_release(void *conv)
+static int axi_adc_data_format_set(struct iio_backend *back, unsigned int chan,
+ const struct iio_backend_data_fmt *data)
{
- adi_axi_adc_conv_unregister(conv);
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+ u32 val;
+
+ if (!data->enable)
+ return regmap_clear_bits(st->regmap,
+ ADI_AXI_REG_CHAN_CTRL(chan),
+ ADI_AXI_REG_CHAN_CTRL_FMT_EN);
+
+ val = FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_EN, true);
+ if (data->sign_extend)
+ val |= FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT, true);
+ if (data->type == IIO_BACKEND_OFFSET_BINARY)
+ val |= FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_TYPE, true);
+
+ return regmap_update_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
+ ADI_AXI_REG_CHAN_CTRL_FMT_MASK, val);
}
-struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
- size_t sizeof_priv)
+static int axi_adc_chan_enable(struct iio_backend *back, unsigned int chan)
{
- struct adi_axi_adc_conv *conv;
- int ret;
-
- conv = adi_axi_adc_conv_register(dev, sizeof_priv);
- if (IS_ERR(conv))
- return conv;
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
- ret = devm_add_action_or_reset(dev, devm_adi_axi_adc_conv_release,
- conv);
- if (ret)
- return ERR_PTR(ret);
-
- return conv;
+ return regmap_set_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
+ ADI_AXI_REG_CHAN_CTRL_ENABLE);
}
-EXPORT_SYMBOL_NS_GPL(devm_adi_axi_adc_conv_register, IIO_ADI_AXI);
-
-static const struct iio_info adi_axi_adc_info = {
- .read_raw = &adi_axi_adc_read_raw,
- .write_raw = &adi_axi_adc_write_raw,
- .update_scan_mode = &adi_axi_adc_update_scan_mode,
- .read_avail = &adi_axi_adc_read_avail,
-};
-
-static const struct adi_axi_adc_core_info adi_axi_adc_10_0_a_info = {
- .version = ADI_AXI_PCORE_VER(10, 0, 'a'),
-};
-static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev)
+static int axi_adc_chan_disable(struct iio_backend *back, unsigned int chan)
{
- const struct adi_axi_adc_core_info *info;
- struct adi_axi_adc_client *cl;
- struct device_node *cln;
-
- info = of_device_get_match_data(dev);
- if (!info)
- return ERR_PTR(-ENODEV);
-
- cln = of_parse_phandle(dev->of_node, "adi,adc-dev", 0);
- if (!cln) {
- dev_err(dev, "No 'adi,adc-dev' node defined\n");
- return ERR_PTR(-ENODEV);
- }
-
- mutex_lock(&registered_clients_lock);
-
- list_for_each_entry(cl, &registered_clients, entry) {
- if (!cl->dev)
- continue;
-
- if (cl->dev->of_node != cln)
- continue;
-
- if (!try_module_get(cl->dev->driver->owner)) {
- mutex_unlock(&registered_clients_lock);
- of_node_put(cln);
- return ERR_PTR(-ENODEV);
- }
-
- get_device(cl->dev);
- cl->info = info;
- mutex_unlock(&registered_clients_lock);
- of_node_put(cln);
- return cl;
- }
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
- mutex_unlock(&registered_clients_lock);
- of_node_put(cln);
-
- return ERR_PTR(-EPROBE_DEFER);
+ return regmap_clear_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
+ ADI_AXI_REG_CHAN_CTRL_ENABLE);
}
-static int adi_axi_adc_setup_channels(struct device *dev,
- struct adi_axi_adc_state *st)
+static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back,
+ struct iio_dev *indio_dev)
{
- struct adi_axi_adc_conv *conv = &st->client->conv;
- int i, ret;
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+ struct iio_buffer *buffer;
+ const char *dma_name;
+ int ret;
- if (conv->preenable_setup) {
- ret = conv->preenable_setup(conv);
- if (ret)
- return ret;
- }
+ if (device_property_read_string(st->dev, "dma-names", &dma_name))
+ dma_name = "rx";
- for (i = 0; i < conv->chip_info->num_channels; i++) {
- ret = regmap_write(st->regmap, ADI_AXI_REG_CHAN_CTRL(i),
- ADI_AXI_REG_CHAN_CTRL_DEFAULTS);
- if (ret)
- return ret;
+ buffer = iio_dmaengine_buffer_alloc(st->dev, dma_name);
+ if (IS_ERR(buffer)) {
+ dev_err(st->dev, "Could not get DMA buffer, %ld\n",
+ PTR_ERR(buffer));
+ return ERR_CAST(buffer);
}
- return 0;
-}
-
-static int axi_adc_reset(struct adi_axi_adc_state *st)
-{
- int ret;
-
- ret = regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
- if (ret)
- return ret;
-
- mdelay(10);
- ret = regmap_write(st->regmap, ADI_AXI_REG_RSTN,
- ADI_AXI_REG_RSTN_MMCM_RSTN);
+ indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+ ret = iio_device_attach_buffer(indio_dev, buffer);
if (ret)
- return ret;
+ return ERR_PTR(ret);
- mdelay(10);
- return regmap_write(st->regmap, ADI_AXI_REG_RSTN,
- ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
+ return buffer;
}
-static void adi_axi_adc_cleanup(void *data)
+static void axi_adc_free_buffer(struct iio_backend *back,
+ struct iio_buffer *buffer)
{
- struct adi_axi_adc_client *cl = data;
-
- put_device(cl->dev);
- module_put(cl->dev->driver->owner);
+ iio_dmaengine_buffer_free(buffer);
}
static const struct regmap_config axi_adc_regmap_config = {
@@ -344,45 +159,47 @@ static const struct regmap_config axi_adc_regmap_config = {
.max_register = 0x0800,
};
+static const struct iio_backend_ops adi_axi_adc_generic = {
+ .enable = axi_adc_enable,
+ .disable = axi_adc_disable,
+ .data_format_set = axi_adc_data_format_set,
+ .chan_enable = axi_adc_chan_enable,
+ .chan_disable = axi_adc_chan_disable,
+ .request_buffer = axi_adc_request_buffer,
+ .free_buffer = axi_adc_free_buffer,
+};
+
static int adi_axi_adc_probe(struct platform_device *pdev)
{
- struct adi_axi_adc_conv *conv;
- struct iio_dev *indio_dev;
- struct adi_axi_adc_client *cl;
+ const unsigned int *expected_ver;
struct adi_axi_adc_state *st;
void __iomem *base;
unsigned int ver;
int ret;
- cl = adi_axi_adc_attach_client(&pdev->dev);
- if (IS_ERR(cl))
- return PTR_ERR(cl);
-
- ret = devm_add_action_or_reset(&pdev->dev, adi_axi_adc_cleanup, cl);
- if (ret)
- return ret;
-
- indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
- if (indio_dev == NULL)
+ st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
return -ENOMEM;
- st = iio_priv(indio_dev);
- st->client = cl;
- cl->state = st;
- mutex_init(&st->lock);
-
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
+ st->dev = &pdev->dev;
st->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&axi_adc_regmap_config);
if (IS_ERR(st->regmap))
return PTR_ERR(st->regmap);
- conv = &st->client->conv;
+ expected_ver = device_get_match_data(&pdev->dev);
+ if (!expected_ver)
+ return -ENODEV;
- ret = axi_adc_reset(st);
+ /*
+ * Force-disable the core; it is up to the frontend to enable us.
+ * Registers can still be read and written while disabled.
+ */
+ ret = regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
if (ret)
return ret;
@@ -390,33 +207,19 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (cl->info->version > ver) {
+ if (*expected_ver > ver) {
dev_err(&pdev->dev,
"IP core version is too old. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
- ADI_AXI_PCORE_VER_MAJOR(cl->info->version),
- ADI_AXI_PCORE_VER_MINOR(cl->info->version),
- ADI_AXI_PCORE_VER_PATCH(cl->info->version),
+ ADI_AXI_PCORE_VER_MAJOR(*expected_ver),
+ ADI_AXI_PCORE_VER_MINOR(*expected_ver),
+ ADI_AXI_PCORE_VER_PATCH(*expected_ver),
ADI_AXI_PCORE_VER_MAJOR(ver),
ADI_AXI_PCORE_VER_MINOR(ver),
ADI_AXI_PCORE_VER_PATCH(ver));
return -ENODEV;
}
- indio_dev->info = &adi_axi_adc_info;
- indio_dev->name = "adi-axi-adc";
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->num_channels = conv->chip_info->num_channels;
- indio_dev->channels = conv->chip_info->channels;
-
- ret = adi_axi_adc_config_dma_buffer(&pdev->dev, indio_dev);
- if (ret)
- return ret;
-
- ret = adi_axi_adc_setup_channels(&pdev->dev, st);
- if (ret)
- return ret;
-
- ret = devm_iio_device_register(&pdev->dev, indio_dev);
+ ret = devm_iio_backend_register(&pdev->dev, &adi_axi_adc_generic, st);
if (ret)
return ret;
@@ -428,6 +231,8 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
return 0;
}
+static unsigned int adi_axi_adc_10_0_a_info = ADI_AXI_PCORE_VER(10, 0, 'a');
+
/* Match table for of_platform binding */
static const struct of_device_id adi_axi_adc_of_match[] = {
{ .compatible = "adi,axi-adc-10.0.a", .data = &adi_axi_adc_10_0_a_info },
@@ -447,3 +252,5 @@ module_platform_driver(adi_axi_adc_driver);
MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices Generic AXI ADC IP core driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_DMAENGINE_BUFFER);
+MODULE_IMPORT_NS(IIO_BACKEND);
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 7c2a98b8c3a96..8b5bc96cb9fbf 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -357,62 +357,55 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
int *val,
long m)
{
- int ret = 0;
- s32 data;
- u8 rxbuf[2];
- struct max1363_state *st = iio_priv(indio_dev);
- struct i2c_client *client = st->client;
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
- mutex_lock(&st->lock);
-
- /*
- * If monitor mode is enabled, the method for reading a single
- * channel will have to be rather different and has not yet
- * been implemented.
- *
- * Also, cannot read directly if buffered capture enabled.
- */
- if (st->monitor_on) {
- ret = -EBUSY;
- goto error_ret;
- }
-
- /* Check to see if current scan mode is correct */
- if (st->current_mode != &max1363_mode_table[chan->address]) {
- /* Update scan mode if needed */
- st->current_mode = &max1363_mode_table[chan->address];
- ret = max1363_set_scan_mode(st);
- if (ret < 0)
- goto error_ret;
- }
- if (st->chip_info->bits != 8) {
- /* Get reading */
- data = st->recv(client, rxbuf, 2);
- if (data < 0) {
- ret = data;
- goto error_ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ s32 data;
+ u8 rxbuf[2];
+ struct max1363_state *st = iio_priv(indio_dev);
+ struct i2c_client *client = st->client;
+
+ guard(mutex)(&st->lock);
+
+ /*
+ * If monitor mode is enabled, the method for reading a single
+ * channel will have to be rather different and has not yet
+ * been implemented.
+ *
+ * Also, cannot read directly if buffered capture enabled.
+ */
+ if (st->monitor_on)
+ return -EBUSY;
+
+ /* Check to see if current scan mode is correct */
+ if (st->current_mode != &max1363_mode_table[chan->address]) {
+ int ret;
+
+ /* Update scan mode if needed */
+ st->current_mode = &max1363_mode_table[chan->address];
+ ret = max1363_set_scan_mode(st);
+ if (ret < 0)
+ return ret;
}
- data = (rxbuf[1] | rxbuf[0] << 8) &
- ((1 << st->chip_info->bits) - 1);
- } else {
- /* Get reading */
- data = st->recv(client, rxbuf, 1);
- if (data < 0) {
- ret = data;
- goto error_ret;
+ if (st->chip_info->bits != 8) {
+ /* Get reading */
+ data = st->recv(client, rxbuf, 2);
+ if (data < 0)
+ return data;
+
+ data = (rxbuf[1] | rxbuf[0] << 8) &
+ ((1 << st->chip_info->bits) - 1);
+ } else {
+ /* Get reading */
+ data = st->recv(client, rxbuf, 1);
+ if (data < 0)
+ return data;
+
+ data = rxbuf[0];
}
- data = rxbuf[0];
- }
- *val = data;
-
-error_ret:
- mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ *val = data;
+ return 0;
+ }
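+ /*
+ * Every branch inside the scoped block above returns, so control can
+ * never fall out of it; unreachable() records that for the compiler
+ * instead of adding a dead return statement.
+ */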
+ unreachable();
}
static int max1363_read_raw(struct iio_dev *indio_dev,
@@ -710,9 +703,8 @@ static ssize_t max1363_monitor_store_freq(struct device *dev,
if (!found)
return -EINVAL;
- mutex_lock(&st->lock);
- st->monitor_speed = i;
- mutex_unlock(&st->lock);
+ scoped_guard(mutex, &st->lock)
+ st->monitor_speed = i;
return 0;
}
@@ -815,12 +807,11 @@ static int max1363_read_event_config(struct iio_dev *indio_dev,
int val;
int number = chan->channel;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
if (dir == IIO_EV_DIR_FALLING)
val = (1 << number) & st->mask_low;
else
val = (1 << number) & st->mask_high;
- mutex_unlock(&st->lock);
return val;
}
@@ -962,46 +953,42 @@ static int max1363_write_event_config(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, enum iio_event_type type,
enum iio_event_direction dir, int state)
{
- int ret = 0;
struct max1363_state *st = iio_priv(indio_dev);
- u16 unifiedmask;
- int number = chan->channel;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
- mutex_lock(&st->lock);
-
- unifiedmask = st->mask_low | st->mask_high;
- if (dir == IIO_EV_DIR_FALLING) {
-
- if (state == 0)
- st->mask_low &= ~(1 << number);
- else {
- ret = __max1363_check_event_mask((1 << number),
- unifiedmask);
- if (ret)
- goto error_ret;
- st->mask_low |= (1 << number);
- }
- } else {
- if (state == 0)
- st->mask_high &= ~(1 << number);
- else {
- ret = __max1363_check_event_mask((1 << number),
- unifiedmask);
- if (ret)
- goto error_ret;
- st->mask_high |= (1 << number);
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ int number = chan->channel;
+ u16 unifiedmask;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ unifiedmask = st->mask_low | st->mask_high;
+ if (dir == IIO_EV_DIR_FALLING) {
+
+ if (state == 0)
+ st->mask_low &= ~(1 << number);
+ else {
+ ret = __max1363_check_event_mask((1 << number),
+ unifiedmask);
+ if (ret)
+ return ret;
+ st->mask_low |= (1 << number);
+ }
+ } else {
+ if (state == 0)
+ st->mask_high &= ~(1 << number);
+ else {
+ ret = __max1363_check_event_mask((1 << number),
+ unifiedmask);
+ if (ret)
+ return ret;
+ st->mask_high |= (1 << number);
+ }
}
}
-
max1363_monitor_mode_update(st, !!(st->mask_high | st->mask_low));
-error_ret:
- mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ return 0;
}
/*
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index f3b81798b3c93..da1421bd7b629 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -371,6 +371,11 @@ static const struct mcp320x_chip_info mcp320x_chip_infos[] = {
},
};
+static void mcp320x_regulator_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
static int mcp320x_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -388,7 +393,6 @@ static int mcp320x_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &mcp320x_info;
- spi_set_drvdata(spi, indio_dev);
device_index = spi_get_device_id(spi)->driver_data;
chip_info = &mcp320x_chip_infos[device_index];
@@ -445,27 +449,13 @@ static int mcp320x_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- mutex_init(&adc->lock);
-
- ret = iio_device_register(indio_dev);
+ ret = devm_add_action_or_reset(&spi->dev, mcp320x_regulator_disable, adc->reg);
if (ret < 0)
- goto reg_disable;
-
- return 0;
-
-reg_disable:
- regulator_disable(adc->reg);
-
- return ret;
-}
+ return ret;
-static void mcp320x_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct mcp320x *adc = iio_priv(indio_dev);
+ mutex_init(&adc->lock);
- iio_device_unregister(indio_dev);
- regulator_disable(adc->reg);
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct of_device_id mcp320x_dt_ids[] = {
@@ -520,7 +510,6 @@ static struct spi_driver mcp320x_driver = {
.of_match_table = mcp320x_dt_ids,
},
.probe = mcp320x_probe,
- .remove = mcp320x_remove,
.id_table = mcp320x_id,
};
module_spi_driver(mcp320x_driver);
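Because devres actions are released in reverse order of registration, the regulator-disable action added before devm_iio_device_register() runs after the IIO device is unregistered on unbind, matching the ordering of the removed mcp320x_remove(). A hedged sketch of the pattern; only the devm/regulator calls are from the patch, the other names are placeholders.

static void my_regulator_disable(void *reg)
{
	regulator_disable(reg);
}

	/* in probe(), immediately after a successful regulator_enable(reg): */
	ret = devm_add_action_or_reset(dev, my_regulator_disable, reg);
	if (ret)
		return ret;	/* on failure the action has already been run */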
diff --git a/drivers/iio/adc/pac1934.c b/drivers/iio/adc/pac1934.c
new file mode 100644
index 0000000000000..e0c2742da5236
--- /dev/null
+++ b/drivers/iio/adc/pac1934.c
@@ -0,0 +1,1636 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * IIO driver for PAC1934 Multi-Channel DC Power/Energy Monitor
+ *
+ * Copyright (C) 2017-2024 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Bogdan Bolocan <bogdan.bolocan@microchip.com>
+ * Author: Victor Tudose
+ * Author: Marius Cristea <marius.cristea@microchip.com>
+ *
+ * Datasheet for PAC1931, PAC1932, PAC1933 and PAC1934 can be found here:
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/OTH/ProductDocuments/DataSheets/PAC1931-Family-Data-Sheet-DS20005850E.pdf
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <asm/unaligned.h>
+
+/*
+ * maximum accumulation time is around 17 minutes (17 * 60 * 1000 ms) at 1024 sps
+ * before the PAC1934 accumulation registers start to saturate
+ */
+#define PAC1934_MAX_RFSH_LIMIT_MS 60000
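+/*
+ * back-of-the-envelope for the 17-minute figure above, assuming the 48-bit
+ * VPOWER_ACC accumulates 28-bit VPOWER samples: 2^48 / 2^28 = 2^20
+ * accumulations before possible overflow, and 2^20 / 1024 sps = 1024 s,
+ * i.e. roughly 17 minutes
+ */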
+/* 50msec is the timeout for validity of the cached registers */
+#define PAC1934_MIN_POLLING_TIME_MS 50
+/*
+ * 1000usec is the minimum wait time for normal conversions when sample
+ * rate doesn't change
+ */
+#define PAC1934_MIN_UPDATE_WAIT_TIME_US 1000
+
+/* 32000mV */
+#define PAC1934_VOLTAGE_MILLIVOLTS_MAX 32000
+/* voltage bits resolution when set for unsigned values */
+#define PAC1934_VOLTAGE_U_RES 16
+/* voltage bits resolution when set for signed values */
+#define PAC1934_VOLTAGE_S_RES 15
+
+/*
+ * maximum signed value that fits in 32 bits plus an 8-digit fractional part:
+ * (2^31 - 1) * 10^8 + 99999999
+ */
+#define PAC_193X_MAX_POWER_ACC 214748364799999999LL
+/*
+ * minimum signed value that fits in 32 bits plus an 8-digit fractional part:
+ * -(2^31) * 10^8 - 99999999
+ */
+#define PAC_193X_MIN_POWER_ACC -214748364899999999LL
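+/*
+ * arithmetic check of the two bounds:
+ *  (2^31 - 1) * 10^8 + 99999999 = 2147483647 * 100000000 + 99999999
+ *                               =  214748364799999999
+ *     -(2^31) * 10^8 - 99999999 = -214748364800000000 - 99999999
+ *                               = -214748364899999999
+ */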
+
+#define PAC1934_MAX_NUM_CHANNELS 4
+
+#define PAC1934_MEAS_REG_LEN 76
+#define PAC1934_CTRL_REG_LEN 12
+
+#define PAC1934_DEFAULT_CHIP_SAMP_SPEED_HZ 1024
+
+/* I2C address map */
+#define PAC1934_REFRESH_REG_ADDR 0x00
+#define PAC1934_CTRL_REG_ADDR 0x01
+#define PAC1934_ACC_COUNT_REG_ADDR 0x02
+#define PAC1934_VPOWER_ACC_1_ADDR 0x03
+#define PAC1934_VPOWER_ACC_2_ADDR 0x04
+#define PAC1934_VPOWER_ACC_3_ADDR 0x05
+#define PAC1934_VPOWER_ACC_4_ADDR 0x06
+#define PAC1934_VBUS_1_ADDR 0x07
+#define PAC1934_VBUS_2_ADDR 0x08
+#define PAC1934_VBUS_3_ADDR 0x09
+#define PAC1934_VBUS_4_ADDR 0x0A
+#define PAC1934_VSENSE_1_ADDR 0x0B
+#define PAC1934_VSENSE_2_ADDR 0x0C
+#define PAC1934_VSENSE_3_ADDR 0x0D
+#define PAC1934_VSENSE_4_ADDR 0x0E
+#define PAC1934_VBUS_AVG_1_ADDR 0x0F
+#define PAC1934_VBUS_AVG_2_ADDR 0x10
+#define PAC1934_VBUS_AVG_3_ADDR 0x11
+#define PAC1934_VBUS_AVG_4_ADDR 0x12
+#define PAC1934_VSENSE_AVG_1_ADDR 0x13
+#define PAC1934_VSENSE_AVG_2_ADDR 0x14
+#define PAC1934_VSENSE_AVG_3_ADDR 0x15
+#define PAC1934_VSENSE_AVG_4_ADDR 0x16
+#define PAC1934_VPOWER_1_ADDR 0x17
+#define PAC1934_VPOWER_2_ADDR 0x18
+#define PAC1934_VPOWER_3_ADDR 0x19
+#define PAC1934_VPOWER_4_ADDR 0x1A
+#define PAC1934_REFRESH_V_REG_ADDR 0x1F
+#define PAC1934_CTRL_STAT_REGS_ADDR 0x1C
+#define PAC1934_PID_REG_ADDR 0xFD
+#define PAC1934_MID_REG_ADDR 0xFE
+#define PAC1934_RID_REG_ADDR 0xFF
+
+/* PRODUCT ID REGISTER + MANUFACTURER ID REGISTER + REVISION ID REGISTER */
+#define PAC1934_ID_REG_LEN 3
+#define PAC1934_PID_IDX 0
+#define PAC1934_MID_IDX 1
+#define PAC1934_RID_IDX 2
+
+#define PAC1934_ACPI_GET_NAMES_AND_MOHMS_VALS 1
+#define PAC1934_ACPI_GET_UOHMS_VALS 2
+#define PAC1934_ACPI_GET_BIPOLAR_SETTINGS 4
+#define PAC1934_ACPI_GET_SAMP 5
+
+#define PAC1934_SAMPLE_RATE_SHIFT 6
+
+#define PAC1934_VBUS_SENSE_REG_LEN 2
+#define PAC1934_ACC_REG_LEN 3
+#define PAC1934_VPOWER_REG_LEN 4
+#define PAC1934_VPOWER_ACC_REG_LEN 6
+#define PAC1934_MAX_REGISTER_LENGTH 6
+
+#define PAC1934_CUSTOM_ATTR_FOR_CHANNEL 1
+
+/*
+ * relative offsets used for multi-byte reads/writes; even though these
+ * bytes are read one after the other, they are not at adjacent memory
+ * locations within the I2C memory map, as the chip can skip some addresses
+ */
+#define PAC1934_CHANNEL_DIS_REG_OFF 0
+#define PAC1934_NEG_PWR_REG_OFF 1
+
+/*
+ * when reading/writing multiple bytes from offset PAC1934_CHANNEL_DIS_REG_OFF,
+ * the chip jumps over the 0x1E (REFRESH_G) and 0x1F (REFRESH_V) offsets
+ */
+#define PAC1934_SLOW_REG_OFF 2
+#define PAC1934_CTRL_ACT_REG_OFF 3
+#define PAC1934_CHANNEL_DIS_ACT_REG_OFF 4
+#define PAC1934_NEG_PWR_ACT_REG_OFF 5
+#define PAC1934_CTRL_LAT_REG_OFF 6
+#define PAC1934_CHANNEL_DIS_LAT_REG_OFF 7
+#define PAC1934_NEG_PWR_LAT_REG_OFF 8
+#define PAC1934_PID_REG_OFF 9
+#define PAC1934_MID_REG_OFF 10
+#define PAC1934_REV_REG_OFF 11
+#define PAC1934_CTRL_STATUS_INFO_LEN 12
+
+#define PAC1934_MID 0x5D
+#define PAC1931_PID 0x58
+#define PAC1932_PID 0x59
+#define PAC1933_PID 0x5A
+#define PAC1934_PID 0x5B
+
+/* Scale constant = (10^3 * 3.2 * 10^9 / 2^28) for milliwatt-second */
+#define PAC1934_SCALE_CONSTANT 11921
+
+#define PAC1934_MAX_VPOWER_RSHIFTED_BY_28B 11921
+#define PAC1934_MAX_VSENSE_RSHIFTED_BY_16B 1525
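+/*
+ * where the two full-scale constants come from (see the scale comments in
+ * pac1934_read_raw()):
+ *  PAC1934_MAX_VSENSE_RSHIFTED_BY_16B: (100 mV * 10^6) / 2^16
+ *      = 10^8 / 65536 ~ 1525.9, stored as 1525
+ *  PAC1934_MAX_VPOWER_RSHIFTED_BY_28B and PAC1934_SCALE_CONSTANT:
+ *      (10^3 * 3.2 * 10^9) / 2^28 ~ 11920.9, stored as 11921
+ */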
+
+#define PAC1934_DEV_ATTR(name) (&iio_dev_attr_##name.dev_attr.attr)
+
+#define PAC1934_CRTL_SAMPLE_RATE_MASK GENMASK(7, 6)
+#define PAC1934_CHAN_SLEEP_MASK BIT(5)
+#define PAC1934_CHAN_SLEEP_SET BIT(5)
+#define PAC1934_CHAN_SINGLE_MASK BIT(4)
+#define PAC1934_CHAN_SINGLE_SHOT_SET BIT(4)
+#define PAC1934_CHAN_ALERT_MASK BIT(3)
+#define PAC1934_CHAN_ALERT_EN BIT(3)
+#define PAC1934_CHAN_ALERT_CC_MASK BIT(2)
+#define PAC1934_CHAN_ALERT_CC_EN BIT(2)
+#define PAC1934_CHAN_OVF_ALERT_MASK BIT(1)
+#define PAC1934_CHAN_OVF_ALERT_EN BIT(1)
+#define PAC1934_CHAN_OVF_MASK BIT(0)
+
+#define PAC1934_CHAN_DIS_CH1_OFF_MASK BIT(7)
+#define PAC1934_CHAN_DIS_CH2_OFF_MASK BIT(6)
+#define PAC1934_CHAN_DIS_CH3_OFF_MASK BIT(5)
+#define PAC1934_CHAN_DIS_CH4_OFF_MASK BIT(4)
+#define PAC1934_SMBUS_TIMEOUT_MASK BIT(3)
+#define PAC1934_SMBUS_BYTECOUNT_MASK BIT(2)
+#define PAC1934_SMBUS_NO_SKIP_MASK BIT(1)
+
+#define PAC1934_NEG_PWR_CH1_BIDI_MASK BIT(7)
+#define PAC1934_NEG_PWR_CH2_BIDI_MASK BIT(6)
+#define PAC1934_NEG_PWR_CH3_BIDI_MASK BIT(5)
+#define PAC1934_NEG_PWR_CH4_BIDI_MASK BIT(4)
+#define PAC1934_NEG_PWR_CH1_BIDV_MASK BIT(3)
+#define PAC1934_NEG_PWR_CH2_BIDV_MASK BIT(2)
+#define PAC1934_NEG_PWR_CH3_BIDV_MASK BIT(1)
+#define PAC1934_NEG_PWR_CH4_BIDV_MASK BIT(0)
+
+/*
+ * Universal Unique Identifier (UUID),
+ * 033771E0-1705-47B4-9535-D1BBE14D9A09,
+ * is reserved to Microchip for the PAC1934.
+ */
+#define PAC1934_DSM_UUID "033771E0-1705-47B4-9535-D1BBE14D9A09"
+
+enum pac1934_ids {
+ PAC1931,
+ PAC1932,
+ PAC1933,
+ PAC1934
+};
+
+enum pac1934_samps {
+ PAC1934_SAMP_1024SPS,
+ PAC1934_SAMP_256SPS,
+ PAC1934_SAMP_64SPS,
+ PAC1934_SAMP_8SPS
+};
+
+/*
+ * these indexes describe the element order within a single PAC1934
+ * physical channel's IIO channel descriptor; see the static const struct
+ * iio_chan_spec pac1934_single_channel[] declaration
+ */
+enum pac1934_ch_idx {
+ PAC1934_CH_ENERGY,
+ PAC1934_CH_POWER,
+ PAC1934_CH_VOLTAGE,
+ PAC1934_CH_CURRENT,
+ PAC1934_CH_VOLTAGE_AVERAGE,
+ PAC1934_CH_CURRENT_AVERAGE
+};
+
+/**
+ * struct pac1934_features - features of a pac1934 instance
+ * @phys_channels: number of physical channels supported by the chip
+ * @name: chip's name
+ */
+struct pac1934_features {
+ u8 phys_channels;
+ const char *name;
+};
+
+struct samp_rate_mapping {
+ u16 samp_rate;
+ u8 shift2value;
+};
+
+static const unsigned int samp_rate_map_tbl[] = {
+ [PAC1934_SAMP_1024SPS] = 1024,
+ [PAC1934_SAMP_256SPS] = 256,
+ [PAC1934_SAMP_64SPS] = 64,
+ [PAC1934_SAMP_8SPS] = 8,
+};
+
+static const struct pac1934_features pac1934_chip_config[] = {
+ [PAC1931] = {
+ .phys_channels = 1,
+ .name = "pac1931",
+ },
+ [PAC1932] = {
+ .phys_channels = 2,
+ .name = "pac1932",
+ },
+ [PAC1933] = {
+ .phys_channels = 3,
+ .name = "pac1933",
+ },
+ [PAC1934] = {
+ .phys_channels = 4,
+ .name = "pac1934",
+ },
+};
+
+/**
+ * struct reg_data - data from the registers
+ * @meas_regs: snapshot of raw measurements registers
+ * @ctrl_regs: snapshot of control registers
+ * @energy_sec_acc: snapshot of energy values
+ * @vpower_acc: accumulated vpower values
+ * @vpower: snapshot of vpower registers
+ * @vbus: snapshot of vbus registers
+ * @vbus_avg: averages of vbus registers
+ * @vsense: snapshot of vsense registers
+ * @vsense_avg: averages of vsense registers
+ * @num_enabled_channels: count of how many chip channels are currently enabled
+ */
+struct reg_data {
+ u8 meas_regs[PAC1934_MEAS_REG_LEN];
+ u8 ctrl_regs[PAC1934_CTRL_REG_LEN];
+ s64 energy_sec_acc[PAC1934_MAX_NUM_CHANNELS];
+ s64 vpower_acc[PAC1934_MAX_NUM_CHANNELS];
+ s32 vpower[PAC1934_MAX_NUM_CHANNELS];
+ s32 vbus[PAC1934_MAX_NUM_CHANNELS];
+ s32 vbus_avg[PAC1934_MAX_NUM_CHANNELS];
+ s32 vsense[PAC1934_MAX_NUM_CHANNELS];
+ s32 vsense_avg[PAC1934_MAX_NUM_CHANNELS];
+ u8 num_enabled_channels;
+};
+
+/**
+ * struct pac1934_chip_info - information about the chip
+ * @client: the i2c-client attached to the device
+ * @lock: synchronize access to driver's state members
+ * @work_chip_rfsh: delayed work used for periodic refresh commands
+ * @phys_channels: phys channels count
+ * @active_channels: array of values, true means that channel is active
+ * @enable_energy: array of values, true means that channel energy is measured
+ * @bi_dir: array of bools, true means that channel is bidirectional
+ * @chip_variant: chip variant
+ * @chip_revision: chip revision
+ * @shunts: shunt resistor values, in microohms
+ * @chip_reg_data: cached snapshot of the chip registers
+ * @sample_rate_value: sampling frequency
+ * @labels: table with the channel labels
+ * @iio_info: per-device struct iio_info
+ * @tstamp: timestamp (jiffies) of the last register snapshot
+ */
+struct pac1934_chip_info {
+ struct i2c_client *client;
+ struct mutex lock; /* synchronize access to driver's state members */
+ struct delayed_work work_chip_rfsh;
+ u8 phys_channels;
+ bool active_channels[PAC1934_MAX_NUM_CHANNELS];
+ bool enable_energy[PAC1934_MAX_NUM_CHANNELS];
+ bool bi_dir[PAC1934_MAX_NUM_CHANNELS];
+ u8 chip_variant;
+ u8 chip_revision;
+ u32 shunts[PAC1934_MAX_NUM_CHANNELS];
+ struct reg_data chip_reg_data;
+ s32 sample_rate_value;
+ char *labels[PAC1934_MAX_NUM_CHANNELS];
+ struct iio_info iio_info;
+ unsigned long tstamp;
+};
+
+#define TO_PAC1934_CHIP_INFO(d) container_of(d, struct pac1934_chip_info, work_chip_rfsh)
+
+#define PAC1934_VPOWER_ACC_CHANNEL(_index, _si, _address) { \
+ .type = IIO_ENERGY, \
+ .address = (_address), \
+ .indexed = 1, \
+ .channel = (_index), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_ENABLE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 48, \
+ .storagebits = 64, \
+ .endianness = IIO_CPU, \
+ } \
+}
+
+#define PAC1934_VBUS_CHANNEL(_index, _si, _address) { \
+ .type = IIO_VOLTAGE, \
+ .address = (_address), \
+ .indexed = 1, \
+ .channel = (_index), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ } \
+}
+
+#define PAC1934_VBUS_AVG_CHANNEL(_index, _si, _address) { \
+ .type = IIO_VOLTAGE, \
+ .address = (_address), \
+ .indexed = 1, \
+ .channel = (_index), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_AVERAGE_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ } \
+}
+
+#define PAC1934_VSENSE_CHANNEL(_index, _si, _address) { \
+ .type = IIO_CURRENT, \
+ .address = (_address), \
+ .indexed = 1, \
+ .channel = (_index), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ } \
+}
+
+#define PAC1934_VSENSE_AVG_CHANNEL(_index, _si, _address) { \
+ .type = IIO_CURRENT, \
+ .address = (_address), \
+ .indexed = 1, \
+ .channel = (_index), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_AVERAGE_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ } \
+}
+
+#define PAC1934_VPOWER_CHANNEL(_index, _si, _address) { \
+ .type = IIO_POWER, \
+ .address = (_address), \
+ .indexed = 1, \
+ .channel = (_index), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 28, \
+ .storagebits = 32, \
+ .shift = 4, \
+ .endianness = IIO_CPU, \
+ } \
+}
+
+static const struct iio_chan_spec pac1934_single_channel[] = {
+ PAC1934_VPOWER_ACC_CHANNEL(0, 0, PAC1934_VPOWER_ACC_1_ADDR),
+ PAC1934_VPOWER_CHANNEL(0, 0, PAC1934_VPOWER_1_ADDR),
+ PAC1934_VBUS_CHANNEL(0, 0, PAC1934_VBUS_1_ADDR),
+ PAC1934_VSENSE_CHANNEL(0, 0, PAC1934_VSENSE_1_ADDR),
+ PAC1934_VBUS_AVG_CHANNEL(0, 0, PAC1934_VBUS_AVG_1_ADDR),
+ PAC1934_VSENSE_AVG_CHANNEL(0, 0, PAC1934_VSENSE_AVG_1_ADDR),
+};
+
+/* Low-level I2C functions used to transfer up to 76 bytes at once */
+static int pac1934_i2c_read(struct i2c_client *client, u8 reg_addr,
+ void *databuf, u8 len)
+{
+ int ret;
+ struct i2c_msg msgs[2] = {
+ {
+ .addr = client->addr,
+ .len = 1,
+ .buf = (u8 *)&reg_addr,
+ },
+ {
+ .addr = client->addr,
+ .len = len,
+ .buf = databuf,
+ .flags = I2C_M_RD
+ }
+ };
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int pac1934_get_samp_rate_idx(struct pac1934_chip_info *info,
+ u32 new_samp_rate)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < ARRAY_SIZE(samp_rate_map_tbl); cnt++)
+ if (new_samp_rate == samp_rate_map_tbl[cnt])
+ return cnt;
+
+ /* not a valid sample rate value */
+ return -EINVAL;
+}
+
+static ssize_t pac1934_shunt_value_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct pac1934_chip_info *info = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ return sysfs_emit(buf, "%u\n", info->shunts[this_attr->address]);
+}
+
+static ssize_t pac1934_shunt_value_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct pac1934_chip_info *info = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int sh_val;
+
+ if (kstrtouint(buf, 10, &sh_val)) {
+ dev_err(dev, "Shunt value is not valid\n");
+ return -EINVAL;
+ }
+
+ scoped_guard(mutex, &info->lock)
+ info->shunts[this_attr->address] = sh_val;
+
+ return count;
+}
+
+static int pac1934_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel,
+ const int **vals, int *type, int *length, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *type = IIO_VAL_INT;
+ *vals = samp_rate_map_tbl;
+ *length = ARRAY_SIZE(samp_rate_map_tbl);
+ return IIO_AVAIL_LIST;
+ }
+
+ return -EINVAL;
+}
+
+static int pac1934_send_refresh(struct pac1934_chip_info *info,
+ u8 refresh_cmd, u32 wait_time)
+{
+ /* this function only sends REFRESH or REFRESH_V */
+ struct i2c_client *client = info->client;
+ int ret;
+ u8 bidir_reg;
+ bool revision_bug = false;
+
+ if (info->chip_revision == 2 || info->chip_revision == 3) {
+ /*
+ * chip rev 2 and 3 bug workaround
+ * see: PAC1934 Family Data Sheet Errata DS80000836A.pdf
+ */
+ revision_bug = true;
+
+ bidir_reg =
+ FIELD_PREP(PAC1934_NEG_PWR_CH1_BIDI_MASK, info->bi_dir[0]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH2_BIDI_MASK, info->bi_dir[1]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH3_BIDI_MASK, info->bi_dir[2]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH4_BIDI_MASK, info->bi_dir[3]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH1_BIDV_MASK, info->bi_dir[0]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH2_BIDV_MASK, info->bi_dir[1]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH3_BIDV_MASK, info->bi_dir[2]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH4_BIDV_MASK, info->bi_dir[3]);
+
+ ret = i2c_smbus_write_byte_data(client,
+ PAC1934_CTRL_STAT_REGS_ADDR +
+ PAC1934_NEG_PWR_REG_OFF,
+ bidir_reg);
+ if (ret)
+ return ret;
+ }
+
+ ret = i2c_smbus_write_byte(client, refresh_cmd);
+ if (ret) {
+ dev_err(&client->dev, "%s - cannot send 0x%02X\n",
+ __func__, refresh_cmd);
+ return ret;
+ }
+
+ if (revision_bug) {
+ /*
+ * chip rev 2 and 3 bug workaround - write the same NEG_PWR
+ * register again, after the REFRESH, to latch the updated value
+ */
+ ret = i2c_smbus_write_byte_data(client,
+ PAC1934_CTRL_STAT_REGS_ADDR +
+ PAC1934_NEG_PWR_REG_OFF, bidir_reg);
+ if (ret)
+ return ret;
+ }
+
+ /* register data retrieval timestamp */
+ info->tstamp = jiffies;
+
+ /* wait till the data is available */
+ usleep_range(wait_time, wait_time + 100);
+
+ return ret;
+}
+
+static int pac1934_reg_snapshot(struct pac1934_chip_info *info,
+ bool do_refresh, u8 refresh_cmd, u32 wait_time)
+{
+ int ret;
+ struct i2c_client *client = info->client;
+ u8 samp_shift, ctrl_regs_tmp;
+ u8 *offset_reg_data_p;
+ u16 tmp_value;
+ u32 samp_rate, cnt, tmp;
+ s64 curr_energy, inc;
+ u64 tmp_energy;
+ struct reg_data *reg_data;
+
+ guard(mutex)(&info->lock);
+
+ if (do_refresh) {
+ ret = pac1934_send_refresh(info, refresh_cmd, wait_time);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s - cannot send refresh\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ ret = i2c_smbus_read_i2c_block_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
+ PAC1934_CTRL_REG_LEN,
+ (u8 *)info->chip_reg_data.ctrl_regs);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s - cannot read ctrl/status registers\n",
+ __func__);
+ return ret;
+ }
+
+ reg_data = &info->chip_reg_data;
+
+ /* read the data registers */
+ ret = pac1934_i2c_read(client, PAC1934_ACC_COUNT_REG_ADDR,
+ (u8 *)reg_data->meas_regs, PAC1934_MEAS_REG_LEN);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s - cannot read ACC_COUNT register: %d:%d\n",
+ __func__, ret, PAC1934_MEAS_REG_LEN);
+ return ret;
+ }
+
+ /* see how much shift is required by the sample rate */
+ samp_rate = samp_rate_map_tbl[((reg_data->ctrl_regs[PAC1934_CTRL_LAT_REG_OFF]) >> 6)];
+ samp_shift = get_count_order(samp_rate);
+
+ ctrl_regs_tmp = reg_data->ctrl_regs[PAC1934_CHANNEL_DIS_LAT_REG_OFF];
+ offset_reg_data_p = &reg_data->meas_regs[PAC1934_ACC_REG_LEN];
+
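+ /*
+ * channel-skip test used by all of the loops below: CHANNEL_DIS keeps
+ * CH1..CH4 in bits 7..4 (see PAC1934_CHAN_DIS_CHx_OFF_MASK), so
+ * (ctrl_regs_tmp << cnt) & 0x80 is non-zero exactly when channel 'cnt'
+ * is disabled and its fields are skipped in the block read
+ */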
+ /* start with VPOWER_ACC */
+ for (cnt = 0; cnt < info->phys_channels; cnt++) {
+ /* check if the channel is active, skip all fields if disabled */
+ if ((ctrl_regs_tmp << cnt) & 0x80)
+ continue;
+
+ /* skip if the energy accumulation is disabled */
+ if (info->enable_energy[cnt]) {
+ curr_energy = info->chip_reg_data.energy_sec_acc[cnt];
+
+ tmp_energy = get_unaligned_be48(offset_reg_data_p);
+
+ if (info->bi_dir[cnt])
+ reg_data->vpower_acc[cnt] = sign_extend64(tmp_energy, 47);
+ else
+ reg_data->vpower_acc[cnt] = tmp_energy;
+
+ /*
+ * compute the accumulated energy value scaled to 1 second;
+ * energy accumulator scaled to 1 sec = VPOWER_ACC / 2^samp_shift,
+ * since the chip's sampling rate is 2^samp_shift samples/sec
+ */
+ inc = (reg_data->vpower_acc[cnt] >> samp_shift);
+
+ /* add the power_acc field */
+ curr_energy += inc;
+
+ curr_energy = clamp(curr_energy, PAC_193X_MIN_POWER_ACC, PAC_193X_MAX_POWER_ACC);
+
+ reg_data->energy_sec_acc[cnt] = curr_energy;
+ }
+
+ offset_reg_data_p += PAC1934_VPOWER_ACC_REG_LEN;
+ }
+
+ /* continue with VBUS */
+ for (cnt = 0; cnt < info->phys_channels; cnt++) {
+ if ((ctrl_regs_tmp << cnt) & 0x80)
+ continue;
+
+ tmp_value = get_unaligned_be16(offset_reg_data_p);
+
+ if (info->bi_dir[cnt])
+ reg_data->vbus[cnt] = sign_extend32((u32)(tmp_value), 15);
+ else
+ reg_data->vbus[cnt] = tmp_value;
+
+ offset_reg_data_p += PAC1934_VBUS_SENSE_REG_LEN;
+ }
+
+ /* VSENSE */
+ for (cnt = 0; cnt < info->phys_channels; cnt++) {
+ if ((ctrl_regs_tmp << cnt) & 0x80)
+ continue;
+
+ tmp_value = get_unaligned_be16(offset_reg_data_p);
+
+ if (info->bi_dir[cnt])
+ reg_data->vsense[cnt] = sign_extend32((u32)(tmp_value), 15);
+ else
+ reg_data->vsense[cnt] = tmp_value;
+
+ offset_reg_data_p += PAC1934_VBUS_SENSE_REG_LEN;
+ }
+
+ /* VBUS_AVG */
+ for (cnt = 0; cnt < info->phys_channels; cnt++) {
+ if ((ctrl_regs_tmp << cnt) & 0x80)
+ continue;
+
+ tmp_value = get_unaligned_be16(offset_reg_data_p);
+
+ if (info->bi_dir[cnt])
+ reg_data->vbus_avg[cnt] = sign_extend32((u32)(tmp_value), 15);
+ else
+ reg_data->vbus_avg[cnt] = tmp_value;
+
+ offset_reg_data_p += PAC1934_VBUS_SENSE_REG_LEN;
+ }
+
+ /* VSENSE_AVG */
+ for (cnt = 0; cnt < info->phys_channels; cnt++) {
+ if ((ctrl_regs_tmp << cnt) & 0x80)
+ continue;
+
+ tmp_value = get_unaligned_be16(offset_reg_data_p);
+
+ if (info->bi_dir[cnt])
+ reg_data->vsense_avg[cnt] = sign_extend32((u32)(tmp_value), 15);
+ else
+ reg_data->vsense_avg[cnt] = tmp_value;
+
+ offset_reg_data_p += PAC1934_VBUS_SENSE_REG_LEN;
+ }
+
+ /* VPOWER */
+ for (cnt = 0; cnt < info->phys_channels; cnt++) {
+ if ((ctrl_regs_tmp << cnt) & 0x80)
+ continue;
+
+ tmp = get_unaligned_be32(offset_reg_data_p) >> 4;
+
+ if (info->bi_dir[cnt])
+ reg_data->vpower[cnt] = sign_extend32(tmp, 27);
+ else
+ reg_data->vpower[cnt] = tmp;
+
+ offset_reg_data_p += PAC1934_VPOWER_REG_LEN;
+ }
+
+ return 0;
+}
+
+static int pac1934_retrieve_data(struct pac1934_chip_info *info,
+ u32 wait_time)
+{
+ int ret = 0;
+
+ /*
+ * check if the minimal elapsed time has passed and if so,
+ * re-read the chip, otherwise the cached info is just fine
+ */
+ if (time_after(jiffies, info->tstamp + msecs_to_jiffies(PAC1934_MIN_POLLING_TIME_MS))) {
+ ret = pac1934_reg_snapshot(info, true, PAC1934_REFRESH_REG_ADDR,
+ wait_time);
+
+ /*
+ * Re-schedule the periodic register-read work on timeout
+ * (to prevent the chip's accumulation registers from saturating)
+ */
+ mod_delayed_work(system_wq, &info->work_chip_rfsh,
+ msecs_to_jiffies(PAC1934_MAX_RFSH_LIMIT_MS));
+ }
+
+ return ret;
+}
+
+static int pac1934_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct pac1934_chip_info *info = iio_priv(indio_dev);
+ s64 curr_energy;
+ int ret, channel = chan->channel - 1;
+
+ ret = pac1934_retrieve_data(info, PAC1934_MIN_UPDATE_WAIT_TIME_US);
+ if (ret < 0)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = info->chip_reg_data.vbus[channel];
+ return IIO_VAL_INT;
+ case IIO_CURRENT:
+ *val = info->chip_reg_data.vsense[channel];
+ return IIO_VAL_INT;
+ case IIO_POWER:
+ *val = info->chip_reg_data.vpower[channel];
+ return IIO_VAL_INT;
+ case IIO_ENERGY:
+ curr_energy = info->chip_reg_data.energy_sec_acc[channel];
+ *val = (u32)curr_energy;
+ *val2 = (u32)(curr_energy >> 32);
+ return IIO_VAL_INT_64;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_AVERAGE_RAW:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = info->chip_reg_data.vbus_avg[channel];
+ return IIO_VAL_INT;
+ case IIO_CURRENT:
+ *val = info->chip_reg_data.vsense_avg[channel];
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->address) {
+ /* Voltages - scale for millivolts */
+ case PAC1934_VBUS_1_ADDR:
+ case PAC1934_VBUS_2_ADDR:
+ case PAC1934_VBUS_3_ADDR:
+ case PAC1934_VBUS_4_ADDR:
+ case PAC1934_VBUS_AVG_1_ADDR:
+ case PAC1934_VBUS_AVG_2_ADDR:
+ case PAC1934_VBUS_AVG_3_ADDR:
+ case PAC1934_VBUS_AVG_4_ADDR:
+ *val = PAC1934_VOLTAGE_MILLIVOLTS_MAX;
+ if (chan->scan_type.sign == 'u')
+ *val2 = PAC1934_VOLTAGE_U_RES;
+ else
+ *val2 = PAC1934_VOLTAGE_S_RES;
+ return IIO_VAL_FRACTIONAL_LOG2;
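+ /*
+ * e.g. the unsigned 16-bit encoding gives 32000 mV / 2^16
+ * ~ 0.488 mV per LSB, and the bipolar 15-bit one gives
+ * 32000 mV / 2^15 ~ 0.977 mV per LSB
+ */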
+ /*
+ * Currents - scale for mA - depends on the
+ * channel's shunt value
+ * (100mV * 1000000) / (2^16 * shunt(uohm))
+ */
+ case PAC1934_VSENSE_1_ADDR:
+ case PAC1934_VSENSE_2_ADDR:
+ case PAC1934_VSENSE_3_ADDR:
+ case PAC1934_VSENSE_4_ADDR:
+ case PAC1934_VSENSE_AVG_1_ADDR:
+ case PAC1934_VSENSE_AVG_2_ADDR:
+ case PAC1934_VSENSE_AVG_3_ADDR:
+ case PAC1934_VSENSE_AVG_4_ADDR:
+ *val = PAC1934_MAX_VSENSE_RSHIFTED_BY_16B;
+ if (chan->scan_type.sign == 'u')
+ *val2 = info->shunts[channel];
+ else
+ *val2 = info->shunts[channel] >> 1;
+ return IIO_VAL_FRACTIONAL;
+ /*
+ * Power - uW - it will use the combined scale
+ * for current and voltage
+ * current(mA) * voltage(mV) = power (uW)
+ */
+ case PAC1934_VPOWER_1_ADDR:
+ case PAC1934_VPOWER_2_ADDR:
+ case PAC1934_VPOWER_3_ADDR:
+ case PAC1934_VPOWER_4_ADDR:
+ *val = PAC1934_MAX_VPOWER_RSHIFTED_BY_28B;
+ if (chan->scan_type.sign == 'u')
+ *val2 = info->shunts[channel];
+ else
+ *val2 = info->shunts[channel] >> 1;
+ return IIO_VAL_FRACTIONAL;
+ case PAC1934_VPOWER_ACC_1_ADDR:
+ case PAC1934_VPOWER_ACC_2_ADDR:
+ case PAC1934_VPOWER_ACC_3_ADDR:
+ case PAC1934_VPOWER_ACC_4_ADDR:
+ /*
+ * express the 32-bit scale value here; this computes
+ * the scale for energy (milliwatt-second, i.e. millijoule)
+ */
+ *val = PAC1934_SCALE_CONSTANT;
+
+ if (chan->scan_type.sign == 'u')
+ *val2 = info->shunts[channel];
+ else
+ *val2 = info->shunts[channel] >> 1;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = info->sample_rate_value;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_ENABLE:
+ *val = info->enable_energy[channel];
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pac1934_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct pac1934_chip_info *info = iio_priv(indio_dev);
+ struct i2c_client *client = info->client;
+ int ret = -EINVAL;
+ s32 old_samp_rate;
+ u8 ctrl_reg;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = pac1934_get_samp_rate_idx(info, val);
+ if (ret < 0)
+ return ret;
+
+ /* write the new sampling value and trigger a snapshot(incl refresh) */
+ scoped_guard(mutex, &info->lock) {
+ ctrl_reg = FIELD_PREP(PAC1934_CRTL_SAMPLE_RATE_MASK, ret);
+ ret = i2c_smbus_write_byte_data(client, PAC1934_CTRL_REG_ADDR, ctrl_reg);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s - can't update sample rate\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ old_samp_rate = info->sample_rate_value;
+ info->sample_rate_value = val;
+
+ /*
+ * now force a snapshot with refresh: backdate the cached
+ * timestamp so that pac1934_retrieve_data() triggers a new
+ * register snapshot and updates the refresh timer
+ */
+ info->tstamp -= msecs_to_jiffies(PAC1934_MIN_POLLING_TIME_MS);
+ ret = pac1934_retrieve_data(info, (1024 / old_samp_rate) * 1000);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s - cannot snapshot ctrl and measurement regs\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+ case IIO_CHAN_INFO_ENABLE:
+ scoped_guard(mutex, &info->lock) {
+ info->enable_energy[chan->channel - 1] = val ? true : false;
+ if (!val)
+ info->chip_reg_data.energy_sec_acc[chan->channel - 1] = 0;
+ }
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pac1934_read_label(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, char *label)
+{
+ struct pac1934_chip_info *info = iio_priv(indio_dev);
+
+ switch (chan->address) {
+ case PAC1934_VBUS_1_ADDR:
+ case PAC1934_VBUS_2_ADDR:
+ case PAC1934_VBUS_3_ADDR:
+ case PAC1934_VBUS_4_ADDR:
+ return sysfs_emit(label, "%s_VBUS_%d\n",
+ info->labels[chan->scan_index],
+ chan->scan_index + 1);
+ case PAC1934_VBUS_AVG_1_ADDR:
+ case PAC1934_VBUS_AVG_2_ADDR:
+ case PAC1934_VBUS_AVG_3_ADDR:
+ case PAC1934_VBUS_AVG_4_ADDR:
+ return sysfs_emit(label, "%s_VBUS_AVG_%d\n",
+ info->labels[chan->scan_index],
+ chan->scan_index + 1);
+ case PAC1934_VSENSE_1_ADDR:
+ case PAC1934_VSENSE_2_ADDR:
+ case PAC1934_VSENSE_3_ADDR:
+ case PAC1934_VSENSE_4_ADDR:
+ return sysfs_emit(label, "%s_IBUS_%d\n",
+ info->labels[chan->scan_index],
+ chan->scan_index + 1);
+ case PAC1934_VSENSE_AVG_1_ADDR:
+ case PAC1934_VSENSE_AVG_2_ADDR:
+ case PAC1934_VSENSE_AVG_3_ADDR:
+ case PAC1934_VSENSE_AVG_4_ADDR:
+ return sysfs_emit(label, "%s_IBUS_AVG_%d\n",
+ info->labels[chan->scan_index],
+ chan->scan_index + 1);
+ case PAC1934_VPOWER_1_ADDR:
+ case PAC1934_VPOWER_2_ADDR:
+ case PAC1934_VPOWER_3_ADDR:
+ case PAC1934_VPOWER_4_ADDR:
+ return sysfs_emit(label, "%s_POWER_%d\n",
+ info->labels[chan->scan_index],
+ chan->scan_index + 1);
+ case PAC1934_VPOWER_ACC_1_ADDR:
+ case PAC1934_VPOWER_ACC_2_ADDR:
+ case PAC1934_VPOWER_ACC_3_ADDR:
+ case PAC1934_VPOWER_ACC_4_ADDR:
+ return sysfs_emit(label, "%s_ENERGY_%d\n",
+ info->labels[chan->scan_index],
+ chan->scan_index + 1);
+ }
+
+ return 0;
+}
+
+static void pac1934_work_periodic_rfsh(struct work_struct *work)
+{
+ struct pac1934_chip_info *info = TO_PAC1934_CHIP_INFO((struct delayed_work *)work);
+ struct device *dev = &info->client->dev;
+
+ dev_dbg(dev, "%s - Periodic refresh\n", __func__);
+
+ /* do a REFRESH, then read */
+ pac1934_reg_snapshot(info, true, PAC1934_REFRESH_REG_ADDR,
+ PAC1934_MIN_UPDATE_WAIT_TIME_US);
+
+ schedule_delayed_work(&info->work_chip_rfsh,
+ msecs_to_jiffies(PAC1934_MAX_RFSH_LIMIT_MS));
+}
+
+static int pac1934_read_revision(struct pac1934_chip_info *info, u8 *buf)
+{
+ int ret;
+ struct i2c_client *client = info->client;
+
+ ret = i2c_smbus_read_i2c_block_data(client, PAC1934_PID_REG_ADDR,
+ PAC1934_ID_REG_LEN,
+ buf);
+ if (ret < 0) {
+ dev_err(&client->dev, "cannot read revision\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pac1934_chip_identify(struct pac1934_chip_info *info)
+{
+ u8 rev_info[PAC1934_ID_REG_LEN];
+ struct device *dev = &info->client->dev;
+ int ret = 0;
+
+ ret = pac1934_read_revision(info, (u8 *)rev_info);
+ if (ret)
+ return ret;
+
+ info->chip_variant = rev_info[PAC1934_PID_IDX];
+ info->chip_revision = rev_info[PAC1934_RID_IDX];
+
+ dev_dbg(dev, "Chip variant: 0x%02X\n", info->chip_variant);
+ dev_dbg(dev, "Chip revision: 0x%02X\n", info->chip_revision);
+
+ switch (info->chip_variant) {
+ case PAC1934_PID:
+ return PAC1934;
+ case PAC1933_PID:
+ return PAC1933;
+ case PAC1932_PID:
+ return PAC1932;
+ case PAC1931_PID:
+ return PAC1931;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * documentation related to the ACPI device definition
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/OTH/ApplicationNotes/ApplicationNotes/PAC1934-Integration-Notes-for-Microsoft-Windows-10-and-Windows-11-Driver-Support-DS00002534.pdf
+ */
+static bool pac1934_acpi_parse_channel_config(struct i2c_client *client,
+ struct pac1934_chip_info *info)
+{
+ acpi_handle handle;
+ union acpi_object *rez;
+ struct device *dev = &client->dev;
+ unsigned short bi_dir_mask;
+ int idx, i;
+ guid_t guid;
+
+ handle = ACPI_HANDLE(dev);
+
+ guid_parse(PAC1934_DSM_UUID, &guid);
+
+ rez = acpi_evaluate_dsm(handle, &guid, 0, PAC1934_ACPI_GET_NAMES_AND_MOHMS_VALS, NULL);
+ if (!rez)
+ return false;
+
+ for (i = 0; i < rez->package.count; i += 2) {
+ idx = i / 2;
+ info->labels[idx] =
+ devm_kmemdup(dev, rez->package.elements[i].string.pointer,
+ (size_t)rez->package.elements[i].string.length + 1,
+ GFP_KERNEL);
+ info->labels[idx][rez->package.elements[i].string.length] = '\0';
+ info->shunts[idx] = rez->package.elements[i + 1].integer.value * 1000;
+ info->active_channels[idx] = (info->shunts[idx] != 0);
+ }
+
+ ACPI_FREE(rez);
+
+ rez = acpi_evaluate_dsm(handle, &guid, 1, PAC1934_ACPI_GET_UOHMS_VALS, NULL);
+ if (!rez) {
+ /*
+ * initialize with default values: assume all channels are
+ * unidirectional (the mask is zero) and assign the default
+ * sampling rate
+ */
+ info->sample_rate_value = PAC1934_DEFAULT_CHIP_SAMP_SPEED_HZ;
+ return true;
+ }
+
+ for (i = 0; i < rez->package.count; i++) {
+ idx = i;
+ info->shunts[idx] = rez->package.elements[i].integer.value;
+ info->active_channels[idx] = (info->shunts[idx] != 0);
+ }
+
+ ACPI_FREE(rez);
+
+ rez = acpi_evaluate_dsm(handle, &guid, 1, PAC1934_ACPI_GET_BIPOLAR_SETTINGS, NULL);
+ if (!rez)
+ return false;
+
+ bi_dir_mask = rez->package.elements[0].integer.value;
+ info->bi_dir[0] = ((bi_dir_mask & (1 << 3)) | (bi_dir_mask & (1 << 7))) != 0;
+ info->bi_dir[1] = ((bi_dir_mask & (1 << 2)) | (bi_dir_mask & (1 << 6))) != 0;
+ info->bi_dir[2] = ((bi_dir_mask & (1 << 1)) | (bi_dir_mask & (1 << 5))) != 0;
+ info->bi_dir[3] = ((bi_dir_mask & (1 << 0)) | (bi_dir_mask & (1 << 4))) != 0;
+
+ ACPI_FREE(rez);
+
+ rez = acpi_evaluate_dsm(handle, &guid, 1, PAC1934_ACPI_GET_SAMP, NULL);
+ if (!rez)
+ return false;
+
+ info->sample_rate_value = rez->package.elements[0].integer.value;
+
+ ACPI_FREE(rez);
+
+ return true;
+}
+
+static bool pac1934_of_parse_channel_config(struct i2c_client *client,
+ struct pac1934_chip_info *info)
+{
+ struct fwnode_handle *node, *fwnode;
+ struct device *dev = &client->dev;
+ unsigned int current_channel;
+ int idx, ret;
+
+ info->sample_rate_value = 1024;
+ current_channel = 1;
+
+ fwnode = dev_fwnode(dev);
+ fwnode_for_each_available_child_node(fwnode, node) {
+ ret = fwnode_property_read_u32(node, "reg", &idx);
+ if (ret) {
+ dev_err_probe(dev, ret,
+ "reading invalid channel index\n");
+ goto err_fwnode;
+ }
+ /* reg holds the 1-based channel index (1 to 4) from the datasheet; convert to 0-based */
+ idx--;
+
+ if (current_channel >= (info->phys_channels + 1) ||
+ idx >= info->phys_channels || idx < 0) {
+ dev_err_probe(dev, -EINVAL,
+ "%s: invalid channel_index %d value\n",
+ fwnode_get_name(node), idx);
+ goto err_fwnode;
+ }
+
+ /* enable channel */
+ info->active_channels[idx] = true;
+
+ ret = fwnode_property_read_u32(node, "shunt-resistor-micro-ohms",
+ &info->shunts[idx]);
+ if (ret) {
+ dev_err_probe(dev, ret,
+ "%s: invalid shunt-resistor value: %d\n",
+ fwnode_get_name(node), info->shunts[idx]);
+ goto err_fwnode;
+ }
+
+ if (fwnode_property_present(node, "label")) {
+ ret = fwnode_property_read_string(node, "label",
+ (const char **)&info->labels[idx]);
+ if (ret) {
+ dev_err_probe(dev, ret,
+ "%s: invalid rail-name value\n",
+ fwnode_get_name(node));
+ goto err_fwnode;
+ }
+ }
+
+ info->bi_dir[idx] = fwnode_property_read_bool(node, "bipolar");
+
+ current_channel++;
+ }
+
+ return true;
+
+err_fwnode:
+ fwnode_handle_put(node);
+
+ return false;
+}
+
+static void pac1934_cancel_delayed_work(void *dwork)
+{
+ cancel_delayed_work_sync(dwork);
+}
+
+static int pac1934_chip_configure(struct pac1934_chip_info *info)
+{
+ int cnt, ret;
+ struct i2c_client *client = info->client;
+ u8 regs[PAC1934_CTRL_STATUS_INFO_LEN], idx, ctrl_reg;
+ u32 wait_time;
+
+ info->chip_reg_data.num_enabled_channels = 0;
+ for (cnt = 0; cnt < info->phys_channels; cnt++) {
+ if (info->active_channels[cnt])
+ info->chip_reg_data.num_enabled_channels++;
+ }
+
+ /*
+ * Read whatever information was gathered before the driver was loaded,
+ * establish which channels are enabled/disabled and then the
+ * information retrieval mode (using SKIP or not).
+ * Also read the chip ID values.
+ */
+ ret = i2c_smbus_read_i2c_block_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
+ ARRAY_SIZE(regs),
+ (u8 *)regs);
+ if (ret < 0) {
+ dev_err_probe(&client->dev, ret,
+ "%s - cannot read regs from 0x%02X\n",
+ __func__, PAC1934_CTRL_STAT_REGS_ADDR);
+ return ret;
+ }
+
+ /* write the CHANNEL_DIS and the NEG_PWR registers */
+ regs[PAC1934_CHANNEL_DIS_REG_OFF] =
+ FIELD_PREP(PAC1934_CHAN_DIS_CH1_OFF_MASK, info->active_channels[0] ? 0 : 1) |
+ FIELD_PREP(PAC1934_CHAN_DIS_CH2_OFF_MASK, info->active_channels[1] ? 0 : 1) |
+ FIELD_PREP(PAC1934_CHAN_DIS_CH3_OFF_MASK, info->active_channels[2] ? 0 : 1) |
+ FIELD_PREP(PAC1934_CHAN_DIS_CH4_OFF_MASK, info->active_channels[3] ? 0 : 1) |
+ FIELD_PREP(PAC1934_SMBUS_TIMEOUT_MASK, 0) |
+ FIELD_PREP(PAC1934_SMBUS_BYTECOUNT_MASK, 0) |
+ FIELD_PREP(PAC1934_SMBUS_NO_SKIP_MASK, 0);
+
+ regs[PAC1934_NEG_PWR_REG_OFF] =
+ FIELD_PREP(PAC1934_NEG_PWR_CH1_BIDI_MASK, info->bi_dir[0]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH2_BIDI_MASK, info->bi_dir[1]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH3_BIDI_MASK, info->bi_dir[2]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH4_BIDI_MASK, info->bi_dir[3]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH1_BIDV_MASK, info->bi_dir[0]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH2_BIDV_MASK, info->bi_dir[1]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH3_BIDV_MASK, info->bi_dir[2]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH4_BIDV_MASK, info->bi_dir[3]);
+
+ /* no SLOW triggered REFRESH, clear POR */
+ regs[PAC1934_SLOW_REG_OFF] = 0;
+
+ ret = i2c_smbus_write_block_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
+ ARRAY_SIZE(regs), (u8 *)regs);
+ if (ret)
+ return ret;
+
+ /* Default sampling rate */
+ ctrl_reg = FIELD_PREP(PAC1934_CRTL_SAMPLE_RATE_MASK, PAC1934_SAMP_1024SPS);
+
+ ret = i2c_smbus_write_byte_data(client, PAC1934_CTRL_REG_ADDR, ctrl_reg);
+ if (ret)
+ return ret;
+
+ /*
+ * send a REFRESH to the chip, so the new settings take place
+ * as well as resetting the accumulators
+ */
+ ret = i2c_smbus_write_byte(client, PAC1934_REFRESH_REG_ADDR);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s - cannot send 0x%02X\n",
+ __func__, PAC1934_REFRESH_REG_ADDR);
+ return ret;
+ }
+
+ /*
+ * Get the current (in-chip) sampling speed and compute the required
+ * wait time from it; the wait time is 1/sampling_speed.
+ */
+ idx = regs[PAC1934_CTRL_ACT_REG_OFF] >> PAC1934_SAMPLE_RATE_SHIFT;
+ wait_time = (1024 / samp_rate_map_tbl[idx]) * 1000;
+
+ /*
+ * Wait the maximum amount of time to be on the safe side;
+ * the longest wait time corresponds to 8 sps.
+ */
+ usleep_range(wait_time, wait_time + 100);
+
+ INIT_DELAYED_WORK(&info->work_chip_rfsh, pac1934_work_periodic_rfsh);
+ /* Set up the latest moment for reading the regs before they saturate */
+ schedule_delayed_work(&info->work_chip_rfsh,
+ msecs_to_jiffies(PAC1934_MAX_RFSH_LIMIT_MS));
+
+ return devm_add_action_or_reset(&client->dev, pac1934_cancel_delayed_work,
+ &info->work_chip_rfsh);
+}
+
+static int pac1934_prep_iio_channels(struct pac1934_chip_info *info, struct iio_dev *indio_dev)
+{
+ struct iio_chan_spec *ch_sp;
+ int channel_size, attribute_count, cnt;
+ void *dyn_ch_struct, *tmp_data;
+ struct device *dev = &info->client->dev;
+
+ /* find out dynamically how many IIO channels we need */
+ attribute_count = 0;
+ channel_size = 0;
+ for (cnt = 0; cnt < info->phys_channels; cnt++) {
+ if (!info->active_channels[cnt])
+ continue;
+
+ /* add the size of the properties of one chip physical channel */
+ channel_size += sizeof(pac1934_single_channel);
+ /* count how many enabled channels we have */
+ attribute_count += ARRAY_SIZE(pac1934_single_channel);
+ dev_dbg(dev, ":%s: Channel %d active\n", __func__, cnt + 1);
+ }
+
+ dyn_ch_struct = devm_kzalloc(dev, channel_size, GFP_KERNEL);
+ if (!dyn_ch_struct)
+ return -ENOMEM;
+
+ tmp_data = dyn_ch_struct;
+
+ /* populate the dynamic channels and make all the adjustments */
+ for (cnt = 0; cnt < info->phys_channels; cnt++) {
+ if (!info->active_channels[cnt])
+ continue;
+
+ memcpy(tmp_data, pac1934_single_channel, sizeof(pac1934_single_channel));
+ ch_sp = (struct iio_chan_spec *)tmp_data;
+ ch_sp[PAC1934_CH_ENERGY].channel = cnt + 1;
+ ch_sp[PAC1934_CH_ENERGY].scan_index = cnt;
+ ch_sp[PAC1934_CH_ENERGY].address = cnt + PAC1934_VPOWER_ACC_1_ADDR;
+ ch_sp[PAC1934_CH_POWER].channel = cnt + 1;
+ ch_sp[PAC1934_CH_POWER].scan_index = cnt;
+ ch_sp[PAC1934_CH_POWER].address = cnt + PAC1934_VPOWER_1_ADDR;
+ ch_sp[PAC1934_CH_VOLTAGE].channel = cnt + 1;
+ ch_sp[PAC1934_CH_VOLTAGE].scan_index = cnt;
+ ch_sp[PAC1934_CH_VOLTAGE].address = cnt + PAC1934_VBUS_1_ADDR;
+ ch_sp[PAC1934_CH_CURRENT].channel = cnt + 1;
+ ch_sp[PAC1934_CH_CURRENT].scan_index = cnt;
+ ch_sp[PAC1934_CH_CURRENT].address = cnt + PAC1934_VSENSE_1_ADDR;
+
+ /*
+ * In order to be able to use labels for PAC1934_CH_VOLTAGE and
+ * PAC1934_CH_VOLTAGE_AVERAGE, respectively PAC1934_CH_CURRENT
+ * and PAC1934_CH_CURRENT_AVERAGE, we need different channel
+ * numbers. Add +5 (one more than the maximum PAC channel count).
+ */
+ ch_sp[PAC1934_CH_VOLTAGE_AVERAGE].channel = cnt + 5;
+ ch_sp[PAC1934_CH_VOLTAGE_AVERAGE].scan_index = cnt;
+ ch_sp[PAC1934_CH_VOLTAGE_AVERAGE].address = cnt + PAC1934_VBUS_AVG_1_ADDR;
+ ch_sp[PAC1934_CH_CURRENT_AVERAGE].channel = cnt + 5;
+ ch_sp[PAC1934_CH_CURRENT_AVERAGE].scan_index = cnt;
+ ch_sp[PAC1934_CH_CURRENT_AVERAGE].address = cnt + PAC1934_VSENSE_AVG_1_ADDR;
+
+ /*
+ * now modify the parameters in all channels if the
+ * whole chip rail(channel) is bi-directional
+ */
+ if (info->bi_dir[cnt]) {
+ ch_sp[PAC1934_CH_ENERGY].scan_type.sign = 's';
+ ch_sp[PAC1934_CH_ENERGY].scan_type.realbits = 47;
+ ch_sp[PAC1934_CH_POWER].scan_type.sign = 's';
+ ch_sp[PAC1934_CH_POWER].scan_type.realbits = 27;
+ ch_sp[PAC1934_CH_VOLTAGE].scan_type.sign = 's';
+ ch_sp[PAC1934_CH_VOLTAGE].scan_type.realbits = 15;
+ ch_sp[PAC1934_CH_CURRENT].scan_type.sign = 's';
+ ch_sp[PAC1934_CH_CURRENT].scan_type.realbits = 15;
+ ch_sp[PAC1934_CH_VOLTAGE_AVERAGE].scan_type.sign = 's';
+ ch_sp[PAC1934_CH_VOLTAGE_AVERAGE].scan_type.realbits = 15;
+ ch_sp[PAC1934_CH_CURRENT_AVERAGE].scan_type.sign = 's';
+ ch_sp[PAC1934_CH_CURRENT_AVERAGE].scan_type.realbits = 15;
+ }
+ tmp_data += sizeof(pac1934_single_channel);
+ }
+
+ /*
+ * send the updated dynamic channel structure information towards IIO
+ * prepare the required field for IIO class registration
+ */
+ indio_dev->num_channels = attribute_count;
+ indio_dev->channels = (const struct iio_chan_spec *)dyn_ch_struct;
+
+ return 0;
+}
+
+static IIO_DEVICE_ATTR(in_shunt_resistor1, 0644,
+ pac1934_shunt_value_show, pac1934_shunt_value_store, 0);
+static IIO_DEVICE_ATTR(in_shunt_resistor2, 0644,
+ pac1934_shunt_value_show, pac1934_shunt_value_store, 1);
+static IIO_DEVICE_ATTR(in_shunt_resistor3, 0644,
+ pac1934_shunt_value_show, pac1934_shunt_value_store, 2);
+static IIO_DEVICE_ATTR(in_shunt_resistor4, 0644,
+ pac1934_shunt_value_show, pac1934_shunt_value_store, 3);
+
+static int pac1934_prep_custom_attributes(struct pac1934_chip_info *info,
+ struct iio_dev *indio_dev)
+{
+ int i, active_channels_count = 0;
+ struct attribute **pac1934_custom_attr;
+ struct attribute_group *pac1934_group;
+ struct device *dev = &info->client->dev;
+
+ for (i = 0 ; i < info->phys_channels; i++)
+ if (info->active_channels[i])
+ active_channels_count++;
+
+ pac1934_group = devm_kzalloc(dev, sizeof(*pac1934_group), GFP_KERNEL);
+ if (!pac1934_group)
+ return -ENOMEM;
+
+ pac1934_custom_attr = devm_kcalloc(dev,
+ PAC1934_CUSTOM_ATTR_FOR_CHANNEL *
+ active_channels_count + 1,
+ sizeof(*pac1934_custom_attr),
+ GFP_KERNEL);
+ if (!pac1934_custom_attr)
+ return -ENOMEM;
+
+ i = 0;
+ if (info->active_channels[0])
+ pac1934_custom_attr[i++] = PAC1934_DEV_ATTR(in_shunt_resistor1);
+
+ if (info->active_channels[1])
+ pac1934_custom_attr[i++] = PAC1934_DEV_ATTR(in_shunt_resistor2);
+
+ if (info->active_channels[2])
+ pac1934_custom_attr[i++] = PAC1934_DEV_ATTR(in_shunt_resistor3);
+
+ if (info->active_channels[3])
+ pac1934_custom_attr[i] = PAC1934_DEV_ATTR(in_shunt_resistor4);
+
+ pac1934_group->attrs = pac1934_custom_attr;
+ info->iio_info.attrs = pac1934_group;
+
+ return 0;
+}
+
+static void pac1934_mutex_destroy(void *data)
+{
+ struct mutex *lock = data;
+
+ mutex_destroy(lock);
+}
+
+static const struct iio_info pac1934_info = {
+ .read_raw = pac1934_read_raw,
+ .write_raw = pac1934_write_raw,
+ .read_avail = pac1934_read_avail,
+ .read_label = pac1934_read_label,
+};
+
+static int pac1934_probe(struct i2c_client *client)
+{
+ struct pac1934_chip_info *info;
+ const struct pac1934_features *chip;
+ struct iio_dev *indio_dev;
+ int cnt, ret;
+ bool match = false;
+ struct device *dev = &client->dev;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*info));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ info = iio_priv(indio_dev);
+
+ info->client = client;
+
+ /* always start with energy accumulation enabled */
+ for (cnt = 0; cnt < PAC1934_MAX_NUM_CHANNELS; cnt++)
+ info->enable_energy[cnt] = true;
+
+ ret = pac1934_chip_identify(info);
+ if (ret < 0) {
+ /*
+ * If the hardware could not be identified from its internal
+ * registers, fall back to the compatible string in the device
+ * tree to handle newer part numbers.
+ */
+ chip = i2c_get_match_data(client);
+ if (!chip)
+ return -EINVAL;
+
+ info->phys_channels = chip->phys_channels;
+ indio_dev->name = chip->name;
+ } else {
+ info->phys_channels = pac1934_chip_config[ret].phys_channels;
+ indio_dev->name = pac1934_chip_config[ret].name;
+ }
+
+ if (acpi_match_device(dev->driver->acpi_match_table, dev))
+ match = pac1934_acpi_parse_channel_config(client, info);
+ else
+ /*
+ * This also makes it possible to use ACPI PRP0001 for
+ * registering the device using device tree properties.
+ */
+ match = pac1934_of_parse_channel_config(client, info);
+
+ if (!match)
+ return dev_err_probe(dev, -EINVAL,
+ "parameter parsing returned an error\n");
+
+ mutex_init(&info->lock);
+ ret = devm_add_action_or_reset(dev, pac1934_mutex_destroy,
+ &info->lock);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Now do any chip-specific initialization (e.g. read/write some
+ * registers), enable/disable certain channels and change the sampling
+ * rate to the requested value.
+ */
+ ret = pac1934_chip_configure(info);
+ if (ret < 0)
+ return ret;
+
+ /* prepare the channel information */
+ ret = pac1934_prep_iio_channels(info, indio_dev);
+ if (ret < 0)
+ return ret;
+
+ info->iio_info = pac1934_info;
+ indio_dev->info = &info->iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = pac1934_prep_custom_attributes(info, indio_dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Can't configure custom attributes for PAC1934 device\n");
+
+ /*
+ * read whatever has been accumulated in the chip so far
+ * and reset the accumulators
+ */
+ ret = pac1934_reg_snapshot(info, true, PAC1934_REFRESH_REG_ADDR,
+ PAC1934_MIN_UPDATE_WAIT_TIME_US);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Can't register IIO device\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id pac1934_id[] = {
+ { .name = "pac1931", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1931] },
+ { .name = "pac1932", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1932] },
+ { .name = "pac1933", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1933] },
+ { .name = "pac1934", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1934] },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, pac1934_id);
+
+static const struct of_device_id pac1934_of_match[] = {
+ {
+ .compatible = "microchip,pac1931",
+ .data = &pac1934_chip_config[PAC1931]
+ },
+ {
+ .compatible = "microchip,pac1932",
+ .data = &pac1934_chip_config[PAC1932]
+ },
+ {
+ .compatible = "microchip,pac1933",
+ .data = &pac1934_chip_config[PAC1933]
+ },
+ {
+ .compatible = "microchip,pac1934",
+ .data = &pac1934_chip_config[PAC1934]
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pac1934_of_match);
+
+/*
+ * Use MCHP1930 to stay compatible with existing BIOS ACPI tables. See example:
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/OTH/ApplicationNotes/ApplicationNotes/PAC1934-Integration-Notes-for-Microsoft-Windows-10-and-Windows-11-Driver-Support-DS00002534.pdf
+ */
+static const struct acpi_device_id pac1934_acpi_match[] = {
+ { "MCHP1930", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1934] },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, pac1934_acpi_match);
+
+static struct i2c_driver pac1934_driver = {
+ .driver = {
+ .name = "pac1934",
+ .of_match_table = pac1934_of_match,
+ .acpi_match_table = pac1934_acpi_match
+ },
+ .probe = pac1934_probe,
+ .id_table = pac1934_id,
+};
+
+module_i2c_driver(pac1934_driver);
+
+MODULE_AUTHOR("Bogdan Bolocan <bogdan.bolocan@microchip.com>");
+MODULE_AUTHOR("Victor Tudose");
+MODULE_AUTHOR("Marius Cristea <marius.cristea@microchip.com>");
+MODULE_DESCRIPTION("IIO driver for PAC1934 Multi-Channel DC Power/Energy Monitor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
index 01c5586df56dc..c9d2c66434e4f 100644
--- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c
+++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
@@ -372,7 +372,6 @@ static const struct xoadc_channel pm8921_xoadc_channels[] = {
* @name: name of this channel
* @hwchan: pointer to hardware channel information (muxing & scaling settings)
* @calibration: whether to use absolute or ratiometric calibration
- * @scale_fn_type: scaling function type
* @decimation: 0,1,2,3
* @amux_ip_rsv: ratiometric scale value if using ratiometric
* calibration: 0, 1, 2, 4, 5.
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index dd94667a623bd..bbe954a738c7d 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -52,7 +52,7 @@
#define SARADC2_START BIT(4)
#define SARADC2_SINGLE_MODE BIT(5)
-#define SARADC2_CONV_CHANNELS GENMASK(15, 0)
+#define SARADC2_CONV_CHANNELS GENMASK(3, 0)
struct rockchip_saradc;
@@ -102,12 +102,12 @@ static void rockchip_saradc_start_v2(struct rockchip_saradc *info, int chn)
writel_relaxed(0xc, info->regs + SARADC_T_DAS_SOC);
writel_relaxed(0x20, info->regs + SARADC_T_PD_SOC);
val = FIELD_PREP(SARADC2_EN_END_INT, 1);
- val |= val << 16;
+ val |= SARADC2_EN_END_INT << 16;
writel_relaxed(val, info->regs + SARADC2_END_INT_EN);
val = FIELD_PREP(SARADC2_START, 1) |
FIELD_PREP(SARADC2_SINGLE_MODE, 1) |
FIELD_PREP(SARADC2_CONV_CHANNELS, chn);
- val |= val << 16;
+ val |= (SARADC2_START | SARADC2_SINGLE_MODE | SARADC2_CONV_CHANNELS) << 16;
writel(val, info->regs + SARADC2_CONV_CON);
}
@@ -450,16 +450,11 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
* The reset should be an optional property, as it should work
* with old devicetrees as well
*/
- info->reset = devm_reset_control_get_exclusive(&pdev->dev,
- "saradc-apb");
+ info->reset = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "saradc-apb");
if (IS_ERR(info->reset)) {
ret = PTR_ERR(info->reset);
- if (ret != -ENOENT)
- return dev_err_probe(&pdev->dev, ret,
- "failed to get saradc-apb\n");
-
- dev_dbg(&pdev->dev, "no reset control found\n");
- info->reset = NULL;
+ return dev_err_probe(&pdev->dev, ret, "failed to get saradc-apb\n");
}
init_completion(&info->completion);
diff --git a/drivers/iio/adc/rtq6056.c b/drivers/iio/adc/rtq6056.c
index ad4cea6839b27..a5464737e527c 100644
--- a/drivers/iio/adc/rtq6056.c
+++ b/drivers/iio/adc/rtq6056.c
@@ -39,6 +39,10 @@
#define RTQ6056_DEFAULT_CONFIG 0x4127
#define RTQ6056_CONT_ALLON 7
+#define RTQ6059_DEFAULT_CONFIG 0x3C47
+#define RTQ6059_VBUS_LSB_OFFSET 3
+#define RTQ6059_AVG_BASE 8
+
enum {
RTQ6056_CH_VSHUNT = 0,
RTQ6056_CH_VBUS,
@@ -47,19 +51,46 @@ enum {
RTQ6056_MAX_CHANNEL
};
+/*
+ * This enum describes the bitfield layout of the 16-bit CONFIG register
+ * (0x00), in field order from LSB to MSB:
+ * RTQ6053/6 is OPMODE->VSHUNTCT->VBUSCT->AVG->RESET
+ * RTQ6059 is OPMODE->SADC->BADC->PGA->RESET
+ */
enum {
F_OPMODE = 0,
F_VSHUNTCT,
+ F_RTQ6059_SADC = F_VSHUNTCT,
F_VBUSCT,
+ F_RTQ6059_BADC = F_VBUSCT,
F_AVG,
+ F_RTQ6059_PGA = F_AVG,
F_RESET,
F_MAX_FIELDS
};
+struct rtq6056_priv;
+
+struct richtek_dev_data {
+ bool fixed_samp_freq;
+ u8 vbus_offset;
+ int default_conv_time_us;
+ unsigned int default_config;
+ unsigned int calib_coefficient;
+ const int *avg_sample_list;
+ int avg_sample_list_length;
+ const struct reg_field *reg_fields;
+ const struct iio_chan_spec *channels;
+ int num_channels;
+ int (*read_scale)(struct iio_chan_spec const *ch, int *val, int *val2);
+ int (*set_average)(struct rtq6056_priv *priv, int val);
+};
+
struct rtq6056_priv {
struct device *dev;
struct regmap *regmap;
struct regmap_field *rm_fields[F_MAX_FIELDS];
+ const struct richtek_dev_data *devdata;
u32 shunt_resistor_uohm;
int vshuntct_us;
int vbusct_us;
@@ -74,6 +105,14 @@ static const struct reg_field rtq6056_reg_fields[F_MAX_FIELDS] = {
[F_RESET] = REG_FIELD(RTQ6056_REG_CONFIG, 15, 15),
};
+static const struct reg_field rtq6059_reg_fields[F_MAX_FIELDS] = {
+ [F_OPMODE] = REG_FIELD(RTQ6056_REG_CONFIG, 0, 2),
+ [F_RTQ6059_SADC] = REG_FIELD(RTQ6056_REG_CONFIG, 3, 6),
+ [F_RTQ6059_BADC] = REG_FIELD(RTQ6056_REG_CONFIG, 7, 10),
+ [F_RTQ6059_PGA] = REG_FIELD(RTQ6056_REG_CONFIG, 11, 12),
+ [F_RESET] = REG_FIELD(RTQ6056_REG_CONFIG, 15, 15),
+};
+
static const struct iio_chan_spec rtq6056_channels[RTQ6056_MAX_CHANNEL + 1] = {
{
.type = IIO_VOLTAGE,
@@ -151,10 +190,93 @@ static const struct iio_chan_spec rtq6056_channels[RTQ6056_MAX_CHANNEL + 1] = {
IIO_CHAN_SOFT_TIMESTAMP(RTQ6056_MAX_CHANNEL),
};
+/*
+ * Differences between RTQ6056 and RTQ6059:
+ * - Fixed sampling conversion time
+ * - Average sample numbers
+ * - Channel scale
+ * - Calibration coefficient
+ */
+static const struct iio_chan_spec rtq6059_channels[RTQ6056_MAX_CHANNEL + 1] = {
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .address = RTQ6056_REG_SHUNTVOLT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .address = RTQ6056_REG_BUSVOLT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 2,
+ .address = RTQ6056_REG_POWER,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 2,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ {
+ .type = IIO_CURRENT,
+ .indexed = 1,
+ .channel = 3,
+ .address = RTQ6056_REG_CURRENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 3,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(RTQ6056_MAX_CHANNEL),
+};
+
static int rtq6056_adc_read_channel(struct rtq6056_priv *priv,
struct iio_chan_spec const *ch,
int *val)
{
+ const struct richtek_dev_data *devdata = priv->devdata;
struct device *dev = priv->dev;
unsigned int addr = ch->address;
unsigned int regval;
@@ -168,12 +290,21 @@ static int rtq6056_adc_read_channel(struct rtq6056_priv *priv,
return ret;
/* Power and VBUS is unsigned 16-bit, others are signed 16-bit */
- if (addr == RTQ6056_REG_BUSVOLT || addr == RTQ6056_REG_POWER)
+ switch (addr) {
+ case RTQ6056_REG_BUSVOLT:
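+ /* the RTQ6059 reports VBUS left-justified; drop the pad bits (no-op on RTQ6056) */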
+ regval >>= devdata->vbus_offset;
*val = regval;
- else
+ return IIO_VAL_INT;
+ case RTQ6056_REG_POWER:
+ *val = regval;
+ return IIO_VAL_INT;
+ case RTQ6056_REG_SHUNTVOLT:
+ case RTQ6056_REG_CURRENT:
*val = sign_extend32(regval, 16);
-
- return IIO_VAL_INT;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
}
static int rtq6056_adc_read_scale(struct iio_chan_spec const *ch, int *val,
@@ -199,6 +330,28 @@ static int rtq6056_adc_read_scale(struct iio_chan_spec const *ch, int *val,
}
}
+static int rtq6059_adc_read_scale(struct iio_chan_spec const *ch, int *val,
+ int *val2)
+{
+ switch (ch->address) {
+ case RTQ6056_REG_SHUNTVOLT:
+ /* VSHUNT lsb 10uV */
+ *val = 10000;
+ *val2 = 1000000;
+ return IIO_VAL_FRACTIONAL;
+ case RTQ6056_REG_BUSVOLT:
+ /* VBUS lsb 4mV */
+ *val = 4;
+ return IIO_VAL_INT;
+ case RTQ6056_REG_POWER:
+ /* Power lsb 20mW */
+ *val = 20;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
/*
* Sample frequency for channel VSHUNT and VBUS. The indices correspond
* with the bit value expected by the chip. And it can be found at
@@ -248,6 +401,10 @@ static const int rtq6056_avg_sample_list[] = {
1, 4, 16, 64, 128, 256, 512, 1024,
};
+static const int rtq6059_avg_sample_list[] = {
+ 1, 2, 4, 8, 16, 32, 64, 128,
+};
+
static int rtq6056_adc_set_average(struct rtq6056_priv *priv, int val)
{
unsigned int selector;
@@ -268,6 +425,30 @@ static int rtq6056_adc_set_average(struct rtq6056_priv *priv, int val)
return 0;
}
+static int rtq6059_adc_set_average(struct rtq6056_priv *priv, int val)
+{
+ unsigned int selector;
+ int ret;
+
+ if (val > 128 || val < 1)
+ return -EINVAL;
+
+ /* The supported average sample is 2^x (x from 0 to 7) */
+ selector = fls(val) - 1;
+
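+ /* register codes RTQ6059_AVG_BASE + x (8..15) select averaging of 2^x samples */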
+ ret = regmap_field_write(priv->rm_fields[F_RTQ6059_BADC],
+ RTQ6059_AVG_BASE + selector);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(priv->rm_fields[F_RTQ6059_SADC],
+ RTQ6059_AVG_BASE + selector);
+ if (ret)
+ return ret;
+
+ priv->avg_sample = BIT(selector);
+
+ return 0;
+}
+
static int rtq6056_adc_get_sample_freq(struct rtq6056_priv *priv,
struct iio_chan_spec const *ch, int *val)
{
@@ -292,12 +473,13 @@ static int rtq6056_adc_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
struct rtq6056_priv *priv = iio_priv(indio_dev);
+ const struct richtek_dev_data *devdata = priv->devdata;
switch (mask) {
case IIO_CHAN_INFO_RAW:
return rtq6056_adc_read_channel(priv, chan, val);
case IIO_CHAN_INFO_SCALE:
- return rtq6056_adc_read_scale(chan, val, val2);
+ return devdata->read_scale(chan, val, val2);
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
*val = priv->avg_sample;
return IIO_VAL_INT;
@@ -313,6 +495,9 @@ static int rtq6056_adc_read_avail(struct iio_dev *indio_dev,
const int **vals, int *type, int *length,
long mask)
{
+ struct rtq6056_priv *priv = iio_priv(indio_dev);
+ const struct richtek_dev_data *devdata = priv->devdata;
+
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
*vals = rtq6056_samp_freq_list;
@@ -320,9 +505,9 @@ static int rtq6056_adc_read_avail(struct iio_dev *indio_dev,
*length = ARRAY_SIZE(rtq6056_samp_freq_list);
return IIO_AVAIL_LIST;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- *vals = rtq6056_avg_sample_list;
+ *vals = devdata->avg_sample_list;
+ *length = devdata->avg_sample_list_length;
*type = IIO_VAL_INT;
- *length = ARRAY_SIZE(rtq6056_avg_sample_list);
return IIO_AVAIL_LIST;
default:
return -EINVAL;
@@ -334,6 +519,7 @@ static int rtq6056_adc_write_raw(struct iio_dev *indio_dev,
int val2, long mask)
{
struct rtq6056_priv *priv = iio_priv(indio_dev);
+ const struct richtek_dev_data *devdata = priv->devdata;
int ret;
ret = iio_device_claim_direct_mode(indio_dev);
@@ -342,10 +528,15 @@ static int rtq6056_adc_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
+ if (devdata->fixed_samp_freq) {
+ ret = -EINVAL;
+ break;
+ }
+
ret = rtq6056_adc_set_samp_freq(priv, chan, val);
break;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- ret = rtq6056_adc_set_average(priv, val);
+ ret = devdata->set_average(priv, val);
break;
default:
ret = -EINVAL;
@@ -374,6 +565,7 @@ static int rtq6056_adc_read_label(struct iio_dev *indio_dev,
static int rtq6056_set_shunt_resistor(struct rtq6056_priv *priv,
int resistor_uohm)
{
+ const struct richtek_dev_data *devdata = priv->devdata;
unsigned int calib_val;
int ret;
@@ -382,8 +574,8 @@ static int rtq6056_set_shunt_resistor(struct rtq6056_priv *priv,
return -EINVAL;
}
- /* calibration = 5120000 / (Rshunt (uOhm) * current lsb (1mA)) */
- calib_val = 5120000 / resistor_uohm;
+ /* calibration = coefficient / (Rshunt (uOhm) * current lsb (1mA)) */
+ calib_val = devdata->calib_coefficient / resistor_uohm;
ret = regmap_write(priv->regmap, RTQ6056_REG_CALIBRATION, calib_val);
if (ret)
return ret;
@@ -450,6 +642,7 @@ static irqreturn_t rtq6056_buffer_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct rtq6056_priv *priv = iio_priv(indio_dev);
+ const struct richtek_dev_data *devdata = priv->devdata;
struct device *dev = priv->dev;
struct {
u16 vals[RTQ6056_MAX_CHANNEL];
@@ -469,6 +662,9 @@ static irqreturn_t rtq6056_buffer_trigger_handler(int irq, void *p)
if (ret)
goto out;
+ if (addr == RTQ6056_REG_BUSVOLT)
+ raw >>= devdata->vbus_offset;
+
data.vals[i++] = raw;
}
@@ -528,20 +724,26 @@ static int rtq6056_probe(struct i2c_client *i2c)
struct rtq6056_priv *priv;
struct device *dev = &i2c->dev;
struct regmap *regmap;
+ const struct richtek_dev_data *devdata;
unsigned int vendor_id, shunt_resistor_uohm;
int ret;
if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -EOPNOTSUPP;
+ devdata = device_get_match_data(dev);
+ if (!devdata)
+ return dev_err_probe(dev, -EINVAL, "Invalid dev data\n");
+
indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
if (!indio_dev)
return -ENOMEM;
priv = iio_priv(indio_dev);
priv->dev = dev;
- priv->vshuntct_us = priv->vbusct_us = 1037;
+ priv->vshuntct_us = priv->vbusct_us = devdata->default_conv_time_us;
priv->avg_sample = 1;
+ priv->devdata = devdata;
i2c_set_clientdata(i2c, priv);
regmap = devm_regmap_init_i2c(i2c, &rtq6056_regmap_config);
@@ -561,15 +763,11 @@ static int rtq6056_probe(struct i2c_client *i2c)
"Invalid vendor id 0x%04x\n", vendor_id);
ret = devm_regmap_field_bulk_alloc(dev, regmap, priv->rm_fields,
- rtq6056_reg_fields, F_MAX_FIELDS);
+ devdata->reg_fields, F_MAX_FIELDS);
if (ret)
return dev_err_probe(dev, ret, "Failed to init regmap field\n");
- /*
- * By default, configure average sample as 1, bus and shunt conversion
- * time as 1037 microsecond, and operating mode to all on.
- */
- ret = regmap_write(regmap, RTQ6056_REG_CONFIG, RTQ6056_DEFAULT_CONFIG);
+ ret = regmap_write(regmap, RTQ6056_REG_CONFIG, devdata->default_config);
if (ret)
return dev_err_probe(dev, ret,
"Failed to enable continuous sensing\n");
@@ -598,8 +796,8 @@ static int rtq6056_probe(struct i2c_client *i2c)
indio_dev->name = "rtq6056";
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = rtq6056_channels;
- indio_dev->num_channels = ARRAY_SIZE(rtq6056_channels);
+ indio_dev->channels = devdata->channels;
+ indio_dev->num_channels = devdata->num_channels;
indio_dev->info = &rtq6056_info;
ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
@@ -640,8 +838,45 @@ static int rtq6056_runtime_resume(struct device *dev)
static DEFINE_RUNTIME_DEV_PM_OPS(rtq6056_pm_ops, rtq6056_runtime_suspend,
rtq6056_runtime_resume, NULL);
+static const struct richtek_dev_data rtq6056_devdata = {
+ .default_conv_time_us = 1037,
+ .calib_coefficient = 5120000,
+ /*
+ * By default, configure average sample as 1, bus and shunt conversion
+ * time as 1037 microsecond, and operating mode to all on.
+ */
+ .default_config = RTQ6056_DEFAULT_CONFIG,
+ .avg_sample_list = rtq6056_avg_sample_list,
+ .avg_sample_list_length = ARRAY_SIZE(rtq6056_avg_sample_list),
+ .reg_fields = rtq6056_reg_fields,
+ .channels = rtq6056_channels,
+ .num_channels = ARRAY_SIZE(rtq6056_channels),
+ .read_scale = rtq6056_adc_read_scale,
+ .set_average = rtq6056_adc_set_average,
+};
+
+static const struct richtek_dev_data rtq6059_devdata = {
+ .fixed_samp_freq = true,
+ .vbus_offset = RTQ6059_VBUS_LSB_OFFSET,
+ .default_conv_time_us = 532,
+ .calib_coefficient = 40960000,
+ /*
+ * By default, configure average sample as 1, bus and shunt conversion
+ * time as 532 microsecond, and operating mode to all on.
+ */
+ .default_config = RTQ6059_DEFAULT_CONFIG,
+ .avg_sample_list = rtq6059_avg_sample_list,
+ .avg_sample_list_length = ARRAY_SIZE(rtq6059_avg_sample_list),
+ .reg_fields = rtq6059_reg_fields,
+ .channels = rtq6059_channels,
+ .num_channels = ARRAY_SIZE(rtq6059_channels),
+ .read_scale = rtq6059_adc_read_scale,
+ .set_average = rtq6059_adc_set_average,
+};
+
static const struct of_device_id rtq6056_device_match[] = {
- { .compatible = "richtek,rtq6056" },
+ { .compatible = "richtek,rtq6056", .data = &rtq6056_devdata },
+ { .compatible = "richtek,rtq6059", .data = &rtq6059_devdata },
{}
};
MODULE_DEVICE_TABLE(of, rtq6056_device_match);
diff --git a/drivers/iio/adc/ti-adc108s102.c b/drivers/iio/adc/ti-adc108s102.c
index c82a161630e1d..69fcbbc7e418a 100644
--- a/drivers/iio/adc/ti-adc108s102.c
+++ b/drivers/iio/adc/ti-adc108s102.c
@@ -293,13 +293,11 @@ static const struct of_device_id adc108s102_of_match[] = {
};
MODULE_DEVICE_TABLE(of, adc108s102_of_match);
-#ifdef CONFIG_ACPI
static const struct acpi_device_id adc108s102_acpi_ids[] = {
{ "INT3495", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, adc108s102_acpi_ids);
-#endif
static const struct spi_device_id adc108s102_id[] = {
{ "adc108s102", 0 },
@@ -311,7 +309,7 @@ static struct spi_driver adc108s102_driver = {
.driver = {
.name = "adc108s102",
.of_match_table = adc108s102_of_match,
- .acpi_match_table = ACPI_PTR(adc108s102_acpi_ids),
+ .acpi_match_table = adc108s102_acpi_ids,
},
.probe = adc108s102_probe,
.id_table = adc108s102_id,
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 6799ea49dbc73..6ae967e4d8fa7 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -925,7 +925,7 @@ static int ads1015_client_get_channels_config(struct i2c_client *client)
if (!fwnode_property_read_u32(node, "ti,gain", &pval)) {
pga = pval;
- if (pga > 6) {
+ if (pga > 5) {
dev_err(dev, "invalid gain on %pfw\n", node);
fwnode_handle_put(node);
return -EINVAL;
diff --git a/drivers/iio/adc/ti-ads1298.c b/drivers/iio/adc/ti-ads1298.c
new file mode 100644
index 0000000000000..1d1eaba3d6d12
--- /dev/null
+++ b/drivers/iio/adc/ti-ads1298.c
@@ -0,0 +1,771 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI ADS1298 chip family driver
+ * Copyright (C) 2023 - 2024 Topic Embedded Products
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/log2.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/units.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/kfifo_buf.h>
+
+#include <asm/unaligned.h>
+
+/* Commands */
+#define ADS1298_CMD_WAKEUP 0x02
+#define ADS1298_CMD_STANDBY 0x04
+#define ADS1298_CMD_RESET 0x06
+#define ADS1298_CMD_START 0x08
+#define ADS1298_CMD_STOP 0x0a
+#define ADS1298_CMD_RDATAC 0x10
+#define ADS1298_CMD_SDATAC 0x11
+#define ADS1298_CMD_RDATA 0x12
+#define ADS1298_CMD_RREG 0x20
+#define ADS1298_CMD_WREG 0x40
+
+/* Registers */
+#define ADS1298_REG_ID 0x00
+#define ADS1298_MASK_ID_FAMILY GENMASK(7, 3)
+#define ADS1298_MASK_ID_CHANNELS GENMASK(2, 0)
+#define ADS1298_ID_FAMILY_ADS129X 0x90
+#define ADS1298_ID_FAMILY_ADS129XR 0xd0
+
+#define ADS1298_REG_CONFIG1 0x01
+#define ADS1298_MASK_CONFIG1_HR BIT(7)
+#define ADS1298_MASK_CONFIG1_DR GENMASK(2, 0)
+#define ADS1298_SHIFT_DR_HR 6
+#define ADS1298_SHIFT_DR_LP 7
+#define ADS1298_LOWEST_DR 0x06
+
+#define ADS1298_REG_CONFIG2 0x02
+#define ADS1298_MASK_CONFIG2_RESERVED BIT(6)
+#define ADS1298_MASK_CONFIG2_WCT_CHOP BIT(5)
+#define ADS1298_MASK_CONFIG2_INT_TEST BIT(4)
+#define ADS1298_MASK_CONFIG2_TEST_AMP BIT(2)
+#define ADS1298_MASK_CONFIG2_TEST_FREQ_DC GENMASK(1, 0)
+#define ADS1298_MASK_CONFIG2_TEST_FREQ_SLOW 0
+#define ADS1298_MASK_CONFIG2_TEST_FREQ_FAST BIT(0)
+
+#define ADS1298_REG_CONFIG3 0x03
+#define ADS1298_MASK_CONFIG3_PWR_REFBUF BIT(7)
+#define ADS1298_MASK_CONFIG3_RESERVED BIT(6)
+#define ADS1298_MASK_CONFIG3_VREF_4V BIT(5)
+
+#define ADS1298_REG_LOFF 0x04
+#define ADS1298_REG_CHnSET(n) (0x05 + (n))
+#define ADS1298_MASK_CH_PD BIT(7)
+#define ADS1298_MASK_CH_PGA GENMASK(6, 4)
+#define ADS1298_MASK_CH_MUX GENMASK(2, 0)
+
+#define ADS1298_REG_LOFF_STATP 0x12
+#define ADS1298_REG_LOFF_STATN 0x13
+#define ADS1298_REG_CONFIG4 0x17
+#define ADS1298_MASK_CONFIG4_SINGLE_SHOT BIT(3)
+
+#define ADS1298_REG_WCT1 0x18
+#define ADS1298_REG_WCT2 0x19
+
+#define ADS1298_MAX_CHANNELS 8
+#define ADS1298_BITS_PER_SAMPLE 24
+#define ADS1298_CLK_RATE_HZ 2048000
+#define ADS1298_CLOCKS_TO_USECS(x) \
+ (DIV_ROUND_UP((x) * MICROHZ_PER_HZ, ADS1298_CLK_RATE_HZ))
+/*
+ * Read/write register commands require 4 clocks to decode, for speeds above
+ * 2x the clock rate, this would require extra time between the command byte and
+ * the data. Much simpler is to just limit the SPI transfer speed while doing
+ * register access.
+ */
+#define ADS1298_SPI_BUS_SPEED_SLOW ADS1298_CLK_RATE_HZ
+/* For reading and writing registers, we need a 3-byte buffer */
+#define ADS1298_SPI_CMD_BUFFER_SIZE 3
+/* Outputs status word and 'n' 24-bit samples, plus the command byte */
+#define ADS1298_SPI_RDATA_BUFFER_SIZE(n) (((n) + 1) * 3 + 1)
+#define ADS1298_SPI_RDATA_BUFFER_SIZE_MAX \
+ ADS1298_SPI_RDATA_BUFFER_SIZE(ADS1298_MAX_CHANNELS)
+
+struct ads1298_private {
+ const struct ads1298_chip_info *chip_info;
+ struct spi_device *spi;
+ struct regulator *reg_avdd;
+ struct regulator *reg_vref;
+ struct clk *clk;
+ struct regmap *regmap;
+ struct completion completion;
+ struct iio_trigger *trig;
+ struct spi_transfer rdata_xfer;
+ struct spi_message rdata_msg;
+ spinlock_t irq_busy_lock; /* Handshake between SPI and DRDY irqs */
+ /*
+ * rdata_xfer_busy increments when a DRDY occurs and decrements when SPI
+ * completion is reported. Hence its meaning is:
+ * 0 = Waiting for DRDY interrupt
+ * 1 = SPI transfer in progress
+ * 2 = DRDY during SPI transfer, start another transfer on completion
+ * >2 = Multiple DRDY during transfer, lost rdata_xfer_busy - 2 samples
+ */
+ unsigned int rdata_xfer_busy;
+
+ /* Temporary storage for demuxing data after SPI transfer */
+ u32 bounce_buffer[ADS1298_MAX_CHANNELS];
+
+ /* For synchronous SPI exchanges (read/write registers) */
+ u8 cmd_buffer[ADS1298_SPI_CMD_BUFFER_SIZE] __aligned(IIO_DMA_MINALIGN);
+
+ /* Buffer used for incoming SPI data */
+ u8 rx_buffer[ADS1298_SPI_RDATA_BUFFER_SIZE_MAX];
+ /* Contains the RDATA command and zeroes to clock out */
+ u8 tx_buffer[ADS1298_SPI_RDATA_BUFFER_SIZE_MAX];
+};
+
+/* Three bytes per sample in RX buffer, starting at offset 4 */
+#define ADS1298_OFFSET_IN_RX_BUFFER(index) (3 * (index) + 4)
+
+#define ADS1298_CHAN(index) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = index, \
+ .address = ADS1298_OFFSET_IN_RX_BUFFER(index), \
+ .info_mask_separate = \
+ BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = ADS1298_BITS_PER_SAMPLE, \
+ .storagebits = 32, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+static const struct iio_chan_spec ads1298_channels[] = {
+ ADS1298_CHAN(0),
+ ADS1298_CHAN(1),
+ ADS1298_CHAN(2),
+ ADS1298_CHAN(3),
+ ADS1298_CHAN(4),
+ ADS1298_CHAN(5),
+ ADS1298_CHAN(6),
+ ADS1298_CHAN(7),
+};
+
+static int ads1298_write_cmd(struct ads1298_private *priv, u8 command)
+{
+ struct spi_transfer xfer = {
+ .tx_buf = priv->cmd_buffer,
+ .rx_buf = priv->cmd_buffer,
+ .len = 1,
+ .speed_hz = ADS1298_SPI_BUS_SPEED_SLOW,
+ .delay = {
+ .value = 2,
+ .unit = SPI_DELAY_UNIT_USECS,
+ },
+ };
+
+ priv->cmd_buffer[0] = command;
+
+ return spi_sync_transfer(priv->spi, &xfer, 1);
+}
+
+static int ads1298_read_one(struct ads1298_private *priv, int chan_index)
+{
+ int ret;
+
+ /* Enable the channel */
+ ret = regmap_update_bits(priv->regmap, ADS1298_REG_CHnSET(chan_index),
+ ADS1298_MASK_CH_PD, 0);
+ if (ret)
+ return ret;
+
+ /* Enable single-shot mode, so we don't need to send a STOP */
+ ret = regmap_update_bits(priv->regmap, ADS1298_REG_CONFIG4,
+ ADS1298_MASK_CONFIG4_SINGLE_SHOT,
+ ADS1298_MASK_CONFIG4_SINGLE_SHOT);
+ if (ret)
+ return ret;
+
+ reinit_completion(&priv->completion);
+
+ ret = ads1298_write_cmd(priv, ADS1298_CMD_START);
+ if (ret < 0) {
+ dev_err(&priv->spi->dev, "CMD_START error: %d\n", ret);
+ return ret;
+ }
+
+ /* At the slowest rate (250Hz) a single conversion takes 4ms; 50ms gives ample margin */
+ ret = wait_for_completion_timeout(&priv->completion, msecs_to_jiffies(50));
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ads1298_get_samp_freq(struct ads1298_private *priv, int *val)
+{
+ unsigned long rate;
+ unsigned int cfg;
+ int ret;
+
+ ret = regmap_read(priv->regmap, ADS1298_REG_CONFIG1, &cfg);
+ if (ret)
+ return ret;
+
+ if (priv->clk)
+ rate = clk_get_rate(priv->clk);
+ else
+ rate = ADS1298_CLK_RATE_HZ;
+ if (!rate)
+ return -EINVAL;
+
+ /* Data rate shift depends on HR/LP mode */
+ if (cfg & ADS1298_MASK_CONFIG1_HR)
+ rate >>= ADS1298_SHIFT_DR_HR;
+ else
+ rate >>= ADS1298_SHIFT_DR_LP;
+
+ *val = rate >> (cfg & ADS1298_MASK_CONFIG1_DR);
+
+ return IIO_VAL_INT;
+}
+
+static int ads1298_set_samp_freq(struct ads1298_private *priv, int val)
+{
+ unsigned long rate;
+ unsigned int factor;
+ unsigned int cfg;
+
+ if (priv->clk)
+ rate = clk_get_rate(priv->clk);
+ else
+ rate = ADS1298_CLK_RATE_HZ;
+ if (!rate)
+ return -EINVAL;
+ if (val <= 0)
+ return -EINVAL;
+
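+ /* power-of-two divider relative to the fastest (HR mode) output data rate */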
+ factor = (rate >> ADS1298_SHIFT_DR_HR) / val;
+ if (factor >= BIT(ADS1298_SHIFT_DR_LP))
+ cfg = ADS1298_LOWEST_DR;
+ else if (factor)
+ cfg = ADS1298_MASK_CONFIG1_HR | ilog2(factor); /* Use HR mode */
+ else
+ cfg = ADS1298_MASK_CONFIG1_HR; /* Fastest possible */
+
+ return regmap_update_bits(priv->regmap, ADS1298_REG_CONFIG1,
+ ADS1298_MASK_CONFIG1_HR | ADS1298_MASK_CONFIG1_DR,
+ cfg);
+}
+
+static const u8 ads1298_pga_settings[] = { 6, 1, 2, 3, 4, 8, 12 };
+
+static int ads1298_get_scale(struct ads1298_private *priv,
+ int channel, int *val, int *val2)
+{
+ int ret;
+ unsigned int regval;
+ u8 gain;
+
+ if (priv->reg_vref) {
+ ret = regulator_get_voltage(priv->reg_vref);
+ if (ret < 0)
+ return ret;
+
+ *val = ret / MILLI; /* Convert to millivolts */
+ } else {
+ ret = regmap_read(priv->regmap, ADS1298_REG_CONFIG3, &regval);
+ if (ret)
+ return ret;
+
+ /* Reference in millivolts */
+ *val = regval & ADS1298_MASK_CONFIG3_VREF_4V ? 4000 : 2400;
+ }
+
+ ret = regmap_read(priv->regmap, ADS1298_REG_CHnSET(channel), &regval);
+ if (ret)
+ return ret;
+
+ gain = ads1298_pga_settings[FIELD_GET(ADS1298_MASK_CH_PGA, regval)];
+ *val /= gain; /* Full scale is VREF / gain */
+
+ *val2 = ADS1298_BITS_PER_SAMPLE - 1; /* Signed, hence the -1 */
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+}
+
+static int ads1298_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ads1298_private *priv = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ads1298_read_one(priv, chan->scan_index);
+
+ iio_device_release_direct_mode(indio_dev);
+
+ if (ret)
+ return ret;
+
+ *val = sign_extend32(get_unaligned_be24(priv->rx_buffer + chan->address),
+ ADS1298_BITS_PER_SAMPLE - 1);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ return ads1298_get_scale(priv, chan->channel, val, val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return ads1298_get_samp_freq(priv, val);
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = regmap_read(priv->regmap, ADS1298_REG_CONFIG1, val);
+ if (ret)
+ return ret;
+
+ *val = 16 << (*val & ADS1298_MASK_CONFIG1_DR);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ads1298_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct ads1298_private *priv = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return ads1298_set_samp_freq(priv, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ads1298_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct ads1298_private *priv = context;
+ struct spi_transfer reg_write_xfer = {
+ .tx_buf = priv->cmd_buffer,
+ .rx_buf = priv->cmd_buffer,
+ .len = 3,
+ .speed_hz = ADS1298_SPI_BUS_SPEED_SLOW,
+ .delay = {
+ .value = 2,
+ .unit = SPI_DELAY_UNIT_USECS,
+ },
+ };
+
+ priv->cmd_buffer[0] = ADS1298_CMD_WREG | reg;
+ priv->cmd_buffer[1] = 0; /* Number of registers to be written - 1 */
+ priv->cmd_buffer[2] = val;
+
+ return spi_sync_transfer(priv->spi, &reg_write_xfer, 1);
+}
+
+static int ads1298_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct ads1298_private *priv = context;
+ struct spi_transfer reg_read_xfer = {
+ .tx_buf = priv->cmd_buffer,
+ .rx_buf = priv->cmd_buffer,
+ .len = 3,
+ .speed_hz = ADS1298_SPI_BUS_SPEED_SLOW,
+ .delay = {
+ .value = 2,
+ .unit = SPI_DELAY_UNIT_USECS,
+ },
+ };
+ int ret;
+
+ priv->cmd_buffer[0] = ADS1298_CMD_RREG | reg;
+ priv->cmd_buffer[1] = 0; /* Number of registers to be read - 1 */
+ priv->cmd_buffer[2] = 0;
+
+ ret = spi_sync_transfer(priv->spi, &reg_read_xfer, 1);
+ if (ret)
+ return ret;
+
+ *val = priv->cmd_buffer[2];
+
+ return 0;
+}
+
+static int ads1298_reg_access(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct ads1298_private *priv = iio_priv(indio_dev);
+
+ if (readval)
+ return regmap_read(priv->regmap, reg, readval);
+
+ return regmap_write(priv->regmap, reg, writeval);
+}
+
+static void ads1298_rdata_unmark_busy(struct ads1298_private *priv)
+{
+ /* Notify we're no longer waiting for the SPI transfer to complete */
+ guard(spinlock_irqsave)(&priv->irq_busy_lock);
+ priv->rdata_xfer_busy = 0;
+}
+
+static int ads1298_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct ads1298_private *priv = iio_priv(indio_dev);
+ unsigned int val;
+ int ret;
+ int i;
+
+ /* Make the interrupt routines start with a clean slate */
+ ads1298_rdata_unmark_busy(priv);
+
+ /* Configure power-down bits to match scan mask */
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ val = test_bit(i, scan_mask) ? 0 : ADS1298_MASK_CH_PD;
+ ret = regmap_update_bits(priv->regmap, ADS1298_REG_CHnSET(i),
+ ADS1298_MASK_CH_PD, val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct iio_info ads1298_info = {
+ .read_raw = &ads1298_read_raw,
+ .write_raw = &ads1298_write_raw,
+ .update_scan_mode = &ads1298_update_scan_mode,
+ .debugfs_reg_access = &ads1298_reg_access,
+};
+
+static void ads1298_rdata_release_busy_or_restart(struct ads1298_private *priv)
+{
+ guard(spinlock_irqsave)(&priv->irq_busy_lock);
+
+ if (priv->rdata_xfer_busy > 1) {
+ /*
+ * DRDY interrupt occurred before SPI completion. Start a new
+ * SPI transaction now to retrieve the data that wasn't latched
+ * into the ADS1298 chip's transfer buffer yet.
+ */
+ spi_async(priv->spi, &priv->rdata_msg);
+ /*
+ * If more than one DRDY took place, there was an overrun. Since
+ * the sample is already lost, reset the counter to 1 so that
+ * we will wait for a DRDY interrupt after this SPI transaction.
+ */
+ priv->rdata_xfer_busy = 1;
+ } else {
+ /* No pending data, wait for DRDY */
+ priv->rdata_xfer_busy = 0;
+ }
+}
+
+/* Called from SPI completion interrupt handler */
+static void ads1298_rdata_complete(void *context)
+{
+ struct iio_dev *indio_dev = context;
+ struct ads1298_private *priv = iio_priv(indio_dev);
+ int scan_index;
+ u32 *bounce = priv->bounce_buffer;
+
+ if (!iio_buffer_enabled(indio_dev)) {
+ /*
+ * for a single transfer mode we're kept in direct_mode until
+ * completion, avoiding a race with buffered IO.
+ */
+ ads1298_rdata_unmark_busy(priv);
+ complete(&priv->completion);
+ return;
+ }
+
+ /* Demux the channel data into our bounce buffer */
+ for_each_set_bit(scan_index, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ const struct iio_chan_spec *scan_chan =
+ &indio_dev->channels[scan_index];
+ const u8 *data = priv->rx_buffer + scan_chan->address;
+
+ *bounce++ = get_unaligned_be24(data);
+ }
+
+ /* rx_buffer can be overwritten from this point on */
+ ads1298_rdata_release_busy_or_restart(priv);
+
+ iio_push_to_buffers(indio_dev, priv->bounce_buffer);
+}
+
+static irqreturn_t ads1298_interrupt(int irq, void *dev_id)
+{
+ struct iio_dev *indio_dev = dev_id;
+ struct ads1298_private *priv = iio_priv(indio_dev);
+ unsigned int wasbusy;
+
+ guard(spinlock_irqsave)(&priv->irq_busy_lock);
+
+ wasbusy = priv->rdata_xfer_busy++;
+ /* When no SPI transfer in transit, start one now */
+ if (!wasbusy)
+ spi_async(priv->spi, &priv->rdata_msg);
+
+ return IRQ_HANDLED;
+}
+
+static int ads1298_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct ads1298_private *priv = iio_priv(indio_dev);
+ int ret;
+
+ /* Disable single-shot mode */
+ ret = regmap_update_bits(priv->regmap, ADS1298_REG_CONFIG4,
+ ADS1298_MASK_CONFIG4_SINGLE_SHOT, 0);
+ if (ret)
+ return ret;
+
+ return ads1298_write_cmd(priv, ADS1298_CMD_START);
+}
+
+static int ads1298_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct ads1298_private *priv = iio_priv(indio_dev);
+
+ return ads1298_write_cmd(priv, ADS1298_CMD_STOP);
+}
+
+static const struct iio_buffer_setup_ops ads1298_setup_ops = {
+ .postenable = &ads1298_buffer_postenable,
+ .predisable = &ads1298_buffer_predisable,
+};
+
+static void ads1298_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
+static const struct regmap_range ads1298_regmap_volatile_range[] = {
+ regmap_reg_range(ADS1298_REG_LOFF_STATP, ADS1298_REG_LOFF_STATN),
+};
+
+static const struct regmap_access_table ads1298_regmap_volatile = {
+ .yes_ranges = ads1298_regmap_volatile_range,
+ .n_yes_ranges = ARRAY_SIZE(ads1298_regmap_volatile_range),
+};
+
+static const struct regmap_config ads1298_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .reg_read = ads1298_reg_read,
+ .reg_write = ads1298_reg_write,
+ .max_register = ADS1298_REG_WCT2,
+ .volatile_table = &ads1298_regmap_volatile,
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static int ads1298_init(struct iio_dev *indio_dev)
+{
+ struct ads1298_private *priv = iio_priv(indio_dev);
+ struct device *dev = &priv->spi->dev;
+ const char *suffix;
+ unsigned int val;
+ int ret;
+
+ /* Device initializes into RDATAC mode, which we don't want */
+ ret = ads1298_write_cmd(priv, ADS1298_CMD_SDATAC);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(priv->regmap, ADS1298_REG_ID, &val);
+ if (ret)
+ return ret;
+
+ /* Fill in name and channel count based on what the chip told us */
+ indio_dev->num_channels = 4 + 2 * (val & ADS1298_MASK_ID_CHANNELS);
+ switch (val & ADS1298_MASK_ID_FAMILY) {
+ case ADS1298_ID_FAMILY_ADS129X:
+ suffix = "";
+ break;
+ case ADS1298_ID_FAMILY_ADS129XR:
+ suffix = "r";
+ break;
+ default:
+ return dev_err_probe(dev, -ENODEV, "Unknown ID: 0x%x\n", val);
+ }
+ indio_dev->name = devm_kasprintf(dev, GFP_KERNEL, "ads129%u%s",
+ indio_dev->num_channels, suffix);
+
+ /* Enable internal test signal, double amplitude, double frequency */
+ ret = regmap_write(priv->regmap, ADS1298_REG_CONFIG2,
+ ADS1298_MASK_CONFIG2_RESERVED |
+ ADS1298_MASK_CONFIG2_INT_TEST |
+ ADS1298_MASK_CONFIG2_TEST_AMP |
+ ADS1298_MASK_CONFIG2_TEST_FREQ_FAST);
+ if (ret)
+ return ret;
+
+ val = ADS1298_MASK_CONFIG3_RESERVED; /* Must write 1 always */
+ if (!priv->reg_vref) {
+ /* Enable internal reference */
+ val |= ADS1298_MASK_CONFIG3_PWR_REFBUF;
+ /* Use 4V VREF when power supply is at least 4.4V */
+ if (regulator_get_voltage(priv->reg_avdd) >= 4400000)
+ val |= ADS1298_MASK_CONFIG3_VREF_4V;
+ }
+ return regmap_write(priv->regmap, ADS1298_REG_CONFIG3, val);
+}
+
+static int ads1298_probe(struct spi_device *spi)
+{
+ struct ads1298_private *priv;
+ struct iio_dev *indio_dev;
+ struct device *dev = &spi->dev;
+ struct gpio_desc *reset_gpio;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ priv = iio_priv(indio_dev);
+
+ /* Reset to be asserted before enabling clock and power */
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(reset_gpio),
+ "Cannot get reset GPIO\n");
+
+ /* VREF can be supplied externally, otherwise use internal reference */
+ priv->reg_vref = devm_regulator_get_optional(dev, "vref");
+ if (IS_ERR(priv->reg_vref)) {
+ if (PTR_ERR(priv->reg_vref) != -ENODEV)
+ return dev_err_probe(dev, PTR_ERR(priv->reg_vref),
+ "Failed to get vref regulator\n");
+
+ priv->reg_vref = NULL;
+ } else {
+ ret = regulator_enable(priv->reg_vref);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, ads1298_reg_disable, priv->reg_vref);
+ if (ret)
+ return ret;
+ }
+
+ priv->clk = devm_clk_get_optional_enabled(dev, "clk");
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to get clk\n");
+
+ priv->reg_avdd = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(priv->reg_avdd))
+ return dev_err_probe(dev, PTR_ERR(priv->reg_avdd),
+ "Failed to get avdd regulator\n");
+
+ ret = regulator_enable(priv->reg_avdd);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable avdd regulator\n");
+
+ ret = devm_add_action_or_reset(dev, ads1298_reg_disable, priv->reg_avdd);
+ if (ret)
+ return ret;
+
+ priv->spi = spi;
+ init_completion(&priv->completion);
+ spin_lock_init(&priv->irq_busy_lock);
+ priv->regmap = devm_regmap_init(dev, NULL, priv, &ads1298_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
+ indio_dev->channels = ads1298_channels;
+ indio_dev->info = &ads1298_info;
+
+ if (reset_gpio) {
+ /*
+ * Deassert reset now that clock and power are active.
+ * Minimum reset pulsewidth is 2 clock cycles.
+ */
+ fsleep(ADS1298_CLOCKS_TO_USECS(2));
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ } else {
+ ret = ads1298_write_cmd(priv, ADS1298_CMD_RESET);
+ if (ret)
+ return dev_err_probe(dev, ret, "RESET failed\n");
+ }
+ /* Wait 18 clock cycles for reset command to complete */
+ fsleep(ADS1298_CLOCKS_TO_USECS(18));
+
+ ret = ads1298_init(indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Init failed\n");
+
+ priv->tx_buffer[0] = ADS1298_CMD_RDATA;
+ priv->rdata_xfer.tx_buf = priv->tx_buffer;
+ priv->rdata_xfer.rx_buf = priv->rx_buffer;
+ priv->rdata_xfer.len = ADS1298_SPI_RDATA_BUFFER_SIZE(indio_dev->num_channels);
+ /* Must keep CS low for 4 clocks */
+ priv->rdata_xfer.delay.value = 2;
+ priv->rdata_xfer.delay.unit = SPI_DELAY_UNIT_USECS;
+ spi_message_init_with_transfers(&priv->rdata_msg, &priv->rdata_xfer, 1);
+ priv->rdata_msg.complete = &ads1298_rdata_complete;
+ priv->rdata_msg.context = indio_dev;
+
+ ret = devm_request_irq(dev, spi->irq, &ads1298_interrupt,
+ IRQF_TRIGGER_FALLING, indio_dev->name,
+ indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, &ads1298_setup_ops);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct spi_device_id ads1298_id[] = {
+ { "ads1298" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ads1298_id);
+
+static const struct of_device_id ads1298_of_table[] = {
+ { .compatible = "ti,ads1298" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ads1298_of_table);
+
+static struct spi_driver ads1298_driver = {
+ .driver = {
+ .name = "ads1298",
+ .of_match_table = ads1298_of_table,
+ },
+ .probe = ads1298_probe,
+ .id_table = ads1298_id,
+};
+module_spi_driver(ads1298_driver);
+
+MODULE_AUTHOR("Mike Looijmans <mike.looijmans@topic.nl>");
+MODULE_DESCRIPTION("TI ADS1298 ADC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index ef06a897421ac..9440a268a78c4 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -11,7 +11,7 @@
#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
diff --git a/drivers/iio/amplifiers/hmc425a.c b/drivers/iio/amplifiers/hmc425a.c
index ed4d729226961..2ee4c0d70281e 100644
--- a/drivers/iio/amplifiers/hmc425a.c
+++ b/drivers/iio/amplifiers/hmc425a.c
@@ -2,9 +2,10 @@
/*
* HMC425A and similar Gain Amplifiers
*
- * Copyright 2020 Analog Devices Inc.
+ * Copyright 2020, 2024 Analog Devices Inc.
*/
+#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -12,6 +13,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -20,10 +22,24 @@
#include <linux/regulator/consumer.h>
#include <linux/sysfs.h>
+/*
+ * The LTC6373 amplifier supports configuring gain using GPIOs with the following
+ * values (OUTPUT_V / INPUT_V): 0 (shutdown), 0.25, 0.5, 1, 2, 4, 8, 16
+ *
+ * Except for the shutdown value, all can be converted to dB using 20 * log10(x).
+ * In dB all of them are multiples of the '2' gain setting, which corresponds
+ * to 6.020 dB.
+ */
+#define LTC6373_CONVERSION_CONSTANT 6020
+#define LTC6373_MIN_GAIN_CODE 0x6
+#define LTC6373_CONVERSION_MASK GENMASK(2, 0)
+#define LTC6373_SHUTDOWN GENMASK(2, 0)
+
enum hmc425a_type {
ID_HMC425A,
ID_HMC540S,
- ID_ADRF5740
+ ID_ADRF5740,
+ ID_LTC6373,
};
struct hmc425a_chip_info {
@@ -34,16 +50,110 @@ struct hmc425a_chip_info {
int gain_min;
int gain_max;
int default_gain;
+ int powerdown_val;
+ bool has_powerdown;
+
+ int (*gain_dB_to_code)(int gain, int *code);
+ int (*code_to_gain_dB)(int code, int *val, int *val2);
};
struct hmc425a_state {
struct mutex lock; /* protect sensor state */
- struct hmc425a_chip_info *chip_info;
+ const struct hmc425a_chip_info *chip_info;
struct gpio_descs *gpios;
- enum hmc425a_type type;
u32 gain;
+ bool powerdown;
};
+static int gain_dB_to_code(struct hmc425a_state *st, int val, int val2, int *code)
+{
+ const struct hmc425a_chip_info *inf = st->chip_info;
+ int gain;
+
+ if (val < 0)
+ gain = (val * 1000) - (val2 / 1000);
+ else
+ gain = (val * 1000) + (val2 / 1000);
+
+ if (gain > inf->gain_max || gain < inf->gain_min)
+ return -EINVAL;
+ if (st->powerdown)
+ return -EPERM;
+
+ return st->chip_info->gain_dB_to_code(gain, code);
+}
+
+static int hmc425a_gain_dB_to_code(int gain, int *code)
+{
+ *code = ~((abs(gain) / 500) & 0x3F);
+ return 0;
+}
+
+static int hmc540s_gain_dB_to_code(int gain, int *code)
+{
+ *code = ~((abs(gain) / 1000) & 0xF);
+ return 0;
+}
+
+static int adrf5740_gain_dB_to_code(int gain, int *code)
+{
+ int temp = (abs(gain) / 2000) & 0xF;
+
+ /* Bit [0-3]: 2dB 4dB 8dB 8dB */
+ *code = temp & BIT(3) ? temp | BIT(2) : temp;
+ return 0;
+}
+
+static int ltc6373_gain_dB_to_code(int gain, int *code)
+{
+ *code = ~(DIV_ROUND_CLOSEST(gain, LTC6373_CONVERSION_CONSTANT) + 3)
+ & LTC6373_CONVERSION_MASK;
+ return 0;
+}
+
+static int code_to_gain_dB(struct hmc425a_state *st, int *val, int *val2)
+{
+ if (st->powerdown)
+ return -EPERM;
+ return st->chip_info->code_to_gain_dB(st->gain, val, val2);
+}
+
+static int hmc425a_code_to_gain_dB(int code, int *val, int *val2)
+{
+ *val = (~code * -500) / 1000;
+ *val2 = ((~code * -500) % 1000) * 1000;
+ return 0;
+}
+
+static int hmc540s_code_to_gain_dB(int code, int *val, int *val2)
+{
+ *val = (~code * -1000) / 1000;
+ *val2 = ((~code * -1000) % 1000) * 1000;
+ return 0;
+}
+
+static int adrf5740_code_to_gain_dB(int code, int *val, int *val2)
+{
+ /*
+ * Bit [0-3]: 2dB 4dB 8dB 8dB
+	 * When BIT(3) is set, clear BIT(2): the place value of bit 3 already
+	 * accounts for the 16 dB contributed by the two 8 dB stages
+ */
+ code = code & BIT(3) ? code & ~BIT(2) : code;
+ *val = (code * -2000) / 1000;
+ *val2 = ((code * -2000) % 1000) * 1000;
+ return 0;
+}
+
+static int ltc6373_code_to_gain_dB(int code, int *val, int *val2)
+{
+ int gain = ((~code & LTC6373_CONVERSION_MASK) - 3) *
+ LTC6373_CONVERSION_CONSTANT;
+
+ *val = gain / 1000;
+ *val2 = (gain % 1000) * 1000;
+ return 0;
+}
+
static int hmc425a_write(struct iio_dev *indio_dev, u32 value)
{
struct hmc425a_state *st = iio_priv(indio_dev);
@@ -61,30 +171,14 @@ static int hmc425a_read_raw(struct iio_dev *indio_dev,
int *val2, long m)
{
struct hmc425a_state *st = iio_priv(indio_dev);
- int code, gain = 0;
int ret;
mutex_lock(&st->lock);
switch (m) {
case IIO_CHAN_INFO_HARDWAREGAIN:
- code = st->gain;
-
- switch (st->type) {
- case ID_HMC425A:
- gain = ~code * -500;
+ ret = code_to_gain_dB(st, val, val2);
+ if (ret)
break;
- case ID_HMC540S:
- gain = ~code * -1000;
- break;
- case ID_ADRF5740:
- code = code & BIT(3) ? code & ~BIT(2) : code;
- gain = code * -2000;
- break;
- }
-
- *val = gain / 1000;
- *val2 = (gain % 1000) * 1000;
-
ret = IIO_VAL_INT_PLUS_MICRO_DB;
break;
default:
@@ -100,34 +194,14 @@ static int hmc425a_write_raw(struct iio_dev *indio_dev,
int val2, long mask)
{
struct hmc425a_state *st = iio_priv(indio_dev);
- struct hmc425a_chip_info *inf = st->chip_info;
- int code = 0, gain;
- int ret;
-
- if (val < 0)
- gain = (val * 1000) - (val2 / 1000);
- else
- gain = (val * 1000) + (val2 / 1000);
-
- if (gain > inf->gain_max || gain < inf->gain_min)
- return -EINVAL;
-
- switch (st->type) {
- case ID_HMC425A:
- code = ~((abs(gain) / 500) & 0x3F);
- break;
- case ID_HMC540S:
- code = ~((abs(gain) / 1000) & 0xF);
- break;
- case ID_ADRF5740:
- code = (abs(gain) / 2000) & 0xF;
- code = code & BIT(3) ? code | BIT(2) : code;
- break;
- }
+ int code = 0, ret;
mutex_lock(&st->lock);
switch (mask) {
case IIO_CHAN_INFO_HARDWAREGAIN:
+ ret = gain_dB_to_code(st, val, val2, &code);
+ if (ret)
+ break;
st->gain = code;
ret = hmc425a_write(indio_dev, st->gain);
@@ -158,6 +232,48 @@ static const struct iio_info hmc425a_info = {
.write_raw_get_fmt = &hmc425a_write_raw_get_fmt,
};
+static ssize_t ltc6373_read_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct hmc425a_state *st = iio_priv(indio_dev);
+
+ return sysfs_emit(buf, "%d\n", st->powerdown);
+}
+
+static ssize_t ltc6373_write_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf,
+ size_t len)
+{
+ struct hmc425a_state *st = iio_priv(indio_dev);
+ bool powerdown;
+ int code, ret;
+
+ ret = kstrtobool(buf, &powerdown);
+ if (ret)
+ return ret;
+
+ mutex_lock(&st->lock);
+ st->powerdown = powerdown;
+ code = (powerdown) ? LTC6373_SHUTDOWN : st->gain;
+ hmc425a_write(indio_dev, code);
+ mutex_unlock(&st->lock);
+ return len;
+}
+
+static const struct iio_chan_spec_ext_info ltc6373_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = ltc6373_read_powerdown,
+ .write = ltc6373_write_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ {}
+};
+
#define HMC425A_CHAN(_channel) \
{ \
.type = IIO_VOLTAGE, \
@@ -167,20 +283,25 @@ static const struct iio_info hmc425a_info = {
.info_mask_separate = BIT(IIO_CHAN_INFO_HARDWAREGAIN), \
}
+#define LTC6373_CHAN(_channel) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_HARDWAREGAIN), \
+ .ext_info = ltc6373_ext_info, \
+}
+
static const struct iio_chan_spec hmc425a_channels[] = {
HMC425A_CHAN(0),
};
-/* Match table for of_platform binding */
-static const struct of_device_id hmc425a_of_match[] = {
- { .compatible = "adi,hmc425a", .data = (void *)ID_HMC425A },
- { .compatible = "adi,hmc540s", .data = (void *)ID_HMC540S },
- { .compatible = "adi,adrf5740", .data = (void *)ID_ADRF5740 },
- {},
+static const struct iio_chan_spec ltc6373_channels[] = {
+ LTC6373_CHAN(0),
};
-MODULE_DEVICE_TABLE(of, hmc425a_of_match);
-static struct hmc425a_chip_info hmc425a_chip_info_tbl[] = {
+static const struct hmc425a_chip_info hmc425a_chip_info_tbl[] = {
[ID_HMC425A] = {
.name = "hmc425a",
.channels = hmc425a_channels,
@@ -189,6 +310,8 @@ static struct hmc425a_chip_info hmc425a_chip_info_tbl[] = {
.gain_min = -31500,
.gain_max = 0,
.default_gain = -0x40, /* set default gain -31.5db*/
+ .gain_dB_to_code = hmc425a_gain_dB_to_code,
+ .code_to_gain_dB = hmc425a_code_to_gain_dB,
},
[ID_HMC540S] = {
.name = "hmc540s",
@@ -198,6 +321,8 @@ static struct hmc425a_chip_info hmc425a_chip_info_tbl[] = {
.gain_min = -15000,
.gain_max = 0,
.default_gain = -0x10, /* set default gain -15.0db*/
+ .gain_dB_to_code = hmc540s_gain_dB_to_code,
+ .code_to_gain_dB = hmc540s_code_to_gain_dB,
},
[ID_ADRF5740] = {
.name = "adrf5740",
@@ -207,6 +332,21 @@ static struct hmc425a_chip_info hmc425a_chip_info_tbl[] = {
.gain_min = -22000,
.gain_max = 0,
.default_gain = 0xF, /* set default gain -22.0db*/
+ .gain_dB_to_code = adrf5740_gain_dB_to_code,
+ .code_to_gain_dB = adrf5740_code_to_gain_dB,
+ },
+ [ID_LTC6373] = {
+ .name = "ltc6373",
+ .channels = ltc6373_channels,
+ .num_channels = ARRAY_SIZE(ltc6373_channels),
+ .num_gpios = 3,
+ .gain_min = -12041, /* gain setting x0.25*/
+ .gain_max = 24082, /* gain setting x16 */
+ .default_gain = LTC6373_MIN_GAIN_CODE,
+ .powerdown_val = LTC6373_SHUTDOWN,
+ .has_powerdown = true,
+ .gain_dB_to_code = ltc6373_gain_dB_to_code,
+ .code_to_gain_dB = ltc6373_code_to_gain_dB,
},
};
@@ -221,9 +361,8 @@ static int hmc425a_probe(struct platform_device *pdev)
return -ENOMEM;
st = iio_priv(indio_dev);
- st->type = (uintptr_t)device_get_match_data(&pdev->dev);
- st->chip_info = &hmc425a_chip_info_tbl[st->type];
+ st->chip_info = device_get_match_data(&pdev->dev);
indio_dev->num_channels = st->chip_info->num_channels;
indio_dev->channels = st->chip_info->channels;
indio_dev->name = st->chip_info->name;
@@ -249,12 +388,31 @@ static int hmc425a_probe(struct platform_device *pdev)
indio_dev->info = &hmc425a_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- /* Set default gain */
- hmc425a_write(indio_dev, st->gain);
+ if (st->chip_info->has_powerdown) {
+ st->powerdown = true;
+ hmc425a_write(indio_dev, st->chip_info->powerdown_val);
+ } else {
+ /* Set default gain */
+ hmc425a_write(indio_dev, st->gain);
+ }
return devm_iio_device_register(&pdev->dev, indio_dev);
}
+/* Match table for of_platform binding */
+static const struct of_device_id hmc425a_of_match[] = {
+ { .compatible = "adi,hmc425a",
+ .data = &hmc425a_chip_info_tbl[ID_HMC425A]},
+ { .compatible = "adi,hmc540s",
+ .data = &hmc425a_chip_info_tbl[ID_HMC540S]},
+ { .compatible = "adi,adrf5740",
+ .data = &hmc425a_chip_info_tbl[ID_ADRF5740]},
+ { .compatible = "adi,ltc6373",
+ .data = &hmc425a_chip_info_tbl[ID_LTC6373]},
+ {}
+};
+MODULE_DEVICE_TABLE(of, hmc425a_of_match);
+
static struct platform_driver hmc425a_driver = {
.driver = {
.name = KBUILD_MODNAME,
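
Aside: a quick arithmetic check of the LTC6373 gain/code mapping added above
(plain C, not part of the patch). It reproduces LTC6373_MIN_GAIN_CODE (0x6)
for the x0.25 setting and code 0x2 for the x4 setting:

	#include <stdio.h>

	#define CONV 6020	/* milli-dB per gain doubling, as in the comment above */

	/* DIV_ROUND_CLOSEST() equivalent for the values used here */
	static int db_to_code(int gain_mdb)
	{
		int steps = (gain_mdb + (gain_mdb < 0 ? -CONV / 2 : CONV / 2)) / CONV;

		return ~(steps + 3) & 0x7;
	}

	int main(void)
	{
		printf("x0.25 (-12.041 dB) -> code %#x\n", (unsigned int)db_to_code(-12041));
		printf("x4    (+12.041 dB) -> code %#x\n", (unsigned int)db_to_code(12041));
		return 0;
	}
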
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 5f85ba38e6f6e..a18c1da292af2 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -159,7 +159,7 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
* Once done using the buffer iio_dmaengine_buffer_free() should be used to
* release it.
*/
-static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
+struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
const char *channel)
{
struct dmaengine_buffer *dmaengine_buffer;
@@ -210,6 +210,7 @@ err_free:
kfree(dmaengine_buffer);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_alloc, IIO_DMAENGINE_BUFFER);
/**
* iio_dmaengine_buffer_free() - Free dmaengine buffer
@@ -217,7 +218,7 @@ err_free:
*
* Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
*/
-static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
+void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
struct dmaengine_buffer *dmaengine_buffer =
iio_buffer_to_dmaengine_buffer(buffer);
@@ -227,6 +228,7 @@ static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
iio_buffer_put(buffer);
}
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);
static void __devm_iio_dmaengine_buffer_free(void *buffer)
{
@@ -279,8 +281,7 @@ int devm_iio_dmaengine_buffer_setup(struct device *dev,
{
struct iio_buffer *buffer;
- buffer = devm_iio_dmaengine_buffer_alloc(indio_dev->dev.parent,
- channel);
+ buffer = devm_iio_dmaengine_buffer_alloc(dev, channel);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
@@ -288,7 +289,7 @@ int devm_iio_dmaengine_buffer_setup(struct device *dev,
return iio_device_attach_buffer(indio_dev, buffer);
}
-EXPORT_SYMBOL_GPL(devm_iio_dmaengine_buffer_setup);
+EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
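
Aside: with iio_dmaengine_buffer_alloc()/iio_dmaengine_buffer_free() and
devm_iio_dmaengine_buffer_setup() now exported in the IIO_DMAENGINE_BUFFER
symbol namespace, a consumer module has to import that namespace explicitly.
A minimal sketch of what the consumer side needs:

	#include <linux/iio/buffer-dmaengine.h>
	#include <linux/module.h>

	/* ... probe code calling devm_iio_dmaengine_buffer_setup() ... */

	MODULE_IMPORT_NS(IIO_DMAENGINE_BUFFER);
	MODULE_LICENSE("GPL");
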
diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c
index b5cf15a515d25..43025866d5b79 100644
--- a/drivers/iio/chemical/pms7003.c
+++ b/drivers/iio/chemical/pms7003.c
@@ -211,8 +211,8 @@ static bool pms7003_frame_is_okay(struct pms7003_frame *frame)
return checksum == pms7003_calc_checksum(frame);
}
-static ssize_t pms7003_receive_buf(struct serdev_device *serdev, const u8 *buf,
- size_t size)
+static size_t pms7003_receive_buf(struct serdev_device *serdev, const u8 *buf,
+ size_t size)
{
struct iio_dev *indio_dev = serdev_device_get_drvdata(serdev);
struct pms7003_state *state = iio_priv(indio_dev);
diff --git a/drivers/iio/chemical/scd30_serial.c b/drivers/iio/chemical/scd30_serial.c
index a47654591e555..2adb76dbb0209 100644
--- a/drivers/iio/chemical/scd30_serial.c
+++ b/drivers/iio/chemical/scd30_serial.c
@@ -174,8 +174,8 @@ static int scd30_serdev_command(struct scd30_state *state, enum scd30_cmd cmd, u
return 0;
}
-static ssize_t scd30_serdev_receive_buf(struct serdev_device *serdev,
- const u8 *buf, size_t size)
+static size_t scd30_serdev_receive_buf(struct serdev_device *serdev,
+ const u8 *buf, size_t size)
{
struct iio_dev *indio_dev = serdev_device_get_drvdata(serdev);
struct scd30_serdev_priv *priv;
diff --git a/drivers/iio/chemical/sps30_serial.c b/drivers/iio/chemical/sps30_serial.c
index 3afa89f8acc32..a6dfbe28c914c 100644
--- a/drivers/iio/chemical/sps30_serial.c
+++ b/drivers/iio/chemical/sps30_serial.c
@@ -210,8 +210,8 @@ static int sps30_serial_command(struct sps30_state *state, unsigned char cmd,
return rsp_size;
}
-static ssize_t sps30_serial_receive_buf(struct serdev_device *serdev,
- const u8 *buf, size_t size)
+static size_t sps30_serial_receive_buf(struct serdev_device *serdev,
+ const u8 *buf, size_t size)
{
struct iio_dev *indio_dev = dev_get_drvdata(&serdev->dev);
struct sps30_serial_priv *priv;
diff --git a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
index 03823ee57f598..3b0f9598a7c77 100644
--- a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
+++ b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
@@ -126,7 +126,7 @@ void inv_sensors_timestamp_interrupt(struct inv_sensors_timestamp *ts,
struct inv_sensors_timestamp_interval *it;
int64_t delta, interval;
const uint32_t fifo_mult = fifo_period / ts->chip.clock_period;
- uint32_t period = ts->period;
+ uint32_t period;
bool valid = false;
if (fifo_nb == 0)
diff --git a/drivers/iio/dac/mcp4821.c b/drivers/iio/dac/mcp4821.c
index 8a0480d338450..782e8f6b77829 100644
--- a/drivers/iio/dac/mcp4821.c
+++ b/drivers/iio/dac/mcp4821.c
@@ -17,7 +17,7 @@
*/
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
diff --git a/drivers/iio/dummy/iio_dummy_evgen.c b/drivers/iio/dummy/iio_dummy_evgen.c
index 5a0072727ba4b..16d3f144dda04 100644
--- a/drivers/iio/dummy/iio_dummy_evgen.c
+++ b/drivers/iio/dummy/iio_dummy_evgen.c
@@ -31,8 +31,6 @@
* @regs: irq regs we are faking
* @lock: protect the evgen state
* @inuse: mask of which irqs are connected
- * @irq_sim: interrupt simulator
- * @base: base of irq range
* @irq_sim_domain: irq simulator domain
*/
struct iio_dummy_eventgen {
diff --git a/drivers/iio/dummy/iio_simple_dummy.c b/drivers/iio/dummy/iio_simple_dummy.c
index c24f609c2ade6..09efacaf8f78d 100644
--- a/drivers/iio/dummy/iio_simple_dummy.c
+++ b/drivers/iio/dummy/iio_simple_dummy.c
@@ -283,65 +283,63 @@ static int iio_dummy_read_raw(struct iio_dev *indio_dev,
long mask)
{
struct iio_dummy_state *st = iio_priv(indio_dev);
- int ret = -EINVAL;
- mutex_lock(&st->lock);
switch (mask) {
case IIO_CHAN_INFO_RAW: /* magic value - channel value read */
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->output) {
- /* Set integer part to cached value */
- *val = st->dac_val;
- ret = IIO_VAL_INT;
- } else if (chan->differential) {
- if (chan->channel == 1)
- *val = st->differential_adc_val[0];
- else
- *val = st->differential_adc_val[1];
- ret = IIO_VAL_INT;
- } else {
- *val = st->single_ended_adc_val;
- ret = IIO_VAL_INT;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ guard(mutex)(&st->lock);
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->output) {
+ /* Set integer part to cached value */
+ *val = st->dac_val;
+ return IIO_VAL_INT;
+ } else if (chan->differential) {
+ if (chan->channel == 1)
+ *val = st->differential_adc_val[0];
+ else
+ *val = st->differential_adc_val[1];
+ return IIO_VAL_INT;
+ } else {
+ *val = st->single_ended_adc_val;
+ return IIO_VAL_INT;
+ }
+
+ case IIO_ACCEL:
+ *val = st->accel_val;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
}
- break;
- case IIO_ACCEL:
- *val = st->accel_val;
- ret = IIO_VAL_INT;
- break;
- default:
- break;
}
- break;
+ unreachable();
case IIO_CHAN_INFO_PROCESSED:
- switch (chan->type) {
- case IIO_STEPS:
- *val = st->steps;
- ret = IIO_VAL_INT;
- break;
- case IIO_ACTIVITY:
- switch (chan->channel2) {
- case IIO_MOD_RUNNING:
- *val = st->activity_running;
- ret = IIO_VAL_INT;
- break;
- case IIO_MOD_WALKING:
- *val = st->activity_walking;
- ret = IIO_VAL_INT;
- break;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ guard(mutex)(&st->lock);
+ switch (chan->type) {
+ case IIO_STEPS:
+ *val = st->steps;
+ return IIO_VAL_INT;
+ case IIO_ACTIVITY:
+ switch (chan->channel2) {
+ case IIO_MOD_RUNNING:
+ *val = st->activity_running;
+ return IIO_VAL_INT;
+ case IIO_MOD_WALKING:
+ *val = st->activity_walking;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
default:
- break;
+ return -EINVAL;
}
- break;
- default:
- break;
}
- break;
+ unreachable();
case IIO_CHAN_INFO_OFFSET:
/* only single ended adc -> 7 */
*val = 7;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
@@ -350,60 +348,57 @@ static int iio_dummy_read_raw(struct iio_dev *indio_dev,
/* only single ended adc -> 0.001333 */
*val = 0;
*val2 = 1333;
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
+ return IIO_VAL_INT_PLUS_MICRO;
case 1:
/* all differential adc -> 0.000001344 */
*val = 0;
*val2 = 1344;
- ret = IIO_VAL_INT_PLUS_NANO;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
}
- break;
default:
- break;
+ return -EINVAL;
}
- break;
- case IIO_CHAN_INFO_CALIBBIAS:
+ case IIO_CHAN_INFO_CALIBBIAS: {
+ guard(mutex)(&st->lock);
/* only the acceleration axis - read from cache */
*val = st->accel_calibbias;
- ret = IIO_VAL_INT;
- break;
- case IIO_CHAN_INFO_CALIBSCALE:
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_CALIBSCALE: {
+ guard(mutex)(&st->lock);
*val = st->accel_calibscale->val;
*val2 = st->accel_calibscale->val2;
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
case IIO_CHAN_INFO_SAMP_FREQ:
*val = 3;
*val2 = 33;
- ret = IIO_VAL_INT_PLUS_NANO;
- break;
- case IIO_CHAN_INFO_ENABLE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_ENABLE: {
+ guard(mutex)(&st->lock);
switch (chan->type) {
case IIO_STEPS:
*val = st->steps_enabled;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
default:
- break;
+ return -EINVAL;
}
- break;
- case IIO_CHAN_INFO_CALIBHEIGHT:
+ }
+ case IIO_CHAN_INFO_CALIBHEIGHT: {
+ guard(mutex)(&st->lock);
switch (chan->type) {
case IIO_STEPS:
*val = st->height;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
default:
- break;
+ return -EINVAL;
}
- break;
-
+ }
default:
- break;
+ return -EINVAL;
}
- mutex_unlock(&st->lock);
- return ret;
}
/**
@@ -426,7 +421,6 @@ static int iio_dummy_write_raw(struct iio_dev *indio_dev,
long mask)
{
int i;
- int ret = 0;
struct iio_dummy_state *st = iio_priv(indio_dev);
switch (mask) {
@@ -436,10 +430,10 @@ static int iio_dummy_write_raw(struct iio_dev *indio_dev,
if (chan->output == 0)
return -EINVAL;
- /* Locking not required as writing single value */
- mutex_lock(&st->lock);
- st->dac_val = val;
- mutex_unlock(&st->lock);
+ scoped_guard(mutex, &st->lock) {
+ /* Single value write; the lock is held only for consistency */
+ st->dac_val = val;
+ }
return 0;
default:
return -EINVAL;
@@ -447,9 +441,9 @@ static int iio_dummy_write_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
case IIO_STEPS:
- mutex_lock(&st->lock);
- st->steps = val;
- mutex_unlock(&st->lock);
+ scoped_guard(mutex, &st->lock) {
+ st->steps = val;
+ }
return 0;
case IIO_ACTIVITY:
if (val < 0)
@@ -470,30 +464,29 @@ static int iio_dummy_write_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
- case IIO_CHAN_INFO_CALIBSCALE:
- mutex_lock(&st->lock);
+ case IIO_CHAN_INFO_CALIBSCALE: {
+ guard(mutex)(&st->lock);
/* Compare against table - hard matching here */
for (i = 0; i < ARRAY_SIZE(dummy_scales); i++)
if (val == dummy_scales[i].val &&
val2 == dummy_scales[i].val2)
break;
if (i == ARRAY_SIZE(dummy_scales))
- ret = -EINVAL;
- else
- st->accel_calibscale = &dummy_scales[i];
- mutex_unlock(&st->lock);
- return ret;
+ return -EINVAL;
+ st->accel_calibscale = &dummy_scales[i];
+ return 0;
+ }
case IIO_CHAN_INFO_CALIBBIAS:
- mutex_lock(&st->lock);
- st->accel_calibbias = val;
- mutex_unlock(&st->lock);
+ scoped_guard(mutex, &st->lock) {
+ st->accel_calibbias = val;
+ }
return 0;
case IIO_CHAN_INFO_ENABLE:
switch (chan->type) {
case IIO_STEPS:
- mutex_lock(&st->lock);
- st->steps_enabled = val;
- mutex_unlock(&st->lock);
+ scoped_guard(mutex, &st->lock) {
+ st->steps_enabled = val;
+ }
return 0;
default:
return -EINVAL;
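
Aside: the rewrite above relies on the scope-based helpers from
<linux/cleanup.h>. A minimal sketch of the two forms it uses: guard() holds
the mutex until the end of the enclosing function scope, scoped_guard() only
for the braced block (names below are hypothetical):

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_lock);
	static int example_val;

	static int example_read(int *val)
	{
		guard(mutex)(&example_lock);	/* dropped on any return path */
		*val = example_val;
		return 0;
	}

	static void example_write(int val)
	{
		scoped_guard(mutex, &example_lock) {
			example_val = val;
		}				/* dropped here */
	}
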
diff --git a/drivers/iio/frequency/Kconfig b/drivers/iio/frequency/Kconfig
index 9e85dfa585081..c455be7d4a1c8 100644
--- a/drivers/iio/frequency/Kconfig
+++ b/drivers/iio/frequency/Kconfig
@@ -60,6 +60,16 @@ config ADF4377
To compile this driver as a module, choose M here: the
module will be called adf4377.
+config ADMFM2000
+ tristate "Analog Devices ADMFM2000 Dual Microwave Down Converter"
+ depends on GPIOLIB
+ help
+ Say yes here to build support for Analog Devices ADMFM2000 Dual
+ Microwave Down Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called admfm2000.
+
config ADMV1013
tristate "Analog Devices ADMV1013 Microwave Upconverter"
depends on SPI && COMMON_CLK
diff --git a/drivers/iio/frequency/Makefile b/drivers/iio/frequency/Makefile
index b616c29b4a087..70d0e0b70e802 100644
--- a/drivers/iio/frequency/Makefile
+++ b/drivers/iio/frequency/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_AD9523) += ad9523.o
obj-$(CONFIG_ADF4350) += adf4350.o
obj-$(CONFIG_ADF4371) += adf4371.o
obj-$(CONFIG_ADF4377) += adf4377.o
+obj-$(CONFIG_ADMFM2000) += admfm2000.o
obj-$(CONFIG_ADMV1013) += admv1013.o
obj-$(CONFIG_ADMV1014) += admv1014.o
obj-$(CONFIG_ADMV4420) += admv4420.o
diff --git a/drivers/iio/frequency/admfm2000.c b/drivers/iio/frequency/admfm2000.c
new file mode 100644
index 0000000000000..c34d79e55a7c5
--- /dev/null
+++ b/drivers/iio/frequency/admfm2000.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADMFM2000 Dual Microwave Down Converter
+ *
+ * Copyright 2024 Analog Devices Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#define ADMFM2000_MIXER_MODE 0
+#define ADMFM2000_DIRECT_IF_MODE 1
+#define ADMFM2000_DSA_GPIOS 5
+#define ADMFM2000_MODE_GPIOS 2
+#define ADMFM2000_MAX_GAIN 0
+#define ADMFM2000_MIN_GAIN -31000
+#define ADMFM2000_DEFAULT_GAIN -0x20
+
+struct admfm2000_state {
+ struct mutex lock; /* protect sensor state */
+ struct gpio_desc *sw1_ch[2];
+ struct gpio_desc *sw2_ch[2];
+ struct gpio_desc *dsa1_gpios[5];
+ struct gpio_desc *dsa2_gpios[5];
+ u32 gain[2];
+};
+
+static int admfm2000_mode(struct iio_dev *indio_dev, u32 chan, u32 mode)
+{
+ struct admfm2000_state *st = iio_priv(indio_dev);
+ int i;
+
+ switch (mode) {
+ case ADMFM2000_MIXER_MODE:
+ for (i = 0; i < ADMFM2000_MODE_GPIOS; i++) {
+ gpiod_set_value_cansleep(st->sw1_ch[i], (chan == 0) ? 1 : 0);
+ gpiod_set_value_cansleep(st->sw2_ch[i], (chan == 0) ? 0 : 1);
+ }
+ return 0;
+ case ADMFM2000_DIRECT_IF_MODE:
+ for (i = 0; i < ADMFM2000_MODE_GPIOS; i++) {
+ gpiod_set_value_cansleep(st->sw1_ch[i], (chan == 0) ? 0 : 1);
+ gpiod_set_value_cansleep(st->sw2_ch[i], (chan == 0) ? 1 : 0);
+ }
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int admfm2000_attenuation(struct iio_dev *indio_dev, u32 chan, u32 value)
+{
+ struct admfm2000_state *st = iio_priv(indio_dev);
+ int i;
+
+ switch (chan) {
+ case 0:
+ for (i = 0; i < ADMFM2000_DSA_GPIOS; i++)
+ gpiod_set_value_cansleep(st->dsa1_gpios[i], value & (1 << i));
+ return 0;
+ case 1:
+ for (i = 0; i < ADMFM2000_DSA_GPIOS; i++)
+ gpiod_set_value_cansleep(st->dsa2_gpios[i], value & (1 << i));
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int admfm2000_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct admfm2000_state *st = iio_priv(indio_dev);
+ int gain;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ mutex_lock(&st->lock);
+ gain = ~(st->gain[chan->channel]) * -1000;
+ *val = gain / 1000;
+ *val2 = (gain % 1000) * 1000;
+ mutex_unlock(&st->lock);
+
+ return IIO_VAL_INT_PLUS_MICRO_DB;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int admfm2000_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct admfm2000_state *st = iio_priv(indio_dev);
+ int gain, ret;
+
+ if (val < 0)
+ gain = (val * 1000) - (val2 / 1000);
+ else
+ gain = (val * 1000) + (val2 / 1000);
+
+ if (gain > ADMFM2000_MAX_GAIN || gain < ADMFM2000_MIN_GAIN)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ mutex_lock(&st->lock);
+ st->gain[chan->channel] = ~((abs(gain) / 1000) & 0x1F);
+
+ ret = admfm2000_attenuation(indio_dev, chan->channel,
+ st->gain[chan->channel]);
+ mutex_unlock(&st->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int admfm2000_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ return IIO_VAL_INT_PLUS_MICRO_DB;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info admfm2000_info = {
+ .read_raw = &admfm2000_read_raw,
+ .write_raw = &admfm2000_write_raw,
+ .write_raw_get_fmt = &admfm2000_write_raw_get_fmt,
+};
+
+#define ADMFM2000_CHAN(_channel) { \
+ .type = IIO_VOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_HARDWAREGAIN), \
+}
+
+static const struct iio_chan_spec admfm2000_channels[] = {
+ ADMFM2000_CHAN(0),
+ ADMFM2000_CHAN(1),
+};
+
+static int admfm2000_channel_config(struct admfm2000_state *st,
+ struct iio_dev *indio_dev)
+{
+ struct platform_device *pdev = to_platform_device(indio_dev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct fwnode_handle *child;
+ struct gpio_desc **dsa;
+ struct gpio_desc **sw;
+ int ret, i;
+ bool mode;
+ u32 reg;
+
+ device_for_each_child_node(dev, child) {
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret) {
+ fwnode_handle_put(child);
+ return dev_err_probe(dev, ret,
+ "Failed to get reg property\n");
+ }
+
+ if (reg >= indio_dev->num_channels) {
+ fwnode_handle_put(child);
+ return dev_err_probe(dev, -EINVAL, "reg bigger than: %d\n",
+ indio_dev->num_channels);
+ }
+
+ if (fwnode_property_present(child, "adi,mixer-mode"))
+ mode = ADMFM2000_MIXER_MODE;
+ else
+ mode = ADMFM2000_DIRECT_IF_MODE;
+
+ switch (reg) {
+ case 0:
+ sw = st->sw1_ch;
+ dsa = st->dsa1_gpios;
+ break;
+ case 1:
+ sw = st->sw2_ch;
+ dsa = st->dsa2_gpios;
+ break;
+ default:
+ fwnode_handle_put(child);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ADMFM2000_MODE_GPIOS; i++) {
+ sw[i] = devm_fwnode_gpiod_get_index(dev, child, "switch",
+ i, GPIOD_OUT_LOW, NULL);
+ if (IS_ERR(sw[i])) {
+ fwnode_handle_put(child);
+ return dev_err_probe(dev, PTR_ERR(sw[i]),
+ "Failed to get gpios\n");
+ }
+ }
+
+ for (i = 0; i < ADMFM2000_DSA_GPIOS; i++) {
+ dsa[i] = devm_fwnode_gpiod_get_index(dev, child,
+ "attenuation", i,
+ GPIOD_OUT_LOW, NULL);
+ if (IS_ERR(dsa[i])) {
+ fwnode_handle_put(child);
+ return dev_err_probe(dev, PTR_ERR(dsa[i]),
+ "Failed to get gpios\n");
+ }
+ }
+
+ ret = admfm2000_mode(indio_dev, reg, mode);
+ if (ret) {
+ fwnode_handle_put(child);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int admfm2000_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct admfm2000_state *st;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ indio_dev->name = "admfm2000";
+ indio_dev->num_channels = ARRAY_SIZE(admfm2000_channels);
+ indio_dev->channels = admfm2000_channels;
+ indio_dev->info = &admfm2000_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ st->gain[0] = ADMFM2000_DEFAULT_GAIN;
+ st->gain[1] = ADMFM2000_DEFAULT_GAIN;
+
+ mutex_init(&st->lock);
+
+ ret = admfm2000_channel_config(st, indio_dev);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id admfm2000_of_match[] = {
+ { .compatible = "adi,admfm2000" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, admfm2000_of_match);
+
+static struct platform_driver admfm2000_driver = {
+ .driver = {
+ .name = "admfm2000",
+ .of_match_table = admfm2000_of_match,
+ },
+ .probe = admfm2000_probe,
+};
+module_platform_driver(admfm2000_driver);
+
+MODULE_AUTHOR("Kim Seer Paller <kimseer.paller@analog.com>");
+MODULE_DESCRIPTION("ADMFM2000 Dual Microwave Down Converter");
+MODULE_LICENSE("GPL");
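
Aside: a small arithmetic check (plain C, not part of the driver) of the
inverted 5-bit DSA code used by admfm2000_write_raw()/admfm2000_read_raw()
above, for a requested gain of -10 dB:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		int gain_mdb = -10000;				/* -10 dB */
		unsigned int code = ~((abs(gain_mdb) / 1000) & 0x1F);

		/* low 5 bits drive the attenuation GPIOs; read_raw() inverts back */
		int back = (int)~code * -1000;

		printf("GPIO pattern 0x%02x -> %d mdB\n", code & 0x1F, back);
		return 0;
	}
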
diff --git a/drivers/iio/gyro/bmg160_i2c.c b/drivers/iio/gyro/bmg160_i2c.c
index 2f9675596138b..9c8e20c25e96b 100644
--- a/drivers/iio/gyro/bmg160_i2c.c
+++ b/drivers/iio/gyro/bmg160_i2c.c
@@ -3,7 +3,7 @@
#include <linux/regmap.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include "bmg160.h"
@@ -66,7 +66,7 @@ MODULE_DEVICE_TABLE(of, bmg160_of_match);
static struct i2c_driver bmg160_i2c_driver = {
.driver = {
.name = "bmg160_i2c",
- .acpi_match_table = ACPI_PTR(bmg160_acpi_match),
+ .acpi_match_table = bmg160_acpi_match,
.of_match_table = bmg160_of_match,
.pm = &bmg160_pm_ops,
},
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index df3bc5c3d3786..1dbe48dae74ee 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -346,6 +346,13 @@ err:
return IRQ_HANDLED;
}
+static void afe4403_regulator_disable(void *data)
+{
+ struct regulator *regulator = data;
+
+ regulator_disable(regulator);
+}
+
#define AFE4403_TIMING_PAIRS \
{ AFE440X_LED2STC, 0x000050 }, \
{ AFE440X_LED2ENDC, 0x0003e7 }, \
@@ -495,19 +502,24 @@ static int afe4403_probe(struct spi_device *spi)
dev_err(afe->dev, "Unable to enable regulator\n");
return ret;
}
+ ret = devm_add_action_or_reset(afe->dev, afe4403_regulator_disable, afe->regulator);
+ if (ret) {
+ dev_err(afe->dev, "Unable to add regulator disable action\n");
+ return ret;
+ }
ret = regmap_write(afe->regmap, AFE440X_CONTROL0,
AFE440X_CONTROL0_SW_RESET);
if (ret) {
dev_err(afe->dev, "Unable to reset device\n");
- goto err_disable_reg;
+ return ret;
}
ret = regmap_multi_reg_write(afe->regmap, afe4403_reg_sequences,
ARRAY_SIZE(afe4403_reg_sequences));
if (ret) {
dev_err(afe->dev, "Unable to set register defaults\n");
- goto err_disable_reg;
+ return ret;
}
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -523,16 +535,15 @@ static int afe4403_probe(struct spi_device *spi)
iio_device_id(indio_dev));
if (!afe->trig) {
dev_err(afe->dev, "Unable to allocate IIO trigger\n");
- ret = -ENOMEM;
- goto err_disable_reg;
+ return -ENOMEM;
}
iio_trigger_set_drvdata(afe->trig, indio_dev);
- ret = iio_trigger_register(afe->trig);
+ ret = devm_iio_trigger_register(afe->dev, afe->trig);
if (ret) {
dev_err(afe->dev, "Unable to register IIO trigger\n");
- goto err_disable_reg;
+ return ret;
}
ret = devm_request_threaded_irq(afe->dev, afe->irq,
@@ -542,52 +553,25 @@ static int afe4403_probe(struct spi_device *spi)
afe->trig);
if (ret) {
dev_err(afe->dev, "Unable to request IRQ\n");
- goto err_trig;
+ return ret;
}
}
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- afe4403_trigger_handler, NULL);
+ ret = devm_iio_triggered_buffer_setup(afe->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ afe4403_trigger_handler, NULL);
if (ret) {
dev_err(afe->dev, "Unable to setup buffer\n");
- goto err_trig;
+ return ret;
}
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(afe->dev, indio_dev);
if (ret) {
dev_err(afe->dev, "Unable to register IIO device\n");
- goto err_buff;
+ return ret;
}
return 0;
-
-err_buff:
- iio_triggered_buffer_cleanup(indio_dev);
-err_trig:
- if (afe->irq > 0)
- iio_trigger_unregister(afe->trig);
-err_disable_reg:
- regulator_disable(afe->regulator);
-
- return ret;
-}
-
-static void afe4403_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct afe4403_data *afe = iio_priv(indio_dev);
- int ret;
-
- iio_device_unregister(indio_dev);
-
- iio_triggered_buffer_cleanup(indio_dev);
-
- if (afe->irq > 0)
- iio_trigger_unregister(afe->trig);
-
- ret = regulator_disable(afe->regulator);
- if (ret)
- dev_warn(afe->dev, "Unable to disable regulator\n");
}
static const struct spi_device_id afe4403_ids[] = {
@@ -603,7 +587,6 @@ static struct spi_driver afe4403_spi_driver = {
.pm = pm_sleep_ptr(&afe4403_pm_ops),
},
.probe = afe4403_probe,
- .remove = afe4403_remove,
.id_table = afe4403_ids,
};
module_spi_driver(afe4403_spi_driver);
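
Aside: both AFE440x conversions hinge on devm_add_action_or_reset() running
the disable callback automatically on probe failure and on device removal,
which is what lets the error-unwinding labels and the .remove() callback go
away. A minimal sketch of that pattern (hypothetical names):

	#include <linux/device.h>
	#include <linux/regulator/consumer.h>

	static void example_regulator_disable(void *data)
	{
		regulator_disable(data);
	}

	static int example_enable_regulator(struct device *dev, struct regulator *reg)
	{
		int ret;

		ret = regulator_enable(reg);
		if (ret)
			return ret;

		/* disables the regulator on probe error or on unbind */
		return devm_add_action_or_reset(dev, example_regulator_disable, reg);
	}
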
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index ede1e82013118..7768b07ef7a6f 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -349,6 +349,13 @@ err:
return IRQ_HANDLED;
}
+static void afe4404_regulator_disable(void *data)
+{
+ struct regulator *regulator = data;
+
+ regulator_disable(regulator);
+}
+
/* Default timings from data-sheet */
#define AFE4404_TIMING_PAIRS \
{ AFE440X_PRPCOUNT, 39999 }, \
@@ -502,19 +509,24 @@ static int afe4404_probe(struct i2c_client *client)
dev_err(afe->dev, "Unable to enable regulator\n");
return ret;
}
+ ret = devm_add_action_or_reset(afe->dev, afe4404_regulator_disable, afe->regulator);
+ if (ret) {
+ dev_err(afe->dev, "Unable to add regulator disable action\n");
+ return ret;
+ }
ret = regmap_write(afe->regmap, AFE440X_CONTROL0,
AFE440X_CONTROL0_SW_RESET);
if (ret) {
dev_err(afe->dev, "Unable to reset device\n");
- goto disable_reg;
+ return ret;
}
ret = regmap_multi_reg_write(afe->regmap, afe4404_reg_sequences,
ARRAY_SIZE(afe4404_reg_sequences));
if (ret) {
dev_err(afe->dev, "Unable to set register defaults\n");
- goto disable_reg;
+ return ret;
}
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -530,16 +542,15 @@ static int afe4404_probe(struct i2c_client *client)
iio_device_id(indio_dev));
if (!afe->trig) {
dev_err(afe->dev, "Unable to allocate IIO trigger\n");
- ret = -ENOMEM;
- goto disable_reg;
+ return -ENOMEM;
}
iio_trigger_set_drvdata(afe->trig, indio_dev);
- ret = iio_trigger_register(afe->trig);
+ ret = devm_iio_trigger_register(afe->dev, afe->trig);
if (ret) {
dev_err(afe->dev, "Unable to register IIO trigger\n");
- goto disable_reg;
+ return ret;
}
ret = devm_request_threaded_irq(afe->dev, afe->irq,
@@ -549,52 +560,25 @@ static int afe4404_probe(struct i2c_client *client)
afe->trig);
if (ret) {
dev_err(afe->dev, "Unable to request IRQ\n");
- goto disable_reg;
+ return ret;
}
}
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- afe4404_trigger_handler, NULL);
+ ret = devm_iio_triggered_buffer_setup(afe->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ afe4404_trigger_handler, NULL);
if (ret) {
dev_err(afe->dev, "Unable to setup buffer\n");
- goto unregister_trigger;
+ return ret;
}
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(afe->dev, indio_dev);
if (ret) {
dev_err(afe->dev, "Unable to register IIO device\n");
- goto unregister_triggered_buffer;
+ return ret;
}
return 0;
-
-unregister_triggered_buffer:
- iio_triggered_buffer_cleanup(indio_dev);
-unregister_trigger:
- if (afe->irq > 0)
- iio_trigger_unregister(afe->trig);
-disable_reg:
- regulator_disable(afe->regulator);
-
- return ret;
-}
-
-static void afe4404_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct afe4404_data *afe = iio_priv(indio_dev);
- int ret;
-
- iio_device_unregister(indio_dev);
-
- iio_triggered_buffer_cleanup(indio_dev);
-
- if (afe->irq > 0)
- iio_trigger_unregister(afe->trig);
-
- ret = regulator_disable(afe->regulator);
- if (ret)
- dev_err(afe->dev, "Unable to disable regulator\n");
}
static const struct i2c_device_id afe4404_ids[] = {
@@ -610,7 +594,6 @@ static struct i2c_driver afe4404_i2c_driver = {
.pm = pm_sleep_ptr(&afe4404_pm_ops),
},
.probe = afe4404_probe,
- .remove = afe4404_remove,
.id_table = afe4404_ids,
};
module_i2c_driver(afe4404_i2c_driver);
diff --git a/drivers/iio/humidity/hdc3020.c b/drivers/iio/humidity/hdc3020.c
index ed70415512f68..1e5d0d4797b16 100644
--- a/drivers/iio/humidity/hdc3020.c
+++ b/drivers/iio/humidity/hdc3020.c
@@ -5,41 +5,66 @@
*
* Copyright (C) 2023
*
+ * Copyright (C) 2024 Liebherr-Electronics and Drives GmbH
+ *
* Datasheet: https://www.ti.com/lit/ds/symlink/hdc3020.pdf
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/units.h>
#include <asm/unaligned.h>
+#include <linux/iio/events.h>
#include <linux/iio/iio.h>
-#define HDC3020_HEATER_CMD_MSB 0x30 /* shared by all heater commands */
-#define HDC3020_HEATER_ENABLE 0x6D
-#define HDC3020_HEATER_DISABLE 0x66
-#define HDC3020_HEATER_CONFIG 0x6E
+#define HDC3020_S_AUTO_10HZ_MOD0 0x2737
+#define HDC3020_S_STATUS 0x3041
+#define HDC3020_HEATER_DISABLE 0x3066
+#define HDC3020_HEATER_ENABLE 0x306D
+#define HDC3020_HEATER_CONFIG 0x306E
+#define HDC3020_EXIT_AUTO 0x3093
+#define HDC3020_S_T_RH_THRESH_LOW 0x6100
+#define HDC3020_S_T_RH_THRESH_LOW_CLR 0x610B
+#define HDC3020_S_T_RH_THRESH_HIGH_CLR 0x6116
+#define HDC3020_S_T_RH_THRESH_HIGH 0x611D
+#define HDC3020_R_T_RH_AUTO 0xE000
+#define HDC3020_R_T_LOW_AUTO 0xE002
+#define HDC3020_R_T_HIGH_AUTO 0xE003
+#define HDC3020_R_RH_LOW_AUTO 0xE004
+#define HDC3020_R_RH_HIGH_AUTO 0xE005
+#define HDC3020_R_T_RH_THRESH_LOW 0xE102
+#define HDC3020_R_T_RH_THRESH_LOW_CLR 0xE109
+#define HDC3020_R_T_RH_THRESH_HIGH_CLR 0xE114
+#define HDC3020_R_T_RH_THRESH_HIGH 0xE11F
+#define HDC3020_R_STATUS 0xF32D
+
+#define HDC3020_THRESH_TEMP_MASK GENMASK(8, 0)
+#define HDC3020_THRESH_TEMP_TRUNC_SHIFT 7
+#define HDC3020_THRESH_HUM_MASK GENMASK(15, 9)
+#define HDC3020_THRESH_HUM_TRUNC_SHIFT 9
+
+#define HDC3020_STATUS_T_LOW_ALERT BIT(6)
+#define HDC3020_STATUS_T_HIGH_ALERT BIT(7)
+#define HDC3020_STATUS_RH_LOW_ALERT BIT(8)
+#define HDC3020_STATUS_RH_HIGH_ALERT BIT(9)
#define HDC3020_READ_RETRY_TIMES 10
#define HDC3020_BUSY_DELAY_MS 10
#define HDC3020_CRC8_POLYNOMIAL 0x31
-static const u8 HDC3020_S_AUTO_10HZ_MOD0[2] = { 0x27, 0x37 };
-
-static const u8 HDC3020_EXIT_AUTO[2] = { 0x30, 0x93 };
-
-static const u8 HDC3020_R_T_RH_AUTO[2] = { 0xE0, 0x00 };
-static const u8 HDC3020_R_T_LOW_AUTO[2] = { 0xE0, 0x02 };
-static const u8 HDC3020_R_T_HIGH_AUTO[2] = { 0xE0, 0x03 };
-static const u8 HDC3020_R_RH_LOW_AUTO[2] = { 0xE0, 0x04 };
-static const u8 HDC3020_R_RH_HIGH_AUTO[2] = { 0xE0, 0x05 };
+#define HDC3020_MIN_TEMP -40
+#define HDC3020_MAX_TEMP 125
struct hdc3020_data {
struct i2c_client *client;
@@ -54,18 +79,37 @@ struct hdc3020_data {
static const int hdc3020_heater_vals[] = {0, 1, 0x3FFF};
+static const struct iio_event_spec hdc3020_t_rh_event[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_HYSTERESIS),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_HYSTERESIS),
+ },
+};
+
static const struct iio_chan_spec hdc3020_channels[] = {
{
.type = IIO_TEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_PEAK) |
BIT(IIO_CHAN_INFO_TROUGH) | BIT(IIO_CHAN_INFO_OFFSET),
+ .event_spec = hdc3020_t_rh_event,
+ .num_event_specs = ARRAY_SIZE(hdc3020_t_rh_event),
},
{
.type = IIO_HUMIDITYRELATIVE,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_PEAK) |
BIT(IIO_CHAN_INFO_TROUGH),
+ .event_spec = hdc3020_t_rh_event,
+ .num_event_specs = ARRAY_SIZE(hdc3020_t_rh_event),
},
{
/*
@@ -82,7 +126,7 @@ static const struct iio_chan_spec hdc3020_channels[] = {
DECLARE_CRC8_TABLE(hdc3020_crc8_table);
-static int hdc3020_write_bytes(struct hdc3020_data *data, const u8 *buf, u8 len)
+static int hdc3020_write_bytes(struct hdc3020_data *data, u8 *buf, u8 len)
{
struct i2c_client *client = data->client;
struct i2c_msg msg;
@@ -90,7 +134,7 @@ static int hdc3020_write_bytes(struct hdc3020_data *data, const u8 *buf, u8 len)
msg.addr = client->addr;
msg.flags = 0;
- msg.buf = (char *)buf;
+ msg.buf = buf;
msg.len = len;
/*
@@ -109,26 +153,28 @@ static int hdc3020_write_bytes(struct hdc3020_data *data, const u8 *buf, u8 len)
return -ETIMEDOUT;
}
-static int hdc3020_read_bytes(struct hdc3020_data *data, const u8 *buf,
- void *val, int len)
+static
+int hdc3020_read_bytes(struct hdc3020_data *data, u16 reg, u8 *buf, int len)
{
+ u8 reg_buf[2];
int ret, cnt;
struct i2c_client *client = data->client;
struct i2c_msg msg[2] = {
[0] = {
.addr = client->addr,
.flags = 0,
- .buf = (char *)buf,
+ .buf = reg_buf,
.len = 2,
},
[1] = {
.addr = client->addr,
.flags = I2C_M_RD,
- .buf = val,
+ .buf = buf,
.len = len,
},
};
+ put_unaligned_be16(reg, reg_buf);
/*
* During the measurement process, HDC3020 will not return data.
* So wait for a while and try again
@@ -145,48 +191,12 @@ static int hdc3020_read_bytes(struct hdc3020_data *data, const u8 *buf,
return -ETIMEDOUT;
}
-static int hdc3020_read_measurement(struct hdc3020_data *data,
- enum iio_chan_type type, int *val)
-{
- u8 crc, buf[6];
- int ret;
-
- ret = hdc3020_read_bytes(data, HDC3020_R_T_RH_AUTO, buf, 6);
- if (ret < 0)
- return ret;
-
- /* CRC check of the temperature measurement */
- crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE);
- if (crc != buf[2])
- return -EINVAL;
-
- /* CRC check of the relative humidity measurement */
- crc = crc8(hdc3020_crc8_table, buf + 3, 2, CRC8_INIT_VALUE);
- if (crc != buf[5])
- return -EINVAL;
-
- if (type == IIO_TEMP)
- *val = get_unaligned_be16(buf);
- else if (type == IIO_HUMIDITYRELATIVE)
- *val = get_unaligned_be16(&buf[3]);
- else
- return -EINVAL;
-
- return 0;
-}
-
-/*
- * After exiting the automatic measurement mode or resetting, the peak
- * value will be reset to the default value
- * This method is used to get the highest temp measured during automatic
- * measurement
- */
-static int hdc3020_read_high_peak_t(struct hdc3020_data *data, int *val)
+static int hdc3020_read_be16(struct hdc3020_data *data, u16 reg)
{
u8 crc, buf[3];
int ret;
- ret = hdc3020_read_bytes(data, HDC3020_R_T_HIGH_AUTO, buf, 3);
+ ret = hdc3020_read_bytes(data, reg, buf, 3);
if (ret < 0)
return ret;
@@ -194,73 +204,43 @@ static int hdc3020_read_high_peak_t(struct hdc3020_data *data, int *val)
if (crc != buf[2])
return -EINVAL;
- *val = get_unaligned_be16(buf);
-
- return 0;
+ return get_unaligned_be16(buf);
}
-/*
- * This method is used to get the lowest temp measured during automatic
- * measurement
- */
-static int hdc3020_read_low_peak_t(struct hdc3020_data *data, int *val)
+static int hdc3020_exec_cmd(struct hdc3020_data *data, u16 reg)
{
- u8 crc, buf[3];
- int ret;
+ u8 reg_buf[2];
- ret = hdc3020_read_bytes(data, HDC3020_R_T_LOW_AUTO, buf, 3);
- if (ret < 0)
- return ret;
-
- crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE);
- if (crc != buf[2])
- return -EINVAL;
-
- *val = get_unaligned_be16(buf);
-
- return 0;
+ put_unaligned_be16(reg, reg_buf);
+ return hdc3020_write_bytes(data, reg_buf, 2);
}
-/*
- * This method is used to get the highest humidity measured during automatic
- * measurement
- */
-static int hdc3020_read_high_peak_rh(struct hdc3020_data *data, int *val)
+static int hdc3020_read_measurement(struct hdc3020_data *data,
+ enum iio_chan_type type, int *val)
{
- u8 crc, buf[3];
+ u8 crc, buf[6];
int ret;
- ret = hdc3020_read_bytes(data, HDC3020_R_RH_HIGH_AUTO, buf, 3);
+ ret = hdc3020_read_bytes(data, HDC3020_R_T_RH_AUTO, buf, 6);
if (ret < 0)
return ret;
+ /* CRC check of the temperature measurement */
crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE);
if (crc != buf[2])
return -EINVAL;
- *val = get_unaligned_be16(buf);
-
- return 0;
-}
-
-/*
- * This method is used to get the lowest humidity measured during automatic
- * measurement
- */
-static int hdc3020_read_low_peak_rh(struct hdc3020_data *data, int *val)
-{
- u8 crc, buf[3];
- int ret;
-
- ret = hdc3020_read_bytes(data, HDC3020_R_RH_LOW_AUTO, buf, 3);
- if (ret < 0)
- return ret;
-
- crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE);
- if (crc != buf[2])
+ /* CRC check of the relative humidity measurement */
+ crc = crc8(hdc3020_crc8_table, buf + 3, 2, CRC8_INIT_VALUE);
+ if (crc != buf[5])
return -EINVAL;
- *val = get_unaligned_be16(buf);
+ if (type == IIO_TEMP)
+ *val = get_unaligned_be16(buf);
+ else if (type == IIO_HUMIDITYRELATIVE)
+ *val = get_unaligned_be16(&buf[3]);
+ else
+ return -EINVAL;
return 0;
}
@@ -286,28 +266,28 @@ static int hdc3020_read_raw(struct iio_dev *indio_dev,
}
case IIO_CHAN_INFO_PEAK: {
guard(mutex)(&data->lock);
- if (chan->type == IIO_TEMP) {
- ret = hdc3020_read_high_peak_t(data, val);
- if (ret < 0)
- return ret;
- } else {
- ret = hdc3020_read_high_peak_rh(data, val);
- if (ret < 0)
- return ret;
- }
+ if (chan->type == IIO_TEMP)
+ ret = hdc3020_read_be16(data, HDC3020_R_T_HIGH_AUTO);
+ else
+ ret = hdc3020_read_be16(data, HDC3020_R_RH_HIGH_AUTO);
+
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
return IIO_VAL_INT;
}
case IIO_CHAN_INFO_TROUGH: {
guard(mutex)(&data->lock);
- if (chan->type == IIO_TEMP) {
- ret = hdc3020_read_low_peak_t(data, val);
- if (ret < 0)
- return ret;
- } else {
- ret = hdc3020_read_low_peak_rh(data, val);
- if (ret < 0)
- return ret;
- }
+ if (chan->type == IIO_TEMP)
+ ret = hdc3020_read_be16(data, HDC3020_R_T_LOW_AUTO);
+ else
+ ret = hdc3020_read_be16(data, HDC3020_R_RH_LOW_AUTO);
+
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
return IIO_VAL_INT;
}
case IIO_CHAN_INFO_SCALE:
@@ -352,23 +332,17 @@ static int hdc3020_update_heater(struct hdc3020_data *data, int val)
if (val < hdc3020_heater_vals[0] || val > hdc3020_heater_vals[2])
return -EINVAL;
- buf[0] = HDC3020_HEATER_CMD_MSB;
+ if (!val)
+ hdc3020_exec_cmd(data, HDC3020_HEATER_DISABLE);
- if (!val) {
- buf[1] = HDC3020_HEATER_DISABLE;
- return hdc3020_write_bytes(data, buf, 2);
- }
-
- buf[1] = HDC3020_HEATER_CONFIG;
+ put_unaligned_be16(HDC3020_HEATER_CONFIG, buf);
put_unaligned_be16(val & GENMASK(13, 0), &buf[2]);
buf[4] = crc8(hdc3020_crc8_table, buf + 2, 2, CRC8_INIT_VALUE);
ret = hdc3020_write_bytes(data, buf, 5);
if (ret < 0)
return ret;
- buf[1] = HDC3020_HEATER_ENABLE;
-
- return hdc3020_write_bytes(data, buf, 2);
+ return hdc3020_exec_cmd(data, HDC3020_HEATER_ENABLE);
}
static int hdc3020_write_raw(struct iio_dev *indio_dev,
@@ -389,15 +363,197 @@ static int hdc3020_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static int hdc3020_write_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct hdc3020_data *data = iio_priv(indio_dev);
+ u8 buf[5];
+ u64 tmp;
+ u16 reg;
+ int ret;
+
+ /* Supported temperature range is from -40 to 125 degrees Celsius */
+ if (val < HDC3020_MIN_TEMP || val > HDC3020_MAX_TEMP)
+ return -EINVAL;
+
+ /* Select threshold register */
+ if (info == IIO_EV_INFO_VALUE) {
+ if (dir == IIO_EV_DIR_RISING)
+ reg = HDC3020_S_T_RH_THRESH_HIGH;
+ else
+ reg = HDC3020_S_T_RH_THRESH_LOW;
+ } else {
+ if (dir == IIO_EV_DIR_RISING)
+ reg = HDC3020_S_T_RH_THRESH_HIGH_CLR;
+ else
+ reg = HDC3020_S_T_RH_THRESH_LOW_CLR;
+ }
+
+ guard(mutex)(&data->lock);
+ ret = hdc3020_read_be16(data, reg);
+ if (ret < 0)
+ return ret;
+
+ switch (chan->type) {
+ case IIO_TEMP:
+ /*
+ * Calculate temperature threshold, shift it down to get the
+ * truncated threshold representation in the 9LSBs while keeping
+ * the current humidity threshold in the 7 MSBs.
+ */
+ tmp = ((u64)(((val + 45) * MICRO) + val2)) * 65535ULL;
+ tmp = div_u64(tmp, MICRO * 175);
+ val = tmp >> HDC3020_THRESH_TEMP_TRUNC_SHIFT;
+ val = FIELD_PREP(HDC3020_THRESH_TEMP_MASK, val);
+ val |= (FIELD_GET(HDC3020_THRESH_HUM_MASK, ret) <<
+ HDC3020_THRESH_HUM_TRUNC_SHIFT);
+ break;
+ case IIO_HUMIDITYRELATIVE:
+ /*
+ * Calculate humidity threshold, shift it down and up to get the
+ * truncated threshold representation in the 7MSBs while keeping
+ * the current temperature threshold in the 9 LSBs.
+ */
+ tmp = ((u64)((val * MICRO) + val2)) * 65535ULL;
+ tmp = div_u64(tmp, MICRO * 100);
+ val = tmp >> HDC3020_THRESH_HUM_TRUNC_SHIFT;
+ val = FIELD_PREP(HDC3020_THRESH_HUM_MASK, val);
+ val |= FIELD_GET(HDC3020_THRESH_TEMP_MASK, ret);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be16(val, buf + 2);
+ buf[4] = crc8(hdc3020_crc8_table, buf + 2, 2, CRC8_INIT_VALUE);
+ return hdc3020_write_bytes(data, buf, 5);
+}
+
+static int hdc3020_read_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct hdc3020_data *data = iio_priv(indio_dev);
+ u16 reg;
+ int ret;
+
+ /* Select threshold register */
+ if (info == IIO_EV_INFO_VALUE) {
+ if (dir == IIO_EV_DIR_RISING)
+ reg = HDC3020_R_T_RH_THRESH_HIGH;
+ else
+ reg = HDC3020_R_T_RH_THRESH_LOW;
+ } else {
+ if (dir == IIO_EV_DIR_RISING)
+ reg = HDC3020_R_T_RH_THRESH_HIGH_CLR;
+ else
+ reg = HDC3020_R_T_RH_THRESH_LOW_CLR;
+ }
+
+ guard(mutex)(&data->lock);
+ ret = hdc3020_read_be16(data, reg);
+ if (ret < 0)
+ return ret;
+
+ switch (chan->type) {
+ case IIO_TEMP:
+ /*
+ * Get the temperature threshold from 9 LSBs, shift them to get
+ * the truncated temperature threshold representation and
+ * calculate the threshold according to the formula in the
+ * datasheet.
+ */
+ *val = FIELD_GET(HDC3020_THRESH_TEMP_MASK, ret);
+ *val = *val << HDC3020_THRESH_TEMP_TRUNC_SHIFT;
+ *val = -2949075 + (175 * (*val));
+ *val2 = 65535;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_HUMIDITYRELATIVE:
+ /*
+ * Get the humidity threshold from 7 MSBs, shift them to get the
+ * truncated humidity threshold representation and calculate the
+ * threshold according to the formula in the datasheet.
+ */
+ *val = FIELD_GET(HDC3020_THRESH_HUM_MASK, ret);
+ *val = (*val << HDC3020_THRESH_HUM_TRUNC_SHIFT) * 100;
+ *val2 = 65535;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
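
/*
 * Aside (illustration only, not part of this patch): a worked check of the
 * 9-bit temperature / 7-bit humidity threshold packing described in the
 * comments above, for 25 degrees C and 50 %RH. The truncation costs about
 * 0.3 degrees C and 0.8 %RH in this example. Plain, self-contained C.
 */
#include <stdio.h>

int main(void)
{
	unsigned int t_raw  = (25 + 45) * 65535 / 175;		/* 26214 */
	unsigned int rh_raw = 50 * 65535 / 100;			/* 32767 */
	unsigned int packed = ((rh_raw >> 9) << 9) | (t_raw >> 7);

	/* what hdc3020_read_thresh() recovers from the packed word */
	long long t_mdegc = (-2949075LL + 175LL * ((packed & 0x1ff) << 7)) * 1000 / 65535;
	long long rh_mpct = (long long)((packed >> 9) << 9) * 100000 / 65535;

	printf("packed=0x%04x T=%lld mC RH=%lld m%%RH\n", packed, t_mdegc, rh_mpct);
	return 0;
}
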
+
+static irqreturn_t hdc3020_interrupt_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct hdc3020_data *data;
+ s64 time;
+ int ret;
+
+ data = iio_priv(indio_dev);
+ ret = hdc3020_read_be16(data, HDC3020_R_STATUS);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ if (!(ret & (HDC3020_STATUS_T_HIGH_ALERT | HDC3020_STATUS_T_LOW_ALERT |
+ HDC3020_STATUS_RH_HIGH_ALERT | HDC3020_STATUS_RH_LOW_ALERT)))
+ return IRQ_NONE;
+
+ time = iio_get_time_ns(indio_dev);
+ if (ret & HDC3020_STATUS_T_HIGH_ALERT)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_TEMP, 0,
+ IIO_NO_MOD,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ time);
+
+ if (ret & HDC3020_STATUS_T_LOW_ALERT)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_TEMP, 0,
+ IIO_NO_MOD,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ time);
+
+ if (ret & HDC3020_STATUS_RH_HIGH_ALERT)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_HUMIDITYRELATIVE, 0,
+ IIO_NO_MOD,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ time);
+
+ if (ret & HDC3020_STATUS_RH_LOW_ALERT)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_HUMIDITYRELATIVE, 0,
+ IIO_NO_MOD,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ time);
+
+ return IRQ_HANDLED;
+}
+
static const struct iio_info hdc3020_info = {
.read_raw = hdc3020_read_raw,
.write_raw = hdc3020_write_raw,
.read_avail = hdc3020_read_available,
+ .read_event_value = hdc3020_read_thresh,
+ .write_event_value = hdc3020_write_thresh,
};
static void hdc3020_stop(void *data)
{
- hdc3020_write_bytes((struct hdc3020_data *)data, HDC3020_EXIT_AUTO, 2);
+ hdc3020_exec_cmd((struct hdc3020_data *)data, HDC3020_EXIT_AUTO);
}
static int hdc3020_probe(struct i2c_client *client)
@@ -424,8 +580,25 @@ static int hdc3020_probe(struct i2c_client *client)
indio_dev->info = &hdc3020_info;
indio_dev->channels = hdc3020_channels;
indio_dev->num_channels = ARRAY_SIZE(hdc3020_channels);
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, hdc3020_interrupt_handler,
+ IRQF_ONESHOT, "hdc3020",
+ indio_dev);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to request IRQ\n");
+
+ /*
+ * The alert output is activated by default upon power up,
+ * hardware reset, and soft reset. Clear the status register.
+ */
+ ret = hdc3020_exec_cmd(data, HDC3020_S_STATUS);
+ if (ret)
+ return ret;
+ }
- ret = hdc3020_write_bytes(data, HDC3020_S_AUTO_10HZ_MOD0, 2);
+ ret = hdc3020_exec_cmd(data, HDC3020_S_AUTO_10HZ_MOD0);
if (ret)
return dev_err_probe(&client->dev, ret,
"Unable to set up measurement\n");
diff --git a/drivers/iio/humidity/hts221_i2c.c b/drivers/iio/humidity/hts221_i2c.c
index 30f2068ea1566..5cb263e0ef5ac 100644
--- a/drivers/iio/humidity/hts221_i2c.c
+++ b/drivers/iio/humidity/hts221_i2c.c
@@ -9,7 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regmap.h>
@@ -63,7 +63,7 @@ static struct i2c_driver hts221_driver = {
.name = "hts221_i2c",
.pm = pm_sleep_ptr(&hts221_pm_ops),
.of_match_table = hts221_i2c_of_match,
- .acpi_match_table = ACPI_PTR(hts221_acpi_match),
+ .acpi_match_table = hts221_acpi_match,
},
.probe = hts221_i2c_probe,
.id_table = hts221_i2c_id_table,
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index 64be656f0b805..01f55cc902faa 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -1363,22 +1363,16 @@ static int adis16475_config_sync_mode(struct adis16475 *st)
static int adis16475_config_irq_pin(struct adis16475 *st)
{
int ret;
- struct irq_data *desc;
u32 irq_type;
u16 val = 0;
u8 polarity;
struct spi_device *spi = st->adis.spi;
- desc = irq_get_irq_data(spi->irq);
- if (!desc) {
- dev_err(&spi->dev, "Could not find IRQ %d\n", spi->irq);
- return -EINVAL;
- }
/*
* It is possible to configure the data ready polarity. Furthermore, we
* need to update the adis struct if we want data ready as active low.
*/
- irq_type = irqd_get_trigger_type(desc);
+ irq_type = irq_get_trigger_type(spi->irq);
if (irq_type == IRQ_TYPE_EDGE_RISING) {
polarity = 1;
st->adis.irq_flag = IRQF_TRIGGER_RISING;
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index fe520194a8371..b40a55bba30c1 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -1246,18 +1246,11 @@ static int adis16480_config_irq_pin(struct adis16480 *st)
{
struct device *dev = &st->adis.spi->dev;
struct fwnode_handle *fwnode = dev_fwnode(dev);
- struct irq_data *desc;
enum adis16480_int_pin pin;
unsigned int irq_type;
uint16_t val;
int i, irq = 0;
- desc = irq_get_irq_data(st->adis.spi->irq);
- if (!desc) {
- dev_err(dev, "Could not find IRQ %d\n", irq);
- return -EINVAL;
- }
-
/* Disable data ready since the default after reset is on */
val = ADIS16480_DRDY_EN(0);
@@ -1285,7 +1278,7 @@ static int adis16480_config_irq_pin(struct adis16480 *st)
* configured as positive or negative, corresponding to
* IRQ_TYPE_EDGE_RISING or IRQ_TYPE_EDGE_FALLING respectively.
*/
- irq_type = irqd_get_trigger_type(desc);
+ irq_type = irq_get_trigger_type(st->adis.spi->irq);
if (irq_type == IRQ_TYPE_EDGE_RISING) { /* Default */
val |= ADIS16480_DRDY_POL(1);
} else if (irq_type == IRQ_TYPE_EDGE_FALLING) {
diff --git a/drivers/iio/imu/bmi160/bmi160_i2c.c b/drivers/iio/imu/bmi160/bmi160_i2c.c
index 81652c08e6441..a081305254dbb 100644
--- a/drivers/iio/imu/bmi160/bmi160_i2c.c
+++ b/drivers/iio/imu/bmi160/bmi160_i2c.c
@@ -43,6 +43,15 @@ static const struct i2c_device_id bmi160_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, bmi160_i2c_id);
static const struct acpi_device_id bmi160_acpi_match[] = {
+ /*
+ * FIRMWARE BUG WORKAROUND
+ * Some manufacturers like GPD, Lenovo or Aya used the incorrect
+ * ID "10EC5280" for bmi160 in their DSDT. A fixed firmware is not
+ * available as of Feb 2024 after trying to work with OEMs, and
+ * this is not expected to change anymore since at least some of
+ * the affected devices are from 2021/2022.
+ */
+ {"10EC5280", 0},
{"BMI0160", 0},
{ },
};
diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c
index 183af482828f8..5d42ab9b176a3 100644
--- a/drivers/iio/imu/bmi323/bmi323_core.c
+++ b/drivers/iio/imu/bmi323/bmi323_core.c
@@ -1668,52 +1668,41 @@ static int bmi323_write_raw(struct iio_dev *indio_dev,
int val2, long mask)
{
struct bmi323_data *data = iio_priv(indio_dev);
- int ret;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = bmi323_set_odr(data, bmi323_iio_to_sensor(chan->type),
- val, val2);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return bmi323_set_odr(data,
+ bmi323_iio_to_sensor(chan->type),
+ val, val2);
+ unreachable();
case IIO_CHAN_INFO_SCALE:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = bmi323_set_scale(data, bmi323_iio_to_sensor(chan->type),
- val, val2);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return bmi323_set_scale(data,
+ bmi323_iio_to_sensor(chan->type),
+ val, val2);
+ unreachable();
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = bmi323_set_average(data, bmi323_iio_to_sensor(chan->type),
- val);
-
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return bmi323_set_average(data,
+ bmi323_iio_to_sensor(chan->type),
+ val);
+ unreachable();
case IIO_CHAN_INFO_ENABLE:
return bmi323_enable_steps(data, val);
- case IIO_CHAN_INFO_PROCESSED:
- scoped_guard(mutex, &data->mutex) {
- if (val || !FIELD_GET(BMI323_FEAT_IO0_STP_CNT_MSK,
- data->feature_events))
- return -EINVAL;
+ case IIO_CHAN_INFO_PROCESSED: {
+ guard(mutex)(&data->mutex);
- /* Clear step counter value */
- ret = bmi323_update_ext_reg(data, BMI323_STEP_SC1_REG,
- BMI323_STEP_SC1_RST_CNT_MSK,
- FIELD_PREP(BMI323_STEP_SC1_RST_CNT_MSK,
- 1));
- }
- return ret;
+ if (val || !FIELD_GET(BMI323_FEAT_IO0_STP_CNT_MSK,
+ data->feature_events))
+ return -EINVAL;
+
+ /* Clear step counter value */
+ return bmi323_update_ext_reg(data, BMI323_STEP_SC1_REG,
+ BMI323_STEP_SC1_RST_CNT_MSK,
+ FIELD_PREP(BMI323_STEP_SC1_RST_CNT_MSK,
+ 1));
+ }
default:
return -EINVAL;
}
@@ -1724,7 +1713,6 @@ static int bmi323_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
struct bmi323_data *data = iio_priv(indio_dev);
- int ret;
switch (mask) {
case IIO_CHAN_INFO_PROCESSED:
@@ -1733,14 +1721,10 @@ static int bmi323_read_raw(struct iio_dev *indio_dev,
switch (chan->type) {
case IIO_ACCEL:
case IIO_ANGL_VEL:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = bmi323_read_axis(data, chan, val);
-
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY,
+ indio_dev)
+ return bmi323_read_axis(data, chan, val);
+ unreachable();
case IIO_TEMP:
return bmi323_get_temp_data(data, val);
default:
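/*
 * Editorial sketch (not part of the patch): iio_device_claim_direct_scoped()
 * executes its first argument when the claim fails and otherwise runs the
 * following statement with direct mode held, releasing it when the scope is
 * left; unreachable() documents that the scoped body always returns.  The
 * foo_* names below are placeholders.
 */
static int foo_write_samp_freq(struct iio_dev *indio_dev, int val)
{
	iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
		return foo_set_odr(iio_priv(indio_dev), val);
	unreachable();
}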
diff --git a/drivers/iio/imu/bmi323/bmi323_i2c.c b/drivers/iio/imu/bmi323/bmi323_i2c.c
index 20a8001b9956a..52140bf057658 100644
--- a/drivers/iio/imu/bmi323/bmi323_i2c.c
+++ b/drivers/iio/imu/bmi323/bmi323_i2c.c
@@ -93,6 +93,26 @@ static int bmi323_i2c_probe(struct i2c_client *i2c)
return bmi323_core_probe(dev);
}
+static const struct acpi_device_id bmi323_acpi_match[] = {
+ /*
+ * The "BOSC0200" identifier used here is not unique to bmi323 devices.
+ * The same "BOSC0200" identifier is found in the ACPI tables of devices
+ * using the bmc150 chip. This creates a conflict with duplicate ACPI
+ * identifiers which multiple drivers want to use. If a non-bmi323
+ * device starts to load with this "BOSC0200" ACPI match here, then the
+ * chip ID check portion should fail because the chip IDs received (via
+ * i2c) are unique between bmc150 and bmi323 and the driver should
+ * relinquish the device. If and when a different driver (such as
+ * bmc150) starts to load with the "BOSC0200" ACPI match, a short reset
+ * should ensure that the device is not in a bad state during that
+ * driver initialization. This device reset does occur in both the
+ * bmi323 and bmc150 init sequences.
+ */
+ { "BOSC0200" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, bmi323_acpi_match);
+
static const struct i2c_device_id bmi323_i2c_ids[] = {
{ "bmi323" },
{ }
@@ -109,6 +129,7 @@ static struct i2c_driver bmi323_i2c_driver = {
.driver = {
.name = "bmi323",
.of_match_table = bmi323_of_i2c_match,
+ .acpi_match_table = bmi323_acpi_match,
},
.probe = bmi323_i2c_probe,
.id_table = bmi323_i2c_ids,
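/*
 * Editorial sketch (not part of the patch): the "relinquish the device"
 * behaviour described in the comment above happens in the shared core probe
 * via its chip ID check.  The register and value names below are
 * placeholders that only show the shape of such a check.
 */
static int example_chip_id_check(struct regmap *regmap)
{
	unsigned int chip_id;
	int ret;

	ret = regmap_read(regmap, EXAMPLE_REG_CHIP_ID, &chip_id);
	if (ret)
		return ret;

	/* Wrong part behind the shared "BOSC0200" ID: let another driver bind */
	if (chip_id != EXAMPLE_CHIP_ID)
		return -ENODEV;

	return 0;
}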
diff --git a/drivers/iio/imu/bno055/bno055_ser_core.c b/drivers/iio/imu/bno055/bno055_ser_core.c
index 5677bdf4f846e..694ff14a3aa27 100644
--- a/drivers/iio/imu/bno055/bno055_ser_core.c
+++ b/drivers/iio/imu/bno055/bno055_ser_core.c
@@ -378,8 +378,8 @@ static void bno055_ser_handle_rx(struct bno055_ser_priv *priv, int status)
* Also, we assume to RX one pkt per time (i.e. the HW doesn't send anything
* unless we require to AND we don't queue more than one request per time).
*/
-static ssize_t bno055_ser_receive_buf(struct serdev_device *serdev,
- const u8 *buf, size_t size)
+static size_t bno055_ser_receive_buf(struct serdev_device *serdev,
+ const u8 *buf, size_t size)
{
int status;
struct bno055_ser_priv *priv = serdev_device_get_drvdata(serdev);
diff --git a/drivers/iio/imu/fxos8700_i2c.c b/drivers/iio/imu/fxos8700_i2c.c
index 2ace306d0f9ab..e99677ad96a2f 100644
--- a/drivers/iio/imu/fxos8700_i2c.c
+++ b/drivers/iio/imu/fxos8700_i2c.c
@@ -10,7 +10,6 @@
* 1 | 0 | 0x1C
* 1 | 1 | 0x1F
*/
-#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -57,7 +56,7 @@ MODULE_DEVICE_TABLE(of, fxos8700_of_match);
static struct i2c_driver fxos8700_i2c_driver = {
.driver = {
.name = "fxos8700_i2c",
- .acpi_match_table = ACPI_PTR(fxos8700_acpi_match),
+ .acpi_match_table = fxos8700_acpi_match,
.of_match_table = fxos8700_of_match,
},
.probe = fxos8700_i2c_probe,
diff --git a/drivers/iio/imu/fxos8700_spi.c b/drivers/iio/imu/fxos8700_spi.c
index 27e694cce173e..6b0dc7a776b9e 100644
--- a/drivers/iio/imu/fxos8700_spi.c
+++ b/drivers/iio/imu/fxos8700_spi.c
@@ -2,7 +2,6 @@
/*
* FXOS8700 - NXP IMU, SPI bits
*/
-#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
@@ -46,7 +45,7 @@ static struct spi_driver fxos8700_spi_driver = {
.probe = fxos8700_spi_probe,
.id_table = fxos8700_spi_id,
.driver = {
- .acpi_match_table = ACPI_PTR(fxos8700_acpi_match),
+ .acpi_match_table = fxos8700_acpi_match,
.of_match_table = fxos8700_of_match,
.name = "fxos8700_spi",
},
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index 958167b31241e..7d3e061f30463 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1514,7 +1514,7 @@ MODULE_DEVICE_TABLE(i2c, kmx61_id);
static struct i2c_driver kmx61_driver = {
.driver = {
.name = KMX61_DRV_NAME,
- .acpi_match_table = ACPI_PTR(kmx61_acpi_match),
+ .acpi_match_table = kmx61_acpi_match,
.pm = pm_ptr(&kmx61_pm_ops),
},
.probe = kmx61_probe,
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index 5865a295a4df3..89d687ec3099c 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -11,11 +11,32 @@ config IIO_ST_LSM6DSX
select IIO_ST_LSM6DSX_I3C if (I3C)
help
Say yes here to build support for STMicroelectronics LSM6DSx imu
- sensor. Supported devices: lsm6ds3, lsm6ds3h, lsm6dsl, lsm6dsm,
- ism330dlc, lsm6dso, lsm6dsox, asm330lhh, asm330lhhx, lsm6dsr,
- lsm6ds3tr-c, ism330dhcx, lsm6dsrx, lsm6ds0, lsm6dsop, lsm6dstx,
- lsm6dsv, lsm6dsv16x, lsm6dso16is, ism330is, asm330lhb, lsm6dst
- and the accelerometer/gyroscope of lsm9ds1.
+ sensor.
+ Supported devices:
+ - asm330lhb
+ - asm330lhh
+ - asm330lhhx
+ - asm330lhhxg1
+ - ism330dhcx
+ - ism330dlc
+ - ism330is
+ - lsm6ds0
+ - lsm6ds3
+ - lsm6ds3h
+ - lsm6ds3tr-c
+ - lsm6dsl
+ - lsm6dsm
+ - lsm6dso
+ - lsm6dso16is
+ - lsm6dsop
+ - lsm6dsox
+ - lsm6dsr
+ - lsm6dsrx
+ - lsm6dst
+ - lsm6dstx
+ - lsm6dsv
+ - lsm6dsv16x
+ - lsm9ds1
To compile this driver as a module, choose M here: the module
will be called st_lsm6dsx.
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index c19237717e812..a3b93566533bc 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -38,6 +38,7 @@
#define ST_LSM6DSO16IS_DEV_NAME "lsm6dso16is"
#define ST_ISM330IS_DEV_NAME "ism330is"
#define ST_ASM330LHB_DEV_NAME "asm330lhb"
+#define ST_ASM330LHHXG1_DEV_NAME "asm330lhhxg1"
enum st_lsm6dsx_hw_id {
ST_LSM6DS3_ID = 1,
@@ -63,6 +64,7 @@ enum st_lsm6dsx_hw_id {
ST_LSM6DSO16IS_ID,
ST_ISM330IS_ID,
ST_ASM330LHB_ID,
+ ST_ASM330LHHXG1_ID,
ST_LSM6DSX_MAX_ID,
};
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 066fe561c5e88..0a7cd8c1aa331 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -2,7 +2,7 @@
/*
* STMicroelectronics st_lsm6dsx FIFO buffer library driver
*
- * LSM6DS3/LSM6DS3H/LSM6DSL/LSM6DSM/ISM330DLC/LSM6DS3TR-C:
+ * Pattern FIFO:
* The FIFO buffer can be configured to store data from gyroscope and
* accelerometer. Samples are queued without any tag according to a
* specific pattern based on 'FIFO data sets' (6 bytes each):
@@ -14,12 +14,34 @@
* (e.g. Gx, Gy, Gz, Ax, Ay, Az), then data are repeated depending on the
* value of the decimation factor and ODR set for each FIFO data set.
*
- * LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/LSM6DSRX/ISM330DHCX/
- * LSM6DST/LSM6DSOP/LSM6DSTX/LSM6DSV/ASM330LHB:
+ * Supported devices:
+ * - ISM330DLC
+ * - LSM6DS3
+ * - LSM6DS3H
+ * - LSM6DS3TR-C
+ * - LSM6DSL
+ * - LSM6DSM
+ *
+ * Tagged FIFO:
* The FIFO buffer can be configured to store data from gyroscope and
* accelerometer. Each sample is queued with a tag (1B) indicating data
* source (gyroscope, accelerometer, hw timer).
*
+ * Supported devices:
+ * - ASM330LHB
+ * - ASM330LHH
+ * - ASM330LHHX
+ * - ASM330LHHXG1
+ * - ISM330DHCX
+ * - LSM6DSO
+ * - LSM6DSOP
+ * - LSM6DSOX
+ * - LSM6DSR
+ * - LSM6DSRX
+ * - LSM6DST
+ * - LSM6DSTX
+ * - LSM6DSV
+ *
* FIFO supported modes:
* - BYPASS: FIFO disabled
* - CONTINUOUS: FIFO enabled. When the buffer is full, the FIFO index
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index b6e6b1df8a618..0716986f98129 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -14,34 +14,51 @@
* by a different driver.
*
* Supported sensors:
- * - LSM6DS3:
+ *
+ * - LSM6DS3
* - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 8KB
*
- * - LSM6DS3H/LSM6DSL/LSM6DSM/ISM330DLC/LSM6DS3TR-C:
+ * - ISM330DLC
+ * - LSM6DS3H
+ * - LSM6DS3TR-C
+ * - LSM6DSL
+ * - LSM6DSM
* - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 4KB
*
- * - LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/ISM330DHCX/LSM6DST/LSM6DSOP/
- * LSM6DSTX/LSM6DSO16IS/ISM330IS:
+ * - ASM330LHH
+ * - ASM330LHHX
+ * - ASM330LHHXG1
+ * - ISM330DHCX
+ * - ISM330IS
+ * - LSM6DSO
+ * - LSM6DSO16IS
+ * - LSM6DSOP
+ * - LSM6DSOX
+ * - LSM6DSR
+ * - LSM6DST
+ * - LSM6DSTX
* - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416,
* 833
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 3KB
*
- * - LSM6DSV/LSM6DSV16X:
+ * - LSM6DSV
+ * - LSM6DSV16X
* - Accelerometer/Gyroscope supported ODR [Hz]: 7.5, 15, 30, 60, 120, 240,
* 480, 960
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-250/+-500/+-1000/+-2000
* - FIFO size: 3KB
*
- * - LSM9DS1/LSM6DS0:
+ * - LSM6DS0
+ * - LSM9DS1
* - Accelerometer supported ODR [Hz]: 10, 50, 119, 238, 476, 952
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported ODR [Hz]: 15, 60, 119, 238, 476, 952
@@ -821,6 +838,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.name = ST_ASM330LHHX_DEV_NAME,
.wai = 0x6b,
}, {
+ .hw_id = ST_ASM330LHHXG1_ID,
+ .name = ST_ASM330LHHXG1_DEV_NAME,
+ .wai = 0x6b,
+ }, {
.hw_id = ST_LSM6DSTX_ID,
.name = ST_LSM6DSTX_DEV_NAME,
.wai = 0x6d,
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index 911444ec57c01..cddf41cc0ca97 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -134,6 +134,10 @@ static const struct of_device_id st_lsm6dsx_i2c_of_match[] = {
.compatible = "st,asm330lhb",
.data = (void *)ST_ASM330LHB_ID,
},
+ {
+ .compatible = "st,asm330lhhxg1",
+ .data = (void *)ST_ASM330LHHXG1_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_i2c_of_match);
@@ -168,6 +172,7 @@ static const struct i2c_device_id st_lsm6dsx_i2c_id_table[] = {
{ ST_LSM6DSO16IS_DEV_NAME, ST_LSM6DSO16IS_ID },
{ ST_ISM330IS_DEV_NAME, ST_ISM330IS_ID },
{ ST_ASM330LHB_DEV_NAME, ST_ASM330LHB_ID },
+ { ST_ASM330LHHXG1_DEV_NAME, ST_ASM330LHHXG1_ID },
{},
};
MODULE_DEVICE_TABLE(i2c, st_lsm6dsx_i2c_id_table);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index f56c170c41a9d..c122c8831365a 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -129,6 +129,10 @@ static const struct of_device_id st_lsm6dsx_spi_of_match[] = {
.compatible = "st,asm330lhb",
.data = (void *)ST_ASM330LHB_ID,
},
+ {
+ .compatible = "st,asm330lhhxg1",
+ .data = (void *)ST_ASM330LHHXG1_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_spi_of_match);
@@ -157,6 +161,7 @@ static const struct spi_device_id st_lsm6dsx_spi_id_table[] = {
{ ST_LSM6DSO16IS_DEV_NAME, ST_LSM6DSO16IS_ID },
{ ST_ISM330IS_DEV_NAME, ST_ISM330IS_ID },
{ ST_ASM330LHB_DEV_NAME, ST_ASM330LHB_ID },
+ { ST_ASM330LHHXG1_DEV_NAME, ST_ASM330LHHXG1_ID },
{},
};
MODULE_DEVICE_TABLE(spi, st_lsm6dsx_spi_id_table);
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h
index 76678cdefb074..e67d31b484418 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h
@@ -4,9 +4,12 @@
#ifndef ST_LSM9DS0_H
#define ST_LSM9DS0_H
-struct iio_dev;
+struct device;
+struct regmap;
struct regulator;
+struct iio_dev;
+
struct st_lsm9ds0 {
struct device *dev;
const char *name;
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c
index e887b45cdbcd7..10c1b2ba7a3d9 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c
@@ -7,10 +7,10 @@
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
*/
-#include <linux/device.h>
+#include <linux/array_size.h>
+#include <linux/dev_printk.h>
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/iio/common/st_sensors.h>
@@ -25,10 +25,9 @@ static int st_lsm9ds0_probe_accel(struct st_lsm9ds0 *lsm9ds0, struct regmap *reg
struct st_sensor_data *data;
settings = st_accel_get_settings(lsm9ds0->name);
- if (!settings) {
- dev_err(dev, "device name %s not recognized.\n", lsm9ds0->name);
- return -ENODEV;
- }
+ if (!settings)
+ return dev_err_probe(dev, -ENODEV, "device name %s not recognized.\n",
+ lsm9ds0->name);
lsm9ds0->accel = devm_iio_device_alloc(dev, sizeof(*data));
if (!lsm9ds0->accel)
@@ -51,10 +50,9 @@ static int st_lsm9ds0_probe_magn(struct st_lsm9ds0 *lsm9ds0, struct regmap *regm
struct st_sensor_data *data;
settings = st_magn_get_settings(lsm9ds0->name);
- if (!settings) {
- dev_err(dev, "device name %s not recognized.\n", lsm9ds0->name);
- return -ENODEV;
- }
+ if (!settings)
+ return dev_err_probe(dev, -ENODEV, "device name %s not recognized.\n",
+ lsm9ds0->name);
lsm9ds0->magn = devm_iio_device_alloc(dev, sizeof(*data));
if (!lsm9ds0->magn)
@@ -80,8 +78,7 @@ int st_lsm9ds0_probe(struct st_lsm9ds0 *lsm9ds0, struct regmap *regmap)
ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulator_names),
regulator_names);
if (ret)
- return dev_err_probe(dev, ret,
- "unable to enable Vdd supply\n");
+ return dev_err_probe(dev, ret, "unable to enable Vdd supply\n");
/* Setup accelerometer device */
ret = st_lsm9ds0_probe_accel(lsm9ds0, regmap);
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
index 61d855083aa01..d03cec3b24fed 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
@@ -7,8 +7,10 @@
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
*/
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gfp_types.h>
#include <linux/i2c.h>
-#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
@@ -39,7 +41,7 @@ MODULE_DEVICE_TABLE(i2c, st_lsm9ds0_id_table);
static const struct acpi_device_id st_lsm9ds0_acpi_match[] = {
{"ACCL0001", (kernel_ulong_t)LSM303D_IMU_DEV_NAME},
- { },
+ {}
};
MODULE_DEVICE_TABLE(acpi, st_lsm9ds0_acpi_match);
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
index 8cc041d56cf76..69e9135795a37 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
@@ -7,7 +7,9 @@
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
*/
-#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gfp_types.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c
new file mode 100644
index 0000000000000..2fea2bbbe47fd
--- /dev/null
+++ b/drivers/iio/industrialio-backend.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Framework to handle complex IIO aggregate devices.
+ *
+ * The typical architecture is to have one device as the frontend device which
+ * can be "linked" against one or multiple backend devices. All the IIO and
+ * userspace interface is expected to be registered/managed by the frontend
+ * device which will callback into the backends when needed (to get/set some
+ * configuration that it does not directly control).
+ *
+ * -------------------------------------------------------
+ * ------------------ | ------------ ------------ ------- FPGA|
+ * | ADC |------------------------| | ADC CORE |---------| DMA CORE |------| RAM | |
+ * | (Frontend/IIO) | Serial Data (eg: LVDS) | |(backend) |---------| |------| | |
+ * | |------------------------| ------------ ------------ ------- |
+ * ------------------ -------------------------------------------------------
+ *
+ * The framework interface is pretty simple:
+ * - Backends should register themselves with devm_iio_backend_register()
+ * - Frontend devices should get backends with devm_iio_backend_get()
+ *
+ * Note that the primary targets for this framework are converters like
+ * ADC/DACs so iio_backend_ops will have some operations typical of converter
+ * devices. On top of that, this is "generic" for all IIO which means any kind
+ * of device can make use of the framework. That said, if the iio_backend_ops
+ * struct begins to grow out of control, we can always refactor things so that
+ * the industrialio-backend.c is only left with the really generic stuff. Then,
+ * we can build on top of it depending on the needs.
+ *
+ * Copyright (C) 2023-2024 Analog Devices Inc.
+ */
+#define dev_fmt(fmt) "iio-backend: " fmt
+
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/iio/backend.h>
+
+struct iio_backend {
+ struct list_head entry;
+ const struct iio_backend_ops *ops;
+ struct device *dev;
+ struct module *owner;
+ void *priv;
+};
+
+/*
+ * Helper struct for requesting buffers. This ensures that we have all data
+ * that we need to free the buffer in a device managed action.
+ */
+struct iio_backend_buffer_pair {
+ struct iio_backend *back;
+ struct iio_buffer *buffer;
+};
+
+static LIST_HEAD(iio_back_list);
+static DEFINE_MUTEX(iio_back_lock);
+
+/*
+ * Helper macros to call backend ops. They make sure the op is supported.
+ */
+#define iio_backend_check_op(back, op) ({ \
+ struct iio_backend *____back = back; \
+ int ____ret = 0; \
+ \
+ if (!____back->ops->op) \
+ ____ret = -EOPNOTSUPP; \
+ \
+ ____ret; \
+})
+
+#define iio_backend_op_call(back, op, args...) ({ \
+ struct iio_backend *__back = back; \
+ int __ret; \
+ \
+ __ret = iio_backend_check_op(__back, op); \
+ if (!__ret) \
+ __ret = __back->ops->op(__back, ##args); \
+ \
+ __ret; \
+})
+
+#define iio_backend_ptr_op_call(back, op, args...) ({ \
+ struct iio_backend *__back = back; \
+ void *ptr_err; \
+ int __ret; \
+ \
+ __ret = iio_backend_check_op(__back, op); \
+ if (__ret) \
+ ptr_err = ERR_PTR(__ret); \
+ else \
+ ptr_err = __back->ops->op(__back, ##args); \
+ \
+ ptr_err; \
+})
+
+#define iio_backend_void_op_call(back, op, args...) { \
+ struct iio_backend *__back = back; \
+ int __ret; \
+ \
+ __ret = iio_backend_check_op(__back, op); \
+ if (!__ret) \
+ __back->ops->op(__back, ##args); \
+}
+
+/**
+ * iio_backend_chan_enable - Enable a backend channel
+ * @back: Backend device
+ * @chan: Channel number
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan)
+{
+ return iio_backend_op_call(back, chan_enable, chan);
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_chan_enable, IIO_BACKEND);
+
+/**
+ * iio_backend_chan_disable - Disable a backend channel
+ * @back: Backend device
+ * @chan: Channel number
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan)
+{
+ return iio_backend_op_call(back, chan_disable, chan);
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_chan_disable, IIO_BACKEND);
+
+static void __iio_backend_disable(void *back)
+{
+ iio_backend_void_op_call(back, disable);
+}
+
+/**
+ * devm_iio_backend_enable - Device managed backend enable
+ * @dev: Consumer device for the backend
+ * @back: Backend device
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int devm_iio_backend_enable(struct device *dev, struct iio_backend *back)
+{
+ int ret;
+
+ ret = iio_backend_op_call(back, enable);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, __iio_backend_disable, back);
+}
+EXPORT_SYMBOL_NS_GPL(devm_iio_backend_enable, IIO_BACKEND);
+
+/**
+ * iio_backend_data_format_set - Configure the channel data format
+ * @back: Backend device
+ * @chan: Channel number
+ * @data: Data format
+ *
+ * Properly configure a channel with respect to the expected data format. A
+ * @struct iio_backend_data_fmt must be passed with the settings.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan,
+ const struct iio_backend_data_fmt *data)
+{
+ if (!data || data->type >= IIO_BACKEND_DATA_TYPE_MAX)
+ return -EINVAL;
+
+ return iio_backend_op_call(back, data_format_set, chan, data);
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_data_format_set, IIO_BACKEND);
+
+static void iio_backend_free_buffer(void *arg)
+{
+ struct iio_backend_buffer_pair *pair = arg;
+
+ iio_backend_void_op_call(pair->back, free_buffer, pair->buffer);
+}
+
+/**
+ * devm_iio_backend_request_buffer - Device managed buffer request
+ * @dev: Consumer device for the backend
+ * @back: Backend device
+ * @indio_dev: IIO device
+ *
+ * Request an IIO buffer from the backend. The type of the buffer (typically
+ * INDIO_BUFFER_HARDWARE) is up to the backend to decide. This is because,
+ * normally, the backend dictates what kind of buffering we can get.
+ *
+ * The backend .free_buffer() hook is automatically called on @dev detach.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int devm_iio_backend_request_buffer(struct device *dev,
+ struct iio_backend *back,
+ struct iio_dev *indio_dev)
+{
+ struct iio_backend_buffer_pair *pair;
+ struct iio_buffer *buffer;
+
+ pair = devm_kzalloc(dev, sizeof(*pair), GFP_KERNEL);
+ if (!pair)
+ return -ENOMEM;
+
+ buffer = iio_backend_ptr_op_call(back, request_buffer, indio_dev);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+
+ /* a weak reference should be all we need */
+ pair->back = back;
+ pair->buffer = buffer;
+
+ return devm_add_action_or_reset(dev, iio_backend_free_buffer, pair);
+}
+EXPORT_SYMBOL_NS_GPL(devm_iio_backend_request_buffer, IIO_BACKEND);
+
+static void iio_backend_release(void *arg)
+{
+ struct iio_backend *back = arg;
+
+ module_put(back->owner);
+}
+
+static int __devm_iio_backend_get(struct device *dev, struct iio_backend *back)
+{
+ struct device_link *link;
+ int ret;
+
+ /*
+ * Make sure the provider cannot be unloaded before the consumer module.
+ * Note that device_links would still guarantee that nothing is
+ * accessible (and hence nothing breaks), but this makes it explicit that
+ * the consumer module must also be unloaded.
+ */
+ if (!try_module_get(back->owner))
+ return dev_err_probe(dev, -ENODEV,
+ "Cannot get module reference\n");
+
+ ret = devm_add_action_or_reset(dev, iio_backend_release, back);
+ if (ret)
+ return ret;
+
+ link = device_link_add(dev, back->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!link)
+ return dev_err_probe(dev, -EINVAL,
+ "Could not link to supplier(%s)\n",
+ dev_name(back->dev));
+
+ dev_dbg(dev, "Found backend(%s) device\n", dev_name(back->dev));
+
+ return 0;
+}
+
+/**
+ * devm_iio_backend_get - Device managed backend device get
+ * @dev: Consumer device for the backend
+ * @name: Backend name
+ *
+ * Gets the backend associated with @dev.
+ *
+ * RETURNS:
+ * A backend pointer, negative error pointer otherwise.
+ */
+struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name)
+{
+ struct fwnode_handle *fwnode;
+ struct iio_backend *back;
+ unsigned int index;
+ int ret;
+
+ if (name) {
+ ret = device_property_match_string(dev, "io-backend-names",
+ name);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ index = ret;
+ } else {
+ index = 0;
+ }
+
+ fwnode = fwnode_find_reference(dev_fwnode(dev), "io-backends", index);
+ if (IS_ERR(fwnode)) {
+ dev_err_probe(dev, PTR_ERR(fwnode),
+ "Cannot get Firmware reference\n");
+ return ERR_CAST(fwnode);
+ }
+
+ guard(mutex)(&iio_back_lock);
+ list_for_each_entry(back, &iio_back_list, entry) {
+ if (!device_match_fwnode(back->dev, fwnode))
+ continue;
+
+ fwnode_handle_put(fwnode);
+ ret = __devm_iio_backend_get(dev, back);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return back;
+ }
+
+ fwnode_handle_put(fwnode);
+ return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL_NS_GPL(devm_iio_backend_get, IIO_BACKEND);
+
+/**
+ * __devm_iio_backend_get_from_fwnode_lookup - Device managed fwnode backend device get
+ * @dev: Consumer device for the backend
+ * @fwnode: Firmware node of the backend device
+ *
+ * Search the backend list for a device matching @fwnode.
+ * This API should not be used; it is only present to keep the first
+ * user of this framework from breaking its DT ABI.
+ *
+ * RETURNS:
+ * A backend pointer, negative error pointer otherwise.
+ */
+struct iio_backend *
+__devm_iio_backend_get_from_fwnode_lookup(struct device *dev,
+ struct fwnode_handle *fwnode)
+{
+ struct iio_backend *back;
+ int ret;
+
+ guard(mutex)(&iio_back_lock);
+ list_for_each_entry(back, &iio_back_list, entry) {
+ if (!device_match_fwnode(back->dev, fwnode))
+ continue;
+
+ ret = __devm_iio_backend_get(dev, back);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return back;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL_NS_GPL(__devm_iio_backend_get_from_fwnode_lookup, IIO_BACKEND);
+
+/**
+ * iio_backend_get_priv - Get driver private data
+ * @back: Backend device
+ */
+void *iio_backend_get_priv(const struct iio_backend *back)
+{
+ return back->priv;
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_get_priv, IIO_BACKEND);
+
+static void iio_backend_unregister(void *arg)
+{
+ struct iio_backend *back = arg;
+
+ guard(mutex)(&iio_back_lock);
+ list_del(&back->entry);
+}
+
+/**
+ * devm_iio_backend_register - Device managed backend device register
+ * @dev: Backend device being registered
+ * @ops: Backend ops
+ * @priv: Device private data
+ *
+ * @ops is mandatory. Not providing it results in -EINVAL.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int devm_iio_backend_register(struct device *dev,
+ const struct iio_backend_ops *ops, void *priv)
+{
+ struct iio_backend *back;
+
+ if (!ops)
+ return dev_err_probe(dev, -EINVAL, "No backend ops given\n");
+
+ /*
+ * Through device_links, we guarantee that a frontend device cannot be
+ * bound/exist if the backend driver is not around. Hence, we can bind
+ * the backend object lifetime with the device being passed since
+ * removing it will tear the frontend/consumer down.
+ */
+ back = devm_kzalloc(dev, sizeof(*back), GFP_KERNEL);
+ if (!back)
+ return -ENOMEM;
+
+ back->ops = ops;
+ back->owner = dev->driver->owner;
+ back->dev = dev;
+ back->priv = priv;
+ scoped_guard(mutex, &iio_back_lock)
+ list_add(&back->entry, &iio_back_list);
+
+ return devm_add_action_or_reset(dev, iio_backend_unregister, back);
+}
+EXPORT_SYMBOL_NS_GPL(devm_iio_backend_register, IIO_BACKEND);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("Framework to handle complex IIO aggregate devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 173dc00762a15..4302093b92c75 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -42,7 +42,7 @@ static DEFINE_IDA(iio_ida);
static dev_t iio_devt;
#define IIO_DEV_MAX 256
-struct bus_type iio_bus_type = {
+const struct bus_type iio_bus_type = {
.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);
@@ -213,9 +213,7 @@ bool iio_buffer_enabled(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- return iio_dev_opaque->currentmode &
- (INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE |
- INDIO_BUFFER_TRIGGERED);
+ return iio_dev_opaque->currentmode & INDIO_ALL_BUFFER_MODES;
}
EXPORT_SYMBOL_GPL(iio_buffer_enabled);
diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c
index 7653261d2dc2b..b51eb6cb766f3 100644
--- a/drivers/iio/industrialio-gts-helper.c
+++ b/drivers/iio/industrialio-gts-helper.c
@@ -34,24 +34,11 @@
static int iio_gts_get_gain(const u64 max, const u64 scale)
{
u64 full = max;
- int tmp = 1;
if (scale > full || !scale)
return -EINVAL;
- if (U64_MAX - full < scale) {
- /* Risk of overflow */
- if (full - scale < scale)
- return 1;
-
- full -= scale;
- tmp++;
- }
-
- while (full > scale * (u64)tmp)
- tmp++;
-
- return tmp;
+ return div64_u64(full, scale);
}
/**
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 143003232d1c2..fd5a9879a582c 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -87,13 +87,14 @@ config APDS9960
module will be called apds9960
config AS73211
- tristate "AMS AS73211 XYZ color sensor"
+ tristate "AMS AS73211 XYZ color sensor and AMS AS7331 UV sensor"
depends on I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
If you say yes here you get support for the AMS AS73211
- JENCOLOR(R) Digital XYZ Sensor.
+ JENCOLOR(R) Digital XYZ and the AMS AS7331 UVA, UVB and UVC
+ ultraviolet sensors.
For triggered measurements, you will need an additional trigger driver
like IIO_HRTIMER_TRIGGER or IIO_SYSFS_TRIGGER.
diff --git a/drivers/iio/light/al3010.c b/drivers/iio/light/al3010.c
index 8f0119f392b70..53569587ccb7b 100644
--- a/drivers/iio/light/al3010.c
+++ b/drivers/iio/light/al3010.c
@@ -17,7 +17,7 @@
#include <linux/bitfield.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
diff --git a/drivers/iio/light/al3320a.c b/drivers/iio/light/al3320a.c
index d5957d85c2786..105f379b9b414 100644
--- a/drivers/iio/light/al3320a.c
+++ b/drivers/iio/light/al3320a.c
@@ -15,7 +15,6 @@
#include <linux/bitfield.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c
index ec97a3a468392..be0068081ebbb 100644
--- a/drivers/iio/light/as73211.c
+++ b/drivers/iio/light/as73211.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Support for AMS AS73211 JENCOLOR(R) Digital XYZ Sensor
+ * Support for AMS AS73211 JENCOLOR(R) Digital XYZ Sensor and AMS AS7331
+ * UVA, UVB and UVC (DUV) Ultraviolet Sensor
*
* Author: Christian Eggers <ceggers@arri.de>
*
@@ -9,7 +10,9 @@
* Color light sensor with 16-bit channels for x, y, z and temperature);
* 7-bit I2C slave address 0x74 .. 0x77.
*
- * Datasheet: https://ams.com/documents/20143/36005/AS73211_DS000556_3-01.pdf
+ * Datasheets:
+ * AS73211: https://ams.com/documents/20143/36005/AS73211_DS000556_3-01.pdf
+ * AS7331: https://ams.com/documents/20143/9106314/AS7331_DS001047_4-00.pdf
*/
#include <linux/bitfield.h>
@@ -84,6 +87,20 @@ static const int as73211_hardwaregain_avail[] = {
1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048,
};
+struct as73211_data;
+
+/**
+ * struct as73211_spec_dev_data - device-specific data
+ * @intensity_scale: Function to retrieve intensity scale values.
+ * @channels: Device channels.
+ * @num_channels: Number of channels of the device.
+ */
+struct as73211_spec_dev_data {
+ int (*intensity_scale)(struct as73211_data *data, int chan, int *val, int *val2);
+ struct iio_chan_spec const *channels;
+ int num_channels;
+};
+
/**
* struct as73211_data - Instance data for one AS73211
* @client: I2C client.
@@ -94,6 +111,7 @@ static const int as73211_hardwaregain_avail[] = {
* @mutex: Keeps cached registers in sync with the device.
* @completion: Completion to wait for interrupt.
* @int_time_avail: Available integration times (depend on sampling frequency).
+ * @spec_dev: device-specific configuration.
*/
struct as73211_data {
struct i2c_client *client;
@@ -104,6 +122,7 @@ struct as73211_data {
struct mutex mutex;
struct completion completion;
int int_time_avail[AS73211_SAMPLE_TIME_NUM * 2];
+ const struct as73211_spec_dev_data *spec_dev;
};
#define AS73211_COLOR_CHANNEL(_color, _si, _addr) { \
@@ -138,6 +157,10 @@ struct as73211_data {
#define AS73211_SCALE_Y 298384270 /* nW/m^2 */
#define AS73211_SCALE_Z 160241927 /* nW/m^2 */
+#define AS7331_SCALE_UVA 340000 /* nW/cm^2 */
+#define AS7331_SCALE_UVB 378000 /* nW/cm^2 */
+#define AS7331_SCALE_UVC 166000 /* nW/cm^2 */
+
/* Channel order MUST match devices result register order */
#define AS73211_SCAN_INDEX_TEMP 0
#define AS73211_SCAN_INDEX_X 1
@@ -176,6 +199,28 @@ static const struct iio_chan_spec as73211_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(AS73211_SCAN_INDEX_TS),
};
+static const struct iio_chan_spec as7331_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .address = AS73211_OUT_TEMP,
+ .scan_index = AS73211_SCAN_INDEX_TEMP,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ }
+ },
+ AS73211_COLOR_CHANNEL(LIGHT_UVA, AS73211_SCAN_INDEX_X, AS73211_OUT_MRES1),
+ AS73211_COLOR_CHANNEL(LIGHT_UVB, AS73211_SCAN_INDEX_Y, AS73211_OUT_MRES2),
+ AS73211_COLOR_CHANNEL(LIGHT_DUV, AS73211_SCAN_INDEX_Z, AS73211_OUT_MRES3),
+ IIO_CHAN_SOFT_TIMESTAMP(AS73211_SCAN_INDEX_TS),
+};
+
static unsigned int as73211_integration_time_1024cyc(struct as73211_data *data)
{
/*
@@ -316,6 +361,48 @@ static int as73211_req_data(struct as73211_data *data)
return 0;
}
+static int as73211_intensity_scale(struct as73211_data *data, int chan,
+ int *val, int *val2)
+{
+ switch (chan) {
+ case IIO_MOD_X:
+ *val = AS73211_SCALE_X;
+ break;
+ case IIO_MOD_Y:
+ *val = AS73211_SCALE_Y;
+ break;
+ case IIO_MOD_Z:
+ *val = AS73211_SCALE_Z;
+ break;
+ default:
+ return -EINVAL;
+ }
+ *val2 = as73211_integration_time_1024cyc(data) * as73211_gain(data);
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int as7331_intensity_scale(struct as73211_data *data, int chan,
+ int *val, int *val2)
+{
+ switch (chan) {
+ case IIO_MOD_LIGHT_UVA:
+ *val = AS7331_SCALE_UVA;
+ break;
+ case IIO_MOD_LIGHT_UVB:
+ *val = AS7331_SCALE_UVB;
+ break;
+ case IIO_MOD_LIGHT_DUV:
+ *val = AS7331_SCALE_UVC;
+ break;
+ default:
+ return -EINVAL;
+ }
+ *val2 = as73211_integration_time_1024cyc(data) * as73211_gain(data);
+
+ return IIO_VAL_FRACTIONAL;
+}
+
static int as73211_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
@@ -355,30 +442,13 @@ static int as73211_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec cons
*val2 = AS73211_SCALE_TEMP_MICRO;
return IIO_VAL_INT_PLUS_MICRO;
- case IIO_INTENSITY: {
- unsigned int scale;
-
- switch (chan->channel2) {
- case IIO_MOD_X:
- scale = AS73211_SCALE_X;
- break;
- case IIO_MOD_Y:
- scale = AS73211_SCALE_Y;
- break;
- case IIO_MOD_Z:
- scale = AS73211_SCALE_Z;
- break;
- default:
- return -EINVAL;
- }
- scale /= as73211_gain(data);
- scale /= as73211_integration_time_1024cyc(data);
- *val = scale;
- return IIO_VAL_INT;
+ case IIO_INTENSITY:
+ return data->spec_dev->intensity_scale(data, chan->channel2,
+ val, val2);
default:
return -EINVAL;
- }}
+ }
case IIO_CHAN_INFO_SAMP_FREQ:
/* f_samp is configured in CREG3 in powers of 2 (x 1.024 MHz) */
@@ -676,13 +746,17 @@ static int as73211_probe(struct i2c_client *client)
i2c_set_clientdata(client, indio_dev);
data->client = client;
+ data->spec_dev = i2c_get_match_data(client);
+ if (!data->spec_dev)
+ return -EINVAL;
+
mutex_init(&data->mutex);
init_completion(&data->completion);
indio_dev->info = &as73211_info;
indio_dev->name = AS73211_DRV_NAME;
- indio_dev->channels = as73211_channels;
- indio_dev->num_channels = ARRAY_SIZE(as73211_channels);
+ indio_dev->channels = data->spec_dev->channels;
+ indio_dev->num_channels = data->spec_dev->num_channels;
indio_dev->modes = INDIO_DIRECT_MODE;
ret = i2c_smbus_read_byte_data(data->client, AS73211_REG_OSR);
@@ -772,14 +846,28 @@ static int as73211_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(as73211_pm_ops, as73211_suspend,
as73211_resume);
+static const struct as73211_spec_dev_data as73211_spec = {
+ .intensity_scale = as73211_intensity_scale,
+ .channels = as73211_channels,
+ .num_channels = ARRAY_SIZE(as73211_channels),
+};
+
+static const struct as73211_spec_dev_data as7331_spec = {
+ .intensity_scale = as7331_intensity_scale,
+ .channels = as7331_channels,
+ .num_channels = ARRAY_SIZE(as7331_channels),
+};
+
static const struct of_device_id as73211_of_match[] = {
- { .compatible = "ams,as73211" },
+ { .compatible = "ams,as73211", &as73211_spec },
+ { .compatible = "ams,as7331", &as7331_spec },
{ }
};
MODULE_DEVICE_TABLE(of, as73211_of_match);
static const struct i2c_device_id as73211_id[] = {
- { "as73211", 0 },
+ { "as73211", (kernel_ulong_t)&as73211_spec },
+ { "as7331", (kernel_ulong_t)&as7331_spec },
{ }
};
MODULE_DEVICE_TABLE(i2c, as73211_id);
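/*
 * Editorial sketch (not part of the patch): i2c_get_match_data() returns the
 * driver data of whichever table matched the device (OF, ACPI or the plain
 * I2C id table), which is why both tables above now carry a pointer to the
 * per-variant as73211_spec_dev_data.  The probe-side lookup reduces to
 * roughly the placeholder below:
 */
static int example_variant_lookup(struct i2c_client *client)
{
	const struct as73211_spec_dev_data *spec = i2c_get_match_data(client);

	if (!spec)
		return -EINVAL;

	/*
	 * spec->channels, spec->num_channels and spec->intensity_scale then
	 * describe this particular variant (AS73211 or AS7331).
	 */
	return 0;
}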
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index b6c4bef2a7bb2..260281194f613 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -14,8 +14,11 @@
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum {
- CHANNEL_SCAN_INDEX_INTENSITY = 0,
- CHANNEL_SCAN_INDEX_ILLUM = 1,
+ CHANNEL_SCAN_INDEX_INTENSITY,
+ CHANNEL_SCAN_INDEX_ILLUM,
+ CHANNEL_SCAN_INDEX_COLOR_TEMP,
+ CHANNEL_SCAN_INDEX_CHROMATICITY_X,
+ CHANNEL_SCAN_INDEX_CHROMATICITY_Y,
CHANNEL_SCAN_INDEX_MAX
};
@@ -25,6 +28,7 @@ struct als_state {
struct hid_sensor_hub_callbacks callbacks;
struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info als[CHANNEL_SCAN_INDEX_MAX];
+ struct iio_chan_spec channels[CHANNEL_SCAN_INDEX_MAX + 1];
struct {
u32 illum[CHANNEL_SCAN_INDEX_MAX];
u64 timestamp __aligned(8);
@@ -33,7 +37,18 @@ struct als_state {
int scale_post_decml;
int scale_precision;
int value_offset;
+ int num_channels;
s64 timestamp;
+ unsigned long als_scan_mask[2];
+};
+
+/* The order of usage ids must match scan index starting from CHANNEL_SCAN_INDEX_INTENSITY */
+static const u32 als_usage_ids[] = {
+ HID_USAGE_SENSOR_LIGHT_ILLUM,
+ HID_USAGE_SENSOR_LIGHT_ILLUM,
+ HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE,
+ HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X,
+ HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y,
};
static const u32 als_sensitivity_addresses[] = {
@@ -65,6 +80,40 @@ static const struct iio_chan_spec als_channels[] = {
BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
.scan_index = CHANNEL_SCAN_INDEX_ILLUM,
},
+ {
+ .type = IIO_COLORTEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
+ .scan_index = CHANNEL_SCAN_INDEX_COLOR_TEMP,
+ },
+ {
+ .type = IIO_CHROMATICITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
+ .scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_X,
+ },
+ {
+ .type = IIO_CHROMATICITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_Y,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
+ .scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_Y,
+ },
IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP)
};
@@ -103,6 +152,21 @@ static int als_read_raw(struct iio_dev *indio_dev,
min = als_state->als[chan->scan_index].logical_minimum;
address = HID_USAGE_SENSOR_LIGHT_ILLUM;
break;
+ case CHANNEL_SCAN_INDEX_COLOR_TEMP:
+ report_id = als_state->als[chan->scan_index].report_id;
+ min = als_state->als[chan->scan_index].logical_minimum;
+ address = HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE;
+ break;
+ case CHANNEL_SCAN_INDEX_CHROMATICITY_X:
+ report_id = als_state->als[chan->scan_index].report_id;
+ min = als_state->als[chan->scan_index].logical_minimum;
+ address = HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X;
+ break;
+ case CHANNEL_SCAN_INDEX_CHROMATICITY_Y:
+ report_id = als_state->als[chan->scan_index].report_id;
+ min = als_state->als[chan->scan_index].logical_minimum;
+ address = HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y;
+ break;
default:
report_id = -1;
break;
@@ -223,6 +287,18 @@ static int als_capture_sample(struct hid_sensor_hub_device *hsdev,
als_state->scan.illum[CHANNEL_SCAN_INDEX_ILLUM] = sample_data;
ret = 0;
break;
+ case HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE:
+ als_state->scan.illum[CHANNEL_SCAN_INDEX_COLOR_TEMP] = sample_data;
+ ret = 0;
+ break;
+ case HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X:
+ als_state->scan.illum[CHANNEL_SCAN_INDEX_CHROMATICITY_X] = sample_data;
+ ret = 0;
+ break;
+ case HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y:
+ als_state->scan.illum[CHANNEL_SCAN_INDEX_CHROMATICITY_Y] = sample_data;
+ ret = 0;
+ break;
case HID_USAGE_SENSOR_TIME_TIMESTAMP:
als_state->timestamp = hid_sensor_convert_timestamp(&als_state->common_attributes,
*(s64 *)raw_data);
@@ -238,27 +314,38 @@ static int als_capture_sample(struct hid_sensor_hub_device *hsdev,
/* Parse report which is specific to an usage id*/
static int als_parse_report(struct platform_device *pdev,
struct hid_sensor_hub_device *hsdev,
- struct iio_chan_spec *channels,
unsigned usage_id,
struct als_state *st)
{
- int ret;
+ struct iio_chan_spec *channels;
+ int ret, index = 0;
int i;
- for (i = 0; i <= CHANNEL_SCAN_INDEX_ILLUM; ++i) {
+ channels = st->channels;
+
+ for (i = 0; i < CHANNEL_SCAN_INDEX_MAX; ++i) {
ret = sensor_hub_input_get_attribute_info(hsdev,
HID_INPUT_REPORT,
usage_id,
- HID_USAGE_SENSOR_LIGHT_ILLUM,
+ als_usage_ids[i],
&st->als[i]);
if (ret < 0)
- return ret;
- als_adjust_channel_bit_mask(channels, i, st->als[i].size);
+ continue;
+
+ channels[index] = als_channels[i];
+ st->als_scan_mask[0] |= BIT(i);
+ als_adjust_channel_bit_mask(channels, index, st->als[i].size);
+ ++index;
dev_dbg(&pdev->dev, "als %x:%x\n", st->als[i].index,
st->als[i].report_id);
}
+ st->num_channels = index;
+ /* Return success if at least one usage id is present */
+ if (index)
+ ret = 0;
+
st->scale_precision = hid_sensor_format_scale(usage_id,
&st->als[CHANNEL_SCAN_INDEX_INTENSITY],
&st->scale_pre_decml, &st->scale_post_decml);
@@ -294,15 +381,7 @@ static int hid_als_probe(struct platform_device *pdev)
return ret;
}
- indio_dev->channels = devm_kmemdup(&pdev->dev, als_channels,
- sizeof(als_channels), GFP_KERNEL);
- if (!indio_dev->channels) {
- dev_err(&pdev->dev, "failed to duplicate channels\n");
- return -ENOMEM;
- }
-
ret = als_parse_report(pdev, hsdev,
- (struct iio_chan_spec *)indio_dev->channels,
hsdev->usage,
als_state);
if (ret) {
@@ -310,8 +389,15 @@ static int hid_als_probe(struct platform_device *pdev)
return ret;
}
- indio_dev->num_channels =
- ARRAY_SIZE(als_channels);
+ /* Add timestamp channel */
+ als_state->channels[als_state->num_channels] = als_channels[CHANNEL_SCAN_INDEX_TIMESTAMP];
+
+ /* +1 for adding timestamp channel */
+ indio_dev->num_channels = als_state->num_channels + 1;
+
+ indio_dev->channels = als_state->channels;
+ indio_dev->available_scan_masks = als_state->als_scan_mask;
+
indio_dev->info = &als_info;
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c
index 37e2807041a1d..869196746045e 100644
--- a/drivers/iio/light/jsa1212.c
+++ b/drivers/iio/light/jsa1212.c
@@ -12,10 +12,10 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
-#include <linux/acpi.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -438,7 +438,7 @@ static struct i2c_driver jsa1212_driver = {
.driver = {
.name = JSA1212_DRIVER_NAME,
.pm = pm_sleep_ptr(&jsa1212_pm_ops),
- .acpi_match_table = ACPI_PTR(jsa1212_acpi_match),
+ .acpi_match_table = jsa1212_acpi_match,
},
.probe = jsa1212_probe,
.remove = jsa1212_remove,
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 061c122fdc5e7..8c516ede91161 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -10,6 +10,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/delay.h>
@@ -1639,7 +1640,7 @@ static struct i2c_driver ltr501_driver = {
.name = LTR501_DRV_NAME,
.of_match_table = ltr501_of_match,
.pm = pm_sleep_ptr(&ltr501_pm_ops),
- .acpi_match_table = ACPI_PTR(ltr_acpi_match),
+ .acpi_match_table = ltr_acpi_match,
},
.probe = ltr501_probe,
.remove = ltr501_remove,
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index db96c5b73100c..26b464b1b650e 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -10,6 +10,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
@@ -19,7 +20,6 @@
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
-#include <linux/acpi.h>
#define MAX44000_DRV_NAME "max44000"
@@ -603,18 +603,16 @@ static const struct i2c_device_id max44000_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max44000_id);
-#ifdef CONFIG_ACPI
static const struct acpi_device_id max44000_acpi_match[] = {
{"MAX44000", 0},
{ }
};
MODULE_DEVICE_TABLE(acpi, max44000_acpi_match);
-#endif
static struct i2c_driver max44000_driver = {
.driver = {
.name = MAX44000_DRV_NAME,
- .acpi_match_table = ACPI_PTR(max44000_acpi_match),
+ .acpi_match_table = max44000_acpi_match,
},
.probe = max44000_probe,
.id_table = max44000_id,
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index bbb8581622f29..40d5732b5e320 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -10,11 +10,11 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/delay.h>
-#include <linux/acpi.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -1119,7 +1119,7 @@ static struct i2c_driver rpr0521_driver = {
.driver = {
.name = RPR0521_DRV_NAME,
.pm = pm_ptr(&rpr0521_pm_ops),
- .acpi_match_table = ACPI_PTR(rpr0521_acpi_match),
+ .acpi_match_table = rpr0521_acpi_match,
},
.probe = rpr0521_probe,
.remove = rpr0521_remove,
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index 72b08d870d337..7b71ad71d78de 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -7,11 +7,11 @@
* IIO driver for STK3310/STK3311. 7-bit I2C address: 0x48.
*/
-#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
@@ -712,7 +712,7 @@ static struct i2c_driver stk3310_driver = {
.name = "stk3310",
.of_match_table = stk3310_of_match,
.pm = pm_sleep_ptr(&stk3310_pm_ops),
- .acpi_match_table = ACPI_PTR(stk3310_acpi_id),
+ .acpi_match_table = stk3310_acpi_id,
},
.probe = stk3310_probe,
.remove = stk3310_remove,
diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
index 61b3b2aea626f..9189a1d4d7e1a 100644
--- a/drivers/iio/light/us5182d.c
+++ b/drivers/iio/light/us5182d.c
@@ -9,7 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/iio/events.h>
@@ -972,7 +972,7 @@ static struct i2c_driver us5182d_driver = {
.name = US5182D_DRV_NAME,
.pm = pm_ptr(&us5182d_pm_ops),
.of_match_table = us5182d_of_match,
- .acpi_match_table = ACPI_PTR(us5182d_acpi_match),
+ .acpi_match_table = us5182d_acpi_match,
},
.probe = us5182d_probe,
.remove = us5182d_remove,
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index fdf763a04b0bf..4e3641ff2ed44 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -90,6 +90,7 @@
#define VCNL4040_PS_CONF1_PS_SHUTDOWN BIT(0)
#define VCNL4040_PS_CONF2_PS_IT GENMASK(3, 1) /* Proximity integration time */
#define VCNL4040_CONF1_PS_PERS GENMASK(5, 4) /* Proximity interrupt persistence setting */
+#define VCNL4040_PS_CONF2_PS_HD BIT(11) /* Proximity high definition */
#define VCNL4040_PS_CONF2_PS_INT GENMASK(9, 8) /* Proximity interrupt mode */
#define VCNL4040_PS_CONF3_MPS GENMASK(6, 5) /* Proximity multi pulse number */
#define VCNL4040_PS_MS_LED_I GENMASK(10, 8) /* Proximity current */
@@ -114,6 +115,13 @@
#define VCNL4010_INT_DRDY \
(BIT(VCNL4010_INT_PROXIMITY) | BIT(VCNL4010_INT_ALS))
+#define VCNL4040_CONF3_PS_MPS_16BITS 3 /* 8 multi pulses */
+#define VCNL4040_CONF3_PS_LED_I_16BITS 3 /* 120 mA */
+
+#define VCNL4040_CONF3_PS_SAMPLE_16BITS \
+ (FIELD_PREP(VCNL4040_PS_CONF3_MPS, VCNL4040_CONF3_PS_MPS_16BITS) | \
+ FIELD_PREP(VCNL4040_PS_MS_LED_I, VCNL4040_CONF3_PS_LED_I_16BITS))
+
static const int vcnl4010_prox_sampling_frequency[][2] = {
{1, 950000},
{3, 906250},
@@ -195,6 +203,7 @@ struct vcnl4000_data {
enum vcnl4000_device_ids id;
int rev;
int al_scale;
+ int ps_scale;
u8 ps_int; /* proximity interrupt mode */
u8 als_int; /* ambient light interrupt mode*/
const struct vcnl4000_chip_spec *chip_spec;
@@ -345,6 +354,7 @@ static int vcnl4200_set_power_state(struct vcnl4000_data *data, bool on)
static int vcnl4200_init(struct vcnl4000_data *data)
{
int ret, id;
+ u16 regval;
ret = i2c_smbus_read_word_data(data->client, VCNL4200_DEV_ID);
if (ret < 0)
@@ -386,9 +396,32 @@ static int vcnl4200_init(struct vcnl4000_data *data)
break;
}
data->al_scale = data->chip_spec->ulux_step;
+ data->ps_scale = 16;
mutex_init(&data->vcnl4200_al.lock);
mutex_init(&data->vcnl4200_ps.lock);
+ /* Use 16 bits proximity sensor readings */
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF1);
+ if (ret < 0)
+ return ret;
+
+ regval = ret | VCNL4040_PS_CONF2_PS_HD;
+ ret = i2c_smbus_write_word_data(data->client, VCNL4200_PS_CONF1,
+ regval);
+ if (ret < 0)
+ return ret;
+
+ /* Align proximity sensor sample rate to 16 bits data width */
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF3);
+ if (ret < 0)
+ return ret;
+
+ regval = ret | VCNL4040_CONF3_PS_SAMPLE_16BITS;
+ ret = i2c_smbus_write_word_data(data->client, VCNL4200_PS_CONF3,
+ regval);
+ if (ret < 0)
+ return ret;
+
ret = data->chip_spec->set_power_state(data, true);
if (ret < 0)
return ret;
@@ -901,8 +934,9 @@ static int vcnl4000_read_raw(struct iio_dev *indio_dev,
break;
case IIO_PROXIMITY:
ret = data->chip_spec->measure_proximity(data, val);
+ *val2 = data->ps_scale;
if (!ret)
- ret = IIO_VAL_INT;
+ ret = IIO_VAL_FRACTIONAL;
break;
default:
ret = -EINVAL;
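/*
 * Editorial note (worked example, not part of the patch): IIO_VAL_FRACTIONAL
 * makes the core report *val / *val2, so with ps_scale = 16 a raw 16-bit
 * proximity reading of e.g. 4096 is presented to userspace as 256.0,
 * presumably to keep the reported range in line with the previous 12-bit
 * (non-HD) readings.
 */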
diff --git a/drivers/iio/light/vl6180.c b/drivers/iio/light/vl6180.c
index d4948dfc31ff1..dcadf6428a87d 100644
--- a/drivers/iio/light/vl6180.c
+++ b/drivers/iio/light/vl6180.c
@@ -20,7 +20,6 @@
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/err.h>
-#include <linux/of.h>
#include <linux/delay.h>
#include <linux/util_macros.h>
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 38532d840f2a3..cd2917d719047 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -6,6 +6,18 @@
menu "Magnetometer sensors"
+config AF8133J
+ tristate "Voltafield AF8133J 3-Axis Magnetometer"
+ depends on I2C
+ depends on OF
+ select REGMAP_I2C
+ help
+ Say yes here to build support for Voltafield AF8133J I2C-based
+ 3-axis magnetometer chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called af8133j.
+
config AK8974
tristate "Asahi Kasei AK8974 3-Axis Magnetometer"
depends on I2C
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index b1c784ea71c8e..ec5c46fbf999b 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -4,6 +4,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AF8133J) += af8133j.o
obj-$(CONFIG_AK8974) += ak8974.o
obj-$(CONFIG_AK8975) += ak8975.o
obj-$(CONFIG_BMC150_MAGN) += bmc150_magn.o
diff --git a/drivers/iio/magnetometer/af8133j.c b/drivers/iio/magnetometer/af8133j.c
new file mode 100644
index 0000000000000..742bbdf25f08c
--- /dev/null
+++ b/drivers/iio/magnetometer/af8133j.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * af8133j.c - Voltafield AF8133J magnetometer driver
+ *
+ * Copyright 2021 Icenowy Zheng <icenowy@aosc.io>
+ * Copyright 2024 Ondřej Jirman <megi@xff.cz>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define AF8133J_REG_OUT 0x03
+#define AF8133J_REG_PCODE 0x00
+#define AF8133J_REG_PCODE_VAL 0x5e
+#define AF8133J_REG_STATUS 0x02
+#define AF8133J_REG_STATUS_ACQ BIT(0)
+#define AF8133J_REG_STATE 0x0a
+#define AF8133J_REG_STATE_STBY 0x00
+#define AF8133J_REG_STATE_WORK 0x01
+#define AF8133J_REG_RANGE 0x0b
+#define AF8133J_REG_RANGE_22G 0x12
+#define AF8133J_REG_RANGE_12G 0x34
+#define AF8133J_REG_SWR 0x11
+#define AF8133J_REG_SWR_PERFORM 0x81
+
+static const char * const af8133j_supply_names[] = {
+ "avdd",
+ "dvdd",
+};
+
+struct af8133j_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ /*
+ * Protect device internal state between starting a measurement
+ * and reading the result.
+ */
+ struct mutex mutex;
+ struct iio_mount_matrix orientation;
+
+ struct gpio_desc *reset_gpiod;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(af8133j_supply_names)];
+
+ u8 range;
+};
+
+enum af8133j_axis {
+ AXIS_X = 0,
+ AXIS_Y,
+ AXIS_Z,
+};
+
+static const struct iio_mount_matrix *
+af8133j_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct af8133j_data *data = iio_priv(indio_dev);
+
+ return &data->orientation;
+}
+
+static const struct iio_chan_spec_ext_info af8133j_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, af8133j_get_mount_matrix),
+ { }
+};
+
+#define AF8133J_CHANNEL(_si, _axis) { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## _axis, \
+ .address = AXIS_ ## _axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = af8133j_ext_info, \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+}
+
+static const struct iio_chan_spec af8133j_channels[] = {
+ AF8133J_CHANNEL(0, X),
+ AF8133J_CHANNEL(1, Y),
+ AF8133J_CHANNEL(2, Z),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+static int af8133j_product_check(struct af8133j_data *data)
+{
+ struct device *dev = &data->client->dev;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(data->regmap, AF8133J_REG_PCODE, &val);
+ if (ret) {
+ dev_err(dev, "Error reading product code (%d)\n", ret);
+ return ret;
+ }
+
+ if (val != AF8133J_REG_PCODE_VAL) {
+ dev_warn(dev, "Invalid product code (0x%02x)\n", val);
+ return 0; /* Allow unknown ID so fallback compatibles work */
+ }
+
+ return 0;
+}
+
+static int af8133j_reset(struct af8133j_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+
+ if (data->reset_gpiod) {
+ /* If we have GPIO reset line, use it */
+ gpiod_set_value_cansleep(data->reset_gpiod, 1);
+ udelay(10);
+ gpiod_set_value_cansleep(data->reset_gpiod, 0);
+ } else {
+ /* Otherwise use software reset */
+ ret = regmap_write(data->regmap, AF8133J_REG_SWR,
+ AF8133J_REG_SWR_PERFORM);
+ if (ret) {
+ dev_err(dev, "Failed to reset the chip\n");
+ return ret;
+ }
+ }
+
+ /* Wait for reset to finish */
+ usleep_range(1000, 1100);
+
+ /* Restore range setting */
+ if (data->range == AF8133J_REG_RANGE_22G) {
+ ret = regmap_write(data->regmap, AF8133J_REG_RANGE, data->range);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void af8133j_power_down(struct af8133j_data *data)
+{
+ gpiod_set_value_cansleep(data->reset_gpiod, 1);
+ regulator_bulk_disable(ARRAY_SIZE(data->supplies), data->supplies);
+}
+
+static int af8133j_power_up(struct af8133j_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(data->supplies), data->supplies);
+ if (ret) {
+ dev_err(dev, "Could not enable regulators\n");
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(data->reset_gpiod, 0);
+
+ /* Wait for power on reset */
+ usleep_range(15000, 16000);
+
+ ret = af8133j_reset(data);
+ if (ret) {
+ af8133j_power_down(data);
+ return ret;
+ }
+
+ return 0;
+}
+
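+/*
+ * One-shot acquisition: switch the sensor into WORK mode, poll the status
+ * register for the data-ready bit, then put it back into standby.
+ */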
+static int af8133j_take_measurement(struct af8133j_data *data)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_write(data->regmap,
+ AF8133J_REG_STATE, AF8133J_REG_STATE_WORK);
+ if (ret)
+ return ret;
+
+ /* The datasheet says "Measure Time <1.5ms" */
+ ret = regmap_read_poll_timeout(data->regmap, AF8133J_REG_STATUS, val,
+ val & AF8133J_REG_STATUS_ACQ,
+ 500, 1500);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->regmap,
+ AF8133J_REG_STATE, AF8133J_REG_STATE_STBY);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int af8133j_read_measurement(struct af8133j_data *data, __le16 buf[3])
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ /*
+ * Ignore EACCES because that happens when RPM is disabled
+ * during system sleep, while userspace leaves e.g. an hrtimer
+ * trigger attached and the IIO core keeps trying to do measurements.
+ */
+ if (ret != -EACCES)
+ dev_err(dev, "Failed to power on (%d)\n", ret);
+ return ret;
+ }
+
+ scoped_guard(mutex, &data->mutex) {
+ ret = af8133j_take_measurement(data);
+ if (ret)
+ goto out_rpm_put;
+
+ ret = regmap_bulk_read(data->regmap, AF8133J_REG_OUT,
+ buf, sizeof(__le16) * 3);
+ }
+
+out_rpm_put:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
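+/*
+ * Scale values in gauss per LSB (IIO_VAL_INT_PLUS_NANO), matching the
+ * selected full-scale range over the signed 16-bit span:
+ * 12 G / 32768 ~= 366210 nG, 22 G / 32768 ~= 671386 nG.
+ */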
+static const int af8133j_scales[][2] = {
+ [0] = { 0, 366210 }, /* 12 gauss */
+ [1] = { 0, 671386 }, /* 22 gauss */
+};
+
+static int af8133j_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct af8133j_data *data = iio_priv(indio_dev);
+ __le16 buf[3];
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = af8133j_read_measurement(data, buf);
+ if (ret)
+ return ret;
+
+ *val = sign_extend32(le16_to_cpu(buf[chan->address]),
+ chan->scan_type.realbits - 1);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+
+ if (data->range == AF8133J_REG_RANGE_12G)
+ *val2 = af8133j_scales[0][1];
+ else
+ *val2 = af8133j_scales[1][1];
+
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int af8133j_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (const int *)af8133j_scales;
+ *length = ARRAY_SIZE(af8133j_scales) * 2;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int af8133j_set_scale(struct af8133j_data *data,
+ unsigned int val, unsigned int val2)
+{
+ struct device *dev = &data->client->dev;
+ u8 range;
+ int ret = 0;
+
+ if (af8133j_scales[0][0] == val && af8133j_scales[0][1] == val2)
+ range = AF8133J_REG_RANGE_12G;
+ else if (af8133j_scales[1][0] == val && af8133j_scales[1][1] == val2)
+ range = AF8133J_REG_RANGE_22G;
+ else
+ return -EINVAL;
+
+ pm_runtime_disable(dev);
+
+ /*
+ * When suspended, just store the new range to data->range to be
+ * applied later during power up.
+ */
+ if (!pm_runtime_status_suspended(dev))
+ scoped_guard(mutex, &data->mutex)
+ ret = regmap_write(data->regmap,
+ AF8133J_REG_RANGE, range);
+
+ pm_runtime_enable(dev);
+
+ data->range = range;
+ return ret;
+}
+
+static int af8133j_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct af8133j_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return af8133j_set_scale(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int af8133j_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static const struct iio_info af8133j_info = {
+ .read_raw = af8133j_read_raw,
+ .read_avail = af8133j_read_avail,
+ .write_raw = af8133j_write_raw,
+ .write_raw_get_fmt = af8133j_write_raw_get_fmt,
+};
+
+static irqreturn_t af8133j_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct af8133j_data *data = iio_priv(indio_dev);
+ s64 timestamp = iio_get_time_ns(indio_dev);
+ struct {
+ __le16 values[3];
+ s64 timestamp __aligned(8);
+ } sample;
+ int ret;
+
+ memset(&sample, 0, sizeof(sample));
+
+ ret = af8133j_read_measurement(data, sample.values);
+ if (ret)
+ goto out_done;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &sample, timestamp);
+
+out_done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static const struct regmap_config af8133j_regmap_config = {
+ .name = "af8133j_regmap",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = AF8133J_REG_SWR,
+ .cache_type = REGCACHE_NONE,
+};
+
+static void af8133j_power_down_action(void *ptr)
+{
+ struct af8133j_data *data = ptr;
+
+ if (!pm_runtime_status_suspended(&data->client->dev))
+ af8133j_power_down(data);
+}
+
+static int af8133j_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct af8133j_data *data;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+ int ret, i;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ regmap = devm_regmap_init_i2c(client, &af8133j_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "regmap initialization failed\n");
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ data->regmap = regmap;
+ data->range = AF8133J_REG_RANGE_12G;
+ mutex_init(&data->mutex);
+
+ data->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(data->reset_gpiod))
+ return dev_err_probe(dev, PTR_ERR(data->reset_gpiod),
+ "Failed to get reset gpio\n");
+
+ for (i = 0; i < ARRAY_SIZE(af8133j_supply_names); i++)
+ data->supplies[i].supply = af8133j_supply_names[i];
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->supplies),
+ data->supplies);
+ if (ret)
+ return ret;
+
+ ret = iio_read_mount_matrix(dev, &data->orientation);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read mount matrix\n");
+
+ ret = af8133j_power_up(data);
+ if (ret)
+ return ret;
+
+ pm_runtime_set_active(dev);
+
+ ret = devm_add_action_or_reset(dev, af8133j_power_down_action, data);
+ if (ret)
+ return ret;
+
+ ret = af8133j_product_check(data);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 500);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_put_autosuspend(dev);
+
+ indio_dev->info = &af8133j_info;
+ indio_dev->name = "af8133j";
+ indio_dev->channels = af8133j_channels;
+ indio_dev->num_channels = ARRAY_SIZE(af8133j_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ &af8133j_trigger_handler, NULL);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to setup iio triggered buffer\n");
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register iio device");
+
+ return 0;
+}
+
+static int af8133j_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct af8133j_data *data = iio_priv(indio_dev);
+
+ af8133j_power_down(data);
+
+ return 0;
+}
+
+static int af8133j_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct af8133j_data *data = iio_priv(indio_dev);
+
+ return af8133j_power_up(data);
+}
+
+static const struct dev_pm_ops af8133j_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(af8133j_runtime_suspend, af8133j_runtime_resume, NULL)
+};
+
+static const struct of_device_id af8133j_of_match[] = {
+ { .compatible = "voltafield,af8133j", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, af8133j_of_match);
+
+static const struct i2c_device_id af8133j_id[] = {
+ { "af8133j", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, af8133j_id);
+
+static struct i2c_driver af8133j_driver = {
+ .driver = {
+ .name = "af8133j",
+ .of_match_table = af8133j_of_match,
+ .pm = pm_ptr(&af8133j_pm_ops),
+ },
+ .probe = af8133j_probe,
+ .id_table = af8133j_id,
+};
+
+module_i2c_driver(af8133j_driver);
+
+MODULE_AUTHOR("Icenowy Zheng <icenowy@aosc.io>");
+MODULE_AUTHOR("Ondřej Jirman <megi@xff.cz>");
+MODULE_DESCRIPTION("Voltafield AF8133J magnetic sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/magnetometer/bmc150_magn_i2c.c b/drivers/iio/magnetometer/bmc150_magn_i2c.c
index 281d1fa31c8e1..48d9c698f520e 100644
--- a/drivers/iio/magnetometer/bmc150_magn_i2c.c
+++ b/drivers/iio/magnetometer/bmc150_magn_i2c.c
@@ -11,7 +11,6 @@
#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/acpi.h>
#include <linux/regmap.h>
#include "bmc150_magn.h"
@@ -68,7 +67,7 @@ static struct i2c_driver bmc150_magn_driver = {
.driver = {
.name = "bmc150_magn_i2c",
.of_match_table = bmc150_magn_of_match,
- .acpi_match_table = ACPI_PTR(bmc150_magn_acpi_match),
+ .acpi_match_table = bmc150_magn_acpi_match,
.pm = &bmc150_magn_pm_ops,
},
.probe = bmc150_magn_i2c_probe,
diff --git a/drivers/iio/magnetometer/bmc150_magn_spi.c b/drivers/iio/magnetometer/bmc150_magn_spi.c
index 882987721071b..abc75a05c46af 100644
--- a/drivers/iio/magnetometer/bmc150_magn_spi.c
+++ b/drivers/iio/magnetometer/bmc150_magn_spi.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
-#include <linux/acpi.h>
#include <linux/regmap.h>
#include "bmc150_magn.h"
@@ -55,7 +54,7 @@ static struct spi_driver bmc150_magn_spi_driver = {
.remove = bmc150_magn_spi_remove,
.id_table = bmc150_magn_spi_id,
.driver = {
- .acpi_match_table = ACPI_PTR(bmc150_magn_acpi_match),
+ .acpi_match_table = bmc150_magn_acpi_match,
.name = "bmc150_magn_spi",
},
};
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index b495b8a639284..6b9f4b0561912 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -10,11 +10,11 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/regmap.h>
-#include <linux/acpi.h>
#include <linux/pm.h>
#include <linux/iio/iio.h>
@@ -573,7 +573,7 @@ static struct i2c_driver mmc35240_driver = {
.name = MMC35240_DRV_NAME,
.of_match_table = mmc35240_of_match,
.pm = pm_sleep_ptr(&mmc35240_pm_ops),
- .acpi_match_table = ACPI_PTR(mmc35240_acpi_match),
+ .acpi_match_table = mmc35240_acpi_match,
},
.probe = mmc35240_probe,
.id_table = mmc35240_id,
diff --git a/drivers/iio/potentiometer/max5487.c b/drivers/iio/potentiometer/max5487.c
index 42723c996c9f4..4838d2e72f53d 100644
--- a/drivers/iio/potentiometer/max5487.c
+++ b/drivers/iio/potentiometer/max5487.c
@@ -5,8 +5,8 @@
* Copyright (C) 2016 Cristina-Gabriela Moraru <cristina.moraru09@gmail.com>
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
-#include <linux/acpi.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/iio.h>
@@ -144,7 +144,7 @@ MODULE_DEVICE_TABLE(acpi, max5487_acpi_match);
static struct spi_driver max5487_driver = {
.driver = {
.name = "max5487",
- .acpi_match_table = ACPI_PTR(max5487_acpi_match),
+ .acpi_match_table = max5487_acpi_match,
},
.id_table = max5487_id,
.probe = max5487_spi_probe,
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 79adfd059c3a7..3ad38506028ef 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -114,6 +114,8 @@ config HSC030PA
depends on (I2C || SPI_MASTER)
select HSC030PA_I2C if I2C
select HSC030PA_SPI if SPI_MASTER
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say Y here to build support for the Honeywell TruStability
HSC and SSC pressure and temperature sensor series.
@@ -181,7 +183,9 @@ config MPL3115
config MPRLS0025PA
tristate "Honeywell MPRLS0025PA (MicroPressure sensors series)"
- depends on I2C
+ depends on (I2C || SPI_MASTER)
+ select MPRLS0025PA_I2C if I2C
+ select MPRLS0025PA_SPI if SPI_MASTER
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
@@ -192,6 +196,16 @@ config MPRLS0025PA
To compile this driver as a module, choose M here: the module will be
called mprls0025pa.
+config MPRLS0025PA_I2C
+ tristate
+ depends on MPRLS0025PA
+ depends on I2C
+
+config MPRLS0025PA_SPI
+ tristate
+ depends on MPRLS0025PA
+ depends on SPI_MASTER
+
config MS5611
tristate "Measurement Specialties MS5611 pressure sensor driver"
select IIO_BUFFER
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index b0f8b94662f20..a93709e357607 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -24,6 +24,8 @@ obj-$(CONFIG_MPL115_I2C) += mpl115_i2c.o
obj-$(CONFIG_MPL115_SPI) += mpl115_spi.o
obj-$(CONFIG_MPL3115) += mpl3115.o
obj-$(CONFIG_MPRLS0025PA) += mprls0025pa.o
+obj-$(CONFIG_MPRLS0025PA_I2C) += mprls0025pa_i2c.o
+obj-$(CONFIG_MPRLS0025PA_SPI) += mprls0025pa_spi.o
obj-$(CONFIG_MS5611) += ms5611_core.o
obj-$(CONFIG_MS5611_I2C) += ms5611_i2c.o
obj-$(CONFIG_MS5611_SPI) += ms5611_spi.o
diff --git a/drivers/iio/pressure/hp206c.c b/drivers/iio/pressure/hp206c.c
index a072de6cb59c7..261af1562827c 100644
--- a/drivers/iio/pressure/hp206c.c
+++ b/drivers/iio/pressure/hp206c.c
@@ -11,12 +11,12 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/delay.h>
#include <linux/util_macros.h>
-#include <linux/acpi.h>
#include <asm/unaligned.h>
@@ -400,20 +400,18 @@ static const struct i2c_device_id hp206c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, hp206c_id);
-#ifdef CONFIG_ACPI
static const struct acpi_device_id hp206c_acpi_match[] = {
{"HOP206C", 0},
{ },
};
MODULE_DEVICE_TABLE(acpi, hp206c_acpi_match);
-#endif
static struct i2c_driver hp206c_driver = {
.probe = hp206c_probe,
.id_table = hp206c_id,
.driver = {
.name = "hp206c",
- .acpi_match_table = ACPI_PTR(hp206c_acpi_match),
+ .acpi_match_table = hp206c_acpi_match,
},
};
diff --git a/drivers/iio/pressure/hsc030pa.c b/drivers/iio/pressure/hsc030pa.c
index d6a51f0c335fa..1682b90d4557c 100644
--- a/drivers/iio/pressure/hsc030pa.c
+++ b/drivers/iio/pressure/hsc030pa.c
@@ -22,8 +22,11 @@
#include <linux/types.h>
#include <linux/units.h>
+#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#include <asm/unaligned.h>
@@ -297,6 +300,29 @@ static int hsc_get_measurement(struct hsc_data *data)
return 0;
}
+static irqreturn_t hsc_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct hsc_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = hsc_get_measurement(data);
+ if (ret)
+ goto error;
+
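+ /*
+ * The 4-byte measurement frame packs status plus the 14-bit pressure
+ * count in bytes 0-1 and the 11-bit temperature count in bytes 2-3
+ * (big-endian), so copy two bytes per channel into the scan buffer.
+ */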
+ memcpy(&data->scan.chan[0], &data->buffer[0], 2);
+ memcpy(&data->scan.chan[1], &data->buffer[2], 2);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ iio_get_time_ns(indio_dev));
+
+error:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
/*
* IIO ABI expects
* value = (conv + offset) * scale
@@ -382,13 +408,29 @@ static const struct iio_chan_spec hsc_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 14,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
},
{
.type = IIO_TEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 11,
+ .storagebits = 16,
+ .shift = 5,
+ .endianness = IIO_BE,
+ },
},
+ IIO_CHAN_SOFT_TIMESTAMP(2),
};
static const struct iio_info hsc_info = {
@@ -406,7 +448,7 @@ int hsc_common_probe(struct device *dev, hsc_recv_fn recv)
struct hsc_data *hsc;
struct iio_dev *indio_dev;
const char *triplet;
- u64 tmp;
+ s64 tmp;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*hsc));
@@ -485,6 +527,11 @@ int hsc_common_probe(struct device *dev, hsc_recv_fn recv)
indio_dev->channels = hsc->chip->channels;
indio_dev->num_channels = hsc->chip->num_channels;
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ hsc_trigger_handler, NULL);
+ if (ret)
+ return ret;
+
return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_NS(hsc_common_probe, IIO_HONEYWELL_HSC030PA);
diff --git a/drivers/iio/pressure/hsc030pa.h b/drivers/iio/pressure/hsc030pa.h
index d20420dba4f6b..9b40f46f575ff 100644
--- a/drivers/iio/pressure/hsc030pa.h
+++ b/drivers/iio/pressure/hsc030pa.h
@@ -10,7 +10,10 @@
#include <linux/types.h>
+#include <linux/iio/iio.h>
+
#define HSC_REG_MEASUREMENT_RD_SIZE 4
+#define HSC_RESP_TIME_MS 2
struct device;
@@ -53,6 +56,10 @@ struct hsc_data {
s32 p_scale_dec;
s64 p_offset;
s32 p_offset_dec;
+ struct {
+ __be16 chan[2];
+ s64 timestamp __aligned(8);
+ } scan;
u8 buffer[HSC_REG_MEASUREMENT_RD_SIZE] __aligned(IIO_DMA_MINALIGN);
};
diff --git a/drivers/iio/pressure/hsc030pa_i2c.c b/drivers/iio/pressure/hsc030pa_i2c.c
index e2b524b364170..b3fd230e71da1 100644
--- a/drivers/iio/pressure/hsc030pa_i2c.c
+++ b/drivers/iio/pressure/hsc030pa_i2c.c
@@ -4,14 +4,17 @@
*
* Copyright (c) 2023 Petre Rodan <petre.rodan@subdimension.ro>
*
- * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/trustability-hsc-series/documents/sps-siot-trustability-hsc-series-high-accuracy-board-mount-pressure-sensors-50099148-a-en-ciid-151133.pdf [hsc]
- * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/common/documents/sps-siot-i2c-comms-digital-output-pressure-sensors-tn-008201-3-en-ciid-45841.pdf [i2c related]
+ * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/common/documents/sps-siot-i2c-comms-digital-output-pressure-sensors-tn-008201-3-en-ciid-45841.pdf
+ * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/common/documents/sps-siot-sleep-mode-technical-note-008286-1-en-ciid-155793.pdf
*/
+#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/types.h>
#include <linux/iio/iio.h>
@@ -23,6 +26,8 @@ static int hsc_i2c_recv(struct hsc_data *data)
struct i2c_msg msg;
int ret;
+ msleep_interruptible(HSC_RESP_TIME_MS);
+
msg.addr = client->addr;
msg.flags = client->flags | I2C_M_RD;
msg.len = HSC_REG_MEASUREMENT_RD_SIZE;
diff --git a/drivers/iio/pressure/hsc030pa_spi.c b/drivers/iio/pressure/hsc030pa_spi.c
index a719bade83266..818fa63034545 100644
--- a/drivers/iio/pressure/hsc030pa_spi.c
+++ b/drivers/iio/pressure/hsc030pa_spi.c
@@ -4,13 +4,17 @@
*
* Copyright (c) 2023 Petre Rodan <petre.rodan@subdimension.ro>
*
- * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/trustability-hsc-series/documents/sps-siot-trustability-hsc-series-high-accuracy-board-mount-pressure-sensors-50099148-a-en-ciid-151133.pdf
+ * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/common/documents/sps-siot-spi-comms-digital-ouptu-pressure-sensors-tn-008202-3-en-ciid-45843.pdf
+ * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/common/documents/sps-siot-sleep-mode-technical-note-008286-1-en-ciid-155793.pdf
*/
+#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/stddef.h>
+#include <linux/types.h>
#include <linux/iio/iio.h>
@@ -25,6 +29,7 @@ static int hsc_spi_recv(struct hsc_data *data)
.len = HSC_REG_MEASUREMENT_RD_SIZE,
};
+ msleep_interruptible(HSC_RESP_TIME_MS);
return spi_sync_transfer(spi, &xfer, 1);
}
diff --git a/drivers/iio/pressure/mprls0025pa.c b/drivers/iio/pressure/mprls0025pa.c
index 30fb2de368210..33a15d4c642c0 100644
--- a/drivers/iio/pressure/mprls0025pa.c
+++ b/drivers/iio/pressure/mprls0025pa.c
@@ -5,17 +5,13 @@
* Copyright (c) Andreas Klinger <ak@it-klinger.de>
*
* Data sheet:
- * https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/
- * products/sensors/pressure-sensors/board-mount-pressure-sensors/
- * micropressure-mpr-series/documents/
- * sps-siot-mpr-series-datasheet-32332628-ciid-172626.pdf
+ * https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/micropressure-mpr-series/documents/sps-siot-mpr-series-datasheet-32332628-ciid-172626.pdf
*
- * 7-bit I2C default slave address: 0x18
*/
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/i2c.h>
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
@@ -25,7 +21,6 @@
#include <linux/gpio/consumer.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
@@ -33,11 +28,15 @@
#include <asm/unaligned.h>
-/* bits in i2c status byte */
-#define MPR_I2C_POWER BIT(6) /* device is powered */
-#define MPR_I2C_BUSY BIT(5) /* device is busy */
-#define MPR_I2C_MEMORY BIT(2) /* integrity test passed */
-#define MPR_I2C_MATH BIT(0) /* internal math saturation */
+#include "mprls0025pa.h"
+
+/* bits in status byte */
+#define MPR_ST_POWER BIT(6) /* device is powered */
+#define MPR_ST_BUSY BIT(5) /* device is busy */
+#define MPR_ST_MEMORY BIT(2) /* integrity test passed */
+#define MPR_ST_MATH BIT(0) /* internal math saturation */
+
+#define MPR_ST_ERR_FLAG (MPR_ST_BUSY | MPR_ST_MEMORY | MPR_ST_MATH)
/*
* support _RAW sysfs interface:
@@ -70,60 +69,87 @@
* transfer function B: 2.5% to 22.5% of 2^24
* transfer function C: 20% to 80% of 2^24
*/
-enum mpr_func_id {
- MPR_FUNCTION_A,
- MPR_FUNCTION_B,
- MPR_FUNCTION_C,
-};
-
struct mpr_func_spec {
u32 output_min;
u32 output_max;
};
static const struct mpr_func_spec mpr_func_spec[] = {
- [MPR_FUNCTION_A] = {.output_min = 1677722, .output_max = 15099494},
- [MPR_FUNCTION_B] = {.output_min = 419430, .output_max = 3774874},
- [MPR_FUNCTION_C] = {.output_min = 3355443, .output_max = 13421773},
+ [MPR_FUNCTION_A] = { .output_min = 1677722, .output_max = 15099494 },
+ [MPR_FUNCTION_B] = { .output_min = 419430, .output_max = 3774874 },
+ [MPR_FUNCTION_C] = { .output_min = 3355443, .output_max = 13421773 },
};
-struct mpr_chan {
- s32 pres; /* pressure value */
- s64 ts; /* timestamp */
+enum mpr_variants {
+ MPR0001BA = 0x00, MPR01_6BA = 0x01, MPR02_5BA = 0x02, MPR0060MG = 0x03,
+ MPR0100MG = 0x04, MPR0160MG = 0x05, MPR0250MG = 0x06, MPR0400MG = 0x07,
+ MPR0600MG = 0x08, MPR0001BG = 0x09, MPR01_6BG = 0x0a, MPR02_5BG = 0x0b,
+ MPR0100KA = 0x0c, MPR0160KA = 0x0d, MPR0250KA = 0x0e, MPR0006KG = 0x0f,
+ MPR0010KG = 0x10, MPR0016KG = 0x11, MPR0025KG = 0x12, MPR0040KG = 0x13,
+ MPR0060KG = 0x14, MPR0100KG = 0x15, MPR0160KG = 0x16, MPR0250KG = 0x17,
+ MPR0015PA = 0x18, MPR0025PA = 0x19, MPR0030PA = 0x1a, MPR0001PG = 0x1b,
+ MPR0005PG = 0x1c, MPR0015PG = 0x1d, MPR0030PG = 0x1e, MPR0300YG = 0x1f,
+ MPR_VARIANTS_MAX
+};
+
+static const char * const mpr_triplet_variants[MPR_VARIANTS_MAX] = {
+ [MPR0001BA] = "0001BA", [MPR01_6BA] = "01.6BA", [MPR02_5BA] = "02.5BA",
+ [MPR0060MG] = "0060MG", [MPR0100MG] = "0100MG", [MPR0160MG] = "0160MG",
+ [MPR0250MG] = "0250MG", [MPR0400MG] = "0400MG", [MPR0600MG] = "0600MG",
+ [MPR0001BG] = "0001BG", [MPR01_6BG] = "01.6BG", [MPR02_5BG] = "02.5BG",
+ [MPR0100KA] = "0100KA", [MPR0160KA] = "0160KA", [MPR0250KA] = "0250KA",
+ [MPR0006KG] = "0006KG", [MPR0010KG] = "0010KG", [MPR0016KG] = "0016KG",
+ [MPR0025KG] = "0025KG", [MPR0040KG] = "0040KG", [MPR0060KG] = "0060KG",
+ [MPR0100KG] = "0100KG", [MPR0160KG] = "0160KG", [MPR0250KG] = "0250KG",
+ [MPR0015PA] = "0015PA", [MPR0025PA] = "0025PA", [MPR0030PA] = "0030PA",
+ [MPR0001PG] = "0001PG", [MPR0005PG] = "0005PG", [MPR0015PG] = "0015PG",
+ [MPR0030PG] = "0030PG", [MPR0300YG] = "0300YG"
+};
+
+/**
+ * struct mpr_range_config - list of pressure ranges based on nomenclature
+ * @pmin: lowest pressure that can be measured
+ * @pmax: highest pressure that can be measured
+ */
+struct mpr_range_config {
+ const s32 pmin;
+ const s32 pmax;
};
-struct mpr_data {
- struct i2c_client *client;
- struct mutex lock; /*
- * access to device during read
- */
- u32 pmin; /* minimal pressure in pascal */
- u32 pmax; /* maximal pressure in pascal */
- enum mpr_func_id function; /* transfer function */
- u32 outmin; /*
- * minimal numerical range raw
- * value from sensor
- */
- u32 outmax; /*
- * maximal numerical range raw
- * value from sensor
- */
- int scale; /* int part of scale */
- int scale2; /* nano part of scale */
- int offset; /* int part of offset */
- int offset2; /* nano part of offset */
- struct gpio_desc *gpiod_reset; /* reset */
- int irq; /*
- * end of conversion irq;
- * used to distinguish between
- * irq mode and reading in a
- * loop until data is ready
- */
- struct completion completion; /* handshake from irq to read */
- struct mpr_chan chan; /*
- * channel values for buffered
- * mode
- */
+/* All min max limits have been converted to pascals */
+static const struct mpr_range_config mpr_range_config[MPR_VARIANTS_MAX] = {
+ [MPR0001BA] = { .pmin = 0, .pmax = 100000 },
+ [MPR01_6BA] = { .pmin = 0, .pmax = 160000 },
+ [MPR02_5BA] = { .pmin = 0, .pmax = 250000 },
+ [MPR0060MG] = { .pmin = 0, .pmax = 6000 },
+ [MPR0100MG] = { .pmin = 0, .pmax = 10000 },
+ [MPR0160MG] = { .pmin = 0, .pmax = 16000 },
+ [MPR0250MG] = { .pmin = 0, .pmax = 25000 },
+ [MPR0400MG] = { .pmin = 0, .pmax = 40000 },
+ [MPR0600MG] = { .pmin = 0, .pmax = 60000 },
+ [MPR0001BG] = { .pmin = 0, .pmax = 100000 },
+ [MPR01_6BG] = { .pmin = 0, .pmax = 160000 },
+ [MPR02_5BG] = { .pmin = 0, .pmax = 250000 },
+ [MPR0100KA] = { .pmin = 0, .pmax = 100000 },
+ [MPR0160KA] = { .pmin = 0, .pmax = 160000 },
+ [MPR0250KA] = { .pmin = 0, .pmax = 250000 },
+ [MPR0006KG] = { .pmin = 0, .pmax = 6000 },
+ [MPR0010KG] = { .pmin = 0, .pmax = 10000 },
+ [MPR0016KG] = { .pmin = 0, .pmax = 16000 },
+ [MPR0025KG] = { .pmin = 0, .pmax = 25000 },
+ [MPR0040KG] = { .pmin = 0, .pmax = 40000 },
+ [MPR0060KG] = { .pmin = 0, .pmax = 60000 },
+ [MPR0100KG] = { .pmin = 0, .pmax = 100000 },
+ [MPR0160KG] = { .pmin = 0, .pmax = 160000 },
+ [MPR0250KG] = { .pmin = 0, .pmax = 250000 },
+ [MPR0015PA] = { .pmin = 0, .pmax = 103421 },
+ [MPR0025PA] = { .pmin = 0, .pmax = 172369 },
+ [MPR0030PA] = { .pmin = 0, .pmax = 206843 },
+ [MPR0001PG] = { .pmin = 0, .pmax = 6895 },
+ [MPR0005PG] = { .pmin = 0, .pmax = 34474 },
+ [MPR0015PG] = { .pmin = 0, .pmax = 103421 },
+ [MPR0030PG] = { .pmin = 0, .pmax = 206843 },
+ [MPR0300YG] = { .pmin = 0, .pmax = 39997 }
};
static const struct iio_chan_spec mpr_channels[] = {
@@ -153,11 +179,11 @@ static void mpr_reset(struct mpr_data *data)
}
/**
- * mpr_read_pressure() - Read pressure value from sensor via I2C
+ * mpr_read_pressure() - Read pressure value from sensor
* @data: Pointer to private data struct.
* @press: Output value read from sensor.
*
- * Reading from the sensor by sending and receiving I2C telegrams.
+ * Reading from the sensor by sending and receiving telegrams.
*
* If there is an end of conversion (EOC) interrupt registered the function
* waits for a maximum of one second for the interrupt.
@@ -170,25 +196,17 @@ static void mpr_reset(struct mpr_data *data)
*/
static int mpr_read_pressure(struct mpr_data *data, s32 *press)
{
- struct device *dev = &data->client->dev;
+ struct device *dev = data->dev;
int ret, i;
- u8 wdata[] = {0xAA, 0x00, 0x00};
- s32 status;
int nloops = 10;
- u8 buf[4];
reinit_completion(&data->completion);
- ret = i2c_master_send(data->client, wdata, sizeof(wdata));
+ ret = data->ops->write(data, MPR_CMD_SYNC, MPR_PKT_SYNC_LEN);
if (ret < 0) {
dev_err(dev, "error while writing ret: %d\n", ret);
return ret;
}
- if (ret != sizeof(wdata)) {
- dev_err(dev, "received size doesn't fit - ret: %d / %u\n", ret,
- (u32)sizeof(wdata));
- return -EIO;
- }
if (data->irq > 0) {
ret = wait_for_completion_timeout(&data->completion, HZ);
@@ -206,14 +224,14 @@ static int mpr_read_pressure(struct mpr_data *data, s32 *press)
* quite long
*/
usleep_range(5000, 10000);
- status = i2c_smbus_read_byte(data->client);
- if (status < 0) {
+ ret = data->ops->read(data, MPR_CMD_NOP, 1);
+ if (ret < 0) {
dev_err(dev,
"error while reading, status: %d\n",
- status);
- return status;
+ ret);
+ return ret;
}
- if (!(status & MPR_I2C_BUSY))
+ if (!(data->buffer[0] & MPR_ST_ERR_FLAG))
break;
}
if (i == nloops) {
@@ -222,29 +240,19 @@ static int mpr_read_pressure(struct mpr_data *data, s32 *press)
}
}
- ret = i2c_master_recv(data->client, buf, sizeof(buf));
- if (ret < 0) {
- dev_err(dev, "error in i2c_master_recv ret: %d\n", ret);
+ ret = data->ops->read(data, MPR_CMD_NOP, MPR_PKT_NOP_LEN);
+ if (ret < 0)
return ret;
- }
- if (ret != sizeof(buf)) {
- dev_err(dev, "received size doesn't fit - ret: %d / %u\n", ret,
- (u32)sizeof(buf));
- return -EIO;
- }
- if (buf[0] & MPR_I2C_BUSY) {
- /*
- * it should never be the case that status still indicates
- * business
- */
- dev_err(dev, "data still not ready: %08x\n", buf[0]);
+ if (data->buffer[0] & MPR_ST_ERR_FLAG) {
+ dev_err(data->dev,
+ "unexpected status byte %02x\n", data->buffer[0]);
return -ETIMEDOUT;
}
- *press = get_unaligned_be24(&buf[1]);
+ *press = get_unaligned_be24(&data->buffer[1]);
- dev_dbg(dev, "received: %*ph cnt: %d\n", ret, buf, *press);
+ dev_dbg(dev, "received: %*ph cnt: %d\n", ret, data->buffer, *press);
return 0;
}
@@ -271,7 +279,7 @@ static irqreturn_t mpr_trigger_handler(int irq, void *p)
goto err;
iio_push_to_buffers_with_timestamp(indio_dev, &data->chan,
- iio_get_time_ns(indio_dev));
+ iio_get_time_ns(indio_dev));
err:
mutex_unlock(&data->lock);
@@ -316,25 +324,23 @@ static const struct iio_info mpr_info = {
.read_raw = &mpr_read_raw,
};
-static int mpr_probe(struct i2c_client *client)
+int mpr_common_probe(struct device *dev, const struct mpr_ops *ops, int irq)
{
int ret;
struct mpr_data *data;
struct iio_dev *indio_dev;
- struct device *dev = &client->dev;
+ const char *triplet;
s64 scale, offset;
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_BYTE))
- return dev_err_probe(dev, -EOPNOTSUPP,
- "I2C functionality not supported\n");
+ u32 func;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM, "couldn't get iio_dev\n");
+ return -ENOMEM;
data = iio_priv(indio_dev);
- data->client = client;
- data->irq = client->irq;
+ data->dev = dev;
+ data->ops = ops;
+ data->irq = irq;
mutex_init(&data->lock);
init_completion(&data->completion);
@@ -348,103 +354,102 @@ static int mpr_probe(struct i2c_client *client)
ret = devm_regulator_get_enable(dev, "vdd");
if (ret)
return dev_err_probe(dev, ret,
- "can't get and enable vdd supply\n");
+ "can't get and enable vdd supply\n");
- if (dev_fwnode(dev)) {
+ ret = data->ops->init(data->dev);
+ if (ret)
+ return ret;
+
+ ret = device_property_read_u32(dev,
+ "honeywell,transfer-function", &func);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "honeywell,transfer-function could not be read\n");
+ data->function = func - 1;
+ if (data->function > MPR_FUNCTION_C)
+ return dev_err_probe(dev, -EINVAL,
+ "honeywell,transfer-function %d invalid\n",
+ data->function);
+
+ ret = device_property_read_string(dev, "honeywell,pressure-triplet",
+ &triplet);
+ if (ret) {
ret = device_property_read_u32(dev, "honeywell,pmin-pascal",
- &data->pmin);
+ &data->pmin);
if (ret)
return dev_err_probe(dev, ret,
- "honeywell,pmin-pascal could not be read\n");
+ "honeywell,pmin-pascal could not be read\n");
+
ret = device_property_read_u32(dev, "honeywell,pmax-pascal",
- &data->pmax);
+ &data->pmax);
if (ret)
return dev_err_probe(dev, ret,
- "honeywell,pmax-pascal could not be read\n");
- ret = device_property_read_u32(dev,
- "honeywell,transfer-function", &data->function);
- if (ret)
- return dev_err_probe(dev, ret,
- "honeywell,transfer-function could not be read\n");
- if (data->function > MPR_FUNCTION_C)
- return dev_err_probe(dev, -EINVAL,
- "honeywell,transfer-function %d invalid\n",
- data->function);
+ "honeywell,pmax-pascal could not be read\n");
} else {
- /* when loaded as i2c device we need to use default values */
- dev_notice(dev, "firmware node not found; using defaults\n");
- data->pmin = 0;
- data->pmax = 172369; /* 25 psi */
- data->function = MPR_FUNCTION_A;
+ ret = device_property_match_property_string(dev,
+ "honeywell,pressure-triplet",
+ mpr_triplet_variants,
+ MPR_VARIANTS_MAX);
+ if (ret < 0)
+ return dev_err_probe(dev, -EINVAL,
+ "honeywell,pressure-triplet is invalid\n");
+
+ data->pmin = mpr_range_config[ret].pmin;
+ data->pmax = mpr_range_config[ret].pmax;
}
+ if (data->pmin >= data->pmax)
+ return dev_err_probe(dev, -EINVAL,
+ "pressure limits are invalid\n");
+
data->outmin = mpr_func_spec[data->function].output_min;
data->outmax = mpr_func_spec[data->function].output_max;
/* use 64 bit calculation for preserving a reasonable precision */
scale = div_s64(((s64)(data->pmax - data->pmin)) * NANO,
- data->outmax - data->outmin);
+ data->outmax - data->outmin);
data->scale = div_s64_rem(scale, NANO, &data->scale2);
/*
* multiply with NANO before dividing by scale and later divide by NANO
* again.
*/
offset = ((-1LL) * (s64)data->outmin) * NANO -
- div_s64(div_s64((s64)data->pmin * NANO, scale), NANO);
+ div_s64(div_s64((s64)data->pmin * NANO, scale), NANO);
data->offset = div_s64_rem(offset, NANO, &data->offset2);
if (data->irq > 0) {
ret = devm_request_irq(dev, data->irq, mpr_eoc_handler,
- IRQF_TRIGGER_RISING, client->name, data);
+ IRQF_TRIGGER_RISING,
+ dev_name(dev),
+ data);
if (ret)
return dev_err_probe(dev, ret,
- "request irq %d failed\n", data->irq);
+ "request irq %d failed\n", data->irq);
}
data->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
- GPIOD_OUT_HIGH);
+ GPIOD_OUT_HIGH);
if (IS_ERR(data->gpiod_reset))
return dev_err_probe(dev, PTR_ERR(data->gpiod_reset),
- "request reset-gpio failed\n");
+ "request reset-gpio failed\n");
mpr_reset(data);
ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
- mpr_trigger_handler, NULL);
+ mpr_trigger_handler, NULL);
if (ret)
return dev_err_probe(dev, ret,
- "iio triggered buffer setup failed\n");
+ "iio triggered buffer setup failed\n");
ret = devm_iio_device_register(dev, indio_dev);
if (ret)
return dev_err_probe(dev, ret,
- "unable to register iio device\n");
+ "unable to register iio device\n");
return 0;
}
-
-static const struct of_device_id mpr_matches[] = {
- { .compatible = "honeywell,mprls0025pa" },
- { }
-};
-MODULE_DEVICE_TABLE(of, mpr_matches);
-
-static const struct i2c_device_id mpr_id[] = {
- { "mprls0025pa" },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, mpr_id);
-
-static struct i2c_driver mpr_driver = {
- .probe = mpr_probe,
- .id_table = mpr_id,
- .driver = {
- .name = "mprls0025pa",
- .of_match_table = mpr_matches,
- },
-};
-module_i2c_driver(mpr_driver);
+EXPORT_SYMBOL_NS(mpr_common_probe, IIO_HONEYWELL_MPRLS0025PA);
MODULE_AUTHOR("Andreas Klinger <ak@it-klinger.de>");
-MODULE_DESCRIPTION("Honeywell MPRLS0025PA I2C driver");
+MODULE_DESCRIPTION("Honeywell MPR pressure sensor core driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/mprls0025pa.h b/drivers/iio/pressure/mprls0025pa.h
new file mode 100644
index 0000000000000..9d5c30afa9d69
--- /dev/null
+++ b/drivers/iio/pressure/mprls0025pa.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MPRLS0025PA - Honeywell MicroPressure pressure sensor series driver
+ *
+ * Copyright (c) Andreas Klinger <ak@it-klinger.de>
+ *
+ * Data sheet:
+ * https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/micropressure-mpr-series/documents/sps-siot-mpr-series-datasheet-32332628-ciid-172626.pdf
+ */
+
+#ifndef _MPRLS0025PA_H
+#define _MPRLS0025PA_H
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+#include <linux/iio/iio.h>
+
+#define MPR_MEASUREMENT_RD_SIZE 4
+#define MPR_CMD_NOP 0xf0
+#define MPR_CMD_SYNC 0xaa
+#define MPR_PKT_NOP_LEN MPR_MEASUREMENT_RD_SIZE
+#define MPR_PKT_SYNC_LEN 3
+
+struct device;
+
+struct iio_chan_spec;
+struct iio_dev;
+
+struct mpr_data;
+struct mpr_ops;
+
+/**
+ * struct mpr_chan
+ * @pres: pressure value
+ * @ts: timestamp
+ */
+struct mpr_chan {
+ s32 pres;
+ s64 ts;
+};
+
+enum mpr_func_id {
+ MPR_FUNCTION_A,
+ MPR_FUNCTION_B,
+ MPR_FUNCTION_C,
+};
+
+/**
+ * struct mpr_data
+ * @dev: current device structure
+ * @ops: functions that implement the sensor reads/writes, bus init
+ * @lock: access to device during read
+ * @pmin: minimal pressure in pascal
+ * @pmax: maximal pressure in pascal
+ * @function: transfer function
+ * @outmin: minimum raw pressure in counts (based on transfer function)
+ * @outmax: maximum raw pressure in counts (based on transfer function)
+ * @scale: pressure scale
+ * @scale2: pressure scale, decimal number
+ * @offset: pressure offset
+ * @offset2: pressure offset, decimal number
+ * @gpiod_reset: reset
+ * @irq: end of conversion irq. used to distinguish between irq mode and
+ * reading in a loop until data is ready
+ * @completion: handshake from irq to read
+ * @chan: channel values for buffered mode
+ * @buffer: raw conversion data
+ */
+struct mpr_data {
+ struct device *dev;
+ const struct mpr_ops *ops;
+ struct mutex lock;
+ u32 pmin;
+ u32 pmax;
+ enum mpr_func_id function;
+ u32 outmin;
+ u32 outmax;
+ int scale;
+ int scale2;
+ int offset;
+ int offset2;
+ struct gpio_desc *gpiod_reset;
+ int irq;
+ struct completion completion;
+ struct mpr_chan chan;
+ u8 buffer[MPR_MEASUREMENT_RD_SIZE] __aligned(IIO_DMA_MINALIGN);
+};
+
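+/**
+ * struct mpr_ops - bus-specific hooks supplied by the I2C/SPI glue drivers
+ * @init: one-time bus setup (e.g. the SPI glue allocates a DMA-safe TX buffer)
+ * @read: read @cnt bytes into data->buffer; @cmd is only used on SPI
+ * @write: send command @cmd in a packet of @cnt bytes
+ */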
+struct mpr_ops {
+ int (*init)(struct device *dev);
+ int (*read)(struct mpr_data *data, const u8 cmd, const u8 cnt);
+ int (*write)(struct mpr_data *data, const u8 cmd, const u8 cnt);
+};
+
+int mpr_common_probe(struct device *dev, const struct mpr_ops *ops, int irq);
+
+#endif
diff --git a/drivers/iio/pressure/mprls0025pa_i2c.c b/drivers/iio/pressure/mprls0025pa_i2c.c
new file mode 100644
index 0000000000000..7a5c5aa2b456b
--- /dev/null
+++ b/drivers/iio/pressure/mprls0025pa_i2c.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MPRLS0025PA - Honeywell MicroPressure pressure sensor series driver
+ *
+ * Copyright (c) Andreas Klinger <ak@it-klinger.de>
+ *
+ * Data sheet:
+ * https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/micropressure-mpr-series/documents/sps-siot-mpr-series-datasheet-32332628-ciid-172626.pdf
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include "mprls0025pa.h"
+
+static int mpr_i2c_init(struct device *unused)
+{
+ return 0;
+}
+
+static int mpr_i2c_read(struct mpr_data *data, const u8 unused, const u8 cnt)
+{
+ int ret;
+ struct i2c_client *client = to_i2c_client(data->dev);
+
+ if (cnt > MPR_MEASUREMENT_RD_SIZE)
+ return -EOVERFLOW;
+
+ memset(data->buffer, 0, MPR_MEASUREMENT_RD_SIZE);
+ ret = i2c_master_recv(client, data->buffer, cnt);
+ if (ret < 0)
+ return ret;
+ else if (ret != cnt)
+ return -EIO;
+
+ return 0;
+}
+
+static int mpr_i2c_write(struct mpr_data *data, const u8 cmd, const u8 unused)
+{
+ int ret;
+ struct i2c_client *client = to_i2c_client(data->dev);
+ u8 wdata[MPR_PKT_SYNC_LEN];
+
+ memset(wdata, 0, sizeof(wdata));
+ wdata[0] = cmd;
+
+ ret = i2c_master_send(client, wdata, MPR_PKT_SYNC_LEN);
+ if (ret < 0)
+ return ret;
+ else if (ret != MPR_PKT_SYNC_LEN)
+ return -EIO;
+
+ return 0;
+}
+
+static const struct mpr_ops mpr_i2c_ops = {
+ .init = mpr_i2c_init,
+ .read = mpr_i2c_read,
+ .write = mpr_i2c_write,
+};
+
+static int mpr_i2c_probe(struct i2c_client *client)
+{
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_BYTE))
+ return -EOPNOTSUPP;
+
+ return mpr_common_probe(&client->dev, &mpr_i2c_ops, client->irq);
+}
+
+static const struct of_device_id mpr_i2c_match[] = {
+ { .compatible = "honeywell,mprls0025pa" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mpr_i2c_match);
+
+static const struct i2c_device_id mpr_i2c_id[] = {
+ { "mprls0025pa" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mpr_i2c_id);
+
+static struct i2c_driver mpr_i2c_driver = {
+ .probe = mpr_i2c_probe,
+ .id_table = mpr_i2c_id,
+ .driver = {
+ .name = "mprls0025pa",
+ .of_match_table = mpr_i2c_match,
+ },
+};
+module_i2c_driver(mpr_i2c_driver);
+
+MODULE_AUTHOR("Andreas Klinger <ak@it-klinger.de>");
+MODULE_DESCRIPTION("Honeywell MPR pressure sensor i2c driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HONEYWELL_MPRLS0025PA);
diff --git a/drivers/iio/pressure/mprls0025pa_spi.c b/drivers/iio/pressure/mprls0025pa_spi.c
new file mode 100644
index 0000000000000..3aed14cd95c5a
--- /dev/null
+++ b/drivers/iio/pressure/mprls0025pa_spi.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MPRLS0025PA - Honeywell MicroPressure MPR series SPI sensor driver
+ *
+ * Copyright (c) 2024 Petre Rodan <petre.rodan@subdimension.ro>
+ *
+ * Data sheet:
+ * https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/micropressure-mpr-series/documents/sps-siot-mpr-series-datasheet-32332628-ciid-172626.pdf
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+#include "mprls0025pa.h"
+
+struct mpr_spi_buf {
+ u8 tx[MPR_MEASUREMENT_RD_SIZE] __aligned(IIO_DMA_MINALIGN);
+};
+
+static int mpr_spi_init(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct mpr_spi_buf *buf;
+
+ buf = devm_kzalloc(dev, sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, buf);
+
+ return 0;
+}
+
+static int mpr_spi_xfer(struct mpr_data *data, const u8 cmd, const u8 pkt_len)
+{
+ struct spi_device *spi = to_spi_device(data->dev);
+ struct mpr_spi_buf *buf = spi_get_drvdata(spi);
+ struct spi_transfer xfer = { };
+
+ if (pkt_len > MPR_MEASUREMENT_RD_SIZE)
+ return -EOVERFLOW;
+
+ buf->tx[0] = cmd;
+ xfer.tx_buf = buf->tx;
+ xfer.rx_buf = data->buffer;
+ xfer.len = pkt_len;
+
+ return spi_sync_transfer(spi, &xfer, 1);
+}
+
+static const struct mpr_ops mpr_spi_ops = {
+ .init = mpr_spi_init,
+ .read = mpr_spi_xfer,
+ .write = mpr_spi_xfer,
+};
+
+static int mpr_spi_probe(struct spi_device *spi)
+{
+ return mpr_common_probe(&spi->dev, &mpr_spi_ops, spi->irq);
+}
+
+static const struct of_device_id mpr_spi_match[] = {
+ { .compatible = "honeywell,mprls0025pa" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mpr_spi_match);
+
+static const struct spi_device_id mpr_spi_id[] = {
+ { "mprls0025pa" },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, mpr_spi_id);
+
+static struct spi_driver mpr_spi_driver = {
+ .driver = {
+ .name = "mprls0025pa",
+ .of_match_table = mpr_spi_match,
+ },
+ .probe = mpr_spi_probe,
+ .id_table = mpr_spi_id,
+};
+module_spi_driver(mpr_spi_driver);
+
+MODULE_AUTHOR("Petre Rodan <petre.rodan@subdimension.ro>");
+MODULE_DESCRIPTION("Honeywell MPR pressure sensor spi driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HONEYWELL_MPRLS0025PA);
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 5101552e3f384..389523d6ae321 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -7,7 +7,6 @@
* Denis Ciocca <denis.ciocca@st.com>
*/
-#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -55,13 +54,11 @@ static const struct of_device_id st_press_of_match[] = {
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
-#ifdef CONFIG_ACPI
static const struct acpi_device_id st_press_acpi_match[] = {
{"SNO9210", LPS22HB},
{ },
};
MODULE_DEVICE_TABLE(acpi, st_press_acpi_match);
-#endif
static const struct i2c_device_id st_press_id_table[] = {
{ LPS001WP_PRESS_DEV_NAME, LPS001WP },
@@ -114,7 +111,7 @@ static struct i2c_driver st_press_driver = {
.driver = {
.name = "st-press-i2c",
.of_match_table = st_press_of_match,
- .acpi_match_table = ACPI_PTR(st_press_acpi_match),
+ .acpi_match_table = st_press_acpi_match,
},
.probe = st_press_i2c_probe,
.id_table = st_press_id_table,
diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c
index bcebacaf3dab0..4982686fb4c30 100644
--- a/drivers/iio/proximity/isl29501.c
+++ b/drivers/iio/proximity/isl29501.c
@@ -995,17 +995,16 @@ static const struct i2c_device_id isl29501_id[] = {
MODULE_DEVICE_TABLE(i2c, isl29501_id);
-#if defined(CONFIG_OF)
static const struct of_device_id isl29501_i2c_matches[] = {
{ .compatible = "renesas,isl29501" },
{ }
};
MODULE_DEVICE_TABLE(of, isl29501_i2c_matches);
-#endif
static struct i2c_driver isl29501_driver = {
.driver = {
.name = "isl29501",
+ .of_match_table = isl29501_i2c_matches,
},
.id_table = isl29501_id,
.probe = isl29501_probe,
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
index 0d230a0dff567..427c9343d6d16 100644
--- a/drivers/iio/proximity/sx9310.c
+++ b/drivers/iio/proximity/sx9310.c
@@ -337,28 +337,19 @@ static int sx9310_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
struct sx_common_data *data = iio_priv(indio_dev);
- int ret;
if (chan->type != IIO_PROXIMITY)
return -EINVAL;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = sx_common_read_proximity(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return sx_common_read_proximity(data, chan, val);
+ unreachable();
case IIO_CHAN_INFO_HARDWAREGAIN:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = sx9310_read_gain(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return sx9310_read_gain(data, chan, val);
+ unreachable();
case IIO_CHAN_INFO_SAMP_FREQ:
return sx9310_read_samp_freq(data, val, val2);
default:
@@ -546,12 +537,10 @@ static int sx9310_write_thresh(struct sx_common_data *data,
return -EINVAL;
regval = FIELD_PREP(SX9310_REG_PROX_CTRL8_9_PTHRESH_MASK, regval);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, reg,
- SX9310_REG_PROX_CTRL8_9_PTHRESH_MASK, regval);
- mutex_unlock(&data->mutex);
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, reg,
+ SX9310_REG_PROX_CTRL8_9_PTHRESH_MASK, regval);
}
static int sx9310_write_hysteresis(struct sx_common_data *data,
@@ -576,17 +565,14 @@ static int sx9310_write_hysteresis(struct sx_common_data *data,
return -EINVAL;
hyst = FIELD_PREP(SX9310_REG_PROX_CTRL10_HYST_MASK, hyst);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL10,
- SX9310_REG_PROX_CTRL10_HYST_MASK, hyst);
- mutex_unlock(&data->mutex);
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL10,
+ SX9310_REG_PROX_CTRL10_HYST_MASK, hyst);
}
static int sx9310_write_far_debounce(struct sx_common_data *data, int val)
{
- int ret;
unsigned int regval;
if (val > 0)
@@ -596,18 +582,14 @@ static int sx9310_write_far_debounce(struct sx_common_data *data, int val)
regval = FIELD_PREP(SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_MASK, val);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL10,
- SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_MASK,
- regval);
- mutex_unlock(&data->mutex);
-
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL10,
+ SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_MASK,
+ regval);
}
static int sx9310_write_close_debounce(struct sx_common_data *data, int val)
{
- int ret;
unsigned int regval;
if (val > 0)
@@ -617,13 +599,10 @@ static int sx9310_write_close_debounce(struct sx_common_data *data, int val)
regval = FIELD_PREP(SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_MASK, val);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL10,
- SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_MASK,
- regval);
- mutex_unlock(&data->mutex);
-
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL10,
+ SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_MASK,
+ regval);
}
static int sx9310_write_event_val(struct iio_dev *indio_dev,
@@ -658,7 +637,7 @@ static int sx9310_write_event_val(struct iio_dev *indio_dev,
static int sx9310_set_samp_freq(struct sx_common_data *data, int val, int val2)
{
- int i, ret;
+ int i;
for (i = 0; i < ARRAY_SIZE(sx9310_samp_freq_table); i++)
if (val == sx9310_samp_freq_table[i].val &&
@@ -668,23 +647,17 @@ static int sx9310_set_samp_freq(struct sx_common_data *data, int val, int val2)
if (i == ARRAY_SIZE(sx9310_samp_freq_table))
return -EINVAL;
- mutex_lock(&data->mutex);
-
- ret = regmap_update_bits(
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(
data->regmap, SX9310_REG_PROX_CTRL0,
SX9310_REG_PROX_CTRL0_SCANPERIOD_MASK,
FIELD_PREP(SX9310_REG_PROX_CTRL0_SCANPERIOD_MASK, i));
-
- mutex_unlock(&data->mutex);
-
- return ret;
}
static int sx9310_write_gain(struct sx_common_data *data,
const struct iio_chan_spec *chan, int val)
{
unsigned int gain, mask;
- int ret;
gain = ilog2(val);
@@ -703,12 +676,9 @@ static int sx9310_write_gain(struct sx_common_data *data,
return -EINVAL;
}
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL3, mask,
- gain);
- mutex_unlock(&data->mutex);
-
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL3, mask,
+ gain);
}
static int sx9310_write_raw(struct iio_dev *indio_dev,
@@ -969,22 +939,18 @@ static int sx9310_suspend(struct device *dev)
disable_irq_nosync(data->client->irq);
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0,
&data->suspend_ctrl);
if (ret)
- goto out;
+ return ret;
ctrl0 = data->suspend_ctrl & ~SX9310_REG_PROX_CTRL0_SENSOREN_MASK;
ret = regmap_write(data->regmap, SX9310_REG_PROX_CTRL0, ctrl0);
if (ret)
- goto out;
-
- ret = regmap_write(data->regmap, SX9310_REG_PAUSE, 0);
+ return ret;
-out:
- mutex_unlock(&data->mutex);
- return ret;
+ return regmap_write(data->regmap, SX9310_REG_PAUSE, 0);
}
static int sx9310_resume(struct device *dev)
@@ -992,18 +958,16 @@ static int sx9310_resume(struct device *dev)
struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));
int ret;
- mutex_lock(&data->mutex);
- ret = regmap_write(data->regmap, SX9310_REG_PAUSE, 1);
- if (ret)
- goto out;
-
- ret = regmap_write(data->regmap, SX9310_REG_PROX_CTRL0,
- data->suspend_ctrl);
+ scoped_guard(mutex, &data->mutex) {
+ ret = regmap_write(data->regmap, SX9310_REG_PAUSE, 1);
+ if (ret)
+ return ret;
-out:
- mutex_unlock(&data->mutex);
- if (ret)
- return ret;
+ ret = regmap_write(data->regmap, SX9310_REG_PROX_CTRL0,
+ data->suspend_ctrl);
+ if (ret)
+ return ret;
+ }
enable_irq(data->client->irq);
return 0;
diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c
index ac2ed2da21ccc..aa0d14a49d5e0 100644
--- a/drivers/iio/proximity/sx9324.c
+++ b/drivers/iio/proximity/sx9324.c
@@ -429,25 +429,16 @@ static int sx9324_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct sx_common_data *data = iio_priv(indio_dev);
- int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = sx_common_read_proximity(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return sx_common_read_proximity(data, chan, val);
+ unreachable();
case IIO_CHAN_INFO_HARDWAREGAIN:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = sx9324_read_gain(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return sx9324_read_gain(data, chan, val);
+ unreachable();
case IIO_CHAN_INFO_SAMP_FREQ:
return sx9324_read_samp_freq(data, val, val2);
default:
@@ -484,7 +475,7 @@ static int sx9324_read_avail(struct iio_dev *indio_dev,
static int sx9324_set_samp_freq(struct sx_common_data *data,
int val, int val2)
{
- int i, ret;
+ int i;
for (i = 0; i < ARRAY_SIZE(sx9324_samp_freq_table); i++)
if (val == sx9324_samp_freq_table[i].val &&
@@ -494,15 +485,11 @@ static int sx9324_set_samp_freq(struct sx_common_data *data,
if (i == ARRAY_SIZE(sx9324_samp_freq_table))
return -EINVAL;
- mutex_lock(&data->mutex);
-
- ret = regmap_update_bits(data->regmap,
- SX9324_REG_GNRL_CTRL0,
- SX9324_REG_GNRL_CTRL0_SCANPERIOD_MASK, i);
-
- mutex_unlock(&data->mutex);
+ guard(mutex)(&data->mutex);
- return ret;
+ return regmap_update_bits(data->regmap,
+ SX9324_REG_GNRL_CTRL0,
+ SX9324_REG_GNRL_CTRL0_SCANPERIOD_MASK, i);
}
static int sx9324_read_thresh(struct sx_common_data *data,
@@ -623,7 +610,6 @@ static int sx9324_write_thresh(struct sx_common_data *data,
const struct iio_chan_spec *chan, int _val)
{
unsigned int reg, val = _val;
- int ret;
reg = SX9324_REG_PROX_CTRL6 + chan->channel / 2;
@@ -633,11 +619,9 @@ static int sx9324_write_thresh(struct sx_common_data *data,
if (val > 0xff)
return -EINVAL;
- mutex_lock(&data->mutex);
- ret = regmap_write(data->regmap, reg, val);
- mutex_unlock(&data->mutex);
+ guard(mutex)(&data->mutex);
- return ret;
+ return regmap_write(data->regmap, reg, val);
}
static int sx9324_write_hysteresis(struct sx_common_data *data,
@@ -662,18 +646,15 @@ static int sx9324_write_hysteresis(struct sx_common_data *data,
return -EINVAL;
hyst = FIELD_PREP(SX9324_REG_PROX_CTRL5_HYST_MASK, hyst);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9324_REG_PROX_CTRL5,
- SX9324_REG_PROX_CTRL5_HYST_MASK, hyst);
- mutex_unlock(&data->mutex);
+ guard(mutex)(&data->mutex);
- return ret;
+ return regmap_update_bits(data->regmap, SX9324_REG_PROX_CTRL5,
+ SX9324_REG_PROX_CTRL5_HYST_MASK, hyst);
}
static int sx9324_write_far_debounce(struct sx_common_data *data, int _val)
{
unsigned int regval, val = _val;
- int ret;
if (val > 0)
val = ilog2(val);
@@ -682,19 +663,16 @@ static int sx9324_write_far_debounce(struct sx_common_data *data, int _val)
regval = FIELD_PREP(SX9324_REG_PROX_CTRL5_FAR_DEBOUNCE_MASK, val);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9324_REG_PROX_CTRL5,
- SX9324_REG_PROX_CTRL5_FAR_DEBOUNCE_MASK,
- regval);
- mutex_unlock(&data->mutex);
+ guard(mutex)(&data->mutex);
- return ret;
+ return regmap_update_bits(data->regmap, SX9324_REG_PROX_CTRL5,
+ SX9324_REG_PROX_CTRL5_FAR_DEBOUNCE_MASK,
+ regval);
}
static int sx9324_write_close_debounce(struct sx_common_data *data, int _val)
{
unsigned int regval, val = _val;
- int ret;
if (val > 0)
val = ilog2(val);
@@ -703,13 +681,11 @@ static int sx9324_write_close_debounce(struct sx_common_data *data, int _val)
regval = FIELD_PREP(SX9324_REG_PROX_CTRL5_CLOSE_DEBOUNCE_MASK, val);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9324_REG_PROX_CTRL5,
- SX9324_REG_PROX_CTRL5_CLOSE_DEBOUNCE_MASK,
- regval);
- mutex_unlock(&data->mutex);
+ guard(mutex)(&data->mutex);
- return ret;
+ return regmap_update_bits(data->regmap, SX9324_REG_PROX_CTRL5,
+ SX9324_REG_PROX_CTRL5_CLOSE_DEBOUNCE_MASK,
+ regval);
}
static int sx9324_write_event_val(struct iio_dev *indio_dev,
@@ -746,7 +722,6 @@ static int sx9324_write_gain(struct sx_common_data *data,
const struct iio_chan_spec *chan, int val)
{
unsigned int gain, reg;
- int ret;
reg = SX9324_REG_PROX_CTRL0 + chan->channel / 2;
@@ -756,13 +731,11 @@ static int sx9324_write_gain(struct sx_common_data *data,
gain = FIELD_PREP(SX9324_REG_PROX_CTRL0_GAIN_MASK, gain);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, reg,
- SX9324_REG_PROX_CTRL0_GAIN_MASK,
- gain);
- mutex_unlock(&data->mutex);
+ guard(mutex)(&data->mutex);
- return ret;
+ return regmap_update_bits(data->regmap, reg,
+ SX9324_REG_PROX_CTRL0_GAIN_MASK,
+ gain);
}
static int sx9324_write_raw(struct iio_dev *indio_dev,
@@ -873,6 +846,29 @@ static int sx9324_init_compensation(struct iio_dev *indio_dev)
20000, 2000000);
}
+static u8 sx9324_parse_phase_prop(struct device *dev,
+ struct sx_common_reg_default *reg_def,
+ const char *prop)
+{
+ unsigned int pin_defs[SX9324_NUM_PINS];
+ int count, ret, pin;
+ u32 raw = 0;
+
+ count = device_property_count_u32(dev, prop);
+ if (count != ARRAY_SIZE(pin_defs))
+ return reg_def->def;
+ ret = device_property_read_u32_array(dev, prop, pin_defs,
+ ARRAY_SIZE(pin_defs));
+ if (ret)
+ return reg_def->def;
+
+ for (pin = 0; pin < SX9324_NUM_PINS; pin++)
+ raw |= (pin_defs[pin] << (2 * pin)) &
+ SX9324_REG_AFE_PH0_PIN_MASK(pin);
+
+ return raw;
+}
+
static const struct sx_common_reg_default *
sx9324_get_default_reg(struct device *dev, int idx,
struct sx_common_reg_default *reg_def)
@@ -881,37 +877,29 @@ sx9324_get_default_reg(struct device *dev, int idx,
"highest" };
static const char * const sx9324_csidle[] = { "hi-z", "hi-z", "gnd",
"vdd" };
-#define SX9324_PIN_DEF "semtech,ph0-pin"
-#define SX9324_RESOLUTION_DEF "semtech,ph01-resolution"
-#define SX9324_PROXRAW_DEF "semtech,ph01-proxraw-strength"
- unsigned int pin_defs[SX9324_NUM_PINS];
- char prop[] = SX9324_PROXRAW_DEF;
u32 start = 0, raw = 0, pos = 0;
- int ret, count, ph, pin;
+ const char *prop;
+ int ret;
memcpy(reg_def, &sx9324_default_regs[idx], sizeof(*reg_def));
sx_common_get_raw_register_config(dev, reg_def);
switch (reg_def->reg) {
case SX9324_REG_AFE_PH0:
+ reg_def->def = sx9324_parse_phase_prop(dev, reg_def,
+ "semtech,ph0-pin");
+ break;
case SX9324_REG_AFE_PH1:
+ reg_def->def = sx9324_parse_phase_prop(dev, reg_def,
+ "semtech,ph1-pin");
+ break;
case SX9324_REG_AFE_PH2:
+ reg_def->def = sx9324_parse_phase_prop(dev, reg_def,
+ "semtech,ph2-pin");
+ break;
case SX9324_REG_AFE_PH3:
- ph = reg_def->reg - SX9324_REG_AFE_PH0;
- snprintf(prop, ARRAY_SIZE(prop), "semtech,ph%d-pin", ph);
-
- count = device_property_count_u32(dev, prop);
- if (count != ARRAY_SIZE(pin_defs))
- break;
- ret = device_property_read_u32_array(dev, prop, pin_defs,
- ARRAY_SIZE(pin_defs));
- if (ret)
- break;
-
- for (pin = 0; pin < SX9324_NUM_PINS; pin++)
- raw |= (pin_defs[pin] << (2 * pin)) &
- SX9324_REG_AFE_PH0_PIN_MASK(pin);
- reg_def->def = raw;
+ reg_def->def = sx9324_parse_phase_prop(dev, reg_def,
+ "semtech,ph3-pin");
break;
case SX9324_REG_AFE_CTRL0:
ret = device_property_match_property_string(dev, "semtech,cs-idle-sleep",
@@ -933,11 +921,9 @@ sx9324_get_default_reg(struct device *dev, int idx,
case SX9324_REG_AFE_CTRL4:
case SX9324_REG_AFE_CTRL7:
if (reg_def->reg == SX9324_REG_AFE_CTRL4)
- strncpy(prop, "semtech,ph01-resolution",
- ARRAY_SIZE(prop));
+ prop = "semtech,ph01-resolution";
else
- strncpy(prop, "semtech,ph23-resolution",
- ARRAY_SIZE(prop));
+ prop = "semtech,ph23-resolution";
ret = device_property_read_u32(dev, prop, &raw);
if (ret)
@@ -1008,11 +994,9 @@ sx9324_get_default_reg(struct device *dev, int idx,
case SX9324_REG_PROX_CTRL0:
case SX9324_REG_PROX_CTRL1:
if (reg_def->reg == SX9324_REG_PROX_CTRL0)
- strncpy(prop, "semtech,ph01-proxraw-strength",
- ARRAY_SIZE(prop));
+ prop = "semtech,ph01-proxraw-strength";
else
- strncpy(prop, "semtech,ph23-proxraw-strength",
- ARRAY_SIZE(prop));
+ prop = "semtech,ph23-proxraw-strength";
ret = device_property_read_u32(dev, prop, &raw);
if (ret)
break;
@@ -1081,34 +1065,30 @@ static int sx9324_suspend(struct device *dev)
disable_irq_nosync(data->client->irq);
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
ret = regmap_read(data->regmap, SX9324_REG_GNRL_CTRL1, &regval);
+ if (ret < 0)
+ return ret;
data->suspend_ctrl =
FIELD_GET(SX9324_REG_GNRL_CTRL1_PHEN_MASK, regval);
- if (ret < 0)
- goto out;
/* Disable all phases, send the device to sleep. */
- ret = regmap_write(data->regmap, SX9324_REG_GNRL_CTRL1, 0);
-
-out:
- mutex_unlock(&data->mutex);
- return ret;
+ return regmap_write(data->regmap, SX9324_REG_GNRL_CTRL1, 0);
}
static int sx9324_resume(struct device *dev)
{
struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));
- int ret;
- mutex_lock(&data->mutex);
- ret = regmap_write(data->regmap, SX9324_REG_GNRL_CTRL1,
- data->suspend_ctrl | SX9324_REG_GNRL_CTRL1_PAUSECTRL);
- mutex_unlock(&data->mutex);
- if (ret)
- return ret;
+ scoped_guard(mutex, &data->mutex) {
+ int ret = regmap_write(data->regmap, SX9324_REG_GNRL_CTRL1,
+ data->suspend_ctrl |
+ SX9324_REG_GNRL_CTRL1_PAUSECTRL);
+ if (ret)
+ return ret;
+ }
enable_irq(data->client->irq);
return 0;
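
iio_device_claim_direct_scoped() used in the read_raw paths above is the scoped form of iio_device_claim_direct_mode(): the first argument is the statement to execute if the claim fails, and the claim is dropped automatically when the body's scope exits; the unreachable() after it only documents that every path inside the body returns. A sketch of the pattern, reusing the sx_common_read_proximity() helper visible in the hunks (the wrapper function name is made up):

	static int example_read_raw(struct iio_dev *indio_dev,
				    const struct iio_chan_spec *chan,
				    int *val, int *val2, long mask)
	{
		struct sx_common_data *data = iio_priv(indio_dev);

		switch (mask) {
		case IIO_CHAN_INFO_RAW:
			/* Body runs with direct mode claimed; the claim is
			 * released when the scope is left. */
			iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
				return sx_common_read_proximity(data, chan, val);
			unreachable();
		default:
			return -EINVAL;
		}
	}
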
diff --git a/drivers/iio/proximity/sx9360.c b/drivers/iio/proximity/sx9360.c
index 2c4e14a4fe9fb..75a1c29f14ebe 100644
--- a/drivers/iio/proximity/sx9360.c
+++ b/drivers/iio/proximity/sx9360.c
@@ -322,25 +322,16 @@ static int sx9360_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct sx_common_data *data = iio_priv(indio_dev);
- int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = sx_common_read_proximity(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return sx_common_read_proximity(data, chan, val);
+ unreachable();
case IIO_CHAN_INFO_HARDWAREGAIN:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = sx9360_read_gain(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return sx9360_read_gain(data, chan, val);
+ unreachable();
case IIO_CHAN_INFO_SAMP_FREQ:
return sx9360_read_samp_freq(data, val, val2);
default:
@@ -387,19 +378,15 @@ static int sx9360_read_avail(struct iio_dev *indio_dev,
static int sx9360_set_samp_freq(struct sx_common_data *data,
int val, int val2)
{
- int ret, reg;
+ int reg;
__be16 buf;
reg = val * 8192 / SX9360_FOSC_HZ + val2 * 8192 / (SX9360_FOSC_MHZ);
buf = cpu_to_be16(reg);
- mutex_lock(&data->mutex);
-
- ret = regmap_bulk_write(data->regmap, SX9360_REG_GNRL_CTRL1, &buf,
- sizeof(buf));
+ guard(mutex)(&data->mutex);
- mutex_unlock(&data->mutex);
-
- return ret;
+ return regmap_bulk_write(data->regmap, SX9360_REG_GNRL_CTRL1, &buf,
+ sizeof(buf));
}
static int sx9360_read_thresh(struct sx_common_data *data, int *val)
@@ -510,7 +497,6 @@ static int sx9360_read_event_val(struct iio_dev *indio_dev,
static int sx9360_write_thresh(struct sx_common_data *data, int _val)
{
unsigned int val = _val;
- int ret;
if (val >= 1)
val = int_sqrt(2 * val);
@@ -518,11 +504,8 @@ static int sx9360_write_thresh(struct sx_common_data *data, int _val)
if (val > 0xff)
return -EINVAL;
- mutex_lock(&data->mutex);
- ret = regmap_write(data->regmap, SX9360_REG_PROX_CTRL5, val);
- mutex_unlock(&data->mutex);
-
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_write(data->regmap, SX9360_REG_PROX_CTRL5, val);
}
static int sx9360_write_hysteresis(struct sx_common_data *data, int _val)
@@ -546,18 +529,14 @@ static int sx9360_write_hysteresis(struct sx_common_data *data, int _val)
return -EINVAL;
hyst = FIELD_PREP(SX9360_REG_PROX_CTRL4_HYST_MASK, hyst);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9360_REG_PROX_CTRL4,
- SX9360_REG_PROX_CTRL4_HYST_MASK, hyst);
- mutex_unlock(&data->mutex);
-
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, SX9360_REG_PROX_CTRL4,
+ SX9360_REG_PROX_CTRL4_HYST_MASK, hyst);
}
static int sx9360_write_far_debounce(struct sx_common_data *data, int _val)
{
unsigned int regval, val = _val;
- int ret;
if (val > 0)
val = ilog2(val);
@@ -566,19 +545,15 @@ static int sx9360_write_far_debounce(struct sx_common_data *data, int _val)
regval = FIELD_PREP(SX9360_REG_PROX_CTRL4_FAR_DEBOUNCE_MASK, val);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9360_REG_PROX_CTRL4,
- SX9360_REG_PROX_CTRL4_FAR_DEBOUNCE_MASK,
- regval);
- mutex_unlock(&data->mutex);
-
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, SX9360_REG_PROX_CTRL4,
+ SX9360_REG_PROX_CTRL4_FAR_DEBOUNCE_MASK,
+ regval);
}
static int sx9360_write_close_debounce(struct sx_common_data *data, int _val)
{
unsigned int regval, val = _val;
- int ret;
if (val > 0)
val = ilog2(val);
@@ -587,13 +562,10 @@ static int sx9360_write_close_debounce(struct sx_common_data *data, int _val)
regval = FIELD_PREP(SX9360_REG_PROX_CTRL4_CLOSE_DEBOUNCE_MASK, val);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9360_REG_PROX_CTRL4,
- SX9360_REG_PROX_CTRL4_CLOSE_DEBOUNCE_MASK,
- regval);
- mutex_unlock(&data->mutex);
-
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, SX9360_REG_PROX_CTRL4,
+ SX9360_REG_PROX_CTRL4_CLOSE_DEBOUNCE_MASK,
+ regval);
}
static int sx9360_write_event_val(struct iio_dev *indio_dev,
@@ -630,19 +602,15 @@ static int sx9360_write_gain(struct sx_common_data *data,
const struct iio_chan_spec *chan, int val)
{
unsigned int gain, reg;
- int ret;
gain = ilog2(val);
reg = SX9360_REG_PROX_CTRL0_PHR + chan->channel;
gain = FIELD_PREP(SX9360_REG_PROX_CTRL0_GAIN_MASK, gain);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, reg,
- SX9360_REG_PROX_CTRL0_GAIN_MASK,
- gain);
- mutex_unlock(&data->mutex);
-
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, reg,
+ SX9360_REG_PROX_CTRL0_GAIN_MASK,
+ gain);
}
static int sx9360_write_raw(struct iio_dev *indio_dev,
@@ -827,36 +795,31 @@ static int sx9360_suspend(struct device *dev)
disable_irq_nosync(data->client->irq);
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
ret = regmap_read(data->regmap, SX9360_REG_GNRL_CTRL0, &regval);
+ if (ret < 0)
+ return ret;
data->suspend_ctrl =
FIELD_GET(SX9360_REG_GNRL_CTRL0_PHEN_MASK, regval);
- if (ret < 0)
- goto out;
/* Disable all phases, send the device to sleep. */
- ret = regmap_write(data->regmap, SX9360_REG_GNRL_CTRL0, 0);
-
-out:
- mutex_unlock(&data->mutex);
- return ret;
+ return regmap_write(data->regmap, SX9360_REG_GNRL_CTRL0, 0);
}
static int sx9360_resume(struct device *dev)
{
struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));
- int ret;
-
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9360_REG_GNRL_CTRL0,
- SX9360_REG_GNRL_CTRL0_PHEN_MASK,
- data->suspend_ctrl);
- mutex_unlock(&data->mutex);
- if (ret)
- return ret;
+ scoped_guard(mutex, &data->mutex) {
+ int ret = regmap_update_bits(data->regmap,
+ SX9360_REG_GNRL_CTRL0,
+ SX9360_REG_GNRL_CTRL0_PHEN_MASK,
+ data->suspend_ctrl);
+ if (ret)
+ return ret;
+ }
enable_irq(data->client->irq);
return 0;
}
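
In the resume callbacks the mutex must not be held across enable_irq(), so the conversions use scoped_guard() to bound the critical section explicitly instead of a function-wide guard(). A sketch of that shape (EXAMPLE_CTRL_REG is a hypothetical register; everything else mirrors the hunks above):

	static int example_resume(struct device *dev)
	{
		struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));

		scoped_guard(mutex, &data->mutex) {
			int ret = regmap_write(data->regmap, EXAMPLE_CTRL_REG,
					       data->suspend_ctrl);
			if (ret)
				return ret;	/* lock released here too */
		}

		/* Runs after the mutex has been dropped. */
		enable_irq(data->client->irq);
		return 0;
	}
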
diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
index fcb96c44d9540..39447c786af34 100644
--- a/drivers/iio/temperature/ltc2983.c
+++ b/drivers/iio/temperature/ltc2983.c
@@ -207,6 +207,7 @@ enum {
container_of(_sensor, struct ltc2983_temp, sensor)
struct ltc2983_chip_info {
+ const char *name;
unsigned int max_channels_nr;
bool has_temp;
bool has_eeprom;
@@ -1346,7 +1347,7 @@ static irqreturn_t ltc2983_irq_handler(int irq, void *data)
__chan; \
})
-static int ltc2983_parse_dt(struct ltc2983_data *st)
+static int ltc2983_parse_fw(struct ltc2983_data *st)
{
struct device *dev = &st->spi->dev;
struct fwnode_handle *child;
@@ -1605,7 +1606,6 @@ static int ltc2983_probe(struct spi_device *spi)
struct ltc2983_data *st;
struct iio_dev *indio_dev;
struct gpio_desc *gpio;
- const char *name = spi_get_device_id(spi)->name;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
@@ -1614,9 +1614,7 @@ static int ltc2983_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
- st->info = device_get_match_data(&spi->dev);
- if (!st->info)
- st->info = (void *)spi_get_device_id(spi)->driver_data;
+ st->info = spi_get_device_match_data(spi);
if (!st->info)
return -ENODEV;
@@ -1632,7 +1630,7 @@ static int ltc2983_probe(struct spi_device *spi)
st->eeprom_key = cpu_to_be32(LTC2983_EEPROM_KEY);
spi_set_drvdata(spi, st);
- ret = ltc2983_parse_dt(st);
+ ret = ltc2983_parse_fw(st);
if (ret)
return ret;
@@ -1657,7 +1655,7 @@ static int ltc2983_probe(struct spi_device *spi)
return ret;
ret = devm_request_irq(&spi->dev, spi->irq, ltc2983_irq_handler,
- IRQF_TRIGGER_RISING, name, st);
+ IRQF_TRIGGER_RISING, st->info->name, st);
if (ret) {
dev_err(&spi->dev, "failed to request an irq, %d", ret);
return ret;
@@ -1672,7 +1670,7 @@ static int ltc2983_probe(struct spi_device *spi)
return ret;
}
- indio_dev->name = name;
+ indio_dev->name = st->info->name;
indio_dev->num_channels = st->iio_channels;
indio_dev->channels = st->iio_chan;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -1703,15 +1701,25 @@ static DEFINE_SIMPLE_DEV_PM_OPS(ltc2983_pm_ops, ltc2983_suspend,
ltc2983_resume);
static const struct ltc2983_chip_info ltc2983_chip_info_data = {
+ .name = "ltc2983",
.max_channels_nr = 20,
};
static const struct ltc2983_chip_info ltc2984_chip_info_data = {
+ .name = "ltc2984",
.max_channels_nr = 20,
.has_eeprom = true,
};
static const struct ltc2983_chip_info ltc2986_chip_info_data = {
+ .name = "ltc2986",
+ .max_channels_nr = 10,
+ .has_temp = true,
+ .has_eeprom = true,
+};
+
+static const struct ltc2983_chip_info ltm2985_chip_info_data = {
+ .name = "ltm2985",
.max_channels_nr = 10,
.has_temp = true,
.has_eeprom = true,
@@ -1721,7 +1729,7 @@ static const struct spi_device_id ltc2983_id_table[] = {
{ "ltc2983", (kernel_ulong_t)&ltc2983_chip_info_data },
{ "ltc2984", (kernel_ulong_t)&ltc2984_chip_info_data },
{ "ltc2986", (kernel_ulong_t)&ltc2986_chip_info_data },
- { "ltm2985", (kernel_ulong_t)&ltc2986_chip_info_data },
+ { "ltm2985", (kernel_ulong_t)&ltm2985_chip_info_data },
{},
};
MODULE_DEVICE_TABLE(spi, ltc2983_id_table);
@@ -1730,7 +1738,7 @@ static const struct of_device_id ltc2983_of_match[] = {
{ .compatible = "adi,ltc2983", .data = &ltc2983_chip_info_data },
{ .compatible = "adi,ltc2984", .data = &ltc2984_chip_info_data },
{ .compatible = "adi,ltc2986", .data = &ltc2986_chip_info_data },
- { .compatible = "adi,ltm2985", .data = &ltc2986_chip_info_data },
+ { .compatible = "adi,ltm2985", .data = &ltm2985_chip_info_data },
{},
};
MODULE_DEVICE_TABLE(of, ltc2983_of_match);
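
spi_get_device_match_data() collapses the two lookups the old probe did by hand, device_get_match_data() for firmware-node matches with a fallback to the spi_device_id driver_data, into a single call. A minimal probe sketch under that assumption (the function name is illustrative):

	static int example_probe(struct spi_device *spi)
	{
		const struct ltc2983_chip_info *info;

		/* Covers both OF/ACPI matches and legacy SPI id matches. */
		info = spi_get_device_match_data(spi);
		if (!info)
			return -ENODEV;

		dev_info(&spi->dev, "%s: up to %u channels\n",
			 info->name, info->max_channels_nr);
		return 0;
	}
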
diff --git a/drivers/iio/temperature/tmp117.c b/drivers/iio/temperature/tmp117.c
index 059953015ae72..8972083d903a2 100644
--- a/drivers/iio/temperature/tmp117.c
+++ b/drivers/iio/temperature/tmp117.c
@@ -9,6 +9,7 @@
* Note: This driver assumes that the sensor has been calibrated beforehand.
*/
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
@@ -17,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/property.h>
+#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
@@ -148,10 +150,17 @@ static int tmp117_probe(struct i2c_client *client)
struct tmp117_data *data;
struct iio_dev *indio_dev;
int dev_id;
+ int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -EOPNOTSUPP;
+ ret = devm_regulator_get_enable(&client->dev, "vcc");
+ if (ret)
+ return ret;
+
+ fsleep(1500);
+
dev_id = i2c_smbus_read_word_swapped(client, TMP117_REG_DEVICE_ID);
if (dev_id < 0)
return dev_id;
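
The tmp117 hunk makes probe enable a "vcc" supply before the first register access: devm_regulator_get_enable() keeps the regulator enabled for the device's lifetime and disables it automatically on unbind, and the fsleep() gives the part time to power up before the ID read. A sketch of the same ordering (the 1.5 ms figure is taken from the hunk above; the function name is illustrative):

	static int example_sensor_probe(struct i2c_client *client)
	{
		int ret;

		/* Enabled now, disabled automatically when the device unbinds. */
		ret = devm_regulator_get_enable(&client->dev, "vcc");
		if (ret)
			return ret;

		/* Power-up delay before touching the bus. */
		fsleep(1500);

		/* ...device-ID read and the rest of probe follow here... */
		return 0;
	}
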
diff --git a/drivers/iio/test/Kconfig b/drivers/iio/test/Kconfig
index 0b6e4e278a2f6..33cca49c8058a 100644
--- a/drivers/iio/test/Kconfig
+++ b/drivers/iio/test/Kconfig
@@ -4,6 +4,20 @@
#
# Keep in alphabetical order
+config IIO_GTS_KUNIT_TEST
+	tristate "Test IIO gain-time-scale helpers" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ select IIO_GTS_HELPER
+ select TEST_KUNIT_DEVICE_HELPERS
+ default KUNIT_ALL_TESTS
+ help
+	  Build unit tests for the IIO light sensor gain-time-scale helpers.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+	  If unsure, say N.
+
config IIO_RESCALE_KUNIT_TEST
tristate "Test IIO rescale conversion functions" if !KUNIT_ALL_TESTS
depends on KUNIT && IIO_RESCALE
diff --git a/drivers/iio/test/Makefile b/drivers/iio/test/Makefile
index d76eaf36da820..e9a4cf1ff57f0 100644
--- a/drivers/iio/test/Makefile
+++ b/drivers/iio/test/Makefile
@@ -6,4 +6,5 @@
# Keep in alphabetical order
obj-$(CONFIG_IIO_RESCALE_KUNIT_TEST) += iio-test-rescale.o
obj-$(CONFIG_IIO_FORMAT_KUNIT_TEST) += iio-test-format.o
+obj-$(CONFIG_IIO_GTS_KUNIT_TEST) += iio-test-gts.o
CFLAGS_iio-test-format.o += $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/iio/test/iio-test-gts.c b/drivers/iio/test/iio-test-gts.c
new file mode 100644
index 0000000000000..cf7ab773ea0b1
--- /dev/null
+++ b/drivers/iio/test/iio-test-gts.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Unit tests for IIO light sensor gain-time-scale helpers
+ *
+ * Copyright (c) 2023 Matti Vaittinen <mazziesaccount@gmail.com>
+ */
+
+#include <kunit/device.h>
+#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/iio/iio-gts-helper.h>
+#include <linux/iio/types.h>
+
+/*
+ * Please, read the "rant" from the top of the lib/test_linear_ranges.c if
+ * you see a line of helper code which is not being tested.
+ *
+ * Then, please look at the line which is not being tested. Is this line
+ * somehow unusually complex? If the answer is "no", then chances are that the
+ * "development inertia" caused by adding a test exceeds the benefits.
+ *
+ * If yes, then adding a test is probably a good idea but please stop for a
+ * moment and consider the effort of changing all the tests when code gets
+ * refactored. Eventually it needs to be.
+ */
+
+#define TEST_TSEL_50 1
+#define TEST_TSEL_X_MIN TEST_TSEL_50
+#define TEST_TSEL_100 0
+#define TEST_TSEL_200 2
+#define TEST_TSEL_400 4
+#define TEST_TSEL_X_MAX TEST_TSEL_400
+
+#define TEST_GSEL_1 0x00
+#define TEST_GSEL_X_MIN TEST_GSEL_1
+#define TEST_GSEL_4 0x08
+#define TEST_GSEL_16 0x0a
+#define TEST_GSEL_32 0x0b
+#define TEST_GSEL_64 0x0c
+#define TEST_GSEL_256 0x18
+#define TEST_GSEL_512 0x19
+#define TEST_GSEL_1024 0x1a
+#define TEST_GSEL_2048 0x1b
+#define TEST_GSEL_4096 0x1c
+#define TEST_GSEL_X_MAX TEST_GSEL_4096
+
+#define TEST_SCALE_1X 64
+#define TEST_SCALE_MIN_X TEST_SCALE_1X
+#define TEST_SCALE_2X 32
+#define TEST_SCALE_4X 16
+#define TEST_SCALE_8X 8
+#define TEST_SCALE_16X 4
+#define TEST_SCALE_32X 2
+#define TEST_SCALE_64X 1
+
+#define TEST_SCALE_NANO_128X 500000000
+#define TEST_SCALE_NANO_256X 250000000
+#define TEST_SCALE_NANO_512X 125000000
+#define TEST_SCALE_NANO_1024X 62500000
+#define TEST_SCALE_NANO_2048X 31250000
+#define TEST_SCALE_NANO_4096X 15625000
+#define TEST_SCALE_NANO_4096X2 7812500
+#define TEST_SCALE_NANO_4096X4 3906250
+#define TEST_SCALE_NANO_4096X8 1953125
+
+#define TEST_SCALE_NANO_MAX_X TEST_SCALE_NANO_4096X8
+
+/*
+ * Can't have this allocated from stack because the kunit clean-up will
+ * happen only after the test function has already gone
+ */
+static struct iio_gts gts;
+
+static const struct iio_gain_sel_pair gts_test_gains[] = {
+ GAIN_SCALE_GAIN(1, TEST_GSEL_1),
+ GAIN_SCALE_GAIN(4, TEST_GSEL_4),
+ GAIN_SCALE_GAIN(16, TEST_GSEL_16),
+ GAIN_SCALE_GAIN(32, TEST_GSEL_32),
+ GAIN_SCALE_GAIN(64, TEST_GSEL_64),
+ GAIN_SCALE_GAIN(256, TEST_GSEL_256),
+ GAIN_SCALE_GAIN(512, TEST_GSEL_512),
+ GAIN_SCALE_GAIN(1024, TEST_GSEL_1024),
+ GAIN_SCALE_GAIN(2048, TEST_GSEL_2048),
+ GAIN_SCALE_GAIN(4096, TEST_GSEL_4096),
+#define HWGAIN_MAX 4096
+};
+
+static const struct iio_itime_sel_mul gts_test_itimes[] = {
+ GAIN_SCALE_ITIME_US(400 * 1000, TEST_TSEL_400, 8),
+ GAIN_SCALE_ITIME_US(200 * 1000, TEST_TSEL_200, 4),
+ GAIN_SCALE_ITIME_US(100 * 1000, TEST_TSEL_100, 2),
+ GAIN_SCALE_ITIME_US(50 * 1000, TEST_TSEL_50, 1),
+#define TIMEGAIN_MAX 8
+};
+#define TOTAL_GAIN_MAX (HWGAIN_MAX * TIMEGAIN_MAX)
+#define IIO_GTS_TEST_DEV "iio-gts-test-dev"
+
+static struct device *__test_init_iio_gain_scale(struct kunit *test,
+ struct iio_gts *gts, const struct iio_gain_sel_pair *g_table,
+ int num_g, const struct iio_itime_sel_mul *i_table, int num_i)
+{
+ struct device *dev;
+ int ret;
+
+ dev = kunit_device_register(test, IIO_GTS_TEST_DEV);
+
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, dev);
+ if (IS_ERR_OR_NULL(dev))
+ return NULL;
+
+ ret = devm_iio_init_iio_gts(dev, TEST_SCALE_1X, 0, g_table, num_g,
+ i_table, num_i, gts);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ if (ret)
+ return NULL;
+
+ return dev;
+}
+
+#define test_init_iio_gain_scale(test, gts) \
+ __test_init_iio_gain_scale(test, gts, gts_test_gains, \
+ ARRAY_SIZE(gts_test_gains), gts_test_itimes, \
+ ARRAY_SIZE(gts_test_itimes))
+
+static void test_init_iio_gts_invalid(struct kunit *test)
+{
+ struct device *dev;
+ int ret;
+ const struct iio_itime_sel_mul itimes_neg[] = {
+ GAIN_SCALE_ITIME_US(-10, TEST_TSEL_400, 8),
+ GAIN_SCALE_ITIME_US(200 * 1000, TEST_TSEL_200, 4),
+ };
+ const struct iio_gain_sel_pair gains_neg[] = {
+ GAIN_SCALE_GAIN(1, TEST_GSEL_1),
+ GAIN_SCALE_GAIN(2, TEST_GSEL_4),
+ GAIN_SCALE_GAIN(-2, TEST_GSEL_16),
+ };
+ /* 55555 * 38656 = 2147534080 => overflows 32bit int */
+ const struct iio_itime_sel_mul itimes_overflow[] = {
+ GAIN_SCALE_ITIME_US(400 * 1000, TEST_TSEL_400, 55555),
+ GAIN_SCALE_ITIME_US(200 * 1000, TEST_TSEL_200, 4),
+ };
+ const struct iio_gain_sel_pair gains_overflow[] = {
+ GAIN_SCALE_GAIN(1, TEST_GSEL_1),
+ GAIN_SCALE_GAIN(2, TEST_GSEL_4),
+ GAIN_SCALE_GAIN(38656, TEST_GSEL_16),
+ };
+
+ dev = kunit_device_register(test, IIO_GTS_TEST_DEV);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, dev);
+ if (!dev)
+ return;
+
+ /* Ok gains, negative time */
+ ret = devm_iio_init_iio_gts(dev, TEST_SCALE_1X, 0, gts_test_gains,
+ ARRAY_SIZE(gts_test_gains), itimes_neg,
+ ARRAY_SIZE(itimes_neg), &gts);
+ KUNIT_EXPECT_EQ(test, -EINVAL, ret);
+
+ /* Ok times, negative gain */
+ ret = devm_iio_init_iio_gts(dev, TEST_SCALE_1X, 0, gains_neg,
+ ARRAY_SIZE(gains_neg), gts_test_itimes,
+ ARRAY_SIZE(gts_test_itimes), &gts);
+ KUNIT_EXPECT_EQ(test, -EINVAL, ret);
+
+ /* gain * time overflow int */
+ ret = devm_iio_init_iio_gts(dev, TEST_SCALE_1X, 0, gains_overflow,
+ ARRAY_SIZE(gains_overflow), itimes_overflow,
+ ARRAY_SIZE(itimes_overflow), &gts);
+ KUNIT_EXPECT_EQ(test, -EOVERFLOW, ret);
+}
+
+static void test_iio_gts_find_gain_for_scale_using_time(struct kunit *test)
+{
+ struct device *dev;
+ int ret, gain_sel;
+
+ dev = test_init_iio_gain_scale(test, &gts);
+ if (!dev)
+ return;
+
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&gts, TEST_TSEL_100,
+ TEST_SCALE_8X, 0, &gain_sel);
+ /*
+ * Meas time 100 => gain by time 2x
+ * TEST_SCALE_8X matches total gain 8x
+ * => required HWGAIN 4x
+ */
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, TEST_GSEL_4, gain_sel);
+
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&gts, TEST_TSEL_200, 0,
+ TEST_SCALE_NANO_256X, &gain_sel);
+ /*
+ * Meas time 200 => gain by time 4x
+ * TEST_SCALE_256X matches total gain 256x
+ * => required HWGAIN 256/4 => 64x
+ */
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, TEST_GSEL_64, gain_sel);
+
+ /* Min time, Min gain */
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&gts, TEST_TSEL_X_MIN,
+ TEST_SCALE_MIN_X, 0, &gain_sel);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, TEST_GSEL_1, gain_sel);
+
+ /* Max time, Max gain */
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&gts, TEST_TSEL_X_MAX,
+ 0, TEST_SCALE_NANO_MAX_X, &gain_sel);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, TEST_GSEL_4096, gain_sel);
+
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&gts, TEST_TSEL_100, 0,
+ TEST_SCALE_NANO_256X, &gain_sel);
+ /*
+ * Meas time 100 => gain by time 2x
+ * TEST_SCALE_256X matches total gain 256x
+ * => required HWGAIN 256/2 => 128x (not in gain-table - unsupported)
+ */
+ KUNIT_EXPECT_NE(test, 0, ret);
+
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&gts, TEST_TSEL_200, 0,
+ TEST_SCALE_NANO_MAX_X, &gain_sel);
+ /* We can't reach the max gain with integration time smaller than MAX */
+ KUNIT_EXPECT_NE(test, 0, ret);
+
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&gts, TEST_TSEL_50, 0,
+ TEST_SCALE_NANO_MAX_X, &gain_sel);
+ /* We can't reach the max gain with integration time smaller than MAX */
+ KUNIT_EXPECT_NE(test, 0, ret);
+}
+
+static void test_iio_gts_find_new_gain_sel_by_old_gain_time(struct kunit *test)
+{
+ struct device *dev;
+ int ret, old_gain, new_gain, old_time_sel, new_time_sel;
+
+ dev = test_init_iio_gain_scale(test, &gts);
+ if (!dev)
+ return;
+
+ old_gain = 32;
+ old_time_sel = TEST_TSEL_200;
+ new_time_sel = TEST_TSEL_400;
+
+ ret = iio_gts_find_new_gain_sel_by_old_gain_time(&gts, old_gain,
+ old_time_sel, new_time_sel, &new_gain);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ /*
+ * Doubling the integration time doubles the total gain - so old
+ * (hw)gain must be divided by two to compensate. => 32 / 2 => 16
+ */
+ KUNIT_EXPECT_EQ(test, 16, new_gain);
+
+ old_gain = 4;
+ old_time_sel = TEST_TSEL_50;
+ new_time_sel = TEST_TSEL_200;
+ ret = iio_gts_find_new_gain_sel_by_old_gain_time(&gts, old_gain,
+ old_time_sel, new_time_sel, &new_gain);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ /*
+ * gain by time 1x => 4x - (hw)gain 4x => 1x
+ */
+ KUNIT_EXPECT_EQ(test, 1, new_gain);
+
+ old_gain = 512;
+ old_time_sel = TEST_TSEL_400;
+ new_time_sel = TEST_TSEL_50;
+ ret = iio_gts_find_new_gain_sel_by_old_gain_time(&gts, old_gain,
+ old_time_sel, new_time_sel, &new_gain);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ /*
+	 * gain by time 8x => 1x - (hw)gain 512x => 4096x
+ */
+ KUNIT_EXPECT_EQ(test, 4096, new_gain);
+
+ /* Unsupported gain 2x */
+ old_gain = 4;
+ old_time_sel = TEST_TSEL_200;
+ new_time_sel = TEST_TSEL_400;
+ ret = iio_gts_find_new_gain_sel_by_old_gain_time(&gts, old_gain,
+ old_time_sel, new_time_sel, &new_gain);
+ KUNIT_EXPECT_NE(test, 0, ret);
+
+ /* Too small gain */
+ old_gain = 4;
+ old_time_sel = TEST_TSEL_50;
+ new_time_sel = TEST_TSEL_400;
+ ret = iio_gts_find_new_gain_sel_by_old_gain_time(&gts, old_gain,
+ old_time_sel, new_time_sel, &new_gain);
+ KUNIT_EXPECT_NE(test, 0, ret);
+
+ /* Too big gain */
+ old_gain = 1024;
+ old_time_sel = TEST_TSEL_400;
+ new_time_sel = TEST_TSEL_50;
+ ret = iio_gts_find_new_gain_sel_by_old_gain_time(&gts, old_gain,
+ old_time_sel, new_time_sel, &new_gain);
+ KUNIT_EXPECT_NE(test, 0, ret);
+}
+
+static void test_iio_find_closest_gain_low(struct kunit *test)
+{
+ struct device *dev;
+ bool in_range;
+ int ret;
+
+ const struct iio_gain_sel_pair gts_test_gains_gain_low[] = {
+ GAIN_SCALE_GAIN(4, TEST_GSEL_4),
+ GAIN_SCALE_GAIN(16, TEST_GSEL_16),
+ GAIN_SCALE_GAIN(32, TEST_GSEL_32),
+ };
+
+ dev = test_init_iio_gain_scale(test, &gts);
+ if (!dev)
+ return;
+
+ ret = iio_find_closest_gain_low(&gts, 2, &in_range);
+ KUNIT_EXPECT_EQ(test, 1, ret);
+ KUNIT_EXPECT_EQ(test, true, in_range);
+
+ ret = iio_find_closest_gain_low(&gts, 1, &in_range);
+ KUNIT_EXPECT_EQ(test, 1, ret);
+ KUNIT_EXPECT_EQ(test, true, in_range);
+
+ ret = iio_find_closest_gain_low(&gts, 4095, &in_range);
+ KUNIT_EXPECT_EQ(test, 2048, ret);
+ KUNIT_EXPECT_EQ(test, true, in_range);
+
+ ret = iio_find_closest_gain_low(&gts, 4097, &in_range);
+ KUNIT_EXPECT_EQ(test, 4096, ret);
+ KUNIT_EXPECT_EQ(test, false, in_range);
+
+ kunit_device_unregister(test, dev);
+
+ dev = __test_init_iio_gain_scale(test, &gts, gts_test_gains_gain_low,
+ ARRAY_SIZE(gts_test_gains_gain_low),
+ gts_test_itimes, ARRAY_SIZE(gts_test_itimes));
+ if (!dev)
+ return;
+
+ ret = iio_find_closest_gain_low(&gts, 3, &in_range);
+ KUNIT_EXPECT_EQ(test, -EINVAL, ret);
+ KUNIT_EXPECT_EQ(test, false, in_range);
+}
+
+static void test_iio_gts_total_gain_to_scale(struct kunit *test)
+{
+ struct device *dev;
+ int ret, scale_int, scale_nano;
+
+ dev = test_init_iio_gain_scale(test, &gts);
+ if (!dev)
+ return;
+
+ ret = iio_gts_total_gain_to_scale(&gts, 1, &scale_int, &scale_nano);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, TEST_SCALE_1X, scale_int);
+ KUNIT_EXPECT_EQ(test, 0, scale_nano);
+
+ ret = iio_gts_total_gain_to_scale(&gts, 1, &scale_int, &scale_nano);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, TEST_SCALE_1X, scale_int);
+ KUNIT_EXPECT_EQ(test, 0, scale_nano);
+
+ ret = iio_gts_total_gain_to_scale(&gts, 4096 * 8, &scale_int,
+ &scale_nano);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 0, scale_int);
+ KUNIT_EXPECT_EQ(test, TEST_SCALE_NANO_4096X8, scale_nano);
+}
+
+static void test_iio_gts_chk_times(struct kunit *test, const int *vals)
+{
+ static const int expected[] = {0, 50000, 0, 100000, 0, 200000, 0, 400000};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(expected); i++)
+ KUNIT_EXPECT_EQ(test, expected[i], vals[i]);
+}
+
+static void test_iio_gts_chk_scales_all(struct kunit *test, struct iio_gts *gts,
+ const int *vals, int len)
+{
+ static const int gains[] = {1, 2, 4, 8, 16, 32, 64, 128, 256, 512,
+ 1024, 2048, 4096, 4096 * 2, 4096 * 4,
+ 4096 * 8};
+ int expected[ARRAY_SIZE(gains) * 2];
+ int i, ret;
+ int exp_len = ARRAY_SIZE(gains) * 2;
+
+ KUNIT_EXPECT_EQ(test, exp_len, len);
+ if (len != exp_len)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(gains); i++) {
+ ret = iio_gts_total_gain_to_scale(gts, gains[i],
+ &expected[2 * i],
+ &expected[2 * i + 1]);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ if (ret)
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(expected); i++)
+ KUNIT_EXPECT_EQ(test, expected[i], vals[i]);
+}
+
+static void test_iio_gts_chk_scales_t200(struct kunit *test, struct iio_gts *gts,
+ const int *vals, int len)
+{
+ /* The gain caused by time 200 is 4x */
+ static const int gains[] = {
+ 1 * 4,
+ 4 * 4,
+ 16 * 4,
+ 32 * 4,
+ 64 * 4,
+ 256 * 4,
+ 512 * 4,
+ 1024 * 4,
+ 2048 * 4,
+ 4096 * 4
+ };
+ int expected[ARRAY_SIZE(gains) * 2];
+ int i, ret;
+
+ KUNIT_EXPECT_EQ(test, 2 * ARRAY_SIZE(gains), len);
+ if (len < 2 * ARRAY_SIZE(gains))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(gains); i++) {
+ ret = iio_gts_total_gain_to_scale(gts, gains[i],
+ &expected[2 * i],
+ &expected[2 * i + 1]);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ if (ret)
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(expected); i++)
+ KUNIT_EXPECT_EQ(test, expected[i], vals[i]);
+}
+
+static void test_iio_gts_avail_test(struct kunit *test)
+{
+ struct device *dev;
+ int ret;
+ int type, len;
+ const int *vals;
+
+ dev = test_init_iio_gain_scale(test, &gts);
+ if (!dev)
+ return;
+
+ /* test table building for times and iio_gts_avail_times() */
+ ret = iio_gts_avail_times(&gts, &vals, &type, &len);
+ KUNIT_EXPECT_EQ(test, IIO_AVAIL_LIST, ret);
+ if (ret)
+ return;
+
+ KUNIT_EXPECT_EQ(test, IIO_VAL_INT_PLUS_MICRO, type);
+ KUNIT_EXPECT_EQ(test, 8, len);
+ if (len < 8)
+ return;
+
+ test_iio_gts_chk_times(test, vals);
+
+ /* Test table building for all scales and iio_gts_all_avail_scales() */
+ ret = iio_gts_all_avail_scales(&gts, &vals, &type, &len);
+ KUNIT_EXPECT_EQ(test, IIO_AVAIL_LIST, ret);
+ if (ret)
+ return;
+
+ KUNIT_EXPECT_EQ(test, IIO_VAL_INT_PLUS_NANO, type);
+
+ test_iio_gts_chk_scales_all(test, &gts, vals, len);
+
+ /*
+ * Test table building for scales/time and
+ * iio_gts_avail_scales_for_time()
+ */
+ ret = iio_gts_avail_scales_for_time(&gts, 200000, &vals, &type, &len);
+ KUNIT_EXPECT_EQ(test, IIO_AVAIL_LIST, ret);
+ if (ret)
+ return;
+
+ KUNIT_EXPECT_EQ(test, IIO_VAL_INT_PLUS_NANO, type);
+ test_iio_gts_chk_scales_t200(test, &gts, vals, len);
+}
+
+static struct kunit_case iio_gts_test_cases[] = {
+ KUNIT_CASE(test_init_iio_gts_invalid),
+ KUNIT_CASE(test_iio_gts_find_gain_for_scale_using_time),
+ KUNIT_CASE(test_iio_gts_find_new_gain_sel_by_old_gain_time),
+ KUNIT_CASE(test_iio_find_closest_gain_low),
+ KUNIT_CASE(test_iio_gts_total_gain_to_scale),
+ KUNIT_CASE(test_iio_gts_avail_test),
+ {}
+};
+
+static struct kunit_suite iio_gts_test_suite = {
+ .name = "iio-gain-time-scale",
+ .test_cases = iio_gts_test_cases,
+};
+
+kunit_test_suite(iio_gts_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Matti Vaittinen <mazziesaccount@gmail.com>");
+MODULE_DESCRIPTION("Test IIO light sensor gain-time-scale helpers");
+MODULE_IMPORT_NS(IIO_GTS_HELPER);
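
For context on what the new KUnit suite exercises: a consumer driver initialises a struct iio_gts once from its hardware-gain and integration-time tables and then converts between total gain and IIO scale through the helpers. A hedged sketch mirroring the calls made by the tests, reusing the gts_test_gains/gts_test_itimes tables and the 64 "scale at total gain 1x" value from the file above (function names are illustrative):

	static struct iio_gts example_gts;

	static int example_init_gts(struct device *dev)
	{
		return devm_iio_init_iio_gts(dev, 64, 0,
					     gts_test_gains,
					     ARRAY_SIZE(gts_test_gains),
					     gts_test_itimes,
					     ARRAY_SIZE(gts_test_itimes),
					     &example_gts);
	}

	static int example_gain_to_scale(int total_gain, int *val, int *val2)
	{
		/* With the tables above, total gain 8 yields *val = 8, *val2 = 0. */
		return iio_gts_total_gain_to_scale(&example_gts, total_gain,
						   val, val2);
	}
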
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index ff58058aeadca..07fb8d3c037f0 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -34,6 +34,7 @@ MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
+#define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */
static const char * const ibcm_rej_reason_strs[] = {
[IB_CM_REJ_NO_QP] = "no QP",
[IB_CM_REJ_NO_EEC] = "no EEC",
@@ -1025,13 +1026,26 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
}
}
+static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
+ enum ib_cm_state old_state)
+{
+ struct cm_id_private *cm_id_priv;
+
+ cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
+ cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
+}
+
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
struct cm_id_private *cm_id_priv;
+ enum ib_cm_state old_state;
struct cm_work *work;
+ int ret;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irq(&cm_id_priv->lock);
+ old_state = cm_id->state;
retest:
switch (cm_id->state) {
case IB_CM_LISTEN:
@@ -1135,7 +1149,14 @@ retest:
xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
cm_deref_id(cm_id_priv);
- wait_for_completion(&cm_id_priv->comp);
+ do {
+ ret = wait_for_completion_timeout(&cm_id_priv->comp,
+ msecs_to_jiffies(
+ CM_DESTROY_ID_WAIT_TIMEOUT));
+ if (!ret) /* timeout happened */
+ cm_destroy_id_wait_timeout(cm_id, old_state);
+ } while (!ret);
+
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
cm_free_work(work);
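
The cm_destroy_id() change keeps the original wait-forever semantics but splits the single wait_for_completion() into bounded waits, so a reference that never drops is reported every CM_DESTROY_ID_WAIT_TIMEOUT milliseconds instead of hanging silently. The generic shape of that loop, as a sketch (the helper name and 10 s period are illustrative):

	static void example_wait_logged(struct completion *done, const char *what)
	{
		unsigned long left;

		do {
			left = wait_for_completion_timeout(done,
							   msecs_to_jiffies(10000));
			if (!left)	/* timed out: log and keep waiting */
				pr_warn("%s: still waiting\n", what);
		} while (!left);
	}
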
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 67bcea7a153c6..07cb6c5ffda00 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1730,7 +1730,7 @@ static int assign_client_id(struct ib_client *client)
{
int ret;
- down_write(&clients_rwsem);
+ lockdep_assert_held(&clients_rwsem);
/*
* The add/remove callbacks must be called in FIFO/LIFO order. To
* achieve this we assign client_ids so they are sorted in
@@ -1739,14 +1739,11 @@ static int assign_client_id(struct ib_client *client)
client->client_id = highest_client_id;
ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
if (ret)
- goto out;
+ return ret;
highest_client_id++;
xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
-
-out:
- up_write(&clients_rwsem);
- return ret;
+ return 0;
}
static void remove_client_id(struct ib_client *client)
@@ -1776,25 +1773,35 @@ int ib_register_client(struct ib_client *client)
{
struct ib_device *device;
unsigned long index;
+ bool need_unreg = false;
int ret;
refcount_set(&client->uses, 1);
init_completion(&client->uses_zero);
+
+ /*
+ * The devices_rwsem is held in write mode to ensure that a racing
+	 * ib_register_device() sees a consistent view of clients and devices.
+ */
+ down_write(&devices_rwsem);
+ down_write(&clients_rwsem);
ret = assign_client_id(client);
if (ret)
- return ret;
+ goto out;
- down_read(&devices_rwsem);
+ need_unreg = true;
xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
ret = add_client_context(device, client);
- if (ret) {
- up_read(&devices_rwsem);
- ib_unregister_client(client);
- return ret;
- }
+ if (ret)
+ goto out;
}
- up_read(&devices_rwsem);
- return 0;
+ ret = 0;
+out:
+ up_write(&clients_rwsem);
+ up_write(&devices_rwsem);
+ if (need_unreg && ret)
+ ib_unregister_client(client);
+ return ret;
}
EXPORT_SYMBOL(ib_register_client);
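
The device.c rework moves lock acquisition out of assign_client_id() and into ib_register_client(), which now takes devices_rwsem and clients_rwsem together in write mode; the helper keeps only a lockdep_assert_held() so the locking contract is still checked at runtime. The general refactoring pattern, sketched with made-up types (some_state, do_update):

	static int helper_locked(struct some_state *st)
	{
		/* Caller must already hold st->rwsem. */
		lockdep_assert_held(&st->rwsem);
		return do_update(st);
	}

	static int public_entry(struct some_state *st)
	{
		int ret;

		down_write(&st->rwsem);
		ret = helper_locked(st);
		up_write(&st->rwsem);
		return ret;
	}
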
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6de05ade2ba94..3d3ee3eca9830 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2737,7 +2737,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
case IB_FLOW_SPEC_ETH:
- ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
+ ib_filter_sz = sizeof(struct ib_flow_eth_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@@ -2748,7 +2748,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_IPV4:
- ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
+ ib_filter_sz = sizeof(struct ib_flow_ipv4_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@@ -2759,7 +2759,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_IPV6:
- ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
+ ib_filter_sz = sizeof(struct ib_flow_ipv6_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@@ -2775,7 +2775,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
break;
case IB_FLOW_SPEC_TCP:
case IB_FLOW_SPEC_UDP:
- ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
+ ib_filter_sz = sizeof(struct ib_flow_tcp_udp_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@@ -2786,7 +2786,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_VXLAN_TUNNEL:
- ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
+ ib_filter_sz = sizeof(struct ib_flow_tunnel_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@@ -2801,7 +2801,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
return -EINVAL;
break;
case IB_FLOW_SPEC_ESP:
- ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz);
+ ib_filter_sz = sizeof(struct ib_flow_esp_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@@ -2812,7 +2812,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_GRE:
- ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz);
+ ib_filter_sz = sizeof(struct ib_flow_gre_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@@ -2823,7 +2823,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_MPLS:
- ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz);
+ ib_filter_sz = sizeof(struct ib_flow_mpls_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
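
The hunks above size the kernel-side filter with sizeof(struct ...) instead of offsetof(struct ..., real_sz). As a plain C matter the two are not interchangeable: offsetof stops at the marker member, while sizeof covers the whole object including any trailing padding. A made-up layout where the difference shows up:

	struct demo {			/* illustrative only */
		char	a;
		int	b;
		char	c;
		char	real_sz[];	/* marker kept as the last member */
	};

	/*
	 * On a typical ABI that aligns the struct to 4 bytes,
	 * offsetof(struct demo, real_sz) is 9 while sizeof(struct demo)
	 * is 12: sizeof also counts the trailing padding.
	 */
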
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index d9799706c58e9..f80da6a67e24e 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -36,13 +36,15 @@
#include "uverbs.h"
struct bundle_alloc_head {
- struct bundle_alloc_head *next;
+ struct_group_tagged(bundle_alloc_head_hdr, hdr,
+ struct bundle_alloc_head *next;
+ );
u8 data[];
};
struct bundle_priv {
/* Must be first */
- struct bundle_alloc_head alloc_head;
+ struct bundle_alloc_head_hdr alloc_head;
struct bundle_alloc_head *allocated_mem;
size_t internal_avail;
size_t internal_used;
@@ -64,7 +66,7 @@ struct bundle_priv {
* Must be last. bundle ends in a flex array which overlaps
* internal_buffer.
*/
- struct uverbs_attr_bundle bundle;
+ struct uverbs_attr_bundle_hdr bundle;
u64 internal_buffer[32];
};
@@ -77,9 +79,10 @@ void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
unsigned int num_attrs)
{
struct bundle_priv *pbundle;
+ struct uverbs_attr_bundle *bundle;
size_t bundle_size =
offsetof(struct bundle_priv, internal_buffer) +
- sizeof(*pbundle->bundle.attrs) * method_elm->key_bitmap_len +
+ sizeof(*bundle->attrs) * method_elm->key_bitmap_len +
sizeof(*pbundle->uattrs) * num_attrs;
method_elm->use_stack = bundle_size <= sizeof(*pbundle);
@@ -107,7 +110,7 @@ __malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size,
gfp_t flags)
{
struct bundle_priv *pbundle =
- container_of(bundle, struct bundle_priv, bundle);
+ container_of(&bundle->hdr, struct bundle_priv, bundle);
size_t new_used;
void *res;
@@ -149,7 +152,7 @@ static int uverbs_set_output(const struct uverbs_attr_bundle *bundle,
const struct uverbs_attr *attr)
{
struct bundle_priv *pbundle =
- container_of(bundle, struct bundle_priv, bundle);
+ container_of(&bundle->hdr, struct bundle_priv, bundle);
u16 flags;
flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
@@ -166,6 +169,8 @@ static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
struct ib_uverbs_attr *uattr,
u32 attr_bkey)
{
+ struct uverbs_attr_bundle *bundle =
+ container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
const struct uverbs_attr_spec *spec = &attr_uapi->spec;
size_t array_len;
u32 *idr_vals;
@@ -184,7 +189,7 @@ static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
return -EINVAL;
attr->uobjects =
- uverbs_alloc(&pbundle->bundle,
+ uverbs_alloc(bundle,
array_size(array_len, sizeof(*attr->uobjects)));
if (IS_ERR(attr->uobjects))
return PTR_ERR(attr->uobjects);
@@ -209,7 +214,7 @@ static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
for (i = 0; i != array_len; i++) {
attr->uobjects[i] = uverbs_get_uobject_from_file(
spec->u2.objs_arr.obj_type, spec->u2.objs_arr.access,
- idr_vals[i], &pbundle->bundle);
+ idr_vals[i], bundle);
if (IS_ERR(attr->uobjects[i])) {
ret = PTR_ERR(attr->uobjects[i]);
break;
@@ -240,7 +245,9 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
struct ib_uverbs_attr *uattr, u32 attr_bkey)
{
const struct uverbs_attr_spec *spec = &attr_uapi->spec;
- struct uverbs_attr *e = &pbundle->bundle.attrs[attr_bkey];
+ struct uverbs_attr_bundle *bundle =
+ container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
+ struct uverbs_attr *e = &bundle->attrs[attr_bkey];
const struct uverbs_attr_spec *val_spec = spec;
struct uverbs_obj_attr *o_attr;
@@ -288,7 +295,7 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
if (val_spec->alloc_and_copy && !uverbs_attr_ptr_is_inline(e)) {
void *p;
- p = uverbs_alloc(&pbundle->bundle, uattr->len);
+ p = uverbs_alloc(bundle, uattr->len);
if (IS_ERR(p))
return PTR_ERR(p);
@@ -321,7 +328,7 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
*/
o_attr->uobject = uverbs_get_uobject_from_file(
spec->u.obj.obj_type, spec->u.obj.access,
- uattr->data_s64, &pbundle->bundle);
+ uattr->data_s64, bundle);
if (IS_ERR(o_attr->uobject))
return PTR_ERR(o_attr->uobject);
__set_bit(attr_bkey, pbundle->uobj_finalize);
@@ -422,6 +429,8 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
unsigned int num_attrs)
{
int (*handler)(struct uverbs_attr_bundle *attrs);
+ struct uverbs_attr_bundle *bundle =
+ container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
size_t uattrs_size = array_size(sizeof(*pbundle->uattrs), num_attrs);
unsigned int destroy_bkey = pbundle->method_elm->destroy_bkey;
unsigned int i;
@@ -434,7 +443,7 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
if (!handler)
return -EIO;
- pbundle->uattrs = uverbs_alloc(&pbundle->bundle, uattrs_size);
+ pbundle->uattrs = uverbs_alloc(bundle, uattrs_size);
if (IS_ERR(pbundle->uattrs))
return PTR_ERR(pbundle->uattrs);
if (copy_from_user(pbundle->uattrs, pbundle->user_attrs, uattrs_size))
@@ -453,25 +462,23 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
return -EINVAL;
if (pbundle->method_elm->has_udata)
- uverbs_fill_udata(&pbundle->bundle,
- &pbundle->bundle.driver_udata,
+ uverbs_fill_udata(bundle, &pbundle->bundle.driver_udata,
UVERBS_ATTR_UHW_IN, UVERBS_ATTR_UHW_OUT);
else
pbundle->bundle.driver_udata = (struct ib_udata){};
if (destroy_bkey != UVERBS_API_ATTR_BKEY_LEN) {
- struct uverbs_obj_attr *destroy_attr =
- &pbundle->bundle.attrs[destroy_bkey].obj_attr;
+ struct uverbs_obj_attr *destroy_attr = &bundle->attrs[destroy_bkey].obj_attr;
- ret = uobj_destroy(destroy_attr->uobject, &pbundle->bundle);
+ ret = uobj_destroy(destroy_attr->uobject, bundle);
if (ret)
return ret;
__clear_bit(destroy_bkey, pbundle->uobj_finalize);
- ret = handler(&pbundle->bundle);
+ ret = handler(bundle);
uobj_put_destroy(destroy_attr->uobject);
} else {
- ret = handler(&pbundle->bundle);
+ ret = handler(bundle);
}
/*
@@ -481,10 +488,10 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
*/
if (!ret && pbundle->method_elm->has_udata) {
const struct uverbs_attr *attr =
- uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT);
+ uverbs_attr_get(bundle, UVERBS_ATTR_UHW_OUT);
if (!IS_ERR(attr))
- ret = uverbs_set_output(&pbundle->bundle, attr);
+ ret = uverbs_set_output(bundle, attr);
}
/*
@@ -501,6 +508,8 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
static void bundle_destroy(struct bundle_priv *pbundle, bool commit)
{
unsigned int key_bitmap_len = pbundle->method_elm->key_bitmap_len;
+ struct uverbs_attr_bundle *bundle =
+ container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
struct bundle_alloc_head *memblock;
unsigned int i;
@@ -508,20 +517,19 @@ static void bundle_destroy(struct bundle_priv *pbundle, bool commit)
i = -1;
while ((i = find_next_bit(pbundle->uobj_finalize, key_bitmap_len,
i + 1)) < key_bitmap_len) {
- struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
+ struct uverbs_attr *attr = &bundle->attrs[i];
uverbs_finalize_object(
attr->obj_attr.uobject,
attr->obj_attr.attr_elm->spec.u.obj.access,
test_bit(i, pbundle->uobj_hw_obj_valid),
- commit,
- &pbundle->bundle);
+ commit, bundle);
}
i = -1;
while ((i = find_next_bit(pbundle->spec_finalize, key_bitmap_len,
i + 1)) < key_bitmap_len) {
- struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
+ struct uverbs_attr *attr = &bundle->attrs[i];
const struct uverbs_api_attr *attr_uapi;
void __rcu **slot;
@@ -535,7 +543,7 @@ static void bundle_destroy(struct bundle_priv *pbundle, bool commit)
if (attr_uapi->spec.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
uverbs_free_idrs_array(attr_uapi, &attr->objs_arr_attr,
- commit, &pbundle->bundle);
+ commit, bundle);
}
}
@@ -578,7 +586,8 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
method_elm->bundle_size -
offsetof(struct bundle_priv, internal_buffer);
pbundle->alloc_head.next = NULL;
- pbundle->allocated_mem = &pbundle->alloc_head;
+ pbundle->allocated_mem = container_of(&pbundle->alloc_head,
+ struct bundle_alloc_head, hdr);
} else {
pbundle = &onstack;
pbundle->internal_avail = sizeof(pbundle->internal_buffer);
@@ -596,8 +605,9 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
pbundle->user_attrs = user_attrs;
pbundle->internal_used = ALIGN(pbundle->method_elm->key_bitmap_len *
- sizeof(*pbundle->bundle.attrs),
- sizeof(*pbundle->internal_buffer));
+ sizeof(*container_of(&pbundle->bundle,
+ struct uverbs_attr_bundle, hdr)->attrs),
+ sizeof(*pbundle->internal_buffer));
memset(pbundle->bundle.attr_present, 0,
sizeof(pbundle->bundle.attr_present));
memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize));
@@ -700,11 +710,13 @@ void uverbs_fill_udata(struct uverbs_attr_bundle *bundle,
unsigned int attr_out)
{
struct bundle_priv *pbundle =
- container_of(bundle, struct bundle_priv, bundle);
+ container_of(&bundle->hdr, struct bundle_priv, bundle);
+ struct uverbs_attr_bundle *bundle_aux =
+ container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
const struct uverbs_attr *in =
- uverbs_attr_get(&pbundle->bundle, attr_in);
+ uverbs_attr_get(bundle_aux, attr_in);
const struct uverbs_attr *out =
- uverbs_attr_get(&pbundle->bundle, attr_out);
+ uverbs_attr_get(bundle_aux, attr_out);
if (!IS_ERR(in)) {
udata->inlen = in->ptr_attr.len;
@@ -829,7 +841,7 @@ void uverbs_finalize_uobj_create(const struct uverbs_attr_bundle *bundle,
u16 idx)
{
struct bundle_priv *pbundle =
- container_of(bundle, struct bundle_priv, bundle);
+ container_of(&bundle->hdr, struct bundle_priv, bundle);
__set_bit(uapi_bkey_attr(uapi_key_attr(idx)),
pbundle->uobj_hw_obj_valid);
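
struct_group_tagged(TAG, NAME, ...) from <linux/stddef.h> wraps a run of members in an anonymous union so they stay addressable individually and are also visible as a struct TAG member named NAME; that is what lets bundle_priv embed only the uverbs_attr_bundle_hdr while container_of() on &bundle->hdr recovers the full bundle. A small sketch of the mechanism with made-up types:

	#include <linux/container_of.h>
	#include <linux/stddef.h>
	#include <linux/types.h>

	struct msg {
		struct_group_tagged(msg_hdr, hdr,
			u16 type;
			u16 len;
		);
		u8 payload[];
	};

	static u16 peek_type(struct msg_hdr *hdr)
	{
		/* The outer struct can be recovered from its tagged header. */
		struct msg *m = container_of(hdr, struct msg, hdr);

		return m->hdr.type;	/* same storage as m->type */
	}
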
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 50cb2259bf874..fb8a0c2488667 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -930,8 +930,6 @@ void c4iw_id_table_free(struct c4iw_id_table *alloc);
typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);
-int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
- struct l2t_entry *l2t);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index e2bdec32ae805..926f9ff1f60fd 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -57,6 +57,7 @@ struct efa_dev {
u64 db_bar_addr;
u64 db_bar_len;
+ unsigned int num_irq_vectors;
int admin_msix_vector_idx;
struct efa_irq admin_irq;
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 7b1910a862164..5fa3603c80d82 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -322,7 +322,9 @@ static int efa_create_eqs(struct efa_dev *dev)
int err;
int i;
- neqs = min_t(unsigned int, neqs, num_online_cpus());
+ neqs = min_t(unsigned int, neqs,
+ dev->num_irq_vectors - EFA_COMP_EQS_VEC_BASE);
+
dev->neqs = neqs;
dev->eqs = kcalloc(neqs, sizeof(*dev->eqs), GFP_KERNEL);
if (!dev->eqs)
@@ -468,34 +470,30 @@ static void efa_disable_msix(struct efa_dev *dev)
static int efa_enable_msix(struct efa_dev *dev)
{
- int msix_vecs, irq_num;
+ int max_vecs, num_vecs;
/*
* Reserve the max msix vectors we might need, one vector is reserved
* for admin.
*/
- msix_vecs = min_t(int, pci_msix_vec_count(dev->pdev),
- num_online_cpus() + 1);
+ max_vecs = min_t(int, pci_msix_vec_count(dev->pdev),
+ num_online_cpus() + 1);
dev_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n",
- msix_vecs);
+ max_vecs);
dev->admin_msix_vector_idx = EFA_MGMNT_MSIX_VEC_IDX;
- irq_num = pci_alloc_irq_vectors(dev->pdev, msix_vecs,
- msix_vecs, PCI_IRQ_MSIX);
+ num_vecs = pci_alloc_irq_vectors(dev->pdev, 1,
+ max_vecs, PCI_IRQ_MSIX);
- if (irq_num < 0) {
- dev_err(&dev->pdev->dev, "Failed to enable MSI-X. irq_num %d\n",
- irq_num);
+ if (num_vecs < 0) {
+ dev_err(&dev->pdev->dev, "Failed to enable MSI-X. error %d\n",
+ num_vecs);
return -ENOSPC;
}
- if (irq_num != msix_vecs) {
- efa_disable_msix(dev);
- dev_err(&dev->pdev->dev,
- "Allocated %d MSI-X (out of %d requested)\n",
- irq_num, msix_vecs);
- return -ENOSPC;
- }
+ dev_dbg(&dev->pdev->dev, "Allocated %d MSI-X vectors\n", num_vecs);
+
+ dev->num_irq_vectors = num_vecs;
return 0;
}
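
The efa change stops requiring exactly num_online_cpus() + 1 MSI-X vectors; pci_alloc_irq_vectors() is given a 1..max range, and the number of completion EQs is later derived from what was actually granted (dev->num_irq_vectors). A sketch of that allocation, assuming only the pdev and num_irq_vectors members shown above:

	static int example_enable_msix(struct efa_dev *dev)
	{
		int nvec;

		/* Accept anything from one vector up to one per CPU plus admin. */
		nvec = pci_alloc_irq_vectors(dev->pdev, 1,
					     num_online_cpus() + 1, PCI_IRQ_MSIX);
		if (nvec < 0)
			return nvec;

		dev->num_irq_vectors = nvec;
		return 0;
	}
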
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 18b05ffb415a3..c465966a1d9c9 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -315,7 +315,7 @@ int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit)
* This routine returns the receive context associated
* with a a qp's qpn.
*
- * Returns the context.
+ * Return: the context.
*/
static struct hfi1_ctxtdata *qp_to_rcd(struct rvt_dev_info *rdi,
struct rvt_qp *qp)
@@ -710,7 +710,7 @@ void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp)
* The exp_lock must be held.
*
* Return:
- * On success: a value postive value between 0 and RXE_NUM_TID_FLOWS - 1
+ * On success: a positive value between 0 and RXE_NUM_TID_FLOWS - 1
* On failure: -EAGAIN
*/
static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last)
@@ -1007,7 +1007,7 @@ static u32 tid_flush_pages(struct tid_rdma_pageset *list,
* pages are tested two at a time, i, i + 1 for contiguous
* pages and i - 1 and i contiguous pages.
*
- * If any condition is false, any accumlated pages are flushed and
+ * If any condition is false, any accumulated pages are flushed and
* v0,v1 are emitted as separate PAGE_SIZE pagesets
*
* Otherwise, the current 8k is totaled for a future flush.
@@ -1434,7 +1434,7 @@ static void kern_program_rcvarray(struct tid_rdma_flow *flow)
* (5) computes a tidarray with formatted TID entries which can be sent
* to the sender
* (6) Reserves and programs HW flows.
- * (7) It also manages queing the QP when TID/flow resources are not
+ * (7) It also manages queueing the QP when TID/flow resources are not
* available.
*
* @req points to struct tid_rdma_request of which the segments are a part. The
@@ -1604,7 +1604,7 @@ void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req)
}
/**
- * hfi1_kern_exp_rcv_free_flows - free priviously allocated flow information
+ * hfi1_kern_exp_rcv_free_flows - free previously allocated flow information
* @req: the tid rdma request to be cleaned
*/
static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req)
@@ -2055,7 +2055,7 @@ static int tid_rdma_rcv_error(struct hfi1_packet *packet,
* req->clear_tail is advanced). However, when an earlier
* request is received, this request will not be complete any
* more (qp->s_tail_ack_queue is moved back, see below).
- * Consequently, we need to update the TID flow info everytime
+ * Consequently, we need to update the TID flow info every time
* a duplicate request is received.
*/
bth0 = be32_to_cpu(ohdr->bth[0]);
@@ -2219,7 +2219,7 @@ void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet)
/*
* 1. Verify TID RDMA READ REQ as per IB_OPCODE_RC_RDMA_READ
* (see hfi1_rc_rcv())
- * 2. Put TID RDMA READ REQ into the response queueu (s_ack_queue)
+ * 2. Put TID RDMA READ REQ into the response queue (s_ack_queue)
* - Setup struct tid_rdma_req with request info
* - Initialize struct tid_rdma_flow info;
* - Copy TID entries;
@@ -2439,7 +2439,7 @@ find_tid_request(struct rvt_qp *qp, u32 psn, enum ib_wr_opcode opcode)
void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
{
- /* HANDLER FOR TID RDMA READ RESPONSE packet (Requestor side */
+ /* HANDLER FOR TID RDMA READ RESPONSE packet (Requester side) */
/*
* 1. Find matching SWQE
@@ -3649,7 +3649,7 @@ void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet)
* 1. Verify TID RDMA WRITE REQ as per IB_OPCODE_RC_RDMA_WRITE_FIRST
* (see hfi1_rc_rcv())
* - Don't allow 0-length requests.
- * 2. Put TID RDMA WRITE REQ into the response queueu (s_ack_queue)
+ * 2. Put TID RDMA WRITE REQ into the response queue (s_ack_queue)
* - Setup struct tid_rdma_req with request info
* - Prepare struct tid_rdma_flow array?
* 3. Set the qp->s_ack_state as state diagram in design doc.
@@ -4026,7 +4026,7 @@ unlock_r_lock:
void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet)
{
- /* HANDLER FOR TID RDMA WRITE RESPONSE packet (Requestor side */
+ /* HANDLER FOR TID RDMA WRITE RESPONSE packet (Requester side) */
/*
* 1. Find matching SWQE
@@ -5440,8 +5440,9 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
* the two state machines can step on each other with respect to the
* RVT_S_BUSY flag.
* Therefore, a modified test is used.
- * @return true if the second leg is scheduled;
- * false if the second leg is not scheduled.
+ *
+ * Return: %true if the second leg is scheduled;
+ * %false if the second leg is not scheduled.
*/
bool hfi1_schedule_tid_send(struct rvt_qp *qp)
{
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index 052a3d60905aa..11dbbabebdc90 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -108,6 +108,9 @@ enum {
HNS_ROCE_CMD_QUERY_CEQC = 0x92,
HNS_ROCE_CMD_DESTROY_CEQC = 0x93,
+ /* SCC CTX commands */
+ HNS_ROCE_CMD_QUERY_SCCC = 0xa2,
+
/* SCC CTX BT commands */
HNS_ROCE_CMD_READ_SCCC_BT0 = 0xa4,
HNS_ROCE_CMD_WRITE_SCCC_BT0 = 0xa5,
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 1b6d16af8c12b..7250d0643b5c5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -133,14 +133,12 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct ib_device *ibdev = &hr_dev->ib_dev;
u64 mtts[MTT_MIN_COUNT] = {};
- dma_addr_t dma_handle;
int ret;
- ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),
- &dma_handle);
- if (!ret) {
+ ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts));
+ if (ret) {
ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
- return -EINVAL;
+ return ret;
}
/* Get CQC memory HEM(Hardware Entry Memory) table */
@@ -157,7 +155,8 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
goto err_put;
}
- ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, dma_handle);
+ ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts,
+ hns_roce_get_mtr_ba(&hr_cq->mtr));
if (ret)
goto err_xa;
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index b1fce5ddf6316..c3cbd0a494bfd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -179,6 +179,7 @@ enum {
#define HNS_ROCE_CMD_SUCCESS 1
+#define HNS_ROCE_MAX_HOP_NUM 3
/* The minimum page size is 4K for hardware */
#define HNS_HW_PAGE_SHIFT 12
#define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT)
@@ -269,6 +270,11 @@ struct hns_roce_hem_list {
dma_addr_t root_ba; /* pointer to the root ba table */
};
+enum mtr_type {
+ MTR_DEFAULT = 0,
+ MTR_PBL,
+};
+
struct hns_roce_buf_attr {
struct {
size_t size; /* region size */
@@ -277,7 +283,10 @@ struct hns_roce_buf_attr {
unsigned int region_count; /* valid region count */
unsigned int page_shift; /* buffer page shift */
unsigned int user_access; /* umem access flag */
+ u64 iova;
+ enum mtr_type type;
bool mtt_only; /* only alloc buffer-required MTT memory */
+ bool adaptive; /* adaptive for page_shift and hopnum */
};
struct hns_roce_hem_cfg {
@@ -585,6 +594,13 @@ struct hns_roce_work {
u32 queue_num;
};
+enum hns_roce_cong_type {
+ CONG_TYPE_DCQCN,
+ CONG_TYPE_LDCP,
+ CONG_TYPE_HC3,
+ CONG_TYPE_DIP,
+};
+
struct hns_roce_qp {
struct ib_qp ibqp;
struct hns_roce_wq rq;
@@ -628,6 +644,7 @@ struct hns_roce_qp {
struct list_head sq_node; /* all send qps are on a list */
struct hns_user_mmap_entry *dwqe_mmap_entry;
u32 config;
+ enum hns_roce_cong_type cong_type;
};
struct hns_roce_ib_iboe {
@@ -699,13 +716,6 @@ struct hns_roce_eq_table {
struct hns_roce_eq *eq;
};
-enum cong_type {
- CONG_TYPE_DCQCN,
- CONG_TYPE_LDCP,
- CONG_TYPE_HC3,
- CONG_TYPE_DIP,
-};
-
struct hns_roce_caps {
u64 fw_ver;
u8 num_ports;
@@ -835,7 +845,8 @@ struct hns_roce_caps {
u16 default_aeq_period;
u16 default_aeq_arm_st;
u16 default_ceq_arm_st;
- enum cong_type cong_type;
+ u8 cong_cap;
+ enum hns_roce_cong_type default_cong_type;
};
enum hns_roce_device_state {
@@ -936,6 +947,7 @@ struct hns_roce_hw {
int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer);
int (*query_srqc)(struct hns_roce_dev *hr_dev, u32 srqn, void *buffer);
+ int (*query_sccc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
int (*query_hw_counter)(struct hns_roce_dev *hr_dev,
u64 *stats, u32 port, int *hw_counters);
const struct ib_device_ops *hns_roce_dev_ops;
@@ -1152,8 +1164,13 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
/* hns roce hw need current block and next block addr from mtt */
#define MTT_MIN_COUNT 2
+static inline dma_addr_t hns_roce_get_mtr_ba(struct hns_roce_mtr *mtr)
+{
+ return mtr->hem_cfg.root_ba;
+}
+
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
+ u32 offset, u64 *mtt_buf, int mtt_max);
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct hns_roce_buf_attr *buf_attr,
unsigned int page_shift, struct ib_udata *udata,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index c4ac06a338696..a4b3f19161dc1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -249,61 +249,34 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
}
static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
- int npages,
unsigned long hem_alloc_size,
gfp_t gfp_mask)
{
- struct hns_roce_hem_chunk *chunk = NULL;
struct hns_roce_hem *hem;
- struct scatterlist *mem;
int order;
void *buf;
WARN_ON(gfp_mask & __GFP_HIGHMEM);
+ order = get_order(hem_alloc_size);
+ if (PAGE_SIZE << order != hem_alloc_size) {
+ dev_err(hr_dev->dev, "invalid hem_alloc_size: %lu!\n",
+ hem_alloc_size);
+ return NULL;
+ }
+
hem = kmalloc(sizeof(*hem),
gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
if (!hem)
return NULL;
- INIT_LIST_HEAD(&hem->chunk_list);
-
- order = get_order(hem_alloc_size);
-
- while (npages > 0) {
- if (!chunk) {
- chunk = kmalloc(sizeof(*chunk),
- gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
- if (!chunk)
- goto fail;
-
- sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
- chunk->npages = 0;
- chunk->nsg = 0;
- memset(chunk->buf, 0, sizeof(chunk->buf));
- list_add_tail(&chunk->list, &hem->chunk_list);
- }
+ buf = dma_alloc_coherent(hr_dev->dev, hem_alloc_size,
+ &hem->dma, gfp_mask);
+ if (!buf)
+ goto fail;
- while (1 << order > npages)
- --order;
-
- /*
- * Alloc memory one time. If failed, don't alloc small block
- * memory, directly return fail.
- */
- mem = &chunk->mem[chunk->npages];
- buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
- &sg_dma_address(mem), gfp_mask);
- if (!buf)
- goto fail;
-
- chunk->buf[chunk->npages] = buf;
- sg_dma_len(mem) = PAGE_SIZE << order;
-
- ++chunk->npages;
- ++chunk->nsg;
- npages -= 1 << order;
- }
+ hem->buf = buf;
+ hem->size = hem_alloc_size;
return hem;
@@ -314,20 +287,10 @@ fail:
void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
{
- struct hns_roce_hem_chunk *chunk, *tmp;
- int i;
-
if (!hem)
return;
- list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
- for (i = 0; i < chunk->npages; ++i)
- dma_free_coherent(hr_dev->dev,
- sg_dma_len(&chunk->mem[i]),
- chunk->buf[i],
- sg_dma_address(&chunk->mem[i]));
- kfree(chunk);
- }
+ dma_free_coherent(hr_dev->dev, hem->size, hem->buf, hem->dma);
kfree(hem);
}
@@ -415,7 +378,6 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
{
u32 bt_size = mhop->bt_chunk_size;
struct device *dev = hr_dev->dev;
- struct hns_roce_hem_iter iter;
gfp_t flag;
u64 bt_ba;
u32 size;
@@ -456,16 +418,15 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
*/
size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
flag = GFP_KERNEL | __GFP_NOWARN;
- table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT,
- size, flag);
+ table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size, flag);
if (!table->hem[index->buf]) {
ret = -ENOMEM;
goto err_alloc_hem;
}
index->inited |= HEM_INDEX_BUF;
- hns_roce_hem_first(table->hem[index->buf], &iter);
- bt_ba = hns_roce_hem_addr(&iter);
+ bt_ba = table->hem[index->buf]->dma;
+
if (table->type < HEM_TYPE_MTT) {
if (mhop->hop_num == 2)
*(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba;
@@ -586,7 +547,6 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
}
table->hem[i] = hns_roce_alloc_hem(hr_dev,
- table->table_chunk_size >> PAGE_SHIFT,
table->table_chunk_size,
GFP_KERNEL | __GFP_NOWARN);
if (!table->hem[i]) {
@@ -725,7 +685,6 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long obj, dma_addr_t *dma_handle)
{
- struct hns_roce_hem_chunk *chunk;
struct hns_roce_hem_mhop mhop;
struct hns_roce_hem *hem;
unsigned long mhop_obj = obj;
@@ -734,7 +693,6 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
int offset, dma_offset;
void *addr = NULL;
u32 hem_idx = 0;
- int length;
int i, j;
mutex_lock(&table->mutex);
@@ -767,23 +725,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
if (!hem)
goto out;
- list_for_each_entry(chunk, &hem->chunk_list, list) {
- for (i = 0; i < chunk->npages; ++i) {
- length = sg_dma_len(&chunk->mem[i]);
- if (dma_handle && dma_offset >= 0) {
- if (length > (u32)dma_offset)
- *dma_handle = sg_dma_address(
- &chunk->mem[i]) + dma_offset;
- dma_offset -= length;
- }
-
- if (length > (u32)offset) {
- addr = chunk->buf[i] + offset;
- goto out;
- }
- offset -= length;
- }
- }
+ *dma_handle = hem->dma + dma_offset;
+ addr = hem->buf + offset;
out:
mutex_unlock(&table->mutex);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index 7d23d3c51da46..6fb51db9682b8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -56,10 +56,6 @@ enum {
HEM_TYPE_TRRL,
};
-#define HNS_ROCE_HEM_CHUNK_LEN \
- ((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \
- (sizeof(struct scatterlist) + sizeof(void *)))
-
#define check_whether_bt_num_3(type, hop_num) \
(type < HEM_TYPE_MTT && hop_num == 2)
@@ -72,25 +68,13 @@ enum {
(type >= HEM_TYPE_MTT && hop_num == 1) || \
(type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
-struct hns_roce_hem_chunk {
- struct list_head list;
- int npages;
- int nsg;
- struct scatterlist mem[HNS_ROCE_HEM_CHUNK_LEN];
- void *buf[HNS_ROCE_HEM_CHUNK_LEN];
-};
-
struct hns_roce_hem {
- struct list_head chunk_list;
+ void *buf;
+ dma_addr_t dma;
+ unsigned long size;
refcount_t refcount;
};
-struct hns_roce_hem_iter {
- struct hns_roce_hem *hem;
- struct hns_roce_hem_chunk *chunk;
- int page_idx;
-};
-
struct hns_roce_hem_mhop {
u32 hop_num;
u32 buf_chunk_size;
@@ -133,38 +117,4 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
int offset, int *mtt_cnt);
-static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
- struct hns_roce_hem_iter *iter)
-{
- iter->hem = hem;
- iter->chunk = list_empty(&hem->chunk_list) ? NULL :
- list_entry(hem->chunk_list.next,
- struct hns_roce_hem_chunk, list);
- iter->page_idx = 0;
-}
-
-static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter)
-{
- return !iter->chunk;
-}
-
-static inline void hns_roce_hem_next(struct hns_roce_hem_iter *iter)
-{
- if (++iter->page_idx >= iter->chunk->nsg) {
- if (iter->chunk->list.next == &iter->hem->chunk_list) {
- iter->chunk = NULL;
- return;
- }
-
- iter->chunk = list_entry(iter->chunk->list.next,
- struct hns_roce_hem_chunk, list);
- iter->page_idx = 0;
- }
-}
-
-static inline dma_addr_t hns_roce_hem_addr(struct hns_roce_hem_iter *iter)
-{
- return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
-}
-
#endif /* _HNS_ROCE_HEM_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 8206daea6767d..ba7ae792d279d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2209,11 +2209,12 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
caps->num_srqs = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_SRQS);
- caps->cong_type = hr_reg_read(resp_d, PF_CAPS_D_CONG_TYPE);
+ caps->cong_cap = hr_reg_read(resp_d, PF_CAPS_D_CONG_CAP);
caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
caps->ceqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_CEQ_DEPTH);
caps->num_comp_vectors = hr_reg_read(resp_d, PF_CAPS_D_NUM_CEQS);
caps->aeqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_AEQ_DEPTH);
+ caps->default_cong_type = hr_reg_read(resp_d, PF_CAPS_D_DEFAULT_ALG);
caps->reserved_pds = hr_reg_read(resp_d, PF_CAPS_D_RSV_PDS);
caps->num_uars = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_UARS);
caps->reserved_qps = hr_reg_read(resp_d, PF_CAPS_D_RSV_QPS);
@@ -3195,21 +3196,22 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
struct ib_device *ibdev = &hr_dev->ib_dev;
dma_addr_t pbl_ba;
- int i, count;
+ int ret;
+ int i;
- count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
- min_t(int, ARRAY_SIZE(pages), mr->npages),
- &pbl_ba);
- if (count < 1) {
- ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
- count);
- return -ENOBUFS;
+ ret = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
+ min_t(int, ARRAY_SIZE(pages), mr->npages));
+ if (ret) {
+ ibdev_err(ibdev, "failed to find PBL mtr, ret = %d.\n", ret);
+ return ret;
}
/* Aligned to the hardware address access unit */
- for (i = 0; i < count; i++)
+ for (i = 0; i < ARRAY_SIZE(pages); i++)
pages[i] >>= 6;
+ pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
+
mpt_entry->pbl_size = cpu_to_le32(mr->npages);
mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3));
@@ -3308,18 +3310,12 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
void *mb_buf, struct hns_roce_mr *mr)
{
- struct ib_device *ibdev = &hr_dev->ib_dev;
+ dma_addr_t pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
struct hns_roce_v2_mpt_entry *mpt_entry;
- dma_addr_t pbl_ba = 0;
mpt_entry = mb_buf;
memset(mpt_entry, 0, sizeof(*mpt_entry));
- if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
- ibdev_err(ibdev, "failed to find frmr mtr.\n");
- return -ENOBUFS;
- }
-
hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE);
hr_reg_write(mpt_entry, MPT_PD, mr->pd);
@@ -4063,7 +4059,6 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj,
u32 step_idx)
{
- struct hns_roce_hem_iter iter;
struct hns_roce_hem_mhop mhop;
struct hns_roce_hem *hem;
unsigned long mhop_obj = obj;
@@ -4100,12 +4095,8 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
if (check_whether_last_step(hop_num, step_idx)) {
hem = table->hem[hem_idx];
- for (hns_roce_hem_first(hem, &iter);
- !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
- bt_ba = hns_roce_hem_addr(&iter);
- ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type,
- step_idx);
- }
+
+ ret = set_hem_to_hw(hr_dev, obj, hem->dma, table->type, step_idx);
} else {
if (step_idx == 0)
bt_ba = table->bt_l0_dma_addr[i];
@@ -4346,17 +4337,20 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
{
u64 mtts[MTT_MIN_COUNT] = { 0 };
u64 wqe_sge_ba;
- int count;
+ int ret;
/* Search qp buf's mtts */
- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
- MTT_MIN_COUNT, &wqe_sge_ba);
- if (hr_qp->rq.wqe_cnt && count < 1) {
+ ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
+ MTT_MIN_COUNT);
+ if (hr_qp->rq.wqe_cnt && ret) {
ibdev_err(&hr_dev->ib_dev,
- "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn);
- return -EINVAL;
+ "failed to find QP(0x%lx) RQ WQE buf, ret = %d.\n",
+ hr_qp->qpn, ret);
+ return ret;
}
+ wqe_sge_ba = hns_roce_get_mtr_ba(&hr_qp->mtr);
+
context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
qpc_mask->wqe_sge_ba = 0;
@@ -4418,23 +4412,23 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
struct ib_device *ibdev = &hr_dev->ib_dev;
u64 sge_cur_blk = 0;
u64 sq_cur_blk = 0;
- int count;
+ int ret;
/* search qp buf's mtts */
- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
- if (count < 1) {
- ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n",
- hr_qp->qpn);
- return -EINVAL;
+ ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->sq.offset,
+ &sq_cur_blk, 1);
+ if (ret) {
+ ibdev_err(ibdev, "failed to find QP(0x%lx) SQ WQE buf, ret = %d.\n",
+ hr_qp->qpn, ret);
+ return ret;
}
if (hr_qp->sge.sge_cnt > 0) {
- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
- hr_qp->sge.offset,
- &sge_cur_blk, 1, NULL);
- if (count < 1) {
- ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
- hr_qp->qpn);
- return -EINVAL;
+ ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
+ hr_qp->sge.offset, &sge_cur_blk, 1);
+ if (ret) {
+ ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf, ret = %d.\n",
+ hr_qp->qpn, ret);
+ return ret;
}
}
@@ -4744,13 +4738,10 @@ enum {
static int check_cong_type(struct ib_qp *ibqp,
struct hns_roce_congestion_algorithm *cong_alg)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
-
- if (ibqp->qp_type == IB_QPT_UD)
- hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
/* different congestion types match different configurations */
- switch (hr_dev->caps.cong_type) {
+ switch (hr_qp->cong_type) {
case CONG_TYPE_DCQCN:
cong_alg->alg_sel = CONG_DCQCN;
cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
@@ -4776,10 +4767,7 @@ static int check_cong_type(struct ib_qp *ibqp,
cong_alg->wnd_mode_sel = WND_LIMIT;
break;
default:
- ibdev_warn(&hr_dev->ib_dev,
- "invalid type(%u) for congestion selection.\n",
- hr_dev->caps.cong_type);
- hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
+ hr_qp->cong_type = CONG_TYPE_DCQCN;
cong_alg->alg_sel = CONG_DCQCN;
cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
cong_alg->dip_vld = DIP_INVALID;
@@ -4798,6 +4786,7 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
struct hns_roce_congestion_algorithm cong_field;
struct ib_device *ibdev = ibqp->device;
struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
u32 dip_idx = 0;
int ret;
@@ -4810,7 +4799,7 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
return ret;
hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
- hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE);
+ hr_qp->cong_type * HNS_ROCE_CONG_SIZE);
hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID);
hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL);
@@ -5328,6 +5317,30 @@ out:
return ret;
}
+static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn,
+ void *buffer)
+{
+ struct hns_roce_v2_scc_context *context;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SCCC,
+ qpn);
+ if (ret)
+ goto out;
+
+ context = mailbox->buf;
+ memcpy(buffer, context, sizeof(*context));
+
+out:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev,
struct hns_roce_v2_qp_context *context)
{
@@ -5581,18 +5594,20 @@ static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
struct ib_device *ibdev = srq->ibsrq.device;
struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
u64 mtts_idx[MTT_MIN_COUNT] = {};
- dma_addr_t dma_handle_idx = 0;
+ dma_addr_t dma_handle_idx;
int ret;
/* Get physical address of idx que buf */
ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx,
- ARRAY_SIZE(mtts_idx), &dma_handle_idx);
- if (ret < 1) {
+ ARRAY_SIZE(mtts_idx));
+ if (ret) {
ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
ret);
- return -ENOBUFS;
+ return ret;
}
+ dma_handle_idx = hns_roce_get_mtr_ba(&idx_que->mtr);
+
hr_reg_write(ctx, SRQC_IDX_HOP_NUM,
to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt));
@@ -5624,20 +5639,22 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
struct hns_roce_srq_context *ctx = mb_buf;
u64 mtts_wqe[MTT_MIN_COUNT] = {};
- dma_addr_t dma_handle_wqe = 0;
+ dma_addr_t dma_handle_wqe;
int ret;
memset(ctx, 0, sizeof(*ctx));
/* Get the physical address of srq buf */
ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
- ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
- if (ret < 1) {
+ ARRAY_SIZE(mtts_wqe));
+ if (ret) {
ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
ret);
- return -ENOBUFS;
+ return ret;
}
+ dma_handle_wqe = hns_roce_get_mtr_ba(&srq->buf_mtr);
+
hr_reg_write(ctx, SRQC_SRQ_ST, 1);
hr_reg_write_bool(ctx, SRQC_SRQ_TYPE,
srq->ibsrq.srq_type == IB_SRQT_XRC);
@@ -6353,7 +6370,7 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
struct hns_roce_eq_context *eqc;
u64 bt_ba = 0;
- int count;
+ int ret;
eqc = mb_buf;
memset(eqc, 0, sizeof(struct hns_roce_eq_context));
@@ -6361,13 +6378,15 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
init_eq_config(hr_dev, eq);
/* if not multi-hop, eqe buffer only use one trunk */
- count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
- &bt_ba);
- if (count < 1) {
- dev_err(hr_dev->dev, "failed to find EQE mtr\n");
- return -ENOBUFS;
+ ret = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba,
+ ARRAY_SIZE(eqe_ba));
+ if (ret) {
+ dev_err(hr_dev->dev, "failed to find EQE mtr, ret = %d\n", ret);
+ return ret;
}
+ bt_ba = hns_roce_get_mtr_ba(&eq->mtr);
+
hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore);
@@ -6714,6 +6733,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.query_qpc = hns_roce_v2_query_qpc,
.query_mpt = hns_roce_v2_query_mpt,
.query_srqc = hns_roce_v2_query_srqc,
+ .query_sccc = hns_roce_v2_query_sccc,
.query_hw_counter = hns_roce_hw_v2_query_counter,
.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index cd97cbee682a6..df04bc8ede57b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -646,6 +646,12 @@ struct hns_roce_v2_qp_context {
#define QPCEX_SQ_RQ_NOT_FORBID_EN QPCEX_FIELD_LOC(23, 23)
#define QPCEX_STASH QPCEX_FIELD_LOC(82, 82)
+#define SCC_CONTEXT_SIZE 16
+
+struct hns_roce_v2_scc_context {
+ __le32 data[SCC_CONTEXT_SIZE];
+};
+
#define V2_QP_RWE_S 1 /* rdma write enable */
#define V2_QP_RRE_S 2 /* rdma read enable */
#define V2_QP_ATE_S 3 /* rdma atomic enable */
@@ -1214,12 +1220,13 @@ struct hns_roce_query_pf_caps_d {
#define PF_CAPS_D_RQWQE_HOP_NUM PF_CAPS_D_FIELD_LOC(21, 20)
#define PF_CAPS_D_EX_SGE_HOP_NUM PF_CAPS_D_FIELD_LOC(23, 22)
#define PF_CAPS_D_SQWQE_HOP_NUM PF_CAPS_D_FIELD_LOC(25, 24)
-#define PF_CAPS_D_CONG_TYPE PF_CAPS_D_FIELD_LOC(29, 26)
+#define PF_CAPS_D_CONG_CAP PF_CAPS_D_FIELD_LOC(29, 26)
#define PF_CAPS_D_CEQ_DEPTH PF_CAPS_D_FIELD_LOC(85, 64)
#define PF_CAPS_D_NUM_CEQS PF_CAPS_D_FIELD_LOC(95, 86)
#define PF_CAPS_D_AEQ_DEPTH PF_CAPS_D_FIELD_LOC(117, 96)
#define PF_CAPS_D_AEQ_ARM_ST PF_CAPS_D_FIELD_LOC(119, 118)
#define PF_CAPS_D_CEQ_ARM_ST PF_CAPS_D_FIELD_LOC(121, 120)
+#define PF_CAPS_D_DEFAULT_ALG PF_CAPS_D_FIELD_LOC(127, 122)
#define PF_CAPS_D_RSV_PDS PF_CAPS_D_FIELD_LOC(147, 128)
#define PF_CAPS_D_NUM_UARS PF_CAPS_D_FIELD_LOC(155, 148)
#define PF_CAPS_D_RSV_QPS PF_CAPS_D_FIELD_LOC(179, 160)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index b55fe6911f9f1..1dc60c2b2b7ab 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -394,6 +394,9 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
resp.config |= HNS_ROCE_RSP_CQE_INLINE_FLAGS;
}
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ resp.congest_type = hr_dev->caps.cong_cap;
+
ret = hns_roce_uar_alloc(hr_dev, &context->uar);
if (ret)
goto error_out;
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 91cd580480fe2..9e05b57a2d67d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -32,6 +32,7 @@
*/
#include <linux/vmalloc.h>
+#include <linux/count_zeros.h>
#include <rdma/ib_umem.h>
#include <linux/math.h>
#include "hns_roce_device.h"
@@ -103,14 +104,21 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
buf_attr.user_access = mr->access;
/* fast MR's buffer is alloced before mapping, not at creation */
buf_attr.mtt_only = is_fast;
+ buf_attr.iova = mr->iova;
+ /* pagesize and hopnum are fixed for fast MR */
+ buf_attr.adaptive = !is_fast;
+ buf_attr.type = MTR_PBL;
err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
udata, start);
- if (err)
+ if (err) {
ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
- else
- mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;
+ return err;
+ }
+
+ mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;
+ mr->pbl_hop_num = buf_attr.region[0].hopnum;
return err;
}
@@ -695,7 +703,7 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
mtr->umem = NULL;
mtr->kmem = hns_roce_buf_alloc(hr_dev, total_size,
buf_attr->page_shift,
- mtr->hem_cfg.is_direct ?
+ !mtr_has_mtt(buf_attr) ?
HNS_ROCE_BUF_DIRECT : 0);
if (IS_ERR(mtr->kmem)) {
ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
@@ -707,14 +715,41 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
return 0;
}
-static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- int page_count, unsigned int page_shift)
+static int cal_mtr_pg_cnt(struct hns_roce_mtr *mtr)
+{
+ struct hns_roce_buf_region *region;
+ int page_cnt = 0;
+ int i;
+
+ for (i = 0; i < mtr->hem_cfg.region_count; i++) {
+ region = &mtr->hem_cfg.region[i];
+ page_cnt += region->count;
+ }
+
+ return page_cnt;
+}
+
+static bool need_split_huge_page(struct hns_roce_mtr *mtr)
+{
+ /* When HEM buffer uses 0-level addressing, the page size is
+ * equal to the whole buffer size. If the current MTR has multiple
+ * regions, we split the buffer into small pages(4k, required by hns
+ * ROCEE). These pages will be used in multiple regions.
+ */
+ return mtr->hem_cfg.is_direct && mtr->hem_cfg.region_count > 1;
+}
+
+static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
+ int page_count = cal_mtr_pg_cnt(mtr);
+ unsigned int page_shift;
dma_addr_t *pages;
int npage;
int ret;
+ page_shift = need_split_huge_page(mtr) ? HNS_HW_PAGE_SHIFT :
+ mtr->hem_cfg.buf_pg_shift;
/* alloc a tmp array to store buffer's dma address */
pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
if (!pages)
@@ -734,7 +769,7 @@ static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
goto err_alloc_list;
}
- if (mtr->hem_cfg.is_direct && npage > 1) {
+ if (need_split_huge_page(mtr) && npage > 1) {
ret = mtr_check_direct_pages(pages, npage, page_shift);
if (ret) {
ibdev_err(ibdev, "failed to check %s page: %d / %d.\n",
@@ -809,47 +844,53 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
return ret;
}
-int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
+static int hns_roce_get_direct_addr_mtt(struct hns_roce_hem_cfg *cfg,
+ u32 start_index, u64 *mtt_buf,
+ int mtt_cnt)
{
- struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
- int mtt_count, left;
- u32 start_index;
+ int mtt_count;
int total = 0;
- __le64 *mtts;
u32 npage;
u64 addr;
- if (!mtt_buf || mtt_max < 1)
- goto done;
-
- /* no mtt memory in direct mode, so just return the buffer address */
- if (cfg->is_direct) {
- start_index = offset >> HNS_HW_PAGE_SHIFT;
- for (mtt_count = 0; mtt_count < cfg->region_count &&
- total < mtt_max; mtt_count++) {
- npage = cfg->region[mtt_count].offset;
- if (npage < start_index)
- continue;
+ if (mtt_cnt > cfg->region_count)
+ return -EINVAL;
- addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
- mtt_buf[total] = addr;
+ for (mtt_count = 0; mtt_count < cfg->region_count && total < mtt_cnt;
+ mtt_count++) {
+ npage = cfg->region[mtt_count].offset;
+ if (npage < start_index)
+ continue;
- total++;
- }
+ addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
+ mtt_buf[total] = addr;
- goto done;
+ total++;
}
- start_index = offset >> cfg->buf_pg_shift;
- left = mtt_max;
+ if (!total)
+ return -ENOENT;
+
+ return 0;
+}
+
+static int hns_roce_get_mhop_mtt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtr *mtr, u32 start_index,
+ u64 *mtt_buf, int mtt_cnt)
+{
+ int left = mtt_cnt;
+ int total = 0;
+ int mtt_count;
+ __le64 *mtts;
+ u32 npage;
+
while (left > 0) {
mtt_count = 0;
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
start_index + total,
&mtt_count);
if (!mtts || !mtt_count)
- goto done;
+ break;
npage = min(mtt_count, left);
left -= npage;
@@ -857,69 +898,165 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
}
-done:
- if (base_addr)
- *base_addr = cfg->root_ba;
+ if (!total)
+ return -ENOENT;
+
+ return 0;
+}
+
+int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ u32 offset, u64 *mtt_buf, int mtt_max)
+{
+ struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
+ u32 start_index;
+ int ret;
+
+ if (!mtt_buf || mtt_max < 1)
+ return -EINVAL;
+
+ /* no mtt memory in direct mode, so just return the buffer address */
+ if (cfg->is_direct) {
+ start_index = offset >> HNS_HW_PAGE_SHIFT;
+ ret = hns_roce_get_direct_addr_mtt(cfg, start_index,
+ mtt_buf, mtt_max);
+ } else {
+ start_index = offset >> cfg->buf_pg_shift;
+ ret = hns_roce_get_mhop_mtt(hr_dev, mtr, start_index,
+ mtt_buf, mtt_max);
+ }
+ return ret;
+}
+
+static int get_best_page_shift(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr)
+{
+ unsigned int page_sz;
+
+ if (!buf_attr->adaptive || buf_attr->type != MTR_PBL || !mtr->umem)
+ return 0;
+
+ page_sz = ib_umem_find_best_pgsz(mtr->umem,
+ hr_dev->caps.page_size_cap,
+ buf_attr->iova);
+ if (!page_sz)
+ return -EINVAL;
+
+ buf_attr->page_shift = order_base_2(page_sz);
+ return 0;
+}
+
+static int get_best_hop_num(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr,
+ unsigned int ba_pg_shift)
+{
+#define INVALID_HOPNUM -1
+#define MIN_BA_CNT 1
+ size_t buf_pg_sz = 1 << buf_attr->page_shift;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ size_t ba_pg_sz = 1 << ba_pg_shift;
+ int hop_num = INVALID_HOPNUM;
+ size_t unit = MIN_BA_CNT;
+ size_t ba_cnt;
+ int j;
+
+ if (!buf_attr->adaptive || buf_attr->type != MTR_PBL)
+ return 0;
+
+ /* Calculating the number of buf pages; each buf page needs a BA */
+ if (mtr->umem)
+ ba_cnt = ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz);
+ else
+ ba_cnt = DIV_ROUND_UP(buf_attr->region[0].size, buf_pg_sz);
+
+ for (j = 0; j <= HNS_ROCE_MAX_HOP_NUM; j++) {
+ if (ba_cnt <= unit) {
+ hop_num = j;
+ break;
+ }
+ /* Number of BAs that can be represented per hop */
+ unit *= ba_pg_sz / BA_BYTE_LEN;
+ }
+
+ if (hop_num < 0) {
+ ibdev_err(ibdev,
+ "failed to calculate a valid hopnum.\n");
+ return -EINVAL;
+ }
- return total;
+ buf_attr->region[0].hopnum = hop_num;
+
+ return 0;
+}
+
+static bool is_buf_attr_valid(struct hns_roce_dev *hr_dev,
+ struct hns_roce_buf_attr *attr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+
+ if (attr->region_count > ARRAY_SIZE(attr->region) ||
+ attr->region_count < 1 || attr->page_shift < HNS_HW_PAGE_SHIFT) {
+ ibdev_err(ibdev,
+ "invalid buf attr, region count %d, page shift %u.\n",
+ attr->region_count, attr->page_shift);
+ return false;
+ }
+
+ return true;
}
static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
- struct hns_roce_buf_attr *attr,
- struct hns_roce_hem_cfg *cfg,
- unsigned int *buf_page_shift, u64 unalinged_size)
+ struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *attr)
{
+ struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
struct hns_roce_buf_region *r;
- u64 first_region_padding;
- int page_cnt, region_cnt;
- unsigned int page_shift;
+ size_t buf_pg_sz;
size_t buf_size;
+ int page_cnt, i;
+ u64 pgoff = 0;
+
+ if (!is_buf_attr_valid(hr_dev, attr))
+ return -EINVAL;
/* If mtt is disabled, all pages must be within a continuous range */
cfg->is_direct = !mtr_has_mtt(attr);
+ cfg->region_count = attr->region_count;
buf_size = mtr_bufs_size(attr);
- if (cfg->is_direct) {
- /* When HEM buffer uses 0-level addressing, the page size is
- * equal to the whole buffer size, and we split the buffer into
- * small pages which is used to check whether the adjacent
- * units are in the continuous space and its size is fixed to
- * 4K based on hns ROCEE's requirement.
- */
- page_shift = HNS_HW_PAGE_SHIFT;
-
- /* The ROCEE requires the page size to be 4K * 2 ^ N. */
+ if (need_split_huge_page(mtr)) {
+ buf_pg_sz = HNS_HW_PAGE_SIZE;
cfg->buf_pg_count = 1;
+ /* The ROCEE requires the page size to be 4K * 2 ^ N. */
cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT +
order_base_2(DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE));
- first_region_padding = 0;
} else {
- page_shift = attr->page_shift;
- cfg->buf_pg_count = DIV_ROUND_UP(buf_size + unalinged_size,
- 1 << page_shift);
- cfg->buf_pg_shift = page_shift;
- first_region_padding = unalinged_size;
+ buf_pg_sz = 1 << attr->page_shift;
+ cfg->buf_pg_count = mtr->umem ?
+ ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz) :
+ DIV_ROUND_UP(buf_size, buf_pg_sz);
+ cfg->buf_pg_shift = attr->page_shift;
+ pgoff = mtr->umem ? mtr->umem->address & ~PAGE_MASK : 0;
}
/* Convert buffer size to page index and page count for each region and
* the buffer's offset needs to be appended to the first region.
*/
- for (page_cnt = 0, region_cnt = 0; region_cnt < attr->region_count &&
- region_cnt < ARRAY_SIZE(cfg->region); region_cnt++) {
- r = &cfg->region[region_cnt];
+ for (page_cnt = 0, i = 0; i < attr->region_count; i++) {
+ r = &cfg->region[i];
r->offset = page_cnt;
- buf_size = hr_hw_page_align(attr->region[region_cnt].size +
- first_region_padding);
- r->count = DIV_ROUND_UP(buf_size, 1 << page_shift);
- first_region_padding = 0;
+ buf_size = hr_hw_page_align(attr->region[i].size + pgoff);
+ if (attr->type == MTR_PBL && mtr->umem)
+ r->count = ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz);
+ else
+ r->count = DIV_ROUND_UP(buf_size, buf_pg_sz);
+
+ pgoff = 0;
page_cnt += r->count;
- r->hopnum = to_hr_hem_hopnum(attr->region[region_cnt].hopnum,
- r->count);
+ r->hopnum = to_hr_hem_hopnum(attr->region[i].hopnum, r->count);
}
- cfg->region_count = region_cnt;
- *buf_page_shift = page_shift;
-
- return page_cnt;
+ return 0;
}
static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum)
@@ -1007,50 +1144,58 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
unsigned long user_addr)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
- unsigned int buf_page_shift = 0;
- int buf_page_cnt;
int ret;
- buf_page_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg,
- &buf_page_shift,
- udata ? user_addr & ~PAGE_MASK : 0);
- if (buf_page_cnt < 1 || buf_page_shift < HNS_HW_PAGE_SHIFT) {
- ibdev_err(ibdev, "failed to init mtr cfg, count %d shift %u.\n",
- buf_page_cnt, buf_page_shift);
- return -EINVAL;
- }
-
- ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);
- if (ret) {
- ibdev_err(ibdev, "failed to alloc mtr mtt, ret = %d.\n", ret);
- return ret;
- }
-
/* The caller has its own buffer list and invokes the hns_roce_mtr_map()
* to finish the MTT configuration.
*/
if (buf_attr->mtt_only) {
mtr->umem = NULL;
mtr->kmem = NULL;
- return 0;
+ } else {
+ ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr);
+ if (ret) {
+ ibdev_err(ibdev,
+ "failed to alloc mtr bufs, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = get_best_page_shift(hr_dev, mtr, buf_attr);
+ if (ret)
+ goto err_init_buf;
+
+ ret = get_best_hop_num(hr_dev, mtr, buf_attr, ba_page_shift);
+ if (ret)
+ goto err_init_buf;
}
- ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr);
+ ret = mtr_init_buf_cfg(hr_dev, mtr, buf_attr);
+ if (ret)
+ goto err_init_buf;
+
+ ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);
if (ret) {
- ibdev_err(ibdev, "failed to alloc mtr bufs, ret = %d.\n", ret);
- goto err_alloc_mtt;
+ ibdev_err(ibdev, "failed to alloc mtr mtt, ret = %d.\n", ret);
+ goto err_init_buf;
}
+ if (buf_attr->mtt_only)
+ return 0;
+
/* Write buffer's dma address to MTT */
- ret = mtr_map_bufs(hr_dev, mtr, buf_page_cnt, buf_page_shift);
- if (ret)
+ ret = mtr_map_bufs(hr_dev, mtr);
+ if (ret) {
ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret);
- else
- return 0;
+ goto err_alloc_mtt;
+ }
+
+ return 0;
- mtr_free_bufs(hr_dev, mtr);
err_alloc_mtt:
mtr_free_mtt(hr_dev, mtr);
+err_init_buf:
+ mtr_free_bufs(hr_dev, mtr);
+
return ret;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 31b147210688a..f35a66325d9a6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -1004,6 +1004,60 @@ static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
kfree(hr_qp->sq.wrid);
}
+static void default_congest_type(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+{
+ if (hr_qp->ibqp.qp_type == IB_QPT_UD ||
+ hr_qp->ibqp.qp_type == IB_QPT_GSI)
+ hr_qp->cong_type = CONG_TYPE_DCQCN;
+ else
+ hr_qp->cong_type = hr_dev->caps.default_cong_type;
+}
+
+static int set_congest_type(struct hns_roce_qp *hr_qp,
+ struct hns_roce_ib_create_qp *ucmd)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
+
+ switch (ucmd->cong_type_flags) {
+ case HNS_ROCE_CREATE_QP_FLAGS_DCQCN:
+ hr_qp->cong_type = CONG_TYPE_DCQCN;
+ break;
+ case HNS_ROCE_CREATE_QP_FLAGS_LDCP:
+ hr_qp->cong_type = CONG_TYPE_LDCP;
+ break;
+ case HNS_ROCE_CREATE_QP_FLAGS_HC3:
+ hr_qp->cong_type = CONG_TYPE_HC3;
+ break;
+ case HNS_ROCE_CREATE_QP_FLAGS_DIP:
+ hr_qp->cong_type = CONG_TYPE_DIP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!test_bit(hr_qp->cong_type, (unsigned long *)&hr_dev->caps.cong_cap))
+ return -EOPNOTSUPP;
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_UD &&
+ hr_qp->cong_type != CONG_TYPE_DCQCN)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int set_congest_param(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_ib_create_qp *ucmd)
+{
+ if (ucmd->comp_mask & HNS_ROCE_CREATE_QP_MASK_CONGEST_TYPE)
+ return set_congest_type(hr_qp, ucmd);
+
+ default_congest_type(hr_dev, hr_qp);
+
+ return 0;
+}
+
static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata,
@@ -1043,6 +1097,10 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
ibdev_err(ibdev,
"failed to set user SQ size, ret = %d.\n",
ret);
+
+ ret = set_congest_param(hr_dev, hr_qp, ucmd);
+ if (ret)
+ return ret;
} else {
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
hr_qp->config = HNS_ROCE_EXSGE_FLAGS;
@@ -1051,6 +1109,8 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
ibdev_err(ibdev,
"failed to set kernel SQ size, ret = %d.\n",
ret);
+
+ default_congest_type(hr_dev, hr_qp);
}
return ret;
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index f7f3c4cc7426d..356d988169497 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -97,16 +97,33 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
- struct hns_roce_v2_qp_context context;
+ struct hns_roce_full_qp_ctx {
+ struct hns_roce_v2_qp_context qpc;
+ struct hns_roce_v2_scc_context sccc;
+ } context = {};
int ret;
if (!hr_dev->hw->query_qpc)
return -EINVAL;
- ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context);
+ ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context.qpc);
if (ret)
- return -EINVAL;
+ return ret;
+
+ /* If SCC is disabled or the query fails, the queried SCCC will
+ * be all 0.
+ */
+ if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) ||
+ !hr_dev->hw->query_sccc)
+ goto out;
+
+ ret = hr_dev->hw->query_sccc(hr_dev, hr_qp->qpn, &context.sccc);
+ if (ret)
+ ibdev_warn_ratelimited(&hr_dev->ib_dev,
+ "failed to query SCCC, ret = %d.\n",
+ ret);
+out:
ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
return ret;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 0b046c061742b..12704efb7b19a 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -719,7 +719,6 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
- ukinfo->qp_id = iwqp->ibqp.qp_num;
iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
@@ -944,7 +943,7 @@ static int irdma_create_qp(struct ib_qp *ibqp,
iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
init_info.pd = &iwpd->sc_pd;
- init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
+ init_info.qp_uk_init_info.qp_id = qp_num;
if (!rdma_protocol_roce(&iwdev->ibdev, 1))
init_info.qp_uk_init_info.first_sq_wq = 1;
iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 83ebd070535ab..4a71e678d09c1 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -16,7 +16,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev->gdma_dev->gdma_context;
+ gc = mdev_to_gc(mdev);
if (udata->inlen < sizeof(ucmd))
return -EINVAL;
@@ -48,7 +48,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return err;
}
- err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq->gdma_region);
+ err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem, &cq->gdma_region);
if (err) {
ibdev_dbg(ibdev,
"Failed to create dma region for create cq, %d\n",
@@ -57,7 +57,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
ibdev_dbg(ibdev,
- "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
+ "create_dma_region ret %d gdma_region 0x%llx\n",
err, cq->gdma_region);
/*
@@ -81,7 +81,7 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev->gdma_dev->gdma_context;
+ gc = mdev_to_gc(mdev);
err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
if (err) {
@@ -100,10 +100,29 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
return 0;
}
-void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
+static void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
{
struct mana_ib_cq *cq = ctx;
if (cq->ibcq.comp_handler)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
+
+int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct gdma_queue *gdma_cq;
+
+ /* Create CQ table entry */
+ WARN_ON(gc->cq_table[cq->id]);
+ gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
+ if (!gdma_cq)
+ return -ENOMEM;
+
+ gdma_cq->cq.context = cq;
+ gdma_cq->type = GDMA_CQ;
+ gdma_cq->cq.callback = mana_ib_cq_handler;
+ gdma_cq->id = cq->id;
+ gc->cq_table[cq->id] = gdma_cq;
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index faca092456fa3..71e33feee61bb 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -8,13 +8,10 @@
void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
u32 port)
{
- struct gdma_dev *gd = &dev->gdma_dev->gdma_context->mana;
struct mana_port_context *mpc;
struct net_device *ndev;
- struct mana_context *mc;
- mc = gd->driver_data;
- ndev = mc->ports[port];
+ ndev = mana_ib_get_netdev(&dev->ib_dev, port);
mpc = netdev_priv(ndev);
mutex_lock(&pd->vport_mutex);
@@ -31,14 +28,11 @@ void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,
u32 doorbell_id)
{
- struct gdma_dev *mdev = &dev->gdma_dev->gdma_context->mana;
struct mana_port_context *mpc;
- struct mana_context *mc;
struct net_device *ndev;
int err;
- mc = mdev->driver_data;
- ndev = mc->ports[port];
+ ndev = mana_ib_get_netdev(&dev->ib_dev, port);
mpc = netdev_priv(ndev);
mutex_lock(&pd->vport_mutex);
@@ -79,17 +73,17 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct gdma_create_pd_req req = {};
enum gdma_pd_flags flags = 0;
struct mana_ib_dev *dev;
- struct gdma_dev *mdev;
+ struct gdma_context *gc;
int err;
dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- mdev = dev->gdma_dev;
+ gc = mdev_to_gc(dev);
mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
sizeof(resp));
req.flags = flags;
- err = mana_gd_send_request(mdev->gdma_context, sizeof(req), &req,
+ err = mana_gd_send_request(gc, sizeof(req), &req,
sizeof(resp), &resp);
if (err || resp.hdr.status) {
@@ -119,17 +113,17 @@ int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct gdma_destory_pd_resp resp = {};
struct gdma_destroy_pd_req req = {};
struct mana_ib_dev *dev;
- struct gdma_dev *mdev;
+ struct gdma_context *gc;
int err;
dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- mdev = dev->gdma_dev;
+ gc = mdev_to_gc(dev);
mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
sizeof(resp));
req.pd_handle = pd->pd_handle;
- err = mana_gd_send_request(mdev->gdma_context, sizeof(req), &req,
+ err = mana_gd_send_request(gc, sizeof(req), &req,
sizeof(resp), &resp);
if (err || resp.hdr.status) {
@@ -206,13 +200,11 @@ int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
struct ib_device *ibdev = ibcontext->device;
struct mana_ib_dev *mdev;
struct gdma_context *gc;
- struct gdma_dev *dev;
int doorbell_page;
int ret;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- dev = mdev->gdma_dev;
- gc = dev->gdma_context;
+ gc = mdev_to_gc(mdev);
/* Allocate a doorbell page index */
ret = mana_gd_allocate_doorbell_page(gc, &doorbell_page);
@@ -238,7 +230,7 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
int ret;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev->gdma_dev->gdma_context;
+ gc = mdev_to_gc(mdev);
ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell);
if (ret)
@@ -309,8 +301,8 @@ mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc,
return 0;
}
-int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
- mana_handle_t *gdma_region)
+static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region, unsigned long page_sz)
{
struct gdma_dma_region_add_pages_req *add_req = NULL;
size_t num_pages_processed = 0, num_pages_to_handle;
@@ -322,23 +314,14 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
size_t max_pgs_create_cmd;
struct gdma_context *gc;
size_t num_pages_total;
- struct gdma_dev *mdev;
- unsigned long page_sz;
unsigned int tail = 0;
u64 *page_addr_list;
void *request_buf;
int err;
- mdev = dev->gdma_dev;
- gc = mdev->gdma_context;
+ gc = mdev_to_gc(dev);
hwc = gc->hwc.driver_data;
- /* Hardware requires dma region to align to chosen page size */
- page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, 0);
- if (!page_sz) {
- ibdev_dbg(&dev->ib_dev, "failed to find page size.\n");
- return -ENOMEM;
- }
num_pages_total = ib_umem_num_dma_blocks(umem, page_sz);
max_pgs_create_cmd =
@@ -358,7 +341,7 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
sizeof(struct gdma_create_dma_region_resp));
create_req->length = umem->length;
- create_req->offset_in_page = umem->address & (page_sz - 1);
+ create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT;
create_req->page_count = num_pages_total;
@@ -424,12 +407,39 @@ out:
return err;
}
+int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region, u64 virt)
+{
+ unsigned long page_sz;
+
+ page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
+ if (!page_sz) {
+ ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
+ return -EINVAL;
+ }
+
+ return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
+}
+
+int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region)
+{
+ unsigned long page_sz;
+
+ /* Hardware requires dma region to align to chosen page size */
+ page_sz = ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);
+ if (!page_sz) {
+ ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
+ return -EINVAL;
+ }
+
+ return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
+}
+
int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region)
{
- struct gdma_dev *mdev = dev->gdma_dev;
- struct gdma_context *gc;
+ struct gdma_context *gc = mdev_to_gc(dev);
- gc = mdev->gdma_context;
ibdev_dbg(&dev->ib_dev, "destroy dma region 0x%llx\n", gdma_region);
return mana_gd_destroy_dma_region(gc, gdma_region);
@@ -447,7 +457,7 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
int ret;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev->gdma_dev->gdma_context;
+ gc = mdev_to_gc(mdev);
if (vma->vm_pgoff != 0) {
ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma->vm_pgoff);
@@ -531,7 +541,7 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
req.hdr.dev_id = dev->gdma_dev->dev_id;
- err = mana_gd_send_request(dev->gdma_dev->gdma_context, sizeof(req),
+ err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req),
&req, sizeof(resp), &resp);
if (err) {
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 6bdc0f5498d5b..f83390eebb7d7 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -142,8 +142,29 @@ struct mana_ib_query_adapter_caps_resp {
u32 max_inline_data_size;
}; /* HW Data */
-int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
- mana_handle_t *gdma_region);
+static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
+{
+ return mdev->gdma_dev->gdma_context;
+}
+
+static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32 port)
+{
+ struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_context *mc = gc->mana.driver_data;
+
+ if (port < 1 || port > mc->num_ports)
+ return NULL;
+ return mc->ports[port - 1];
+}
+
+int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
+
+int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region);
+
+int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region, u64 virt);
int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
mana_handle_t gdma_region);
@@ -210,6 +231,4 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);
-
-void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq);
#endif
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index 351207c60eb65..b70b13484f097 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -30,12 +30,9 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
{
struct gdma_create_mr_response resp = {};
struct gdma_create_mr_request req = {};
- struct gdma_dev *mdev = dev->gdma_dev;
- struct gdma_context *gc;
+ struct gdma_context *gc = mdev_to_gc(dev);
int err;
- gc = mdev->gdma_context;
-
mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
sizeof(resp));
req.pd_handle = mr_params->pd_handle;
@@ -77,12 +74,9 @@ static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
{
struct gdma_destroy_mr_response resp = {};
struct gdma_destroy_mr_request req = {};
- struct gdma_dev *mdev = dev->gdma_dev;
- struct gdma_context *gc;
+ struct gdma_context *gc = mdev_to_gc(dev);
int err;
- gc = mdev->gdma_context;
-
mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
sizeof(resp));
@@ -133,7 +127,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
goto err_free;
}
- err = mana_ib_gd_create_dma_region(dev, mr->umem, &dma_region_handle);
+ err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
if (err) {
ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
err);
@@ -141,7 +135,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
}
ibdev_dbg(ibdev,
- "mana_ib_gd_create_dma_region ret %d gdma_region %llx\n", err,
+ "create_dma_region ret %d gdma_region %llx\n", err,
dma_region_handle);
mr_params.pd_handle = pd->pd_handle;
@@ -164,8 +158,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
return &mr->ibmr;
err_dma_region:
- mana_gd_destroy_dma_region(dev->gdma_dev->gdma_context,
- dma_region_handle);
+ mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);
err_umem:
ib_umem_release(mr->umem);
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 21ac9fcadf3f2..6e7627745c957 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -17,12 +17,10 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
struct mana_cfg_rx_steer_resp resp = {};
mana_handle_t *req_indir_tab;
struct gdma_context *gc;
- struct gdma_dev *mdev;
u32 req_buf_size;
int i, err;
- gc = dev->gdma_dev->gdma_context;
- mdev = &gc->mana;
+ gc = mdev_to_gc(dev);
req_buf_size =
sizeof(*req) + sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
@@ -39,7 +37,7 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
req->rx_enable = 1;
req->update_default_rxobj = 1;
req->default_rxobj = default_rxobj;
- req->hdr.dev_id = mdev->dev_id;
+ req->hdr.dev_id = gc->mana.dev_id;
/* If there are more than 1 entries in indirection table, enable RSS */
if (log_ind_tbl_size)
@@ -99,20 +97,17 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
struct mana_ib_dev *mdev =
container_of(pd->device, struct mana_ib_dev, ib_dev);
+ struct gdma_context *gc = mdev_to_gc(mdev);
struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
struct mana_ib_create_qp_rss_resp resp = {};
struct mana_ib_create_qp_rss ucmd = {};
struct gdma_queue **gdma_cq_allocated;
mana_handle_t *mana_ind_table;
struct mana_port_context *mpc;
- struct gdma_queue *gdma_cq;
unsigned int ind_tbl_size;
- struct mana_context *mc;
struct net_device *ndev;
- struct gdma_context *gc;
struct mana_ib_cq *cq;
struct mana_ib_wq *wq;
- struct gdma_dev *gd;
struct mana_eq *eq;
struct ib_cq *ibcq;
struct ib_wq *ibwq;
@@ -120,10 +115,6 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
u32 port;
int ret;
- gc = mdev->gdma_dev->gdma_context;
- gd = &gc->mana;
- mc = gd->driver_data;
-
if (!udata || udata->inlen < sizeof(ucmd))
return -EINVAL;
@@ -166,12 +157,12 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
/* IB ports start with 1, MANA start with 0 */
port = ucmd.port;
- if (port < 1 || port > mc->num_ports) {
+ ndev = mana_ib_get_netdev(pd->device, port);
+ if (!ndev) {
ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
port);
return -EINVAL;
}
- ndev = mc->ports[port - 1];
mpc = netdev_priv(ndev);
ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
@@ -209,7 +200,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
cq_spec.gdma_region = cq->gdma_region;
cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
cq_spec.modr_ctx_id = 0;
- eq = &mc->eqs[cq->comp_vector % gc->max_num_queues];
+ eq = &mpc->ac->eqs[cq->comp_vector % gc->max_num_queues];
cq_spec.attached_eq = eq->eq->id;
ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
@@ -237,19 +228,11 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
mana_ind_table[i] = wq->rx_object;
/* Create CQ table entry */
- WARN_ON(gc->cq_table[cq->id]);
- gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
- if (!gdma_cq) {
- ret = -ENOMEM;
+ ret = mana_ib_install_cq_cb(mdev, cq);
+ if (ret)
goto fail;
- }
- gdma_cq_allocated[i] = gdma_cq;
- gdma_cq->cq.context = cq;
- gdma_cq->type = GDMA_CQ;
- gdma_cq->cq.callback = mana_ib_cq_handler;
- gdma_cq->id = cq->id;
- gc->cq_table[cq->id] = gdma_cq;
+ gdma_cq_allocated[i] = gc->cq_table[cq->id];
}
resp.num_entries = i;
@@ -306,14 +289,13 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
struct mana_ib_ucontext *mana_ucontext =
rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
ibucontext);
- struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
+ struct gdma_context *gc = mdev_to_gc(mdev);
struct mana_ib_create_qp_resp resp = {};
struct mana_ib_create_qp ucmd = {};
struct gdma_queue *gdma_cq = NULL;
struct mana_obj_spec wq_spec = {};
struct mana_obj_spec cq_spec = {};
struct mana_port_context *mpc;
- struct mana_context *mc;
struct net_device *ndev;
struct ib_umem *umem;
struct mana_eq *eq;
@@ -321,8 +303,6 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
u32 port;
int err;
- mc = gd->driver_data;
-
if (!mana_ucontext || udata->inlen < sizeof(ucmd))
return -EINVAL;
@@ -333,11 +313,6 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
return err;
}
- /* IB ports start with 1, MANA Ethernet ports start with 0 */
- port = ucmd.port;
- if (port < 1 || port > mc->num_ports)
- return -EINVAL;
-
if (attr->cap.max_send_wr > mdev->adapter_caps.max_qp_wr) {
ibdev_dbg(&mdev->ib_dev,
"Requested max_send_wr %d exceeding limit\n",
@@ -352,11 +327,17 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
return -EINVAL;
}
- ndev = mc->ports[port - 1];
+ port = ucmd.port;
+ ndev = mana_ib_get_netdev(ibpd->device, port);
+ if (!ndev) {
+ ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
+ port);
+ return -EINVAL;
+ }
mpc = netdev_priv(ndev);
ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);
- err = mana_ib_cfg_vport(mdev, port - 1, pd, mana_ucontext->doorbell);
+ err = mana_ib_cfg_vport(mdev, port, pd, mana_ucontext->doorbell);
if (err)
return -ENODEV;
@@ -376,8 +357,8 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
}
qp->sq_umem = umem;
- err = mana_ib_gd_create_dma_region(mdev, qp->sq_umem,
- &qp->sq_gdma_region);
+ err = mana_ib_create_zero_offset_dma_region(mdev, qp->sq_umem,
+ &qp->sq_gdma_region);
if (err) {
ibdev_dbg(&mdev->ib_dev,
"Failed to create dma region for create qp-raw, %d\n",
@@ -386,7 +367,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
}
ibdev_dbg(&mdev->ib_dev,
- "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
+ "create_dma_region ret %d gdma_region 0x%llx\n",
err, qp->sq_gdma_region);
/* Create a WQ on the same port handle used by the Ethernet */
@@ -396,8 +377,8 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
cq_spec.gdma_region = send_cq->gdma_region;
cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
cq_spec.modr_ctx_id = 0;
- eq_vec = send_cq->comp_vector % gd->gdma_context->max_num_queues;
- eq = &mc->eqs[eq_vec];
+ eq_vec = send_cq->comp_vector % gc->max_num_queues;
+ eq = &mpc->ac->eqs[eq_vec];
cq_spec.attached_eq = eq->eq->id;
err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
@@ -417,18 +398,9 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
send_cq->id = cq_spec.queue_index;
/* Create CQ table entry */
- WARN_ON(gd->gdma_context->cq_table[send_cq->id]);
- gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
- if (!gdma_cq) {
- err = -ENOMEM;
+ err = mana_ib_install_cq_cb(mdev, send_cq);
+ if (err)
goto err_destroy_wq_obj;
- }
-
- gdma_cq->cq.context = send_cq;
- gdma_cq->type = GDMA_CQ;
- gdma_cq->cq.callback = mana_ib_cq_handler;
- gdma_cq->id = send_cq->id;
- gd->gdma_context->cq_table[send_cq->id] = gdma_cq;
ibdev_dbg(&mdev->ib_dev,
"ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
@@ -450,7 +422,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
err_release_gdma_cq:
kfree(gdma_cq);
- gd->gdma_context->cq_table[send_cq->id] = NULL;
+ gc->cq_table[send_cq->id] = NULL;
err_destroy_wq_obj:
mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
@@ -462,7 +434,7 @@ err_release_umem:
ib_umem_release(umem);
err_free_vport:
- mana_ib_uncfg_vport(mdev, pd, port - 1);
+ mana_ib_uncfg_vport(mdev, pd, port);
return err;
}
@@ -500,16 +472,13 @@ static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
{
struct mana_ib_dev *mdev =
container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
- struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
struct mana_port_context *mpc;
- struct mana_context *mc;
struct net_device *ndev;
struct mana_ib_wq *wq;
struct ib_wq *ibwq;
int i;
- mc = gd->driver_data;
- ndev = mc->ports[qp->port - 1];
+ ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
mpc = netdev_priv(ndev);
for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
@@ -527,15 +496,12 @@ static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
{
struct mana_ib_dev *mdev =
container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
- struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
struct ib_pd *ibpd = qp->ibqp.pd;
struct mana_port_context *mpc;
- struct mana_context *mc;
struct net_device *ndev;
struct mana_ib_pd *pd;
- mc = gd->driver_data;
- ndev = mc->ports[qp->port - 1];
+ ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
mpc = netdev_priv(ndev);
pd = container_of(ibpd, struct mana_ib_pd, ibpd);
@@ -546,7 +512,7 @@ static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
ib_umem_release(qp->sq_umem);
}
- mana_ib_uncfg_vport(mdev, pd, qp->port - 1);
+ mana_ib_uncfg_vport(mdev, pd, qp->port);
return 0;
}
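The qp.c hunks above also replace the manual port-range check plus the mc->ports[port - 1] dereference with mana_ib_get_netdev(), which returns NULL for an out-of-range port. Its body is likewise not shown here; a plausible reconstruction from the removed lines (assuming the mdev_to_gc() helper sketched earlier) is:

static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32 port)
{
	struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	struct mana_context *mc = mdev_to_gc(mdev)->mana.driver_data;

	/* IB port numbers are 1-based, MANA Ethernet ports are 0-based */
	if (port < 1 || port > mc->num_ports)
		return NULL;

	return mc->ports[port - 1];
}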
diff --git a/drivers/infiniband/hw/mana/wq.c b/drivers/infiniband/hw/mana/wq.c
index 372d361510e0c..7c9c699625734 100644
--- a/drivers/infiniband/hw/mana/wq.c
+++ b/drivers/infiniband/hw/mana/wq.c
@@ -46,7 +46,7 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
wq->wq_buf_size = ucmd.wq_buf_size;
wq->rx_object = INVALID_MANA_HANDLE;
- err = mana_ib_gd_create_dma_region(mdev, wq->umem, &wq->gdma_region);
+ err = mana_ib_create_zero_offset_dma_region(mdev, wq->umem, &wq->gdma_region);
if (err) {
ibdev_dbg(&mdev->ib_dev,
"Failed to create dma region for create wq, %d\n",
@@ -55,7 +55,7 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
}
ibdev_dbg(&mdev->ib_dev,
- "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
+ "create_dma_region ret %d gdma_region 0x%llx\n",
err, wq->gdma_region);
/* WQ ID is returned at wq_create time, doesn't know the value yet */
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 0c3c4e64812c5..3e43687a7f6f7 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -188,7 +188,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
mdev = dev->mdev;
mdev_port_num = 1;
}
- if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
+ !mlx5_core_mp_enabled(mdev)) {
/* set local port to one for Function-Per-Port HCA. */
mdev = dev->mdev;
mdev_port_num = 1;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index bbe79b86c7178..a8de35c07c9ef 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1377,7 +1377,6 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props);
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
u64 access_flags);
-void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 54c723a6eddac..255677bc12b2a 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -33,6 +33,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
if (rxe->tfm)
crypto_free_shash(rxe->tfm);
+
+ mutex_destroy(&rxe->usdev_lock);
}
/* initialize rxe device parameters */
@@ -160,8 +162,6 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
port->attr.active_mtu = mtu;
port->mtu_cap = ib_mtu_enum_to_int(mtu);
-
- rxe_info_dev(rxe, "Set mtu to %d", port->mtu_cap);
}
/* called by ifc layer to create new rxe device.
@@ -181,7 +181,7 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
int err = 0;
if (is_vlan_dev(ndev)) {
- rxe_err("rxe creation allowed on top of a real device only");
+ rxe_err("rxe creation allowed on top of a real device only\n");
err = -EPERM;
goto err;
}
@@ -189,7 +189,7 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
rxe = rxe_get_dev_from_net(ndev);
if (rxe) {
ib_device_put(&rxe->ib_dev);
- rxe_err_dev(rxe, "already configured on %s", ndev->name);
+ rxe_err_dev(rxe, "already configured on %s\n", ndev->name);
err = -EEXIST;
goto err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index d33dd6cf83d37..d8fb2c7af30a7 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -38,7 +38,7 @@
#define RXE_ROCE_V2_SPORT (0xc000)
-#define rxe_dbg(fmt, ...) pr_debug("%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define rxe_dbg(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
#define rxe_dbg_dev(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev, \
"%s: " fmt, __func__, ##__VA_ARGS__)
#define rxe_dbg_uc(uc, fmt, ...) ibdev_dbg((uc)->ibuc.device, \
@@ -58,7 +58,7 @@
#define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device, \
"mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
-#define rxe_err(fmt, ...) pr_err_ratelimited("%s: " fmt "\n", __func__, \
+#define rxe_err(fmt, ...) pr_err_ratelimited("%s: " fmt, __func__, \
##__VA_ARGS__)
#define rxe_err_dev(rxe, fmt, ...) ibdev_err_ratelimited(&(rxe)->ib_dev, \
"%s: " fmt, __func__, ##__VA_ARGS__)
@@ -79,7 +79,7 @@
#define rxe_err_mw(mw, fmt, ...) ibdev_err_ratelimited((mw)->ibmw.device, \
"mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
-#define rxe_info(fmt, ...) pr_info_ratelimited("%s: " fmt "\n", __func__, \
+#define rxe_info(fmt, ...) pr_info_ratelimited("%s: " fmt, __func__, \
##__VA_ARGS__)
#define rxe_info_dev(rxe, fmt, ...) ibdev_info_ratelimited(&(rxe)->ib_dev, \
"%s: " fmt, __func__, ##__VA_ARGS__)
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index d0bdc2d8adc82..b78b8c0856abd 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -433,7 +433,7 @@ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
}
} else {
if (wqe->status != IB_WC_WR_FLUSH_ERR)
- rxe_err_qp(qp, "non-flush error status = %d",
+ rxe_err_qp(qp, "non-flush error status = %d\n",
wqe->status);
}
}
@@ -582,7 +582,7 @@ static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
err = rxe_cq_post(qp->scq, &cqe, 0);
if (err)
- rxe_dbg_cq(qp->scq, "post cq failed, err = %d", err);
+ rxe_dbg_cq(qp->scq, "post cq failed, err = %d\n", err);
return err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index d5486cbb3f100..fec87c9030abd 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -27,7 +27,7 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
if (cq) {
count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (cqe < count) {
- rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)",
+ rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)\n",
cqe, count);
goto err1;
}
@@ -96,7 +96,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (unlikely(full)) {
- rxe_err_cq(cq, "queue full");
+ rxe_err_cq(cq, "queue full\n");
spin_unlock_irqrestore(&cq->cq_lock, flags);
if (cq->ibcq.event_handler) {
ev.device = cq->ibcq.device;
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 4d2a8ef52c850..746110898a0e6 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -59,7 +59,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
/* rxe_mr.c */
u8 rxe_get_next_key(u32 last_key);
void rxe_mr_init_dma(int access, struct rxe_mr *mr);
-int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
int access, struct rxe_mr *mr);
int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index f54042e9aeb26..da3dee520876a 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -34,7 +34,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
case IB_MR_TYPE_MEM_REG:
if (iova < mr->ibmr.iova ||
iova + length > mr->ibmr.iova + mr->ibmr.length) {
- rxe_dbg_mr(mr, "iova/length out of range");
+ rxe_dbg_mr(mr, "iova/length out of range\n");
return -EINVAL;
}
return 0;
@@ -126,7 +126,7 @@ static int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt)
return xas_error(&xas);
}
-int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
int access, struct rxe_mr *mr)
{
struct ib_umem *umem;
@@ -319,7 +319,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
err = mr_check_range(mr, iova, length);
if (unlikely(err)) {
- rxe_dbg_mr(mr, "iova out of range");
+ rxe_dbg_mr(mr, "iova out of range\n");
return err;
}
@@ -477,7 +477,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
u64 *va;
if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
- rxe_dbg_mr(mr, "mr not in valid state");
+ rxe_dbg_mr(mr, "mr not in valid state\n");
return RESPST_ERR_RKEY_VIOLATION;
}
@@ -490,7 +490,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
err = mr_check_range(mr, iova, sizeof(value));
if (err) {
- rxe_dbg_mr(mr, "iova out of range");
+ rxe_dbg_mr(mr, "iova out of range\n");
return RESPST_ERR_RKEY_VIOLATION;
}
page_offset = rxe_mr_iova_to_page_offset(mr, iova);
@@ -501,7 +501,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
}
if (unlikely(page_offset & 0x7)) {
- rxe_dbg_mr(mr, "iova not aligned");
+ rxe_dbg_mr(mr, "iova not aligned\n");
return RESPST_ERR_MISALIGNED_ATOMIC;
}
@@ -534,7 +534,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
/* See IBA oA19-28 */
if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
- rxe_dbg_mr(mr, "mr not in valid state");
+ rxe_dbg_mr(mr, "mr not in valid state\n");
return RESPST_ERR_RKEY_VIOLATION;
}
@@ -548,7 +548,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
/* See IBA oA19-28 */
err = mr_check_range(mr, iova, sizeof(value));
if (unlikely(err)) {
- rxe_dbg_mr(mr, "iova out of range");
+ rxe_dbg_mr(mr, "iova out of range\n");
return RESPST_ERR_RKEY_VIOLATION;
}
page_offset = rxe_mr_iova_to_page_offset(mr, iova);
@@ -560,7 +560,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
/* See IBA A19.4.2 */
if (unlikely(page_offset & 0x7)) {
- rxe_dbg_mr(mr, "misaligned address");
+ rxe_dbg_mr(mr, "misaligned address\n");
return RESPST_ERR_MISALIGNED_ATOMIC;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index d9312b5c9d207..379e65bfcd49a 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -198,7 +198,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
}
if (access & ~RXE_ACCESS_SUPPORTED_MW) {
- rxe_err_mw(mw, "access %#x not supported", access);
+ rxe_err_mw(mw, "access %#x not supported\n", access);
ret = -EOPNOTSUPP;
goto err_drop_mr;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 28e379c108bce..e3589c02013ec 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -201,7 +201,7 @@ static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size,
QUEUE_TYPE_FROM_CLIENT);
if (!qp->sq.queue) {
- rxe_err_qp(qp, "Unable to allocate send queue");
+ rxe_err_qp(qp, "Unable to allocate send queue\n");
err = -ENOMEM;
goto err_out;
}
@@ -211,7 +211,7 @@ static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
qp->sq.queue->buf, qp->sq.queue->buf_size,
&qp->sq.queue->ip);
if (err) {
- rxe_err_qp(qp, "do_mmap_info failed, err = %d", err);
+ rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
goto err_free;
}
@@ -292,7 +292,7 @@ static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size,
QUEUE_TYPE_FROM_CLIENT);
if (!qp->rq.queue) {
- rxe_err_qp(qp, "Unable to allocate recv queue");
+ rxe_err_qp(qp, "Unable to allocate recv queue\n");
err = -ENOMEM;
goto err_out;
}
@@ -302,7 +302,7 @@ static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
qp->rq.queue->buf, qp->rq.queue->buf_size,
&qp->rq.queue->ip);
if (err) {
- rxe_err_qp(qp, "do_mmap_info failed, err = %d", err);
+ rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
goto err_free;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index da470a925efc7..963382f625d71 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -362,18 +362,18 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
if ((pkt->mask & RXE_START_MASK) &&
(pkt->mask & RXE_END_MASK)) {
if (unlikely(payload > mtu)) {
- rxe_dbg_qp(qp, "only packet too long");
+ rxe_dbg_qp(qp, "only packet too long\n");
return RESPST_ERR_LENGTH;
}
} else if ((pkt->mask & RXE_START_MASK) ||
(pkt->mask & RXE_MIDDLE_MASK)) {
if (unlikely(payload != mtu)) {
- rxe_dbg_qp(qp, "first or middle packet not mtu");
+ rxe_dbg_qp(qp, "first or middle packet not mtu\n");
return RESPST_ERR_LENGTH;
}
} else if (pkt->mask & RXE_END_MASK) {
if (unlikely((payload == 0) || (payload > mtu))) {
- rxe_dbg_qp(qp, "last packet zero or too long");
+ rxe_dbg_qp(qp, "last packet zero or too long\n");
return RESPST_ERR_LENGTH;
}
}
@@ -382,7 +382,7 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
/* See IBA C9-94 */
if (pkt->mask & RXE_RETH_MASK) {
if (reth_len(pkt) > (1U << 31)) {
- rxe_dbg_qp(qp, "dma length too long");
+ rxe_dbg_qp(qp, "dma length too long\n");
return RESPST_ERR_LENGTH;
}
}
@@ -1133,7 +1133,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
}
} else {
if (wc->status != IB_WC_WR_FLUSH_ERR)
- rxe_err_qp(qp, "non-flush error status = %d",
+ rxe_err_qp(qp, "non-flush error status = %d\n",
wc->status);
}
@@ -1442,7 +1442,7 @@ static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
err = rxe_cq_post(qp->rcq, &cqe, 0);
if (err)
- rxe_dbg_cq(qp->rcq, "post cq failed err = %d", err);
+ rxe_dbg_cq(qp->rcq, "post cq failed err = %d\n", err);
return err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 1501120d4f524..80332638d9e3a 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -156,7 +156,7 @@ static void do_task(struct rxe_task *task)
default:
WARN_ON(1);
- rxe_dbg_qp(task->qp, "unexpected task state = %d",
+ rxe_dbg_qp(task->qp, "unexpected task state = %d\n",
task->state);
task->state = TASK_STATE_IDLE;
}
@@ -167,7 +167,7 @@ exit:
if (WARN_ON(task->num_done != task->num_sched))
rxe_dbg_qp(
task->qp,
- "%ld tasks scheduled, %ld tasks done",
+ "%ld tasks scheduled, %ld tasks done\n",
task->num_sched, task->num_done);
}
spin_unlock_irqrestore(&task->lock, flags);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 48f86839d36a8..614581989b381 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -23,7 +23,7 @@ static int rxe_query_device(struct ib_device *ibdev,
int err;
if (udata->inlen || udata->outlen) {
- rxe_dbg_dev(rxe, "malformed udata");
+ rxe_dbg_dev(rxe, "malformed udata\n");
err = -EINVAL;
goto err_out;
}
@@ -33,7 +33,7 @@ static int rxe_query_device(struct ib_device *ibdev,
return 0;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -45,7 +45,7 @@ static int rxe_query_port(struct ib_device *ibdev,
if (port_num != 1) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
+ rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
goto err_out;
}
@@ -67,7 +67,7 @@ static int rxe_query_port(struct ib_device *ibdev,
return ret;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -79,7 +79,7 @@ static int rxe_query_pkey(struct ib_device *ibdev,
if (index != 0) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "bad pkey index = %d", index);
+ rxe_dbg_dev(rxe, "bad pkey index = %d\n", index);
goto err_out;
}
@@ -87,7 +87,7 @@ static int rxe_query_pkey(struct ib_device *ibdev,
return 0;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -100,7 +100,7 @@ static int rxe_modify_device(struct ib_device *ibdev,
if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
IB_DEVICE_MODIFY_NODE_DESC)) {
err = -EOPNOTSUPP;
- rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask);
+ rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask);
goto err_out;
}
@@ -115,7 +115,7 @@ static int rxe_modify_device(struct ib_device *ibdev,
return 0;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -128,14 +128,14 @@ static int rxe_modify_port(struct ib_device *ibdev, u32 port_num,
if (port_num != 1) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
+ rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
goto err_out;
}
//TODO is shutdown useful
if (mask & ~(IB_PORT_RESET_QKEY_CNTR)) {
err = -EOPNOTSUPP;
- rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask);
+ rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask);
goto err_out;
}
@@ -149,7 +149,7 @@ static int rxe_modify_port(struct ib_device *ibdev, u32 port_num,
return 0;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -161,14 +161,14 @@ static enum rdma_link_layer rxe_get_link_layer(struct ib_device *ibdev,
if (port_num != 1) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
+ rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
goto err_out;
}
return IB_LINK_LAYER_ETHERNET;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -181,7 +181,7 @@ static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num,
if (port_num != 1) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
+ rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
goto err_out;
}
@@ -197,7 +197,7 @@ static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num,
return 0;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -210,7 +210,7 @@ static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
err = rxe_add_to_pool(&rxe->uc_pool, uc);
if (err)
- rxe_err_dev(rxe, "unable to create uc");
+ rxe_err_dev(rxe, "unable to create uc\n");
return err;
}
@@ -222,7 +222,7 @@ static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
err = rxe_cleanup(uc);
if (err)
- rxe_err_uc(uc, "cleanup failed, err = %d", err);
+ rxe_err_uc(uc, "cleanup failed, err = %d\n", err);
}
/* pd */
@@ -234,14 +234,14 @@ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
err = rxe_add_to_pool(&rxe->pd_pool, pd);
if (err) {
- rxe_dbg_dev(rxe, "unable to alloc pd");
+ rxe_dbg_dev(rxe, "unable to alloc pd\n");
goto err_out;
}
return 0;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -252,7 +252,7 @@ static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
err = rxe_cleanup(pd);
if (err)
- rxe_err_pd(pd, "cleanup failed, err = %d", err);
+ rxe_err_pd(pd, "cleanup failed, err = %d\n", err);
return 0;
}
@@ -279,7 +279,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
err = rxe_add_to_pool_ah(&rxe->ah_pool, ah,
init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
if (err) {
- rxe_dbg_dev(rxe, "unable to create ah");
+ rxe_dbg_dev(rxe, "unable to create ah\n");
goto err_out;
}
@@ -288,7 +288,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
err = rxe_ah_chk_attr(ah, init_attr->ah_attr);
if (err) {
- rxe_dbg_ah(ah, "bad attr");
+ rxe_dbg_ah(ah, "bad attr\n");
goto err_cleanup;
}
@@ -298,7 +298,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
sizeof(uresp->ah_num));
if (err) {
err = -EFAULT;
- rxe_dbg_ah(ah, "unable to copy to user");
+ rxe_dbg_ah(ah, "unable to copy to user\n");
goto err_cleanup;
}
} else if (ah->is_user) {
@@ -314,9 +314,9 @@ static int rxe_create_ah(struct ib_ah *ibah,
err_cleanup:
cleanup_err = rxe_cleanup(ah);
if (cleanup_err)
- rxe_err_ah(ah, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_ah(ah, "cleanup failed, err = %d\n", cleanup_err);
err_out:
- rxe_err_ah(ah, "returned err = %d", err);
+ rxe_err_ah(ah, "returned err = %d\n", err);
return err;
}
@@ -327,7 +327,7 @@ static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
err = rxe_ah_chk_attr(ah, attr);
if (err) {
- rxe_dbg_ah(ah, "bad attr");
+ rxe_dbg_ah(ah, "bad attr\n");
goto err_out;
}
@@ -336,7 +336,7 @@ static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
return 0;
err_out:
- rxe_err_ah(ah, "returned err = %d", err);
+ rxe_err_ah(ah, "returned err = %d\n", err);
return err;
}
@@ -358,7 +358,7 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
err = rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
if (err)
- rxe_err_ah(ah, "cleanup failed, err = %d", err);
+ rxe_err_ah(ah, "cleanup failed, err = %d\n", err);
return 0;
}
@@ -376,7 +376,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
if (udata) {
if (udata->outlen < sizeof(*uresp)) {
err = -EINVAL;
- rxe_err_dev(rxe, "malformed udata");
+ rxe_err_dev(rxe, "malformed udata\n");
goto err_out;
}
uresp = udata->outbuf;
@@ -384,20 +384,20 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
if (init->srq_type != IB_SRQT_BASIC) {
err = -EOPNOTSUPP;
- rxe_dbg_dev(rxe, "srq type = %d, not supported",
+ rxe_dbg_dev(rxe, "srq type = %d, not supported\n",
init->srq_type);
goto err_out;
}
err = rxe_srq_chk_init(rxe, init);
if (err) {
- rxe_dbg_dev(rxe, "invalid init attributes");
+ rxe_dbg_dev(rxe, "invalid init attributes\n");
goto err_out;
}
err = rxe_add_to_pool(&rxe->srq_pool, srq);
if (err) {
- rxe_dbg_dev(rxe, "unable to create srq, err = %d", err);
+ rxe_dbg_dev(rxe, "unable to create srq, err = %d\n", err);
goto err_out;
}
@@ -406,7 +406,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
if (err) {
- rxe_dbg_srq(srq, "create srq failed, err = %d", err);
+ rxe_dbg_srq(srq, "create srq failed, err = %d\n", err);
goto err_cleanup;
}
@@ -415,9 +415,9 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
err_cleanup:
cleanup_err = rxe_cleanup(srq);
if (cleanup_err)
- rxe_err_srq(srq, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_srq(srq, "cleanup failed, err = %d\n", cleanup_err);
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -433,34 +433,34 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
if (udata) {
if (udata->inlen < sizeof(cmd)) {
err = -EINVAL;
- rxe_dbg_srq(srq, "malformed udata");
+ rxe_dbg_srq(srq, "malformed udata\n");
goto err_out;
}
err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
if (err) {
err = -EFAULT;
- rxe_dbg_srq(srq, "unable to read udata");
+ rxe_dbg_srq(srq, "unable to read udata\n");
goto err_out;
}
}
err = rxe_srq_chk_attr(rxe, srq, attr, mask);
if (err) {
- rxe_dbg_srq(srq, "bad init attributes");
+ rxe_dbg_srq(srq, "bad init attributes\n");
goto err_out;
}
err = rxe_srq_from_attr(rxe, srq, attr, mask, &cmd, udata);
if (err) {
- rxe_dbg_srq(srq, "bad attr");
+ rxe_dbg_srq(srq, "bad attr\n");
goto err_out;
}
return 0;
err_out:
- rxe_err_srq(srq, "returned err = %d", err);
+ rxe_err_srq(srq, "returned err = %d\n", err);
return err;
}
@@ -471,7 +471,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
if (srq->error) {
err = -EINVAL;
- rxe_dbg_srq(srq, "srq in error state");
+ rxe_dbg_srq(srq, "srq in error state\n");
goto err_out;
}
@@ -481,7 +481,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
return 0;
err_out:
- rxe_err_srq(srq, "returned err = %d", err);
+ rxe_err_srq(srq, "returned err = %d\n", err);
return err;
}
@@ -505,7 +505,7 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
if (err) {
*bad_wr = wr;
- rxe_err_srq(srq, "returned err = %d", err);
+ rxe_err_srq(srq, "returned err = %d\n", err);
}
return err;
@@ -518,7 +518,7 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
err = rxe_cleanup(srq);
if (err)
- rxe_err_srq(srq, "cleanup failed, err = %d", err);
+ rxe_err_srq(srq, "cleanup failed, err = %d\n", err);
return 0;
}
@@ -536,13 +536,13 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
if (udata) {
if (udata->inlen) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
+ rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
goto err_out;
}
if (udata->outlen < sizeof(*uresp)) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
+ rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
goto err_out;
}
@@ -554,25 +554,25 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
if (init->create_flags) {
err = -EOPNOTSUPP;
- rxe_dbg_dev(rxe, "unsupported create_flags, err = %d", err);
+ rxe_dbg_dev(rxe, "unsupported create_flags, err = %d\n", err);
goto err_out;
}
err = rxe_qp_chk_init(rxe, init);
if (err) {
- rxe_dbg_dev(rxe, "bad init attr, err = %d", err);
+ rxe_dbg_dev(rxe, "bad init attr, err = %d\n", err);
goto err_out;
}
err = rxe_add_to_pool(&rxe->qp_pool, qp);
if (err) {
- rxe_dbg_dev(rxe, "unable to create qp, err = %d", err);
+ rxe_dbg_dev(rxe, "unable to create qp, err = %d\n", err);
goto err_out;
}
err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
if (err) {
- rxe_dbg_qp(qp, "create qp failed, err = %d", err);
+ rxe_dbg_qp(qp, "create qp failed, err = %d\n", err);
goto err_cleanup;
}
@@ -582,9 +582,9 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
err_cleanup:
cleanup_err = rxe_cleanup(qp);
if (cleanup_err)
- rxe_err_qp(qp, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_qp(qp, "cleanup failed, err = %d\n", cleanup_err);
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -597,20 +597,20 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (mask & ~IB_QP_ATTR_STANDARD_BITS) {
err = -EOPNOTSUPP;
- rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d",
+ rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d\n",
mask, err);
goto err_out;
}
err = rxe_qp_chk_attr(rxe, qp, attr, mask);
if (err) {
- rxe_dbg_qp(qp, "bad mask/attr, err = %d", err);
+ rxe_dbg_qp(qp, "bad mask/attr, err = %d\n", err);
goto err_out;
}
err = rxe_qp_from_attr(qp, attr, mask, udata);
if (err) {
- rxe_dbg_qp(qp, "modify qp failed, err = %d", err);
+ rxe_dbg_qp(qp, "modify qp failed, err = %d\n", err);
goto err_out;
}
@@ -622,7 +622,7 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return 0;
err_out:
- rxe_err_qp(qp, "returned err = %d", err);
+ rxe_err_qp(qp, "returned err = %d\n", err);
return err;
}
@@ -644,18 +644,18 @@ static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
err = rxe_qp_chk_destroy(qp);
if (err) {
- rxe_dbg_qp(qp, "unable to destroy qp, err = %d", err);
+ rxe_dbg_qp(qp, "unable to destroy qp, err = %d\n", err);
goto err_out;
}
err = rxe_cleanup(qp);
if (err)
- rxe_err_qp(qp, "cleanup failed, err = %d", err);
+ rxe_err_qp(qp, "cleanup failed, err = %d\n", err);
return 0;
err_out:
- rxe_err_qp(qp, "returned err = %d", err);
+ rxe_err_qp(qp, "returned err = %d\n", err);
return err;
}
@@ -675,12 +675,12 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
do {
mask = wr_opcode_mask(ibwr->opcode, qp);
if (!mask) {
- rxe_err_qp(qp, "bad wr opcode for qp type");
+ rxe_err_qp(qp, "bad wr opcode for qp type\n");
break;
}
if (num_sge > sq->max_sge) {
- rxe_err_qp(qp, "num_sge > max_sge");
+ rxe_err_qp(qp, "num_sge > max_sge\n");
break;
}
@@ -689,27 +689,27 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
length += ibwr->sg_list[i].length;
if (length > (1UL << 31)) {
- rxe_err_qp(qp, "message length too long");
+ rxe_err_qp(qp, "message length too long\n");
break;
}
if (mask & WR_ATOMIC_MASK) {
if (length != 8) {
- rxe_err_qp(qp, "atomic length != 8");
+ rxe_err_qp(qp, "atomic length != 8\n");
break;
}
if (atomic_wr(ibwr)->remote_addr & 0x7) {
- rxe_err_qp(qp, "misaligned atomic address");
+ rxe_err_qp(qp, "misaligned atomic address\n");
break;
}
}
if (ibwr->send_flags & IB_SEND_INLINE) {
if (!(mask & WR_INLINE_MASK)) {
- rxe_err_qp(qp, "opcode doesn't support inline data");
+ rxe_err_qp(qp, "opcode doesn't support inline data\n");
break;
}
if (length > sq->max_inline) {
- rxe_err_qp(qp, "inline length too big");
+ rxe_err_qp(qp, "inline length too big\n");
break;
}
}
@@ -747,7 +747,7 @@ static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
case IB_WR_SEND:
break;
default:
- rxe_err_qp(qp, "bad wr opcode %d for UD/GSI QP",
+ rxe_err_qp(qp, "bad wr opcode %d for UD/GSI QP\n",
wr->opcode);
return -EINVAL;
}
@@ -795,7 +795,7 @@ static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
case IB_WR_ATOMIC_WRITE:
break;
default:
- rxe_err_qp(qp, "unsupported wr opcode %d",
+ rxe_err_qp(qp, "unsupported wr opcode %d\n",
wr->opcode);
return -EINVAL;
}
@@ -870,7 +870,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr)
full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP);
if (unlikely(full)) {
- rxe_err_qp(qp, "send queue full");
+ rxe_err_qp(qp, "send queue full\n");
return -ENOMEM;
}
@@ -922,14 +922,14 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
/* caller has already called destroy_qp */
if (WARN_ON_ONCE(!qp->valid)) {
spin_unlock_irqrestore(&qp->state_lock, flags);
- rxe_err_qp(qp, "qp has been destroyed");
+ rxe_err_qp(qp, "qp has been destroyed\n");
return -EINVAL;
}
if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
spin_unlock_irqrestore(&qp->state_lock, flags);
*bad_wr = wr;
- rxe_err_qp(qp, "qp not ready to send");
+ rxe_err_qp(qp, "qp not ready to send\n");
return -EINVAL;
}
spin_unlock_irqrestore(&qp->state_lock, flags);
@@ -959,13 +959,13 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
if (unlikely(full)) {
err = -ENOMEM;
- rxe_dbg("queue full");
+ rxe_dbg("queue full\n");
goto err_out;
}
if (unlikely(num_sge > rq->max_sge)) {
err = -EINVAL;
- rxe_dbg("bad num_sge > max_sge");
+ rxe_dbg("bad num_sge > max_sge\n");
goto err_out;
}
@@ -976,7 +976,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
/* IBA max message size is 2^31 */
if (length >= (1UL<<31)) {
err = -EINVAL;
- rxe_dbg("message length too long");
+ rxe_dbg("message length too long\n");
goto err_out;
}
@@ -996,7 +996,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
return 0;
err_out:
- rxe_dbg("returned err = %d", err);
+ rxe_dbg("returned err = %d\n", err);
return err;
}
@@ -1012,7 +1012,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
/* caller has already called destroy_qp */
if (WARN_ON_ONCE(!qp->valid)) {
spin_unlock_irqrestore(&qp->state_lock, flags);
- rxe_err_qp(qp, "qp has been destroyed");
+ rxe_err_qp(qp, "qp has been destroyed\n");
return -EINVAL;
}
@@ -1020,14 +1020,14 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
if (unlikely((qp_state(qp) < IB_QPS_INIT))) {
spin_unlock_irqrestore(&qp->state_lock, flags);
*bad_wr = wr;
- rxe_dbg_qp(qp, "qp not ready to post recv");
+ rxe_dbg_qp(qp, "qp not ready to post recv\n");
return -EINVAL;
}
spin_unlock_irqrestore(&qp->state_lock, flags);
if (unlikely(qp->srq)) {
*bad_wr = wr;
- rxe_dbg_qp(qp, "qp has srq, use post_srq_recv instead");
+ rxe_dbg_qp(qp, "qp has srq, use post_srq_recv instead\n");
return -EINVAL;
}
@@ -1065,7 +1065,7 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (udata) {
if (udata->outlen < sizeof(*uresp)) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
+ rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
goto err_out;
}
uresp = udata->outbuf;
@@ -1073,26 +1073,26 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (attr->flags) {
err = -EOPNOTSUPP;
- rxe_dbg_dev(rxe, "bad attr->flags, err = %d", err);
+ rxe_dbg_dev(rxe, "bad attr->flags, err = %d\n", err);
goto err_out;
}
err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
if (err) {
- rxe_dbg_dev(rxe, "bad init attributes, err = %d", err);
+ rxe_dbg_dev(rxe, "bad init attributes, err = %d\n", err);
goto err_out;
}
err = rxe_add_to_pool(&rxe->cq_pool, cq);
if (err) {
- rxe_dbg_dev(rxe, "unable to create cq, err = %d", err);
+ rxe_dbg_dev(rxe, "unable to create cq, err = %d\n", err);
goto err_out;
}
err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
uresp);
if (err) {
- rxe_dbg_cq(cq, "create cq failed, err = %d", err);
+ rxe_dbg_cq(cq, "create cq failed, err = %d\n", err);
goto err_cleanup;
}
@@ -1101,9 +1101,9 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
err_cleanup:
cleanup_err = rxe_cleanup(cq);
if (cleanup_err)
- rxe_err_cq(cq, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_cq(cq, "cleanup failed, err = %d\n", cleanup_err);
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -1117,7 +1117,7 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
if (udata) {
if (udata->outlen < sizeof(*uresp)) {
err = -EINVAL;
- rxe_dbg_cq(cq, "malformed udata");
+ rxe_dbg_cq(cq, "malformed udata\n");
goto err_out;
}
uresp = udata->outbuf;
@@ -1125,20 +1125,20 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
if (err) {
- rxe_dbg_cq(cq, "bad attr, err = %d", err);
+ rxe_dbg_cq(cq, "bad attr, err = %d\n", err);
goto err_out;
}
err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
if (err) {
- rxe_dbg_cq(cq, "resize cq failed, err = %d", err);
+ rxe_dbg_cq(cq, "resize cq failed, err = %d\n", err);
goto err_out;
}
return 0;
err_out:
- rxe_err_cq(cq, "returned err = %d", err);
+ rxe_err_cq(cq, "returned err = %d\n", err);
return err;
}
@@ -1202,18 +1202,18 @@ static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
*/
if (atomic_read(&cq->num_wq)) {
err = -EINVAL;
- rxe_dbg_cq(cq, "still in use");
+ rxe_dbg_cq(cq, "still in use\n");
goto err_out;
}
err = rxe_cleanup(cq);
if (err)
- rxe_err_cq(cq, "cleanup failed, err = %d", err);
+ rxe_err_cq(cq, "cleanup failed, err = %d\n", err);
return 0;
err_out:
- rxe_err_cq(cq, "returned err = %d", err);
+ rxe_err_cq(cq, "returned err = %d\n", err);
return err;
}
@@ -1231,7 +1231,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
err = rxe_add_to_pool(&rxe->mr_pool, mr);
if (err) {
- rxe_dbg_dev(rxe, "unable to create mr");
+ rxe_dbg_dev(rxe, "unable to create mr\n");
goto err_free;
}
@@ -1245,7 +1245,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
err_free:
kfree(mr);
- rxe_err_pd(pd, "returned err = %d", err);
+ rxe_err_pd(pd, "returned err = %d\n", err);
return ERR_PTR(err);
}
@@ -1259,7 +1259,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
int err, cleanup_err;
if (access & ~RXE_ACCESS_SUPPORTED_MR) {
- rxe_err_pd(pd, "access = %#x not supported (%#x)", access,
+ rxe_err_pd(pd, "access = %#x not supported (%#x)\n", access,
RXE_ACCESS_SUPPORTED_MR);
return ERR_PTR(-EOPNOTSUPP);
}
@@ -1270,7 +1270,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
err = rxe_add_to_pool(&rxe->mr_pool, mr);
if (err) {
- rxe_dbg_pd(pd, "unable to create mr");
+ rxe_dbg_pd(pd, "unable to create mr\n");
goto err_free;
}
@@ -1278,9 +1278,9 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
mr->ibmr.pd = ibpd;
mr->ibmr.device = ibpd->device;
- err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
+ err = rxe_mr_init_user(rxe, start, length, access, mr);
if (err) {
- rxe_dbg_mr(mr, "reg_user_mr failed, err = %d", err);
+ rxe_dbg_mr(mr, "reg_user_mr failed, err = %d\n", err);
goto err_cleanup;
}
@@ -1290,10 +1290,10 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
err_cleanup:
cleanup_err = rxe_cleanup(mr);
if (cleanup_err)
- rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
err_free:
kfree(mr);
- rxe_err_pd(pd, "returned err = %d", err);
+ rxe_err_pd(pd, "returned err = %d\n", err);
return ERR_PTR(err);
}
@@ -1310,7 +1310,7 @@ static struct ib_mr *rxe_rereg_user_mr(struct ib_mr *ibmr, int flags,
* rereg_pd and rereg_access
*/
if (flags & ~RXE_MR_REREG_SUPPORTED) {
- rxe_err_mr(mr, "flags = %#x not supported", flags);
+ rxe_err_mr(mr, "flags = %#x not supported\n", flags);
return ERR_PTR(-EOPNOTSUPP);
}
@@ -1322,7 +1322,7 @@ static struct ib_mr *rxe_rereg_user_mr(struct ib_mr *ibmr, int flags,
if (flags & IB_MR_REREG_ACCESS) {
if (access & ~RXE_ACCESS_SUPPORTED_MR) {
- rxe_err_mr(mr, "access = %#x not supported", access);
+ rxe_err_mr(mr, "access = %#x not supported\n", access);
return ERR_PTR(-EOPNOTSUPP);
}
mr->access = access;
@@ -1341,7 +1341,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
if (mr_type != IB_MR_TYPE_MEM_REG) {
err = -EINVAL;
- rxe_dbg_pd(pd, "mr type %d not supported, err = %d",
+ rxe_dbg_pd(pd, "mr type %d not supported, err = %d\n",
mr_type, err);
goto err_out;
}
@@ -1360,7 +1360,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
err = rxe_mr_init_fast(max_num_sg, mr);
if (err) {
- rxe_dbg_mr(mr, "alloc_mr failed, err = %d", err);
+ rxe_dbg_mr(mr, "alloc_mr failed, err = %d\n", err);
goto err_cleanup;
}
@@ -1370,11 +1370,11 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
err_cleanup:
cleanup_err = rxe_cleanup(mr);
if (cleanup_err)
- rxe_err_mr(mr, "cleanup failed, err = %d", err);
+ rxe_err_mr(mr, "cleanup failed, err = %d\n", err);
err_free:
kfree(mr);
err_out:
- rxe_err_pd(pd, "returned err = %d", err);
+ rxe_err_pd(pd, "returned err = %d\n", err);
return ERR_PTR(err);
}
@@ -1386,19 +1386,19 @@ static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
/* See IBA 10.6.7.2.6 */
if (atomic_read(&mr->num_mw) > 0) {
err = -EINVAL;
- rxe_dbg_mr(mr, "mr has mw's bound");
+ rxe_dbg_mr(mr, "mr has mw's bound\n");
goto err_out;
}
cleanup_err = rxe_cleanup(mr);
if (cleanup_err)
- rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
kfree_rcu_mightsleep(mr);
return 0;
err_out:
- rxe_err_mr(mr, "returned err = %d", err);
+ rxe_err_mr(mr, "returned err = %d\n", err);
return err;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 319d4288eddde..8a4ab9ff0a681 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -287,8 +287,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
ah = ipoib_create_ah(dev, priv->pd, &av);
if (IS_ERR(ah)) {
- ipoib_warn(priv, "ib_address_create failed %ld\n",
- -PTR_ERR(ah));
+ ipoib_warn(priv, "ib_address_create failed %pe\n", ah);
/* use original error */
return PTR_ERR(ah);
}
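The ipoib hunk above swaps the hand-rolled -PTR_ERR(ah) argument for the %pe printk specifier, which decodes an ERR_PTR into its symbolic name (for example "-ENOMEM") and falls back to the numeric value when symbolic error names are not built in. A minimal usage illustration of the same pattern:

	void *p = some_function_returning_err_ptr();	/* hypothetical callee */

	if (IS_ERR(p)) {
		/* %pe takes the error pointer itself; no PTR_ERR() conversion needed */
		pr_warn("operation failed: %pe\n", p);
		return PTR_ERR(p);
	}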
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
index d3c436ead6946..4aa80c9388f05 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -133,7 +133,7 @@ static ssize_t mpath_policy_store(struct device *dev,
/* distinguish "mi" and "min-latency" with length */
len = strnlen(buf, NAME_MAX);
- if (buf[len - 1] == '\n')
+ if (len && buf[len - 1] == '\n')
len--;
if (!strncasecmp(buf, "round-robin", 11) ||
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 040234c01be4d..9632afbd727b6 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3209,7 +3209,6 @@ static int srpt_add_one(struct ib_device *device)
INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
srpt_event_handler);
- ib_register_event_handler(&sdev->event_handler);
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
@@ -3232,6 +3231,7 @@ static int srpt_add_one(struct ib_device *device)
}
}
+ ib_register_event_handler(&sdev->event_handler);
spin_lock(&srpt_dev_lock);
list_add_tail(&sdev->list, &srpt_dev_list);
spin_unlock(&srpt_dev_lock);
@@ -3242,7 +3242,6 @@ static int srpt_add_one(struct ib_device *device)
err_port:
srpt_unregister_mad_agent(sdev, i);
- ib_unregister_event_handler(&sdev->event_handler);
err_cm:
if (sdev->cm_id)
ib_destroy_cm_id(sdev->cm_id);
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 34f416a3ebcb7..cfcc81c47b50f 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -38,7 +38,7 @@ static DEFINE_MUTEX(gameport_mutex);
static LIST_HEAD(gameport_list);
-static struct bus_type gameport_bus;
+static const struct bus_type gameport_bus;
static void gameport_add_port(struct gameport *gameport);
static void gameport_attach_driver(struct gameport_driver *drv);
@@ -813,7 +813,7 @@ static int gameport_bus_match(struct device *dev, struct device_driver *drv)
return !gameport_drv->ignore;
}
-static struct bus_type gameport_bus = {
+static const struct bus_type gameport_bus = {
.name = "gameport",
.dev_groups = gameport_device_groups,
.drv_groups = gameport_driver_groups,
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
index 0e935914bc3aa..6bbf3806ea372 100644
--- a/drivers/input/input-leds.c
+++ b/drivers/input/input-leds.c
@@ -18,6 +18,12 @@
#define VT_TRIGGER(_name) .trigger = NULL
#endif
+#if IS_ENABLED(CONFIG_SND_CTL_LED)
+#define AUDIO_TRIGGER(_name) .trigger = _name
+#else
+#define AUDIO_TRIGGER(_name) .trigger = NULL
+#endif
+
static const struct {
const char *name;
const char *trigger;
@@ -29,7 +35,7 @@ static const struct {
[LED_KANA] = { "kana", VT_TRIGGER("kbd-kanalock") },
[LED_SLEEP] = { "sleep" } ,
[LED_SUSPEND] = { "suspend" },
- [LED_MUTE] = { "mute" },
+ [LED_MUTE] = { "mute", AUDIO_TRIGGER("audio-mute") },
[LED_MISC] = { "misc" },
[LED_MAIL] = { "mail" },
[LED_CHARGING] = { "charging" },
diff --git a/drivers/input/input.c b/drivers/input/input.c
index f71ea4fb173fd..7114854375678 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1918,7 +1918,7 @@ static char *input_devnode(const struct device *dev, umode_t *mode)
return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
}
-struct class input_class = {
+const struct class input_class = {
.name = "input",
.devnode = input_devnode,
};
@@ -2629,17 +2629,15 @@ int input_get_new_minor(int legacy_base, unsigned int legacy_num,
* locking is needed here.
*/
if (legacy_base >= 0) {
- int minor = ida_simple_get(&input_ida,
- legacy_base,
- legacy_base + legacy_num,
- GFP_KERNEL);
+ int minor = ida_alloc_range(&input_ida, legacy_base,
+ legacy_base + legacy_num - 1,
+ GFP_KERNEL);
if (minor >= 0 || !allow_dynamic)
return minor;
}
- return ida_simple_get(&input_ida,
- INPUT_FIRST_DYNAMIC_DEV, INPUT_MAX_CHAR_DEVICES,
- GFP_KERNEL);
+ return ida_alloc_range(&input_ida, INPUT_FIRST_DYNAMIC_DEV,
+ INPUT_MAX_CHAR_DEVICES - 1, GFP_KERNEL);
}
EXPORT_SYMBOL(input_get_new_minor);
@@ -2652,7 +2650,7 @@ EXPORT_SYMBOL(input_get_new_minor);
*/
void input_free_minor(unsigned int minor)
{
- ida_simple_remove(&input_ida, minor);
+ ida_free(&input_ida, minor);
}
EXPORT_SYMBOL(input_free_minor);
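The input.c conversion from ida_simple_get() to ida_alloc_range() above is not purely mechanical: ida_simple_get() treats its 'end' argument as exclusive, while ida_alloc_range() takes an inclusive 'max', which is why the new calls subtract one from the upper bounds. A minimal before/after sketch (example_ida is illustrative only):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

int example_alloc(void)
{
	/* legacy form: allocates an id in [32, 64), i.e. 32..63 */
	/* return ida_simple_get(&example_ida, 32, 64, GFP_KERNEL); */

	/* replacement: 'max' is inclusive, so the same range is written 32..63 */
	return ida_alloc_range(&example_ida, 32, 63, GFP_KERNEL);
}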
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 14c828adebf78..f50848ed5575d 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -127,6 +127,7 @@ static const struct xpad_device {
u8 mapping;
u8 xtype;
} xpad_device[] = {
+ /* Please keep this list sorted by vendor and product ID. */
{ 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
{ 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 },
{ 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 },
@@ -152,9 +153,9 @@ static const struct xpad_device {
{ 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", MAP_PADDLES, XTYPE_XBOXONE },
- { 0x045e, 0x0b00, "Microsoft X-Box One Elite 2 pad", MAP_PADDLES, XTYPE_XBOXONE },
{ 0x045e, 0x02ea, "Microsoft X-Box One S pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
+ { 0x045e, 0x0b00, "Microsoft X-Box One Elite 2 pad", MAP_PADDLES, XTYPE_XBOXONE },
{ 0x045e, 0x0b0a, "Microsoft X-Box Adaptive Controller", MAP_PROFILE_BUTTON, XTYPE_XBOXONE },
{ 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
{ 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
@@ -340,7 +341,6 @@ static const struct xpad_device {
{ 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
{ 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
{ 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
- { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
@@ -355,9 +355,9 @@ static const struct xpad_device {
{ 0x24c6, 0x5502, "Hori Fighting Stick VX Alt", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5503, "Hori Fighting Edge", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 },
- { 0x24c6, 0x5510, "Hori Fighting Commander ONE (Xbox 360/PC Mode)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x550d, "Hori GEM Xbox controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x550e, "Hori Real Arcade Pro V Kai 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x24c6, 0x5510, "Hori Fighting Commander ONE (Xbox 360/PC Mode)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x561a, "PowerA FUSION Controller", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x5b00, "ThrustMaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
@@ -366,8 +366,11 @@ static const struct xpad_device {
{ 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x2563, 0x058d, "OneXPlayer Gamepad", 0, XTYPE_XBOX360 },
+ { 0x294b, 0x3303, "Snakebyte GAMEPAD BASE X", 0, XTYPE_XBOXONE },
+ { 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE },
{ 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller fox Xbox", 0, XTYPE_XBOXONE },
{ 0x2dc8, 0x3106, "8BitDo Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
+ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
@@ -465,6 +468,10 @@ static const signed short xpad_btn_paddles[] = {
{ XPAD_XBOXONE_VENDOR_PROTOCOL((vend), 208) }
static const struct usb_device_id xpad_table[] = {
+ /*
+ * Please keep this list sorted by vendor ID. Note that there are 2
+ * macros - XPAD_XBOX360_VENDOR and XPAD_XBOXONE_VENDOR.
+ */
{ USB_INTERFACE_INFO('X', 'B', 0) }, /* Xbox USB-IF not-approved class */
XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 controller */
XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */
@@ -507,6 +514,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA controllers */
XPAD_XBOX360_VENDOR(0x2563), /* OneXPlayer Gamepad */
XPAD_XBOX360_VENDOR(0x260d), /* Dareu H101 */
+ XPAD_XBOXONE_VENDOR(0x294b), /* Snakebyte */
XPAD_XBOX360_VENDOR(0x2c22), /* Qanba Controllers */
XPAD_XBOX360_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller */
XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller for Xbox */
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index e305c44cd0aa9..ecfae0b0b6aa2 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -26,7 +26,7 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Amiga keyboard driver");
MODULE_LICENSE("GPL");
-#ifdef CONFIG_HW_CONSOLE
+#ifdef CONFIG_VT
static unsigned char amikbd_keycode[0x78] __initdata = {
[0] = KEY_GRAVE,
[1] = KEY_1,
@@ -148,9 +148,9 @@ static void __init amikbd_init_console_keymaps(void)
memcpy(key_maps[i], temp_map, sizeof(temp_map));
}
}
-#else /* !CONFIG_HW_CONSOLE */
+#else /* !CONFIG_VT */
static inline void amikbd_init_console_keymaps(void) {}
-#endif /* !CONFIG_HW_CONSOLE */
+#endif /* !CONFIG_VT */
static const char *amikbd_messages[8] = {
[0] = KERN_ALERT "amikbd: Ctrl-Amiga-Amiga reset warning!!\n",
diff --git a/drivers/input/keyboard/bcm-keypad.c b/drivers/input/keyboard/bcm-keypad.c
index f3c3746acd4cf..6b46f83a9edb8 100644
--- a/drivers/input/keyboard/bcm-keypad.c
+++ b/drivers/input/keyboard/bcm-keypad.c
@@ -418,7 +418,7 @@ static struct platform_driver bcm_kp_device_driver = {
.probe = bcm_kp_probe,
.driver = {
.name = "bcm-keypad",
- .of_match_table = of_match_ptr(bcm_kp_of_match),
+ .of_match_table = bcm_kp_of_match,
}
};
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 50fa764c82d2b..695c03e075b56 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -28,7 +28,9 @@ struct matrix_keypad {
struct input_dev *input_dev;
unsigned int row_shift;
- DECLARE_BITMAP(disabled_gpios, MATRIX_MAX_ROWS);
+ unsigned int row_irqs[MATRIX_MAX_ROWS];
+ unsigned int num_row_irqs;
+ DECLARE_BITMAP(wakeup_enabled_irqs, MATRIX_MAX_ROWS);
uint32_t last_key_state[MATRIX_MAX_COLS];
struct delayed_work work;
@@ -85,28 +87,18 @@ static bool row_asserted(const struct matrix_keypad_platform_data *pdata,
static void enable_row_irqs(struct matrix_keypad *keypad)
{
- const struct matrix_keypad_platform_data *pdata = keypad->pdata;
int i;
- if (pdata->clustered_irq > 0)
- enable_irq(pdata->clustered_irq);
- else {
- for (i = 0; i < pdata->num_row_gpios; i++)
- enable_irq(gpio_to_irq(pdata->row_gpios[i]));
- }
+ for (i = 0; i < keypad->num_row_irqs; i++)
+ enable_irq(keypad->row_irqs[i]);
}
static void disable_row_irqs(struct matrix_keypad *keypad)
{
- const struct matrix_keypad_platform_data *pdata = keypad->pdata;
int i;
- if (pdata->clustered_irq > 0)
- disable_irq_nosync(pdata->clustered_irq);
- else {
- for (i = 0; i < pdata->num_row_gpios; i++)
- disable_irq_nosync(gpio_to_irq(pdata->row_gpios[i]));
- }
+ for (i = 0; i < keypad->num_row_irqs; i++)
+ disable_irq_nosync(keypad->row_irqs[i]);
}
/*
@@ -232,44 +224,20 @@ static void matrix_keypad_stop(struct input_dev *dev)
static void matrix_keypad_enable_wakeup(struct matrix_keypad *keypad)
{
- const struct matrix_keypad_platform_data *pdata = keypad->pdata;
- unsigned int gpio;
int i;
- if (pdata->clustered_irq > 0) {
- if (enable_irq_wake(pdata->clustered_irq) == 0)
- keypad->gpio_all_disabled = true;
- } else {
-
- for (i = 0; i < pdata->num_row_gpios; i++) {
- if (!test_bit(i, keypad->disabled_gpios)) {
- gpio = pdata->row_gpios[i];
-
- if (enable_irq_wake(gpio_to_irq(gpio)) == 0)
- __set_bit(i, keypad->disabled_gpios);
- }
- }
- }
+ for_each_clear_bit(i, keypad->wakeup_enabled_irqs, keypad->num_row_irqs)
+ if (enable_irq_wake(keypad->row_irqs[i]) == 0)
+ __set_bit(i, keypad->wakeup_enabled_irqs);
}
static void matrix_keypad_disable_wakeup(struct matrix_keypad *keypad)
{
- const struct matrix_keypad_platform_data *pdata = keypad->pdata;
- unsigned int gpio;
int i;
- if (pdata->clustered_irq > 0) {
- if (keypad->gpio_all_disabled) {
- disable_irq_wake(pdata->clustered_irq);
- keypad->gpio_all_disabled = false;
- }
- } else {
- for (i = 0; i < pdata->num_row_gpios; i++) {
- if (test_and_clear_bit(i, keypad->disabled_gpios)) {
- gpio = pdata->row_gpios[i];
- disable_irq_wake(gpio_to_irq(gpio));
- }
- }
+ for_each_set_bit(i, keypad->wakeup_enabled_irqs, keypad->num_row_irqs) {
+ disable_irq_wake(keypad->row_irqs[i]);
+ __clear_bit(i, keypad->wakeup_enabled_irqs);
}
}
@@ -306,96 +274,83 @@ static int matrix_keypad_init_gpio(struct platform_device *pdev,
struct matrix_keypad *keypad)
{
const struct matrix_keypad_platform_data *pdata = keypad->pdata;
- int i, err;
+ int i, irq, err;
/* initialized strobe lines as outputs, activated */
for (i = 0; i < pdata->num_col_gpios; i++) {
- err = gpio_request(pdata->col_gpios[i], "matrix_kbd_col");
+ err = devm_gpio_request(&pdev->dev,
+ pdata->col_gpios[i], "matrix_kbd_col");
if (err) {
dev_err(&pdev->dev,
"failed to request GPIO%d for COL%d\n",
pdata->col_gpios[i], i);
- goto err_free_cols;
+ return err;
}
gpio_direction_output(pdata->col_gpios[i], !pdata->active_low);
}
for (i = 0; i < pdata->num_row_gpios; i++) {
- err = gpio_request(pdata->row_gpios[i], "matrix_kbd_row");
+ err = devm_gpio_request(&pdev->dev,
+ pdata->row_gpios[i], "matrix_kbd_row");
if (err) {
dev_err(&pdev->dev,
"failed to request GPIO%d for ROW%d\n",
pdata->row_gpios[i], i);
- goto err_free_rows;
+ return err;
}
gpio_direction_input(pdata->row_gpios[i]);
}
if (pdata->clustered_irq > 0) {
- err = request_any_context_irq(pdata->clustered_irq,
+ err = devm_request_any_context_irq(&pdev->dev,
+ pdata->clustered_irq,
matrix_keypad_interrupt,
pdata->clustered_irq_flags,
"matrix-keypad", keypad);
if (err < 0) {
dev_err(&pdev->dev,
"Unable to acquire clustered interrupt\n");
- goto err_free_rows;
+ return err;
}
+
+ keypad->row_irqs[0] = pdata->clustered_irq;
+ keypad->num_row_irqs = 1;
} else {
for (i = 0; i < pdata->num_row_gpios; i++) {
- err = request_any_context_irq(
- gpio_to_irq(pdata->row_gpios[i]),
+ irq = gpio_to_irq(pdata->row_gpios[i]);
+ if (irq < 0) {
+ err = irq;
+ dev_err(&pdev->dev,
+ "Unable to convert GPIO line %i to irq: %d\n",
+ pdata->row_gpios[i], err);
+ return err;
+ }
+
+ err = devm_request_any_context_irq(&pdev->dev,
+ irq,
matrix_keypad_interrupt,
IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING,
"matrix-keypad", keypad);
if (err < 0) {
dev_err(&pdev->dev,
"Unable to acquire interrupt for GPIO line %i\n",
pdata->row_gpios[i]);
- goto err_free_irqs;
+ return err;
}
+
+ keypad->row_irqs[i] = irq;
}
+
+ keypad->num_row_irqs = pdata->num_row_gpios;
}
/* initialized as disabled - enabled by input->open */
disable_row_irqs(keypad);
- return 0;
-
-err_free_irqs:
- while (--i >= 0)
- free_irq(gpio_to_irq(pdata->row_gpios[i]), keypad);
- i = pdata->num_row_gpios;
-err_free_rows:
- while (--i >= 0)
- gpio_free(pdata->row_gpios[i]);
- i = pdata->num_col_gpios;
-err_free_cols:
- while (--i >= 0)
- gpio_free(pdata->col_gpios[i]);
-
- return err;
-}
-
-static void matrix_keypad_free_gpio(struct matrix_keypad *keypad)
-{
- const struct matrix_keypad_platform_data *pdata = keypad->pdata;
- int i;
- if (pdata->clustered_irq > 0) {
- free_irq(pdata->clustered_irq, keypad);
- } else {
- for (i = 0; i < pdata->num_row_gpios; i++)
- free_irq(gpio_to_irq(pdata->row_gpios[i]), keypad);
- }
-
- for (i = 0; i < pdata->num_row_gpios; i++)
- gpio_free(pdata->row_gpios[i]);
-
- for (i = 0; i < pdata->num_col_gpios; i++)
- gpio_free(pdata->col_gpios[i]);
+ return 0;
}
#ifdef CONFIG_OF
@@ -494,12 +449,13 @@ static int matrix_keypad_probe(struct platform_device *pdev)
return -EINVAL;
}
- keypad = kzalloc(sizeof(struct matrix_keypad), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!keypad || !input_dev) {
- err = -ENOMEM;
- goto err_free_mem;
- }
+ keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad), GFP_KERNEL);
+ if (!keypad)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev)
+ return -ENOMEM;
keypad->input_dev = input_dev;
keypad->pdata = pdata;
@@ -510,7 +466,6 @@ static int matrix_keypad_probe(struct platform_device *pdev)
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
- input_dev->dev.parent = &pdev->dev;
input_dev->open = matrix_keypad_start;
input_dev->close = matrix_keypad_stop;
@@ -520,7 +475,7 @@ static int matrix_keypad_probe(struct platform_device *pdev)
NULL, input_dev);
if (err) {
dev_err(&pdev->dev, "failed to build keymap\n");
- goto err_free_mem;
+ return -ENOMEM;
}
if (!pdata->no_autorepeat)
@@ -530,32 +485,16 @@ static int matrix_keypad_probe(struct platform_device *pdev)
err = matrix_keypad_init_gpio(pdev, keypad);
if (err)
- goto err_free_mem;
+ return err;
err = input_register_device(keypad->input_dev);
if (err)
- goto err_free_gpio;
+ return err;
device_init_wakeup(&pdev->dev, pdata->wakeup);
platform_set_drvdata(pdev, keypad);
return 0;
-
-err_free_gpio:
- matrix_keypad_free_gpio(keypad);
-err_free_mem:
- input_free_device(input_dev);
- kfree(keypad);
- return err;
-}
-
-static void matrix_keypad_remove(struct platform_device *pdev)
-{
- struct matrix_keypad *keypad = platform_get_drvdata(pdev);
-
- matrix_keypad_free_gpio(keypad);
- input_unregister_device(keypad->input_dev);
- kfree(keypad);
}
#ifdef CONFIG_OF
@@ -568,7 +507,6 @@ MODULE_DEVICE_TABLE(of, matrix_keypad_dt_match);
static struct platform_driver matrix_keypad_driver = {
.probe = matrix_keypad_probe,
- .remove_new = matrix_keypad_remove,
.driver = {
.name = "matrix-keypad",
.pm = pm_sleep_ptr(&matrix_keypad_pm_ops),
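The matrix_keypad changes above replace manual allocation and GPIO/IRQ bookkeeping with device-managed (devm_*) variants, which is why both the goto-based unwinding in probe and the remove callback can be dropped. A minimal, hypothetical sketch of the pattern follows; struct example and example_irq() are placeholders, not taken from this driver.

/*
 * Illustrative only: every devm-managed resource is released automatically,
 * in reverse order, when probe fails or the device is unbound.
 */
struct example {
	int irq;
};

static irqreturn_t example_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	struct example *priv;
	int error;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	error = devm_request_irq(&pdev->dev, priv->irq, example_irq, 0,
				 "example", priv);
	if (error)
		return error;	/* priv is freed automatically */

	return 0;
}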
diff --git a/drivers/input/misc/88pm80x_onkey.c b/drivers/input/misc/88pm80x_onkey.c
index 31f0702c3d01e..4b0685f961138 100644
--- a/drivers/input/misc/88pm80x_onkey.c
+++ b/drivers/input/misc/88pm80x_onkey.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Marvell 88PM80x ONKEY driver
*
* Copyright (C) 2012 Marvell International Ltd.
* Haojian Zhuang <haojian.zhuang@marvell.com>
* Qiao Zhou <zhouqiao@marvell.com>
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of this
- * archive for more details.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
index 36aeeae776110..9ca5a743f19fe 100644
--- a/drivers/input/misc/iqs7222.c
+++ b/drivers/input/misc/iqs7222.c
@@ -622,6 +622,118 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
{
.prod_num = IQS7222_PROD_NUM_D,
+ .fw_major = 1,
+ .fw_minor = 2,
+ .touch_link = 1770,
+ .allow_offset = 9,
+ .event_offset = 10,
+ .comms_offset = 11,
+ .reg_grps = {
+ [IQS7222_REG_GRP_STAT] = {
+ .base = IQS7222_SYS_STATUS,
+ .num_row = 1,
+ .num_col = 7,
+ },
+ [IQS7222_REG_GRP_CYCLE] = {
+ .base = 0x8000,
+ .num_row = 7,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_GLBL] = {
+ .base = 0x8700,
+ .num_row = 1,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_BTN] = {
+ .base = 0x9000,
+ .num_row = 14,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_CHAN] = {
+ .base = 0xA000,
+ .num_row = 14,
+ .num_col = 4,
+ },
+ [IQS7222_REG_GRP_FILT] = {
+ .base = 0xAE00,
+ .num_row = 1,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_TPAD] = {
+ .base = 0xB000,
+ .num_row = 1,
+ .num_col = 24,
+ },
+ [IQS7222_REG_GRP_GPIO] = {
+ .base = 0xC000,
+ .num_row = 3,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_SYS] = {
+ .base = IQS7222_SYS_SETUP,
+ .num_row = 1,
+ .num_col = 12,
+ },
+ },
+ },
+ {
+ .prod_num = IQS7222_PROD_NUM_D,
+ .fw_major = 1,
+ .fw_minor = 1,
+ .touch_link = 1774,
+ .allow_offset = 9,
+ .event_offset = 10,
+ .comms_offset = 11,
+ .reg_grps = {
+ [IQS7222_REG_GRP_STAT] = {
+ .base = IQS7222_SYS_STATUS,
+ .num_row = 1,
+ .num_col = 7,
+ },
+ [IQS7222_REG_GRP_CYCLE] = {
+ .base = 0x8000,
+ .num_row = 7,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_GLBL] = {
+ .base = 0x8700,
+ .num_row = 1,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_BTN] = {
+ .base = 0x9000,
+ .num_row = 14,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_CHAN] = {
+ .base = 0xA000,
+ .num_row = 14,
+ .num_col = 4,
+ },
+ [IQS7222_REG_GRP_FILT] = {
+ .base = 0xAE00,
+ .num_row = 1,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_TPAD] = {
+ .base = 0xB000,
+ .num_row = 1,
+ .num_col = 24,
+ },
+ [IQS7222_REG_GRP_GPIO] = {
+ .base = 0xC000,
+ .num_row = 3,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_SYS] = {
+ .base = IQS7222_SYS_SETUP,
+ .num_row = 1,
+ .num_col = 12,
+ },
+ },
+ },
+ {
+ .prod_num = IQS7222_PROD_NUM_D,
.fw_major = 0,
.fw_minor = 37,
.touch_link = 1770,
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 32cc4c62a716c..833b643f06164 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -439,16 +439,4 @@ config MOUSE_SYNAPTICS_USB
To compile this driver as a module, choose M here: the
module will be called synaptics_usb.
-config MOUSE_NAVPOINT_PXA27x
- tristate "Synaptics NavPoint (PXA27x SSP/SPI)"
- depends on PXA27x && PXA_SSP
- help
- This driver adds support for the Synaptics NavPoint touchpad connected
- to a PXA27x SSP port in SPI slave mode. The device emulates a mouse;
- a tap or tap-and-a-half drag gesture emulates the left mouse button.
- For example, use the xf86-input-evdev driver for an X pointing device.
-
- To compile this driver as a module, choose M here: the
- module will be called navpoint.
-
endif
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 92b3204ce84e3..a1336d5bee6f3 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o
obj-$(CONFIG_MOUSE_INPORT) += inport.o
obj-$(CONFIG_MOUSE_LOGIBM) += logibm.o
obj-$(CONFIG_MOUSE_MAPLE) += maplemouse.o
-obj-$(CONFIG_MOUSE_NAVPOINT_PXA27x) += navpoint.o
obj-$(CONFIG_MOUSE_PC110PAD) += pc110pad.o
obj-$(CONFIG_MOUSE_PS2) += psmouse.o
obj-$(CONFIG_MOUSE_RISCPC) += rpcmouse.o
diff --git a/drivers/input/mouse/navpoint.c b/drivers/input/mouse/navpoint.c
deleted file mode 100644
index ba757783c258a..0000000000000
--- a/drivers/input/mouse/navpoint.c
+++ /dev/null
@@ -1,350 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Synaptics NavPoint (PXA27x SSP/SPI) driver.
- *
- * Copyright (C) 2012 Paul Parsons <lost.distance@yahoo.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/input.h>
-#include <linux/input/navpoint.h>
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
-#include <linux/pxa2xx_ssp.h>
-#include <linux/slab.h>
-
-/*
- * Synaptics Modular Embedded Protocol: Module Packet Format.
- * Module header byte 2:0 = Length (# bytes that follow)
- * Module header byte 4:3 = Control
- * Module header byte 7:5 = Module Address
- */
-#define HEADER_LENGTH(byte) ((byte) & 0x07)
-#define HEADER_CONTROL(byte) (((byte) >> 3) & 0x03)
-#define HEADER_ADDRESS(byte) ((byte) >> 5)
-
-struct navpoint {
- struct ssp_device *ssp;
- struct input_dev *input;
- struct device *dev;
- struct gpio_desc *gpiod;
- int index;
- u8 data[1 + HEADER_LENGTH(0xff)];
-};
-
-/*
- * Initialization values for SSCR0_x, SSCR1_x, SSSR_x.
- */
-static const u32 sscr0 = 0
- | SSCR0_TUM /* TIM = 1; No TUR interrupts */
- | SSCR0_RIM /* RIM = 1; No ROR interrupts */
- | SSCR0_SSE /* SSE = 1; SSP enabled */
- | SSCR0_Motorola /* FRF = 0; Motorola SPI */
- | SSCR0_DataSize(16) /* DSS = 15; Data size = 16-bit */
- ;
-static const u32 sscr1 = 0
- | SSCR1_SCFR /* SCFR = 1; SSPSCLK only during transfers */
- | SSCR1_SCLKDIR /* SCLKDIR = 1; Slave mode */
- | SSCR1_SFRMDIR /* SFRMDIR = 1; Slave mode */
- | SSCR1_RWOT /* RWOT = 1; Receive without transmit mode */
- | SSCR1_RxTresh(1) /* RFT = 0; Receive FIFO threshold = 1 */
- | SSCR1_SPH /* SPH = 1; SSPSCLK inactive 0.5 + 1 cycles */
- | SSCR1_RIE /* RIE = 1; Receive FIFO interrupt enabled */
- ;
-static const u32 sssr = 0
- | SSSR_BCE /* BCE = 1; Clear BCE */
- | SSSR_TUR /* TUR = 1; Clear TUR */
- | SSSR_EOC /* EOC = 1; Clear EOC */
- | SSSR_TINT /* TINT = 1; Clear TINT */
- | SSSR_PINT /* PINT = 1; Clear PINT */
- | SSSR_ROR /* ROR = 1; Clear ROR */
- ;
-
-/*
- * MEP Query $22: Touchpad Coordinate Range Query is not supported by
- * the NavPoint module, so sampled values provide the default limits.
- */
-#define NAVPOINT_X_MIN 1278
-#define NAVPOINT_X_MAX 5340
-#define NAVPOINT_Y_MIN 1572
-#define NAVPOINT_Y_MAX 4396
-#define NAVPOINT_PRESSURE_MIN 0
-#define NAVPOINT_PRESSURE_MAX 255
-
-static void navpoint_packet(struct navpoint *navpoint)
-{
- int finger;
- int gesture;
- int x, y, z;
-
- switch (navpoint->data[0]) {
- case 0xff: /* Garbage (packet?) between reset and Hello packet */
- case 0x00: /* Module 0, NULL packet */
- break;
-
- case 0x0e: /* Module 0, Absolute packet */
- finger = (navpoint->data[1] & 0x01);
- gesture = (navpoint->data[1] & 0x02);
- x = ((navpoint->data[2] & 0x1f) << 8) | navpoint->data[3];
- y = ((navpoint->data[4] & 0x1f) << 8) | navpoint->data[5];
- z = navpoint->data[6];
- input_report_key(navpoint->input, BTN_TOUCH, finger);
- input_report_abs(navpoint->input, ABS_X, x);
- input_report_abs(navpoint->input, ABS_Y, y);
- input_report_abs(navpoint->input, ABS_PRESSURE, z);
- input_report_key(navpoint->input, BTN_TOOL_FINGER, finger);
- input_report_key(navpoint->input, BTN_LEFT, gesture);
- input_sync(navpoint->input);
- break;
-
- case 0x19: /* Module 0, Hello packet */
- if ((navpoint->data[1] & 0xf0) == 0x10)
- break;
- fallthrough;
- default:
- dev_warn(navpoint->dev,
- "spurious packet: data=0x%02x,0x%02x,...\n",
- navpoint->data[0], navpoint->data[1]);
- break;
- }
-}
-
-static irqreturn_t navpoint_irq(int irq, void *dev_id)
-{
- struct navpoint *navpoint = dev_id;
- struct ssp_device *ssp = navpoint->ssp;
- irqreturn_t ret = IRQ_NONE;
- u32 status;
-
- status = pxa_ssp_read_reg(ssp, SSSR);
- if (status & sssr) {
- dev_warn(navpoint->dev,
- "unexpected interrupt: status=0x%08x\n", status);
- pxa_ssp_write_reg(ssp, SSSR, (status & sssr));
- ret = IRQ_HANDLED;
- }
-
- while (status & SSSR_RNE) {
- u32 data;
-
- data = pxa_ssp_read_reg(ssp, SSDR);
- navpoint->data[navpoint->index + 0] = (data >> 8);
- navpoint->data[navpoint->index + 1] = data;
- navpoint->index += 2;
- if (HEADER_LENGTH(navpoint->data[0]) < navpoint->index) {
- navpoint_packet(navpoint);
- navpoint->index = 0;
- }
- status = pxa_ssp_read_reg(ssp, SSSR);
- ret = IRQ_HANDLED;
- }
-
- return ret;
-}
-
-static void navpoint_up(struct navpoint *navpoint)
-{
- struct ssp_device *ssp = navpoint->ssp;
- int timeout;
-
- clk_prepare_enable(ssp->clk);
-
- pxa_ssp_write_reg(ssp, SSCR1, sscr1);
- pxa_ssp_write_reg(ssp, SSSR, sssr);
- pxa_ssp_write_reg(ssp, SSTO, 0);
- pxa_ssp_write_reg(ssp, SSCR0, sscr0); /* SSCR0_SSE written last */
-
- /* Wait until SSP port is ready for slave clock operations */
- for (timeout = 100; timeout != 0; --timeout) {
- if (!(pxa_ssp_read_reg(ssp, SSSR) & SSSR_CSS))
- break;
- msleep(1);
- }
-
- if (timeout == 0)
- dev_err(navpoint->dev,
- "timeout waiting for SSSR[CSS] to clear\n");
-
- gpiod_set_value(navpoint->gpiod, 1);
-}
-
-static void navpoint_down(struct navpoint *navpoint)
-{
- struct ssp_device *ssp = navpoint->ssp;
-
- gpiod_set_value(navpoint->gpiod, 0);
-
- pxa_ssp_write_reg(ssp, SSCR0, 0);
-
- clk_disable_unprepare(ssp->clk);
-}
-
-static int navpoint_open(struct input_dev *input)
-{
- struct navpoint *navpoint = input_get_drvdata(input);
-
- navpoint_up(navpoint);
-
- return 0;
-}
-
-static void navpoint_close(struct input_dev *input)
-{
- struct navpoint *navpoint = input_get_drvdata(input);
-
- navpoint_down(navpoint);
-}
-
-static int navpoint_probe(struct platform_device *pdev)
-{
- const struct navpoint_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
- struct ssp_device *ssp;
- struct input_dev *input;
- struct navpoint *navpoint;
- int error;
-
- if (!pdata) {
- dev_err(&pdev->dev, "no platform data\n");
- return -EINVAL;
- }
-
- ssp = pxa_ssp_request(pdata->port, pdev->name);
- if (!ssp)
- return -ENODEV;
-
- /* HaRET does not disable devices before jumping into Linux */
- if (pxa_ssp_read_reg(ssp, SSCR0) & SSCR0_SSE) {
- pxa_ssp_write_reg(ssp, SSCR0, 0);
- dev_warn(&pdev->dev, "ssp%d already enabled\n", pdata->port);
- }
-
- navpoint = kzalloc(sizeof(*navpoint), GFP_KERNEL);
- input = input_allocate_device();
- if (!navpoint || !input) {
- error = -ENOMEM;
- goto err_free_mem;
- }
-
- navpoint->gpiod = gpiod_get_optional(&pdev->dev,
- NULL, GPIOD_OUT_LOW);
- if (IS_ERR(navpoint->gpiod)) {
- error = PTR_ERR(navpoint->gpiod);
- dev_err(&pdev->dev, "error getting GPIO\n");
- goto err_free_mem;
- }
- gpiod_set_consumer_name(navpoint->gpiod, "SYNAPTICS_ON");
-
- navpoint->ssp = ssp;
- navpoint->input = input;
- navpoint->dev = &pdev->dev;
-
- input->name = pdev->name;
- input->dev.parent = &pdev->dev;
-
- __set_bit(EV_KEY, input->evbit);
- __set_bit(EV_ABS, input->evbit);
- __set_bit(BTN_LEFT, input->keybit);
- __set_bit(BTN_TOUCH, input->keybit);
- __set_bit(BTN_TOOL_FINGER, input->keybit);
-
- input_set_abs_params(input, ABS_X,
- NAVPOINT_X_MIN, NAVPOINT_X_MAX, 0, 0);
- input_set_abs_params(input, ABS_Y,
- NAVPOINT_Y_MIN, NAVPOINT_Y_MAX, 0, 0);
- input_set_abs_params(input, ABS_PRESSURE,
- NAVPOINT_PRESSURE_MIN, NAVPOINT_PRESSURE_MAX,
- 0, 0);
-
- input->open = navpoint_open;
- input->close = navpoint_close;
-
- input_set_drvdata(input, navpoint);
-
- error = request_irq(ssp->irq, navpoint_irq, 0, pdev->name, navpoint);
- if (error)
- goto err_free_mem;
-
- error = input_register_device(input);
- if (error)
- goto err_free_irq;
-
- platform_set_drvdata(pdev, navpoint);
- dev_dbg(&pdev->dev, "ssp%d, irq %d\n", pdata->port, ssp->irq);
-
- return 0;
-
-err_free_irq:
- free_irq(ssp->irq, navpoint);
-err_free_mem:
- input_free_device(input);
- kfree(navpoint);
- pxa_ssp_free(ssp);
-
- return error;
-}
-
-static void navpoint_remove(struct platform_device *pdev)
-{
- struct navpoint *navpoint = platform_get_drvdata(pdev);
- struct ssp_device *ssp = navpoint->ssp;
-
- free_irq(ssp->irq, navpoint);
-
- input_unregister_device(navpoint->input);
- kfree(navpoint);
-
- pxa_ssp_free(ssp);
-}
-
-static int navpoint_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct navpoint *navpoint = platform_get_drvdata(pdev);
- struct input_dev *input = navpoint->input;
-
- mutex_lock(&input->mutex);
- if (input_device_enabled(input))
- navpoint_down(navpoint);
- mutex_unlock(&input->mutex);
-
- return 0;
-}
-
-static int navpoint_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct navpoint *navpoint = platform_get_drvdata(pdev);
- struct input_dev *input = navpoint->input;
-
- mutex_lock(&input->mutex);
- if (input_device_enabled(input))
- navpoint_up(navpoint);
- mutex_unlock(&input->mutex);
-
- return 0;
-}
-
-static DEFINE_SIMPLE_DEV_PM_OPS(navpoint_pm_ops,
- navpoint_suspend, navpoint_resume);
-
-static struct platform_driver navpoint_driver = {
- .probe = navpoint_probe,
- .remove_new = navpoint_remove,
- .driver = {
- .name = "navpoint",
- .pm = pm_sleep_ptr(&navpoint_pm_ops),
- },
-};
-
-module_platform_driver(navpoint_driver);
-
-MODULE_AUTHOR("Paul Parsons <lost.distance@yahoo.com>");
-MODULE_DESCRIPTION("Synaptics NavPoint (PXA27x SSP/SPI) driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:navpoint");
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index 1b45b1d3077de..343030290d788 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -344,7 +344,7 @@ static int rmi_bus_match(struct device *dev, struct device_driver *drv)
return physical || rmi_function_match(dev, drv);
}
-struct bus_type rmi_bus_type = {
+const struct bus_type rmi_bus_type = {
.match = rmi_bus_match,
.name = "rmi4",
};
diff --git a/drivers/input/rmi4/rmi_bus.h b/drivers/input/rmi4/rmi_bus.h
index 25df6320f9f1d..ea46ad9447ece 100644
--- a/drivers/input/rmi4/rmi_bus.h
+++ b/drivers/input/rmi4/rmi_bus.h
@@ -185,7 +185,7 @@ static inline int rmi_write_block(struct rmi_device *d, u16 addr,
int rmi_for_each_dev(void *data, int (*func)(struct device *dev, void *data));
-extern struct bus_type rmi_bus_type;
+extern const struct bus_type rmi_bus_type;
int rmi_of_property_read_u32(struct device *dev, u32 *result,
const char *prop, bool optional);
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 42eaebb3bf5cc..ef9ea295f9e03 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -1196,7 +1196,11 @@ static int rmi_driver_probe(struct device *dev)
}
rmi_driver_set_input_params(rmi_dev, data->input);
data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
- "%s/input0", dev_name(dev));
+ "%s/input0", dev_name(dev));
+ if (!data->input->phys) {
+ retval = -ENOMEM;
+ goto err;
+ }
}
retval = rmi_init_functions(data);
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 767fc9efb4a86..a8838b5226276 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -1007,7 +1007,7 @@ irqreturn_t serio_interrupt(struct serio *serio,
}
EXPORT_SYMBOL(serio_interrupt);
-struct bus_type serio_bus = {
+const struct bus_type serio_bus = {
.name = "serio",
.drv_groups = serio_driver_groups,
.match = serio_bus_match,
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index d8f9faf2b5290..bb758346a33d8 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -219,8 +219,7 @@ static void sxps2_close(struct serio *pserio)
/**
* xps2_of_probe - probe method for the PS/2 device.
- * @of_dev: pointer to OF device structure
- * @match: pointer to the structure used for matching a device
+ * @ofdev: pointer to OF device structure
*
* This function probes the PS/2 device in the device tree.
* It initializes the driver data structure and the hardware.
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index e3e2324547b90..c821fe3ee794e 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -416,6 +416,37 @@ config TOUCHSCREEN_GOODIX
To compile this driver as a module, choose M here: the
module will be called goodix.
+config TOUCHSCREEN_GOODIX_BERLIN_CORE
+ tristate
+
+config TOUCHSCREEN_GOODIX_BERLIN_I2C
+ tristate "Goodix Berlin I2C touchscreen"
+ depends on I2C
+ select REGMAP_I2C
+ select TOUCHSCREEN_GOODIX_BERLIN_CORE
+ help
+ Say Y here if you have a Goodix Berlin IC connected to
+ your system via I2C.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called goodix_berlin_i2c.
+
+config TOUCHSCREEN_GOODIX_BERLIN_SPI
+ tristate "Goodix Berlin SPI touchscreen"
+ depends on SPI_MASTER
+ select REGMAP
+ select TOUCHSCREEN_GOODIX_BERLIN_CORE
+ help
+ Say Y here if you have a Goodix Berlin IC connected to
+ your system via SPI.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called goodix_berlin_spi.
+
config TOUCHSCREEN_HIDEEP
tristate "HiDeep Touch IC"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 62bd24f3ac8e0..a81cb5aa21a5b 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -47,6 +47,9 @@ obj-$(CONFIG_TOUCHSCREEN_EGALAX_SERIAL) += egalax_ts_serial.o
obj-$(CONFIG_TOUCHSCREEN_EXC3000) += exc3000.o
obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
obj-$(CONFIG_TOUCHSCREEN_GOODIX) += goodix_ts.o
+obj-$(CONFIG_TOUCHSCREEN_GOODIX_BERLIN_CORE) += goodix_berlin_core.o
+obj-$(CONFIG_TOUCHSCREEN_GOODIX_BERLIN_I2C) += goodix_berlin_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_GOODIX_BERLIN_SPI) += goodix_berlin_spi.o
obj-$(CONFIG_TOUCHSCREEN_HIDEEP) += hideep.o
obj-$(CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX) += hynitron_cstxxx.o
obj-$(CONFIG_TOUCHSCREEN_ILI210X) += ili210x.o
diff --git a/drivers/input/touchscreen/goodix_berlin.h b/drivers/input/touchscreen/goodix_berlin.h
new file mode 100644
index 0000000000000..1fd77eb69c9a6
--- /dev/null
+++ b/drivers/input/touchscreen/goodix_berlin.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Goodix Touchscreen Driver
+ * Copyright (C) 2020 - 2021 Goodix, Inc.
+ * Copyright (C) 2023 Linaro Ltd.
+ *
+ * Based on goodix_ts_berlin driver.
+ */
+
+#ifndef __GOODIX_BERLIN_H_
+#define __GOODIX_BERLIN_H_
+
+#include <linux/pm.h>
+
+struct device;
+struct input_id;
+struct regmap;
+
+int goodix_berlin_probe(struct device *dev, int irq, const struct input_id *id,
+ struct regmap *regmap);
+
+extern const struct dev_pm_ops goodix_berlin_pm_ops;
+
+#endif
diff --git a/drivers/input/touchscreen/goodix_berlin_core.c b/drivers/input/touchscreen/goodix_berlin_core.c
new file mode 100644
index 0000000000000..e7b41a926ef88
--- /dev/null
+++ b/drivers/input/touchscreen/goodix_berlin_core.c
@@ -0,0 +1,755 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Goodix "Berlin" Touchscreen IC driver
+ * Copyright (C) 2020 - 2021 Goodix, Inc.
+ * Copyright (C) 2023 Linaro Ltd.
+ *
+ * Based on goodix_ts_berlin driver.
+ *
+ * This driver is distinct from goodix.c since the hardware interface
+ * is different enough to require a new driver.
+ * None of the register addresses or data structures are close enough
+ * to the previous generations.
+ *
+ * Currently the driver only handles multitouch events on the "Revision D"
+ * Berlin IC, with firmware and "config" already programmed.
+ *
+ * Support is missing for:
+ * - ESD Management
+ * - Firmware update/flashing
+ * - "Config" update/flashing
+ * - Stylus Events
+ * - Gesture Events
+ * - Support for older revisions (A & B)
+ */
+
+#include <linux/bitfield.h>
+#include <linux/gpio/consumer.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/sizes.h>
+#include <asm/unaligned.h>
+
+#include "goodix_berlin.h"
+
+#define GOODIX_BERLIN_MAX_TOUCH 10
+
+#define GOODIX_BERLIN_NORMAL_RESET_DELAY_MS 100
+
+#define GOODIX_BERLIN_TOUCH_EVENT BIT(7)
+#define GOODIX_BERLIN_REQUEST_EVENT BIT(6)
+#define GOODIX_BERLIN_TOUCH_COUNT_MASK GENMASK(3, 0)
+
+#define GOODIX_BERLIN_REQUEST_CODE_RESET 3
+
+#define GOODIX_BERLIN_POINT_TYPE_MASK GENMASK(3, 0)
+#define GOODIX_BERLIN_POINT_TYPE_STYLUS_HOVER 1
+#define GOODIX_BERLIN_POINT_TYPE_STYLUS 3
+
+#define GOODIX_BERLIN_TOUCH_ID_MASK GENMASK(7, 4)
+
+#define GOODIX_BERLIN_DEV_CONFIRM_VAL 0xAA
+#define GOODIX_BERLIN_BOOTOPTION_ADDR 0x10000
+#define GOODIX_BERLIN_FW_VERSION_INFO_ADDR 0x10014
+
+#define GOODIX_BERLIN_IC_INFO_MAX_LEN SZ_1K
+#define GOODIX_BERLIN_IC_INFO_ADDR 0x10070
+
+#define GOODIX_BERLIN_CHECKSUM_SIZE sizeof(u16)
+
+struct goodix_berlin_fw_version {
+ u8 rom_pid[6];
+ u8 rom_vid[3];
+ u8 rom_vid_reserved;
+ u8 patch_pid[8];
+ u8 patch_vid[4];
+ u8 patch_vid_reserved;
+ u8 sensor_id;
+ u8 reserved[2];
+ __le16 checksum;
+};
+
+struct goodix_berlin_ic_info_version {
+ u8 info_customer_id;
+ u8 info_version_id;
+ u8 ic_die_id;
+ u8 ic_version_id;
+ __le32 config_id;
+ u8 config_version;
+ u8 frame_data_customer_id;
+ u8 frame_data_version_id;
+ u8 touch_data_customer_id;
+ u8 touch_data_version_id;
+ u8 reserved[3];
+} __packed;
+
+struct goodix_berlin_ic_info_feature {
+ __le16 freqhop_feature;
+ __le16 calibration_feature;
+ __le16 gesture_feature;
+ __le16 side_touch_feature;
+ __le16 stylus_feature;
+} __packed;
+
+struct goodix_berlin_ic_info_misc {
+ __le32 cmd_addr;
+ __le16 cmd_max_len;
+ __le32 cmd_reply_addr;
+ __le16 cmd_reply_len;
+ __le32 fw_state_addr;
+ __le16 fw_state_len;
+ __le32 fw_buffer_addr;
+ __le16 fw_buffer_max_len;
+ __le32 frame_data_addr;
+ __le16 frame_data_head_len;
+ __le16 fw_attr_len;
+ __le16 fw_log_len;
+ u8 pack_max_num;
+ u8 pack_compress_version;
+ __le16 stylus_struct_len;
+ __le16 mutual_struct_len;
+ __le16 self_struct_len;
+ __le16 noise_struct_len;
+ __le32 touch_data_addr;
+ __le16 touch_data_head_len;
+ __le16 point_struct_len;
+ __le16 reserved1;
+ __le16 reserved2;
+ __le32 mutual_rawdata_addr;
+ __le32 mutual_diffdata_addr;
+ __le32 mutual_refdata_addr;
+ __le32 self_rawdata_addr;
+ __le32 self_diffdata_addr;
+ __le32 self_refdata_addr;
+ __le32 iq_rawdata_addr;
+ __le32 iq_refdata_addr;
+ __le32 im_rawdata_addr;
+ __le16 im_readata_len;
+ __le32 noise_rawdata_addr;
+ __le16 noise_rawdata_len;
+ __le32 stylus_rawdata_addr;
+ __le16 stylus_rawdata_len;
+ __le32 noise_data_addr;
+ __le32 esd_addr;
+} __packed;
+
+struct goodix_berlin_touch {
+ u8 status;
+ u8 reserved;
+ __le16 x;
+ __le16 y;
+ __le16 w;
+};
+#define GOODIX_BERLIN_TOUCH_SIZE sizeof(struct goodix_berlin_touch)
+
+struct goodix_berlin_header {
+ u8 status;
+ u8 reserved1;
+ u8 request_type;
+ u8 reserved2[3];
+ __le16 checksum;
+};
+#define GOODIX_BERLIN_HEADER_SIZE sizeof(struct goodix_berlin_header)
+
+struct goodix_berlin_event {
+ struct goodix_berlin_header hdr;
+ /* The data below is u16/__le16 aligned */
+ u8 data[GOODIX_BERLIN_TOUCH_SIZE * GOODIX_BERLIN_MAX_TOUCH +
+ GOODIX_BERLIN_CHECKSUM_SIZE];
+};
+
+struct goodix_berlin_core {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regulator *avdd;
+ struct regulator *iovdd;
+ struct gpio_desc *reset_gpio;
+ struct touchscreen_properties props;
+ struct goodix_berlin_fw_version fw_version;
+ struct input_dev *input_dev;
+ int irq;
+
+ /* Runtime parameters extracted from IC_INFO buffer */
+ u32 touch_data_addr;
+
+ struct goodix_berlin_event event;
+};
+
+static bool goodix_berlin_checksum_valid(const u8 *data, int size)
+{
+ u32 cal_checksum = 0;
+ u16 r_checksum;
+ int i;
+
+ if (size < GOODIX_BERLIN_CHECKSUM_SIZE)
+ return false;
+
+ for (i = 0; i < size - GOODIX_BERLIN_CHECKSUM_SIZE; i++)
+ cal_checksum += data[i];
+
+ r_checksum = get_unaligned_le16(&data[i]);
+
+ return (u16)cal_checksum == r_checksum;
+}
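A small worked example of the checksum rule implemented above (values invented for illustration): the payload bytes are summed, the sum is truncated to 16 bits, and the result is compared with the little-endian u16 stored in the last two bytes.

/*
 * Worked example (illustrative only):
 *   data = { 0x01, 0x02, 0x03, 0x06, 0x00 }
 *   sum of payload bytes   = 0x01 + 0x02 + 0x03 = 0x0006
 *   trailing little-endian = 0x06 0x00          = 0x0006
 *   goodix_berlin_checksum_valid(data, 5)      -> true
 * The running sum is accumulated in a u32 and truncated to u16 for the
 * comparison, matching the function above.
 */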
+
+static bool goodix_berlin_is_dummy_data(struct goodix_berlin_core *cd,
+ const u8 *data, int size)
+{
+ int i;
+
+ /*
+ * If the device is missing or doesn't respond the buffer
+ * could be filled with bus default line state, 0x00 or 0xff,
+ * so declare success the first time we encounter neither.
+ */
+ for (i = 0; i < size; i++)
+ if (data[i] > 0 && data[i] < 0xff)
+ return false;
+
+ return true;
+}
+
+static int goodix_berlin_dev_confirm(struct goodix_berlin_core *cd)
+{
+ u8 tx_buf[8], rx_buf[8];
+ int retry = 3;
+ int error;
+
+ memset(tx_buf, GOODIX_BERLIN_DEV_CONFIRM_VAL, sizeof(tx_buf));
+ while (retry--) {
+ error = regmap_raw_write(cd->regmap,
+ GOODIX_BERLIN_BOOTOPTION_ADDR,
+ tx_buf, sizeof(tx_buf));
+ if (error)
+ return error;
+
+ error = regmap_raw_read(cd->regmap,
+ GOODIX_BERLIN_BOOTOPTION_ADDR,
+ rx_buf, sizeof(rx_buf));
+ if (error)
+ return error;
+
+ if (!memcmp(tx_buf, rx_buf, sizeof(tx_buf)))
+ return 0;
+
+ usleep_range(5000, 5100);
+ }
+
+ dev_err(cd->dev, "device confirm failed, rx_buf: %*ph\n",
+ (int)sizeof(rx_buf), rx_buf);
+
+ return -EINVAL;
+}
+
+static int goodix_berlin_power_on(struct goodix_berlin_core *cd)
+{
+ int error;
+
+ error = regulator_enable(cd->iovdd);
+ if (error) {
+ dev_err(cd->dev, "Failed to enable iovdd: %d\n", error);
+ return error;
+ }
+
+ /* Vendor waits 3ms for IOVDD to settle */
+ usleep_range(3000, 3100);
+
+ error = regulator_enable(cd->avdd);
+ if (error) {
+ dev_err(cd->dev, "Failed to enable avdd: %d\n", error);
+ goto err_iovdd_disable;
+ }
+
+ /* Vendor waits 15ms for AVDD to settle */
+ usleep_range(15000, 15100);
+
+ gpiod_set_value_cansleep(cd->reset_gpio, 0);
+
+ /* Vendor waits 4ms for Firmware to initialize */
+ usleep_range(4000, 4100);
+
+ error = goodix_berlin_dev_confirm(cd);
+ if (error)
+ goto err_dev_reset;
+
+ /* Vendor waits 100ms for Firmware to fully boot */
+ msleep(GOODIX_BERLIN_NORMAL_RESET_DELAY_MS);
+
+ return 0;
+
+err_dev_reset:
+ gpiod_set_value_cansleep(cd->reset_gpio, 1);
+ regulator_disable(cd->avdd);
+err_iovdd_disable:
+ regulator_disable(cd->iovdd);
+ return error;
+}
+
+static void goodix_berlin_power_off(struct goodix_berlin_core *cd)
+{
+ gpiod_set_value_cansleep(cd->reset_gpio, 1);
+ regulator_disable(cd->avdd);
+ regulator_disable(cd->iovdd);
+}
+
+static int goodix_berlin_read_version(struct goodix_berlin_core *cd)
+{
+ int error;
+
+ error = regmap_raw_read(cd->regmap, GOODIX_BERLIN_FW_VERSION_INFO_ADDR,
+ &cd->fw_version, sizeof(cd->fw_version));
+ if (error) {
+ dev_err(cd->dev, "error reading fw version, %d\n", error);
+ return error;
+ }
+
+ if (!goodix_berlin_checksum_valid((u8 *)&cd->fw_version,
+ sizeof(cd->fw_version))) {
+ dev_err(cd->dev, "invalid fw version: checksum error\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Only extract necessary data for runtime */
+static int goodix_berlin_parse_ic_info(struct goodix_berlin_core *cd,
+ const u8 *data, u16 length)
+{
+ struct goodix_berlin_ic_info_misc *misc;
+ unsigned int offset = 0;
+
+ offset += sizeof(__le16); /* length */
+ offset += sizeof(struct goodix_berlin_ic_info_version);
+ offset += sizeof(struct goodix_berlin_ic_info_feature);
+
+ /* IC_INFO Parameters, variable width structure */
+ offset += 4 * sizeof(u8); /* drv_num, sen_num, button_num, force_num */
+ if (offset >= length)
+ goto invalid_offset;
+
+#define ADVANCE_LE16_PARAMS() \
+ do { \
+ u8 param_num = data[offset++]; \
+ offset += param_num * sizeof(__le16); \
+ if (offset >= length) \
+ goto invalid_offset; \
+ } while (0)
+ ADVANCE_LE16_PARAMS(); /* active_scan_rate_num */
+ ADVANCE_LE16_PARAMS(); /* mutual_freq_num */
+ ADVANCE_LE16_PARAMS(); /* self_tx_freq_num */
+ ADVANCE_LE16_PARAMS(); /* self_rx_freq_num */
+ ADVANCE_LE16_PARAMS(); /* stylus_freq_num */
+#undef ADVANCE_LE16_PARAMS
+
+ misc = (struct goodix_berlin_ic_info_misc *)&data[offset];
+ cd->touch_data_addr = le32_to_cpu(misc->touch_data_addr);
+
+ return 0;
+
+invalid_offset:
+ dev_err(cd->dev, "ic_info length is invalid (offset %d length %d)\n",
+ offset, length);
+ return -EINVAL;
+}
+
+static int goodix_berlin_get_ic_info(struct goodix_berlin_core *cd)
+{
+ u8 *afe_data __free(kfree) = NULL;
+ __le16 length_raw;
+ u16 length;
+ int error;
+
+ afe_data = kzalloc(GOODIX_BERLIN_IC_INFO_MAX_LEN, GFP_KERNEL);
+ if (!afe_data)
+ return -ENOMEM;
+
+ error = regmap_raw_read(cd->regmap, GOODIX_BERLIN_IC_INFO_ADDR,
+ &length_raw, sizeof(length_raw));
+ if (error) {
+ dev_err(cd->dev, "failed get ic info length, %d\n", error);
+ return error;
+ }
+
+ length = le16_to_cpu(length_raw);
+ if (length >= GOODIX_BERLIN_IC_INFO_MAX_LEN) {
+ dev_err(cd->dev, "invalid ic info length %d\n", length);
+ return -EINVAL;
+ }
+
+ error = regmap_raw_read(cd->regmap, GOODIX_BERLIN_IC_INFO_ADDR,
+ afe_data, length);
+ if (error) {
+ dev_err(cd->dev, "failed get ic info data, %d\n", error);
+ return error;
+ }
+
+ /* check whether the data is valid (e.g. bus default values) */
+ if (goodix_berlin_is_dummy_data(cd, afe_data, length)) {
+ dev_err(cd->dev, "fw info data invalid\n");
+ return -EINVAL;
+ }
+
+ if (!goodix_berlin_checksum_valid(afe_data, length)) {
+ dev_err(cd->dev, "fw info checksum error\n");
+ return -EINVAL;
+ }
+
+ error = goodix_berlin_parse_ic_info(cd, afe_data, length);
+ if (error)
+ return error;
+
+ /* check some key info */
+ if (!cd->touch_data_addr) {
+ dev_err(cd->dev, "touch_data_addr is null\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int goodix_berlin_get_remaining_contacts(struct goodix_berlin_core *cd,
+ int n)
+{
+ size_t offset = 2 * GOODIX_BERLIN_TOUCH_SIZE +
+ GOODIX_BERLIN_CHECKSUM_SIZE;
+ u32 addr = cd->touch_data_addr + GOODIX_BERLIN_HEADER_SIZE + offset;
+ int error;
+
+ error = regmap_raw_read(cd->regmap, addr,
+ &cd->event.data[offset],
+ (n - 2) * GOODIX_BERLIN_TOUCH_SIZE);
+ if (error) {
+ dev_err_ratelimited(cd->dev, "failed to get touch data, %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+static void goodix_berlin_report_state(struct goodix_berlin_core *cd, int n)
+{
+ struct goodix_berlin_touch *touch_data =
+ (struct goodix_berlin_touch *)cd->event.data;
+ struct goodix_berlin_touch *t;
+ int i;
+ u8 type, id;
+
+ for (i = 0; i < n; i++) {
+ t = &touch_data[i];
+
+ type = FIELD_GET(GOODIX_BERLIN_POINT_TYPE_MASK, t->status);
+ if (type == GOODIX_BERLIN_POINT_TYPE_STYLUS ||
+ type == GOODIX_BERLIN_POINT_TYPE_STYLUS_HOVER) {
+ dev_warn_once(cd->dev, "Stylus event type not handled\n");
+ continue;
+ }
+
+ id = FIELD_GET(GOODIX_BERLIN_TOUCH_ID_MASK, t->status);
+ if (id >= GOODIX_BERLIN_MAX_TOUCH) {
+ dev_warn_ratelimited(cd->dev, "invalid finger id %d\n", id);
+ continue;
+ }
+
+ input_mt_slot(cd->input_dev, id);
+ input_mt_report_slot_state(cd->input_dev, MT_TOOL_FINGER, true);
+
+ touchscreen_report_pos(cd->input_dev, &cd->props,
+ __le16_to_cpu(t->x), __le16_to_cpu(t->y),
+ true);
+ input_report_abs(cd->input_dev, ABS_MT_TOUCH_MAJOR,
+ __le16_to_cpu(t->w));
+ }
+
+ input_mt_sync_frame(cd->input_dev);
+ input_sync(cd->input_dev);
+}
+
+static void goodix_berlin_touch_handler(struct goodix_berlin_core *cd)
+{
+ u8 touch_num;
+ int error;
+
+ touch_num = FIELD_GET(GOODIX_BERLIN_TOUCH_COUNT_MASK,
+ cd->event.hdr.request_type);
+ if (touch_num > GOODIX_BERLIN_MAX_TOUCH) {
+ dev_warn(cd->dev, "invalid touch num %d\n", touch_num);
+ return;
+ }
+
+ if (touch_num > 2) {
+ /* read additional contact data if more than 2 touch events */
+ error = goodix_berlin_get_remaining_contacts(cd, touch_num);
+ if (error)
+ return;
+ }
+
+ if (touch_num) {
+ int len = touch_num * GOODIX_BERLIN_TOUCH_SIZE +
+ GOODIX_BERLIN_CHECKSUM_SIZE;
+ if (!goodix_berlin_checksum_valid(cd->event.data, len)) {
+ dev_err(cd->dev, "touch data checksum error: %*ph\n",
+ len, cd->event.data);
+ return;
+ }
+ }
+
+ goodix_berlin_report_state(cd, touch_num);
+}
+
+static int goodix_berlin_request_handle_reset(struct goodix_berlin_core *cd)
+{
+ gpiod_set_value_cansleep(cd->reset_gpio, 1);
+ usleep_range(2000, 2100);
+ gpiod_set_value_cansleep(cd->reset_gpio, 0);
+
+ msleep(GOODIX_BERLIN_NORMAL_RESET_DELAY_MS);
+
+ return 0;
+}
+
+static irqreturn_t goodix_berlin_irq(int irq, void *data)
+{
+ struct goodix_berlin_core *cd = data;
+ int error;
+
+ /*
+ * First, read buffer with space for 2 touch events:
+ * - GOODIX_BERLIN_HEADER_SIZE = 8 bytes
+ * - GOODIX_BERLIN_TOUCH_SIZE * 2 = 16 bytes
+ * - GOODIX_BERLIN_CHECKSUM_SIZE = 2 bytes
+ * For a total of 26 bytes.
+ *
+ * If only a single finger is reported, we will read 8 bytes more than
+ * needed:
+ * - bytes 0-7: Header (GOODIX_BERLIN_HEADER_SIZE)
+ * - bytes 8-15: Finger 0 Data
+ * - bytes 16-17: Checksum
+ * - bytes 18-25: Unused 8 bytes
+ *
+ * If 2 fingers are reported, we would have read the exact needed
+ * amount of data and checksum would be at the end of the buffer:
+ * - bytes 0-7: Header (GOODIX_BERLIN_HEADER_SIZE)
+ * - bytes 8-15: Finger 0 Bytes 0-7
+ * - bytes 16-23: Finger 1 Bytes 0-7
+ * - bytes 24-25: Checksum
+ *
+ * If more than 2 fingers were reported, the "Checksum" bytes would
+ * in fact contain part of the next finger data, and then
+ * goodix_berlin_get_remaining_contacts() would complete the buffer
+ * with the missing bytes, including the trailing checksum.
+ * For example, if 3 fingers are reported, then we would do:
+ * Read 1:
+ * - bytes 0-7: Header (GOODIX_BERLIN_HEADER_SIZE)
+ * - bytes 8-15: Finger 0 Bytes 0-7
+ * - bytes 16-23: Finger 1 Bytes 0-7
+ * - bytes 24-25: Finger 2 Bytes 0-1
+ * Read 2 (with length of (3 - 2) * 8 = 8 bytes):
+ * - bytes 26-31: Finger 2 Bytes 2-7
+ * - bytes 32-33: Checksum
+ */
+ error = regmap_raw_read(cd->regmap, cd->touch_data_addr,
+ &cd->event,
+ GOODIX_BERLIN_HEADER_SIZE +
+ 2 * GOODIX_BERLIN_TOUCH_SIZE +
+ GOODIX_BERLIN_CHECKSUM_SIZE);
+ if (error) {
+ dev_warn_ratelimited(cd->dev,
+ "failed get event head data: %d\n", error);
+ goto out;
+ }
+
+ if (cd->event.hdr.status == 0)
+ goto out;
+
+ if (!goodix_berlin_checksum_valid((u8 *)&cd->event.hdr,
+ GOODIX_BERLIN_HEADER_SIZE)) {
+ dev_warn_ratelimited(cd->dev,
+ "touch head checksum error: %*ph\n",
+ (int)GOODIX_BERLIN_HEADER_SIZE,
+ &cd->event.hdr);
+ goto out_clear;
+ }
+
+ if (cd->event.hdr.status & GOODIX_BERLIN_TOUCH_EVENT)
+ goodix_berlin_touch_handler(cd);
+
+ if (cd->event.hdr.status & GOODIX_BERLIN_REQUEST_EVENT) {
+ switch (cd->event.hdr.request_type) {
+ case GOODIX_BERLIN_REQUEST_CODE_RESET:
+ if (cd->reset_gpio)
+ goodix_berlin_request_handle_reset(cd);
+ break;
+
+ default:
+ dev_warn(cd->dev, "unsupported request code 0x%x\n",
+ cd->event.hdr.request_type);
+ }
+ }
+
+out_clear:
+ /* Clear up status field */
+ regmap_write(cd->regmap, cd->touch_data_addr, 0);
+
+out:
+ return IRQ_HANDLED;
+}
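To make the read sizes in the handler above concrete, here is the arithmetic spelled out, derived from the structure sizes defined earlier in this file.

/*
 * GOODIX_BERLIN_HEADER_SIZE   = 8 bytes
 * GOODIX_BERLIN_TOUCH_SIZE    = 8 bytes
 * GOODIX_BERLIN_CHECKSUM_SIZE = 2 bytes
 *
 * First read : 8 + 2 * 8 + 2 = 26 bytes (header + 2 contacts + checksum)
 * Second read (only when touch_num > 2):
 *   length = (touch_num - 2) * 8, written into event.data at offset
 *   2 * 8 + 2 = 18, so the trailing checksum again ends the buffer.
 */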
+
+static int goodix_berlin_input_dev_config(struct goodix_berlin_core *cd,
+ const struct input_id *id)
+{
+ struct input_dev *input_dev;
+ int error;
+
+ input_dev = devm_input_allocate_device(cd->dev);
+ if (!input_dev)
+ return -ENOMEM;
+
+ cd->input_dev = input_dev;
+ input_set_drvdata(input_dev, cd);
+
+ input_dev->name = "Goodix Berlin Capacitive TouchScreen";
+ input_dev->phys = "input/ts";
+
+ input_dev->id = *id;
+
+ input_set_abs_params(cd->input_dev, ABS_MT_POSITION_X,
+ 0, SZ_64K - 1, 0, 0);
+ input_set_abs_params(cd->input_dev, ABS_MT_POSITION_Y,
+ 0, SZ_64K - 1, 0, 0);
+ input_set_abs_params(cd->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+
+ touchscreen_parse_properties(cd->input_dev, true, &cd->props);
+
+ error = input_mt_init_slots(cd->input_dev, GOODIX_BERLIN_MAX_TOUCH,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error)
+ return error;
+
+ error = input_register_device(cd->input_dev);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int goodix_berlin_suspend(struct device *dev)
+{
+ struct goodix_berlin_core *cd = dev_get_drvdata(dev);
+
+ disable_irq(cd->irq);
+ goodix_berlin_power_off(cd);
+
+ return 0;
+}
+
+static int goodix_berlin_resume(struct device *dev)
+{
+ struct goodix_berlin_core *cd = dev_get_drvdata(dev);
+ int error;
+
+ error = goodix_berlin_power_on(cd);
+ if (error)
+ return error;
+
+ enable_irq(cd->irq);
+
+ return 0;
+}
+
+EXPORT_GPL_SIMPLE_DEV_PM_OPS(goodix_berlin_pm_ops,
+ goodix_berlin_suspend, goodix_berlin_resume);
+
+static void goodix_berlin_power_off_act(void *data)
+{
+ struct goodix_berlin_core *cd = data;
+
+ goodix_berlin_power_off(cd);
+}
+
+int goodix_berlin_probe(struct device *dev, int irq, const struct input_id *id,
+ struct regmap *regmap)
+{
+ struct goodix_berlin_core *cd;
+ int error;
+
+ if (irq <= 0) {
+ dev_err(dev, "Missing interrupt number\n");
+ return -EINVAL;
+ }
+
+ cd = devm_kzalloc(dev, sizeof(*cd), GFP_KERNEL);
+ if (!cd)
+ return -ENOMEM;
+
+ cd->dev = dev;
+ cd->regmap = regmap;
+ cd->irq = irq;
+
+ cd->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(cd->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(cd->reset_gpio),
+ "Failed to request reset gpio\n");
+
+ cd->avdd = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(cd->avdd))
+ return dev_err_probe(dev, PTR_ERR(cd->avdd),
+ "Failed to request avdd regulator\n");
+
+ cd->iovdd = devm_regulator_get(dev, "iovdd");
+ if (IS_ERR(cd->iovdd))
+ return dev_err_probe(dev, PTR_ERR(cd->iovdd),
+ "Failed to request iovdd regulator\n");
+
+ error = goodix_berlin_power_on(cd);
+ if (error) {
+ dev_err(dev, "failed power on");
+ return error;
+ }
+
+ error = devm_add_action_or_reset(dev, goodix_berlin_power_off_act, cd);
+ if (error)
+ return error;
+
+ error = goodix_berlin_read_version(cd);
+ if (error) {
+ dev_err(dev, "failed to get version info");
+ return error;
+ }
+
+ error = goodix_berlin_get_ic_info(cd);
+ if (error) {
+ dev_err(dev, "invalid ic info, abort");
+ return error;
+ }
+
+ error = goodix_berlin_input_dev_config(cd, id);
+ if (error) {
+ dev_err(dev, "failed set input device");
+ return error;
+ }
+
+ error = devm_request_threaded_irq(dev, cd->irq, NULL, goodix_berlin_irq,
+ IRQF_ONESHOT, "goodix-berlin", cd);
+ if (error) {
+ dev_err(dev, "request threaded irq failed: %d\n", error);
+ return error;
+ }
+
+ dev_set_drvdata(dev, cd);
+
+ dev_dbg(dev, "Goodix Berlin %s Touchscreen Controller",
+ cd->fw_version.patch_pid);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(goodix_berlin_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Goodix Berlin Core Touchscreen driver");
+MODULE_AUTHOR("Neil Armstrong <neil.armstrong@linaro.org>");
diff --git a/drivers/input/touchscreen/goodix_berlin_i2c.c b/drivers/input/touchscreen/goodix_berlin_i2c.c
new file mode 100644
index 0000000000000..6ed9aa8088cb2
--- /dev/null
+++ b/drivers/input/touchscreen/goodix_berlin_i2c.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Goodix Berlin Touchscreen Driver
+ *
+ * Copyright (C) 2020 - 2021 Goodix, Inc.
+ * Copyright (C) 2023 Linaro Ltd.
+ *
+ * Based on goodix_ts_berlin driver.
+ */
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/input.h>
+
+#include "goodix_berlin.h"
+
+#define I2C_MAX_TRANSFER_SIZE 256
+
+static const struct regmap_config goodix_berlin_i2c_regmap_conf = {
+ .reg_bits = 32,
+ .val_bits = 8,
+ .max_raw_read = I2C_MAX_TRANSFER_SIZE,
+ .max_raw_write = I2C_MAX_TRANSFER_SIZE,
+};
+
+/* vendor & product left unassigned here, should probably be updated from fw info */
+static const struct input_id goodix_berlin_i2c_input_id = {
+ .bustype = BUS_I2C,
+};
+
+static int goodix_berlin_i2c_probe(struct i2c_client *client)
+{
+ struct regmap *regmap;
+ int error;
+
+ regmap = devm_regmap_init_i2c(client, &goodix_berlin_i2c_regmap_conf);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ error = goodix_berlin_probe(&client->dev, client->irq,
+ &goodix_berlin_i2c_input_id, regmap);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static const struct i2c_device_id goodix_berlin_i2c_id[] = {
+ { "gt9916", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, goodix_berlin_i2c_id);
+
+static const struct of_device_id goodix_berlin_i2c_of_match[] = {
+ { .compatible = "goodix,gt9916", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, goodix_berlin_i2c_of_match);
+
+static struct i2c_driver goodix_berlin_i2c_driver = {
+ .driver = {
+ .name = "goodix-berlin-i2c",
+ .of_match_table = goodix_berlin_i2c_of_match,
+ .pm = pm_sleep_ptr(&goodix_berlin_pm_ops),
+ },
+ .probe = goodix_berlin_i2c_probe,
+ .id_table = goodix_berlin_i2c_id,
+};
+module_i2c_driver(goodix_berlin_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Goodix Berlin I2C Touchscreen driver");
+MODULE_AUTHOR("Neil Armstrong <neil.armstrong@linaro.org>");
diff --git a/drivers/input/touchscreen/goodix_berlin_spi.c b/drivers/input/touchscreen/goodix_berlin_spi.c
new file mode 100644
index 0000000000000..4cc557da048a7
--- /dev/null
+++ b/drivers/input/touchscreen/goodix_berlin_spi.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Goodix Berlin Touchscreen Driver
+ *
+ * Copyright (C) 2020 - 2021 Goodix, Inc.
+ * Copyright (C) 2023 Linaro Ltd.
+ *
+ * Based on goodix_ts_berlin driver.
+ */
+#include <asm/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/input.h>
+
+#include "goodix_berlin.h"
+
+#define GOODIX_BERLIN_SPI_TRANS_PREFIX_LEN 1
+#define GOODIX_BERLIN_REGISTER_WIDTH 4
+#define GOODIX_BERLIN_SPI_READ_DUMMY_LEN 3
+#define GOODIX_BERLIN_SPI_READ_PREFIX_LEN (GOODIX_BERLIN_SPI_TRANS_PREFIX_LEN + \
+ GOODIX_BERLIN_REGISTER_WIDTH + \
+ GOODIX_BERLIN_SPI_READ_DUMMY_LEN)
+#define GOODIX_BERLIN_SPI_WRITE_PREFIX_LEN (GOODIX_BERLIN_SPI_TRANS_PREFIX_LEN + \
+ GOODIX_BERLIN_REGISTER_WIDTH)
+
+#define GOODIX_BERLIN_SPI_WRITE_FLAG 0xF0
+#define GOODIX_BERLIN_SPI_READ_FLAG 0xF1
+
+static int goodix_berlin_spi_read(void *context, const void *reg_buf,
+ size_t reg_size, void *val_buf,
+ size_t val_size)
+{
+ struct spi_device *spi = context;
+ struct spi_transfer xfers;
+ struct spi_message spi_msg;
+ const u32 *reg = reg_buf; /* reg is stored as native u32 at start of buffer */
+ u8 *buf;
+ int error;
+
+ if (reg_size != GOODIX_BERLIN_REGISTER_WIDTH)
+ return -EINVAL;
+
+ buf = kzalloc(GOODIX_BERLIN_SPI_READ_PREFIX_LEN + val_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ spi_message_init(&spi_msg);
+ memset(&xfers, 0, sizeof(xfers));
+
+ /* buffer format: 0xF1 + addr (4 bytes) + dummy (3 bytes) + data */
+ buf[0] = GOODIX_BERLIN_SPI_READ_FLAG;
+ put_unaligned_be32(*reg, buf + GOODIX_BERLIN_SPI_TRANS_PREFIX_LEN);
+ memset(buf + GOODIX_BERLIN_SPI_TRANS_PREFIX_LEN + GOODIX_BERLIN_REGISTER_WIDTH,
+ 0xff, GOODIX_BERLIN_SPI_READ_DUMMY_LEN);
+
+ xfers.tx_buf = buf;
+ xfers.rx_buf = buf;
+ xfers.len = GOODIX_BERLIN_SPI_READ_PREFIX_LEN + val_size;
+ xfers.cs_change = 0;
+ spi_message_add_tail(&xfers, &spi_msg);
+
+ error = spi_sync(spi, &spi_msg);
+ if (error < 0)
+ dev_err(&spi->dev, "spi transfer error, %d", error);
+ else
+ memcpy(val_buf, buf + GOODIX_BERLIN_SPI_READ_PREFIX_LEN, val_size);
+
+ kfree(buf);
+ return error;
+}
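As an illustration of the buffer built above, a 4-byte read from register 0x10014 would be laid out on the wire roughly as follows; the register address and data bytes are invented for the example.

/*
 * tx: F1 | 00 01 00 14 | FF FF FF | 00 00 00 00
 * rx: .. | .. .. .. .. | .. .. .. | d0 d1 d2 d3
 *
 * The first GOODIX_BERLIN_SPI_READ_PREFIX_LEN (1 + 4 + 3 = 8) bytes of
 * the full-duplex rx buffer are discarded; only the trailing val_size
 * bytes are copied into val_buf.
 */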
+
+static int goodix_berlin_spi_write(void *context, const void *data,
+ size_t count)
+{
+ unsigned int len = count - GOODIX_BERLIN_REGISTER_WIDTH;
+ struct spi_device *spi = context;
+ struct spi_transfer xfers;
+ struct spi_message spi_msg;
+ const u32 *reg = data; /* reg is stored as native u32 at start of buffer */
+ u8 *buf;
+ int error;
+
+ buf = kzalloc(GOODIX_BERLIN_SPI_WRITE_PREFIX_LEN + len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ spi_message_init(&spi_msg);
+ memset(&xfers, 0, sizeof(xfers));
+
+ buf[0] = GOODIX_BERLIN_SPI_WRITE_FLAG;
+ put_unaligned_be32(*reg, buf + GOODIX_BERLIN_SPI_TRANS_PREFIX_LEN);
+ memcpy(buf + GOODIX_BERLIN_SPI_WRITE_PREFIX_LEN,
+ data + GOODIX_BERLIN_REGISTER_WIDTH, len);
+
+ xfers.tx_buf = buf;
+ xfers.len = GOODIX_BERLIN_SPI_WRITE_PREFIX_LEN + len;
+ xfers.cs_change = 0;
+ spi_message_add_tail(&xfers, &spi_msg);
+
+ error = spi_sync(spi, &spi_msg);
+ if (error < 0)
+ dev_err(&spi->dev, "spi transfer error, %d", error);
+
+ kfree(buf);
+ return error;
+}
+
+static const struct regmap_config goodix_berlin_spi_regmap_conf = {
+ .reg_bits = 32,
+ .val_bits = 8,
+ .read = goodix_berlin_spi_read,
+ .write = goodix_berlin_spi_write,
+};
+
+/* vendor & product left unassigned here, should probably be updated from fw info */
+static const struct input_id goodix_berlin_spi_input_id = {
+ .bustype = BUS_SPI,
+};
+
+static int goodix_berlin_spi_probe(struct spi_device *spi)
+{
+ struct regmap_config regmap_config;
+ struct regmap *regmap;
+ size_t max_size;
+ int error = 0;
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 8;
+ error = spi_setup(spi);
+ if (error)
+ return error;
+
+ max_size = spi_max_transfer_size(spi);
+
+ regmap_config = goodix_berlin_spi_regmap_conf;
+ regmap_config.max_raw_read = max_size - GOODIX_BERLIN_SPI_READ_PREFIX_LEN;
+ regmap_config.max_raw_write = max_size - GOODIX_BERLIN_SPI_WRITE_PREFIX_LEN;
+
+ regmap = devm_regmap_init(&spi->dev, NULL, spi, &regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ error = goodix_berlin_probe(&spi->dev, spi->irq,
+ &goodix_berlin_spi_input_id, regmap);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static const struct spi_device_id goodix_berlin_spi_ids[] = {
+ { "gt9916" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, goodix_berlin_spi_ids);
+
+static const struct of_device_id goodix_berlin_spi_of_match[] = {
+ { .compatible = "goodix,gt9916", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, goodix_berlin_spi_of_match);
+
+static struct spi_driver goodix_berlin_spi_driver = {
+ .driver = {
+ .name = "goodix-berlin-spi",
+ .of_match_table = goodix_berlin_spi_of_match,
+ .pm = pm_sleep_ptr(&goodix_berlin_pm_ops),
+ },
+ .probe = goodix_berlin_spi_probe,
+ .id_table = goodix_berlin_spi_ids,
+};
+module_spi_driver(goodix_berlin_spi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Goodix Berlin SPI Touchscreen driver");
+MODULE_AUTHOR("Neil Armstrong <neil.armstrong@linaro.org>");
diff --git a/drivers/input/touchscreen/imagis.c b/drivers/input/touchscreen/imagis.c
index 07111ca244556..074dd6c342ecb 100644
--- a/drivers/input/touchscreen/imagis.c
+++ b/drivers/input/touchscreen/imagis.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/i2c.h>
@@ -11,9 +12,15 @@
#include <linux/property.h>
#include <linux/regulator/consumer.h>
+#define IST3032C_WHOAMI 0x32c
+
+#define IST3038B_REG_STATUS 0x20
+#define IST3038B_REG_CHIPID 0x30
+#define IST3038B_WHOAMI 0x30380b
+
#define IST3038C_HIB_ACCESS (0x800B << 16)
#define IST3038C_DIRECT_ACCESS BIT(31)
-#define IST3038C_REG_CHIPID 0x40001000
+#define IST3038C_REG_CHIPID (0x40001000 | IST3038C_DIRECT_ACCESS)
#define IST3038C_REG_HIB_BASE 0x30000100
#define IST3038C_REG_TOUCH_STATUS (IST3038C_REG_HIB_BASE | IST3038C_HIB_ACCESS)
#define IST3038C_REG_TOUCH_COORD (IST3038C_REG_HIB_BASE | IST3038C_HIB_ACCESS | 0x8)
@@ -23,19 +30,29 @@
#define IST3038C_I2C_RETRY_COUNT 3
#define IST3038C_MAX_FINGER_NUM 10
#define IST3038C_X_MASK GENMASK(23, 12)
-#define IST3038C_X_SHIFT 12
#define IST3038C_Y_MASK GENMASK(11, 0)
#define IST3038C_AREA_MASK GENMASK(27, 24)
-#define IST3038C_AREA_SHIFT 24
#define IST3038C_FINGER_COUNT_MASK GENMASK(15, 12)
-#define IST3038C_FINGER_COUNT_SHIFT 12
#define IST3038C_FINGER_STATUS_MASK GENMASK(9, 0)
+#define IST3032C_KEY_STATUS_MASK GENMASK(20, 16)
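For reference, the FIELD_GET() conversions used later in this patch replace the open-coded mask-and-shift pairs; with an invented status word of 0x01234567 the masks above decode as shown below.

/*
 * FIELD_GET(IST3038C_X_MASK,    0x01234567) = (0x01234567 >> 12) & 0xfff = 0x234
 * FIELD_GET(IST3038C_Y_MASK,    0x01234567) =  0x01234567        & 0xfff = 0x567
 * FIELD_GET(IST3038C_AREA_MASK, 0x01234567) = (0x01234567 >> 24) & 0xf   = 0x1
 */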
+
+struct imagis_properties {
+ unsigned int interrupt_msg_cmd;
+ unsigned int touch_coord_cmd;
+ unsigned int whoami_cmd;
+ unsigned int whoami_val;
+ bool protocol_b;
+ bool touch_keys_supported;
+};
struct imagis_ts {
struct i2c_client *client;
+ const struct imagis_properties *tdata;
struct input_dev *input_dev;
struct touchscreen_properties prop;
struct regulator_bulk_data supplies[2];
+ u32 keycodes[5];
+ int num_keycodes;
};
static int imagis_i2c_read_reg(struct imagis_ts *ts,
@@ -80,20 +97,18 @@ static irqreturn_t imagis_interrupt(int irq, void *dev_id)
{
struct imagis_ts *ts = dev_id;
u32 intr_message, finger_status;
- unsigned int finger_count, finger_pressed;
+ unsigned int finger_count, finger_pressed, key_pressed;
int i;
int error;
- error = imagis_i2c_read_reg(ts, IST3038C_REG_INTR_MESSAGE,
- &intr_message);
+ error = imagis_i2c_read_reg(ts, ts->tdata->interrupt_msg_cmd, &intr_message);
if (error) {
dev_err(&ts->client->dev,
"failed to read the interrupt message: %d\n", error);
goto out;
}
- finger_count = (intr_message & IST3038C_FINGER_COUNT_MASK) >>
- IST3038C_FINGER_COUNT_SHIFT;
+ finger_count = FIELD_GET(IST3038C_FINGER_COUNT_MASK, intr_message);
if (finger_count > IST3038C_MAX_FINGER_NUM) {
dev_err(&ts->client->dev,
"finger count %d is more than maximum supported\n",
@@ -101,12 +116,16 @@ static irqreturn_t imagis_interrupt(int irq, void *dev_id)
goto out;
}
- finger_pressed = intr_message & IST3038C_FINGER_STATUS_MASK;
+ finger_pressed = FIELD_GET(IST3038C_FINGER_STATUS_MASK, intr_message);
for (i = 0; i < finger_count; i++) {
- error = imagis_i2c_read_reg(ts,
- IST3038C_REG_TOUCH_COORD + (i * 4),
- &finger_status);
+ if (ts->tdata->protocol_b)
+ error = imagis_i2c_read_reg(ts,
+ ts->tdata->touch_coord_cmd, &finger_status);
+ else
+ error = imagis_i2c_read_reg(ts,
+ ts->tdata->touch_coord_cmd + (i * 4),
+ &finger_status);
if (error) {
dev_err(&ts->client->dev,
"failed to read coordinates for finger %d: %d\n",
@@ -118,14 +137,19 @@ static irqreturn_t imagis_interrupt(int irq, void *dev_id)
input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER,
finger_pressed & BIT(i));
touchscreen_report_pos(ts->input_dev, &ts->prop,
- (finger_status & IST3038C_X_MASK) >>
- IST3038C_X_SHIFT,
- finger_status & IST3038C_Y_MASK, 1);
+ FIELD_GET(IST3038C_X_MASK, finger_status),
+ FIELD_GET(IST3038C_Y_MASK, finger_status),
+ true);
input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR,
- (finger_status & IST3038C_AREA_MASK) >>
- IST3038C_AREA_SHIFT);
+ FIELD_GET(IST3038C_AREA_MASK, finger_status));
}
+ key_pressed = FIELD_GET(IST3032C_KEY_STATUS_MASK, intr_message);
+
+ for (int i = 0; i < ts->num_keycodes; i++)
+ input_report_key(ts->input_dev, ts->keycodes[i],
+ key_pressed & BIT(i));
+
input_mt_sync_frame(ts->input_dev);
input_sync(ts->input_dev);
@@ -210,7 +234,24 @@ static int imagis_init_input_dev(struct imagis_ts *ts)
input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 16, 0, 0);
+ if (ts->tdata->touch_keys_supported) {
+ ts->num_keycodes = of_property_read_variable_u32_array(
+ ts->client->dev.of_node, "linux,keycodes",
+ ts->keycodes, 0, ARRAY_SIZE(ts->keycodes));
+ if (ts->num_keycodes <= 0) {
+ ts->keycodes[0] = KEY_APPSELECT;
+ ts->keycodes[1] = KEY_BACK;
+ ts->num_keycodes = 2;
+ }
+
+ input_dev->keycodemax = ts->num_keycodes;
+ input_dev->keycodesize = sizeof(ts->keycodes[0]);
+ input_dev->keycode = ts->keycodes;
+ }
+
+ for (int i = 0; i < ts->num_keycodes; i++)
+ input_set_capability(input_dev, EV_KEY, ts->keycodes[i]);
touchscreen_parse_properties(input_dev, true, &ts->prop);
if (!ts->prop.max_x || !ts->prop.max_y) {
@@ -261,6 +302,12 @@ static int imagis_probe(struct i2c_client *i2c)
ts->client = i2c;
+ ts->tdata = device_get_match_data(dev);
+ if (!ts->tdata) {
+ dev_err(dev, "missing chip data\n");
+ return -EINVAL;
+ }
+
error = imagis_init_regulators(ts);
if (error) {
dev_err(dev, "regulator init error: %d\n", error);
@@ -279,15 +326,13 @@ static int imagis_probe(struct i2c_client *i2c)
return error;
}
- error = imagis_i2c_read_reg(ts,
- IST3038C_REG_CHIPID | IST3038C_DIRECT_ACCESS,
- &chip_id);
+ error = imagis_i2c_read_reg(ts, ts->tdata->whoami_cmd, &chip_id);
if (error) {
dev_err(dev, "chip ID read failure: %d\n", error);
return error;
}
- if (chip_id != IST3038C_WHOAMI) {
+ if (chip_id != ts->tdata->whoami_val) {
dev_err(dev, "unknown chip ID: 0x%x\n", chip_id);
return -EINVAL;
}
@@ -343,9 +388,34 @@ static int imagis_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(imagis_pm_ops, imagis_suspend, imagis_resume);
+static const struct imagis_properties imagis_3032c_data = {
+ .interrupt_msg_cmd = IST3038C_REG_INTR_MESSAGE,
+ .touch_coord_cmd = IST3038C_REG_TOUCH_COORD,
+ .whoami_cmd = IST3038C_REG_CHIPID,
+ .whoami_val = IST3032C_WHOAMI,
+ .touch_keys_supported = true,
+};
+
+static const struct imagis_properties imagis_3038b_data = {
+ .interrupt_msg_cmd = IST3038B_REG_STATUS,
+ .touch_coord_cmd = IST3038B_REG_STATUS,
+ .whoami_cmd = IST3038B_REG_CHIPID,
+ .whoami_val = IST3038B_WHOAMI,
+ .protocol_b = true,
+};
+
+static const struct imagis_properties imagis_3038c_data = {
+ .interrupt_msg_cmd = IST3038C_REG_INTR_MESSAGE,
+ .touch_coord_cmd = IST3038C_REG_TOUCH_COORD,
+ .whoami_cmd = IST3038C_REG_CHIPID,
+ .whoami_val = IST3038C_WHOAMI,
+};
+
#ifdef CONFIG_OF
static const struct of_device_id imagis_of_match[] = {
- { .compatible = "imagis,ist3038c", },
+ { .compatible = "imagis,ist3032c", .data = &imagis_3032c_data },
+ { .compatible = "imagis,ist3038b", .data = &imagis_3038b_data },
+ { .compatible = "imagis,ist3038c", .data = &imagis_3038c_data },
{ },
};
MODULE_DEVICE_TABLE(of, imagis_of_match);
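The imagis hunks above replace open-coded mask-and-shift pairs with FIELD_GET(), which derives the shift from the mask at compile time, so the separate *_SHIFT constants can be dropped. A minimal equivalence sketch using the same coordinate masks (example_decode is illustrative only, not part of the driver):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* FIELD_GET(GENMASK(23, 12), status) == (status & GENMASK(23, 12)) >> 12 */
static inline void example_decode(u32 status, unsigned int *x, unsigned int *y)
{
	*x = FIELD_GET(IST3038C_X_MASK, status);	/* bits 23..12 */
	*y = FIELD_GET(IST3038C_Y_MASK, status);	/* bits 11..0  */
}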
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 34324f8512ac2..294b7ceded272 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -157,7 +157,6 @@ static void titsc_step_config(struct titsc *ts_dev)
n++ == 0 ? STEPCONFIG_OPENDLY : 0);
}
- config = 0;
config = STEPCONFIG_MODE_HWSYNC |
STEPCONFIG_AVG_16 | ts_dev->bit_yn |
STEPCONFIG_INM_ADCREFM;
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 50bac2d79d9b5..7e9b996b47c83 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -176,6 +176,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
path->num_nodes = num_nodes;
+ mutex_lock(&icc_bw_lock);
+
for (i = num_nodes - 1; i >= 0; i--) {
node->provider->users++;
hlist_add_head(&path->reqs[i].req_node, &node->req_list);
@@ -186,6 +188,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
node = node->reverse;
}
+ mutex_unlock(&icc_bw_lock);
+
return path;
}
@@ -343,7 +347,7 @@ EXPORT_SYMBOL_GPL(icc_std_aggregate);
* an array of icc nodes specified in the icc_onecell_data struct when
* registering the provider.
*/
-struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
+struct icc_node *of_icc_xlate_onecell(const struct of_phandle_args *spec,
void *data)
{
struct icc_onecell_data *icc_data = data;
@@ -368,7 +372,7 @@ EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
* Returns a valid pointer to struct icc_node_data on success or ERR_PTR()
* on failure.
*/
-struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
+struct icc_node_data *of_icc_get_from_provider(const struct of_phandle_args *spec)
{
struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
struct icc_node_data *data = NULL;
@@ -792,12 +796,16 @@ void icc_put(struct icc_path *path)
pr_err("%s: error (%d)\n", __func__, ret);
mutex_lock(&icc_lock);
+ mutex_lock(&icc_bw_lock);
+
for (i = 0; i < path->num_nodes; i++) {
node = path->reqs[i].node;
hlist_del(&path->reqs[i].req_node);
if (!WARN_ON(!node->provider->users))
node->provider->users--;
}
+
+ mutex_unlock(&icc_bw_lock);
mutex_unlock(&icc_lock);
kfree_const(path->name);
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 697f96c49f6f4..1446a839184e1 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -8,6 +8,15 @@ config INTERCONNECT_QCOM
config INTERCONNECT_QCOM_BCM_VOTER
tristate
+config INTERCONNECT_QCOM_MSM8909
+ tristate "Qualcomm MSM8909 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on msm8909-based
+ platforms.
+
config INTERCONNECT_QCOM_MSM8916
tristate "Qualcomm MSM8916 interconnect driver"
depends on INTERCONNECT_QCOM
@@ -209,6 +218,15 @@ config INTERCONNECT_QCOM_SM6350
This is a driver for the Qualcomm Network-on-Chip on sm6350-based
platforms.
+config INTERCONNECT_QCOM_SM7150
+ tristate "Qualcomm SM7150 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sm7150-based
+ platforms.
+
config INTERCONNECT_QCOM_SM8150
tristate "Qualcomm SM8150 interconnect driver"
depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 7048461650221..2ea3113d0a4d5 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_INTERCONNECT_QCOM) += interconnect_qcom.o
interconnect_qcom-y := icc-common.o
icc-bcm-voter-objs := bcm-voter.o
+qnoc-msm8909-objs := msm8909.o
qnoc-msm8916-objs := msm8916.o
qnoc-msm8939-objs := msm8939.o
qnoc-msm8974-objs := msm8974.o
@@ -26,6 +27,7 @@ qnoc-sdx65-objs := sdx65.o
qnoc-sdx75-objs := sdx75.o
qnoc-sm6115-objs := sm6115.o
qnoc-sm6350-objs := sm6350.o
+qnoc-sm7150-objs := sm7150.o
qnoc-sm8150-objs := sm8150.o
qnoc-sm8250-objs := sm8250.o
qnoc-sm8350-objs := sm8350.o
@@ -36,6 +38,7 @@ qnoc-x1e80100-objs := x1e80100.o
icc-smd-rpm-objs := smd-rpm.o icc-rpm.o icc-rpm-clocks.o
obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
+obj-$(CONFIG_INTERCONNECT_QCOM_MSM8909) += qnoc-msm8909.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += qnoc-msm8916.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8939) += qnoc-msm8939.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o
@@ -58,6 +61,7 @@ obj-$(CONFIG_INTERCONNECT_QCOM_SDX65) += qnoc-sdx65.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDX75) += qnoc-sdx75.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM6115) += qnoc-sm6115.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM6350) += qnoc-sm6350.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SM7150) += qnoc-sm7150.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8350) += qnoc-sm8350.o
diff --git a/drivers/interconnect/qcom/icc-common.c b/drivers/interconnect/qcom/icc-common.c
index f27f4fdc45317..9b9ee113f1727 100644
--- a/drivers/interconnect/qcom/icc-common.c
+++ b/drivers/interconnect/qcom/icc-common.c
@@ -9,7 +9,8 @@
#include "icc-common.h"
-struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+struct icc_node_data *qcom_icc_xlate_extended(const struct of_phandle_args *spec,
+ void *data)
{
struct icc_node_data *ndata;
struct icc_node *node;
diff --git a/drivers/interconnect/qcom/icc-common.h b/drivers/interconnect/qcom/icc-common.h
index 33bb2c38dff33..21c39b1639486 100644
--- a/drivers/interconnect/qcom/icc-common.h
+++ b/drivers/interconnect/qcom/icc-common.h
@@ -8,6 +8,7 @@
#include <linux/interconnect-provider.h>
-struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data);
+struct icc_node_data *qcom_icc_xlate_extended(const struct of_phandle_args *spec,
+ void *data);
#endif
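With qcom_icc_xlate_extended() and of_icc_xlate_onecell() now taking a const struct of_phandle_args, provider-side xlate callbacks have to carry the same qualifier to keep assigning them to the framework's callback pointers. A minimal sketch with hypothetical my_xlate/my_register_xlate names:

static struct icc_node *my_xlate(const struct of_phandle_args *spec, void *data)
{
	/* spec->args[0] carries the endpoint id from the consumer's DT phandle */
	return of_icc_xlate_onecell(spec, data);
}

static void my_register_xlate(struct icc_provider *provider, void *data)
{
	provider->xlate = my_xlate;	/* must match the const-qualified prototype */
	provider->data = data;
}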
diff --git a/drivers/interconnect/qcom/msm8909.c b/drivers/interconnect/qcom/msm8909.c
new file mode 100644
index 0000000000000..0d0cd7282f5b7
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8909.c
@@ -0,0 +1,1329 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on data from msm8909-bus.dtsi in Qualcomm's msm-3.18 release:
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/interconnect/qcom,msm8909.h>
+
+#include "icc-rpm.h"
+
+enum {
+ QNOC_MASTER_AMPSS_M0 = 1,
+ QNOC_MASTER_GRAPHICS_3D,
+ QNOC_SNOC_BIMC_0_MAS,
+ QNOC_SNOC_BIMC_1_MAS,
+ QNOC_MASTER_TCU_0,
+ QNOC_MASTER_TCU_1,
+ QNOC_MASTER_AUDIO,
+ QNOC_MASTER_SPDM,
+ QNOC_MASTER_DEHR,
+ QNOC_MASTER_QPIC,
+ QNOC_MASTER_BLSP_1,
+ QNOC_MASTER_USB_HS,
+ QNOC_MASTER_CRYPTO_CORE0,
+ QNOC_MASTER_SDCC_1,
+ QNOC_MASTER_SDCC_2,
+ QNOC_SNOC_PNOC_MAS,
+ QNOC_MASTER_QDSS_BAM,
+ QNOC_BIMC_SNOC_MAS,
+ QNOC_MASTER_MDP_PORT0,
+ QNOC_PNOC_SNOC_MAS,
+ QNOC_MASTER_VIDEO_P0,
+ QNOC_MASTER_VFE,
+ QNOC_MASTER_QDSS_ETR,
+ QNOC_PNOC_M_0,
+ QNOC_PNOC_M_1,
+ QNOC_PNOC_INT_0,
+ QNOC_PNOC_INT_1,
+ QNOC_PNOC_SLV_0,
+ QNOC_PNOC_SLV_1,
+ QNOC_PNOC_SLV_2,
+ QNOC_PNOC_SLV_3,
+ QNOC_PNOC_SLV_4,
+ QNOC_PNOC_SLV_5,
+ QNOC_PNOC_SLV_7,
+ QNOC_SNOC_MM_INT_0,
+ QNOC_SNOC_MM_INT_1,
+ QNOC_SNOC_MM_INT_2,
+ QNOC_SNOC_MM_INT_BIMC,
+ QNOC_SNOC_QDSS_INT,
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_1,
+ QNOC_SNOC_INT_BIMC,
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV,
+ QNOC_SLAVE_TCSR,
+ QNOC_SLAVE_SDCC_1,
+ QNOC_SLAVE_BLSP_1,
+ QNOC_SLAVE_CRYPTO_0_CFG,
+ QNOC_SLAVE_MESSAGE_RAM,
+ QNOC_SLAVE_PDM,
+ QNOC_SLAVE_PRNG,
+ QNOC_SLAVE_USB_HS,
+ QNOC_SLAVE_QPIC,
+ QNOC_SLAVE_SPDM,
+ QNOC_SLAVE_SDCC_2,
+ QNOC_SLAVE_AUDIO,
+ QNOC_SLAVE_DEHR_CFG,
+ QNOC_SLAVE_SNOC_CFG,
+ QNOC_SLAVE_QDSS_CFG,
+ QNOC_SLAVE_USB_PHYS_CFG,
+ QNOC_SLAVE_CAMERA_CFG,
+ QNOC_SLAVE_DISPLAY_CFG,
+ QNOC_SLAVE_VENUS_CFG,
+ QNOC_SLAVE_TLMM,
+ QNOC_SLAVE_GRAPHICS_3D_CFG,
+ QNOC_SLAVE_IMEM_CFG,
+ QNOC_SLAVE_BIMC_CFG,
+ QNOC_SLAVE_PMIC_ARB,
+ QNOC_SLAVE_TCU,
+ QNOC_PNOC_SNOC_SLV,
+ QNOC_SLAVE_APPSS,
+ QNOC_SNOC_BIMC_0_SLV,
+ QNOC_SNOC_BIMC_1_SLV,
+ QNOC_SLAVE_SYSTEM_IMEM,
+ QNOC_SNOC_PNOC_SLV,
+ QNOC_SLAVE_QDSS_STM,
+ QNOC_SLAVE_CATS_128,
+ QNOC_SLAVE_OCMEM_64,
+};
+
+static const u16 mas_apps_proc_links[] = {
+ QNOC_BIMC_SNOC_SLV,
+ QNOC_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_apps_proc = {
+ .name = "mas_apps_proc",
+ .id = QNOC_MASTER_AMPSS_M0,
+ .buswidth = 8,
+ .mas_rpm_id = 0,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_apps_proc_links),
+ .links = mas_apps_proc_links,
+};
+
+static const u16 mas_oxili_links[] = {
+ QNOC_BIMC_SNOC_SLV,
+ QNOC_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_oxili = {
+ .name = "mas_oxili",
+ .id = QNOC_MASTER_GRAPHICS_3D,
+ .buswidth = 8,
+ .mas_rpm_id = 6,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_oxili_links),
+ .links = mas_oxili_links,
+};
+
+static const u16 mas_snoc_bimc_0_links[] = {
+ QNOC_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_snoc_bimc_0 = {
+ .name = "mas_snoc_bimc_0",
+ .id = QNOC_SNOC_BIMC_0_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 3,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 3,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_0_links),
+ .links = mas_snoc_bimc_0_links,
+};
+
+static const u16 mas_snoc_bimc_1_links[] = {
+ QNOC_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_snoc_bimc_1 = {
+ .name = "mas_snoc_bimc_1",
+ .id = QNOC_SNOC_BIMC_1_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 76,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 4,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_1_links),
+ .links = mas_snoc_bimc_1_links,
+};
+
+static const u16 mas_tcu_0_links[] = {
+ QNOC_BIMC_SNOC_SLV,
+ QNOC_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_tcu_0 = {
+ .name = "mas_tcu_0",
+ .id = QNOC_MASTER_TCU_0,
+ .buswidth = 8,
+ .mas_rpm_id = 102,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 2,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(mas_tcu_0_links),
+ .links = mas_tcu_0_links,
+};
+
+static const u16 mas_tcu_1_links[] = {
+ QNOC_BIMC_SNOC_SLV,
+ QNOC_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_tcu_1 = {
+ .name = "mas_tcu_1",
+ .id = QNOC_MASTER_TCU_1,
+ .buswidth = 8,
+ .mas_rpm_id = 103,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 2,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(mas_tcu_1_links),
+ .links = mas_tcu_1_links,
+};
+
+static const u16 mas_audio_links[] = {
+ QNOC_PNOC_M_0
+};
+
+static struct qcom_icc_node mas_audio = {
+ .name = "mas_audio",
+ .id = QNOC_MASTER_AUDIO,
+ .buswidth = 4,
+ .mas_rpm_id = 78,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_audio_links),
+ .links = mas_audio_links,
+};
+
+static const u16 mas_spdm_links[] = {
+ QNOC_PNOC_M_0
+};
+
+static struct qcom_icc_node mas_spdm = {
+ .name = "mas_spdm",
+ .id = QNOC_MASTER_SPDM,
+ .buswidth = 4,
+ .mas_rpm_id = 50,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_spdm_links),
+ .links = mas_spdm_links,
+};
+
+static const u16 mas_dehr_links[] = {
+ QNOC_PNOC_M_0
+};
+
+static struct qcom_icc_node mas_dehr = {
+ .name = "mas_dehr",
+ .id = QNOC_MASTER_DEHR,
+ .buswidth = 4,
+ .mas_rpm_id = 48,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_dehr_links),
+ .links = mas_dehr_links,
+};
+
+static const u16 mas_qpic_links[] = {
+ QNOC_PNOC_M_0
+};
+
+static struct qcom_icc_node mas_qpic = {
+ .name = "mas_qpic",
+ .id = QNOC_MASTER_QPIC,
+ .buswidth = 4,
+ .mas_rpm_id = 58,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_qpic_links),
+ .links = mas_qpic_links,
+};
+
+static const u16 mas_blsp_1_links[] = {
+ QNOC_PNOC_M_1
+};
+
+static struct qcom_icc_node mas_blsp_1 = {
+ .name = "mas_blsp_1",
+ .id = QNOC_MASTER_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = 41,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_blsp_1_links),
+ .links = mas_blsp_1_links,
+};
+
+static const u16 mas_usb_hs_links[] = {
+ QNOC_PNOC_M_1
+};
+
+static struct qcom_icc_node mas_usb_hs = {
+ .name = "mas_usb_hs",
+ .id = QNOC_MASTER_USB_HS,
+ .buswidth = 4,
+ .mas_rpm_id = 42,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_usb_hs_links),
+ .links = mas_usb_hs_links,
+};
+
+static const u16 mas_crypto_links[] = {
+ QNOC_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_crypto = {
+ .name = "mas_crypto",
+ .id = QNOC_MASTER_CRYPTO_CORE0,
+ .buswidth = 8,
+ .mas_rpm_id = 23,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_crypto_links),
+ .links = mas_crypto_links,
+};
+
+static const u16 mas_sdcc_1_links[] = {
+ QNOC_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_sdcc_1 = {
+ .name = "mas_sdcc_1",
+ .id = QNOC_MASTER_SDCC_1,
+ .buswidth = 8,
+ .mas_rpm_id = 33,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 7,
+ .num_links = ARRAY_SIZE(mas_sdcc_1_links),
+ .links = mas_sdcc_1_links,
+};
+
+static const u16 mas_sdcc_2_links[] = {
+ QNOC_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_sdcc_2 = {
+ .name = "mas_sdcc_2",
+ .id = QNOC_MASTER_SDCC_2,
+ .buswidth = 8,
+ .mas_rpm_id = 35,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 8,
+ .num_links = ARRAY_SIZE(mas_sdcc_2_links),
+ .links = mas_sdcc_2_links,
+};
+
+static const u16 mas_snoc_pcnoc_links[] = {
+ QNOC_PNOC_INT_0
+};
+
+static struct qcom_icc_node mas_snoc_pcnoc = {
+ .name = "mas_snoc_pcnoc",
+ .id = QNOC_SNOC_PNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 77,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 9,
+ .num_links = ARRAY_SIZE(mas_snoc_pcnoc_links),
+ .links = mas_snoc_pcnoc_links,
+};
+
+static const u16 mas_qdss_bam_links[] = {
+ QNOC_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_qdss_bam = {
+ .name = "mas_qdss_bam",
+ .id = QNOC_MASTER_QDSS_BAM,
+ .buswidth = 4,
+ .mas_rpm_id = 19,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 11,
+ .num_links = ARRAY_SIZE(mas_qdss_bam_links),
+ .links = mas_qdss_bam_links,
+};
+
+static const u16 mas_bimc_snoc_links[] = {
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_1
+};
+
+static struct qcom_icc_node mas_bimc_snoc = {
+ .name = "mas_bimc_snoc",
+ .id = QNOC_BIMC_SNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 21,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_bimc_snoc_links),
+ .links = mas_bimc_snoc_links,
+};
+
+static const u16 mas_mdp_links[] = {
+ QNOC_SNOC_MM_INT_1,
+ QNOC_SNOC_MM_INT_2
+};
+
+static struct qcom_icc_node mas_mdp = {
+ .name = "mas_mdp",
+ .id = QNOC_MASTER_MDP_PORT0,
+ .buswidth = 16,
+ .mas_rpm_id = 8,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 7,
+ .num_links = ARRAY_SIZE(mas_mdp_links),
+ .links = mas_mdp_links,
+ .ab_coeff = 167,
+};
+
+static const u16 mas_pcnoc_snoc_links[] = {
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_1,
+ QNOC_SNOC_INT_BIMC
+};
+
+static struct qcom_icc_node mas_pcnoc_snoc = {
+ .name = "mas_pcnoc_snoc",
+ .id = QNOC_PNOC_SNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 29,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(mas_pcnoc_snoc_links),
+ .links = mas_pcnoc_snoc_links,
+};
+
+static const u16 mas_venus_links[] = {
+ QNOC_SNOC_MM_INT_0,
+ QNOC_SNOC_MM_INT_2
+};
+
+static struct qcom_icc_node mas_venus = {
+ .name = "mas_venus",
+ .id = QNOC_MASTER_VIDEO_P0,
+ .buswidth = 16,
+ .mas_rpm_id = 9,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 8,
+ .num_links = ARRAY_SIZE(mas_venus_links),
+ .links = mas_venus_links,
+ .ab_coeff = 167,
+};
+
+static const u16 mas_vfe_links[] = {
+ QNOC_SNOC_MM_INT_1,
+ QNOC_SNOC_MM_INT_2
+};
+
+static struct qcom_icc_node mas_vfe = {
+ .name = "mas_vfe",
+ .id = QNOC_MASTER_VFE,
+ .buswidth = 16,
+ .mas_rpm_id = 11,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 9,
+ .num_links = ARRAY_SIZE(mas_vfe_links),
+ .links = mas_vfe_links,
+ .ab_coeff = 167,
+};
+
+static const u16 mas_qdss_etr_links[] = {
+ QNOC_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_qdss_etr = {
+ .name = "mas_qdss_etr",
+ .id = QNOC_MASTER_QDSS_ETR,
+ .buswidth = 8,
+ .mas_rpm_id = 31,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 10,
+ .num_links = ARRAY_SIZE(mas_qdss_etr_links),
+ .links = mas_qdss_etr_links,
+};
+
+static const u16 pcnoc_m_0_links[] = {
+ QNOC_PNOC_SNOC_SLV
+};
+
+static struct qcom_icc_node pcnoc_m_0 = {
+ .name = "pcnoc_m_0",
+ .id = QNOC_PNOC_M_0,
+ .buswidth = 8,
+ .mas_rpm_id = 87,
+ .slv_rpm_id = 116,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(pcnoc_m_0_links),
+ .links = pcnoc_m_0_links,
+};
+
+static const u16 pcnoc_m_1_links[] = {
+ QNOC_PNOC_SNOC_SLV
+};
+
+static struct qcom_icc_node pcnoc_m_1 = {
+ .name = "pcnoc_m_1",
+ .id = QNOC_PNOC_M_1,
+ .buswidth = 8,
+ .mas_rpm_id = 88,
+ .slv_rpm_id = 117,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(pcnoc_m_1_links),
+ .links = pcnoc_m_1_links,
+};
+
+static const u16 pcnoc_int_0_links[] = {
+ QNOC_PNOC_SLV_3,
+ QNOC_PNOC_SLV_2,
+ QNOC_PNOC_SLV_1,
+ QNOC_PNOC_SLV_0,
+ QNOC_PNOC_SLV_7,
+ QNOC_PNOC_SLV_5,
+ QNOC_PNOC_SLV_4,
+ QNOC_SLAVE_TCU
+};
+
+static struct qcom_icc_node pcnoc_int_0 = {
+ .name = "pcnoc_int_0",
+ .id = QNOC_PNOC_INT_0,
+ .buswidth = 8,
+ .mas_rpm_id = 85,
+ .slv_rpm_id = 114,
+ .num_links = ARRAY_SIZE(pcnoc_int_0_links),
+ .links = pcnoc_int_0_links,
+};
+
+static const u16 pcnoc_int_1_links[] = {
+ QNOC_PNOC_SNOC_SLV
+};
+
+static struct qcom_icc_node pcnoc_int_1 = {
+ .name = "pcnoc_int_1",
+ .id = QNOC_PNOC_INT_1,
+ .buswidth = 8,
+ .mas_rpm_id = 86,
+ .slv_rpm_id = 115,
+ .num_links = ARRAY_SIZE(pcnoc_int_1_links),
+ .links = pcnoc_int_1_links,
+};
+
+static const u16 pcnoc_s_0_links[] = {
+ QNOC_SLAVE_SDCC_1,
+ QNOC_SLAVE_TCSR,
+ QNOC_SLAVE_BLSP_1
+};
+
+static struct qcom_icc_node pcnoc_s_0 = {
+ .name = "pcnoc_s_0",
+ .id = QNOC_PNOC_SLV_0,
+ .buswidth = 4,
+ .mas_rpm_id = 89,
+ .slv_rpm_id = 118,
+ .num_links = ARRAY_SIZE(pcnoc_s_0_links),
+ .links = pcnoc_s_0_links,
+};
+
+static const u16 pcnoc_s_1_links[] = {
+ QNOC_SLAVE_MESSAGE_RAM,
+ QNOC_SLAVE_CRYPTO_0_CFG,
+ QNOC_SLAVE_USB_HS,
+ QNOC_SLAVE_PDM,
+ QNOC_SLAVE_PRNG,
+ QNOC_SLAVE_QPIC
+};
+
+static struct qcom_icc_node pcnoc_s_1 = {
+ .name = "pcnoc_s_1",
+ .id = QNOC_PNOC_SLV_1,
+ .buswidth = 4,
+ .mas_rpm_id = 90,
+ .slv_rpm_id = 119,
+ .num_links = ARRAY_SIZE(pcnoc_s_1_links),
+ .links = pcnoc_s_1_links,
+};
+
+static const u16 pcnoc_s_2_links[] = {
+ QNOC_SLAVE_SPDM,
+ QNOC_SLAVE_SDCC_2,
+ QNOC_SLAVE_AUDIO,
+ QNOC_SLAVE_DEHR_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_2 = {
+ .name = "pcnoc_s_2",
+ .id = QNOC_PNOC_SLV_2,
+ .buswidth = 4,
+ .mas_rpm_id = 91,
+ .slv_rpm_id = 120,
+ .num_links = ARRAY_SIZE(pcnoc_s_2_links),
+ .links = pcnoc_s_2_links,
+};
+
+static const u16 pcnoc_s_3_links[] = {
+ QNOC_SLAVE_QDSS_CFG,
+ QNOC_SLAVE_USB_PHYS_CFG,
+ QNOC_SLAVE_SNOC_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_3 = {
+ .name = "pcnoc_s_3",
+ .id = QNOC_PNOC_SLV_3,
+ .buswidth = 4,
+ .mas_rpm_id = 92,
+ .slv_rpm_id = 121,
+ .num_links = ARRAY_SIZE(pcnoc_s_3_links),
+ .links = pcnoc_s_3_links,
+};
+
+static const u16 pcnoc_s_4_links[] = {
+ QNOC_SLAVE_CAMERA_CFG,
+ QNOC_SLAVE_DISPLAY_CFG,
+ QNOC_SLAVE_VENUS_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_4 = {
+ .name = "pcnoc_s_4",
+ .id = QNOC_PNOC_SLV_4,
+ .buswidth = 4,
+ .mas_rpm_id = 93,
+ .slv_rpm_id = 122,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(pcnoc_s_4_links),
+ .links = pcnoc_s_4_links,
+};
+
+static const u16 pcnoc_s_5_links[] = {
+ QNOC_SLAVE_TLMM
+};
+
+static struct qcom_icc_node pcnoc_s_5 = {
+ .name = "pcnoc_s_5",
+ .id = QNOC_PNOC_SLV_5,
+ .buswidth = 4,
+ .mas_rpm_id = 129,
+ .slv_rpm_id = 189,
+ .num_links = ARRAY_SIZE(pcnoc_s_5_links),
+ .links = pcnoc_s_5_links,
+};
+
+static const u16 pcnoc_s_7_links[] = {
+ QNOC_SLAVE_GRAPHICS_3D_CFG,
+ QNOC_SLAVE_IMEM_CFG,
+ QNOC_SLAVE_BIMC_CFG,
+ QNOC_SLAVE_PMIC_ARB
+};
+
+static struct qcom_icc_node pcnoc_s_7 = {
+ .name = "pcnoc_s_7",
+ .id = QNOC_PNOC_SLV_7,
+ .buswidth = 4,
+ .mas_rpm_id = 95,
+ .slv_rpm_id = 124,
+ .num_links = ARRAY_SIZE(pcnoc_s_7_links),
+ .links = pcnoc_s_7_links,
+};
+
+static const u16 mm_int_0_links[] = {
+ QNOC_SNOC_MM_INT_BIMC
+};
+
+static struct qcom_icc_node mm_int_0 = {
+ .name = "mm_int_0",
+ .id = QNOC_SNOC_MM_INT_0,
+ .buswidth = 16,
+ .mas_rpm_id = 79,
+ .slv_rpm_id = 108,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mm_int_0_links),
+ .links = mm_int_0_links,
+ .ab_coeff = 167,
+};
+
+static const u16 mm_int_1_links[] = {
+ QNOC_SNOC_MM_INT_BIMC
+};
+
+static struct qcom_icc_node mm_int_1 = {
+ .name = "mm_int_1",
+ .id = QNOC_SNOC_MM_INT_1,
+ .buswidth = 16,
+ .mas_rpm_id = 80,
+ .slv_rpm_id = 109,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mm_int_1_links),
+ .links = mm_int_1_links,
+ .ab_coeff = 167,
+};
+
+static const u16 mm_int_2_links[] = {
+ QNOC_SNOC_INT_0
+};
+
+static struct qcom_icc_node mm_int_2 = {
+ .name = "mm_int_2",
+ .id = QNOC_SNOC_MM_INT_2,
+ .buswidth = 16,
+ .mas_rpm_id = 81,
+ .slv_rpm_id = 110,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mm_int_2_links),
+ .links = mm_int_2_links,
+ .ab_coeff = 167,
+};
+
+static const u16 mm_int_bimc_links[] = {
+ QNOC_SNOC_BIMC_1_SLV
+};
+
+static struct qcom_icc_node mm_int_bimc = {
+ .name = "mm_int_bimc",
+ .id = QNOC_SNOC_MM_INT_BIMC,
+ .buswidth = 16,
+ .mas_rpm_id = 82,
+ .slv_rpm_id = 111,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mm_int_bimc_links),
+ .links = mm_int_bimc_links,
+ .ab_coeff = 167,
+};
+
+static const u16 qdss_int_links[] = {
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_BIMC
+};
+
+static struct qcom_icc_node qdss_int = {
+ .name = "qdss_int",
+ .id = QNOC_SNOC_QDSS_INT,
+ .buswidth = 8,
+ .mas_rpm_id = 98,
+ .slv_rpm_id = 128,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(qdss_int_links),
+ .links = qdss_int_links,
+};
+
+static const u16 snoc_int_0_links[] = {
+ QNOC_SLAVE_SYSTEM_IMEM,
+ QNOC_SLAVE_QDSS_STM,
+ QNOC_SNOC_PNOC_SLV
+};
+
+static struct qcom_icc_node snoc_int_0 = {
+ .name = "snoc_int_0",
+ .id = QNOC_SNOC_INT_0,
+ .buswidth = 8,
+ .mas_rpm_id = 99,
+ .slv_rpm_id = 130,
+ .num_links = ARRAY_SIZE(snoc_int_0_links),
+ .links = snoc_int_0_links,
+};
+
+static const u16 snoc_int_1_links[] = {
+ QNOC_SLAVE_CATS_128,
+ QNOC_SLAVE_APPSS,
+ QNOC_SLAVE_OCMEM_64
+};
+
+static struct qcom_icc_node snoc_int_1 = {
+ .name = "snoc_int_1",
+ .id = QNOC_SNOC_INT_1,
+ .buswidth = 8,
+ .mas_rpm_id = 100,
+ .slv_rpm_id = 131,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(snoc_int_1_links),
+ .links = snoc_int_1_links,
+};
+
+static const u16 snoc_int_bimc_links[] = {
+ QNOC_SNOC_BIMC_0_SLV
+};
+
+static struct qcom_icc_node snoc_int_bimc = {
+ .name = "snoc_int_bimc",
+ .id = QNOC_SNOC_INT_BIMC,
+ .buswidth = 8,
+ .mas_rpm_id = 101,
+ .slv_rpm_id = 132,
+ .num_links = ARRAY_SIZE(snoc_int_bimc_links),
+ .links = snoc_int_bimc_links,
+};
+
+static struct qcom_icc_node slv_ebi = {
+ .name = "slv_ebi",
+ .id = QNOC_SLAVE_EBI_CH0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 0,
+};
+
+static const u16 slv_bimc_snoc_links[] = {
+ QNOC_BIMC_SNOC_MAS
+};
+
+static struct qcom_icc_node slv_bimc_snoc = {
+ .name = "slv_bimc_snoc",
+ .id = QNOC_BIMC_SNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 2,
+ .num_links = ARRAY_SIZE(slv_bimc_snoc_links),
+ .links = slv_bimc_snoc_links,
+};
+
+static struct qcom_icc_node slv_tcsr = {
+ .name = "slv_tcsr",
+ .id = QNOC_SLAVE_TCSR,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 50,
+};
+
+static struct qcom_icc_node slv_sdcc_1 = {
+ .name = "slv_sdcc_1",
+ .id = QNOC_SLAVE_SDCC_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 31,
+};
+
+static struct qcom_icc_node slv_blsp_1 = {
+ .name = "slv_blsp_1",
+ .id = QNOC_SLAVE_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 39,
+};
+
+static struct qcom_icc_node slv_crypto_0_cfg = {
+ .name = "slv_crypto_0_cfg",
+ .id = QNOC_SLAVE_CRYPTO_0_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 52,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_message_ram = {
+ .name = "slv_message_ram",
+ .id = QNOC_SLAVE_MESSAGE_RAM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 55,
+};
+
+static struct qcom_icc_node slv_pdm = {
+ .name = "slv_pdm",
+ .id = QNOC_SLAVE_PDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 41,
+};
+
+static struct qcom_icc_node slv_prng = {
+ .name = "slv_prng",
+ .id = QNOC_SLAVE_PRNG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 44,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_usb_hs = {
+ .name = "slv_usb_hs",
+ .id = QNOC_SLAVE_USB_HS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 40,
+};
+
+static struct qcom_icc_node slv_qpic = {
+ .name = "slv_qpic",
+ .id = QNOC_SLAVE_QPIC,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 80,
+};
+
+static struct qcom_icc_node slv_spdm = {
+ .name = "slv_spdm",
+ .id = QNOC_SLAVE_SPDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 60,
+};
+
+static struct qcom_icc_node slv_sdcc_2 = {
+ .name = "slv_sdcc_2",
+ .id = QNOC_SLAVE_SDCC_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 33,
+};
+
+static struct qcom_icc_node slv_audio = {
+ .name = "slv_audio",
+ .id = QNOC_SLAVE_AUDIO,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 105,
+};
+
+static struct qcom_icc_node slv_dehr_cfg = {
+ .name = "slv_dehr_cfg",
+ .id = QNOC_SLAVE_DEHR_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 61,
+};
+
+static struct qcom_icc_node slv_snoc_cfg = {
+ .name = "slv_snoc_cfg",
+ .id = QNOC_SLAVE_SNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 70,
+};
+
+static struct qcom_icc_node slv_qdss_cfg = {
+ .name = "slv_qdss_cfg",
+ .id = QNOC_SLAVE_QDSS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 63,
+};
+
+static struct qcom_icc_node slv_usb_phy = {
+ .name = "slv_usb_phy",
+ .id = QNOC_SLAVE_USB_PHYS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 95,
+};
+
+static struct qcom_icc_node slv_camera_ss_cfg = {
+ .name = "slv_camera_ss_cfg",
+ .id = QNOC_SLAVE_CAMERA_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 3,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_disp_ss_cfg = {
+ .name = "slv_disp_ss_cfg",
+ .id = QNOC_SLAVE_DISPLAY_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_venus_cfg = {
+ .name = "slv_venus_cfg",
+ .id = QNOC_SLAVE_VENUS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 10,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_tlmm = {
+ .name = "slv_tlmm",
+ .id = QNOC_SLAVE_TLMM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 51,
+};
+
+static struct qcom_icc_node slv_gpu_cfg = {
+ .name = "slv_gpu_cfg",
+ .id = QNOC_SLAVE_GRAPHICS_3D_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 11,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_imem_cfg = {
+ .name = "slv_imem_cfg",
+ .id = QNOC_SLAVE_IMEM_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 54,
+};
+
+static struct qcom_icc_node slv_bimc_cfg = {
+ .name = "slv_bimc_cfg",
+ .id = QNOC_SLAVE_BIMC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 56,
+};
+
+static struct qcom_icc_node slv_pmic_arb = {
+ .name = "slv_pmic_arb",
+ .id = QNOC_SLAVE_PMIC_ARB,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 59,
+};
+
+static struct qcom_icc_node slv_tcu = {
+ .name = "slv_tcu",
+ .id = QNOC_SLAVE_TCU,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 133,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static const u16 slv_pcnoc_snoc_links[] = {
+ QNOC_PNOC_SNOC_MAS
+};
+
+static struct qcom_icc_node slv_pcnoc_snoc = {
+ .name = "slv_pcnoc_snoc",
+ .id = QNOC_PNOC_SNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 45,
+ .num_links = ARRAY_SIZE(slv_pcnoc_snoc_links),
+ .links = slv_pcnoc_snoc_links,
+};
+
+static struct qcom_icc_node slv_kpss_ahb = {
+ .name = "slv_kpss_ahb",
+ .id = QNOC_SLAVE_APPSS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 20,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static const u16 slv_snoc_bimc_0_links[] = {
+ QNOC_SNOC_BIMC_0_MAS
+};
+
+static struct qcom_icc_node slv_snoc_bimc_0 = {
+ .name = "slv_snoc_bimc_0",
+ .id = QNOC_SNOC_BIMC_0_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 24,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_0_links),
+ .links = slv_snoc_bimc_0_links,
+};
+
+static const u16 slv_snoc_bimc_1_links[] = {
+ QNOC_SNOC_BIMC_1_MAS
+};
+
+static struct qcom_icc_node slv_snoc_bimc_1 = {
+ .name = "slv_snoc_bimc_1",
+ .id = QNOC_SNOC_BIMC_1_SLV,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 104,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_1_links),
+ .links = slv_snoc_bimc_1_links,
+};
+
+static struct qcom_icc_node slv_imem = {
+ .name = "slv_imem",
+ .id = QNOC_SLAVE_SYSTEM_IMEM,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 26,
+};
+
+static const u16 slv_snoc_pcnoc_links[] = {
+ QNOC_SNOC_PNOC_MAS
+};
+
+static struct qcom_icc_node slv_snoc_pcnoc = {
+ .name = "slv_snoc_pcnoc",
+ .id = QNOC_SNOC_PNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 28,
+ .num_links = ARRAY_SIZE(slv_snoc_pcnoc_links),
+ .links = slv_snoc_pcnoc_links,
+};
+
+static struct qcom_icc_node slv_qdss_stm = {
+ .name = "slv_qdss_stm",
+ .id = QNOC_SLAVE_QDSS_STM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 30,
+};
+
+static struct qcom_icc_node slv_cats_0 = {
+ .name = "slv_cats_0",
+ .id = QNOC_SLAVE_CATS_128,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 106,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_cats_1 = {
+ .name = "slv_cats_1",
+ .id = QNOC_SLAVE_OCMEM_64,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 107,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node * const msm8909_bimc_nodes[] = {
+ [MAS_APPS_PROC] = &mas_apps_proc,
+ [MAS_OXILI] = &mas_oxili,
+ [MAS_SNOC_BIMC_0] = &mas_snoc_bimc_0,
+ [MAS_SNOC_BIMC_1] = &mas_snoc_bimc_1,
+ [MAS_TCU_0] = &mas_tcu_0,
+ [MAS_TCU_1] = &mas_tcu_1,
+ [SLV_EBI] = &slv_ebi,
+ [SLV_BIMC_SNOC] = &slv_bimc_snoc,
+};
+
+static const struct regmap_config msm8909_bimc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x62000,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc msm8909_bimc = {
+ .type = QCOM_ICC_BIMC,
+ .nodes = msm8909_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8909_bimc_nodes),
+ .bus_clk_desc = &bimc_clk,
+ .regmap_cfg = &msm8909_bimc_regmap_config,
+ .qos_offset = 0x8000,
+ .ab_coeff = 154,
+};
+
+static struct qcom_icc_node * const msm8909_pcnoc_nodes[] = {
+ [MAS_AUDIO] = &mas_audio,
+ [MAS_SPDM] = &mas_spdm,
+ [MAS_DEHR] = &mas_dehr,
+ [MAS_QPIC] = &mas_qpic,
+ [MAS_BLSP_1] = &mas_blsp_1,
+ [MAS_USB_HS] = &mas_usb_hs,
+ [MAS_CRYPTO] = &mas_crypto,
+ [MAS_SDCC_1] = &mas_sdcc_1,
+ [MAS_SDCC_2] = &mas_sdcc_2,
+ [MAS_SNOC_PCNOC] = &mas_snoc_pcnoc,
+ [PCNOC_M_0] = &pcnoc_m_0,
+ [PCNOC_M_1] = &pcnoc_m_1,
+ [PCNOC_INT_0] = &pcnoc_int_0,
+ [PCNOC_INT_1] = &pcnoc_int_1,
+ [PCNOC_S_0] = &pcnoc_s_0,
+ [PCNOC_S_1] = &pcnoc_s_1,
+ [PCNOC_S_2] = &pcnoc_s_2,
+ [PCNOC_S_3] = &pcnoc_s_3,
+ [PCNOC_S_4] = &pcnoc_s_4,
+ [PCNOC_S_5] = &pcnoc_s_5,
+ [PCNOC_S_7] = &pcnoc_s_7,
+ [SLV_TCSR] = &slv_tcsr,
+ [SLV_SDCC_1] = &slv_sdcc_1,
+ [SLV_BLSP_1] = &slv_blsp_1,
+ [SLV_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+ [SLV_MESSAGE_RAM] = &slv_message_ram,
+ [SLV_PDM] = &slv_pdm,
+ [SLV_PRNG] = &slv_prng,
+ [SLV_USB_HS] = &slv_usb_hs,
+ [SLV_QPIC] = &slv_qpic,
+ [SLV_SPDM] = &slv_spdm,
+ [SLV_SDCC_2] = &slv_sdcc_2,
+ [SLV_AUDIO] = &slv_audio,
+ [SLV_DEHR_CFG] = &slv_dehr_cfg,
+ [SLV_SNOC_CFG] = &slv_snoc_cfg,
+ [SLV_QDSS_CFG] = &slv_qdss_cfg,
+ [SLV_USB_PHY] = &slv_usb_phy,
+ [SLV_CAMERA_SS_CFG] = &slv_camera_ss_cfg,
+ [SLV_DISP_SS_CFG] = &slv_disp_ss_cfg,
+ [SLV_VENUS_CFG] = &slv_venus_cfg,
+ [SLV_TLMM] = &slv_tlmm,
+ [SLV_GPU_CFG] = &slv_gpu_cfg,
+ [SLV_IMEM_CFG] = &slv_imem_cfg,
+ [SLV_BIMC_CFG] = &slv_bimc_cfg,
+ [SLV_PMIC_ARB] = &slv_pmic_arb,
+ [SLV_TCU] = &slv_tcu,
+ [SLV_PCNOC_SNOC] = &slv_pcnoc_snoc,
+};
+
+static const struct regmap_config msm8909_pcnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11000,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc msm8909_pcnoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = msm8909_pcnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8909_pcnoc_nodes),
+ .bus_clk_desc = &bus_0_clk,
+ .regmap_cfg = &msm8909_pcnoc_regmap_config,
+ .qos_offset = 0x7000,
+};
+
+static struct qcom_icc_node * const msm8909_snoc_nodes[] = {
+ [MAS_QDSS_BAM] = &mas_qdss_bam,
+ [MAS_BIMC_SNOC] = &mas_bimc_snoc,
+ [MAS_MDP] = &mas_mdp,
+ [MAS_PCNOC_SNOC] = &mas_pcnoc_snoc,
+ [MAS_VENUS] = &mas_venus,
+ [MAS_VFE] = &mas_vfe,
+ [MAS_QDSS_ETR] = &mas_qdss_etr,
+ [MM_INT_0] = &mm_int_0,
+ [MM_INT_1] = &mm_int_1,
+ [MM_INT_2] = &mm_int_2,
+ [MM_INT_BIMC] = &mm_int_bimc,
+ [QDSS_INT] = &qdss_int,
+ [SNOC_INT_0] = &snoc_int_0,
+ [SNOC_INT_1] = &snoc_int_1,
+ [SNOC_INT_BIMC] = &snoc_int_bimc,
+ [SLV_KPSS_AHB] = &slv_kpss_ahb,
+ [SLV_SNOC_BIMC_0] = &slv_snoc_bimc_0,
+ [SLV_SNOC_BIMC_1] = &slv_snoc_bimc_1,
+ [SLV_IMEM] = &slv_imem,
+ [SLV_SNOC_PCNOC] = &slv_snoc_pcnoc,
+ [SLV_QDSS_STM] = &slv_qdss_stm,
+ [SLV_CATS_0] = &slv_cats_0,
+ [SLV_CATS_1] = &slv_cats_1,
+};
+
+static const struct regmap_config msm8909_snoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x13000,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc msm8909_snoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = msm8909_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8909_snoc_nodes),
+ .bus_clk_desc = &bus_1_clk,
+ .regmap_cfg = &msm8909_snoc_regmap_config,
+ .qos_offset = 0x7000,
+};
+
+static const struct of_device_id msm8909_noc_of_match[] = {
+ { .compatible = "qcom,msm8909-bimc", .data = &msm8909_bimc },
+ { .compatible = "qcom,msm8909-pcnoc", .data = &msm8909_pcnoc },
+ { .compatible = "qcom,msm8909-snoc", .data = &msm8909_snoc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm8909_noc_of_match);
+
+static struct platform_driver msm8909_noc_driver = {
+ .probe = qnoc_probe,
+ .remove_new = qnoc_remove,
+ .driver = {
+ .name = "qnoc-msm8909",
+ .of_match_table = msm8909_noc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(msm8909_noc_driver);
+
+MODULE_DESCRIPTION("Qualcomm MSM8909 NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/sa8775p.c b/drivers/interconnect/qcom/sa8775p.c
index dd6281db08adf..a729775c2aa45 100644
--- a/drivers/interconnect/qcom/sa8775p.c
+++ b/drivers/interconnect/qcom/sa8775p.c
@@ -2092,11 +2092,11 @@ static struct qcom_icc_bcm bcm_sn10 = {
.nodes = { &xs_qdss_stm },
};
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_sn3,
};
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_QUP_3] = &qxm_qup3,
[MASTER_EMAC] = &xm_emac_0,
[MASTER_EMAC_1] = &xm_emac_1,
@@ -2115,12 +2115,12 @@ static const struct qcom_icc_desc sa8775p_aggre1_noc = {
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
&bcm_sn4,
};
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QUP_0] = &qhm_qup0,
[MASTER_QUP_1] = &qhm_qup1,
@@ -2142,13 +2142,13 @@ static const struct qcom_icc_desc sa8775p_aggre2_noc = {
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
-static struct qcom_icc_bcm *clk_virt_bcms[] = {
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
&bcm_qup0,
&bcm_qup1,
&bcm_qup2,
};
-static struct qcom_icc_node *clk_virt_nodes[] = {
+static struct qcom_icc_node * const clk_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master,
@@ -2166,7 +2166,7 @@ static const struct qcom_icc_desc sa8775p_clk_virt = {
.num_bcms = ARRAY_SIZE(clk_virt_bcms),
};
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
&bcm_cn1,
&bcm_cn2,
@@ -2175,7 +2175,7 @@ static struct qcom_icc_bcm *config_noc_bcms[] = {
&bcm_sn10,
};
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
[MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
[SLAVE_AHB2PHY_0] = &qhs_ahb2phy0,
@@ -2271,10 +2271,10 @@ static const struct qcom_icc_desc sa8775p_config_noc = {
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
-static struct qcom_icc_bcm *dc_noc_bcms[] = {
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
@@ -2287,12 +2287,12 @@ static const struct qcom_icc_desc sa8775p_dc_noc = {
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
};
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_GPU_TCU] = &alm_gpu_tcu,
[MASTER_PCIE_TCU] = &alm_pcie_tcu,
[MASTER_SYS_TCU] = &alm_sys_tcu,
@@ -2323,12 +2323,12 @@ static const struct qcom_icc_desc sa8775p_gem_noc = {
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm *gpdsp_anoc_bcms[] = {
+static struct qcom_icc_bcm * const gpdsp_anoc_bcms[] = {
&bcm_gna0,
&bcm_gnb0,
};
-static struct qcom_icc_node *gpdsp_anoc_nodes[] = {
+static struct qcom_icc_node * const gpdsp_anoc_nodes[] = {
[MASTER_DSP0] = &qxm_dsp0,
[MASTER_DSP1] = &qxm_dsp1,
[SLAVE_GP_DSP_SAIL_NOC] = &qns_gp_dsp_sail_noc,
@@ -2341,11 +2341,11 @@ static const struct qcom_icc_desc sa8775p_gpdsp_anoc = {
.num_bcms = ARRAY_SIZE(gpdsp_anoc_bcms),
};
-static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
&bcm_sn9,
};
-static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
[MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
[MASTER_LPASS_PROC] = &qxm_lpass_dsp,
[SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
@@ -2364,12 +2364,12 @@ static const struct qcom_icc_desc sa8775p_lpass_ag_noc = {
.num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
};
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
};
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
};
@@ -2381,12 +2381,12 @@ static const struct qcom_icc_desc sa8775p_mc_virt = {
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
};
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
[MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
[MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
@@ -2413,12 +2413,12 @@ static const struct qcom_icc_desc sa8775p_mmss_noc = {
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
-static struct qcom_icc_bcm *nspa_noc_bcms[] = {
+static struct qcom_icc_bcm * const nspa_noc_bcms[] = {
&bcm_nsa0,
&bcm_nsa1,
};
-static struct qcom_icc_node *nspa_noc_nodes[] = {
+static struct qcom_icc_node * const nspa_noc_nodes[] = {
[MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
[MASTER_CDSP_PROC] = &qxm_nsp,
[SLAVE_HCP_A] = &qns_hcp,
@@ -2433,12 +2433,12 @@ static const struct qcom_icc_desc sa8775p_nspa_noc = {
.num_bcms = ARRAY_SIZE(nspa_noc_bcms),
};
-static struct qcom_icc_bcm *nspb_noc_bcms[] = {
+static struct qcom_icc_bcm * const nspb_noc_bcms[] = {
&bcm_nsb0,
&bcm_nsb1,
};
-static struct qcom_icc_node *nspb_noc_nodes[] = {
+static struct qcom_icc_node * const nspb_noc_nodes[] = {
[MASTER_CDSPB_NOC_CFG] = &qhm_nspb_noc_config,
[MASTER_CDSP_PROC_B] = &qxm_nspb,
[SLAVE_CDSPB_MEM_NOC] = &qns_nspb_gemnoc,
@@ -2453,11 +2453,11 @@ static const struct qcom_icc_desc sa8775p_nspb_noc = {
.num_bcms = ARRAY_SIZE(nspb_noc_bcms),
};
-static struct qcom_icc_bcm *pcie_anoc_bcms[] = {
+static struct qcom_icc_bcm * const pcie_anoc_bcms[] = {
&bcm_pci0,
};
-static struct qcom_icc_node *pcie_anoc_nodes[] = {
+static struct qcom_icc_node * const pcie_anoc_nodes[] = {
[MASTER_PCIE_0] = &xm_pcie3_0,
[MASTER_PCIE_1] = &xm_pcie3_1,
[SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
@@ -2470,7 +2470,7 @@ static const struct qcom_icc_desc sa8775p_pcie_anoc = {
.num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn3,
@@ -2478,7 +2478,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn9,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_GIC_AHB] = &qhm_gic,
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
diff --git a/drivers/interconnect/qcom/sm6115.c b/drivers/interconnect/qcom/sm6115.c
index 88b67634aa2f1..7e15ddf0a80a9 100644
--- a/drivers/interconnect/qcom/sm6115.c
+++ b/drivers/interconnect/qcom/sm6115.c
@@ -1193,7 +1193,7 @@ static struct qcom_icc_node slv_anoc_snoc = {
.links = slv_anoc_snoc_links,
};
-static struct qcom_icc_node *bimc_nodes[] = {
+static struct qcom_icc_node * const bimc_nodes[] = {
[MASTER_AMPSS_M0] = &apps_proc,
[MASTER_SNOC_BIMC_RT] = &mas_snoc_bimc_rt,
[MASTER_SNOC_BIMC_NRT] = &mas_snoc_bimc_nrt,
@@ -1223,7 +1223,7 @@ static const struct qcom_icc_desc sm6115_bimc = {
.ab_coeff = 153,
};
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
[SNOC_CNOC_MAS] = &mas_snoc_cnoc,
[MASTER_QDSS_DAP] = &xm_dap,
[SLAVE_AHB2PHY_USB] = &qhs_ahb2phy_usb,
@@ -1294,7 +1294,7 @@ static const struct qcom_icc_desc sm6115_config_noc = {
.keep_alive = true,
};
-static struct qcom_icc_node *sys_noc_nodes[] = {
+static struct qcom_icc_node * const sys_noc_nodes[] = {
[MASTER_CRYPTO_CORE0] = &crypto_c0,
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[MASTER_TIC] = &qhm_tic,
@@ -1339,7 +1339,7 @@ static const struct qcom_icc_desc sm6115_sys_noc = {
.keep_alive = true,
};
-static struct qcom_icc_node *clk_virt_nodes[] = {
+static struct qcom_icc_node * const clk_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup0_core_master,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
};
@@ -1353,7 +1353,7 @@ static const struct qcom_icc_desc sm6115_clk_virt = {
.keep_alive = true,
};
-static struct qcom_icc_node *mmnrt_virt_nodes[] = {
+static struct qcom_icc_node * const mmnrt_virt_nodes[] = {
[MASTER_CAMNOC_SF] = &qnm_camera_nrt,
[MASTER_VIDEO_P0] = &qxm_venus0,
[MASTER_VIDEO_PROC] = &qxm_venus_cpu,
@@ -1370,7 +1370,7 @@ static const struct qcom_icc_desc sm6115_mmnrt_virt = {
.ab_coeff = 142,
};
-static struct qcom_icc_node *mmrt_virt_nodes[] = {
+static struct qcom_icc_node * const mmrt_virt_nodes[] = {
[MASTER_CAMNOC_HF] = &qnm_camera_rt,
[MASTER_MDP_PORT0] = &qxm_mdp0,
[SLAVE_SNOC_BIMC_RT] = &slv_snoc_bimc_rt,
diff --git a/drivers/interconnect/qcom/sm7150.c b/drivers/interconnect/qcom/sm7150.c
new file mode 100644
index 0000000000000..dc0d1343f5100
--- /dev/null
+++ b/drivers/interconnect/qcom/sm7150.c
@@ -0,0 +1,1754 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com>
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/interconnect/qcom,sm7150-rpmh.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sm7150.h"
+
+static struct qcom_icc_node qhm_a1noc_cfg = {
+ .name = "qhm-a1noc-cfg",
+ .id = SM7150_MASTER_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_SERVICE_A1NOC },
+};
+
+static struct qcom_icc_node qhm_qup_center = {
+ .name = "qhm_qup_center",
+ .id = SM7150_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_tsif = {
+ .name = "qhm_tsif",
+ .id = SM7150_MASTER_TSIF,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_emmc = {
+ .name = "xm_emmc",
+ .id = SM7150_MASTER_EMMC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SM7150_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .id = SM7150_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SM7150_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_a2noc_cfg = {
+ .name = "qhm_a2noc_cfg",
+ .id = SM7150_MASTER_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_SERVICE_A2NOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SM7150_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_qup_north = {
+ .name = "qhm_qup_north",
+ .id = SM7150_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qnm_cnoc = {
+ .name = "qnm_cnoc",
+ .id = SM7150_MASTER_CNOC_A2NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SM7150_MASTER_CRYPTO_CORE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SM7150_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_pcie3_0 = {
+ .name = "xm_pcie3_0",
+ .id = SM7150_MASTER_PCIE,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr = {
+ .name = "xm_qdss_etr",
+ .id = SM7150_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SM7150_MASTER_USB3,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
+ .name = "qxm_camnoc_hf0_uncomp",
+ .id = SM7150_MASTER_CAMNOC_HF0_UNCOMP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qxm_camnoc_rt_uncomp = {
+ .name = "qxm_camnoc_rt_uncomp",
+ .id = SM7150_MASTER_CAMNOC_RT_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
+ .name = "qxm_camnoc_sf_uncomp",
+ .id = SM7150_MASTER_CAMNOC_SF_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qxm_camnoc_nrt_uncomp = {
+ .name = "qxm_camnoc_nrt_uncomp",
+ .id = SM7150_MASTER_CAMNOC_NRT_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qnm_npu = {
+ .name = "qnm_npu",
+ .id = SM7150_MASTER_NPU,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_CDSP_GEM_NOC },
+};
+
+static struct qcom_icc_node qhm_spdm = {
+ .name = "qhm_spdm",
+ .id = SM7150_MASTER_SPDM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_CNOC_A2NOC },
+};
+
+static struct qcom_icc_node qnm_snoc = {
+ .name = "qnm_snoc",
+ .id = SM7150_SNOC_CNOC_MAS,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 47,
+ .links = { SM7150_SLAVE_TLMM_SOUTH,
+ SM7150_SLAVE_CAMERA_CFG,
+ SM7150_SLAVE_SDCC_4,
+ SM7150_SLAVE_SDCC_2,
+ SM7150_SLAVE_CNOC_MNOC_CFG,
+ SM7150_SLAVE_UFS_MEM_CFG,
+ SM7150_SLAVE_QUP_0,
+ SM7150_SLAVE_GLM,
+ SM7150_SLAVE_PDM,
+ SM7150_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ SM7150_SLAVE_A2NOC_CFG,
+ SM7150_SLAVE_QDSS_CFG,
+ SM7150_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ SM7150_SLAVE_DISPLAY_CFG,
+ SM7150_SLAVE_PCIE_CFG,
+ SM7150_SLAVE_DISPLAY_THROTTLE_CFG,
+ SM7150_SLAVE_TCSR,
+ SM7150_SLAVE_VENUS_CVP_THROTTLE_CFG,
+ SM7150_SLAVE_CNOC_DDRSS,
+ SM7150_SLAVE_AHB2PHY_NORTH,
+ SM7150_SLAVE_SNOC_CFG,
+ SM7150_SLAVE_GRAPHICS_3D_CFG,
+ SM7150_SLAVE_VENUS_CFG,
+ SM7150_SLAVE_TSIF,
+ SM7150_SLAVE_CDSP_CFG,
+ SM7150_SLAVE_CLK_CTL,
+ SM7150_SLAVE_AOP,
+ SM7150_SLAVE_QUP_1,
+ SM7150_SLAVE_AHB2PHY_SOUTH,
+ SM7150_SLAVE_SERVICE_CNOC,
+ SM7150_SLAVE_AHB2PHY_WEST,
+ SM7150_SLAVE_USB3,
+ SM7150_SLAVE_VENUS_THROTTLE_CFG,
+ SM7150_SLAVE_IPA_CFG,
+ SM7150_SLAVE_RBCPR_CX_CFG,
+ SM7150_SLAVE_TLMM_WEST,
+ SM7150_SLAVE_A1NOC_CFG,
+ SM7150_SLAVE_AOSS,
+ SM7150_SLAVE_PRNG,
+ SM7150_SLAVE_VSENSE_CTRL_CFG,
+ SM7150_SLAVE_EMMC_CFG,
+ SM7150_SLAVE_SPDM_WRAPPER,
+ SM7150_SLAVE_CRYPTO_0_CFG,
+ SM7150_SLAVE_PIMEM_CFG,
+ SM7150_SLAVE_TLMM_NORTH,
+ SM7150_SLAVE_RBCPR_MX_CFG,
+ SM7150_SLAVE_IMEM_CFG
+ },
+};
+
+static struct qcom_icc_node xm_qdss_dap = {
+ .name = "xm_qdss_dap",
+ .id = SM7150_MASTER_QDSS_DAP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 48,
+ .links = { SM7150_SLAVE_TLMM_SOUTH,
+ SM7150_SLAVE_CAMERA_CFG,
+ SM7150_SLAVE_SDCC_4,
+ SM7150_SLAVE_SDCC_2,
+ SM7150_SLAVE_CNOC_MNOC_CFG,
+ SM7150_SLAVE_UFS_MEM_CFG,
+ SM7150_SLAVE_QUP_0,
+ SM7150_SLAVE_GLM,
+ SM7150_SLAVE_PDM,
+ SM7150_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ SM7150_SLAVE_A2NOC_CFG,
+ SM7150_SLAVE_QDSS_CFG,
+ SM7150_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ SM7150_SLAVE_DISPLAY_CFG,
+ SM7150_SLAVE_PCIE_CFG,
+ SM7150_SLAVE_DISPLAY_THROTTLE_CFG,
+ SM7150_SLAVE_TCSR,
+ SM7150_SLAVE_VENUS_CVP_THROTTLE_CFG,
+ SM7150_SLAVE_CNOC_DDRSS,
+ SM7150_SLAVE_CNOC_A2NOC,
+ SM7150_SLAVE_AHB2PHY_NORTH,
+ SM7150_SLAVE_SNOC_CFG,
+ SM7150_SLAVE_GRAPHICS_3D_CFG,
+ SM7150_SLAVE_VENUS_CFG,
+ SM7150_SLAVE_TSIF,
+ SM7150_SLAVE_CDSP_CFG,
+ SM7150_SLAVE_CLK_CTL,
+ SM7150_SLAVE_AOP,
+ SM7150_SLAVE_QUP_1,
+ SM7150_SLAVE_AHB2PHY_SOUTH,
+ SM7150_SLAVE_SERVICE_CNOC,
+ SM7150_SLAVE_AHB2PHY_WEST,
+ SM7150_SLAVE_USB3,
+ SM7150_SLAVE_VENUS_THROTTLE_CFG,
+ SM7150_SLAVE_IPA_CFG,
+ SM7150_SLAVE_RBCPR_CX_CFG,
+ SM7150_SLAVE_TLMM_WEST,
+ SM7150_SLAVE_A1NOC_CFG,
+ SM7150_SLAVE_AOSS,
+ SM7150_SLAVE_PRNG,
+ SM7150_SLAVE_VSENSE_CTRL_CFG,
+ SM7150_SLAVE_EMMC_CFG,
+ SM7150_SLAVE_SPDM_WRAPPER,
+ SM7150_SLAVE_CRYPTO_0_CFG,
+ SM7150_SLAVE_PIMEM_CFG,
+ SM7150_SLAVE_TLMM_NORTH,
+ SM7150_SLAVE_RBCPR_MX_CFG,
+ SM7150_SLAVE_IMEM_CFG
+ },
+};
+
+static struct qcom_icc_node qhm_cnoc_dc_noc = {
+ .name = "qhm_cnoc_dc_noc",
+ .id = SM7150_MASTER_CNOC_DC_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_LLCC_CFG,
+ SM7150_SLAVE_GEM_NOC_CFG
+ },
+};
+
+static struct qcom_icc_node acm_apps = {
+ .name = "acm_apps",
+ .id = SM7150_MASTER_AMPSS_M0,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_LLCC,
+ SM7150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node acm_sys_tcu = {
+ .name = "acm_sys_tcu",
+ .id = SM7150_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_LLCC,
+ SM7150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qhm_gemnoc_cfg = {
+ .name = "qhm_gemnoc_cfg",
+ .id = SM7150_MASTER_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_SERVICE_GEM_NOC,
+ SM7150_SLAVE_MSS_PROC_MS_MPU_CFG
+ },
+};
+
+static struct qcom_icc_node qnm_cmpnoc = {
+ .name = "qnm_cmpnoc",
+ .id = SM7150_MASTER_COMPUTE_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_LLCC,
+ SM7150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SM7150_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SM7150_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_LLCC,
+ SM7150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .id = SM7150_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_LLCC,
+ SM7150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = SM7150_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SM7150_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qxm_gpu = {
+ .name = "qxm_gpu",
+ .id = SM7150_MASTER_GRAPHICS_3D,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_LLCC,
+ SM7150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SM7150_MASTER_LLCC,
+ .channels = 2,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_EBI_CH0 },
+};
+
+static struct qcom_icc_node qhm_mnoc_cfg = {
+ .name = "qhm_mnoc_cfg",
+ .id = SM7150_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf = {
+ .name = "qxm_camnoc_hf",
+ .id = SM7150_MASTER_CAMNOC_HF0,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_nrt = {
+ .name = "qxm_camnoc_nrt",
+ .id = SM7150_MASTER_CAMNOC_NRT,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_rt = {
+ .name = "qxm_camnoc_rt",
+ .id = SM7150_MASTER_CAMNOC_RT,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_sf = {
+ .name = "qxm_camnoc_sf",
+ .id = SM7150_MASTER_CAMNOC_SF,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_mdp0 = {
+ .name = "qxm_mdp0",
+ .id = SM7150_MASTER_MDP_PORT0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_mdp1 = {
+ .name = "qxm_mdp1",
+ .id = SM7150_MASTER_MDP_PORT1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_rot = {
+ .name = "qxm_rot",
+ .id = SM7150_MASTER_ROTATOR,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_venus0 = {
+ .name = "qxm_venus0",
+ .id = SM7150_MASTER_VIDEO_P0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_venus1 = {
+ .name = "qxm_venus1",
+ .id = SM7150_MASTER_VIDEO_P1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_venus_arm9 = {
+ .name = "qxm_venus_arm9",
+ .id = SM7150_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qhm_snoc_cfg = {
+ .name = "qhm_snoc_cfg",
+ .id = SM7150_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SM7150_A1NOC_SNOC_MAS,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 6,
+ .links = { SM7150_SLAVE_SNOC_GEM_NOC_SF,
+ SM7150_SLAVE_PIMEM,
+ SM7150_SLAVE_OCIMEM,
+ SM7150_SLAVE_APPSS,
+ SM7150_SNOC_CNOC_SLV,
+ SM7150_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SM7150_A2NOC_SNOC_MAS,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 7,
+ .links = { SM7150_SLAVE_SNOC_GEM_NOC_SF,
+ SM7150_SLAVE_PIMEM,
+ SM7150_SLAVE_OCIMEM,
+ SM7150_SLAVE_APPSS,
+ SM7150_SNOC_CNOC_SLV,
+ SM7150_SLAVE_TCU,
+ SM7150_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qnm_gemnoc = {
+ .name = "qnm_gemnoc",
+ .id = SM7150_MASTER_GEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 6,
+ .links = { SM7150_SLAVE_PIMEM,
+ SM7150_SLAVE_OCIMEM,
+ SM7150_SLAVE_APPSS,
+ SM7150_SNOC_CNOC_SLV,
+ SM7150_SLAVE_TCU,
+ SM7150_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qxm_pimem = {
+ .name = "qxm_pimem",
+ .id = SM7150_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_SNOC_GEM_NOC_GC,
+ SM7150_SLAVE_OCIMEM
+ },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SM7150_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_SNOC_GEM_NOC_GC,
+ SM7150_SLAVE_OCIMEM
+ },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SM7150_A1NOC_SNOC_SLV,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM7150_A1NOC_SNOC_MAS },
+};
+
+static struct qcom_icc_node srvc_aggre1_noc = {
+ .name = "srvc_aggre1_noc",
+ .id = SM7150_SLAVE_SERVICE_A1NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SM7150_A2NOC_SNOC_SLV,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM7150_A2NOC_SNOC_MAS },
+};
+
+static struct qcom_icc_node qns_pcie_gemnoc = {
+ .name = "qns_pcie_gemnoc",
+ .id = SM7150_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node srvc_aggre2_noc = {
+ .name = "srvc_aggre2_noc",
+ .id = SM7150_SLAVE_SERVICE_A2NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_camnoc_uncomp = {
+ .name = "qns_camnoc_uncomp",
+ .id = SM7150_SLAVE_CAMNOC_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node qns_cdsp_gemnoc = {
+ .name = "qns_cdsp_gemnoc",
+ .id = SM7150_SLAVE_CDSP_GEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qhs_a1_noc_cfg = {
+ .name = "qhs_a1_noc_cfg",
+ .id = SM7150_SLAVE_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_MASTER_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_a2_noc_cfg = {
+ .name = "qhs_a2_noc_cfg",
+ .id = SM7150_SLAVE_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_MASTER_A2NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_ahb2phy_north = {
+ .name = "qhs_ahb2phy_north",
+ .id = SM7150_SLAVE_AHB2PHY_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy_south = {
+ .name = "qhs_ahb2phy_south",
+ .id = SM7150_SLAVE_AHB2PHY_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy_west = {
+ .name = "qhs_ahb2phy_west",
+ .id = SM7150_SLAVE_AHB2PHY_WEST,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_aop = {
+ .name = "qhs_aop",
+ .id = SM7150_SLAVE_AOP,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SM7150_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SM7150_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_nrt_thrott_cfg = {
+ .name = "qhs_camera_nrt_thrott_cfg",
+ .id = SM7150_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_rt_throttle_cfg = {
+ .name = "qhs_camera_rt_throttle_cfg",
+ .id = SM7150_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SM7150_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_compute_dsp_cfg = {
+ .name = "qhs_compute_dsp_cfg",
+ .id = SM7150_SLAVE_CDSP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = SM7150_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_mx = {
+ .name = "qhs_cpr_mx",
+ .id = SM7150_SLAVE_RBCPR_MX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SM7150_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ddrss_cfg = {
+ .name = "qhs_ddrss_cfg",
+ .id = SM7150_SLAVE_CNOC_DDRSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_MASTER_CNOC_DC_NOC },
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .id = SM7150_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display_throttle_cfg = {
+ .name = "qhs_display_throttle_cfg",
+ .id = SM7150_SLAVE_DISPLAY_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_emmc_cfg = {
+ .name = "qhs_emmc_cfg",
+ .id = SM7150_SLAVE_EMMC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_glm = {
+ .name = "qhs_glm",
+ .id = SM7150_SLAVE_GLM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SM7150_SLAVE_GRAPHICS_3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SM7150_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SM7150_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mnoc_cfg = {
+ .name = "qhs_mnoc_cfg",
+ .id = SM7150_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_pcie_cfg = {
+ .name = "qhs_pcie_cfg",
+ .id = SM7150_SLAVE_PCIE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SM7150_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+ .name = "qhs_pimem_cfg",
+ .id = SM7150_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = SM7150_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SM7150_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qupv3_center = {
+ .name = "qhs_qupv3_center",
+ .id = SM7150_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qupv3_north = {
+ .name = "qhs_qupv3_north",
+ .id = SM7150_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SM7150_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .id = SM7150_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_snoc_cfg = {
+ .name = "qhs_snoc_cfg",
+ .id = SM7150_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_spdm = {
+ .name = "qhs_spdm",
+ .id = SM7150_SLAVE_SPDM_WRAPPER,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SM7150_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm_north = {
+ .name = "qhs_tlmm_north",
+ .id = SM7150_SLAVE_TLMM_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm_south = {
+ .name = "qhs_tlmm_south",
+ .id = SM7150_SLAVE_TLMM_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm_west = {
+ .name = "qhs_tlmm_west",
+ .id = SM7150_SLAVE_TLMM_WEST,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tsif = {
+ .name = "qhs_tsif",
+ .id = SM7150_SLAVE_TSIF,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SM7150_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+ .name = "qhs_usb3_0",
+ .id = SM7150_SLAVE_USB3,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SM7150_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cvp_throttle_cfg = {
+ .name = "qhs_venus_cvp_throttle_cfg",
+ .id = SM7150_SLAVE_VENUS_CVP_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_throttle_cfg = {
+ .name = "qhs_venus_throttle_cfg",
+ .id = SM7150_SLAVE_VENUS_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SM7150_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_cnoc_a2noc = {
+ .name = "qns_cnoc_a2noc",
+ .id = SM7150_SLAVE_CNOC_A2NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_MASTER_CNOC_A2NOC },
+};
+
+static struct qcom_icc_node srvc_cnoc = {
+ .name = "srvc_cnoc",
+ .id = SM7150_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gemnoc = {
+ .name = "qhs_gemnoc",
+ .id = SM7150_SLAVE_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_MASTER_GEM_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_llcc = {
+ .name = "qhs_llcc",
+ .id = SM7150_SLAVE_LLCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
+ .name = "qhs_mdsp_ms_mpu_cfg",
+ .id = SM7150_SLAVE_MSS_PROC_MS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_gem_noc_snoc = {
+ .name = "qns_gem_noc_snoc",
+ .id = SM7150_SLAVE_GEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_MASTER_GEM_NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SM7150_SLAVE_LLCC,
+ .channels = 2,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM7150_MASTER_LLCC },
+};
+
+static struct qcom_icc_node srvc_gemnoc = {
+ .name = "srvc_gemnoc",
+ .id = SM7150_SLAVE_SERVICE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SM7150_SLAVE_EBI_CH0,
+ .channels = 2,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns2_mem_noc = {
+ .name = "qns2_mem_noc",
+ .id = SM7150_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SM7150_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = SM7150_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_apss = {
+ .name = "qhs_apss",
+ .id = SM7150_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qns_cnoc = {
+ .name = "qns_cnoc",
+ .id = SM7150_SNOC_CNOC_SLV,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_SNOC_CNOC_MAS },
+};
+
+static struct qcom_icc_node qns_gemnoc_gc = {
+ .name = "qns_gemnoc_gc",
+ .id = SM7150_SLAVE_SNOC_GEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = SM7150_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM7150_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SM7150_SLAVE_OCIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+ .name = "qxs_pimem",
+ .id = SM7150_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node srvc_snoc = {
+ .name = "srvc_snoc",
+ .id = SM7150_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SM7150_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SM7150_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .enable_mask = BIT(3),
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .keepalive = true,
+ .num_nodes = 8,
+ .nodes = { &qxm_camnoc_hf0_uncomp,
+ &qxm_camnoc_rt_uncomp,
+ &qxm_camnoc_sf_uncomp,
+ &qxm_camnoc_nrt_uncomp,
+ &qxm_camnoc_hf,
+ &qxm_camnoc_rt,
+ &qxm_mdp0,
+ &qxm_mdp1
+ },
+};
+
+static struct qcom_icc_bcm bcm_sh2 = {
+ .name = "SH2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_gem_noc_snoc },
+};
+
+static struct qcom_icc_bcm bcm_sh3 = {
+ .name = "SH3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &acm_sys_tcu },
+};
+
+static struct qcom_icc_bcm bcm_mm2 = {
+ .name = "MM2",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qxm_camnoc_nrt,
+ &qns2_mem_noc
+ },
+};
+
+static struct qcom_icc_bcm bcm_mm3 = {
+ .name = "MM3",
+ .keepalive = false,
+ .num_nodes = 5,
+ .nodes = { &qxm_camnoc_sf,
+ &qxm_rot,
+ &qxm_venus0,
+ &qxm_venus1,
+ &qxm_venus_arm9
+ },
+};
+
+static struct qcom_icc_bcm bcm_sh5 = {
+ .name = "SH5",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &acm_apps },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sh8 = {
+ .name = "SH8",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_cdsp_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_sh10 = {
+ .name = "SH10",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_npu },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 54,
+ .nodes = { &qhm_tsif,
+ &xm_emmc,
+ &xm_sdc2,
+ &xm_sdc4,
+ &qhm_spdm,
+ &qnm_snoc,
+ &qhs_a1_noc_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_ahb2phy_north,
+ &qhs_ahb2phy_south,
+ &qhs_ahb2phy_west,
+ &qhs_aop,
+ &qhs_aoss,
+ &qhs_camera_cfg,
+ &qhs_camera_nrt_thrott_cfg,
+ &qhs_camera_rt_throttle_cfg,
+ &qhs_clk_ctl,
+ &qhs_compute_dsp_cfg,
+ &qhs_cpr_cx,
+ &qhs_cpr_mx,
+ &qhs_crypto0_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_display_cfg,
+ &qhs_display_throttle_cfg,
+ &qhs_emmc_cfg,
+ &qhs_glm,
+ &qhs_gpuss_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_mnoc_cfg,
+ &qhs_pcie_cfg,
+ &qhs_pdm,
+ &qhs_pimem_cfg,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qupv3_center,
+ &qhs_qupv3_north,
+ &qhs_sdc2,
+ &qhs_sdc4,
+ &qhs_snoc_cfg,
+ &qhs_spdm,
+ &qhs_tcsr,
+ &qhs_tlmm_north,
+ &qhs_tlmm_south,
+ &qhs_tlmm_west,
+ &qhs_tsif,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3_0,
+ &qhs_venus_cfg,
+ &qhs_venus_cvp_throttle_cfg,
+ &qhs_venus_throttle_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &qns_cnoc_a2noc,
+ &srvc_cnoc
+ },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qhm_qup_center,
+ &qhm_qup_north
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_imem },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_gc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_pimem },
+};
+
+static struct qcom_icc_bcm bcm_sn9 = {
+ .name = "SN9",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qnm_aggre1_noc,
+ &qns_a1noc_snoc
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn11 = {
+ .name = "SN11",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qnm_aggre2_noc,
+ &qns_a2noc_snoc
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn12 = {
+ .name = "SN12",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qxm_pimem,
+ &xm_gic
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn14 = {
+ .name = "SN14",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_pcie_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_sn15 = {
+ .name = "SN15",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_gemnoc },
+};
+
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ &bcm_cn0,
+ &bcm_qup0,
+ &bcm_sn9,
+};
+
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
+ [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
+ [MASTER_QUP_0] = &qhm_qup_center,
+ [MASTER_TSIF] = &qhm_tsif,
+ [MASTER_EMMC] = &xm_emmc,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [A1NOC_SNOC_SLV] = &qns_a1noc_snoc,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+};
+
+static const struct qcom_icc_desc sm7150_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
+ &bcm_ce0,
+ &bcm_qup0,
+ &bcm_sn11,
+ &bcm_sn14,
+};
+
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
+ [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QUP_1] = &qhm_qup_north,
+ [MASTER_CNOC_A2NOC] = &qnm_cnoc,
+ [MASTER_CRYPTO_CORE_0] = &qxm_crypto,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_PCIE] = &xm_pcie3_0,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [MASTER_USB3] = &xm_usb3_0,
+ [A2NOC_SNOC_SLV] = &qns_a2noc_snoc,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_gemnoc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+};
+
+static const struct qcom_icc_desc sm7150_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const camnoc_virt_bcms[] = {
+ &bcm_mm1,
+};
+
+static struct qcom_icc_node * const camnoc_virt_nodes[] = {
+ [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
+ [MASTER_CAMNOC_RT_UNCOMP] = &qxm_camnoc_rt_uncomp,
+ [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
+ [MASTER_CAMNOC_NRT_UNCOMP] = &qxm_camnoc_nrt_uncomp,
+ [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
+};
+
+static const struct qcom_icc_desc sm7150_camnoc_virt = {
+ .nodes = camnoc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(camnoc_virt_nodes),
+ .bcms = camnoc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(camnoc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const compute_noc_bcms[] = {
+ &bcm_sh10,
+ &bcm_sh8,
+};
+
+static struct qcom_icc_node * const compute_noc_nodes[] = {
+ [MASTER_NPU] = &qnm_npu,
+ [SLAVE_CDSP_GEM_NOC] = &qns_cdsp_gemnoc,
+};
+
+static const struct qcom_icc_desc sm7150_compute_noc = {
+ .nodes = compute_noc_nodes,
+ .num_nodes = ARRAY_SIZE(compute_noc_nodes),
+ .bcms = compute_noc_bcms,
+ .num_bcms = ARRAY_SIZE(compute_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node * const config_noc_nodes[] = {
+ [MASTER_SPDM] = &qhm_spdm,
+ [SNOC_CNOC_MAS] = &qnm_snoc,
+ [MASTER_QDSS_DAP] = &xm_qdss_dap,
+ [SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
+ [SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
+ [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy_north,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy_south,
+ [SLAVE_AHB2PHY_WEST] = &qhs_ahb2phy_west,
+ [SLAVE_AOP] = &qhs_aop,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CAMERA_NRT_THROTTLE_CFG] = &qhs_camera_nrt_thrott_cfg,
+ [SLAVE_CAMERA_RT_THROTTLE_CFG] = &qhs_camera_rt_throttle_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CDSP_CFG] = &qhs_compute_dsp_cfg,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_DISPLAY_THROTTLE_CFG] = &qhs_display_throttle_cfg,
+ [SLAVE_EMMC_CFG] = &qhs_emmc_cfg,
+ [SLAVE_GLM] = &qhs_glm,
+ [SLAVE_GRAPHICS_3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
+ [SLAVE_PCIE_CFG] = &qhs_pcie_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QUP_0] = &qhs_qupv3_center,
+ [SLAVE_QUP_1] = &qhs_qupv3_north,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+ [SLAVE_SPDM_WRAPPER] = &qhs_spdm,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM_NORTH] = &qhs_tlmm_north,
+ [SLAVE_TLMM_SOUTH] = &qhs_tlmm_south,
+ [SLAVE_TLMM_WEST] = &qhs_tlmm_west,
+ [SLAVE_TSIF] = &qhs_tsif,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3] = &qhs_usb3_0,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VENUS_CVP_THROTTLE_CFG] = &qhs_venus_cvp_throttle_cfg,
+ [SLAVE_VENUS_THROTTLE_CFG] = &qhs_venus_throttle_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+};
+
+static const struct qcom_icc_desc sm7150_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
+};
+
+static struct qcom_icc_node * const dc_noc_nodes[] = {
+ [MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
+ [SLAVE_GEM_NOC_CFG] = &qhs_gemnoc,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+};
+
+static const struct qcom_icc_desc sm7150_dc_noc = {
+ .nodes = dc_noc_nodes,
+ .num_nodes = ARRAY_SIZE(dc_noc_nodes),
+ .bcms = dc_noc_bcms,
+ .num_bcms = ARRAY_SIZE(dc_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh2,
+ &bcm_sh3,
+ &bcm_sh5,
+};
+
+static struct qcom_icc_node * const gem_noc_nodes[] = {
+ [MASTER_AMPSS_M0] = &acm_apps,
+ [MASTER_SYS_TCU] = &acm_sys_tcu,
+ [MASTER_GEM_NOC_CFG] = &qhm_gemnoc_cfg,
+ [MASTER_COMPUTE_NOC] = &qnm_cmpnoc,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_pcie,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_GRAPHICS_3D] = &qxm_gpu,
+ [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
+ [SLAVE_GEM_NOC_SNOC] = &qns_gem_noc_snoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc,
+};
+
+static const struct qcom_icc_desc sm7150_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI_CH0] = &ebi,
+};
+
+static const struct qcom_icc_desc sm7150_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+ &bcm_mm2,
+ &bcm_mm3,
+};
+
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
+ [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
+ [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf,
+ [MASTER_CAMNOC_NRT] = &qxm_camnoc_nrt,
+ [MASTER_CAMNOC_RT] = &qxm_camnoc_rt,
+ [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
+ [MASTER_MDP_PORT0] = &qxm_mdp0,
+ [MASTER_MDP_PORT1] = &qxm_mdp1,
+ [MASTER_ROTATOR] = &qxm_rot,
+ [MASTER_VIDEO_P0] = &qxm_venus0,
+ [MASTER_VIDEO_P1] = &qxm_venus1,
+ [MASTER_VIDEO_PROC] = &qxm_venus_arm9,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns2_mem_noc,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static const struct qcom_icc_desc sm7150_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn11,
+ &bcm_sn12,
+ &bcm_sn15,
+ &bcm_sn2,
+ &bcm_sn4,
+ &bcm_sn9,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [A1NOC_SNOC_MAS] = &qnm_aggre1_noc,
+ [A2NOC_SNOC_MAS] = &qnm_aggre2_noc,
+ [MASTER_GEM_NOC_SNOC] = &qnm_gemnoc,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SNOC_CNOC_SLV] = &qns_cnoc,
+ [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+ [SLAVE_OCIMEM] = &qxs_imem,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc sm7150_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sm7150-aggre1-noc", .data = &sm7150_aggre1_noc },
+ { .compatible = "qcom,sm7150-aggre2-noc", .data = &sm7150_aggre2_noc },
+ { .compatible = "qcom,sm7150-camnoc-virt", .data = &sm7150_camnoc_virt },
+ { .compatible = "qcom,sm7150-compute-noc", .data = &sm7150_compute_noc },
+ { .compatible = "qcom,sm7150-config-noc", .data = &sm7150_config_noc },
+ { .compatible = "qcom,sm7150-dc-noc", .data = &sm7150_dc_noc },
+ { .compatible = "qcom,sm7150-gem-noc", .data = &sm7150_gem_noc },
+ { .compatible = "qcom,sm7150-mc-virt", .data = &sm7150_mc_virt },
+ { .compatible = "qcom,sm7150-mmss-noc", .data = &sm7150_mmss_noc },
+ { .compatible = "qcom,sm7150-system-noc", .data = &sm7150_system_noc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qcom_icc_rpmh_probe,
+ .remove_new = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-sm7150",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("Qualcomm SM7150 NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/sm7150.h b/drivers/interconnect/qcom/sm7150.h
new file mode 100644
index 0000000000000..e00a9b0c12793
--- /dev/null
+++ b/drivers/interconnect/qcom/sm7150.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Qualcomm SM7150 interconnect IDs
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com>
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SM7150_H
+#define __DRIVERS_INTERCONNECT_QCOM_SM7150_H
+
+#define SM7150_A1NOC_SNOC_MAS 0
+#define SM7150_A1NOC_SNOC_SLV 1
+#define SM7150_A2NOC_SNOC_MAS 2
+#define SM7150_A2NOC_SNOC_SLV 3
+#define SM7150_MASTER_A1NOC_CFG 4
+#define SM7150_MASTER_A2NOC_CFG 5
+#define SM7150_MASTER_AMPSS_M0 6
+#define SM7150_MASTER_CAMNOC_HF0 7
+#define SM7150_MASTER_CAMNOC_HF0_UNCOMP 8
+#define SM7150_MASTER_CAMNOC_NRT 9
+#define SM7150_MASTER_CAMNOC_NRT_UNCOMP 10
+#define SM7150_MASTER_CAMNOC_RT 11
+#define SM7150_MASTER_CAMNOC_RT_UNCOMP 12
+#define SM7150_MASTER_CAMNOC_SF 13
+#define SM7150_MASTER_CAMNOC_SF_UNCOMP 14
+#define SM7150_MASTER_CNOC_A2NOC 15
+#define SM7150_MASTER_CNOC_DC_NOC 16
+#define SM7150_MASTER_CNOC_MNOC_CFG 17
+#define SM7150_MASTER_COMPUTE_NOC 18
+#define SM7150_MASTER_CRYPTO_CORE_0 19
+#define SM7150_MASTER_EMMC 20
+#define SM7150_MASTER_GEM_NOC_CFG 21
+#define SM7150_MASTER_GEM_NOC_PCIE_SNOC 22
+#define SM7150_MASTER_GEM_NOC_SNOC 23
+#define SM7150_MASTER_GIC 24
+#define SM7150_MASTER_GRAPHICS_3D 25
+#define SM7150_MASTER_IPA 26
+#define SM7150_MASTER_LLCC 27
+#define SM7150_MASTER_MDP_PORT0 28
+#define SM7150_MASTER_MDP_PORT1 29
+#define SM7150_MASTER_MNOC_HF_MEM_NOC 30
+#define SM7150_MASTER_MNOC_SF_MEM_NOC 31
+#define SM7150_MASTER_NPU 32
+#define SM7150_MASTER_PCIE 33
+#define SM7150_MASTER_PIMEM 34
+#define SM7150_MASTER_QDSS_BAM 35
+#define SM7150_MASTER_QDSS_DAP 36
+#define SM7150_MASTER_QDSS_ETR 37
+#define SM7150_MASTER_QUP_0 38
+#define SM7150_MASTER_QUP_1 39
+#define SM7150_MASTER_ROTATOR 40
+#define SM7150_MASTER_SDCC_2 41
+#define SM7150_MASTER_SDCC_4 42
+#define SM7150_MASTER_SNOC_CFG 43
+#define SM7150_MASTER_SNOC_GC_MEM_NOC 44
+#define SM7150_MASTER_SNOC_SF_MEM_NOC 45
+#define SM7150_MASTER_SPDM 46
+#define SM7150_MASTER_SYS_TCU 47
+#define SM7150_MASTER_TSIF 48
+#define SM7150_MASTER_UFS_MEM 49
+#define SM7150_MASTER_USB3 50
+#define SM7150_MASTER_VIDEO_P0 51
+#define SM7150_MASTER_VIDEO_P1 52
+#define SM7150_MASTER_VIDEO_PROC 53
+#define SM7150_SLAVE_A1NOC_CFG 54
+#define SM7150_SLAVE_A2NOC_CFG 55
+#define SM7150_SLAVE_AHB2PHY_NORTH 56
+#define SM7150_SLAVE_AHB2PHY_SOUTH 57
+#define SM7150_SLAVE_AHB2PHY_WEST 58
+#define SM7150_SLAVE_ANOC_PCIE_GEM_NOC 59
+#define SM7150_SLAVE_AOP 60
+#define SM7150_SLAVE_AOSS 61
+#define SM7150_SLAVE_APPSS 62
+#define SM7150_SLAVE_CAMERA_CFG 63
+#define SM7150_SLAVE_CAMERA_NRT_THROTTLE_CFG 64
+#define SM7150_SLAVE_CAMERA_RT_THROTTLE_CFG 65
+#define SM7150_SLAVE_CAMNOC_UNCOMP 66
+#define SM7150_SLAVE_CDSP_CFG 67
+#define SM7150_SLAVE_CDSP_GEM_NOC 68
+#define SM7150_SLAVE_CLK_CTL 69
+#define SM7150_SLAVE_CNOC_A2NOC 70
+#define SM7150_SLAVE_CNOC_DDRSS 71
+#define SM7150_SLAVE_CNOC_MNOC_CFG 72
+#define SM7150_SLAVE_CRYPTO_0_CFG 73
+#define SM7150_SLAVE_DISPLAY_CFG 74
+#define SM7150_SLAVE_DISPLAY_THROTTLE_CFG 75
+#define SM7150_SLAVE_EBI_CH0 76
+#define SM7150_SLAVE_EMMC_CFG 77
+#define SM7150_SLAVE_GEM_NOC_CFG 78
+#define SM7150_SLAVE_GEM_NOC_SNOC 79
+#define SM7150_SLAVE_GLM 80
+#define SM7150_SLAVE_GRAPHICS_3D_CFG 81
+#define SM7150_SLAVE_IMEM_CFG 82
+#define SM7150_SLAVE_IPA_CFG 83
+#define SM7150_SLAVE_LLCC 84
+#define SM7150_SLAVE_LLCC_CFG 85
+#define SM7150_SLAVE_MNOC_HF_MEM_NOC 86
+#define SM7150_SLAVE_MNOC_SF_MEM_NOC 87
+#define SM7150_SLAVE_MSS_PROC_MS_MPU_CFG 88
+#define SM7150_SLAVE_OCIMEM 89
+#define SM7150_SLAVE_PCIE_CFG 90
+#define SM7150_SLAVE_PDM 91
+#define SM7150_SLAVE_PIMEM 92
+#define SM7150_SLAVE_PIMEM_CFG 93
+#define SM7150_SLAVE_PRNG 94
+#define SM7150_SLAVE_QDSS_CFG 95
+#define SM7150_SLAVE_QDSS_STM 96
+#define SM7150_SLAVE_QUP_0 97
+#define SM7150_SLAVE_QUP_1 98
+#define SM7150_SLAVE_RBCPR_CX_CFG 99
+#define SM7150_SLAVE_RBCPR_MX_CFG 100
+#define SM7150_SLAVE_SDCC_2 101
+#define SM7150_SLAVE_SDCC_4 102
+#define SM7150_SLAVE_SERVICE_A1NOC 103
+#define SM7150_SLAVE_SERVICE_A2NOC 104
+#define SM7150_SLAVE_SERVICE_CNOC 105
+#define SM7150_SLAVE_SERVICE_GEM_NOC 106
+#define SM7150_SLAVE_SERVICE_MNOC 107
+#define SM7150_SLAVE_SERVICE_SNOC 108
+#define SM7150_SLAVE_SNOC_CFG 109
+#define SM7150_SLAVE_SNOC_GEM_NOC_GC 110
+#define SM7150_SLAVE_SNOC_GEM_NOC_SF 111
+#define SM7150_SLAVE_SPDM_WRAPPER 112
+#define SM7150_SLAVE_TCSR 113
+#define SM7150_SLAVE_TCU 114
+#define SM7150_SLAVE_TLMM_NORTH 115
+#define SM7150_SLAVE_TLMM_SOUTH 116
+#define SM7150_SLAVE_TLMM_WEST 117
+#define SM7150_SLAVE_TSIF 118
+#define SM7150_SLAVE_UFS_MEM_CFG 119
+#define SM7150_SLAVE_USB3 120
+#define SM7150_SLAVE_VENUS_CFG 121
+#define SM7150_SLAVE_VENUS_CVP_THROTTLE_CFG 122
+#define SM7150_SLAVE_VENUS_THROTTLE_CFG 123
+#define SM7150_SLAVE_VSENSE_CTRL_CFG 124
+#define SM7150_SNOC_CNOC_MAS 125
+#define SM7150_SNOC_CNOC_SLV 126
+
+#endif
diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
index 02d40eea0d696..1879fa15761f5 100644
--- a/drivers/interconnect/qcom/sm8250.c
+++ b/drivers/interconnect/qcom/sm8250.c
@@ -1673,7 +1673,7 @@ static struct qcom_icc_bcm * const qup_virt_bcms[] = {
&bcm_qup0,
};
-static struct qcom_icc_node *qup_virt_nodes[] = {
+static struct qcom_icc_node * const qup_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master,
diff --git a/drivers/interconnect/qcom/sm8550.c b/drivers/interconnect/qcom/sm8550.c
index fc22cecf650fc..4d0e6fa9e003b 100644
--- a/drivers/interconnect/qcom/sm8550.c
+++ b/drivers/interconnect/qcom/sm8550.c
@@ -524,231 +524,6 @@ static struct qcom_icc_node xm_gic = {
.links = { SM8550_SLAVE_SNOC_GEM_NOC_GC },
};
-static struct qcom_icc_node qnm_mnoc_hf_disp = {
- .name = "qnm_mnoc_hf_disp",
- .id = SM8550_MASTER_MNOC_HF_MEM_NOC_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_DISP },
-};
-
-static struct qcom_icc_node qnm_pcie_disp = {
- .name = "qnm_pcie_disp",
- .id = SM8550_MASTER_ANOC_PCIE_GEM_NOC_DISP,
- .channels = 1,
- .buswidth = 16,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_DISP },
-};
-
-static struct qcom_icc_node llcc_mc_disp = {
- .name = "llcc_mc_disp",
- .id = SM8550_MASTER_LLCC_DISP,
- .channels = 4,
- .buswidth = 4,
- .num_links = 1,
- .links = { SM8550_SLAVE_EBI1_DISP },
-};
-
-static struct qcom_icc_node qnm_mdp_disp = {
- .name = "qnm_mdp_disp",
- .id = SM8550_MASTER_MDP_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_HF_MEM_NOC_DISP },
-};
-
-static struct qcom_icc_node qnm_mnoc_hf_cam_ife_0 = {
- .name = "qnm_mnoc_hf_cam_ife_0",
- .id = SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_0,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node qnm_mnoc_sf_cam_ife_0 = {
- .name = "qnm_mnoc_sf_cam_ife_0",
- .id = SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_0,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node qnm_pcie_cam_ife_0 = {
- .name = "qnm_pcie_cam_ife_0",
- .id = SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_0,
- .channels = 1,
- .buswidth = 16,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node llcc_mc_cam_ife_0 = {
- .name = "llcc_mc_cam_ife_0",
- .id = SM8550_MASTER_LLCC_CAM_IFE_0,
- .channels = 4,
- .buswidth = 4,
- .num_links = 1,
- .links = { SM8550_SLAVE_EBI1_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node qnm_camnoc_hf_cam_ife_0 = {
- .name = "qnm_camnoc_hf_cam_ife_0",
- .id = SM8550_MASTER_CAMNOC_HF_CAM_IFE_0,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node qnm_camnoc_icp_cam_ife_0 = {
- .name = "qnm_camnoc_icp_cam_ife_0",
- .id = SM8550_MASTER_CAMNOC_ICP_CAM_IFE_0,
- .channels = 1,
- .buswidth = 8,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node qnm_camnoc_sf_cam_ife_0 = {
- .name = "qnm_camnoc_sf_cam_ife_0",
- .id = SM8550_MASTER_CAMNOC_SF_CAM_IFE_0,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node qnm_mnoc_hf_cam_ife_1 = {
- .name = "qnm_mnoc_hf_cam_ife_1",
- .id = SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_1,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node qnm_mnoc_sf_cam_ife_1 = {
- .name = "qnm_mnoc_sf_cam_ife_1",
- .id = SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_1,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node qnm_pcie_cam_ife_1 = {
- .name = "qnm_pcie_cam_ife_1",
- .id = SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_1,
- .channels = 1,
- .buswidth = 16,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node llcc_mc_cam_ife_1 = {
- .name = "llcc_mc_cam_ife_1",
- .id = SM8550_MASTER_LLCC_CAM_IFE_1,
- .channels = 4,
- .buswidth = 4,
- .num_links = 1,
- .links = { SM8550_SLAVE_EBI1_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node qnm_camnoc_hf_cam_ife_1 = {
- .name = "qnm_camnoc_hf_cam_ife_1",
- .id = SM8550_MASTER_CAMNOC_HF_CAM_IFE_1,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node qnm_camnoc_icp_cam_ife_1 = {
- .name = "qnm_camnoc_icp_cam_ife_1",
- .id = SM8550_MASTER_CAMNOC_ICP_CAM_IFE_1,
- .channels = 1,
- .buswidth = 8,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node qnm_camnoc_sf_cam_ife_1 = {
- .name = "qnm_camnoc_sf_cam_ife_1",
- .id = SM8550_MASTER_CAMNOC_SF_CAM_IFE_1,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node qnm_mnoc_hf_cam_ife_2 = {
- .name = "qnm_mnoc_hf_cam_ife_2",
- .id = SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_2,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_CAM_IFE_2 },
-};
-
-static struct qcom_icc_node qnm_mnoc_sf_cam_ife_2 = {
- .name = "qnm_mnoc_sf_cam_ife_2",
- .id = SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_2,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_CAM_IFE_2 },
-};
-
-static struct qcom_icc_node qnm_pcie_cam_ife_2 = {
- .name = "qnm_pcie_cam_ife_2",
- .id = SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_2,
- .channels = 1,
- .buswidth = 16,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_CAM_IFE_2 },
-};
-
-static struct qcom_icc_node llcc_mc_cam_ife_2 = {
- .name = "llcc_mc_cam_ife_2",
- .id = SM8550_MASTER_LLCC_CAM_IFE_2,
- .channels = 4,
- .buswidth = 4,
- .num_links = 1,
- .links = { SM8550_SLAVE_EBI1_CAM_IFE_2 },
-};
-
-static struct qcom_icc_node qnm_camnoc_hf_cam_ife_2 = {
- .name = "qnm_camnoc_hf_cam_ife_2",
- .id = SM8550_MASTER_CAMNOC_HF_CAM_IFE_2,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_2 },
-};
-
-static struct qcom_icc_node qnm_camnoc_icp_cam_ife_2 = {
- .name = "qnm_camnoc_icp_cam_ife_2",
- .id = SM8550_MASTER_CAMNOC_ICP_CAM_IFE_2,
- .channels = 1,
- .buswidth = 8,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2 },
-};
-
-static struct qcom_icc_node qnm_camnoc_sf_cam_ife_2 = {
- .name = "qnm_camnoc_sf_cam_ife_2",
- .id = SM8550_MASTER_CAMNOC_SF_CAM_IFE_2,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2 },
-};
-
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = SM8550_SLAVE_A1NOC_SNOC,
@@ -1342,137 +1117,6 @@ static struct qcom_icc_node qns_gemnoc_sf = {
.links = { SM8550_MASTER_SNOC_SF_MEM_NOC },
};
-static struct qcom_icc_node qns_llcc_disp = {
- .name = "qns_llcc_disp",
- .id = SM8550_SLAVE_LLCC_DISP,
- .channels = 4,
- .buswidth = 16,
- .num_links = 1,
- .links = { SM8550_MASTER_LLCC_DISP },
-};
-
-static struct qcom_icc_node ebi_disp = {
- .name = "ebi_disp",
- .id = SM8550_SLAVE_EBI1_DISP,
- .channels = 4,
- .buswidth = 4,
- .num_links = 0,
-};
-
-static struct qcom_icc_node qns_mem_noc_hf_disp = {
- .name = "qns_mem_noc_hf_disp",
- .id = SM8550_SLAVE_MNOC_HF_MEM_NOC_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_MASTER_MNOC_HF_MEM_NOC_DISP },
-};
-
-static struct qcom_icc_node qns_llcc_cam_ife_0 = {
- .name = "qns_llcc_cam_ife_0",
- .id = SM8550_SLAVE_LLCC_CAM_IFE_0,
- .channels = 4,
- .buswidth = 16,
- .num_links = 1,
- .links = { SM8550_MASTER_LLCC_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node ebi_cam_ife_0 = {
- .name = "ebi_cam_ife_0",
- .id = SM8550_SLAVE_EBI1_CAM_IFE_0,
- .channels = 4,
- .buswidth = 4,
- .num_links = 0,
-};
-
-static struct qcom_icc_node qns_mem_noc_hf_cam_ife_0 = {
- .name = "qns_mem_noc_hf_cam_ife_0",
- .id = SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_0,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node qns_mem_noc_sf_cam_ife_0 = {
- .name = "qns_mem_noc_sf_cam_ife_0",
- .id = SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node qns_llcc_cam_ife_1 = {
- .name = "qns_llcc_cam_ife_1",
- .id = SM8550_SLAVE_LLCC_CAM_IFE_1,
- .channels = 4,
- .buswidth = 16,
- .num_links = 1,
- .links = { SM8550_MASTER_LLCC_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node ebi_cam_ife_1 = {
- .name = "ebi_cam_ife_1",
- .id = SM8550_SLAVE_EBI1_CAM_IFE_1,
- .channels = 4,
- .buswidth = 4,
- .num_links = 0,
-};
-
-static struct qcom_icc_node qns_mem_noc_hf_cam_ife_1 = {
- .name = "qns_mem_noc_hf_cam_ife_1",
- .id = SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_1,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node qns_mem_noc_sf_cam_ife_1 = {
- .name = "qns_mem_noc_sf_cam_ife_1",
- .id = SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node qns_llcc_cam_ife_2 = {
- .name = "qns_llcc_cam_ife_2",
- .id = SM8550_SLAVE_LLCC_CAM_IFE_2,
- .channels = 4,
- .buswidth = 16,
- .num_links = 1,
- .links = { SM8550_MASTER_LLCC_CAM_IFE_2 },
-};
-
-static struct qcom_icc_node ebi_cam_ife_2 = {
- .name = "ebi_cam_ife_2",
- .id = SM8550_SLAVE_EBI1_CAM_IFE_2,
- .channels = 4,
- .buswidth = 4,
- .num_links = 0,
-};
-
-static struct qcom_icc_node qns_mem_noc_hf_cam_ife_2 = {
- .name = "qns_mem_noc_hf_cam_ife_2",
- .id = SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_2,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_2 },
-};
-
-static struct qcom_icc_node qns_mem_noc_sf_cam_ife_2 = {
- .name = "qns_mem_noc_sf_cam_ife_2",
- .id = SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_2 },
-};
-
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.enable_mask = 0x8,
@@ -1639,161 +1283,6 @@ static struct qcom_icc_bcm bcm_sn7 = {
.nodes = { &qns_pcie_mem_noc },
};
-static struct qcom_icc_bcm bcm_acv_disp = {
- .name = "ACV",
- .enable_mask = 0x1,
- .num_nodes = 1,
- .nodes = { &ebi_disp },
-};
-
-static struct qcom_icc_bcm bcm_mc0_disp = {
- .name = "MC0",
- .num_nodes = 1,
- .nodes = { &ebi_disp },
-};
-
-static struct qcom_icc_bcm bcm_mm0_disp = {
- .name = "MM0",
- .num_nodes = 1,
- .nodes = { &qns_mem_noc_hf_disp },
-};
-
-static struct qcom_icc_bcm bcm_sh0_disp = {
- .name = "SH0",
- .num_nodes = 1,
- .nodes = { &qns_llcc_disp },
-};
-
-static struct qcom_icc_bcm bcm_sh1_disp = {
- .name = "SH1",
- .enable_mask = 0x1,
- .num_nodes = 2,
- .nodes = { &qnm_mnoc_hf_disp, &qnm_pcie_disp },
-};
-
-static struct qcom_icc_bcm bcm_acv_cam_ife_0 = {
- .name = "ACV",
- .enable_mask = 0x0,
- .num_nodes = 1,
- .nodes = { &ebi_cam_ife_0 },
-};
-
-static struct qcom_icc_bcm bcm_mc0_cam_ife_0 = {
- .name = "MC0",
- .num_nodes = 1,
- .nodes = { &ebi_cam_ife_0 },
-};
-
-static struct qcom_icc_bcm bcm_mm0_cam_ife_0 = {
- .name = "MM0",
- .num_nodes = 1,
- .nodes = { &qns_mem_noc_hf_cam_ife_0 },
-};
-
-static struct qcom_icc_bcm bcm_mm1_cam_ife_0 = {
- .name = "MM1",
- .enable_mask = 0x1,
- .num_nodes = 4,
- .nodes = { &qnm_camnoc_hf_cam_ife_0, &qnm_camnoc_icp_cam_ife_0,
- &qnm_camnoc_sf_cam_ife_0, &qns_mem_noc_sf_cam_ife_0 },
-};
-
-static struct qcom_icc_bcm bcm_sh0_cam_ife_0 = {
- .name = "SH0",
- .num_nodes = 1,
- .nodes = { &qns_llcc_cam_ife_0 },
-};
-
-static struct qcom_icc_bcm bcm_sh1_cam_ife_0 = {
- .name = "SH1",
- .enable_mask = 0x1,
- .num_nodes = 3,
- .nodes = { &qnm_mnoc_hf_cam_ife_0, &qnm_mnoc_sf_cam_ife_0,
- &qnm_pcie_cam_ife_0 },
-};
-
-static struct qcom_icc_bcm bcm_acv_cam_ife_1 = {
- .name = "ACV",
- .enable_mask = 0x0,
- .num_nodes = 1,
- .nodes = { &ebi_cam_ife_1 },
-};
-
-static struct qcom_icc_bcm bcm_mc0_cam_ife_1 = {
- .name = "MC0",
- .num_nodes = 1,
- .nodes = { &ebi_cam_ife_1 },
-};
-
-static struct qcom_icc_bcm bcm_mm0_cam_ife_1 = {
- .name = "MM0",
- .num_nodes = 1,
- .nodes = { &qns_mem_noc_hf_cam_ife_1 },
-};
-
-static struct qcom_icc_bcm bcm_mm1_cam_ife_1 = {
- .name = "MM1",
- .enable_mask = 0x1,
- .num_nodes = 4,
- .nodes = { &qnm_camnoc_hf_cam_ife_1, &qnm_camnoc_icp_cam_ife_1,
- &qnm_camnoc_sf_cam_ife_1, &qns_mem_noc_sf_cam_ife_1 },
-};
-
-static struct qcom_icc_bcm bcm_sh0_cam_ife_1 = {
- .name = "SH0",
- .num_nodes = 1,
- .nodes = { &qns_llcc_cam_ife_1 },
-};
-
-static struct qcom_icc_bcm bcm_sh1_cam_ife_1 = {
- .name = "SH1",
- .enable_mask = 0x1,
- .num_nodes = 3,
- .nodes = { &qnm_mnoc_hf_cam_ife_1, &qnm_mnoc_sf_cam_ife_1,
- &qnm_pcie_cam_ife_1 },
-};
-
-static struct qcom_icc_bcm bcm_acv_cam_ife_2 = {
- .name = "ACV",
- .enable_mask = 0x0,
- .num_nodes = 1,
- .nodes = { &ebi_cam_ife_2 },
-};
-
-static struct qcom_icc_bcm bcm_mc0_cam_ife_2 = {
- .name = "MC0",
- .num_nodes = 1,
- .nodes = { &ebi_cam_ife_2 },
-};
-
-static struct qcom_icc_bcm bcm_mm0_cam_ife_2 = {
- .name = "MM0",
- .num_nodes = 1,
- .nodes = { &qns_mem_noc_hf_cam_ife_2 },
-};
-
-static struct qcom_icc_bcm bcm_mm1_cam_ife_2 = {
- .name = "MM1",
- .enable_mask = 0x1,
- .num_nodes = 4,
- .nodes = { &qnm_camnoc_hf_cam_ife_2, &qnm_camnoc_icp_cam_ife_2,
- &qnm_camnoc_sf_cam_ife_2, &qns_mem_noc_sf_cam_ife_2 },
-};
-
-static struct qcom_icc_bcm bcm_sh0_cam_ife_2 = {
- .name = "SH0",
- .num_nodes = 1,
- .nodes = { &qns_llcc_cam_ife_2 },
-};
-
-static struct qcom_icc_bcm bcm_sh1_cam_ife_2 = {
- .name = "SH1",
- .enable_mask = 0x1,
- .num_nodes = 3,
- .nodes = { &qnm_mnoc_hf_cam_ife_2, &qnm_mnoc_sf_cam_ife_2,
- &qnm_pcie_cam_ife_2 },
-};
-
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
};
@@ -1945,14 +1434,6 @@ static const struct qcom_icc_desc sm8550_cnoc_main = {
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh1,
- &bcm_sh0_disp,
- &bcm_sh1_disp,
- &bcm_sh0_cam_ife_0,
- &bcm_sh1_cam_ife_0,
- &bcm_sh0_cam_ife_1,
- &bcm_sh1_cam_ife_1,
- &bcm_sh0_cam_ife_2,
- &bcm_sh1_cam_ife_2,
};
static struct qcom_icc_node * const gem_noc_nodes[] = {
@@ -1971,21 +1452,6 @@ static struct qcom_icc_node * const gem_noc_nodes[] = {
[SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
- [MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp,
- [MASTER_ANOC_PCIE_GEM_NOC_DISP] = &qnm_pcie_disp,
- [SLAVE_LLCC_DISP] = &qns_llcc_disp,
- [MASTER_MNOC_HF_MEM_NOC_CAM_IFE_0] = &qnm_mnoc_hf_cam_ife_0,
- [MASTER_MNOC_SF_MEM_NOC_CAM_IFE_0] = &qnm_mnoc_sf_cam_ife_0,
- [MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_0] = &qnm_pcie_cam_ife_0,
- [SLAVE_LLCC_CAM_IFE_0] = &qns_llcc_cam_ife_0,
- [MASTER_MNOC_HF_MEM_NOC_CAM_IFE_1] = &qnm_mnoc_hf_cam_ife_1,
- [MASTER_MNOC_SF_MEM_NOC_CAM_IFE_1] = &qnm_mnoc_sf_cam_ife_1,
- [MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_1] = &qnm_pcie_cam_ife_1,
- [SLAVE_LLCC_CAM_IFE_1] = &qns_llcc_cam_ife_1,
- [MASTER_MNOC_HF_MEM_NOC_CAM_IFE_2] = &qnm_mnoc_hf_cam_ife_2,
- [MASTER_MNOC_SF_MEM_NOC_CAM_IFE_2] = &qnm_mnoc_sf_cam_ife_2,
- [MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_2] = &qnm_pcie_cam_ife_2,
- [SLAVE_LLCC_CAM_IFE_2] = &qns_llcc_cam_ife_2,
};
static const struct qcom_icc_desc sm8550_gem_noc = {
@@ -2044,27 +1510,11 @@ static const struct qcom_icc_desc sm8550_lpass_lpicx_noc = {
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
- &bcm_acv_disp,
- &bcm_mc0_disp,
- &bcm_acv_cam_ife_0,
- &bcm_mc0_cam_ife_0,
- &bcm_acv_cam_ife_1,
- &bcm_mc0_cam_ife_1,
- &bcm_acv_cam_ife_2,
- &bcm_mc0_cam_ife_2,
};
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
- [MASTER_LLCC_DISP] = &llcc_mc_disp,
- [SLAVE_EBI1_DISP] = &ebi_disp,
- [MASTER_LLCC_CAM_IFE_0] = &llcc_mc_cam_ife_0,
- [SLAVE_EBI1_CAM_IFE_0] = &ebi_cam_ife_0,
- [MASTER_LLCC_CAM_IFE_1] = &llcc_mc_cam_ife_1,
- [SLAVE_EBI1_CAM_IFE_1] = &ebi_cam_ife_1,
- [MASTER_LLCC_CAM_IFE_2] = &llcc_mc_cam_ife_2,
- [SLAVE_EBI1_CAM_IFE_2] = &ebi_cam_ife_2,
};
static const struct qcom_icc_desc sm8550_mc_virt = {
@@ -2077,13 +1527,6 @@ static const struct qcom_icc_desc sm8550_mc_virt = {
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
- &bcm_mm0_disp,
- &bcm_mm0_cam_ife_0,
- &bcm_mm1_cam_ife_0,
- &bcm_mm0_cam_ife_1,
- &bcm_mm1_cam_ife_1,
- &bcm_mm0_cam_ife_2,
- &bcm_mm1_cam_ife_2,
};
static struct qcom_icc_node * const mmss_noc_nodes[] = {
@@ -2100,23 +1543,6 @@ static struct qcom_icc_node * const mmss_noc_nodes[] = {
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
- [MASTER_MDP_DISP] = &qnm_mdp_disp,
- [SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp,
- [MASTER_CAMNOC_HF_CAM_IFE_0] = &qnm_camnoc_hf_cam_ife_0,
- [MASTER_CAMNOC_ICP_CAM_IFE_0] = &qnm_camnoc_icp_cam_ife_0,
- [MASTER_CAMNOC_SF_CAM_IFE_0] = &qnm_camnoc_sf_cam_ife_0,
- [SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_0] = &qns_mem_noc_hf_cam_ife_0,
- [SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0] = &qns_mem_noc_sf_cam_ife_0,
- [MASTER_CAMNOC_HF_CAM_IFE_1] = &qnm_camnoc_hf_cam_ife_1,
- [MASTER_CAMNOC_ICP_CAM_IFE_1] = &qnm_camnoc_icp_cam_ife_1,
- [MASTER_CAMNOC_SF_CAM_IFE_1] = &qnm_camnoc_sf_cam_ife_1,
- [SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_1] = &qns_mem_noc_hf_cam_ife_1,
- [SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1] = &qns_mem_noc_sf_cam_ife_1,
- [MASTER_CAMNOC_HF_CAM_IFE_2] = &qnm_camnoc_hf_cam_ife_2,
- [MASTER_CAMNOC_ICP_CAM_IFE_2] = &qnm_camnoc_icp_cam_ife_2,
- [MASTER_CAMNOC_SF_CAM_IFE_2] = &qnm_camnoc_sf_cam_ife_2,
- [SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_2] = &qns_mem_noc_hf_cam_ife_2,
- [SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2] = &qns_mem_noc_sf_cam_ife_2,
};
static const struct qcom_icc_desc sm8550_mmss_noc = {
diff --git a/drivers/interconnect/qcom/sm8550.h b/drivers/interconnect/qcom/sm8550.h
index 8d5862c04bca2..c9b2986e12933 100644
--- a/drivers/interconnect/qcom/sm8550.h
+++ b/drivers/interconnect/qcom/sm8550.h
@@ -12,167 +12,127 @@
#define SM8550_MASTER_A1NOC_SNOC 0
#define SM8550_MASTER_A2NOC_SNOC 1
#define SM8550_MASTER_ANOC_PCIE_GEM_NOC 2
-#define SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_0 3
-#define SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_1 4
-#define SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_2 5
-#define SM8550_MASTER_ANOC_PCIE_GEM_NOC_DISP 6
-#define SM8550_MASTER_APPSS_PROC 7
-#define SM8550_MASTER_CAMNOC_HF 8
-#define SM8550_MASTER_CAMNOC_HF_CAM_IFE_0 9
-#define SM8550_MASTER_CAMNOC_HF_CAM_IFE_1 10
-#define SM8550_MASTER_CAMNOC_HF_CAM_IFE_2 11
-#define SM8550_MASTER_CAMNOC_ICP 12
-#define SM8550_MASTER_CAMNOC_ICP_CAM_IFE_0 13
-#define SM8550_MASTER_CAMNOC_ICP_CAM_IFE_1 14
-#define SM8550_MASTER_CAMNOC_ICP_CAM_IFE_2 15
-#define SM8550_MASTER_CAMNOC_SF 16
-#define SM8550_MASTER_CAMNOC_SF_CAM_IFE_0 17
-#define SM8550_MASTER_CAMNOC_SF_CAM_IFE_1 18
-#define SM8550_MASTER_CAMNOC_SF_CAM_IFE_2 19
-#define SM8550_MASTER_CDSP_HCP 20
-#define SM8550_MASTER_CDSP_PROC 21
-#define SM8550_MASTER_CNOC_CFG 22
-#define SM8550_MASTER_CNOC_MNOC_CFG 23
-#define SM8550_MASTER_COMPUTE_NOC 24
-#define SM8550_MASTER_CRYPTO 25
-#define SM8550_MASTER_GEM_NOC_CNOC 26
-#define SM8550_MASTER_GEM_NOC_PCIE_SNOC 27
-#define SM8550_MASTER_GFX3D 28
-#define SM8550_MASTER_GIC 29
-#define SM8550_MASTER_GIC_AHB 30
-#define SM8550_MASTER_GPU_TCU 31
-#define SM8550_MASTER_IPA 32
-#define SM8550_MASTER_LLCC 33
-#define SM8550_MASTER_LLCC_CAM_IFE_0 34
-#define SM8550_MASTER_LLCC_CAM_IFE_1 35
-#define SM8550_MASTER_LLCC_CAM_IFE_2 36
-#define SM8550_MASTER_LLCC_DISP 37
-#define SM8550_MASTER_LPASS_GEM_NOC 38
-#define SM8550_MASTER_LPASS_LPINOC 39
-#define SM8550_MASTER_LPASS_PROC 40
-#define SM8550_MASTER_LPIAON_NOC 41
-#define SM8550_MASTER_MDP 42
-#define SM8550_MASTER_MDP_DISP 43
-#define SM8550_MASTER_MNOC_HF_MEM_NOC 44
-#define SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_0 45
-#define SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_1 46
-#define SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_2 47
-#define SM8550_MASTER_MNOC_HF_MEM_NOC_DISP 48
-#define SM8550_MASTER_MNOC_SF_MEM_NOC 49
-#define SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_0 50
-#define SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_1 51
-#define SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_2 52
-#define SM8550_MASTER_MSS_PROC 53
-#define SM8550_MASTER_PCIE_0 54
-#define SM8550_MASTER_PCIE_1 55
-#define SM8550_MASTER_PCIE_ANOC_CFG 56
-#define SM8550_MASTER_QDSS_BAM 57
-#define SM8550_MASTER_QDSS_ETR 58
-#define SM8550_MASTER_QDSS_ETR_1 59
-#define SM8550_MASTER_QSPI_0 60
-#define SM8550_MASTER_QUP_1 61
-#define SM8550_MASTER_QUP_2 62
-#define SM8550_MASTER_QUP_CORE_0 63
-#define SM8550_MASTER_QUP_CORE_1 64
-#define SM8550_MASTER_QUP_CORE_2 65
-#define SM8550_MASTER_SDCC_2 66
-#define SM8550_MASTER_SDCC_4 67
-#define SM8550_MASTER_SNOC_GC_MEM_NOC 68
-#define SM8550_MASTER_SNOC_SF_MEM_NOC 69
-#define SM8550_MASTER_SP 70
-#define SM8550_MASTER_SYS_TCU 71
-#define SM8550_MASTER_UFS_MEM 72
-#define SM8550_MASTER_USB3_0 73
-#define SM8550_MASTER_VIDEO 74
-#define SM8550_MASTER_VIDEO_CV_PROC 75
-#define SM8550_MASTER_VIDEO_PROC 76
-#define SM8550_MASTER_VIDEO_V_PROC 77
-#define SM8550_SLAVE_A1NOC_SNOC 78
-#define SM8550_SLAVE_A2NOC_SNOC 79
-#define SM8550_SLAVE_AHB2PHY_NORTH 80
-#define SM8550_SLAVE_AHB2PHY_SOUTH 81
-#define SM8550_SLAVE_ANOC_PCIE_GEM_NOC 82
-#define SM8550_SLAVE_AOSS 83
-#define SM8550_SLAVE_APPSS 84
-#define SM8550_SLAVE_BOOT_IMEM 85
-#define SM8550_SLAVE_CAMERA_CFG 86
-#define SM8550_SLAVE_CDSP_MEM_NOC 87
-#define SM8550_SLAVE_CLK_CTL 88
-#define SM8550_SLAVE_CNOC_CFG 89
-#define SM8550_SLAVE_CNOC_MNOC_CFG 90
-#define SM8550_SLAVE_CNOC_MSS 91
-#define SM8550_SLAVE_CPR_NSPCX 92
-#define SM8550_SLAVE_CRYPTO_0_CFG 93
-#define SM8550_SLAVE_CX_RDPM 94
-#define SM8550_SLAVE_DDRSS_CFG 95
-#define SM8550_SLAVE_DISPLAY_CFG 96
-#define SM8550_SLAVE_EBI1 97
-#define SM8550_SLAVE_EBI1_CAM_IFE_0 98
-#define SM8550_SLAVE_EBI1_CAM_IFE_1 99
-#define SM8550_SLAVE_EBI1_CAM_IFE_2 100
-#define SM8550_SLAVE_EBI1_DISP 101
-#define SM8550_SLAVE_GEM_NOC_CNOC 102
-#define SM8550_SLAVE_GFX3D_CFG 103
-#define SM8550_SLAVE_I2C 104
-#define SM8550_SLAVE_IMEM 105
-#define SM8550_SLAVE_IMEM_CFG 106
-#define SM8550_SLAVE_IPA_CFG 107
-#define SM8550_SLAVE_IPC_ROUTER_CFG 108
-#define SM8550_SLAVE_LLCC 109
-#define SM8550_SLAVE_LLCC_CAM_IFE_0 110
-#define SM8550_SLAVE_LLCC_CAM_IFE_1 111
-#define SM8550_SLAVE_LLCC_CAM_IFE_2 112
-#define SM8550_SLAVE_LLCC_DISP 113
-#define SM8550_SLAVE_LPASS_GEM_NOC 114
-#define SM8550_SLAVE_LPASS_QTB_CFG 115
-#define SM8550_SLAVE_LPIAON_NOC_LPASS_AG_NOC 116
-#define SM8550_SLAVE_LPICX_NOC_LPIAON_NOC 117
-#define SM8550_SLAVE_MEM_NOC_PCIE_SNOC 118
-#define SM8550_SLAVE_MNOC_HF_MEM_NOC 119
-#define SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_0 120
-#define SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_1 121
-#define SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_2 122
-#define SM8550_SLAVE_MNOC_HF_MEM_NOC_DISP 123
-#define SM8550_SLAVE_MNOC_SF_MEM_NOC 124
-#define SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0 125
-#define SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1 126
-#define SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2 127
-#define SM8550_SLAVE_MX_RDPM 128
-#define SM8550_SLAVE_NSP_QTB_CFG 129
-#define SM8550_SLAVE_PCIE_0 130
-#define SM8550_SLAVE_PCIE_0_CFG 131
-#define SM8550_SLAVE_PCIE_1 132
-#define SM8550_SLAVE_PCIE_1_CFG 133
-#define SM8550_SLAVE_PCIE_ANOC_CFG 134
-#define SM8550_SLAVE_PDM 135
-#define SM8550_SLAVE_PIMEM_CFG 136
-#define SM8550_SLAVE_PRNG 137
-#define SM8550_SLAVE_QDSS_CFG 138
-#define SM8550_SLAVE_QDSS_STM 139
-#define SM8550_SLAVE_QSPI_0 140
-#define SM8550_SLAVE_QUP_1 141
-#define SM8550_SLAVE_QUP_2 142
-#define SM8550_SLAVE_QUP_CORE_0 143
-#define SM8550_SLAVE_QUP_CORE_1 144
-#define SM8550_SLAVE_QUP_CORE_2 145
-#define SM8550_SLAVE_RBCPR_CX_CFG 146
-#define SM8550_SLAVE_RBCPR_MMCX_CFG 147
-#define SM8550_SLAVE_RBCPR_MXA_CFG 148
-#define SM8550_SLAVE_RBCPR_MXC_CFG 149
-#define SM8550_SLAVE_SDCC_2 150
-#define SM8550_SLAVE_SDCC_4 151
-#define SM8550_SLAVE_SERVICE_MNOC 152
-#define SM8550_SLAVE_SERVICE_PCIE_ANOC 153
-#define SM8550_SLAVE_SNOC_GEM_NOC_GC 154
-#define SM8550_SLAVE_SNOC_GEM_NOC_SF 155
-#define SM8550_SLAVE_SPSS_CFG 156
-#define SM8550_SLAVE_TCSR 157
-#define SM8550_SLAVE_TCU 158
-#define SM8550_SLAVE_TLMM 159
-#define SM8550_SLAVE_TME_CFG 160
-#define SM8550_SLAVE_UFS_MEM_CFG 161
-#define SM8550_SLAVE_USB3_0 162
-#define SM8550_SLAVE_VENUS_CFG 163
-#define SM8550_SLAVE_VSENSE_CTRL_CFG 164
+#define SM8550_MASTER_APPSS_PROC 3
+#define SM8550_MASTER_CAMNOC_HF 4
+#define SM8550_MASTER_CAMNOC_ICP 5
+#define SM8550_MASTER_CAMNOC_SF 6
+#define SM8550_MASTER_CDSP_HCP 7
+#define SM8550_MASTER_CDSP_PROC 8
+#define SM8550_MASTER_CNOC_CFG 9
+#define SM8550_MASTER_CNOC_MNOC_CFG 10
+#define SM8550_MASTER_COMPUTE_NOC 11
+#define SM8550_MASTER_CRYPTO 12
+#define SM8550_MASTER_GEM_NOC_CNOC 13
+#define SM8550_MASTER_GEM_NOC_PCIE_SNOC 14
+#define SM8550_MASTER_GFX3D 15
+#define SM8550_MASTER_GIC 16
+#define SM8550_MASTER_GIC_AHB 17
+#define SM8550_MASTER_GPU_TCU 18
+#define SM8550_MASTER_IPA 19
+#define SM8550_MASTER_LLCC 20
+#define SM8550_MASTER_LPASS_GEM_NOC 21
+#define SM8550_MASTER_LPASS_LPINOC 22
+#define SM8550_MASTER_LPASS_PROC 23
+#define SM8550_MASTER_LPIAON_NOC 24
+#define SM8550_MASTER_MDP 25
+#define SM8550_MASTER_MNOC_HF_MEM_NOC 26
+#define SM8550_MASTER_MNOC_SF_MEM_NOC 27
+#define SM8550_MASTER_MSS_PROC 28
+#define SM8550_MASTER_PCIE_0 29
+#define SM8550_MASTER_PCIE_1 30
+#define SM8550_MASTER_PCIE_ANOC_CFG 31
+#define SM8550_MASTER_QDSS_BAM 32
+#define SM8550_MASTER_QDSS_ETR 33
+#define SM8550_MASTER_QDSS_ETR_1 34
+#define SM8550_MASTER_QSPI_0 35
+#define SM8550_MASTER_QUP_1 36
+#define SM8550_MASTER_QUP_2 37
+#define SM8550_MASTER_QUP_CORE_0 38
+#define SM8550_MASTER_QUP_CORE_1 39
+#define SM8550_MASTER_QUP_CORE_2 40
+#define SM8550_MASTER_SDCC_2 41
+#define SM8550_MASTER_SDCC_4 42
+#define SM8550_MASTER_SNOC_GC_MEM_NOC 43
+#define SM8550_MASTER_SNOC_SF_MEM_NOC 44
+#define SM8550_MASTER_SP 45
+#define SM8550_MASTER_SYS_TCU 46
+#define SM8550_MASTER_UFS_MEM 47
+#define SM8550_MASTER_USB3_0 48
+#define SM8550_MASTER_VIDEO 49
+#define SM8550_MASTER_VIDEO_CV_PROC 50
+#define SM8550_MASTER_VIDEO_PROC 51
+#define SM8550_MASTER_VIDEO_V_PROC 52
+#define SM8550_SLAVE_A1NOC_SNOC 53
+#define SM8550_SLAVE_A2NOC_SNOC 54
+#define SM8550_SLAVE_AHB2PHY_NORTH 55
+#define SM8550_SLAVE_AHB2PHY_SOUTH 56
+#define SM8550_SLAVE_ANOC_PCIE_GEM_NOC 57
+#define SM8550_SLAVE_AOSS 58
+#define SM8550_SLAVE_APPSS 59
+#define SM8550_SLAVE_BOOT_IMEM 60
+#define SM8550_SLAVE_CAMERA_CFG 61
+#define SM8550_SLAVE_CDSP_MEM_NOC 62
+#define SM8550_SLAVE_CLK_CTL 63
+#define SM8550_SLAVE_CNOC_CFG 64
+#define SM8550_SLAVE_CNOC_MNOC_CFG 65
+#define SM8550_SLAVE_CNOC_MSS 66
+#define SM8550_SLAVE_CPR_NSPCX 67
+#define SM8550_SLAVE_CRYPTO_0_CFG 68
+#define SM8550_SLAVE_CX_RDPM 69
+#define SM8550_SLAVE_DDRSS_CFG 70
+#define SM8550_SLAVE_DISPLAY_CFG 71
+#define SM8550_SLAVE_EBI1 72
+#define SM8550_SLAVE_GEM_NOC_CNOC 73
+#define SM8550_SLAVE_GFX3D_CFG 74
+#define SM8550_SLAVE_I2C 75
+#define SM8550_SLAVE_IMEM 76
+#define SM8550_SLAVE_IMEM_CFG 77
+#define SM8550_SLAVE_IPA_CFG 78
+#define SM8550_SLAVE_IPC_ROUTER_CFG 79
+#define SM8550_SLAVE_LLCC 80
+#define SM8550_SLAVE_LPASS_GEM_NOC 81
+#define SM8550_SLAVE_LPASS_QTB_CFG 82
+#define SM8550_SLAVE_LPIAON_NOC_LPASS_AG_NOC 83
+#define SM8550_SLAVE_LPICX_NOC_LPIAON_NOC 84
+#define SM8550_SLAVE_MEM_NOC_PCIE_SNOC 85
+#define SM8550_SLAVE_MNOC_HF_MEM_NOC 86
+#define SM8550_SLAVE_MNOC_SF_MEM_NOC 87
+#define SM8550_SLAVE_MX_RDPM 88
+#define SM8550_SLAVE_NSP_QTB_CFG 89
+#define SM8550_SLAVE_PCIE_0 90
+#define SM8550_SLAVE_PCIE_0_CFG 91
+#define SM8550_SLAVE_PCIE_1 92
+#define SM8550_SLAVE_PCIE_1_CFG 93
+#define SM8550_SLAVE_PCIE_ANOC_CFG 94
+#define SM8550_SLAVE_PDM 95
+#define SM8550_SLAVE_PIMEM_CFG 96
+#define SM8550_SLAVE_PRNG 97
+#define SM8550_SLAVE_QDSS_CFG 98
+#define SM8550_SLAVE_QDSS_STM 99
+#define SM8550_SLAVE_QSPI_0 100
+#define SM8550_SLAVE_QUP_1 101
+#define SM8550_SLAVE_QUP_2 102
+#define SM8550_SLAVE_QUP_CORE_0 103
+#define SM8550_SLAVE_QUP_CORE_1 104
+#define SM8550_SLAVE_QUP_CORE_2 105
+#define SM8550_SLAVE_RBCPR_CX_CFG 106
+#define SM8550_SLAVE_RBCPR_MMCX_CFG 107
+#define SM8550_SLAVE_RBCPR_MXA_CFG 108
+#define SM8550_SLAVE_RBCPR_MXC_CFG 109
+#define SM8550_SLAVE_SDCC_2 110
+#define SM8550_SLAVE_SDCC_4 111
+#define SM8550_SLAVE_SERVICE_MNOC 112
+#define SM8550_SLAVE_SERVICE_PCIE_ANOC 113
+#define SM8550_SLAVE_SNOC_GEM_NOC_GC 114
+#define SM8550_SLAVE_SNOC_GEM_NOC_SF 115
+#define SM8550_SLAVE_SPSS_CFG 116
+#define SM8550_SLAVE_TCSR 117
+#define SM8550_SLAVE_TCU 118
+#define SM8550_SLAVE_TLMM 119
+#define SM8550_SLAVE_TME_CFG 120
+#define SM8550_SLAVE_UFS_MEM_CFG 121
+#define SM8550_SLAVE_USB3_0 122
+#define SM8550_SLAVE_VENUS_CFG 123
+#define SM8550_SLAVE_VSENSE_CTRL_CFG 124
#endif
diff --git a/drivers/interconnect/qcom/x1e80100.c b/drivers/interconnect/qcom/x1e80100.c
index cbaf4f9c41be6..654abb9ce08ee 100644
--- a/drivers/interconnect/qcom/x1e80100.c
+++ b/drivers/interconnect/qcom/x1e80100.c
@@ -116,15 +116,6 @@ static struct qcom_icc_node xm_sdc2 = {
.links = { X1E80100_SLAVE_A2NOC_SNOC },
};
-static struct qcom_icc_node ddr_perf_mode_master = {
- .name = "ddr_perf_mode_master",
- .id = X1E80100_MASTER_DDR_PERF_MODE,
- .channels = 1,
- .buswidth = 4,
- .num_links = 1,
- .links = { X1E80100_SLAVE_DDR_PERF_MODE },
-};
-
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
.id = X1E80100_MASTER_QUP_CORE_0,
@@ -670,150 +661,6 @@ static struct qcom_icc_node xm_usb4_2 = {
.links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
};
-static struct qcom_icc_node qnm_mnoc_hf_disp = {
- .name = "qnm_mnoc_hf_disp",
- .id = X1E80100_MASTER_MNOC_HF_MEM_NOC_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { X1E80100_SLAVE_LLCC_DISP },
-};
-
-static struct qcom_icc_node qnm_pcie_disp = {
- .name = "qnm_pcie_disp",
- .id = X1E80100_MASTER_ANOC_PCIE_GEM_NOC_DISP,
- .channels = 1,
- .buswidth = 64,
- .num_links = 1,
- .links = { X1E80100_SLAVE_LLCC_DISP },
-};
-
-static struct qcom_icc_node llcc_mc_disp = {
- .name = "llcc_mc_disp",
- .id = X1E80100_MASTER_LLCC_DISP,
- .channels = 8,
- .buswidth = 4,
- .num_links = 1,
- .links = { X1E80100_SLAVE_EBI1_DISP },
-};
-
-static struct qcom_icc_node qnm_mdp_disp = {
- .name = "qnm_mdp_disp",
- .id = X1E80100_MASTER_MDP_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { X1E80100_SLAVE_MNOC_HF_MEM_NOC_DISP },
-};
-
-static struct qcom_icc_node qnm_pcie_pcie = {
- .name = "qnm_pcie_pcie",
- .id = X1E80100_MASTER_ANOC_PCIE_GEM_NOC_PCIE,
- .channels = 1,
- .buswidth = 64,
- .num_links = 1,
- .links = { X1E80100_SLAVE_LLCC_PCIE },
-};
-
-static struct qcom_icc_node llcc_mc_pcie = {
- .name = "llcc_mc_pcie",
- .id = X1E80100_MASTER_LLCC_PCIE,
- .channels = 8,
- .buswidth = 4,
- .num_links = 1,
- .links = { X1E80100_SLAVE_EBI1_PCIE },
-};
-
-static struct qcom_icc_node qnm_pcie_north_gem_noc_pcie = {
- .name = "qnm_pcie_north_gem_noc_pcie",
- .id = X1E80100_MASTER_PCIE_NORTH_PCIE,
- .channels = 1,
- .buswidth = 64,
- .num_links = 1,
- .links = { X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE },
-};
-
-static struct qcom_icc_node qnm_pcie_south_gem_noc_pcie = {
- .name = "qnm_pcie_south_gem_noc_pcie",
- .id = X1E80100_MASTER_PCIE_SOUTH_PCIE,
- .channels = 1,
- .buswidth = 64,
- .num_links = 1,
- .links = { X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE },
-};
-
-static struct qcom_icc_node xm_pcie_3_pcie = {
- .name = "xm_pcie_3_pcie",
- .id = X1E80100_MASTER_PCIE_3_PCIE,
- .channels = 1,
- .buswidth = 64,
- .num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_NORTH_PCIE },
-};
-
-static struct qcom_icc_node xm_pcie_4_pcie = {
- .name = "xm_pcie_4_pcie",
- .id = X1E80100_MASTER_PCIE_4_PCIE,
- .channels = 1,
- .buswidth = 8,
- .num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_NORTH_PCIE },
-};
-
-static struct qcom_icc_node xm_pcie_5_pcie = {
- .name = "xm_pcie_5_pcie",
- .id = X1E80100_MASTER_PCIE_5_PCIE,
- .channels = 1,
- .buswidth = 8,
- .num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_NORTH_PCIE },
-};
-
-static struct qcom_icc_node xm_pcie_0_pcie = {
- .name = "xm_pcie_0_pcie",
- .id = X1E80100_MASTER_PCIE_0_PCIE,
- .channels = 1,
- .buswidth = 16,
- .num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
-};
-
-static struct qcom_icc_node xm_pcie_1_pcie = {
- .name = "xm_pcie_1_pcie",
- .id = X1E80100_MASTER_PCIE_1_PCIE,
- .channels = 1,
- .buswidth = 16,
- .num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
-};
-
-static struct qcom_icc_node xm_pcie_2_pcie = {
- .name = "xm_pcie_2_pcie",
- .id = X1E80100_MASTER_PCIE_2_PCIE,
- .channels = 1,
- .buswidth = 16,
- .num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
-};
-
-static struct qcom_icc_node xm_pcie_6a_pcie = {
- .name = "xm_pcie_6a_pcie",
- .id = X1E80100_MASTER_PCIE_6A_PCIE,
- .channels = 1,
- .buswidth = 32,
- .num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
-};
-
-static struct qcom_icc_node xm_pcie_6b_pcie = {
- .name = "xm_pcie_6b_pcie",
- .id = X1E80100_MASTER_PCIE_6B_PCIE,
- .channels = 1,
- .buswidth = 16,
- .num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
-};
-
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = X1E80100_SLAVE_A1NOC_SNOC,
@@ -832,14 +679,6 @@ static struct qcom_icc_node qns_a2noc_snoc = {
.links = { X1E80100_MASTER_A2NOC_SNOC },
};
-static struct qcom_icc_node ddr_perf_mode_slave = {
- .name = "ddr_perf_mode_slave",
- .id = X1E80100_SLAVE_DDR_PERF_MODE,
- .channels = 1,
- .buswidth = 4,
- .num_links = 0,
-};
-
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
.id = X1E80100_SLAVE_QUP_CORE_0,
@@ -1514,76 +1353,6 @@ static struct qcom_icc_node qns_aggre_usb_south_snoc = {
.links = { X1E80100_MASTER_AGGRE_USB_SOUTH },
};
-static struct qcom_icc_node qns_llcc_disp = {
- .name = "qns_llcc_disp",
- .id = X1E80100_SLAVE_LLCC_DISP,
- .channels = 8,
- .buswidth = 16,
- .num_links = 1,
- .links = { X1E80100_MASTER_LLCC_DISP },
-};
-
-static struct qcom_icc_node ebi_disp = {
- .name = "ebi_disp",
- .id = X1E80100_SLAVE_EBI1_DISP,
- .channels = 8,
- .buswidth = 4,
- .num_links = 0,
-};
-
-static struct qcom_icc_node qns_mem_noc_hf_disp = {
- .name = "qns_mem_noc_hf_disp",
- .id = X1E80100_SLAVE_MNOC_HF_MEM_NOC_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { X1E80100_MASTER_MNOC_HF_MEM_NOC_DISP },
-};
-
-static struct qcom_icc_node qns_llcc_pcie = {
- .name = "qns_llcc_pcie",
- .id = X1E80100_SLAVE_LLCC_PCIE,
- .channels = 8,
- .buswidth = 16,
- .num_links = 1,
- .links = { X1E80100_MASTER_LLCC_PCIE },
-};
-
-static struct qcom_icc_node ebi_pcie = {
- .name = "ebi_pcie",
- .id = X1E80100_SLAVE_EBI1_PCIE,
- .channels = 8,
- .buswidth = 4,
- .num_links = 0,
-};
-
-static struct qcom_icc_node qns_pcie_mem_noc_pcie = {
- .name = "qns_pcie_mem_noc_pcie",
- .id = X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE,
- .channels = 1,
- .buswidth = 64,
- .num_links = 1,
- .links = { X1E80100_MASTER_ANOC_PCIE_GEM_NOC_PCIE },
-};
-
-static struct qcom_icc_node qns_pcie_north_gem_noc_pcie = {
- .name = "qns_pcie_north_gem_noc_pcie",
- .id = X1E80100_SLAVE_PCIE_NORTH_PCIE,
- .channels = 1,
- .buswidth = 64,
- .num_links = 1,
- .links = { X1E80100_MASTER_PCIE_NORTH_PCIE },
-};
-
-static struct qcom_icc_node qns_pcie_south_gem_noc_pcie = {
- .name = "qns_pcie_south_gem_noc_pcie",
- .id = X1E80100_SLAVE_PCIE_SOUTH_PCIE,
- .channels = 1,
- .buswidth = 64,
- .num_links = 1,
- .links = { X1E80100_MASTER_PCIE_SOUTH_PCIE },
-};
-
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.enable_mask = BIT(3),
@@ -1591,12 +1360,6 @@ static struct qcom_icc_bcm bcm_acv = {
.nodes = { &ebi },
};
-static struct qcom_icc_bcm bcm_acv_perf = {
- .name = "ACV_PERF",
- .num_nodes = 1,
- .nodes = { &ddr_perf_mode_slave },
-};
-
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.num_nodes = 1,
@@ -1756,73 +1519,7 @@ static struct qcom_icc_bcm bcm_sn4 = {
.nodes = { &qnm_usb_anoc },
};
-static struct qcom_icc_bcm bcm_acv_disp = {
- .name = "ACV",
- .num_nodes = 1,
- .nodes = { &ebi_disp },
-};
-
-static struct qcom_icc_bcm bcm_mc0_disp = {
- .name = "MC0",
- .num_nodes = 1,
- .nodes = { &ebi_disp },
-};
-
-static struct qcom_icc_bcm bcm_mm0_disp = {
- .name = "MM0",
- .num_nodes = 1,
- .nodes = { &qns_mem_noc_hf_disp },
-};
-
-static struct qcom_icc_bcm bcm_mm1_disp = {
- .name = "MM1",
- .num_nodes = 1,
- .nodes = { &qnm_mdp_disp },
-};
-
-static struct qcom_icc_bcm bcm_sh0_disp = {
- .name = "SH0",
- .num_nodes = 1,
- .nodes = { &qns_llcc_disp },
-};
-
-static struct qcom_icc_bcm bcm_sh1_disp = {
- .name = "SH1",
- .num_nodes = 2,
- .nodes = { &qnm_mnoc_hf_disp, &qnm_pcie_disp },
-};
-
-static struct qcom_icc_bcm bcm_acv_pcie = {
- .name = "ACV",
- .num_nodes = 1,
- .nodes = { &ebi_pcie },
-};
-
-static struct qcom_icc_bcm bcm_mc0_pcie = {
- .name = "MC0",
- .num_nodes = 1,
- .nodes = { &ebi_pcie },
-};
-
-static struct qcom_icc_bcm bcm_pc0_pcie = {
- .name = "PC0",
- .num_nodes = 1,
- .nodes = { &qns_pcie_mem_noc_pcie },
-};
-
-static struct qcom_icc_bcm bcm_sh0_pcie = {
- .name = "SH0",
- .num_nodes = 1,
- .nodes = { &qns_llcc_pcie },
-};
-
-static struct qcom_icc_bcm bcm_sh1_pcie = {
- .name = "SH1",
- .num_nodes = 1,
- .nodes = { &qnm_pcie_pcie },
-};
-
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
};
static struct qcom_icc_node * const aggre1_noc_nodes[] = {
@@ -1863,18 +1560,15 @@ static const struct qcom_icc_desc x1e80100_aggre2_noc = {
};
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
- &bcm_acv_perf,
&bcm_qup0,
&bcm_qup1,
&bcm_qup2,
};
static struct qcom_icc_node * const clk_virt_nodes[] = {
- [MASTER_DDR_PERF_MODE] = &ddr_perf_mode_master,
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master,
- [SLAVE_DDR_PERF_MODE] = &ddr_perf_mode_slave,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
[SLAVE_QUP_CORE_1] = &qup1_core_slave,
[SLAVE_QUP_CORE_2] = &qup2_core_slave,
@@ -1983,10 +1677,6 @@ static const struct qcom_icc_desc x1e80100_cnoc_main = {
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh1,
- &bcm_sh0_disp,
- &bcm_sh1_disp,
- &bcm_sh0_pcie,
- &bcm_sh1_pcie,
};
static struct qcom_icc_node * const gem_noc_nodes[] = {
@@ -2005,11 +1695,6 @@ static struct qcom_icc_node * const gem_noc_nodes[] = {
[SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
- [MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp,
- [MASTER_ANOC_PCIE_GEM_NOC_DISP] = &qnm_pcie_disp,
- [SLAVE_LLCC_DISP] = &qns_llcc_disp,
- [MASTER_ANOC_PCIE_GEM_NOC_PCIE] = &qnm_pcie_pcie,
- [SLAVE_LLCC_PCIE] = &qns_llcc_pcie,
};
static const struct qcom_icc_desc x1e80100_gem_noc = {
@@ -2019,7 +1704,7 @@ static const struct qcom_icc_desc x1e80100_gem_noc = {
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
};
static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
@@ -2068,19 +1753,11 @@ static const struct qcom_icc_desc x1e80100_lpass_lpicx_noc = {
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
- &bcm_acv_disp,
- &bcm_mc0_disp,
- &bcm_acv_pcie,
- &bcm_mc0_pcie,
};
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
- [MASTER_LLCC_DISP] = &llcc_mc_disp,
- [SLAVE_EBI1_DISP] = &ebi_disp,
- [MASTER_LLCC_PCIE] = &llcc_mc_pcie,
- [SLAVE_EBI1_PCIE] = &ebi_pcie,
};
static const struct qcom_icc_desc x1e80100_mc_virt = {
@@ -2093,8 +1770,6 @@ static const struct qcom_icc_desc x1e80100_mc_virt = {
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
- &bcm_mm0_disp,
- &bcm_mm1_disp,
};
static struct qcom_icc_node * const mmss_noc_nodes[] = {
@@ -2111,8 +1786,6 @@ static struct qcom_icc_node * const mmss_noc_nodes[] = {
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
- [MASTER_MDP_DISP] = &qnm_mdp_disp,
- [SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp,
};
static const struct qcom_icc_desc x1e80100_mmss_noc = {
@@ -2140,16 +1813,12 @@ static const struct qcom_icc_desc x1e80100_nsp_noc = {
static struct qcom_icc_bcm * const pcie_center_anoc_bcms[] = {
&bcm_pc0,
- &bcm_pc0_pcie,
};
static struct qcom_icc_node * const pcie_center_anoc_nodes[] = {
[MASTER_PCIE_NORTH] = &qnm_pcie_north_gem_noc,
[MASTER_PCIE_SOUTH] = &qnm_pcie_south_gem_noc,
[SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
- [MASTER_PCIE_NORTH_PCIE] = &qnm_pcie_north_gem_noc_pcie,
- [MASTER_PCIE_SOUTH_PCIE] = &qnm_pcie_south_gem_noc_pcie,
- [SLAVE_ANOC_PCIE_GEM_NOC_PCIE] = &qns_pcie_mem_noc_pcie,
};
static const struct qcom_icc_desc x1e80100_pcie_center_anoc = {
@@ -2167,10 +1836,6 @@ static struct qcom_icc_node * const pcie_north_anoc_nodes[] = {
[MASTER_PCIE_4] = &xm_pcie_4,
[MASTER_PCIE_5] = &xm_pcie_5,
[SLAVE_PCIE_NORTH] = &qns_pcie_north_gem_noc,
- [MASTER_PCIE_3_PCIE] = &xm_pcie_3_pcie,
- [MASTER_PCIE_4_PCIE] = &xm_pcie_4_pcie,
- [MASTER_PCIE_5_PCIE] = &xm_pcie_5_pcie,
- [SLAVE_PCIE_NORTH_PCIE] = &qns_pcie_north_gem_noc_pcie,
};
static const struct qcom_icc_desc x1e80100_pcie_north_anoc = {
@@ -2180,7 +1845,7 @@ static const struct qcom_icc_desc x1e80100_pcie_north_anoc = {
.num_bcms = ARRAY_SIZE(pcie_north_anoc_bcms),
};
-static struct qcom_icc_bcm *pcie_south_anoc_bcms[] = {
+static struct qcom_icc_bcm * const pcie_south_anoc_bcms[] = {
};
static struct qcom_icc_node * const pcie_south_anoc_nodes[] = {
@@ -2190,12 +1855,6 @@ static struct qcom_icc_node * const pcie_south_anoc_nodes[] = {
[MASTER_PCIE_6A] = &xm_pcie_6a,
[MASTER_PCIE_6B] = &xm_pcie_6b,
[SLAVE_PCIE_SOUTH] = &qns_pcie_south_gem_noc,
- [MASTER_PCIE_0_PCIE] = &xm_pcie_0_pcie,
- [MASTER_PCIE_1_PCIE] = &xm_pcie_1_pcie,
- [MASTER_PCIE_2_PCIE] = &xm_pcie_2_pcie,
- [MASTER_PCIE_6A_PCIE] = &xm_pcie_6a_pcie,
- [MASTER_PCIE_6B_PCIE] = &xm_pcie_6b_pcie,
- [SLAVE_PCIE_SOUTH_PCIE] = &qns_pcie_south_gem_noc_pcie,
};
static const struct qcom_icc_desc x1e80100_pcie_south_anoc = {
@@ -2205,7 +1864,7 @@ static const struct qcom_icc_desc x1e80100_pcie_south_anoc = {
.num_bcms = ARRAY_SIZE(pcie_south_anoc_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn2,
&bcm_sn3,
@@ -2243,7 +1902,7 @@ static const struct qcom_icc_desc x1e80100_usb_center_anoc = {
.num_bcms = ARRAY_SIZE(usb_center_anoc_bcms),
};
-static struct qcom_icc_bcm *usb_north_anoc_bcms[] = {
+static struct qcom_icc_bcm * const usb_north_anoc_bcms[] = {
};
static struct qcom_icc_node * const usb_north_anoc_nodes[] = {
@@ -2259,7 +1918,7 @@ static const struct qcom_icc_desc x1e80100_usb_north_anoc = {
.num_bcms = ARRAY_SIZE(usb_north_anoc_bcms),
};
-static struct qcom_icc_bcm *usb_south_anoc_bcms[] = {
+static struct qcom_icc_bcm * const usb_south_anoc_bcms[] = {
};
static struct qcom_icc_node * const usb_south_anoc_nodes[] = {
diff --git a/drivers/interconnect/samsung/exynos.c b/drivers/interconnect/samsung/exynos.c
index 1ba14cb45d5a2..c9e5361e17c5b 100644
--- a/drivers/interconnect/samsung/exynos.c
+++ b/drivers/interconnect/samsung/exynos.c
@@ -82,7 +82,7 @@ static int exynos_generic_icc_set(struct icc_node *src, struct icc_node *dst)
return 0;
}
-static struct icc_node *exynos_generic_icc_xlate(struct of_phandle_args *spec,
+static struct icc_node *exynos_generic_icc_xlate(const struct of_phandle_args *spec,
void *data)
{
struct exynos_icc_priv *priv = data;
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index e7a44929f0daf..ac6754a85f350 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -3228,30 +3228,33 @@ out:
static void iommu_snp_enable(void)
{
#ifdef CONFIG_KVM_AMD_SEV
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return;
/*
* The SNP support requires that IOMMU must be enabled, and is
- * not configured in the passthrough mode.
+ * configured with V1 page table (DTE[Mode] = 0 is not supported).
*/
if (no_iommu || iommu_default_passthrough()) {
- pr_err("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
- return;
+ pr_warn("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
+ goto disable_snp;
+ }
+
+ if (amd_iommu_pgtable != AMD_IOMMU_V1) {
+ pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n");
+ goto disable_snp;
}
amd_iommu_snp_en = check_feature(FEATURE_SNP);
if (!amd_iommu_snp_en) {
- pr_err("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
- return;
+ pr_warn("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
+ goto disable_snp;
}
pr_info("IOMMU SNP support enabled.\n");
+ return;
- /* Enforce IOMMU v1 pagetable when SNP is enabled. */
- if (amd_iommu_pgtable != AMD_IOMMU_V1) {
- pr_warn("Forcing use of AMD IOMMU v1 page table due to SNP.\n");
- amd_iommu_pgtable = AMD_IOMMU_V1;
- }
+disable_snp:
+ cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
#endif
}
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index d35c1b8c8e65c..e692217fcb280 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1692,26 +1692,29 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
static u16 domain_id_alloc(void)
{
+ unsigned long flags;
int id;
- spin_lock(&pd_bitmap_lock);
+ spin_lock_irqsave(&pd_bitmap_lock, flags);
id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
BUG_ON(id == 0);
if (id > 0 && id < MAX_DOMAIN_ID)
__set_bit(id, amd_iommu_pd_alloc_bitmap);
else
id = 0;
- spin_unlock(&pd_bitmap_lock);
+ spin_unlock_irqrestore(&pd_bitmap_lock, flags);
return id;
}
static void domain_id_free(int id)
{
- spin_lock(&pd_bitmap_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pd_bitmap_lock, flags);
if (id > 0 && id < MAX_DOMAIN_ID)
__clear_bit(id, amd_iommu_pd_alloc_bitmap);
- spin_unlock(&pd_bitmap_lock);
+ spin_unlock_irqrestore(&pd_bitmap_lock, flags);
}
static void free_gcr3_tbl_level1(u64 *tbl)
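/*
 * Illustrative sketch of the locking change above: once the domain-ID
 * bitmap lock can be taken from contexts with interrupts disabled, the
 * plain spin_lock()/spin_unlock() pair is replaced by the flag-saving
 * variants. All identifiers below are made up for the example.
 */
#include <linux/spinlock.h>
#include <linux/bitmap.h>

static DEFINE_SPINLOCK(example_id_lock);
static DECLARE_BITMAP(example_id_map, 64);

static int example_id_alloc(void)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&example_id_lock, flags);
	id = find_first_zero_bit(example_id_map, 64);
	if (id < 64)
		__set_bit(id, example_id_map);
	else
		id = -1;
	spin_unlock_irqrestore(&example_id_lock, flags);

	return id;
}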
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 5ed036225e69b..41f93c3ab160d 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1139,7 +1139,8 @@ static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
* requires a breaking update, zero the V bit, write all qwords
* but 0, then set qword 0
*/
- unused_update.data[0] = entry->data[0] & (~STRTAB_STE_0_V);
+ unused_update.data[0] = entry->data[0] &
+ cpu_to_le64(~STRTAB_STE_0_V);
entry_set(smmu, sid, entry, &unused_update, 0, 1);
entry_set(smmu, sid, entry, target, 1, num_entry_qwords - 1);
entry_set(smmu, sid, entry, target, 0, 1);
@@ -1453,14 +1454,17 @@ static void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT));
}
-static void arm_smmu_make_bypass_ste(struct arm_smmu_ste *target)
+static void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
+ struct arm_smmu_ste *target)
{
memset(target, 0, sizeof(*target));
target->data[0] = cpu_to_le64(
STRTAB_STE_0_V |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS));
- target->data[1] = cpu_to_le64(
- FIELD_PREP(STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
+
+ if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR)
+ target->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
}
static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
@@ -1523,6 +1527,7 @@ static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr =
&pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
u64 vtcr_val;
+ struct arm_smmu_device *smmu = master->smmu;
memset(target, 0, sizeof(*target));
target->data[0] = cpu_to_le64(
@@ -1531,9 +1536,11 @@ static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
target->data[1] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_1_EATS,
- master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0) |
- FIELD_PREP(STRTAB_STE_1_SHCFG,
- STRTAB_STE_1_SHCFG_INCOMING));
+ master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0));
+
+ if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR)
+ target->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
vtcr_val = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
@@ -1560,7 +1567,8 @@ static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
* This can safely directly manipulate the STE memory without a sync sequence
* because the STE table has not been installed in the SMMU yet.
*/
-static void arm_smmu_init_initial_stes(struct arm_smmu_ste *strtab,
+static void arm_smmu_init_initial_stes(struct arm_smmu_device *smmu,
+ struct arm_smmu_ste *strtab,
unsigned int nent)
{
unsigned int i;
@@ -1569,7 +1577,7 @@ static void arm_smmu_init_initial_stes(struct arm_smmu_ste *strtab,
if (disable_bypass)
arm_smmu_make_abort_ste(strtab);
else
- arm_smmu_make_bypass_ste(strtab);
+ arm_smmu_make_bypass_ste(smmu, strtab);
strtab++;
}
}
@@ -1597,7 +1605,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return -ENOMEM;
}
- arm_smmu_init_initial_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
+ arm_smmu_init_initial_stes(smmu, desc->l2ptr, 1 << STRTAB_SPLIT);
arm_smmu_write_strtab_l1_desc(strtab, desc);
return 0;
}
@@ -2637,8 +2645,9 @@ static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
struct device *dev)
{
struct arm_smmu_ste ste;
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- arm_smmu_make_bypass_ste(&ste);
+ arm_smmu_make_bypass_ste(master->smmu, &ste);
return arm_smmu_attach_dev_ste(dev, &ste);
}
@@ -3264,7 +3273,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
cfg->strtab_base_cfg = reg;
- arm_smmu_init_initial_stes(strtab, cfg->num_l1_ents);
+ arm_smmu_init_initial_stes(smmu, strtab, cfg->num_l1_ents);
return 0;
}
@@ -3777,6 +3786,9 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
return -ENXIO;
}
+ if (reg & IDR1_ATTR_TYPES_OVR)
+ smmu->features |= ARM_SMMU_FEAT_ATTR_TYPES_OVR;
+
/* Queue sizes, capped to ensure natural alignment */
smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
FIELD_GET(IDR1_CMDQS, reg));
@@ -3992,7 +4004,7 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
* STE table is not programmed to HW, see
* arm_smmu_initial_bypass_stes()
*/
- arm_smmu_make_bypass_ste(
+ arm_smmu_make_bypass_ste(smmu,
arm_smmu_get_step_for_sid(smmu, rmr->sids[i]));
}
}
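/*
 * Illustrative sketch of the endianness fix above: the STE qwords are
 * __le64, so a CPU-endian mask has to go through cpu_to_le64() before it
 * is ANDed into the descriptor. Names below are made up for the example.
 */
static void example_clear_bit_le64(__le64 *qword, u64 mask)
{
	*qword &= cpu_to_le64(~mask);
}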
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 23baf117e7e4b..2a19bb63e5c6d 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -44,6 +44,7 @@
#define IDR1_TABLES_PRESET (1 << 30)
#define IDR1_QUEUES_PRESET (1 << 29)
#define IDR1_REL (1 << 28)
+#define IDR1_ATTR_TYPES_OVR (1 << 27)
#define IDR1_CMDQS GENMASK(25, 21)
#define IDR1_EVTQS GENMASK(20, 16)
#define IDR1_PRIQS GENMASK(15, 11)
@@ -647,6 +648,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_SVA (1 << 17)
#define ARM_SMMU_FEAT_E2H (1 << 18)
#define ARM_SMMU_FEAT_NESTING (1 << 19)
+#define ARM_SMMU_FEAT_ATTR_TYPES_OVR (1 << 20)
u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index b58f5a3311c3a..e4cb26f6a9434 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1711,6 +1711,14 @@ static size_t iommu_dma_opt_mapping_size(void)
return iova_rcache_range();
}
+static size_t iommu_dma_max_mapping_size(struct device *dev)
+{
+ if (dev_is_untrusted(dev))
+ return swiotlb_max_mapping_size(dev);
+
+ return SIZE_MAX;
+}
+
static const struct dma_map_ops iommu_dma_ops = {
.flags = DMA_F_PCI_P2PDMA_SUPPORTED,
.alloc = iommu_dma_alloc,
@@ -1733,6 +1741,7 @@ static const struct dma_map_ops iommu_dma_ops = {
.unmap_resource = iommu_dma_unmap_resource,
.get_merge_boundary = iommu_dma_get_merge_boundary,
.opt_mapping_size = iommu_dma_opt_mapping_size,
+ .max_mapping_size = iommu_dma_max_mapping_size,
};
/*
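/*
 * Illustrative usage sketch for the new .max_mapping_size callback wired
 * up above: callers can query dma_max_mapping_size(), which now reflects
 * the SWIOTLB bounce-buffer limit for untrusted devices behind an IOMMU.
 * The helper name below is made up for the example.
 */
#include <linux/dma-mapping.h>

static size_t example_clamp_request(struct device *dev, size_t len)
{
	return min(len, dma_max_mapping_size(dev));
}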
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index 6cf9f48e7d8c8..f52fb39c968eb 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -87,8 +87,8 @@ config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
the default value.
config INTEL_IOMMU_PERF_EVENTS
- def_bool y
bool "Intel IOMMU performance events"
+ default y
depends on INTEL_IOMMU && PERF_EVENTS
help
Selecting this option will enable the performance monitoring
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 50eb9aed47cc5..a7ecd90303dc4 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4299,9 +4299,11 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
}
dev_iommu_priv_set(dev, info);
- ret = device_rbtree_insert(iommu, info);
- if (ret)
- goto free;
+ if (pdev && pci_ats_supported(pdev)) {
+ ret = device_rbtree_insert(iommu, info);
+ if (ret)
+ goto free;
+ }
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
ret = intel_pasid_alloc_table(dev);
@@ -4336,7 +4338,8 @@ static void intel_iommu_release_device(struct device *dev)
struct intel_iommu *iommu = info->iommu;
mutex_lock(&iommu->iopf_lock);
- device_rbtree_remove(info);
+ if (dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev)))
+ device_rbtree_remove(info);
mutex_unlock(&iommu->iopf_lock);
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
diff --git a/drivers/iommu/intel/perfmon.c b/drivers/iommu/intel/perfmon.c
index cf43e798eca49..44083d01852db 100644
--- a/drivers/iommu/intel/perfmon.c
+++ b/drivers/iommu/intel/perfmon.c
@@ -438,7 +438,7 @@ static int iommu_pmu_assign_event(struct iommu_pmu *iommu_pmu,
iommu_pmu_set_filter(domain, event->attr.config1,
IOMMU_PMU_FILTER_DOMAIN, idx,
event->attr.config1);
- iommu_pmu_set_filter(pasid, event->attr.config1,
+ iommu_pmu_set_filter(pasid, event->attr.config2,
IOMMU_PMU_FILTER_PASID, idx,
event->attr.config1);
iommu_pmu_set_filter(ats, event->attr.config2,
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index c1bed89b10261..ee3b469e2da15 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -66,7 +66,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
struct page *pages;
int irq, ret;
- pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
+ pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
if (!pages) {
pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
iommu->name);
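/*
 * Illustrative sketch of the NUMA-aware allocation above: placing the
 * page-request queue on the IOMMU's own node keeps the hot fault path
 * off the interconnect. Parameters below are made up for the example.
 */
static struct page *example_alloc_on_node(int nid, unsigned int order)
{
	return alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO, order);
}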
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 098869007c69e..a95a483def2d2 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -3354,6 +3354,7 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
{
/* Caller must be a probed driver on dev */
struct iommu_group *group = dev->iommu_group;
+ struct group_device *device;
void *curr;
int ret;
@@ -3363,10 +3364,18 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
if (!group)
return -ENODEV;
- if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner)
+ if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner ||
+ pasid == IOMMU_NO_PASID)
return -EINVAL;
mutex_lock(&group->mutex);
+ for_each_group_device(group, device) {
+ if (pasid >= device->dev->iommu->max_pasids) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
if (curr) {
ret = xa_err(curr) ? : -EBUSY;
diff --git a/drivers/iommu/iommufd/Kconfig b/drivers/iommu/iommufd/Kconfig
index 99d4b075df49e..76656fe0470d7 100644
--- a/drivers/iommu/iommufd/Kconfig
+++ b/drivers/iommu/iommufd/Kconfig
@@ -37,6 +37,7 @@ config IOMMUFD_TEST
depends on DEBUG_KERNEL
depends on FAULT_INJECTION
depends on RUNTIME_TESTING_MENU
+ select IOMMUFD_DRIVER
default n
help
This is dangerous, do not enable unless running
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index b8c47f18bc261..6a2707fe7a78c 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -1790,6 +1790,7 @@ static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt8365-m4u", .data = &mt8365_data},
{}
};
+MODULE_DEVICE_TABLE(of, mtk_iommu_of_ids);
static struct platform_driver mtk_iommu_driver = {
.probe = mtk_iommu_probe,
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index a9fa2a54dc9b3..d6e4002200bd3 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -600,6 +600,7 @@ static const struct of_device_id mtk_iommu_v1_of_ids[] = {
{ .compatible = "mediatek,mt2701-m4u", },
{}
};
+MODULE_DEVICE_TABLE(of, mtk_iommu_v1_of_ids);
static const struct component_master_ops mtk_iommu_v1_com_ops = {
.bind = mtk_iommu_v1_bind,
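/*
 * Illustrative sketch of the autoload fix above: exporting the OF match
 * table with MODULE_DEVICE_TABLE() emits the "of:..." modalias entries
 * that let udev load the module automatically. The compatible string is
 * made up for the example.
 */
#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct of_device_id example_of_ids[] = {
	{ .compatible = "vendor,example-device" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_of_ids);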
diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c
index b1471ba016a51..866bf48d803bc 100644
--- a/drivers/ipack/ipack.c
+++ b/drivers/ipack/ipack.c
@@ -187,7 +187,7 @@ static struct attribute *ipack_attrs[] = {
};
ATTRIBUTE_GROUPS(ipack);
-static struct bus_type ipack_bus_type = {
+static const struct bus_type ipack_bus_type = {
.name = "ipack",
.probe = ipack_bus_probe,
.match = ipack_bus_match,
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index a55528469278c..4b021a67bdfe4 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -316,7 +316,7 @@ static int armada_370_xp_msi_init(struct device_node *node,
return 0;
}
#else
-static void armada_370_xp_msi_reenable_percpu(void) {}
+static __maybe_unused void armada_370_xp_msi_reenable_percpu(void) {}
static inline int armada_370_xp_msi_init(struct device_node *node,
phys_addr_t main_int_phys_base)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index fca888b36680d..2a537cbfcb077 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -786,6 +786,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
+ struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe);
unsigned long vpt_addr, vconf_addr;
u64 target;
bool alloc;
@@ -798,6 +799,11 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
if (is_v4_1(its)) {
alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
its_encode_alloc(cmd, alloc);
+ /*
+ * Unmapping a VPE is self-synchronizing on GICv4.1,
+ * no need to issue a VSYNC.
+ */
+ vpe = NULL;
}
goto out;
@@ -832,7 +838,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
out:
its_fixup_cmd(cmd);
- return valid_vpe(its, desc->its_vmapp_cmd.vpe);
+ return vpe;
}
static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index 9494fc26259c3..ae67fec2ab468 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -85,10 +85,9 @@ static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
return data->domain->host_data;
}
-static void rzg2l_irq_eoi(struct irq_data *d)
+static void rzg2l_clear_irq_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
{
- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
- struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hw_irq = hwirq - IRQC_IRQ_START;
u32 bit = BIT(hw_irq);
u32 iitsr, iscr;
@@ -99,20 +98,30 @@ static void rzg2l_irq_eoi(struct irq_data *d)
* ISCR can only be cleared if the type is falling-edge, rising-edge or
* falling/rising-edge.
*/
- if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq)))
+ if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq))) {
writel_relaxed(iscr & ~bit, priv->base + ISCR);
+ /*
+ * Enforce that the posted write is flushed to prevent that the
+ * just handled interrupt is raised again.
+ */
+ readl_relaxed(priv->base + ISCR);
+ }
}
-static void rzg2l_tint_eoi(struct irq_data *d)
+static void rzg2l_clear_tint_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
{
- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_TINT_START;
- struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
- u32 bit = BIT(hw_irq);
+ u32 bit = BIT(hwirq - IRQC_TINT_START);
u32 reg;
reg = readl_relaxed(priv->base + TSCR);
- if (reg & bit)
+ if (reg & bit) {
writel_relaxed(reg & ~bit, priv->base + TSCR);
+ /*
+ * Enforce that the posted write is flushed to prevent that the
+ * just handled interrupt is raised again.
+ */
+ readl_relaxed(priv->base + TSCR);
+ }
}
static void rzg2l_irqc_eoi(struct irq_data *d)
@@ -122,9 +131,9 @@ static void rzg2l_irqc_eoi(struct irq_data *d)
raw_spin_lock(&priv->lock);
if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
- rzg2l_irq_eoi(d);
+ rzg2l_clear_irq_int(priv, hw_irq);
else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
- rzg2l_tint_eoi(d);
+ rzg2l_clear_tint_int(priv, hw_irq);
raw_spin_unlock(&priv->lock);
irq_chip_eoi_parent(d);
}
@@ -142,7 +151,7 @@ static void rzg2l_irqc_irq_disable(struct irq_data *d)
raw_spin_lock(&priv->lock);
reg = readl_relaxed(priv->base + TSSR(tssr_index));
- reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
+ reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
writel_relaxed(reg, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
}
@@ -154,7 +163,6 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
unsigned int hw_irq = irqd_to_hwirq(d);
if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
- unsigned long tint = (uintptr_t)irq_data_get_irq_chip_data(d);
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
u32 offset = hw_irq - IRQC_TINT_START;
u32 tssr_offset = TSSR_OFFSET(offset);
@@ -163,7 +171,7 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
raw_spin_lock(&priv->lock);
reg = readl_relaxed(priv->base + TSSR(tssr_index));
- reg |= (TIEN | tint) << TSSEL_SHIFT(tssr_offset);
+ reg |= TIEN << TSSEL_SHIFT(tssr_offset);
writel_relaxed(reg, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
}
@@ -172,8 +180,10 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
{
- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+ u32 iitseln = hwirq - IRQC_IRQ_START;
+ bool clear_irq_int = false;
u16 sense, tmp;
switch (type & IRQ_TYPE_SENSE_MASK) {
@@ -183,14 +193,17 @@ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
case IRQ_TYPE_EDGE_FALLING:
sense = IITSR_IITSEL_EDGE_FALLING;
+ clear_irq_int = true;
break;
case IRQ_TYPE_EDGE_RISING:
sense = IITSR_IITSEL_EDGE_RISING;
+ clear_irq_int = true;
break;
case IRQ_TYPE_EDGE_BOTH:
sense = IITSR_IITSEL_EDGE_BOTH;
+ clear_irq_int = true;
break;
default:
@@ -199,21 +212,40 @@ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
raw_spin_lock(&priv->lock);
tmp = readl_relaxed(priv->base + IITSR);
- tmp &= ~IITSR_IITSEL_MASK(hw_irq);
- tmp |= IITSR_IITSEL(hw_irq, sense);
+ tmp &= ~IITSR_IITSEL_MASK(iitseln);
+ tmp |= IITSR_IITSEL(iitseln, sense);
+ if (clear_irq_int)
+ rzg2l_clear_irq_int(priv, hwirq);
writel_relaxed(tmp, priv->base + IITSR);
raw_spin_unlock(&priv->lock);
return 0;
}
+static u32 rzg2l_disable_tint_and_set_tint_source(struct irq_data *d, struct rzg2l_irqc_priv *priv,
+ u32 reg, u32 tssr_offset, u8 tssr_index)
+{
+ u32 tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
+ u32 tien = reg & (TIEN << TSSEL_SHIFT(tssr_offset));
+
+ /* Clear the relevant byte in reg */
+ reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
+ /* Set TINT and leave TIEN clear */
+ reg |= tint << TSSEL_SHIFT(tssr_offset);
+ writel_relaxed(reg, priv->base + TSSR(tssr_index));
+
+ return reg | tien;
+}
+
static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
{
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
unsigned int hwirq = irqd_to_hwirq(d);
u32 titseln = hwirq - IRQC_TINT_START;
+ u32 tssr_offset = TSSR_OFFSET(titseln);
+ u8 tssr_index = TSSR_INDEX(titseln);
u8 index, sense;
- u32 reg;
+ u32 reg, tssr;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_RISING:
@@ -235,10 +267,14 @@ static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
}
raw_spin_lock(&priv->lock);
+ tssr = readl_relaxed(priv->base + TSSR(tssr_index));
+ tssr = rzg2l_disable_tint_and_set_tint_source(d, priv, tssr, tssr_offset, tssr_index);
reg = readl_relaxed(priv->base + TITSR(index));
reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH));
reg |= sense << (titseln * TITSEL_WIDTH);
writel_relaxed(reg, priv->base + TITSR(index));
+ rzg2l_clear_tint_int(priv, hwirq);
+ writel_relaxed(tssr, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
return 0;
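/*
 * Illustrative sketch of the posted-write flush added above: after the
 * status bit is cleared, the same register is read back so the write has
 * reached the device before the interrupt can be re-raised. The register
 * offset is made up for the example.
 */
static void example_ack_status(void __iomem *base, u32 bit)
{
	u32 status = readl_relaxed(base + 0x10);

	if (status & bit) {
		writel_relaxed(status & ~bit, base + 0x10);
		/* Flush the posted write before returning. */
		readl_relaxed(base + 0x10);
	}
}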
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index f87aeab460eb8..9e71c44288141 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -149,8 +149,9 @@ static int riscv_intc_domain_alloc(struct irq_domain *domain,
* Only allow hwirq for which we have corresponding standard or
* custom interrupt enable register.
*/
- if ((hwirq >= riscv_intc_nr_irqs && hwirq < riscv_intc_custom_base) ||
- (hwirq >= riscv_intc_custom_base + riscv_intc_custom_nr_irqs))
+ if (hwirq >= riscv_intc_nr_irqs &&
+ (hwirq < riscv_intc_custom_base ||
+ hwirq >= riscv_intc_custom_base + riscv_intc_custom_nr_irqs))
return -EINVAL;
for (i = 0; i < nr_irqs; i++) {
@@ -183,10 +184,12 @@ static int __init riscv_intc_init_common(struct fwnode_handle *fn, struct irq_ch
return -ENXIO;
}
- if (riscv_isa_extension_available(NULL, SxAIA))
+ if (riscv_isa_extension_available(NULL, SxAIA)) {
+ riscv_intc_nr_irqs = 64;
rc = set_handle_irq(&riscv_intc_aia_irq);
- else
+ } else {
rc = set_handle_irq(&riscv_intc_irq);
+ }
if (rc) {
pr_err("failed to set irq handler\n");
return rc;
@@ -195,7 +198,7 @@ static int __init riscv_intc_init_common(struct fwnode_handle *fn, struct irq_ch
riscv_set_intc_hwnode_fn(riscv_intc_hwnode);
pr_info("%d local interrupts mapped%s\n",
- riscv_isa_extension_available(NULL, SxAIA) ? 64 : riscv_intc_nr_irqs,
+ riscv_intc_nr_irqs,
riscv_isa_extension_available(NULL, SxAIA) ? " using AIA" : "");
if (riscv_intc_custom_nr_irqs)
pr_info("%d custom local interrupts mapped\n", riscv_intc_custom_nr_irqs);
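/*
 * Illustrative restatement of the corrected range check above: a hwirq is
 * acceptable when it lies below the standard range or inside the custom
 * range [base, base + nr). Variable names mirror the driver but the
 * helper itself is made up for the example.
 */
static bool example_hwirq_valid(unsigned long hwirq, unsigned long nr_irqs,
				unsigned long custom_base,
				unsigned long custom_nr)
{
	if (hwirq < nr_irqs)
		return true;

	return hwirq >= custom_base && hwirq < custom_base + custom_nr;
}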
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 2776ca5fc33f3..b215b28cad7b7 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -401,23 +401,23 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
}
static int data_sock_setsockopt(struct socket *sock, int level, int optname,
- sockptr_t optval, unsigned int len)
+ sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int err = 0, opt = 0;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s(%p, %d, %x, optval, %d)\n", __func__, sock,
- level, optname, len);
+ level, optname, optlen);
lock_sock(sk);
switch (optname) {
case MISDN_TIME_STAMP:
- if (copy_from_sockptr(&opt, optval, sizeof(int))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt),
+ optval, optlen);
+ if (err)
break;
- }
if (opt)
_pms(sk)->cmask |= MISDN_TIME_STAMP;
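/*
 * Illustrative sketch of the sockptr helper used above:
 * copy_safe_from_sockptr() checks that optlen covers the kernel object
 * before copying, so a short user buffer produces an error instead of a
 * truncated read. The wrapper below is made up for the example.
 */
static int example_get_int_opt(sockptr_t optval, unsigned int optlen, int *out)
{
	return copy_safe_from_sockptr(out, sizeof(*out), optval, optlen);
}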
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index d721b254e1e45..05e6af88b88cd 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -6,6 +6,12 @@ config LEDS_GPIO_REGISTER
As this function is used by arch code it must not be compiled as a
module.
+# This library does not depend on NEW_LEDS and must be independent so it can be
+# selected from other subsystems (specifically backlight).
+config LEDS_EXPRESSWIRE
+ bool
+ depends on GPIOLIB
+
menuconfig NEW_LEDS
bool "LED Support"
help
@@ -186,6 +192,10 @@ config LEDS_EL15203000
To compile this driver as a module, choose M here: the module
will be called leds-el15203000.
+config LEDS_EXPRESSWIRE
+ bool
+ depends on GPIOLIB
+
config LEDS_TURRIS_OMNIA
tristate "LED support for CZ.NIC's Turris Omnia"
depends on LEDS_CLASS_MULTICOLOR
@@ -395,7 +405,7 @@ config LEDS_LP3952
config LEDS_LP50XX
tristate "LED Support for TI LP5036/30/24/18/12/09 LED driver chip"
depends on LEDS_CLASS && REGMAP_I2C
- depends on LEDS_CLASS_MULTICOLOR || !LEDS_CLASS_MULTICOLOR
+ depends on LEDS_CLASS_MULTICOLOR
help
If you say yes here you get support for the Texas Instruments
LP5036, LP5030, LP5024, LP5018, LP5012 and LP5009 LED driver.
@@ -406,7 +416,7 @@ config LEDS_LP50XX
config LEDS_LP55XX_COMMON
tristate "Common Driver for TI/National LP5521/5523/55231/5562/8501"
depends on LEDS_CLASS
- depends on LEDS_CLASS_MULTICOLOR || !LEDS_CLASS_MULTICOLOR
+ depends on LEDS_CLASS_MULTICOLOR
depends on OF
depends on I2C
select FW_LOADER
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index ce07dc295ff00..effdfc6f1e951 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -91,6 +91,9 @@ obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
+# Kinetic ExpressWire Protocol
+obj-$(CONFIG_LEDS_EXPRESSWIRE) += leds-expresswire.o
+
# LED SPI Drivers
obj-$(CONFIG_LEDS_CR0014114) += leds-cr0014114.o
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/flash/Kconfig b/drivers/leds/flash/Kconfig
index 4e08dbc057096..809b6d98bb3e6 100644
--- a/drivers/leds/flash/Kconfig
+++ b/drivers/leds/flash/Kconfig
@@ -23,7 +23,8 @@ config LEDS_AS3645A
config LEDS_KTD2692
tristate "LED support for Kinetic KTD2692 flash LED controller"
depends on OF
- depends on GPIOLIB || COMPILE_TEST
+ depends on GPIOLIB
+ select LEDS_EXPRESSWIRE
help
This option enables support for Kinetic KTD2692 LED flash connected
through ExpressWire interface.
@@ -51,8 +52,8 @@ config LEDS_MAX77693
config LEDS_MT6360
tristate "LED Support for Mediatek MT6360 PMIC"
depends on LEDS_CLASS && OF
- depends on LEDS_CLASS_FLASH || !LEDS_CLASS_FLASH
- depends on LEDS_CLASS_MULTICOLOR || !LEDS_CLASS_MULTICOLOR
+ depends on LEDS_CLASS_FLASH
+ depends on LEDS_CLASS_MULTICOLOR
depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS
depends on MFD_MT6360
help
diff --git a/drivers/leds/flash/leds-ktd2692.c b/drivers/leds/flash/leds-ktd2692.c
index 598eee5daa527..7bb0aa2753e36 100644
--- a/drivers/leds/flash/leds-ktd2692.c
+++ b/drivers/leds/flash/leds-ktd2692.c
@@ -6,9 +6,9 @@
* Ingi Kim <ingi2.kim@samsung.com>
*/
-#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
+#include <linux/leds-expresswire.h>
#include <linux/led-class-flash.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -37,22 +37,9 @@
#define KTD2692_REG_FLASH_CURRENT_BASE 0x80
#define KTD2692_REG_MODE_BASE 0xA0
-/* Set bit coding time for expresswire interface */
-#define KTD2692_TIME_RESET_US 700
-#define KTD2692_TIME_DATA_START_TIME_US 10
-#define KTD2692_TIME_HIGH_END_OF_DATA_US 350
-#define KTD2692_TIME_LOW_END_OF_DATA_US 10
-#define KTD2692_TIME_SHORT_BITSET_US 4
-#define KTD2692_TIME_LONG_BITSET_US 12
-
/* KTD2692 default length of name */
#define KTD2692_NAME_LENGTH 20
-enum ktd2692_bitset {
- KTD2692_LOW = 0,
- KTD2692_HIGH,
-};
-
/* Movie / Flash Mode Control */
enum ktd2692_led_mode {
KTD2692_MODE_DISABLE = 0, /* default */
@@ -71,7 +58,19 @@ struct ktd2692_led_config_data {
enum led_brightness max_brightness;
};
+const struct expresswire_timing ktd2692_timing = {
+ .poweroff_us = 700,
+ .data_start_us = 10,
+ .end_of_data_low_us = 10,
+ .end_of_data_high_us = 350,
+ .short_bitset_us = 4,
+ .long_bitset_us = 12
+};
+
struct ktd2692_context {
+ /* Common ExpressWire properties (ctrl GPIO and timing) */
+ struct expresswire_common_props props;
+
/* Related LED Flash class device */
struct led_classdev_flash fled_cdev;
@@ -80,7 +79,6 @@ struct ktd2692_context {
struct regulator *regulator;
struct gpio_desc *aux_gpio;
- struct gpio_desc *ctrl_gpio;
enum ktd2692_led_mode mode;
enum led_brightness torch_brightness;
@@ -92,67 +90,6 @@ static struct ktd2692_context *fled_cdev_to_led(
return container_of(fled_cdev, struct ktd2692_context, fled_cdev);
}
-static void ktd2692_expresswire_start(struct ktd2692_context *led)
-{
- gpiod_direction_output(led->ctrl_gpio, KTD2692_HIGH);
- udelay(KTD2692_TIME_DATA_START_TIME_US);
-}
-
-static void ktd2692_expresswire_reset(struct ktd2692_context *led)
-{
- gpiod_direction_output(led->ctrl_gpio, KTD2692_LOW);
- udelay(KTD2692_TIME_RESET_US);
-}
-
-static void ktd2692_expresswire_end(struct ktd2692_context *led)
-{
- gpiod_direction_output(led->ctrl_gpio, KTD2692_LOW);
- udelay(KTD2692_TIME_LOW_END_OF_DATA_US);
- gpiod_direction_output(led->ctrl_gpio, KTD2692_HIGH);
- udelay(KTD2692_TIME_HIGH_END_OF_DATA_US);
-}
-
-static void ktd2692_expresswire_set_bit(struct ktd2692_context *led, bool bit)
-{
- /*
- * The Low Bit(0) and High Bit(1) is based on a time detection
- * algorithm between time low and time high
- * Time_(L_LB) : Low time of the Low Bit(0)
- * Time_(H_LB) : High time of the LOW Bit(0)
- * Time_(L_HB) : Low time of the High Bit(1)
- * Time_(H_HB) : High time of the High Bit(1)
- *
- * It can be simplified to:
- * Low Bit(0) : 2 * Time_(H_LB) < Time_(L_LB)
- * High Bit(1) : 2 * Time_(L_HB) < Time_(H_HB)
- * HIGH ___ ____ _.. _________ ___
- * |_________| |_.. |____| |__|
- * LOW <L_LB> <H_LB> <L_HB> <H_HB>
- * [ Low Bit (0) ] [ High Bit(1) ]
- */
- if (bit) {
- gpiod_direction_output(led->ctrl_gpio, KTD2692_LOW);
- udelay(KTD2692_TIME_SHORT_BITSET_US);
- gpiod_direction_output(led->ctrl_gpio, KTD2692_HIGH);
- udelay(KTD2692_TIME_LONG_BITSET_US);
- } else {
- gpiod_direction_output(led->ctrl_gpio, KTD2692_LOW);
- udelay(KTD2692_TIME_LONG_BITSET_US);
- gpiod_direction_output(led->ctrl_gpio, KTD2692_HIGH);
- udelay(KTD2692_TIME_SHORT_BITSET_US);
- }
-}
-
-static void ktd2692_expresswire_write(struct ktd2692_context *led, u8 value)
-{
- int i;
-
- ktd2692_expresswire_start(led);
- for (i = 7; i >= 0; i--)
- ktd2692_expresswire_set_bit(led, value & BIT(i));
- ktd2692_expresswire_end(led);
-}
-
static int ktd2692_led_brightness_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
@@ -163,14 +100,14 @@ static int ktd2692_led_brightness_set(struct led_classdev *led_cdev,
if (brightness == LED_OFF) {
led->mode = KTD2692_MODE_DISABLE;
- gpiod_direction_output(led->aux_gpio, KTD2692_LOW);
+ gpiod_direction_output(led->aux_gpio, 0);
} else {
- ktd2692_expresswire_write(led, brightness |
+ expresswire_write_u8(&led->props, brightness |
KTD2692_REG_MOVIE_CURRENT_BASE);
led->mode = KTD2692_MODE_MOVIE;
}
- ktd2692_expresswire_write(led, led->mode | KTD2692_REG_MODE_BASE);
+ expresswire_write_u8(&led->props, led->mode | KTD2692_REG_MODE_BASE);
mutex_unlock(&led->lock);
return 0;
@@ -187,17 +124,17 @@ static int ktd2692_led_flash_strobe_set(struct led_classdev_flash *fled_cdev,
if (state) {
flash_tm_reg = GET_TIMEOUT_OFFSET(timeout->val, timeout->step);
- ktd2692_expresswire_write(led, flash_tm_reg
+ expresswire_write_u8(&led->props, flash_tm_reg
| KTD2692_REG_FLASH_TIMEOUT_BASE);
led->mode = KTD2692_MODE_FLASH;
- gpiod_direction_output(led->aux_gpio, KTD2692_HIGH);
+ gpiod_direction_output(led->aux_gpio, 1);
} else {
led->mode = KTD2692_MODE_DISABLE;
- gpiod_direction_output(led->aux_gpio, KTD2692_LOW);
+ gpiod_direction_output(led->aux_gpio, 0);
}
- ktd2692_expresswire_write(led, led->mode | KTD2692_REG_MODE_BASE);
+ expresswire_write_u8(&led->props, led->mode | KTD2692_REG_MODE_BASE);
fled_cdev->led_cdev.brightness = LED_OFF;
led->mode = KTD2692_MODE_DISABLE;
@@ -247,12 +184,12 @@ static void ktd2692_init_flash_timeout(struct led_classdev_flash *fled_cdev,
static void ktd2692_setup(struct ktd2692_context *led)
{
led->mode = KTD2692_MODE_DISABLE;
- ktd2692_expresswire_reset(led);
- gpiod_direction_output(led->aux_gpio, KTD2692_LOW);
+ expresswire_power_off(&led->props);
+ gpiod_direction_output(led->aux_gpio, 0);
- ktd2692_expresswire_write(led, (KTD2692_MM_MIN_CURR_THRESHOLD_SCALE - 1)
+ expresswire_write_u8(&led->props, (KTD2692_MM_MIN_CURR_THRESHOLD_SCALE - 1)
| KTD2692_REG_MM_MIN_CURR_THRESHOLD_BASE);
- ktd2692_expresswire_write(led, KTD2692_FLASH_MODE_CURR_PERCENT(45)
+ expresswire_write_u8(&led->props, KTD2692_FLASH_MODE_CURR_PERCENT(45)
| KTD2692_REG_FLASH_CURRENT_BASE);
}
@@ -277,8 +214,8 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
if (!np)
return -ENXIO;
- led->ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_ASIS);
- ret = PTR_ERR_OR_ZERO(led->ctrl_gpio);
+ led->props.ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_ASIS);
+ ret = PTR_ERR_OR_ZERO(led->props.ctrl_gpio);
if (ret)
return dev_err_probe(dev, ret, "cannot get ctrl-gpios\n");
@@ -412,6 +349,7 @@ static struct platform_driver ktd2692_driver = {
module_platform_driver(ktd2692_driver);
+MODULE_IMPORT_NS(EXPRESSWIRE);
MODULE_AUTHOR("Ingi Kim <ingi2.kim@samsung.com>");
MODULE_DESCRIPTION("Kinetic KTD2692 LED driver");
MODULE_LICENSE("GPL v2");
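
The ctrl GPIO hookup is visible in the ktd2692_parse_dt() hunk above, but the line that copies the shared timing table into the common props lies outside the shown context. A minimal sketch of the assumed remainder of the conversion (probe-time code, not part of the hunks above):

    /* Assumed probe-time hookup; the actual probe hunk is not shown here. */
    led->props.timing = ktd2692_timing;

    /* Register writes then go through the EXPRESSWIRE namespace helpers. */
    expresswire_power_off(&led->props);
    expresswire_write_u8(&led->props, led->mode | KTD2692_REG_MODE_BASE);
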
diff --git a/drivers/leds/flash/leds-lm3601x.c b/drivers/leds/flash/leds-lm3601x.c
index 8191be0ef0c69..7e93c447fec5c 100644
--- a/drivers/leds/flash/leds-lm3601x.c
+++ b/drivers/leds/flash/leds-lm3601x.c
@@ -70,12 +70,11 @@ enum lm3601x_type {
};
/**
- * struct lm3601x_led -
+ * struct lm3601x_led - private lm3601x LED data
* @fled_cdev: flash LED class device pointer
* @client: Pointer to the I2C client
* @regmap: Devices register map
* @lock: Lock for reading/writing the device
- * @led_name: LED label for the Torch or IR LED
* @flash_timeout: the timeout for the flash
* @last_flag: last known flags register value
* @torch_current_max: maximum current for the torch
diff --git a/drivers/leds/flash/leds-sgm3140.c b/drivers/leds/flash/leds-sgm3140.c
index eb648ff54b4e5..db0ac6641954e 100644
--- a/drivers/leds/flash/leds-sgm3140.c
+++ b/drivers/leds/flash/leds-sgm3140.c
@@ -114,8 +114,11 @@ static int sgm3140_brightness_set(struct led_classdev *led_cdev,
"failed to enable regulator: %d\n", ret);
return ret;
}
+ gpiod_set_value_cansleep(priv->flash_gpio, 0);
gpiod_set_value_cansleep(priv->enable_gpio, 1);
} else {
+ del_timer_sync(&priv->powerdown_timer);
+ gpiod_set_value_cansleep(priv->flash_gpio, 0);
gpiod_set_value_cansleep(priv->enable_gpio, 0);
ret = regulator_disable(priv->vin_regulator);
if (ret) {
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index ba1be15cfd8ea..24fcff682b24a 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -552,6 +552,12 @@ int led_classdev_register_ext(struct device *parent,
led_init_core(led_cdev);
#ifdef CONFIG_LEDS_TRIGGERS
+ /*
+ * If no default trigger was given and hw_control_trigger is set,
+ * make it the default trigger.
+ */
+ if (!led_cdev->default_trigger && led_cdev->hw_control_trigger)
+ led_cdev->default_trigger = led_cdev->hw_control_trigger;
led_trigger_set_default(led_cdev);
#endif
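
With the led-class.c change above, a driver that offers hardware-offloaded blinking no longer needs firmware to name a default trigger; its hw_control trigger is picked up automatically. A minimal sketch, with the trigger name assumed:

    /* In a driver with LED hw offload support, before registration: */
    led_cdev->hw_control_trigger = "netdev";
    /* If DT provides no linux,default-trigger, the core now uses it as the default. */
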
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index bd59a14a4a90c..0f5ac30053ad2 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -23,7 +23,7 @@
* Nests outside led_cdev->trigger_lock
*/
static DECLARE_RWSEM(triggers_list_lock);
-LIST_HEAD(trigger_list);
+static LIST_HEAD(trigger_list);
/* Used by LED Class */
@@ -247,9 +247,23 @@ void led_trigger_remove(struct led_classdev *led_cdev)
}
EXPORT_SYMBOL_GPL(led_trigger_remove);
+static bool led_match_default_trigger(struct led_classdev *led_cdev,
+ struct led_trigger *trig)
+{
+ if (!strcmp(led_cdev->default_trigger, trig->name) &&
+ trigger_relevant(led_cdev, trig)) {
+ led_cdev->flags |= LED_INIT_DEFAULT_TRIGGER;
+ led_trigger_set(led_cdev, trig);
+ return true;
+ }
+
+ return false;
+}
+
void led_trigger_set_default(struct led_classdev *led_cdev)
{
struct led_trigger *trig;
+ bool found = false;
if (!led_cdev->default_trigger)
return;
@@ -257,15 +271,19 @@ void led_trigger_set_default(struct led_classdev *led_cdev)
down_read(&triggers_list_lock);
down_write(&led_cdev->trigger_lock);
list_for_each_entry(trig, &trigger_list, next_trig) {
- if (!strcmp(led_cdev->default_trigger, trig->name) &&
- trigger_relevant(led_cdev, trig)) {
- led_cdev->flags |= LED_INIT_DEFAULT_TRIGGER;
- led_trigger_set(led_cdev, trig);
+ found = led_match_default_trigger(led_cdev, trig);
+ if (found)
break;
- }
}
up_write(&led_cdev->trigger_lock);
up_read(&triggers_list_lock);
+
+ /*
+	 * If the default trigger wasn't found, the trigger module may not be
+	 * loaded yet; once it is loaded it will re-probe against all led_cdevs.
+ */
+ if (!found)
+ request_module_nowait("ledtrig:%s", led_cdev->default_trigger);
}
EXPORT_SYMBOL_GPL(led_trigger_set_default);
@@ -297,12 +315,8 @@ int led_trigger_register(struct led_trigger *trig)
down_read(&leds_list_lock);
list_for_each_entry(led_cdev, &leds_list, node) {
down_write(&led_cdev->trigger_lock);
- if (!led_cdev->trigger && led_cdev->default_trigger &&
- !strcmp(led_cdev->default_trigger, trig->name) &&
- trigger_relevant(led_cdev, trig)) {
- led_cdev->flags |= LED_INIT_DEFAULT_TRIGGER;
- led_trigger_set(led_cdev, trig);
- }
+ if (!led_cdev->trigger && led_cdev->default_trigger)
+ led_match_default_trigger(led_cdev, trig);
up_write(&led_cdev->trigger_lock);
}
up_read(&leds_list_lock);
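
The request_module_nowait() call added above only works if each trigger module advertises a matching module alias, which the ledtrig hunks later in this diff add. The pairing looks like this:

    /* In the trigger module (see the MODULE_ALIAS additions below): */
    MODULE_ALIAS("ledtrig:netdev");

    /* In led_trigger_set_default(), when no registered trigger matched: */
    request_module_nowait("ledtrig:%s", led_cdev->default_trigger);
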
diff --git a/drivers/leds/leds-aw200xx.c b/drivers/leds/leds-aw200xx.c
index f584a7f98fc5b..6c8c9f2c19e33 100644
--- a/drivers/leds/leds-aw200xx.c
+++ b/drivers/leds/leds-aw200xx.c
@@ -282,7 +282,7 @@ static int aw200xx_set_imax(const struct aw200xx *const chip,
u32 led_imax_uA)
{
u32 g_imax_uA = aw200xx_imax_to_global(chip, led_imax_uA);
- u32 coeff_table[] = {1, 2, 3, 4, 6, 8, 12, 16};
+ static const u32 coeff_table[] = {1, 2, 3, 4, 6, 8, 12, 16};
u32 gccr_imax = UINT_MAX;
u32 cur_imax = 0;
int i;
diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c
index 91f44b23cb113..17235a5e576ae 100644
--- a/drivers/leds/leds-aw2013.c
+++ b/drivers/leds/leds-aw2013.c
@@ -405,6 +405,7 @@ error_reg:
chip->regulators);
error:
+ mutex_unlock(&chip->mutex);
mutex_destroy(&chip->mutex);
return ret;
}
diff --git a/drivers/leds/leds-expresswire.c b/drivers/leds/leds-expresswire.c
new file mode 100644
index 0000000000000..e4937a8e0f441
--- /dev/null
+++ b/drivers/leds/leds-expresswire.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Shared library for Kinetic's ExpressWire protocol.
+ * This protocol works by pulsing the ExpressWire IC's control GPIO.
+ * ktd2692 and ktd2801 are known to use this protocol.
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio/consumer.h>
+#include <linux/types.h>
+
+#include <linux/leds-expresswire.h>
+
+void expresswire_power_off(struct expresswire_common_props *props)
+{
+ gpiod_set_value_cansleep(props->ctrl_gpio, 0);
+ usleep_range(props->timing.poweroff_us, props->timing.poweroff_us * 2);
+}
+EXPORT_SYMBOL_NS_GPL(expresswire_power_off, EXPRESSWIRE);
+
+void expresswire_enable(struct expresswire_common_props *props)
+{
+ gpiod_set_value(props->ctrl_gpio, 1);
+ udelay(props->timing.detect_delay_us);
+ gpiod_set_value(props->ctrl_gpio, 0);
+ udelay(props->timing.detect_us);
+ gpiod_set_value(props->ctrl_gpio, 1);
+}
+EXPORT_SYMBOL_NS_GPL(expresswire_enable, EXPRESSWIRE);
+
+void expresswire_start(struct expresswire_common_props *props)
+{
+ gpiod_set_value(props->ctrl_gpio, 1);
+ udelay(props->timing.data_start_us);
+}
+EXPORT_SYMBOL_NS_GPL(expresswire_start, EXPRESSWIRE);
+
+void expresswire_end(struct expresswire_common_props *props)
+{
+ gpiod_set_value(props->ctrl_gpio, 0);
+ udelay(props->timing.end_of_data_low_us);
+ gpiod_set_value(props->ctrl_gpio, 1);
+ udelay(props->timing.end_of_data_high_us);
+}
+EXPORT_SYMBOL_NS_GPL(expresswire_end, EXPRESSWIRE);
+
+void expresswire_set_bit(struct expresswire_common_props *props, bool bit)
+{
+ if (bit) {
+ gpiod_set_value(props->ctrl_gpio, 0);
+ udelay(props->timing.short_bitset_us);
+ gpiod_set_value(props->ctrl_gpio, 1);
+ udelay(props->timing.long_bitset_us);
+ } else {
+ gpiod_set_value(props->ctrl_gpio, 0);
+ udelay(props->timing.long_bitset_us);
+ gpiod_set_value(props->ctrl_gpio, 1);
+ udelay(props->timing.short_bitset_us);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(expresswire_set_bit, EXPRESSWIRE);
+
+void expresswire_write_u8(struct expresswire_common_props *props, u8 val)
+{
+ expresswire_start(props);
+ for (int i = 7; i >= 0; i--)
+ expresswire_set_bit(props, val & BIT(i));
+ expresswire_end(props);
+}
+EXPORT_SYMBOL_NS_GPL(expresswire_write_u8, EXPRESSWIRE);
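
The new <linux/leds-expresswire.h> header is not part of this diff; judging from the fields dereferenced above, it presumably declares something close to the following (a sketch with assumed u32 field types, not the actual header):

    struct expresswire_timing {
            u32 poweroff_us;
            u32 detect_delay_us;
            u32 detect_us;
            u32 data_start_us;
            u32 end_of_data_low_us;
            u32 end_of_data_high_us;
            u32 short_bitset_us;
            u32 long_bitset_us;
    };

    struct expresswire_common_props {
            struct gpio_desc *ctrl_gpio;
            struct expresswire_timing timing;
    };
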
diff --git a/drivers/leds/leds-mlxcpld.c b/drivers/leds/leds-mlxcpld.c
index 1355c84a2919b..718f55096e902 100644
--- a/drivers/leds/leds-mlxcpld.c
+++ b/drivers/leds/leds-mlxcpld.c
@@ -77,7 +77,7 @@ struct mlxcpld_param {
/**
* struct mlxcpld_led_priv - LED private data:
- * @cled: LED class device instance
+ * @cdev: LED class device instance
* @param: LED CPLD access parameters
**/
struct mlxcpld_led_priv {
diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c
index d8e3d5d8d2d09..5595788d98d20 100644
--- a/drivers/leds/leds-mlxreg.c
+++ b/drivers/leds/leds-mlxreg.c
@@ -29,7 +29,6 @@
* @data: led configuration data;
* @led_cdev: led class data;
* @base_color: base led color (other colors have constant offset from base);
- * @led_data: led data;
* @data_parent: pointer to private device control data of parent;
* @led_cdev_name: class device name
*/
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index 47223c850e4b4..b53905da35920 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -39,6 +39,7 @@
#define PCA963X_LED_PWM 0x2 /* Controlled through PWM */
#define PCA963X_LED_GRP_PWM 0x3 /* Controlled through PWM/GRPPWM */
+#define PCA963X_MODE1_SLEEP 0x04 /* Normal mode or Low Power mode, oscillator off */
#define PCA963X_MODE2_OUTDRV 0x04 /* Open-drain or totem pole */
#define PCA963X_MODE2_INVRT 0x10 /* Normal or inverted direction */
#define PCA963X_MODE2_DMBLNK 0x20 /* Enable blinking */
@@ -380,6 +381,32 @@ err:
return ret;
}
+static int pca963x_suspend(struct device *dev)
+{
+ struct pca963x *chip = dev_get_drvdata(dev);
+ u8 reg;
+
+ reg = i2c_smbus_read_byte_data(chip->client, PCA963X_MODE1);
+ reg = reg | BIT(PCA963X_MODE1_SLEEP);
+ i2c_smbus_write_byte_data(chip->client, PCA963X_MODE1, reg);
+
+ return 0;
+}
+
+static int pca963x_resume(struct device *dev)
+{
+ struct pca963x *chip = dev_get_drvdata(dev);
+ u8 reg;
+
+ reg = i2c_smbus_read_byte_data(chip->client, PCA963X_MODE1);
+ reg = reg & ~BIT(PCA963X_MODE1_SLEEP);
+ i2c_smbus_write_byte_data(chip->client, PCA963X_MODE1, reg);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(pca963x_pm, pca963x_suspend, pca963x_resume);
+
static const struct of_device_id of_pca963x_match[] = {
{ .compatible = "nxp,pca9632", },
{ .compatible = "nxp,pca9633", },
@@ -430,6 +457,7 @@ static struct i2c_driver pca963x_driver = {
.driver = {
.name = "leds-pca963x",
.of_match_table = of_pca963x_match,
+		.pm = pm_sleep_ptr(&pca963x_pm),
},
.probe = pca963x_probe,
.id_table = pca963x_id,
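
A note on the new suspend/resume handlers: PCA963X_MODE1_SLEEP (0x04) is used as a bit index rather than a register mask, so BIT(PCA963X_MODE1_SLEEP) evaluates to 0x10, which matches the SLEEP bit position (bit 4) in MODE1 on these parts. Spelled out:

    reg = i2c_smbus_read_byte_data(chip->client, PCA963X_MODE1);
    reg |= BIT(PCA963X_MODE1_SLEEP);	/* BIT(0x04) == 0x10, the MODE1 SLEEP bit */
    i2c_smbus_write_byte_data(chip->client, PCA963X_MODE1, reg);
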
diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c
index 9d91f21842f2b..96296db5f410d 100644
--- a/drivers/leds/leds-spi-byte.c
+++ b/drivers/leds/leds-spi-byte.c
@@ -83,7 +83,7 @@ static int spi_byte_probe(struct spi_device *spi)
struct device_node *child;
struct device *dev = &spi->dev;
struct spi_byte_led *led;
- const char *name = "leds-spi-byte::";
+ struct led_init_data init_data = {};
const char *state;
int ret;
@@ -97,12 +97,9 @@ static int spi_byte_probe(struct spi_device *spi)
if (!led)
return -ENOMEM;
- of_property_read_string(child, "label", &name);
- strscpy(led->name, name, sizeof(led->name));
led->spi = spi;
mutex_init(&led->mutex);
led->cdef = device_get_match_data(dev);
- led->ldev.name = led->name;
led->ldev.brightness = LED_OFF;
led->ldev.max_brightness = led->cdef->max_value - led->cdef->off_value;
led->ldev.brightness_set_blocking = spi_byte_brightness_set_blocking;
@@ -120,7 +117,11 @@ static int spi_byte_probe(struct spi_device *spi)
spi_byte_brightness_set_blocking(&led->ldev,
led->ldev.brightness);
- ret = devm_led_classdev_register(&spi->dev, &led->ldev);
+ init_data.fwnode = of_fwnode_handle(child);
+ init_data.devicename = "leds-spi-byte";
+ init_data.default_label = ":";
+
+ ret = devm_led_classdev_register_ext(&spi->dev, &led->ldev, &init_data);
if (ret) {
mutex_destroy(&led->mutex);
return ret;
diff --git a/drivers/leds/leds-sunfire.c b/drivers/leds/leds-sunfire.c
index 6fd89efb420aa..a621e5e5c75c5 100644
--- a/drivers/leds/leds-sunfire.c
+++ b/drivers/leds/leds-sunfire.c
@@ -17,7 +17,7 @@
#include <asm/fhc.h>
#include <asm/upa.h>
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_DESCRIPTION("Sun Fire LED driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index 345062ccabdaa..1138e2ab82e55 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -30,7 +30,6 @@ ssize_t led_trigger_write(struct file *filp, struct kobject *kobj,
extern struct rw_semaphore leds_list_lock;
extern struct list_head leds_list;
-extern struct list_head trigger_list;
extern const char * const led_colors[LED_COLOR_ID_MAX];
#endif /* __LEDS_H_INCLUDED */
diff --git a/drivers/leds/rgb/Kconfig b/drivers/leds/rgb/Kconfig
index e66bd21b9852e..8fc12d6a2958a 100644
--- a/drivers/leds/rgb/Kconfig
+++ b/drivers/leds/rgb/Kconfig
@@ -27,6 +27,17 @@ config LEDS_KTD202X
To compile this driver as a module, choose M here: the module
will be called leds-ktd202x.
+config LEDS_NCP5623
+ tristate "LED support for NCP5623"
+ depends on I2C
+ depends on OF
+ help
+	  This option enables support for the ON Semiconductor NCP5623
+	  Triple Output I2C Controlled RGB LED Driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-ncp5623.
+
config LEDS_PWM_MULTICOLOR
tristate "PWM driven multi-color LED Support"
depends on PWM
@@ -41,6 +52,7 @@ config LEDS_QCOM_LPG
tristate "LED support for Qualcomm LPG"
depends on OF
depends on PWM
+ depends on QCOM_PBS || !QCOM_PBS
depends on SPMI
help
This option enables support for the Light Pulse Generator found in a
diff --git a/drivers/leds/rgb/Makefile b/drivers/leds/rgb/Makefile
index 243f31e4d70d4..a501fd27f1793 100644
--- a/drivers/leds/rgb/Makefile
+++ b/drivers/leds/rgb/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_LEDS_GROUP_MULTICOLOR) += leds-group-multicolor.o
obj-$(CONFIG_LEDS_KTD202X) += leds-ktd202x.o
+obj-$(CONFIG_LEDS_NCP5623) += leds-ncp5623.o
obj-$(CONFIG_LEDS_PWM_MULTICOLOR) += leds-pwm-multicolor.o
obj-$(CONFIG_LEDS_QCOM_LPG) += leds-qcom-lpg.o
obj-$(CONFIG_LEDS_MT6370_RGB) += leds-mt6370-rgb.o
diff --git a/drivers/leds/rgb/leds-group-multicolor.c b/drivers/leds/rgb/leds-group-multicolor.c
index 39f58be32af51..b6c7679015fdf 100644
--- a/drivers/leds/rgb/leds-group-multicolor.c
+++ b/drivers/leds/rgb/leds-group-multicolor.c
@@ -69,7 +69,7 @@ static int leds_gmc_probe(struct platform_device *pdev)
struct mc_subled *subled;
struct leds_multicolor *priv;
unsigned int max_brightness = 0;
- int i, ret, count = 0;
+ int i, ret, count = 0, common_flags = 0;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -91,6 +91,7 @@ static int leds_gmc_probe(struct platform_device *pdev)
if (!priv->monochromatics)
return -ENOMEM;
+ common_flags |= led_cdev->flags;
priv->monochromatics[count] = led_cdev;
max_brightness = max(max_brightness, led_cdev->max_brightness);
@@ -114,12 +115,15 @@ static int leds_gmc_probe(struct platform_device *pdev)
/* Initialise the multicolor's LED class device */
cdev = &priv->mc_cdev.led_cdev;
- cdev->flags = LED_CORE_SUSPENDRESUME;
cdev->brightness_set_blocking = leds_gmc_set;
cdev->max_brightness = max_brightness;
cdev->color = LED_COLOR_ID_MULTI;
priv->mc_cdev.num_colors = count;
+ /* we only need suspend/resume if a sub-led requests it */
+ if (common_flags & LED_CORE_SUSPENDRESUME)
+ cdev->flags = LED_CORE_SUSPENDRESUME;
+
init_data.fwnode = dev_fwnode(dev);
ret = devm_led_classdev_multicolor_register_ext(dev, &priv->mc_cdev, &init_data);
if (ret)
diff --git a/drivers/leds/rgb/leds-ncp5623.c b/drivers/leds/rgb/leds-ncp5623.c
new file mode 100644
index 0000000000000..2be4ff9185169
--- /dev/null
+++ b/drivers/leds/rgb/leds-ncp5623.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NCP5623 Multi-LED Driver
+ *
+ * Author: Abdel Alkuor <alkuor@gmail.com>
+ * Datasheet: https://www.onsemi.com/pdf/datasheet/ncp5623-d.pdf
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include <linux/led-class-multicolor.h>
+
+#define NCP5623_FUNCTION_OFFSET 0x5
+#define NCP5623_REG(x) ((x) << NCP5623_FUNCTION_OFFSET)
+
+#define NCP5623_SHUTDOWN_REG NCP5623_REG(0x0)
+#define NCP5623_ILED_REG NCP5623_REG(0x1)
+#define NCP5623_PWM_REG(index) NCP5623_REG(0x2 + (index))
+#define NCP5623_UPWARD_STEP_REG NCP5623_REG(0x5)
+#define NCP5623_DOWNWARD_STEP_REG NCP5623_REG(0x6)
+#define NCP5623_DIMMING_TIME_REG NCP5623_REG(0x7)
+
+#define NCP5623_MAX_BRIGHTNESS 0x1f
+#define NCP5623_MAX_DIM_TIME_MS 240
+#define NCP5623_DIM_STEP_MS 8
+
+struct ncp5623 {
+ struct i2c_client *client;
+ struct led_classdev_mc mc_dev;
+ struct mutex lock;
+
+ int current_brightness;
+ unsigned long delay;
+};
+
+static int ncp5623_write(struct i2c_client *client, u8 reg, u8 data)
+{
+ return i2c_smbus_write_byte_data(client, reg | data, 0);
+}
+
+static int ncp5623_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(cdev);
+ struct ncp5623 *ncp = container_of(mc_cdev, struct ncp5623, mc_dev);
+ int ret;
+
+ guard(mutex)(&ncp->lock);
+
+ if (ncp->delay && time_is_after_jiffies(ncp->delay))
+ return -EBUSY;
+
+ ncp->delay = 0;
+
+ for (int i = 0; i < mc_cdev->num_colors; i++) {
+ ret = ncp5623_write(ncp->client,
+ NCP5623_PWM_REG(mc_cdev->subled_info[i].channel),
+ min(mc_cdev->subled_info[i].intensity,
+ NCP5623_MAX_BRIGHTNESS));
+ if (ret)
+ return ret;
+ }
+
+ ret = ncp5623_write(ncp->client, NCP5623_DIMMING_TIME_REG, 0);
+ if (ret)
+ return ret;
+
+ ret = ncp5623_write(ncp->client, NCP5623_ILED_REG, brightness);
+ if (ret)
+ return ret;
+
+ ncp->current_brightness = brightness;
+
+ return 0;
+}
+
+static int ncp5623_pattern_set(struct led_classdev *cdev,
+ struct led_pattern *pattern,
+ u32 len, int repeat)
+{
+ struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(cdev);
+ struct ncp5623 *ncp = container_of(mc_cdev, struct ncp5623, mc_dev);
+ int brightness_diff;
+ u8 reg;
+ int ret;
+
+ guard(mutex)(&ncp->lock);
+
+ if (ncp->delay && time_is_after_jiffies(ncp->delay))
+ return -EBUSY;
+
+ ncp->delay = 0;
+
+ if (pattern[0].delta_t > NCP5623_MAX_DIM_TIME_MS ||
+ (pattern[0].delta_t % NCP5623_DIM_STEP_MS) != 0)
+ return -EINVAL;
+
+ brightness_diff = pattern[0].brightness - ncp->current_brightness;
+
+ if (brightness_diff == 0)
+ return 0;
+
+ if (pattern[0].delta_t) {
+ if (brightness_diff > 0)
+ reg = NCP5623_UPWARD_STEP_REG;
+ else
+ reg = NCP5623_DOWNWARD_STEP_REG;
+ } else {
+ reg = NCP5623_ILED_REG;
+ }
+
+ ret = ncp5623_write(ncp->client, reg,
+ min(pattern[0].brightness, NCP5623_MAX_BRIGHTNESS));
+ if (ret)
+ return ret;
+
+ ret = ncp5623_write(ncp->client,
+ NCP5623_DIMMING_TIME_REG,
+ pattern[0].delta_t / NCP5623_DIM_STEP_MS);
+ if (ret)
+ return ret;
+
+	/*
+	 * During testing, when the brightness difference is 1 the device,
+	 * for some unknown reason, takes the longest possible time to reach
+	 * the new value. Otherwise, the time factor is simply the brightness
+	 * difference.
+	 *
+	 * For example:
+	 * with current_brightness = 20 and new_brightness = 21, the device
+	 * first ramps from 20 up to the maximum brightness and then from 0
+	 * up to 21, so the time it takes corresponds to
+	 * time_factor = max_brightness - 20 + 21
+	 */
+ if (abs(brightness_diff) == 1)
+ ncp->delay = NCP5623_MAX_BRIGHTNESS + brightness_diff;
+ else
+ ncp->delay = abs(brightness_diff);
+
+ ncp->delay = msecs_to_jiffies(ncp->delay * pattern[0].delta_t) + jiffies;
+
+ ncp->current_brightness = pattern[0].brightness;
+
+ return 0;
+}
+
+static int ncp5623_pattern_clear(struct led_classdev *led_cdev)
+{
+ return 0;
+}
+
+static int ncp5623_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct fwnode_handle *mc_node, *led_node;
+ struct led_init_data init_data = { };
+ int num_subleds = 0;
+ struct ncp5623 *ncp;
+ struct mc_subled *subled_info;
+ u32 color_index;
+ u32 reg;
+ int ret;
+
+ ncp = devm_kzalloc(dev, sizeof(*ncp), GFP_KERNEL);
+ if (!ncp)
+ return -ENOMEM;
+
+ ncp->client = client;
+
+ mc_node = device_get_named_child_node(dev, "multi-led");
+ if (!mc_node)
+ return -EINVAL;
+
+ fwnode_for_each_child_node(mc_node, led_node)
+ num_subleds++;
+
+ subled_info = devm_kcalloc(dev, num_subleds, sizeof(*subled_info), GFP_KERNEL);
+ if (!subled_info) {
+ ret = -ENOMEM;
+ goto release_mc_node;
+ }
+
+ fwnode_for_each_available_child_node(mc_node, led_node) {
+ ret = fwnode_property_read_u32(led_node, "color", &color_index);
+ if (ret) {
+ fwnode_handle_put(led_node);
+ goto release_mc_node;
+ }
+
+ ret = fwnode_property_read_u32(led_node, "reg", &reg);
+ if (ret) {
+ fwnode_handle_put(led_node);
+ goto release_mc_node;
+ }
+
+ subled_info[ncp->mc_dev.num_colors].channel = reg;
+ subled_info[ncp->mc_dev.num_colors++].color_index = color_index;
+ }
+
+ init_data.fwnode = mc_node;
+
+ ncp->mc_dev.led_cdev.max_brightness = NCP5623_MAX_BRIGHTNESS;
+ ncp->mc_dev.subled_info = subled_info;
+ ncp->mc_dev.led_cdev.brightness_set_blocking = ncp5623_brightness_set;
+ ncp->mc_dev.led_cdev.pattern_set = ncp5623_pattern_set;
+ ncp->mc_dev.led_cdev.pattern_clear = ncp5623_pattern_clear;
+ ncp->mc_dev.led_cdev.default_trigger = "pattern";
+
+ mutex_init(&ncp->lock);
+ i2c_set_clientdata(client, ncp);
+
+ ret = led_classdev_multicolor_register_ext(dev, &ncp->mc_dev, &init_data);
+ if (ret)
+ goto destroy_lock;
+
+ return 0;
+
+destroy_lock:
+ mutex_destroy(&ncp->lock);
+
+release_mc_node:
+ fwnode_handle_put(mc_node);
+
+ return ret;
+}
+
+static void ncp5623_remove(struct i2c_client *client)
+{
+ struct ncp5623 *ncp = i2c_get_clientdata(client);
+
+ mutex_lock(&ncp->lock);
+ ncp->delay = 0;
+ mutex_unlock(&ncp->lock);
+
+ ncp5623_write(client, NCP5623_DIMMING_TIME_REG, 0);
+ led_classdev_multicolor_unregister(&ncp->mc_dev);
+ mutex_destroy(&ncp->lock);
+}
+
+static void ncp5623_shutdown(struct i2c_client *client)
+{
+ struct ncp5623 *ncp = i2c_get_clientdata(client);
+
+ if (!(ncp->mc_dev.led_cdev.flags & LED_RETAIN_AT_SHUTDOWN))
+ ncp5623_write(client, NCP5623_SHUTDOWN_REG, 0);
+
+ mutex_destroy(&ncp->lock);
+}
+
+static const struct of_device_id ncp5623_id[] = {
+ { .compatible = "onnn,ncp5623" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ncp5623_id);
+
+static struct i2c_driver ncp5623_i2c_driver = {
+ .driver = {
+ .name = "ncp5623",
+ .of_match_table = ncp5623_id,
+ },
+ .probe = ncp5623_probe,
+ .remove = ncp5623_remove,
+ .shutdown = ncp5623_shutdown,
+};
+
+module_i2c_driver(ncp5623_i2c_driver);
+
+MODULE_AUTHOR("Abdel Alkuor <alkuor@gmail.com>");
+MODULE_DESCRIPTION("NCP5623 Multi-LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
index 0a7acf59a4201..6bdc5b923f981 100644
--- a/drivers/leds/rgb/leds-qcom-lpg.c
+++ b/drivers/leds/rgb/leds-qcom-lpg.c
@@ -8,11 +8,13 @@
#include <linux/bitfield.h>
#include <linux/led-class-multicolor.h>
#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/soc/qcom/qcom-pbs.h>
#define LPG_SUBTYPE_REG 0x05
#define LPG_SUBTYPE_LPG 0x2
@@ -39,6 +41,10 @@
#define PWM_SEC_ACCESS_REG 0xd0
#define PWM_DTEST_REG(x) (0xe2 + (x) - 1)
+#define SDAM_REG_PBS_SEQ_EN 0x42
+#define SDAM_PBS_TRIG_SET 0xe5
+#define SDAM_PBS_TRIG_CLR 0xe6
+
#define TRI_LED_SRC_SEL 0x45
#define TRI_LED_EN_CTL 0x46
#define TRI_LED_ATC_CTL 0x47
@@ -48,9 +54,31 @@
#define LPG_RESOLUTION_9BIT BIT(9)
#define LPG_RESOLUTION_15BIT BIT(15)
+#define PPG_MAX_LED_BRIGHTNESS 255
+
#define LPG_MAX_M 7
#define LPG_MAX_PREDIV 6
+#define DEFAULT_TICK_DURATION_US 7800
+#define RAMP_STEP_DURATION(x) (((x) * 1000 / DEFAULT_TICK_DURATION_US) & 0xff)
+
+#define SDAM_MAX_DEVICES 2
+/* LPG common config settings for PPG */
+#define SDAM_START_BASE 0x40
+#define SDAM_REG_RAMP_STEP_DURATION 0x47
+
+#define SDAM_LUT_SDAM_LUT_PATTERN_OFFSET 0x45
+#define SDAM_LPG_SDAM_LUT_PATTERN_OFFSET 0x80
+
+/* LPG per channel config settings for PPG */
+#define SDAM_LUT_EN_OFFSET 0x0
+#define SDAM_PATTERN_CONFIG_OFFSET 0x1
+#define SDAM_END_INDEX_OFFSET 0x3
+#define SDAM_START_INDEX_OFFSET 0x4
+#define SDAM_PBS_SCRATCH_LUT_COUNTER_OFFSET 0x6
+#define SDAM_PAUSE_HI_MULTIPLIER_OFFSET 0x8
+#define SDAM_PAUSE_LO_MULTIPLIER_OFFSET 0x9
+
struct lpg_channel;
struct lpg_data;
@@ -64,6 +92,10 @@ struct lpg_data;
* @lut_base: base address of the LUT block (optional)
* @lut_size: number of entries in the LUT block
* @lut_bitmap: allocation bitmap for LUT entries
+ * @pbs_dev: PBS device
+ * @lpg_chan_sdam: LPG SDAM peripheral device
+ * @lut_sdam: LUT SDAM peripheral device
+ * @pbs_en_bitmap: bitmap for tracking PBS triggers
* @triled_base: base address of the TRILED block (optional)
* @triled_src: power-source for the TRILED
* @triled_has_atc_ctl: true if there is TRI_LED_ATC_CTL register
@@ -85,6 +117,11 @@ struct lpg {
u32 lut_size;
unsigned long *lut_bitmap;
+ struct pbs_dev *pbs_dev;
+ struct nvmem_device *lpg_chan_sdam;
+ struct nvmem_device *lut_sdam;
+ unsigned long pbs_en_bitmap;
+
u32 triled_base;
u32 triled_src;
bool triled_has_atc_ctl;
@@ -101,6 +138,7 @@ struct lpg {
* @triled_mask: mask in TRILED to enable this channel
* @lut_mask: mask in LUT to start pattern generator for this channel
* @subtype: PMIC hardware block subtype
+ * @sdam_offset: channel offset in LPG SDAM
* @in_use: channel is exposed to LED framework
* @color: color of the LED attached to this channel
* @dtest_line: DTEST line for output, or 0 if disabled
@@ -129,6 +167,7 @@ struct lpg_channel {
unsigned int triled_mask;
unsigned int lut_mask;
unsigned int subtype;
+ u32 sdam_offset;
bool in_use;
@@ -178,10 +217,12 @@ struct lpg_led {
/**
* struct lpg_channel_data - per channel initialization data
+ * @sdam_offset: Channel offset in LPG SDAM
* @base: base address for PWM channel registers
* @triled_mask: bitmask for controlling this channel in TRILED
*/
struct lpg_channel_data {
+ unsigned int sdam_offset;
unsigned int base;
u8 triled_mask;
};
@@ -206,6 +247,65 @@ struct lpg_data {
const struct lpg_channel_data *channels;
};
+#define PBS_SW_TRIG_BIT BIT(0)
+
+static int lpg_clear_pbs_trigger(struct lpg *lpg, unsigned int lut_mask)
+{
+ u8 val = 0;
+ int rc;
+
+ lpg->pbs_en_bitmap &= (~lut_mask);
+ if (!lpg->pbs_en_bitmap) {
+ rc = nvmem_device_write(lpg->lpg_chan_sdam, SDAM_REG_PBS_SEQ_EN, 1, &val);
+ if (rc < 0)
+ return rc;
+
+ if (lpg->lut_sdam) {
+ val = PBS_SW_TRIG_BIT;
+ rc = nvmem_device_write(lpg->lpg_chan_sdam, SDAM_PBS_TRIG_CLR, 1, &val);
+ if (rc < 0)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int lpg_set_pbs_trigger(struct lpg *lpg, unsigned int lut_mask)
+{
+ u8 val = PBS_SW_TRIG_BIT;
+ int rc;
+
+ if (!lpg->pbs_en_bitmap) {
+ rc = nvmem_device_write(lpg->lpg_chan_sdam, SDAM_REG_PBS_SEQ_EN, 1, &val);
+ if (rc < 0)
+ return rc;
+
+ if (lpg->lut_sdam) {
+ rc = nvmem_device_write(lpg->lpg_chan_sdam, SDAM_PBS_TRIG_SET, 1, &val);
+ if (rc < 0)
+ return rc;
+ } else {
+ rc = qcom_pbs_trigger_event(lpg->pbs_dev, val);
+ if (rc < 0)
+ return rc;
+ }
+ }
+ lpg->pbs_en_bitmap |= lut_mask;
+
+ return 0;
+}
+
+static int lpg_sdam_configure_triggers(struct lpg_channel *chan, u8 set_trig)
+{
+ u32 addr = SDAM_LUT_EN_OFFSET + chan->sdam_offset;
+
+ if (!chan->lpg->lpg_chan_sdam)
+ return 0;
+
+ return nvmem_device_write(chan->lpg->lpg_chan_sdam, addr, 1, &set_trig);
+}
+
static int triled_set(struct lpg *lpg, unsigned int mask, unsigned int enable)
{
/* Skip if we don't have a triled block */
@@ -216,6 +316,47 @@ static int triled_set(struct lpg *lpg, unsigned int mask, unsigned int enable)
mask, enable);
}
+static int lpg_lut_store_sdam(struct lpg *lpg, struct led_pattern *pattern,
+ size_t len, unsigned int *lo_idx, unsigned int *hi_idx)
+{
+ unsigned int idx;
+ u8 brightness;
+ int i, rc;
+ u16 addr;
+
+ if (len > lpg->lut_size) {
+ dev_err(lpg->dev, "Pattern length (%zu) exceeds maximum pattern length (%d)\n",
+ len, lpg->lut_size);
+ return -EINVAL;
+ }
+
+ idx = bitmap_find_next_zero_area(lpg->lut_bitmap, lpg->lut_size, 0, len, 0);
+ if (idx >= lpg->lut_size)
+ return -ENOSPC;
+
+ for (i = 0; i < len; i++) {
+ brightness = pattern[i].brightness;
+
+ if (lpg->lut_sdam) {
+ addr = SDAM_LUT_SDAM_LUT_PATTERN_OFFSET + i + idx;
+ rc = nvmem_device_write(lpg->lut_sdam, addr, 1, &brightness);
+ } else {
+ addr = SDAM_LPG_SDAM_LUT_PATTERN_OFFSET + i + idx;
+ rc = nvmem_device_write(lpg->lpg_chan_sdam, addr, 1, &brightness);
+ }
+
+ if (rc < 0)
+ return rc;
+ }
+
+ bitmap_set(lpg->lut_bitmap, idx, len);
+
+ *lo_idx = idx;
+ *hi_idx = idx + len - 1;
+
+ return 0;
+}
+
static int lpg_lut_store(struct lpg *lpg, struct led_pattern *pattern,
size_t len, unsigned int *lo_idx, unsigned int *hi_idx)
{
@@ -256,6 +397,9 @@ static void lpg_lut_free(struct lpg *lpg, unsigned int lo_idx, unsigned int hi_i
static int lpg_lut_sync(struct lpg *lpg, unsigned int mask)
{
+ if (!lpg->lut_base)
+ return 0;
+
return regmap_write(lpg->map, lpg->lut_base + RAMP_CONTROL_REG, mask);
}
@@ -462,6 +606,49 @@ static void lpg_apply_pwm_value(struct lpg_channel *chan)
#define LPG_PATTERN_CONFIG_PAUSE_HI BIT(1)
#define LPG_PATTERN_CONFIG_PAUSE_LO BIT(0)
+static void lpg_sdam_apply_lut_control(struct lpg_channel *chan)
+{
+ struct nvmem_device *lpg_chan_sdam = chan->lpg->lpg_chan_sdam;
+ unsigned int lo_idx = chan->pattern_lo_idx;
+ unsigned int hi_idx = chan->pattern_hi_idx;
+ u8 val = 0, conf = 0, lut_offset = 0;
+ unsigned int hi_pause, lo_pause;
+ struct lpg *lpg = chan->lpg;
+
+ if (!chan->ramp_enabled || chan->pattern_lo_idx == chan->pattern_hi_idx)
+ return;
+
+ hi_pause = DIV_ROUND_UP(chan->ramp_hi_pause_ms, chan->ramp_tick_ms);
+ lo_pause = DIV_ROUND_UP(chan->ramp_lo_pause_ms, chan->ramp_tick_ms);
+
+ if (!chan->ramp_oneshot)
+ conf |= LPG_PATTERN_CONFIG_REPEAT;
+ if (chan->ramp_hi_pause_ms && lpg->lut_sdam)
+ conf |= LPG_PATTERN_CONFIG_PAUSE_HI;
+ if (chan->ramp_lo_pause_ms && lpg->lut_sdam)
+ conf |= LPG_PATTERN_CONFIG_PAUSE_LO;
+
+ if (lpg->lut_sdam) {
+ lut_offset = SDAM_LUT_SDAM_LUT_PATTERN_OFFSET - SDAM_START_BASE;
+ hi_idx += lut_offset;
+ lo_idx += lut_offset;
+ }
+
+ nvmem_device_write(lpg_chan_sdam, SDAM_PBS_SCRATCH_LUT_COUNTER_OFFSET + chan->sdam_offset, 1, &val);
+ nvmem_device_write(lpg_chan_sdam, SDAM_PATTERN_CONFIG_OFFSET + chan->sdam_offset, 1, &conf);
+ nvmem_device_write(lpg_chan_sdam, SDAM_END_INDEX_OFFSET + chan->sdam_offset, 1, &hi_idx);
+ nvmem_device_write(lpg_chan_sdam, SDAM_START_INDEX_OFFSET + chan->sdam_offset, 1, &lo_idx);
+
+ val = RAMP_STEP_DURATION(chan->ramp_tick_ms);
+ nvmem_device_write(lpg_chan_sdam, SDAM_REG_RAMP_STEP_DURATION, 1, &val);
+
+ if (lpg->lut_sdam) {
+ nvmem_device_write(lpg_chan_sdam, SDAM_PAUSE_HI_MULTIPLIER_OFFSET + chan->sdam_offset, 1, &hi_pause);
+ nvmem_device_write(lpg_chan_sdam, SDAM_PAUSE_LO_MULTIPLIER_OFFSET + chan->sdam_offset, 1, &lo_pause);
+ }
+
+}
+
static void lpg_apply_lut_control(struct lpg_channel *chan)
{
struct lpg *lpg = chan->lpg;
@@ -596,7 +783,10 @@ static void lpg_apply(struct lpg_channel *chan)
lpg_apply_pwm_value(chan);
lpg_apply_control(chan);
lpg_apply_sync(chan);
- lpg_apply_lut_control(chan);
+ if (chan->lpg->lpg_chan_sdam)
+ lpg_sdam_apply_lut_control(chan);
+ else
+ lpg_apply_lut_control(chan);
lpg_enable_glitch(chan);
}
@@ -621,6 +811,7 @@ static void lpg_brightness_set(struct lpg_led *led, struct led_classdev *cdev,
chan->ramp_enabled = false;
} else if (chan->pattern_lo_idx != chan->pattern_hi_idx) {
lpg_calc_freq(chan, NSEC_PER_MSEC);
+ lpg_sdam_configure_triggers(chan, 1);
chan->enabled = true;
chan->ramp_enabled = true;
@@ -648,8 +839,10 @@ static void lpg_brightness_set(struct lpg_led *led, struct led_classdev *cdev,
triled_set(lpg, triled_mask, triled_enabled);
/* Trigger start of ramp generator(s) */
- if (lut_mask)
+ if (lut_mask) {
lpg_lut_sync(lpg, lut_mask);
+ lpg_set_pbs_trigger(lpg, lut_mask);
+ }
}
static int lpg_brightness_single_set(struct led_classdev *cdev,
@@ -766,9 +959,9 @@ static int lpg_pattern_set(struct lpg_led *led, struct led_pattern *led_pattern,
struct led_pattern *pattern;
unsigned int brightness_a;
unsigned int brightness_b;
+ unsigned int hi_pause = 0;
+ unsigned int lo_pause = 0;
unsigned int actual_len;
- unsigned int hi_pause;
- unsigned int lo_pause;
unsigned int delta_t;
unsigned int lo_idx;
unsigned int hi_idx;
@@ -835,18 +1028,24 @@ static int lpg_pattern_set(struct lpg_led *led, struct led_pattern *led_pattern,
* If the specified pattern is a palindrome the ping pong mode is
* enabled. In this scenario the delta_t of the middle entry (i.e. the
* last in the programmed pattern) determines the "high pause".
+ *
+ * SDAM-based devices do not support "ping pong", and only supports
+	 * SDAM-based devices do not support "ping pong", and only support
*/
/* Detect palindromes and use "ping pong" to reduce LUT usage */
- for (i = 0; i < len / 2; i++) {
- brightness_a = pattern[i].brightness;
- brightness_b = pattern[len - i - 1].brightness;
-
- if (brightness_a != brightness_b) {
- ping_pong = false;
- break;
+ if (lpg->lut_base) {
+ for (i = 0; i < len / 2; i++) {
+ brightness_a = pattern[i].brightness;
+ brightness_b = pattern[len - i - 1].brightness;
+
+ if (brightness_a != brightness_b) {
+ ping_pong = false;
+ break;
+ }
}
- }
+ } else
+ ping_pong = false;
/* The pattern length to be written to the LUT */
if (ping_pong)
@@ -874,12 +1073,27 @@ static int lpg_pattern_set(struct lpg_led *led, struct led_pattern *led_pattern,
if (delta_t >= BIT(9))
goto out_free_pattern;
- /* Find "low pause" and "high pause" in the pattern */
- lo_pause = pattern[0].delta_t;
- hi_pause = pattern[actual_len - 1].delta_t;
+ /*
+ * Find "low pause" and "high pause" in the pattern in the LUT case.
+	 * SDAM-based devices without a dedicated LUT SDAM require all
+	 * steps to have the same duration.
+ */
+ if (lpg->lut_base || lpg->lut_sdam) {
+ lo_pause = pattern[0].delta_t;
+ hi_pause = pattern[actual_len - 1].delta_t;
+ } else {
+ if (delta_t != pattern[0].delta_t || delta_t != pattern[actual_len - 1].delta_t)
+ goto out_free_pattern;
+ }
+
mutex_lock(&lpg->lock);
- ret = lpg_lut_store(lpg, pattern, actual_len, &lo_idx, &hi_idx);
+
+ if (lpg->lut_base)
+ ret = lpg_lut_store(lpg, pattern, actual_len, &lo_idx, &hi_idx);
+ else
+ ret = lpg_lut_store_sdam(lpg, pattern, actual_len, &lo_idx, &hi_idx);
+
if (ret < 0)
goto out_unlock;
@@ -927,7 +1141,12 @@ static int lpg_pattern_mc_set(struct led_classdev *cdev,
{
struct led_classdev_mc *mc = lcdev_to_mccdev(cdev);
struct lpg_led *led = container_of(mc, struct lpg_led, mcdev);
- int ret;
+ unsigned int triled_mask = 0;
+ int ret, i;
+
+ for (i = 0; i < led->num_channels; i++)
+ triled_mask |= led->channels[i]->triled_mask;
+ triled_set(led->lpg, triled_mask, 0);
ret = lpg_pattern_set(led, pattern, len, repeat);
if (ret < 0)
@@ -952,6 +1171,8 @@ static int lpg_pattern_clear(struct lpg_led *led)
for (i = 0; i < led->num_channels; i++) {
chan = led->channels[i];
+ lpg_sdam_configure_triggers(chan, 0);
+ lpg_clear_pbs_trigger(chan->lpg, chan->lut_mask);
chan->pattern_lo_idx = 0;
chan->pattern_hi_idx = 0;
}
@@ -1191,8 +1412,8 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
cdev->brightness_set_blocking = lpg_brightness_mc_set;
cdev->blink_set = lpg_blink_mc_set;
- /* Register pattern accessors only if we have a LUT block */
- if (lpg->lut_base) {
+ /* Register pattern accessors if we have a LUT block or when using PPG */
+ if (lpg->lut_base || lpg->lpg_chan_sdam) {
cdev->pattern_set = lpg_pattern_mc_set;
cdev->pattern_clear = lpg_pattern_mc_clear;
}
@@ -1205,15 +1426,19 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
cdev->brightness_set_blocking = lpg_brightness_single_set;
cdev->blink_set = lpg_blink_single_set;
- /* Register pattern accessors only if we have a LUT block */
- if (lpg->lut_base) {
+ /* Register pattern accessors if we have a LUT block or when using PPG */
+ if (lpg->lut_base || lpg->lpg_chan_sdam) {
cdev->pattern_set = lpg_pattern_single_set;
cdev->pattern_clear = lpg_pattern_single_clear;
}
}
cdev->default_trigger = of_get_property(np, "linux,default-trigger", NULL);
- cdev->max_brightness = LPG_RESOLUTION_9BIT - 1;
+
+ if (lpg->lpg_chan_sdam)
+ cdev->max_brightness = PPG_MAX_LED_BRIGHTNESS;
+ else
+ cdev->max_brightness = LPG_RESOLUTION_9BIT - 1;
if (!of_property_read_string(np, "default-state", &state) &&
!strcmp(state, "on"))
@@ -1254,6 +1479,7 @@ static int lpg_init_channels(struct lpg *lpg)
chan->base = data->channels[i].base;
chan->triled_mask = data->channels[i].triled_mask;
chan->lut_mask = BIT(i);
+ chan->sdam_offset = data->channels[i].sdam_offset;
regmap_read(lpg->map, chan->base + LPG_SUBTYPE_REG, &chan->subtype);
}
@@ -1299,11 +1525,12 @@ static int lpg_init_lut(struct lpg *lpg)
{
const struct lpg_data *data = lpg->data;
- if (!data->lut_base)
+ if (!data->lut_size)
return 0;
- lpg->lut_base = data->lut_base;
lpg->lut_size = data->lut_size;
+ if (data->lut_base)
+ lpg->lut_base = data->lut_base;
lpg->lut_bitmap = devm_bitmap_zalloc(lpg->dev, lpg->lut_size, GFP_KERNEL);
if (!lpg->lut_bitmap)
@@ -1312,6 +1539,59 @@ static int lpg_init_lut(struct lpg *lpg)
return 0;
}
+static int lpg_init_sdam(struct lpg *lpg)
+{
+ int i, sdam_count, rc;
+ u8 val = 0;
+
+ sdam_count = of_property_count_strings(lpg->dev->of_node, "nvmem-names");
+ if (sdam_count <= 0)
+ return 0;
+ if (sdam_count > SDAM_MAX_DEVICES)
+ return -EINVAL;
+
+ /* Get the 1st SDAM device for LPG/LUT config */
+ lpg->lpg_chan_sdam = devm_nvmem_device_get(lpg->dev, "lpg_chan_sdam");
+ if (IS_ERR(lpg->lpg_chan_sdam))
+ return dev_err_probe(lpg->dev, PTR_ERR(lpg->lpg_chan_sdam),
+ "Failed to get LPG chan SDAM device\n");
+
+ if (sdam_count == 1) {
+ /* Get PBS device node if single SDAM device */
+ lpg->pbs_dev = get_pbs_client_device(lpg->dev);
+ if (IS_ERR(lpg->pbs_dev))
+ return dev_err_probe(lpg->dev, PTR_ERR(lpg->pbs_dev),
+ "Failed to get PBS client device\n");
+ } else if (sdam_count == 2) {
+ /* Get the 2nd SDAM device for LUT pattern */
+ lpg->lut_sdam = devm_nvmem_device_get(lpg->dev, "lut_sdam");
+ if (IS_ERR(lpg->lut_sdam))
+ return dev_err_probe(lpg->dev, PTR_ERR(lpg->lut_sdam),
+ "Failed to get LPG LUT SDAM device\n");
+ }
+
+ for (i = 0; i < lpg->num_channels; i++) {
+ struct lpg_channel *chan = &lpg->channels[i];
+
+ if (chan->sdam_offset) {
+ rc = nvmem_device_write(lpg->lpg_chan_sdam,
+ SDAM_PBS_SCRATCH_LUT_COUNTER_OFFSET + chan->sdam_offset, 1, &val);
+ if (rc < 0)
+ return rc;
+
+ rc = lpg_sdam_configure_triggers(chan, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = lpg_clear_pbs_trigger(chan->lpg, chan->lut_mask);
+ if (rc < 0)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
static int lpg_probe(struct platform_device *pdev)
{
struct device_node *np;
@@ -1346,6 +1626,10 @@ static int lpg_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ ret = lpg_init_sdam(lpg);
+ if (ret < 0)
+ return ret;
+
ret = lpg_init_lut(lpg);
if (ret < 0)
return ret;
@@ -1364,6 +1648,23 @@ static int lpg_probe(struct platform_device *pdev)
return lpg_add_pwm(lpg);
}
+static const struct lpg_data pm660l_lpg_data = {
+ .lut_base = 0xb000,
+ .lut_size = 49,
+
+ .triled_base = 0xd000,
+ .triled_has_atc_ctl = true,
+ .triled_has_src_sel = true,
+
+ .num_channels = 4,
+ .channels = (const struct lpg_channel_data[]) {
+ { .base = 0xb100, .triled_mask = BIT(5) },
+ { .base = 0xb200, .triled_mask = BIT(6) },
+ { .base = 0xb300, .triled_mask = BIT(7) },
+ { .base = 0xb400 },
+ },
+};
+
static const struct lpg_data pm8916_pwm_data = {
.num_channels = 1,
.channels = (const struct lpg_channel_data[]) {
@@ -1411,11 +1712,13 @@ static const struct lpg_data pm8994_lpg_data = {
static const struct lpg_data pmi632_lpg_data = {
.triled_base = 0xd000,
+ .lut_size = 64,
+
.num_channels = 5,
.channels = (const struct lpg_channel_data[]) {
- { .base = 0xb300, .triled_mask = BIT(7) },
- { .base = 0xb400, .triled_mask = BIT(6) },
- { .base = 0xb500, .triled_mask = BIT(5) },
+ { .base = 0xb300, .triled_mask = BIT(7), .sdam_offset = 0x48 },
+ { .base = 0xb400, .triled_mask = BIT(6), .sdam_offset = 0x56 },
+ { .base = 0xb500, .triled_mask = BIT(5), .sdam_offset = 0x64 },
{ .base = 0xb600 },
{ .base = 0xb700 },
},
@@ -1488,11 +1791,13 @@ static const struct lpg_data pm8150l_lpg_data = {
static const struct lpg_data pm8350c_pwm_data = {
.triled_base = 0xef00,
+ .lut_size = 122,
+
.num_channels = 4,
.channels = (const struct lpg_channel_data[]) {
- { .base = 0xe800, .triled_mask = BIT(7) },
- { .base = 0xe900, .triled_mask = BIT(6) },
- { .base = 0xea00, .triled_mask = BIT(5) },
+ { .base = 0xe800, .triled_mask = BIT(7), .sdam_offset = 0x48 },
+ { .base = 0xe900, .triled_mask = BIT(6), .sdam_offset = 0x56 },
+ { .base = 0xea00, .triled_mask = BIT(5), .sdam_offset = 0x64 },
{ .base = 0xeb00 },
},
};
@@ -1506,6 +1811,7 @@ static const struct lpg_data pmk8550_pwm_data = {
};
static const struct of_device_id lpg_of_table[] = {
+ { .compatible = "qcom,pm660l-lpg", .data = &pm660l_lpg_data },
{ .compatible = "qcom,pm8150b-lpg", .data = &pm8150b_lpg_data },
{ .compatible = "qcom,pm8150l-lpg", .data = &pm8150l_lpg_data },
{ .compatible = "qcom,pm8350c-pwm", .data = &pm8350c_pwm_data },
diff --git a/drivers/leds/trigger/ledtrig-audio.c b/drivers/leds/trigger/ledtrig-audio.c
index c6b437e6369b8..2ecd4b760fc36 100644
--- a/drivers/leds/trigger/ledtrig-audio.c
+++ b/drivers/leds/trigger/ledtrig-audio.c
@@ -63,3 +63,5 @@ module_exit(ledtrig_audio_exit);
MODULE_DESCRIPTION("LED trigger for audio mute control");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ledtrig:audio-mute");
+MODULE_ALIAS("ledtrig:audio-micmute");
diff --git a/drivers/leds/trigger/ledtrig-default-on.c b/drivers/leds/trigger/ledtrig-default-on.c
index 8207f85eceb16..8678e64a5c337 100644
--- a/drivers/leds/trigger/ledtrig-default-on.c
+++ b/drivers/leds/trigger/ledtrig-default-on.c
@@ -28,3 +28,4 @@ module_led_trigger(defon_led_trigger);
MODULE_AUTHOR("Nick Forbes <nick.forbes@incepta.com>");
MODULE_DESCRIPTION("Default-ON LED trigger");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ledtrig:default-on");
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 8e5475819590e..ea00f6c708826 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -18,10 +18,12 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/leds.h>
+#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
+#include <linux/phy.h>
#include <linux/rtnetlink.h>
#include <linux/timer.h>
#include "../leds.h"
@@ -65,12 +67,15 @@ struct led_netdev_data {
unsigned long mode;
int link_speed;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_link_modes);
u8 duplex;
bool carrier_link_up;
bool hw_control;
};
+static const struct attribute_group netdev_trig_link_speed_attrs_group;
+
static void set_baseline_state(struct led_netdev_data *trigger_data)
{
int current_brightness;
@@ -218,13 +223,20 @@ static void get_device_state(struct led_netdev_data *trigger_data)
struct ethtool_link_ksettings cmd;
trigger_data->carrier_link_up = netif_carrier_ok(trigger_data->net_dev);
- if (!trigger_data->carrier_link_up)
+
+ if (__ethtool_get_link_ksettings(trigger_data->net_dev, &cmd))
return;
- if (!__ethtool_get_link_ksettings(trigger_data->net_dev, &cmd)) {
+ if (trigger_data->carrier_link_up) {
trigger_data->link_speed = cmd.base.speed;
trigger_data->duplex = cmd.base.duplex;
}
+
+ /*
+	 * Keep a local copy of the supported link modes so we do not need to
+	 * take the rtnl lock every time the modes are refreshed on a change event
+ */
+ linkmode_copy(trigger_data->supported_link_modes, cmd.link_modes.supported);
}
static ssize_t device_name_show(struct device *dev,
@@ -277,7 +289,10 @@ static int set_device_name(struct led_netdev_data *trigger_data,
trigger_data->last_activity = 0;
- set_baseline_state(trigger_data);
+ /* Skip if we're called from netdev_trig_activate() and hw_control is true */
+ if (!trigger_data->hw_control || led_get_trigger_data(trigger_data->led_cdev))
+ set_baseline_state(trigger_data);
+
mutex_unlock(&trigger_data->lock);
rtnl_unlock();
@@ -295,6 +310,10 @@ static ssize_t device_name_store(struct device *dev,
if (ret < 0)
return ret;
+
+ /* Refresh link_speed visibility */
+ sysfs_update_group(&dev->kobj, &netdev_trig_link_speed_attrs_group);
+
return size;
}
@@ -458,15 +477,63 @@ static ssize_t offloaded_show(struct device *dev,
static DEVICE_ATTR_RO(offloaded);
-static struct attribute *netdev_trig_attrs[] = {
- &dev_attr_device_name.attr,
- &dev_attr_link.attr,
+#define CHECK_LINK_MODE_ATTR(link_speed) \
+ do { \
+ if (attr == &dev_attr_link_##link_speed.attr && \
+ link_ksettings.base.speed == SPEED_##link_speed) \
+ return attr->mode; \
+ } while (0)
+
+static umode_t netdev_trig_link_speed_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct led_netdev_data *trigger_data;
+ unsigned long *supported_link_modes;
+ u32 mode;
+
+ trigger_data = led_trigger_get_drvdata(dev);
+ supported_link_modes = trigger_data->supported_link_modes;
+
+ /*
+	 * Search the supported link mode mask for a mode matching this
+	 * attribute's speed. Stop at the first match, as we only care whether
+	 * a particular speed is supported, not which link mode provides it.
+ */
+ for_each_set_bit(mode, supported_link_modes, __ETHTOOL_LINK_MODE_MASK_NBITS) {
+ struct ethtool_link_ksettings link_ksettings;
+
+ ethtool_params_from_link_mode(&link_ksettings, mode);
+
+ CHECK_LINK_MODE_ATTR(10);
+ CHECK_LINK_MODE_ATTR(100);
+ CHECK_LINK_MODE_ATTR(1000);
+ CHECK_LINK_MODE_ATTR(2500);
+ CHECK_LINK_MODE_ATTR(5000);
+ CHECK_LINK_MODE_ATTR(10000);
+ }
+
+ return 0;
+}
+
+static struct attribute *netdev_trig_link_speed_attrs[] = {
&dev_attr_link_10.attr,
&dev_attr_link_100.attr,
&dev_attr_link_1000.attr,
&dev_attr_link_2500.attr,
&dev_attr_link_5000.attr,
&dev_attr_link_10000.attr,
+ NULL
+};
+
+static const struct attribute_group netdev_trig_link_speed_attrs_group = {
+ .attrs = netdev_trig_link_speed_attrs,
+ .is_visible = netdev_trig_link_speed_visible,
+};
+
+static struct attribute *netdev_trig_attrs[] = {
+ &dev_attr_device_name.attr,
+ &dev_attr_link.attr,
&dev_attr_full_duplex.attr,
&dev_attr_half_duplex.attr,
&dev_attr_rx.attr,
@@ -475,7 +542,16 @@ static struct attribute *netdev_trig_attrs[] = {
&dev_attr_offloaded.attr,
NULL
};
-ATTRIBUTE_GROUPS(netdev_trig);
+
+static const struct attribute_group netdev_trig_attrs_group = {
+ .attrs = netdev_trig_attrs,
+};
+
+static const struct attribute_group *netdev_trig_groups[] = {
+ &netdev_trig_attrs_group,
+ &netdev_trig_link_speed_attrs_group,
+ NULL,
+};
static int netdev_trig_notify(struct notifier_block *nb,
unsigned long evt, void *dv)
@@ -484,6 +560,7 @@ static int netdev_trig_notify(struct notifier_block *nb,
netdev_notifier_info_to_dev((struct netdev_notifier_info *)dv);
struct led_netdev_data *trigger_data =
container_of(nb, struct led_netdev_data, notifier);
+ struct led_classdev *led_cdev = trigger_data->led_cdev;
if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
&& evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
@@ -504,12 +581,12 @@ static int netdev_trig_notify(struct notifier_block *nb,
trigger_data->duplex = DUPLEX_UNKNOWN;
switch (evt) {
case NETDEV_CHANGENAME:
- get_device_state(trigger_data);
- fallthrough;
case NETDEV_REGISTER:
dev_put(trigger_data->net_dev);
dev_hold(dev);
trigger_data->net_dev = dev;
+ if (evt == NETDEV_CHANGENAME)
+ get_device_state(trigger_data);
break;
case NETDEV_UNREGISTER:
dev_put(trigger_data->net_dev);
@@ -518,6 +595,10 @@ static int netdev_trig_notify(struct notifier_block *nb,
case NETDEV_UP:
case NETDEV_CHANGE:
get_device_state(trigger_data);
+ /* Refresh link_speed visibility */
+ if (evt == NETDEV_CHANGE)
+ sysfs_update_group(&led_cdev->dev->kobj,
+ &netdev_trig_link_speed_attrs_group);
break;
}
@@ -617,8 +698,8 @@ static int netdev_trig_activate(struct led_classdev *led_cdev)
if (dev) {
const char *name = dev_name(dev);
- set_device_name(trigger_data, name, strlen(name));
trigger_data->hw_control = true;
+ set_device_name(trigger_data, name, strlen(name));
rc = led_cdev->hw_control_get(led_cdev, &mode);
if (!rc)
@@ -663,3 +744,4 @@ MODULE_AUTHOR("Ben Whitten <ben.whitten@gmail.com>");
MODULE_AUTHOR("Oliver Jowett <oliver@opencloud.com>");
MODULE_DESCRIPTION("Netdev LED trigger");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ledtrig:netdev");
diff --git a/drivers/leds/trigger/ledtrig-panic.c b/drivers/leds/trigger/ledtrig-panic.c
index 5a6b21bfeb9af..1d49c10780910 100644
--- a/drivers/leds/trigger/ledtrig-panic.c
+++ b/drivers/leds/trigger/ledtrig-panic.c
@@ -21,24 +21,15 @@ static struct led_trigger *trigger;
*/
static void led_trigger_set_panic(struct led_classdev *led_cdev)
{
- struct led_trigger *trig;
+ if (led_cdev->trigger)
+ list_del(&led_cdev->trig_list);
+ list_add_tail(&led_cdev->trig_list, &trigger->led_cdevs);
- list_for_each_entry(trig, &trigger_list, next_trig) {
- if (strcmp("panic", trig->name))
- continue;
- if (led_cdev->trigger)
- list_del(&led_cdev->trig_list);
- list_add_tail(&led_cdev->trig_list, &trig->led_cdevs);
+ /* Avoid the delayed blink path */
+ led_cdev->blink_delay_on = 0;
+ led_cdev->blink_delay_off = 0;
- /* Avoid the delayed blink path */
- led_cdev->blink_delay_on = 0;
- led_cdev->blink_delay_off = 0;
-
- led_cdev->trigger = trig;
- if (trig->activate)
- trig->activate(led_cdev);
- break;
- }
+ led_cdev->trigger = trigger;
}
static int led_trigger_panic_notifier(struct notifier_block *nb,
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 057b0221f695a..b0407c5fadb2a 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -74,7 +74,9 @@ static struct adb_driver *adb_driver_list[] = {
NULL
};
-static struct class *adb_dev_class;
+static const struct class adb_dev_class = {
+ .name = "adb",
+};
static struct adb_driver *adb_controller;
BLOCKING_NOTIFIER_HEAD(adb_client_list);
@@ -888,10 +890,10 @@ adbdev_init(void)
return;
}
- adb_dev_class = class_create("adb");
- if (IS_ERR(adb_dev_class))
+ if (class_register(&adb_dev_class))
return;
- device_create(adb_dev_class, NULL, MKDEV(ADB_MAJOR, 0), NULL, "adb");
+
+ device_create(&adb_dev_class, NULL, MKDEV(ADB_MAJOR, 0), NULL, "adb");
platform_device_register(&adb_pfdev);
platform_driver_probe(&adb_pfdrv, adb_dummy_probe);
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index a5ee8f736a8e0..565f1e21ff7dc 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -136,7 +136,7 @@ static int macio_device_modalias(const struct device *dev, struct kobj_uevent_en
extern const struct attribute_group *macio_dev_groups[];
-struct bus_type macio_bus_type = {
+const struct bus_type macio_bus_type = {
.name = "macio",
.match = macio_bus_match,
.uevent = macio_device_modalias,
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 40240bce77b01..896a43bd819f7 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -523,7 +523,7 @@ static int rackmeter_probe(struct macio_dev* mdev,
return rc;
}
-static int rackmeter_remove(struct macio_dev* mdev)
+static void rackmeter_remove(struct macio_dev *mdev)
{
struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev);
@@ -558,8 +558,6 @@ static int rackmeter_remove(struct macio_dev* mdev)
/* Get rid of me */
kfree(rm);
-
- return 0;
}
static int rackmeter_shutdown(struct macio_dev* mdev)
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 3c1b29476ce24..37cdc6931f6d0 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -481,11 +481,9 @@ static int therm_of_probe(struct platform_device *dev)
return -ENODEV;
}
-static int
-therm_of_remove( struct platform_device *dev )
+static void therm_of_remove(struct platform_device *dev)
{
i2c_del_driver( &g4fan_driver );
- return 0;
}
static const struct of_device_id therm_of_match[] = {{
@@ -501,7 +499,7 @@ static struct platform_driver therm_of_driver = {
.of_match_table = therm_of_match,
},
.probe = therm_of_probe,
- .remove = therm_of_remove,
+ .remove_new = therm_of_remove,
};
struct apple_thermal_info {
diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c
index d1dec314ae305..876b4d8cbe378 100644
--- a/drivers/macintosh/windfarm_pm112.c
+++ b/drivers/macintosh/windfarm_pm112.c
@@ -662,16 +662,14 @@ static int wf_pm112_probe(struct platform_device *dev)
return 0;
}
-static int wf_pm112_remove(struct platform_device *dev)
+static void wf_pm112_remove(struct platform_device *dev)
{
wf_unregister_client(&pm112_events);
- /* should release all sensors and controls */
- return 0;
}
static struct platform_driver wf_pm112_driver = {
.probe = wf_pm112_probe,
- .remove = wf_pm112_remove,
+ .remove_new = wf_pm112_remove,
.driver = {
.name = "windfarm",
},
diff --git a/drivers/macintosh/windfarm_pm121.c b/drivers/macintosh/windfarm_pm121.c
index 82500417ebeec..cd45fbc4fe1ce 100644
--- a/drivers/macintosh/windfarm_pm121.c
+++ b/drivers/macintosh/windfarm_pm121.c
@@ -992,15 +992,14 @@ static int pm121_probe(struct platform_device *ddev)
return 0;
}
-static int pm121_remove(struct platform_device *ddev)
+static void pm121_remove(struct platform_device *ddev)
{
wf_unregister_client(&pm121_events);
- return 0;
}
static struct platform_driver pm121_driver = {
.probe = pm121_probe,
- .remove = pm121_remove,
+ .remove_new = pm121_remove,
.driver = {
.name = "windfarm",
.bus = &platform_bus_type,
diff --git a/drivers/macintosh/windfarm_pm72.c b/drivers/macintosh/windfarm_pm72.c
index e21f973551cc2..14fa1e9ac3e00 100644
--- a/drivers/macintosh/windfarm_pm72.c
+++ b/drivers/macintosh/windfarm_pm72.c
@@ -775,17 +775,14 @@ static int wf_pm72_probe(struct platform_device *dev)
return 0;
}
-static int wf_pm72_remove(struct platform_device *dev)
+static void wf_pm72_remove(struct platform_device *dev)
{
wf_unregister_client(&pm72_events);
-
- /* should release all sensors and controls */
- return 0;
}
static struct platform_driver wf_pm72_driver = {
.probe = wf_pm72_probe,
- .remove = wf_pm72_remove,
+ .remove_new = wf_pm72_remove,
.driver = {
.name = "windfarm",
},
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c
index 257fb2c695c53..404d2454e33de 100644
--- a/drivers/macintosh/windfarm_pm81.c
+++ b/drivers/macintosh/windfarm_pm81.c
@@ -724,7 +724,7 @@ static int wf_smu_probe(struct platform_device *ddev)
return 0;
}
-static int wf_smu_remove(struct platform_device *ddev)
+static void wf_smu_remove(struct platform_device *ddev)
{
wf_unregister_client(&wf_smu_events);
@@ -761,13 +761,11 @@ static int wf_smu_remove(struct platform_device *ddev)
/* Destroy control loops state structures */
kfree(wf_smu_sys_fans);
kfree(wf_smu_cpu_fans);
-
- return 0;
}
static struct platform_driver wf_smu_driver = {
- .probe = wf_smu_probe,
- .remove = wf_smu_remove,
+ .probe = wf_smu_probe,
+ .remove_new = wf_smu_remove,
.driver = {
.name = "windfarm",
},
diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c
index 120a9cfba0c54..fba02a375435e 100644
--- a/drivers/macintosh/windfarm_pm91.c
+++ b/drivers/macintosh/windfarm_pm91.c
@@ -647,7 +647,7 @@ static int wf_smu_probe(struct platform_device *ddev)
return 0;
}
-static int wf_smu_remove(struct platform_device *ddev)
+static void wf_smu_remove(struct platform_device *ddev)
{
wf_unregister_client(&wf_smu_events);
@@ -691,13 +691,11 @@ static int wf_smu_remove(struct platform_device *ddev)
kfree(wf_smu_slots_fans);
kfree(wf_smu_drive_fans);
kfree(wf_smu_cpu_fans);
-
- return 0;
}
static struct platform_driver wf_smu_driver = {
- .probe = wf_smu_probe,
- .remove = wf_smu_remove,
+ .probe = wf_smu_probe,
+ .remove_new = wf_smu_remove,
.driver = {
.name = "windfarm",
},
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
index e9eb7fdde48c1..dc8f2c7ef1031 100644
--- a/drivers/macintosh/windfarm_rm31.c
+++ b/drivers/macintosh/windfarm_rm31.c
@@ -668,17 +668,14 @@ static int wf_rm31_probe(struct platform_device *dev)
return 0;
}
-static int wf_rm31_remove(struct platform_device *dev)
+static void wf_rm31_remove(struct platform_device *dev)
{
wf_unregister_client(&rm31_events);
-
- /* should release all sensors and controls */
- return 0;
}
static struct platform_driver wf_rm31_driver = {
.probe = wf_rm31_probe,
- .remove = wf_rm31_remove,
+ .remove_new = wf_rm31_remove,
.driver = {
.name = "windfarm",
},
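The macintosh hunks above all follow the same treewide conversion: macio and platform remove callbacks stop returning an int that the core ignored anyway. A minimal sketch of the platform_driver side of that convention, with made-up demo_* names that belong to none of the drivers above:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
        /* acquire resources as usual; probe still returns an error code */
        return 0;
}

static void demo_remove(struct platform_device *pdev)
{
        /* release resources; there is nothing useful to return on remove */
}

static struct platform_driver demo_driver = {
        .probe          = demo_probe,
        .remove_new     = demo_remove,  /* void-returning remove callback */
        .driver = {
                .name = "demo-remove-new",
        },
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");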
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index 61994da7bad01..267045b765059 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -156,7 +156,7 @@ static const struct attribute_group *mcb_carrier_groups[] = {
};
-static struct bus_type mcb_bus_type = {
+static const struct bus_type mcb_bus_type = {
.name = "mcb",
.match = mcb_match,
.uevent = mcb_uevent,
@@ -165,7 +165,7 @@ static struct bus_type mcb_bus_type = {
.shutdown = mcb_shutdown,
};
-static struct device_type mcb_carrier_device_type = {
+static const struct device_type mcb_carrier_device_type = {
.name = "mcb-carrier",
.groups = mcb_carrier_groups,
};
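Constifying mcb_bus_type works because bus_register() and bus_unregister() take const struct bus_type pointers, so a bus with no mutable state can live in rodata. A hedged sketch with hypothetical names:

#include <linux/device.h>
#include <linux/module.h>

static int demo_bus_match(struct device *dev, struct device_driver *drv)
{
        return 1;       /* bind anything; real buses compare IDs here */
}

static const struct bus_type demo_bus = {
        .name  = "demo-bus",
        .match = demo_bus_match,
};

static int __init demo_bus_init(void)
{
        return bus_register(&demo_bus);
}
module_init(demo_bus_init);

static void __exit demo_bus_exit(void)
{
        bus_unregister(&demo_bus);
}
module_exit(demo_bus_exit);
MODULE_LICENSE("GPL");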
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 68ce56fc61d0a..35b1080752cd7 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -519,7 +519,6 @@ config DM_VERITY
If unsure, say N.
config DM_VERITY_VERIFY_ROOTHASH_SIG
- def_bool n
bool "Verity data device root hash signature verification support"
depends on DM_VERITY
select SYSTEM_DATA_VERIFICATION
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index a438efb660699..6956beb55326f 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -702,13 +702,7 @@ static unsigned int bch_cache_max_chain(struct cache_set *c)
for (h = c->bucket_hash;
h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
h++) {
- unsigned int i = 0;
- struct hlist_node *p;
-
- hlist_for_each(p, h)
- i++;
-
- ret = max(ret, i);
+ ret = max(ret, hlist_count_nodes(h));
}
mutex_unlock(&c->bucket_lock);
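The bcache hunk replaces an open-coded hlist walk with hlist_count_nodes(). For reference, the removed loop is equivalent to the helper below; this is only an illustration of what the new call computes:

#include <linux/list.h>

static size_t demo_hlist_len(struct hlist_head *head)
{
        struct hlist_node *pos;
        size_t n = 0;

        hlist_for_each(pos, head)
                n++;

        return n;       /* the value hlist_count_nodes(head) now returns */
}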
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index d822ab2f739b0..7f3dc8ee6ab8d 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1699,7 +1699,6 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
struct bio_vec bv;
sector_t sector, logical_sector, area, offset;
struct page *page;
- void *buffer;
get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
@@ -1708,13 +1707,14 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
logical_sector = dio->range.logical_sector;
page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
- buffer = page_to_virt(page);
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
unsigned pos = 0;
do {
+ sector_t alignment;
char *mem;
+ char *buffer = page_to_virt(page);
int r;
struct dm_io_request io_req;
struct dm_io_region io_loc;
@@ -1727,6 +1727,14 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
io_loc.sector = sector;
io_loc.count = ic->sectors_per_block;
+ /* Align the bio to logical block size */
+ alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);
+ alignment &= -alignment;
+ io_loc.sector = round_down(io_loc.sector, alignment);
+ io_loc.count += sector - io_loc.sector;
+ buffer += (sector - io_loc.sector) << SECTOR_SHIFT;
+ io_loc.count = round_up(io_loc.count, alignment);
+
r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r)) {
dio->bi_status = errno_to_blk_status(r);
@@ -1848,12 +1856,12 @@ again:
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
if (unlikely(r)) {
+ if (likely(checksums != checksums_onstack))
+ kfree(checksums);
if (r > 0) {
- integrity_recheck(dio, checksums);
+ integrity_recheck(dio, checksums_onstack);
goto skip_io;
}
- if (likely(checksums != checksums_onstack))
- kfree(checksums);
goto error;
}
@@ -4213,7 +4221,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
} else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
} else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
- if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
+ if ((uint64_t)val >= (uint64_t)UINT_MAX * 1000 / HZ) {
r = -EINVAL;
ti->error = "Invalid bitmap_flush_interval argument";
goto bad;
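The dm-integrity recheck hunk aligns the re-read to the largest power of two dividing the starting sector, the bio length and a page worth of sectors, using the lowest-set-bit idiom. Isolated below as a sketch, not the driver's actual helper:

#include <linux/types.h>

static sector_t demo_common_alignment(sector_t start, sector_t nr_sectors,
                                      sector_t page_sectors)
{
        sector_t alignment = start | nr_sectors | page_sectors;

        /*
         * x & -x keeps only the lowest set bit, i.e. the largest power of
         * two that divides every OR-ed operand; e.g. 24 | 8 | 8 = 24 -> 8.
         */
        return alignment & -alignment;
}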
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index bf7a574499a34..0ace06d1bee38 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -684,8 +684,10 @@ static void dm_exception_table_exit(struct dm_exception_table *et,
for (i = 0; i < size; i++) {
slot = et->table + i;
- hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
+ hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list) {
kmem_cache_free(mem, ex);
+ cond_resched();
+ }
}
kvfree(et->table);
diff --git a/drivers/md/dm-vdo/murmurhash3.c b/drivers/md/dm-vdo/murmurhash3.c
index 00c9b9c050011..01d2743444ec6 100644
--- a/drivers/md/dm-vdo/murmurhash3.c
+++ b/drivers/md/dm-vdo/murmurhash3.c
@@ -8,33 +8,14 @@
#include "murmurhash3.h"
+#include <asm/unaligned.h>
+
static inline u64 rotl64(u64 x, s8 r)
{
return (x << r) | (x >> (64 - r));
}
#define ROTL64(x, y) rotl64(x, y)
-static __always_inline u64 getblock64(const u64 *p, int i)
-{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- return p[i];
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
- return __builtin_bswap64(p[i]);
-#else
-#error "can't figure out byte order"
-#endif
-}
-
-static __always_inline void putblock64(u64 *p, int i, u64 value)
-{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- p[i] = value;
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
- p[i] = __builtin_bswap64(value);
-#else
-#error "can't figure out byte order"
-#endif
-}
/* Finalization mix - force all bits of a hash block to avalanche */
@@ -60,6 +41,8 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
const u64 c1 = 0x87c37b91114253d5LLU;
const u64 c2 = 0x4cf5ad432745937fLLU;
+ u64 *hash_out = out;
+
/* body */
const u64 *blocks = (const u64 *)(data);
@@ -67,8 +50,8 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
int i;
for (i = 0; i < nblocks; i++) {
- u64 k1 = getblock64(blocks, i * 2 + 0);
- u64 k2 = getblock64(blocks, i * 2 + 1);
+ u64 k1 = get_unaligned_le64(&blocks[i * 2]);
+ u64 k2 = get_unaligned_le64(&blocks[i * 2 + 1]);
k1 *= c1;
k1 = ROTL64(k1, 31);
@@ -170,6 +153,6 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
h1 += h2;
h2 += h1;
- putblock64((u64 *)out, 0, h1);
- putblock64((u64 *)out, 1, h2);
+ put_unaligned_le64(h1, &hash_out[0]);
+ put_unaligned_le64(h2, &hash_out[1]);
}
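The murmurhash3 change drops the hand-rolled byte-order #ifdefs because get_unaligned_le64()/put_unaligned_le64() already read and write 64-bit little-endian values at any address on any host. A small sketch of those semantics, with invented names:

#include <asm/unaligned.h>
#include <linux/types.h>

static u64 demo_load_block(const void *buf, unsigned int i)
{
        /* buf may be arbitrarily aligned; block i sits at byte offset i * 8 */
        return get_unaligned_le64((const u8 *)buf + i * 8);
}

static void demo_store_block(void *buf, unsigned int i, u64 value)
{
        put_unaligned_le64(value, (u8 *)buf + i * 8);
}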
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 06263b0a7b588..56aa2a8b9d715 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2057,6 +2057,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
static struct mapped_device *alloc_dev(int minor)
{
int r, numa_node_id = dm_get_numa_node();
+ struct dax_device *dax_dev;
struct mapped_device *md;
void *old_md;
@@ -2125,15 +2126,15 @@ static struct mapped_device *alloc_dev(int minor)
md->disk->private_data = md;
sprintf(md->disk->disk_name, "dm-%d", minor);
- if (IS_ENABLED(CONFIG_FS_DAX)) {
- md->dax_dev = alloc_dax(md, &dm_dax_ops);
- if (IS_ERR(md->dax_dev)) {
- md->dax_dev = NULL;
+ dax_dev = alloc_dax(md, &dm_dax_ops);
+ if (IS_ERR(dax_dev)) {
+ if (PTR_ERR(dax_dev) != -EOPNOTSUPP)
goto bad;
- }
- set_dax_nocache(md->dax_dev);
- set_dax_nomc(md->dax_dev);
- if (dax_add_host(md->dax_dev, md->disk))
+ } else {
+ set_dax_nocache(dax_dev);
+ set_dax_nomc(dax_dev);
+ md->dax_dev = dax_dev;
+ if (dax_add_host(dax_dev, md->disk))
goto bad;
}
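The dm.c hunk relies on alloc_dax() returning ERR_PTR(-EOPNOTSUPP) when DAX is not available, so the IS_ENABLED(CONFIG_FS_DAX) guard can go and only that error is treated as non-fatal. A rough sketch of the resulting shape, assuming a caller that supplies its own dax_operations; names are illustrative:

#include <linux/dax.h>
#include <linux/err.h>

static int demo_setup_dax(void *drv_data, const struct dax_operations *ops,
                          struct dax_device **out)
{
        struct dax_device *dax_dev = alloc_dax(drv_data, ops);

        if (IS_ERR(dax_dev)) {
                if (PTR_ERR(dax_dev) == -EOPNOTSUPP) {
                        *out = NULL;            /* run without DAX support */
                        return 0;
                }
                return PTR_ERR(dax_dev);        /* any other error is fatal */
        }

        *out = dax_dev;
        return 0;
}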
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index be8ac24f50b6a..7b8a71ca66dde 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1558,7 +1558,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
for (j = 0; j < i; j++)
if (r1_bio->bios[j])
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
- free_r1bio(r1_bio);
+ mempool_free(r1_bio, &conf->r1bio_pool);
allow_barrier(conf, bio->bi_iter.bi_sector);
if (bio->bi_opf & REQ_NOWAIT) {
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index 5741adf09a2ef..559a172ebc6cb 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -1151,20 +1151,6 @@ void cec_received_msg_ts(struct cec_adapter *adap,
if (valid_la && min_len) {
/* These messages have special length requirements */
switch (cmd) {
- case CEC_MSG_TIMER_STATUS:
- if (msg->msg[2] & 0x10) {
- switch (msg->msg[2] & 0xf) {
- case CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE:
- case CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE:
- if (msg->len < 5)
- valid_la = false;
- break;
- }
- } else if ((msg->msg[2] & 0xf) == CEC_OP_PROG_ERROR_DUPLICATE) {
- if (msg->len < 5)
- valid_la = false;
- }
- break;
case CEC_MSG_RECORD_ON:
switch (msg->msg[2]) {
case CEC_OP_RECORD_SRC_OWN:
diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c
index 7e153c5cad04f..5a54db839e5d6 100644
--- a/drivers/media/cec/core/cec-core.c
+++ b/drivers/media/cec/core/cec-core.c
@@ -93,7 +93,7 @@ static void cec_devnode_release(struct device *cd)
cec_delete_adapter(to_cec_adapter(devnode));
}
-static struct bus_type cec_bus_type = {
+static const struct bus_type cec_bus_type = {
.name = CEC_NAME,
};
diff --git a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
index 52ec0ba4b3393..48ed2993d2f0d 100644
--- a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
+++ b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
@@ -326,6 +326,8 @@ static const struct cec_dmi_match cec_dmi_match_table[] = {
{ "Google", "Taranza", "0000:00:02.0", port_db_conns },
/* Google Dexi */
{ "Google", "Dexi", "0000:00:02.0", port_db_conns },
+ /* Google Dita */
+ { "Google", "Dita", "0000:00:02.0", port_db_conns },
};
static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index 7d4bc2733f2b0..7ebcb10126c9c 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -2155,7 +2155,7 @@ module_init(smscore_module_init);
module_exit(smscore_module_exit);
MODULE_DESCRIPTION("Siano MDTV Core module");
-MODULE_AUTHOR("Siano Mobile Silicon, Inc. (uris@siano-ms.com)");
+MODULE_AUTHOR("Siano Mobile Silicon, Inc. <uris@siano-ms.com>");
MODULE_LICENSE("GPL");
/* This should match what's defined at smscoreapi.h */
diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
index f80caaa333daf..d893a0e4672b2 100644
--- a/drivers/media/common/siano/smsdvb-main.c
+++ b/drivers/media/common/siano/smsdvb-main.c
@@ -1267,5 +1267,5 @@ module_init(smsdvb_module_init);
module_exit(smsdvb_module_exit);
MODULE_DESCRIPTION("SMS DVB subsystem adaptation module");
-MODULE_AUTHOR("Siano Mobile Silicon, Inc. (uris@siano-ms.com)");
+MODULE_AUTHOR("Siano Mobile Silicon, Inc. <uris@siano-ms.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index a366566f22c3b..642c48e8c1f58 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -113,6 +113,7 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
{
unsigned pat;
unsigned plane;
+ int ret = 0;
tpg->max_line_width = max_w;
for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++) {
@@ -121,14 +122,18 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
tpg->lines[pat][plane] =
vzalloc(array3_size(max_w, 2, pixelsz));
- if (!tpg->lines[pat][plane])
- return -ENOMEM;
+ if (!tpg->lines[pat][plane]) {
+ ret = -ENOMEM;
+ goto free_lines;
+ }
if (plane == 0)
continue;
tpg->downsampled_lines[pat][plane] =
vzalloc(array3_size(max_w, 2, pixelsz));
- if (!tpg->downsampled_lines[pat][plane])
- return -ENOMEM;
+ if (!tpg->downsampled_lines[pat][plane]) {
+ ret = -ENOMEM;
+ goto free_lines;
+ }
}
}
for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
@@ -136,18 +141,45 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
tpg->contrast_line[plane] =
vzalloc(array_size(pixelsz, max_w));
- if (!tpg->contrast_line[plane])
- return -ENOMEM;
+ if (!tpg->contrast_line[plane]) {
+ ret = -ENOMEM;
+ goto free_contrast_line;
+ }
tpg->black_line[plane] =
vzalloc(array_size(pixelsz, max_w));
- if (!tpg->black_line[plane])
- return -ENOMEM;
+ if (!tpg->black_line[plane]) {
+ ret = -ENOMEM;
+ goto free_contrast_line;
+ }
tpg->random_line[plane] =
vzalloc(array3_size(max_w, 2, pixelsz));
- if (!tpg->random_line[plane])
- return -ENOMEM;
+ if (!tpg->random_line[plane]) {
+ ret = -ENOMEM;
+ goto free_contrast_line;
+ }
}
return 0;
+
+free_contrast_line:
+ for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
+ vfree(tpg->contrast_line[plane]);
+ vfree(tpg->black_line[plane]);
+ vfree(tpg->random_line[plane]);
+ tpg->contrast_line[plane] = NULL;
+ tpg->black_line[plane] = NULL;
+ tpg->random_line[plane] = NULL;
+ }
+free_lines:
+ for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++)
+ for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
+ vfree(tpg->lines[pat][plane]);
+ tpg->lines[pat][plane] = NULL;
+ if (plane == 0)
+ continue;
+ vfree(tpg->downsampled_lines[pat][plane]);
+ tpg->downsampled_lines[pat][plane] = NULL;
+ }
+ return ret;
}
EXPORT_SYMBOL_GPL(tpg_alloc);
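The tpg_alloc() change converts early returns into goto-based unwinding so a partial allocation failure frees everything allocated so far. The generic shape of that pattern, reduced to a toy example with made-up names:

#include <linux/errno.h>
#include <linux/vmalloc.h>

#define DEMO_NR_BUFS    4

static void *demo_buf[DEMO_NR_BUFS];

static int demo_alloc_all(size_t len)
{
        int i;

        for (i = 0; i < DEMO_NR_BUFS; i++) {
                demo_buf[i] = vzalloc(len);
                if (!demo_buf[i])
                        goto free_bufs;
        }
        return 0;

free_bufs:
        /* unwind only what was successfully allocated before the failure */
        while (--i >= 0) {
                vfree(demo_buf[i]);
                demo_buf[i] = NULL;
        }
        return -ENOMEM;
}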
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 9293b058ab997..4f78f30b3646e 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -679,12 +679,10 @@ static int dvb_frontend_thread(void *data)
set_freezable();
while (1) {
up(&fepriv->sem); /* is locked when we enter the thread... */
-restart:
- wait_event_interruptible_timeout(fepriv->wait_queue,
- dvb_frontend_should_wakeup(fe) ||
- kthread_should_stop() ||
- freezing(current),
- fepriv->delay);
+ wait_event_freezable_timeout(fepriv->wait_queue,
+ dvb_frontend_should_wakeup(fe) ||
+ kthread_should_stop(),
+ fepriv->delay);
if (kthread_should_stop() || dvb_frontend_is_exiting(fe)) {
/* got signal or quitting */
@@ -694,9 +692,6 @@ restart:
break;
}
- if (try_to_freeze())
- goto restart;
-
if (down_interruptible(&fepriv->sem))
break;
@@ -2168,7 +2163,8 @@ static int dvb_frontend_handle_compat_ioctl(struct file *file, unsigned int cmd,
if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS))
return -EINVAL;
- tvp = memdup_user(compat_ptr(tvps->props), tvps->num * sizeof(*tvp));
+ tvp = memdup_array_user(compat_ptr(tvps->props),
+ tvps->num, sizeof(*tvp));
if (IS_ERR(tvp))
return PTR_ERR(tvp);
@@ -2199,7 +2195,8 @@ static int dvb_frontend_handle_compat_ioctl(struct file *file, unsigned int cmd,
if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS))
return -EINVAL;
- tvp = memdup_user(compat_ptr(tvps->props), tvps->num * sizeof(*tvp));
+ tvp = memdup_array_user(compat_ptr(tvps->props),
+ tvps->num, sizeof(*tvp));
if (IS_ERR(tvp))
return PTR_ERR(tvp);
@@ -2379,7 +2376,8 @@ static int dvb_get_property(struct dvb_frontend *fe, struct file *file,
if (!tvps->num || tvps->num > DTV_IOCTL_MAX_MSGS)
return -EINVAL;
- tvp = memdup_user((void __user *)tvps->props, tvps->num * sizeof(*tvp));
+ tvp = memdup_array_user((void __user *)tvps->props,
+ tvps->num, sizeof(*tvp));
if (IS_ERR(tvp))
return PTR_ERR(tvp);
@@ -2457,7 +2455,8 @@ static int dvb_frontend_handle_ioctl(struct file *file,
if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS))
return -EINVAL;
- tvp = memdup_user((void __user *)tvps->props, tvps->num * sizeof(*tvp));
+ tvp = memdup_array_user((void __user *)tvps->props,
+ tvps->num, sizeof(*tvp));
if (IS_ERR(tvp))
return PTR_ERR(tvp);
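All four dvb_frontend ioctl hunks move from memdup_user(ptr, num * size) to memdup_array_user(ptr, num, size), which performs the multiplication with overflow checking before copying from user space. A hedged sketch of the pattern; the property struct and helper are invented:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_prop {
        u32 cmd;
        u32 data;
};

static int demo_copy_props(const void __user *uptr, unsigned int num)
{
        struct demo_prop *props;

        /* element count and size are multiplied with overflow checking */
        props = memdup_array_user(uptr, num, sizeof(*props));
        if (IS_ERR(props))
                return PTR_ERR(props);

        /* ... validate and use props[0..num - 1] ... */

        kfree(props);
        return 0;
}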
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 49f0eb7d0b9d3..733d0bc4b4cc3 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -490,6 +490,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
if (!dvbdevfops) {
kfree(dvbdev);
+ *pdvbdev = NULL;
mutex_unlock(&dvbdev_register_lock);
return -ENOMEM;
}
@@ -498,6 +499,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
if (!new_node) {
kfree(dvbdevfops);
kfree(dvbdev);
+ *pdvbdev = NULL;
mutex_unlock(&dvbdev_register_lock);
return -ENOMEM;
}
@@ -531,6 +533,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
}
list_del(&dvbdev->list_head);
kfree(dvbdev);
+ *pdvbdev = NULL;
up_write(&minor_rwsem);
mutex_unlock(&dvbdev_register_lock);
return -EINVAL;
@@ -553,6 +556,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
dvb_media_device_free(dvbdev);
list_del(&dvbdev->list_head);
kfree(dvbdev);
+ *pdvbdev = NULL;
mutex_unlock(&dvbdev_register_lock);
return ret;
}
@@ -571,6 +575,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
dvb_media_device_free(dvbdev);
list_del(&dvbdev->list_head);
kfree(dvbdev);
+ *pdvbdev = NULL;
mutex_unlock(&dvbdev_register_lock);
return PTR_ERR(clsdev);
}
diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
index b3f5c49accafd..27f1de21f5717 100644
--- a/drivers/media/dvb-frontends/bcm3510.c
+++ b/drivers/media/dvb-frontends/bcm3510.c
@@ -797,7 +797,6 @@ struct dvb_frontend* bcm3510_attach(const struct bcm3510_config *config,
struct i2c_adapter *i2c)
{
struct bcm3510_state* state = NULL;
- int ret;
bcm3510_register_value v;
/* allocate memory for the internal state */
@@ -816,7 +815,7 @@ struct dvb_frontend* bcm3510_attach(const struct bcm3510_config *config,
mutex_init(&state->hab_mutex);
- if ((ret = bcm3510_readB(state,0xe0,&v)) < 0)
+ if (bcm3510_readB(state, 0xe0, &v) < 0)
goto error;
deb_info("Revision: 0x%1x, Layer: 0x%1x.\n",v.REVID_e0.REV,v.REVID_e0.LAYER);
diff --git a/drivers/media/dvb-frontends/bcm3510_priv.h b/drivers/media/dvb-frontends/bcm3510_priv.h
index 2c9f3c430a82a..89c71bc42a0fd 100644
--- a/drivers/media/dvb-frontends/bcm3510_priv.h
+++ b/drivers/media/dvb-frontends/bcm3510_priv.h
@@ -12,11 +12,11 @@
#define PACKED __attribute__((packed))
#undef err
-#define err(format, arg...) printk(KERN_ERR "bcm3510: " format "\n" , ## arg)
+#define err(format, arg...) printk(KERN_ERR "bcm3510: " format "\n", ## arg)
#undef info
-#define info(format, arg...) printk(KERN_INFO "bcm3510: " format "\n" , ## arg)
+#define info(format, arg...) printk(KERN_INFO "bcm3510: " format "\n", ## arg)
#undef warn
-#define warn(format, arg...) printk(KERN_WARNING "bcm3510: " format "\n" , ## arg)
+#define warn(format, arg...) printk(KERN_WARNING "bcm3510: " format "\n", ## arg)
#define PANASONIC_FIRST_IF_BASE_IN_KHz 1407500
diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c
index 9aeea089756fe..65dd9b72ea555 100644
--- a/drivers/media/dvb-frontends/cx24110.c
+++ b/drivers/media/dvb-frontends/cx24110.c
@@ -224,13 +224,13 @@ static enum fe_code_rate cx24110_get_fec(struct cx24110_state *state)
}
}
-static int cx24110_set_symbolrate (struct cx24110_state* state, u32 srate)
+static int cx24110_set_symbolrate (struct cx24110_state *state, u32 srate)
{
/* fixme (low): add error handling */
u32 ratio;
u32 tmp, fclk, BDRI;
- static const u32 bands[]={5000000UL,15000000UL,90999000UL/2};
+ static const u32 bands[] = {5000000UL, 15000000UL, 90999000UL/2};
int i;
dprintk("cx24110 debug: entering %s(%d)\n",__func__,srate);
diff --git a/drivers/media/dvb-frontends/cx24110.h b/drivers/media/dvb-frontends/cx24110.h
index 834b011d3462d..8395518418937 100644
--- a/drivers/media/dvb-frontends/cx24110.h
+++ b/drivers/media/dvb-frontends/cx24110.h
@@ -34,11 +34,11 @@ static inline int cx24110_pll_write(struct dvb_frontend *fe, u32 val)
}
#if IS_REACHABLE(CONFIG_DVB_CX24110)
-extern struct dvb_frontend* cx24110_attach(const struct cx24110_config* config,
- struct i2c_adapter* i2c);
+extern struct dvb_frontend *cx24110_attach(const struct cx24110_config *config,
+ struct i2c_adapter *i2c);
#else
-static inline struct dvb_frontend* cx24110_attach(const struct cx24110_config* config,
- struct i2c_adapter* i2c)
+static inline struct dvb_frontend *cx24110_attach(const struct cx24110_config *config,
+ struct i2c_adapter *i2c)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index ac6e47d81b9eb..75fc7ad263d05 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -1647,7 +1647,7 @@ static const struct dvb_frontend_ops cx24117_ops = {
MODULE_DESCRIPTION("DVB Frontend module for Conexant cx24117/cx24132 hardware");
-MODULE_AUTHOR("Luis Alves (ljalvs@gmail.com)");
+MODULE_AUTHOR("Luis Alves <ljalvs@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.1");
MODULE_FIRMWARE(CX24117_DEFAULT_FIRMWARE);
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index ef697ab6bc2e5..1775a4aa0a186 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -796,7 +796,7 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
b1[0] = 0;
msg.buf = b1;
- nr = ida_simple_get(&pll_ida, 0, DVB_PLL_MAX, GFP_KERNEL);
+ nr = ida_alloc_max(&pll_ida, DVB_PLL_MAX - 1, GFP_KERNEL);
if (nr < 0) {
kfree(b1);
return NULL;
@@ -862,7 +862,7 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
return fe;
out:
kfree(b1);
- ida_simple_remove(&pll_ida, nr);
+ ida_free(&pll_ida, nr);
return NULL;
}
@@ -905,7 +905,7 @@ static void dvb_pll_remove(struct i2c_client *client)
struct dvb_frontend *fe = i2c_get_clientdata(client);
struct dvb_pll_priv *priv = fe->tuner_priv;
- ida_simple_remove(&pll_ida, priv->nr);
+ ida_free(&pll_ida, priv->nr);
dvb_pll_release(fe);
}
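The dvb-pll conversion is a straight ida_simple_get()/ida_simple_remove() to ida_alloc_max()/ida_free() rename, with one subtlety: the old 'end' argument was exclusive while the new 'max' is inclusive, hence DVB_PLL_MAX - 1. A small sketch under invented names:

#include <linux/gfp.h>
#include <linux/idr.h>

#define DEMO_MAX_IDS    64

static DEFINE_IDA(demo_ida);

static int demo_get_id(void)
{
        /* allocates the smallest free ID in [0, DEMO_MAX_IDS - 1] */
        return ida_alloc_max(&demo_ida, DEMO_MAX_IDS - 1, GFP_KERNEL);
}

static void demo_put_id(int id)
{
        ida_free(&demo_ida, id);
}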
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index 48326434488c4..72540ef4e5f88 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -118,50 +118,32 @@ static const s32 stv0367cab_RF_LookUp2[RF_LOOKUP_TABLE2_SIZE][RF_LOOKUP_TABLE2_S
}
};
-static
-int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len)
+static noinline_for_stack
+int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data)
{
- u8 buf[MAX_XFER_SIZE];
+ u8 buf[3] = { MSB(reg), LSB(reg), data };
struct i2c_msg msg = {
.addr = state->config->demod_address,
.flags = 0,
.buf = buf,
- .len = len + 2
+ .len = 3,
};
int ret;
- if (2 + len > sizeof(buf)) {
- printk(KERN_WARNING
- "%s: i2c wr reg=%04x: len=%d is too big!\n",
- KBUILD_MODNAME, reg, len);
- return -EINVAL;
- }
-
-
- buf[0] = MSB(reg);
- buf[1] = LSB(reg);
- memcpy(buf + 2, data, len);
-
if (i2cdebug)
printk(KERN_DEBUG "%s: [%02x] %02x: %02x\n", __func__,
- state->config->demod_address, reg, buf[2]);
+ state->config->demod_address, reg, data);
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
printk(KERN_ERR "%s: i2c write error! ([%02x] %02x: %02x)\n",
- __func__, state->config->demod_address, reg, buf[2]);
+ __func__, state->config->demod_address, reg, data);
return (ret != 1) ? -EREMOTEIO : 0;
}
-static int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data)
-{
- u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
-
- return stv0367_writeregs(state, reg, &tmp, 1);
-}
-
-static u8 stv0367_readreg(struct stv0367_state *state, u16 reg)
+static noinline_for_stack
+u8 stv0367_readreg(struct stv0367_state *state, u16 reg)
{
u8 b0[] = { 0, 0 };
u8 b1[] = { 0 };
diff --git a/drivers/media/dvb-frontends/stv6110x_priv.h b/drivers/media/dvb-frontends/stv6110x_priv.h
index b27769558f789..81410595820a1 100644
--- a/drivers/media/dvb-frontends/stv6110x_priv.h
+++ b/drivers/media/dvb-frontends/stv6110x_priv.h
@@ -20,13 +20,13 @@
#define dprintk(__y, __z, format, arg...) do { \
if (__z) { \
if ((verbose > FE_ERROR) && (verbose > __y)) \
- printk(KERN_ERR "%s: " format "\n", __func__ , ##arg); \
+ printk(KERN_ERR "%s: " format "\n", __func__, ##arg); \
else if ((verbose > FE_NOTICE) && (verbose > __y)) \
- printk(KERN_NOTICE "%s: " format "\n", __func__ , ##arg); \
+ printk(KERN_NOTICE "%s: " format "\n", __func__, ##arg); \
else if ((verbose > FE_INFO) && (verbose > __y)) \
- printk(KERN_INFO "%s: " format "\n", __func__ , ##arg); \
+ printk(KERN_INFO "%s: " format "\n", __func__, ##arg); \
else if ((verbose > FE_DEBUG) && (verbose > __y)) \
- printk(KERN_DEBUG "%s: " format "\n", __func__ , ##arg); \
+ printk(KERN_DEBUG "%s: " format "\n", __func__, ##arg); \
} else { \
if (verbose > __y) \
printk(format, ##arg); \
diff --git a/drivers/media/dvb-frontends/tda8083.h b/drivers/media/dvb-frontends/tda8083.h
index 3a671ec3f45e7..b635ac7ef688e 100644
--- a/drivers/media/dvb-frontends/tda8083.h
+++ b/drivers/media/dvb-frontends/tda8083.h
@@ -24,11 +24,11 @@ struct tda8083_config
};
#if IS_REACHABLE(CONFIG_DVB_TDA8083)
-extern struct dvb_frontend* tda8083_attach(const struct tda8083_config* config,
- struct i2c_adapter* i2c);
+extern struct dvb_frontend *tda8083_attach(const struct tda8083_config *config,
+ struct i2c_adapter *i2c);
#else
-static inline struct dvb_frontend* tda8083_attach(const struct tda8083_config* config,
- struct i2c_adapter* i2c)
+static inline struct dvb_frontend *tda8083_attach(const struct tda8083_config *config,
+ struct i2c_adapter *i2c)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
diff --git a/drivers/media/dvb-frontends/zl10036.c b/drivers/media/dvb-frontends/zl10036.c
index 7ba575e9c55f4..3df055be66d6c 100644
--- a/drivers/media/dvb-frontends/zl10036.c
+++ b/drivers/media/dvb-frontends/zl10036.c
@@ -3,7 +3,7 @@
* Driver for Zarlink zl10036 DVB-S silicon tuner
*
* Copyright (C) 2006 Tino Reichardt
- * Copyright (C) 2007-2009 Matthias Schwarzott <zzam@gentoo.de>
+ * Copyright (C) 2007-2009 Matthias Schwarzott <zzam@gentoo.org>
*
**
* The data sheet for this tuner can be found at:
diff --git a/drivers/media/dvb-frontends/zl10036.h b/drivers/media/dvb-frontends/zl10036.h
index ad83e6344e7fd..23c2964a928c4 100644
--- a/drivers/media/dvb-frontends/zl10036.h
+++ b/drivers/media/dvb-frontends/zl10036.h
@@ -3,7 +3,7 @@
* Driver for Zarlink ZL10036 DVB-S silicon tuner
*
* Copyright (C) 2006 Tino Reichardt
- * Copyright (C) 2007-2009 Matthias Schwarzott <zzam@gentoo.de>
+ * Copyright (C) 2007-2009 Matthias Schwarzott <zzam@gentoo.org>
*/
#ifndef DVB_ZL10036_H
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 4c3435921f19e..56f276b920ab7 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -224,6 +224,7 @@ config VIDEO_IMX412
config VIDEO_IMX415
tristate "Sony IMX415 sensor support"
depends on OF_GPIO
+ select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor driver for the Sony
IMX415 camera.
@@ -658,6 +659,7 @@ config VIDEO_S5K6A3
config VIDEO_ST_VGXY61
tristate "ST VGXY61 sensor support"
+ select V4L2_CCI_I2C
depends on OF && GPIOLIB
help
This is a Video4Linux2 sensor driver for the ST VGXY61
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 409b9a37f0185..4829cbe324198 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -1057,11 +1057,11 @@ static int adv7182_init(struct adv7180_state *state)
ADV7180_REG_EXTENDED_OUTPUT_CONTROL,
0x17);
}
- }
- else
+ } else {
adv7180_write(state,
ADV7180_REG_EXTENDED_OUTPUT_CONTROL,
0x07);
+ }
adv7180_write(state, ADV7180_REG_OUTPUT_CONTROL, 0x0c);
adv7180_write(state, ADV7180_REG_CTRL_2, 0x40);
}
diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c
index ff21cd4744d3d..4fbe4e18570e9 100644
--- a/drivers/media/i2c/adv7343.c
+++ b/drivers/media/i2c/adv7343.c
@@ -403,7 +403,7 @@ adv7343_get_pdata(struct i2c_client *client)
if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
return client->dev.platform_data;
- np = of_graph_get_next_endpoint(client->dev.of_node, NULL);
+ np = of_graph_get_endpoint_by_regs(client->dev.of_node, 0, -1);
if (!np)
return NULL;
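The adv7343 (and later adv7604) hunks switch from of_graph_get_next_endpoint() to of_graph_get_endpoint_by_regs(), where a negative port or reg value acts as a wildcard, so (0, -1) means any endpoint under port 0. A one-line illustration, wrapper name invented:

#include <linux/of_graph.h>

static struct device_node *demo_first_port0_ep(struct device_node *np)
{
        /* port 0, any endpoint reg; caller must of_node_put() the result */
        return of_graph_get_endpoint_by_regs(np, 0, -1);
}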
diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h
index 6f90f78f58cfa..d2b5e722e997f 100644
--- a/drivers/media/i2c/adv748x/adv748x.h
+++ b/drivers/media/i2c/adv748x/adv748x.h
@@ -173,7 +173,6 @@ struct adv748x_afe {
*
* @endpoints: parsed device node endpoints for each port
*
- * @i2c_addresses: I2C Page addresses
* @i2c_clients: I2C clients for the page accesses
* @regmap: regmap configuration pages.
*
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 810fa8826f308..319db3e847c4e 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -3204,8 +3204,8 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
np = state->i2c_clients[ADV76XX_PAGE_IO]->dev.of_node;
- /* Parse the endpoint. */
- endpoint = of_graph_get_next_endpoint(np, NULL);
+ /* FIXME: Parse the endpoint. */
+ endpoint = of_graph_get_endpoint_by_regs(np, -1, -1);
if (!endpoint)
return -EINVAL;
diff --git a/drivers/media/i2c/alvium-csi2.c b/drivers/media/i2c/alvium-csi2.c
index 34ff7fad38774..e65702e3f73e8 100644
--- a/drivers/media/i2c/alvium-csi2.c
+++ b/drivers/media/i2c/alvium-csi2.c
@@ -1170,40 +1170,32 @@ static int alvium_set_bayer_pattern(struct alvium_dev *alvium,
return 0;
}
-static int alvium_get_frame_interval(struct alvium_dev *alvium)
+static int alvium_get_frame_interval(struct alvium_dev *alvium,
+ u64 *min_fr, u64 *max_fr)
{
- u64 dft_fr, min_fr, max_fr;
int ret = 0;
- alvium_read(alvium, REG_BCRM_ACQUISITION_FRAME_RATE_RW,
- &dft_fr, &ret);
alvium_read(alvium, REG_BCRM_ACQUISITION_FRAME_RATE_MIN_R,
- &min_fr, &ret);
+ min_fr, &ret);
alvium_read(alvium, REG_BCRM_ACQUISITION_FRAME_RATE_MAX_R,
- &max_fr, &ret);
- if (ret)
- return ret;
-
- alvium->dft_fr = dft_fr;
- alvium->min_fr = min_fr;
- alvium->max_fr = max_fr;
+ max_fr, &ret);
- return 0;
+ return ret;
}
-static int alvium_set_frame_rate(struct alvium_dev *alvium)
+static int alvium_set_frame_rate(struct alvium_dev *alvium, u64 fr)
{
struct device *dev = &alvium->i2c_client->dev;
int ret;
ret = alvium_write_hshake(alvium, REG_BCRM_ACQUISITION_FRAME_RATE_RW,
- alvium->fr);
+ fr);
if (ret) {
dev_err(dev, "Fail to set frame rate lanes reg\n");
return ret;
}
- dev_dbg(dev, "set frame rate: %llu us\n", alvium->fr);
+ dev_dbg(dev, "set frame rate: %llu us\n", fr);
return 0;
}
@@ -1472,7 +1464,7 @@ static int alvium_get_hw_features_params(struct alvium_dev *alvium)
ret = alvium_get_img_height_params(alvium);
if (ret) {
- dev_err(dev, "Fail to read img heigth regs\n");
+ dev_err(dev, "Fail to read img height regs\n");
return ret;
}
@@ -1647,44 +1639,28 @@ static int alvium_hw_init(struct alvium_dev *alvium)
}
/* --------------- Subdev Operations --------------- */
-
-static int alvium_g_frame_interval(struct v4l2_subdev *sd,
+static int alvium_s_frame_interval(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval *fi)
{
struct alvium_dev *alvium = sd_to_alvium(sd);
-
- /*
- * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
- * subdev active state API.
- */
- if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- fi->interval = alvium->frame_interval;
-
- return 0;
-}
-
-static int alvium_set_frame_interval(struct alvium_dev *alvium,
- struct v4l2_subdev_frame_interval *fi)
-{
struct device *dev = &alvium->i2c_client->dev;
u64 req_fr, min_fr, max_fr;
+ struct v4l2_fract *interval;
int ret;
+ if (alvium->streaming)
+ return -EBUSY;
+
if (fi->interval.denominator == 0)
return -EINVAL;
- ret = alvium_get_frame_interval(alvium);
+ ret = alvium_get_frame_interval(alvium, &min_fr, &max_fr);
if (ret) {
dev_err(dev, "Fail to get frame interval\n");
return ret;
}
- min_fr = alvium->min_fr;
- max_fr = alvium->max_fr;
-
dev_dbg(dev, "fi->interval.numerator = %d\n",
fi->interval.numerator);
dev_dbg(dev, "fi->interval.denominator = %d\n",
@@ -1692,39 +1668,17 @@ static int alvium_set_frame_interval(struct alvium_dev *alvium,
req_fr = (u64)((fi->interval.denominator * USEC_PER_SEC) /
fi->interval.numerator);
+ req_fr = clamp(req_fr, min_fr, max_fr);
- if (req_fr >= max_fr && req_fr <= min_fr)
- req_fr = alvium->dft_fr;
+ interval = v4l2_subdev_state_get_interval(sd_state, 0);
- alvium->fr = req_fr;
- alvium->frame_interval.numerator = fi->interval.numerator;
- alvium->frame_interval.denominator = fi->interval.denominator;
+ interval->numerator = fi->interval.numerator;
+ interval->denominator = fi->interval.denominator;
- return 0;
-}
-
-static int alvium_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_interval *fi)
-{
- struct alvium_dev *alvium = sd_to_alvium(sd);
- int ret;
-
- /*
- * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
- * subdev active state API.
- */
if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- if (alvium->streaming)
- return -EBUSY;
-
- ret = alvium_set_frame_interval(alvium, fi);
- if (!ret)
- ret = alvium_set_frame_rate(alvium);
+ return 0;
- return ret;
+ return alvium_set_frame_rate(alvium, req_fr);
}
static int alvium_enum_mbus_code(struct v4l2_subdev *sd,
@@ -1872,6 +1826,7 @@ static int alvium_init_state(struct v4l2_subdev *sd,
{
struct alvium_dev *alvium = sd_to_alvium(sd);
struct alvium_mode *mode = &alvium->mode;
+ struct v4l2_fract *interval;
struct v4l2_subdev_format sd_fmt = {
.which = V4L2_SUBDEV_FORMAT_TRY,
.format = alvium_csi2_default_fmt,
@@ -1889,6 +1844,11 @@ static int alvium_init_state(struct v4l2_subdev *sd,
*v4l2_subdev_state_get_crop(state, 0) = sd_crop.rect;
*v4l2_subdev_state_get_format(state, 0) = sd_fmt.format;
+ /* Setup initial frame interval*/
+ interval = v4l2_subdev_state_get_interval(state, 0);
+ interval->numerator = 1;
+ interval->denominator = ALVIUM_DEFAULT_FR_HZ;
+
return 0;
}
@@ -2258,7 +2218,7 @@ static const struct v4l2_subdev_pad_ops alvium_pad_ops = {
.set_fmt = alvium_set_fmt,
.get_selection = alvium_get_selection,
.set_selection = alvium_set_selection,
- .get_frame_interval = alvium_g_frame_interval,
+ .get_frame_interval = v4l2_subdev_get_frame_interval,
.set_frame_interval = alvium_s_frame_interval,
};
@@ -2279,11 +2239,6 @@ static int alvium_subdev_init(struct alvium_dev *alvium)
struct v4l2_subdev *sd = &alvium->sd;
int ret;
- /* Setup initial frame interval*/
- alvium->frame_interval.numerator = 1;
- alvium->frame_interval.denominator = ALVIUM_DEFAULT_FR_HZ;
- alvium->fr = ALVIUM_DEFAULT_FR_HZ;
-
/* Setup the initial mode */
alvium->mode.fmt = alvium_csi2_default_fmt;
alvium->mode.width = alvium_csi2_default_fmt.width;
diff --git a/drivers/media/i2c/alvium-csi2.h b/drivers/media/i2c/alvium-csi2.h
index b85a25169e79d..9463f8604fbcc 100644
--- a/drivers/media/i2c/alvium-csi2.h
+++ b/drivers/media/i2c/alvium-csi2.h
@@ -442,11 +442,6 @@ struct alvium_dev {
s32 inc_sharp;
struct alvium_mode mode;
- struct v4l2_fract frame_interval;
- u64 dft_fr;
- u64 min_fr;
- u64 max_fr;
- u64 fr;
u8 h_sup_csi_lanes;
u64 link_freq;
diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c
index c7d5fa532ae1c..09331cf95c62d 100644
--- a/drivers/media/i2c/ar0521.c
+++ b/drivers/media/i2c/ar0521.c
@@ -314,7 +314,7 @@ static void ar0521_calc_pll(struct ar0521_dev *sensor)
* In the clock tree:
* MIPI_CLK = PIXEL_CLOCK * bpp / 2 / 2
*
- * Generic pixel_rate to bus clock frequencey equation:
+ * Generic pixel_rate to bus clock frequency equation:
* MIPI_CLK = V4L2_CID_PIXEL_RATE * bpp / lanes / 2
*
* From which we derive the PIXEL_CLOCK to use in the clock tree:
@@ -327,7 +327,7 @@ static void ar0521_calc_pll(struct ar0521_dev *sensor)
*
* TODO: in case we have less data lanes we have to reduce the desired
* VCO not to exceed the limits specified by the datasheet and
- * consequentially reduce the obtained pixel clock.
+ * consequently reduce the obtained pixel clock.
*/
pixel_clock = AR0521_PIXEL_CLOCK_RATE * 2 / sensor->lane_count;
bpp = ar0521_code_to_bpp(sensor);
@@ -806,7 +806,7 @@ static const struct initial_reg {
REGS(be(0x3F00),
be(0x0017), /* 3F00: BM_T0 */
be(0x02DD), /* 3F02: BM_T1 */
- /* 3F04: if Ana_gain less than 2, use noise_floor0, multipl */
+ /* 3F04: if Ana_gain less than 2, use noise_floor0, multiply */
be(0x0020),
/* 3F06: if Ana_gain between 4 and 7, use noise_floor2 and */
be(0x0040),
diff --git a/drivers/media/i2c/ccs/ccs-quirk.h b/drivers/media/i2c/ccs/ccs-quirk.h
index 0b1a64958d714..392c971096177 100644
--- a/drivers/media/i2c/ccs/ccs-quirk.h
+++ b/drivers/media/i2c/ccs/ccs-quirk.h
@@ -28,11 +28,11 @@ struct ccs_sensor;
* @reg_access: Register access quirk. The quirk may divert the access
* to another register, or no register at all.
*
- * @write: Is this read (false) or write (true) access?
- * @reg: Pointer to the register to access
- * @value: Register value, set by the caller on write, or
+ * -write: Is this read (false) or write (true) access?
+ * -reg: Pointer to the register to access
+ * -val: Register value, set by the caller on write, or
* by the quirk on read
- * @return: 0 on success, -ENOIOCTLCMD if no register
+ * -return: 0 on success, -ENOIOCTLCMD if no register
* access may be done by the caller (default read
* value is zero), else negative error code on error
* @flags: Quirk flags
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index cc09b32ede601..84d29bcf0ccd4 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -157,6 +157,8 @@ static int dw9714_probe(struct i2c_client *client)
return rval;
}
+ usleep_range(1000, 2000);
+
v4l2_i2c_subdev_init(&dw9714_dev->sd, client, &dw9714_ops);
dw9714_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
V4L2_SUBDEV_FL_HAS_EVENTS;
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index b148b1bd2bc3b..10b6ad66d1261 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -968,7 +968,7 @@ static const struct v4l2_subdev_internal_ops imx214_internal_ops = {
static const struct regmap_config sensor_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static int imx214_get_regulators(struct device *dev, struct imx214 *imx214)
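This imx214 hunk and the imx274 one below swap the regcache backend from the rbtree to the maple tree implementation; the regmap_config is otherwise untouched. A minimal sketch with illustrative values:

#include <linux/regmap.h>

static const struct regmap_config demo_sensor_regmap = {
        .reg_bits    = 16,
        .val_bits    = 8,
        .cache_type  = REGCACHE_MAPLE,  /* maple-tree backed register cache */
};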
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index 352da68b8b41b..3800de974e8a9 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -151,7 +151,7 @@ struct reg_8 {
static const struct regmap_config imx274_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
/*
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index 9967f34774334..4150e6e4b9a63 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -150,10 +150,10 @@
#define IMX290_PIXEL_ARRAY_WIDTH 1945
#define IMX290_PIXEL_ARRAY_HEIGHT 1097
-#define IMX920_PIXEL_ARRAY_MARGIN_LEFT 12
-#define IMX920_PIXEL_ARRAY_MARGIN_RIGHT 13
-#define IMX920_PIXEL_ARRAY_MARGIN_TOP 8
-#define IMX920_PIXEL_ARRAY_MARGIN_BOTTOM 9
+#define IMX290_PIXEL_ARRAY_MARGIN_LEFT 12
+#define IMX290_PIXEL_ARRAY_MARGIN_RIGHT 13
+#define IMX290_PIXEL_ARRAY_MARGIN_TOP 8
+#define IMX290_PIXEL_ARRAY_MARGIN_BOTTOM 9
#define IMX290_PIXEL_ARRAY_RECORDING_WIDTH 1920
#define IMX290_PIXEL_ARRAY_RECORDING_HEIGHT 1080
@@ -1161,10 +1161,10 @@ static int imx290_get_selection(struct v4l2_subdev *sd,
* The sensor moves the readout by 1 pixel based on flips to
* keep the Bayer order the same.
*/
- sel->r.top = IMX920_PIXEL_ARRAY_MARGIN_TOP
+ sel->r.top = IMX290_PIXEL_ARRAY_MARGIN_TOP
+ (IMX290_PIXEL_ARRAY_RECORDING_HEIGHT - format->height) / 2
+ imx290->vflip->val;
- sel->r.left = IMX920_PIXEL_ARRAY_MARGIN_LEFT
+ sel->r.left = IMX290_PIXEL_ARRAY_MARGIN_LEFT
+ (IMX290_PIXEL_ARRAY_RECORDING_WIDTH - format->width) / 2
+ imx290->hflip->val;
sel->r.width = format->width;
@@ -1183,8 +1183,8 @@ static int imx290_get_selection(struct v4l2_subdev *sd,
return 0;
case V4L2_SEL_TGT_CROP_DEFAULT:
- sel->r.top = IMX920_PIXEL_ARRAY_MARGIN_TOP;
- sel->r.left = IMX920_PIXEL_ARRAY_MARGIN_LEFT;
+ sel->r.top = IMX290_PIXEL_ARRAY_MARGIN_TOP;
+ sel->r.left = IMX290_PIXEL_ARRAY_MARGIN_LEFT;
sel->r.width = IMX290_PIXEL_ARRAY_RECORDING_WIDTH;
sel->r.height = IMX290_PIXEL_ARRAY_RECORDING_HEIGHT;
diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
index e47eff672e0c5..8fe3933f31467 100644
--- a/drivers/media/i2c/imx319.c
+++ b/drivers/media/i2c/imx319.c
@@ -70,7 +70,7 @@
#define IMX319_REG_ORIENTATION 0x0101
/* default link frequency and external clock */
-#define IMX319_LINK_FREQ_DEFAULT 482400000
+#define IMX319_LINK_FREQ_DEFAULT 482400000LL
#define IMX319_EXT_CLK 19200000
#define IMX319_LINK_FREQ_INDEX 0
@@ -107,8 +107,7 @@ struct imx319_mode {
struct imx319_hwcfg {
u32 ext_clk; /* sensor external clk */
- s64 *link_freqs; /* CSI-2 link frequencies */
- unsigned int nr_of_link_freqs;
+ unsigned long link_freq_bitmap;
};
struct imx319 {
@@ -129,7 +128,6 @@ struct imx319 {
const struct imx319_mode *cur_mode;
struct imx319_hwcfg *hwcfg;
- s64 link_def_freq; /* CSI-2 link default frequency */
/*
* Mutex for serialized access:
@@ -1654,7 +1652,10 @@ static const char * const imx319_test_pattern_menu[] = {
"Pseudorandom Sequence (PN9)",
};
-/* supported link frequencies */
+/*
+ * When adding more than the one below, make sure the disallowed ones will
+ * actually be disabled in the LINK_FREQ control.
+ */
static const s64 link_freq_menu_items[] = {
IMX319_LINK_FREQ_DEFAULT,
};
@@ -2058,7 +2059,7 @@ imx319_set_pad_format(struct v4l2_subdev *sd,
*framefmt = fmt->format;
} else {
imx319->cur_mode = mode;
- pixel_rate = imx319->link_def_freq * 2 * 4;
+ pixel_rate = IMX319_LINK_FREQ_DEFAULT * 2 * 4;
do_div(pixel_rate, 10);
__v4l2_ctrl_s_ctrl_int64(imx319->pixel_rate, pixel_rate);
/* Update limits and set FPS to default */
@@ -2255,7 +2256,7 @@ static int imx319_init_controls(struct imx319 *imx319)
imx319->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
/* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */
- pixel_rate = imx319->link_def_freq * 2 * 4;
+ pixel_rate = IMX319_LINK_FREQ_DEFAULT * 2 * 4;
do_div(pixel_rate, 10);
/* By default, PIXEL_RATE is read only */
imx319->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &imx319_ctrl_ops,
@@ -2332,7 +2333,6 @@ static struct imx319_hwcfg *imx319_get_hwcfg(struct device *dev)
};
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
- unsigned int i;
int ret;
if (!fwnode)
@@ -2364,24 +2364,14 @@ static struct imx319_hwcfg *imx319_get_hwcfg(struct device *dev)
goto out_err;
}
- dev_dbg(dev, "num of link freqs: %d", bus_cfg.nr_of_link_frequencies);
- if (!bus_cfg.nr_of_link_frequencies) {
- dev_warn(dev, "no link frequencies defined");
- goto out_err;
- }
-
- cfg->nr_of_link_freqs = bus_cfg.nr_of_link_frequencies;
- cfg->link_freqs = devm_kcalloc(dev,
- bus_cfg.nr_of_link_frequencies + 1,
- sizeof(*cfg->link_freqs), GFP_KERNEL);
- if (!cfg->link_freqs)
+ ret = v4l2_link_freq_to_bitmap(dev, bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ link_freq_menu_items,
+ ARRAY_SIZE(link_freq_menu_items),
+ &cfg->link_freq_bitmap);
+ if (ret)
goto out_err;
- for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++) {
- cfg->link_freqs[i] = bus_cfg.link_frequencies[i];
- dev_dbg(dev, "link_freq[%d] = %lld", i, cfg->link_freqs[i]);
- }
-
v4l2_fwnode_endpoint_free(&bus_cfg);
fwnode_handle_put(ep);
return cfg;
@@ -2397,7 +2387,6 @@ static int imx319_probe(struct i2c_client *client)
struct imx319 *imx319;
bool full_power;
int ret;
- u32 i;
imx319 = devm_kzalloc(&client->dev, sizeof(*imx319), GFP_KERNEL);
if (!imx319)
@@ -2425,20 +2414,6 @@ static int imx319_probe(struct i2c_client *client)
goto error_probe;
}
- imx319->link_def_freq = link_freq_menu_items[IMX319_LINK_FREQ_INDEX];
- for (i = 0; i < imx319->hwcfg->nr_of_link_freqs; i++) {
- if (imx319->hwcfg->link_freqs[i] == imx319->link_def_freq) {
- dev_dbg(&client->dev, "link freq index %d matched", i);
- break;
- }
- }
-
- if (i == imx319->hwcfg->nr_of_link_freqs) {
- dev_err(&client->dev, "no link frequency supported");
- ret = -EINVAL;
- goto error_probe;
- }
-
/* Set default mode to max resolution */
imx319->cur_mode = &supported_modes[0];
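The imx319 hunks, like the imx334 and imx335 ones that follow, replace open-coded link-frequency matching with v4l2_link_freq_to_bitmap(), which validates the firmware-provided frequencies against the driver's table and fills a bitmap of usable menu entries. A sketch mirroring the call shape seen in the hunks; the header location and the demo_* wrapper are assumptions:

#include <linux/kernel.h>
#include <media/v4l2-common.h>
#include <media/v4l2-fwnode.h>

static const s64 demo_link_freqs[] = { 482400000LL };

static int demo_match_link_freqs(struct device *dev,
                                 struct v4l2_fwnode_endpoint *bus_cfg,
                                 unsigned long *bitmap)
{
        /*
         * Returns 0 and sets a bit for every driver frequency the firmware
         * also lists; errors out if nothing overlaps.
         */
        return v4l2_link_freq_to_bitmap(dev, bus_cfg->link_frequencies,
                                        bus_cfg->nr_of_link_frequencies,
                                        demo_link_freqs,
                                        ARRAY_SIZE(demo_link_freqs),
                                        bitmap);
}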
diff --git a/drivers/media/i2c/imx334.c b/drivers/media/i2c/imx334.c
index 6725b3e2a73e1..40863d87d3413 100644
--- a/drivers/media/i2c/imx334.c
+++ b/drivers/media/i2c/imx334.c
@@ -136,7 +136,7 @@ struct imx334_mode {
* @vblank: Vertical blanking in lines
* @cur_mode: Pointer to current selected sensor mode
* @mutex: Mutex for serializing sensor controls
- * @menu_skip_mask: Menu skip mask for link_freq_ctrl
+ * @link_freq_bitmap: Menu bitmap for link_freq_ctrl
* @cur_code: current selected format code
*/
struct imx334 {
@@ -158,7 +158,7 @@ struct imx334 {
u32 vblank;
const struct imx334_mode *cur_mode;
struct mutex mutex;
- unsigned long menu_skip_mask;
+ unsigned long link_freq_bitmap;
u32 cur_code;
};
@@ -954,9 +954,9 @@ static int imx334_init_state(struct v4l2_subdev *sd,
imx334_fill_pad_format(imx334, imx334->cur_mode, &fmt);
__v4l2_ctrl_modify_range(imx334->link_freq_ctrl, 0,
- __fls(imx334->menu_skip_mask),
- ~(imx334->menu_skip_mask),
- __ffs(imx334->menu_skip_mask));
+ __fls(imx334->link_freq_bitmap),
+ ~(imx334->link_freq_bitmap),
+ __ffs(imx334->link_freq_bitmap));
mutex_unlock(&imx334->mutex);
@@ -1112,7 +1112,6 @@ static int imx334_parse_hw_config(struct imx334 *imx334)
};
struct fwnode_handle *ep;
unsigned long rate;
- unsigned int i, j;
int ret;
if (!fwnode)
@@ -1157,26 +1156,10 @@ static int imx334_parse_hw_config(struct imx334 *imx334)
goto done_endpoint_free;
}
- if (!bus_cfg.nr_of_link_frequencies) {
- dev_err(imx334->dev, "no link frequencies defined");
- ret = -EINVAL;
- goto done_endpoint_free;
- }
-
- for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++) {
- for (j = 0; j < ARRAY_SIZE(link_freq); j++) {
- if (bus_cfg.link_frequencies[i] == link_freq[j]) {
- set_bit(j, &imx334->menu_skip_mask);
- break;
- }
- }
-
- if (j == ARRAY_SIZE(link_freq)) {
- ret = dev_err_probe(imx334->dev, -EINVAL,
- "no supported link freq found\n");
- goto done_endpoint_free;
- }
- }
+ ret = v4l2_link_freq_to_bitmap(imx334->dev, bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ link_freq, ARRAY_SIZE(link_freq),
+ &imx334->link_freq_bitmap);
done_endpoint_free:
v4l2_fwnode_endpoint_free(&bus_cfg);
@@ -1310,8 +1293,8 @@ static int imx334_init_controls(struct imx334 *imx334)
imx334->link_freq_ctrl = v4l2_ctrl_new_int_menu(ctrl_hdlr,
&imx334_ctrl_ops,
V4L2_CID_LINK_FREQ,
- __fls(imx334->menu_skip_mask),
- __ffs(imx334->menu_skip_mask),
+ __fls(imx334->link_freq_bitmap),
+ __ffs(imx334->link_freq_bitmap),
link_freq);
if (imx334->link_freq_ctrl)
@@ -1386,7 +1369,7 @@ static int imx334_probe(struct i2c_client *client)
}
/* Set default mode to max resolution */
- imx334->cur_mode = &supported_modes[__ffs(imx334->menu_skip_mask)];
+ imx334->cur_mode = &supported_modes[__ffs(imx334->link_freq_bitmap)];
imx334->cur_code = imx334_mbus_codes[0];
imx334->vblank = imx334->cur_mode->vblank;
diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
index 7a37eb327ff40..dab6d080bc4c9 100644
--- a/drivers/media/i2c/imx335.c
+++ b/drivers/media/i2c/imx335.c
@@ -45,11 +45,28 @@
/* Group hold register */
#define IMX335_REG_HOLD 0x3001
+/* Test pattern generator */
+#define IMX335_REG_TPG 0x329e
+#define IMX335_TPG_ALL_000 0
+#define IMX335_TPG_ALL_FFF 1
+#define IMX335_TPG_ALL_555 2
+#define IMX335_TPG_ALL_AAA 3
+#define IMX335_TPG_TOG_555_AAA 4
+#define IMX335_TPG_TOG_AAA_555 5
+#define IMX335_TPG_TOG_000_555 6
+#define IMX335_TPG_TOG_555_000 7
+#define IMX335_TPG_TOG_000_FFF 8
+#define IMX335_TPG_TOG_FFF_000 9
+#define IMX335_TPG_H_COLOR_BARS 10
+#define IMX335_TPG_V_COLOR_BARS 11
+
/* Input clock rate */
#define IMX335_INCLK_RATE 24000000
/* CSI2 HW configuration */
-#define IMX335_LINK_FREQ 594000000
+#define IMX335_LINK_FREQ_594MHz 594000000LL
+#define IMX335_LINK_FREQ_445MHz 445500000LL
+
#define IMX335_NUM_DATA_LANES 4
#define IMX335_REG_MIN 0x00
@@ -99,7 +116,6 @@ static const char * const imx335_supply_name[] = {
* @vblank_min: Minimum vertical blanking in lines
* @vblank_max: Maximum vertical blanking in lines
* @pclk: Sensor pixel clock
- * @link_freq_idx: Link frequency index
* @reg_list: Register list for sensor mode
*/
struct imx335_mode {
@@ -111,7 +127,6 @@ struct imx335_mode {
u32 vblank_min;
u32 vblank_max;
u64 pclk;
- u32 link_freq_idx;
struct imx335_reg_list reg_list;
};
@@ -134,6 +149,7 @@ struct imx335_mode {
* @vblank: Vertical blanking in lines
* @cur_mode: Pointer to current selected sensor mode
* @mutex: Mutex for serializing sensor controls
+ * @link_freq_bitmap: Menu bitmap for link_freq_ctrl
* @cur_mbus_code: Currently selected media bus format code
*/
struct imx335 {
@@ -157,19 +173,46 @@ struct imx335 {
u32 vblank;
const struct imx335_mode *cur_mode;
struct mutex mutex;
+ unsigned long link_freq_bitmap;
u32 cur_mbus_code;
};
-static const s64 link_freq[] = {
- IMX335_LINK_FREQ,
+static const char * const imx335_tpg_menu[] = {
+ "Disabled",
+ "All 000h",
+ "All FFFh",
+ "All 555h",
+ "All AAAh",
+ "Toggle 555/AAAh",
+ "Toggle AAA/555h",
+ "Toggle 000/555h",
+ "Toggle 555/000h",
+ "Toggle 000/FFFh",
+ "Toggle FFF/000h",
+ "Horizontal color bars",
+ "Vertical color bars",
+};
+
+static const int imx335_tpg_val[] = {
+ IMX335_TPG_ALL_000,
+ IMX335_TPG_ALL_000,
+ IMX335_TPG_ALL_FFF,
+ IMX335_TPG_ALL_555,
+ IMX335_TPG_ALL_AAA,
+ IMX335_TPG_TOG_555_AAA,
+ IMX335_TPG_TOG_AAA_555,
+ IMX335_TPG_TOG_000_555,
+ IMX335_TPG_TOG_555_000,
+ IMX335_TPG_TOG_000_FFF,
+ IMX335_TPG_TOG_FFF_000,
+ IMX335_TPG_H_COLOR_BARS,
+ IMX335_TPG_V_COLOR_BARS,
};
/* Sensor mode registers */
static const struct imx335_reg mode_2592x1940_regs[] = {
{0x3000, 0x01},
{0x3002, 0x00},
- {0x300c, 0x3b},
- {0x300d, 0x2a},
{0x3018, 0x04},
{0x302c, 0x3c},
{0x302e, 0x20},
@@ -177,10 +220,6 @@ static const struct imx335_reg mode_2592x1940_regs[] = {
{0x3074, 0xc8},
{0x3076, 0x28},
{0x304c, 0x00},
- {0x314c, 0xc6},
- {0x315a, 0x02},
- {0x3168, 0xa0},
- {0x316a, 0x7e},
{0x31a1, 0x00},
{0x3288, 0x21},
{0x328a, 0x02},
@@ -249,7 +288,7 @@ static const struct imx335_reg mode_2592x1940_regs[] = {
{0x3794, 0x7a},
{0x3796, 0xa1},
{0x37b0, 0x36},
- {0x3a00, 0x01},
+ {0x3a00, 0x00},
};
static const struct imx335_reg raw10_framefmt_regs[] = {
@@ -266,6 +305,65 @@ static const struct imx335_reg raw12_framefmt_regs[] = {
{0x341d, 0x00},
};
+static const struct imx335_reg mipi_data_rate_1188Mbps[] = {
+ {0x300c, 0x3b},
+ {0x300d, 0x2a},
+ {0x314c, 0xc6},
+ {0x314d, 0x00},
+ {0x315a, 0x02},
+ {0x3168, 0xa0},
+ {0x316a, 0x7e},
+ {0x319e, 0x01},
+ {0x3a18, 0x8f},
+ {0x3a1a, 0x4f},
+ {0x3a1c, 0x47},
+ {0x3a1e, 0x37},
+ {0x3a1f, 0x01},
+ {0x3a20, 0x4f},
+ {0x3a22, 0x87},
+ {0x3a24, 0x4f},
+ {0x3a26, 0x7f},
+ {0x3a28, 0x3f},
+};
+
+static const struct imx335_reg mipi_data_rate_891Mbps[] = {
+ {0x300c, 0x3b},
+ {0x300d, 0x2a},
+ {0x314c, 0x29},
+ {0x314d, 0x01},
+ {0x315a, 0x06},
+ {0x3168, 0xa0},
+ {0x316a, 0x7e},
+ {0x319e, 0x02},
+ {0x3a18, 0x7f},
+ {0x3a1a, 0x37},
+ {0x3a1c, 0x37},
+ {0x3a1e, 0xf7},
+ {0x3a20, 0x3f},
+ {0x3a22, 0x6f},
+ {0x3a24, 0x3f},
+ {0x3a26, 0x5f},
+ {0x3a28, 0x2f},
+};
+
+static const s64 link_freq[] = {
+ /* Corresponds to 1188Mbps data lane rate */
+ IMX335_LINK_FREQ_594MHz,
+ /* Corresponds to 891Mbps data lane rate */
+ IMX335_LINK_FREQ_445MHz,
+};
+
+static const struct imx335_reg_list link_freq_reglist[] = {
+ {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_1188Mbps),
+ .regs = mipi_data_rate_1188Mbps,
+ },
+ {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_891Mbps),
+ .regs = mipi_data_rate_891Mbps,
+ },
+};
+
static const u32 imx335_mbus_codes[] = {
MEDIA_BUS_FMT_SRGGB12_1X12,
MEDIA_BUS_FMT_SRGGB10_1X10,
@@ -280,7 +378,6 @@ static const struct imx335_mode supported_mode = {
.vblank_min = 2560,
.vblank_max = 133060,
.pclk = 396000000,
- .link_freq_idx = 0,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_2592x1940_regs),
.regs = mode_2592x1940_regs,
@@ -405,7 +502,8 @@ static int imx335_update_controls(struct imx335 *imx335,
{
int ret;
- ret = __v4l2_ctrl_s_ctrl(imx335->link_freq_ctrl, mode->link_freq_idx);
+ ret = __v4l2_ctrl_s_ctrl(imx335->link_freq_ctrl,
+ __ffs(imx335->link_freq_bitmap));
if (ret)
return ret;
@@ -456,6 +554,49 @@ error_release_group_hold:
return ret;
}
+static int imx335_update_test_pattern(struct imx335 *imx335, u32 pattern_index)
+{
+ int ret;
+
+ if (pattern_index >= ARRAY_SIZE(imx335_tpg_val))
+ return -EINVAL;
+
+ if (pattern_index) {
+ const struct imx335_reg tpg_enable_regs[] = {
+ { 0x3148, 0x10 },
+ { 0x3280, 0x00 },
+ { 0x329c, 0x01 },
+ { 0x32a0, 0x11 },
+ { 0x3302, 0x00 },
+ { 0x3303, 0x00 },
+ { 0x336c, 0x00 },
+ };
+
+ ret = imx335_write_reg(imx335, IMX335_REG_TPG, 1,
+ imx335_tpg_val[pattern_index]);
+ if (ret)
+ return ret;
+
+ ret = imx335_write_regs(imx335, tpg_enable_regs,
+ ARRAY_SIZE(tpg_enable_regs));
+ } else {
+ const struct imx335_reg tpg_disable_regs[] = {
+ { 0x3148, 0x00 },
+ { 0x3280, 0x01 },
+ { 0x329c, 0x00 },
+ { 0x32a0, 0x10 },
+ { 0x3302, 0x32 },
+ { 0x3303, 0x00 },
+ { 0x336c, 0x01 },
+ };
+
+ ret = imx335_write_regs(imx335, tpg_disable_regs,
+ ARRAY_SIZE(tpg_disable_regs));
+ }
+
+ return ret;
+}
+
/**
* imx335_set_ctrl() - Set subdevice control
* @ctrl: pointer to v4l2_ctrl structure
@@ -476,26 +617,31 @@ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl)
u32 exposure;
int ret;
- switch (ctrl->id) {
- case V4L2_CID_VBLANK:
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
imx335->vblank = imx335->vblank_ctrl->val;
dev_dbg(imx335->dev, "Received vblank %u, new lpfr %u\n",
imx335->vblank,
imx335->vblank + imx335->cur_mode->height);
- ret = __v4l2_ctrl_modify_range(imx335->exp_ctrl,
- IMX335_EXPOSURE_MIN,
- imx335->vblank +
- imx335->cur_mode->height -
- IMX335_EXPOSURE_OFFSET,
- 1, IMX335_EXPOSURE_DEFAULT);
- break;
- case V4L2_CID_EXPOSURE:
- /* Set controls only if sensor is in power on state */
- if (!pm_runtime_get_if_in_use(imx335->dev))
- return 0;
+ return __v4l2_ctrl_modify_range(imx335->exp_ctrl,
+ IMX335_EXPOSURE_MIN,
+ imx335->vblank +
+ imx335->cur_mode->height -
+ IMX335_EXPOSURE_OFFSET,
+ 1, IMX335_EXPOSURE_DEFAULT);
+ }
+ /*
+ * Applying V4L2 control value only happens
+ * when power is up for streaming.
+ */
+ if (pm_runtime_get_if_in_use(imx335->dev) == 0)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE:
exposure = ctrl->val;
analog_gain = imx335->again_ctrl->val;
@@ -504,7 +650,9 @@ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl)
ret = imx335_update_exp_gain(imx335, exposure, analog_gain);
- pm_runtime_put(imx335->dev);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = imx335_update_test_pattern(imx335, ctrl->val);
break;
default:
@@ -512,6 +660,8 @@ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl)
ret = -EINVAL;
}
+ pm_runtime_put(imx335->dev);
+
return ret;
}
@@ -691,6 +841,13 @@ static int imx335_init_state(struct v4l2_subdev *sd,
fmt.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
imx335_fill_pad_format(imx335, &supported_mode, &fmt);
+ mutex_lock(&imx335->mutex);
+ __v4l2_ctrl_modify_range(imx335->link_freq_ctrl, 0,
+ __fls(imx335->link_freq_bitmap),
+ ~(imx335->link_freq_bitmap),
+ __ffs(imx335->link_freq_bitmap));
+ mutex_unlock(&imx335->mutex);
+
return imx335_set_pad_format(sd, sd_state, &fmt);
}
@@ -755,6 +912,14 @@ static int imx335_start_streaming(struct imx335 *imx335)
const struct imx335_reg_list *reg_list;
int ret;
+ /* Setup PLL */
+ reg_list = &link_freq_reglist[__ffs(imx335->link_freq_bitmap)];
+ ret = imx335_write_regs(imx335, reg_list->regs, reg_list->num_of_regs);
+ if (ret) {
+ dev_err(imx335->dev, "%s failed to set plls\n", __func__);
+ return ret;
+ }
+
/* Write sensor mode registers */
reg_list = &imx335->cur_mode->reg_list;
ret = imx335_write_regs(imx335, reg_list->regs,
@@ -939,19 +1104,10 @@ static int imx335_parse_hw_config(struct imx335 *imx335)
goto done_endpoint_free;
}
- if (!bus_cfg.nr_of_link_frequencies) {
- dev_err(imx335->dev, "no link frequencies defined\n");
- ret = -EINVAL;
- goto done_endpoint_free;
- }
-
- for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
- if (bus_cfg.link_frequencies[i] == IMX335_LINK_FREQ)
- goto done_endpoint_free;
-
- dev_err(imx335->dev, "no compatible link frequencies found\n");
-
- ret = -EINVAL;
+ ret = v4l2_link_freq_to_bitmap(imx335->dev, bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ link_freq, ARRAY_SIZE(link_freq),
+ &imx335->link_freq_bitmap);
done_endpoint_free:
v4l2_fwnode_endpoint_free(&bus_cfg);
@@ -1055,7 +1211,7 @@ static int imx335_init_controls(struct imx335 *imx335)
u32 lpfr;
int ret;
- ret = v4l2_ctrl_handler_init(ctrl_hdlr, 6);
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 7);
if (ret)
return ret;
@@ -1089,6 +1245,12 @@ static int imx335_init_controls(struct imx335 *imx335)
mode->vblank_max,
1, mode->vblank);
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr,
+ &imx335_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(imx335_tpg_menu) - 1,
+ 0, 0, imx335_tpg_menu);
+
/* Read only controls */
imx335->pclk_ctrl = v4l2_ctrl_new_std(ctrl_hdlr,
&imx335_ctrl_ops,
@@ -1099,9 +1261,8 @@ static int imx335_init_controls(struct imx335 *imx335)
imx335->link_freq_ctrl = v4l2_ctrl_new_int_menu(ctrl_hdlr,
&imx335_ctrl_ops,
V4L2_CID_LINK_FREQ,
- ARRAY_SIZE(link_freq) -
- 1,
- mode->link_freq_idx,
+ __fls(imx335->link_freq_bitmap),
+ __ffs(imx335->link_freq_bitmap),
link_freq);
if (imx335->link_freq_ctrl)
imx335->link_freq_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
index 8c995c58743a2..7e9c2f65fa081 100644
--- a/drivers/media/i2c/imx355.c
+++ b/drivers/media/i2c/imx355.c
@@ -56,7 +56,7 @@
#define IMX355_REG_ORIENTATION 0x0101
/* default link frequency and external clock */
-#define IMX355_LINK_FREQ_DEFAULT 360000000
+#define IMX355_LINK_FREQ_DEFAULT 360000000LL
#define IMX355_EXT_CLK 19200000
#define IMX355_LINK_FREQ_INDEX 0
@@ -93,8 +93,7 @@ struct imx355_mode {
struct imx355_hwcfg {
u32 ext_clk; /* sensor external clk */
- s64 *link_freqs; /* CSI-2 link frequencies */
- unsigned int nr_of_link_freqs;
+ unsigned long link_freq_bitmap;
};
struct imx355 {
@@ -115,7 +114,6 @@ struct imx355 {
const struct imx355_mode *cur_mode;
struct imx355_hwcfg *hwcfg;
- s64 link_def_freq; /* CSI-2 link default frequency */
/*
* Mutex for serialized access:
@@ -879,7 +877,10 @@ static const char * const imx355_test_pattern_menu[] = {
"Pseudorandom Sequence (PN9)",
};
-/* supported link frequencies */
+/*
+ * When adding more link frequencies than the one below, make sure the
+ * unsupported ones are actually disabled in the LINK_FREQ control.
+ */
static const s64 link_freq_menu_items[] = {
IMX355_LINK_FREQ_DEFAULT,
};
@@ -1356,7 +1357,7 @@ imx355_set_pad_format(struct v4l2_subdev *sd,
*framefmt = fmt->format;
} else {
imx355->cur_mode = mode;
- pixel_rate = imx355->link_def_freq * 2 * 4;
+ pixel_rate = IMX355_LINK_FREQ_DEFAULT * 2 * 4;
do_div(pixel_rate, 10);
__v4l2_ctrl_s_ctrl_int64(imx355->pixel_rate, pixel_rate);
/* Update limits and set FPS to default */
@@ -1543,7 +1544,7 @@ static int imx355_init_controls(struct imx355 *imx355)
imx355->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
/* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */
- pixel_rate = imx355->link_def_freq * 2 * 4;
+ pixel_rate = IMX355_LINK_FREQ_DEFAULT * 2 * 4;
do_div(pixel_rate, 10);
/* By default, PIXEL_RATE is read only */
imx355->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &imx355_ctrl_ops,
@@ -1620,7 +1621,6 @@ static struct imx355_hwcfg *imx355_get_hwcfg(struct device *dev)
};
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
- unsigned int i;
int ret;
if (!fwnode)
@@ -1652,24 +1652,14 @@ static struct imx355_hwcfg *imx355_get_hwcfg(struct device *dev)
goto out_err;
}
- dev_dbg(dev, "num of link freqs: %d", bus_cfg.nr_of_link_frequencies);
- if (!bus_cfg.nr_of_link_frequencies) {
- dev_warn(dev, "no link frequencies defined");
- goto out_err;
- }
-
- cfg->nr_of_link_freqs = bus_cfg.nr_of_link_frequencies;
- cfg->link_freqs = devm_kcalloc(dev,
- bus_cfg.nr_of_link_frequencies + 1,
- sizeof(*cfg->link_freqs), GFP_KERNEL);
- if (!cfg->link_freqs)
+ ret = v4l2_link_freq_to_bitmap(dev, bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ link_freq_menu_items,
+ ARRAY_SIZE(link_freq_menu_items),
+ &cfg->link_freq_bitmap);
+ if (ret)
goto out_err;
- for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++) {
- cfg->link_freqs[i] = bus_cfg.link_frequencies[i];
- dev_dbg(dev, "link_freq[%d] = %lld", i, cfg->link_freqs[i]);
- }
-
v4l2_fwnode_endpoint_free(&bus_cfg);
fwnode_handle_put(ep);
return cfg;
@@ -1684,7 +1674,6 @@ static int imx355_probe(struct i2c_client *client)
{
struct imx355 *imx355;
int ret;
- u32 i;
imx355 = devm_kzalloc(&client->dev, sizeof(*imx355), GFP_KERNEL);
if (!imx355)
@@ -1709,20 +1698,6 @@ static int imx355_probe(struct i2c_client *client)
goto error_probe;
}
- imx355->link_def_freq = link_freq_menu_items[IMX355_LINK_FREQ_INDEX];
- for (i = 0; i < imx355->hwcfg->nr_of_link_freqs; i++) {
- if (imx355->hwcfg->link_freqs[i] == imx355->link_def_freq) {
- dev_dbg(&client->dev, "link freq index %d matched", i);
- break;
- }
- }
-
- if (i == imx355->hwcfg->nr_of_link_freqs) {
- dev_err(&client->dev, "no link frequency supported");
- ret = -EINVAL;
- goto error_probe;
- }
-
/* Set default mode to max resolution */
imx355->cur_mode = &supported_modes[0];
diff --git a/drivers/media/i2c/imx415.c b/drivers/media/i2c/imx415.c
index 1e5f20c3ed824..a20b0db330d34 100644
--- a/drivers/media/i2c/imx415.c
+++ b/drivers/media/i2c/imx415.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/videodev2.h>
+#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
@@ -28,76 +29,65 @@
#define IMX415_NUM_CLK_PARAM_REGS 11
-#define IMX415_REG_8BIT(n) ((1 << 16) | (n))
-#define IMX415_REG_16BIT(n) ((2 << 16) | (n))
-#define IMX415_REG_24BIT(n) ((3 << 16) | (n))
-#define IMX415_REG_SIZE_SHIFT 16
-#define IMX415_REG_ADDR_MASK 0xffff
-
-#define IMX415_MODE IMX415_REG_8BIT(0x3000)
+#define IMX415_MODE CCI_REG8(0x3000)
#define IMX415_MODE_OPERATING (0)
#define IMX415_MODE_STANDBY BIT(0)
-#define IMX415_REGHOLD IMX415_REG_8BIT(0x3001)
+#define IMX415_REGHOLD CCI_REG8(0x3001)
#define IMX415_REGHOLD_INVALID (0)
#define IMX415_REGHOLD_VALID BIT(0)
-#define IMX415_XMSTA IMX415_REG_8BIT(0x3002)
+#define IMX415_XMSTA CCI_REG8(0x3002)
#define IMX415_XMSTA_START (0)
#define IMX415_XMSTA_STOP BIT(0)
-#define IMX415_BCWAIT_TIME IMX415_REG_16BIT(0x3008)
-#define IMX415_CPWAIT_TIME IMX415_REG_16BIT(0x300A)
-#define IMX415_WINMODE IMX415_REG_8BIT(0x301C)
-#define IMX415_ADDMODE IMX415_REG_8BIT(0x3022)
-#define IMX415_REVERSE IMX415_REG_8BIT(0x3030)
+#define IMX415_BCWAIT_TIME CCI_REG16_LE(0x3008)
+#define IMX415_CPWAIT_TIME CCI_REG16_LE(0x300a)
+#define IMX415_WINMODE CCI_REG8(0x301c)
+#define IMX415_ADDMODE CCI_REG8(0x3022)
+#define IMX415_REVERSE CCI_REG8(0x3030)
#define IMX415_HREVERSE_SHIFT (0)
#define IMX415_VREVERSE_SHIFT BIT(0)
-#define IMX415_ADBIT IMX415_REG_8BIT(0x3031)
-#define IMX415_MDBIT IMX415_REG_8BIT(0x3032)
-#define IMX415_SYS_MODE IMX415_REG_8BIT(0x3033)
-#define IMX415_OUTSEL IMX415_REG_8BIT(0x30C0)
-#define IMX415_DRV IMX415_REG_8BIT(0x30C1)
-#define IMX415_VMAX IMX415_REG_24BIT(0x3024)
-#define IMX415_HMAX IMX415_REG_16BIT(0x3028)
-#define IMX415_SHR0 IMX415_REG_24BIT(0x3050)
-#define IMX415_GAIN_PCG_0 IMX415_REG_16BIT(0x3090)
+#define IMX415_ADBIT CCI_REG8(0x3031)
+#define IMX415_MDBIT CCI_REG8(0x3032)
+#define IMX415_SYS_MODE CCI_REG8(0x3033)
+#define IMX415_OUTSEL CCI_REG8(0x30c0)
+#define IMX415_DRV CCI_REG8(0x30c1)
+#define IMX415_VMAX CCI_REG24_LE(0x3024)
+#define IMX415_HMAX CCI_REG16_LE(0x3028)
+#define IMX415_SHR0 CCI_REG24_LE(0x3050)
+#define IMX415_GAIN_PCG_0 CCI_REG16_LE(0x3090)
#define IMX415_AGAIN_MIN 0
#define IMX415_AGAIN_MAX 100
#define IMX415_AGAIN_STEP 1
-#define IMX415_BLKLEVEL IMX415_REG_16BIT(0x30E2)
+#define IMX415_BLKLEVEL CCI_REG16_LE(0x30e2)
#define IMX415_BLKLEVEL_DEFAULT 50
-#define IMX415_TPG_EN_DUOUT IMX415_REG_8BIT(0x30E4)
-#define IMX415_TPG_PATSEL_DUOUT IMX415_REG_8BIT(0x30E6)
-#define IMX415_TPG_COLORWIDTH IMX415_REG_8BIT(0x30E8)
-#define IMX415_TESTCLKEN_MIPI IMX415_REG_8BIT(0x3110)
-#define IMX415_INCKSEL1 IMX415_REG_8BIT(0x3115)
-#define IMX415_INCKSEL2 IMX415_REG_8BIT(0x3116)
-#define IMX415_INCKSEL3 IMX415_REG_16BIT(0x3118)
-#define IMX415_INCKSEL4 IMX415_REG_16BIT(0x311A)
-#define IMX415_INCKSEL5 IMX415_REG_8BIT(0x311E)
-#define IMX415_DIG_CLP_MODE IMX415_REG_8BIT(0x32C8)
-#define IMX415_WRJ_OPEN IMX415_REG_8BIT(0x3390)
-#define IMX415_SENSOR_INFO IMX415_REG_16BIT(0x3F12)
-#define IMX415_SENSOR_INFO_MASK 0xFFF
+#define IMX415_TPG_EN_DUOUT CCI_REG8(0x30e4)
+#define IMX415_TPG_PATSEL_DUOUT CCI_REG8(0x30e6)
+#define IMX415_TPG_COLORWIDTH CCI_REG8(0x30e8)
+#define IMX415_TESTCLKEN_MIPI CCI_REG8(0x3110)
+#define IMX415_INCKSEL1 CCI_REG8(0x3115)
+#define IMX415_INCKSEL2 CCI_REG8(0x3116)
+#define IMX415_INCKSEL3 CCI_REG16_LE(0x3118)
+#define IMX415_INCKSEL4 CCI_REG16_LE(0x311a)
+#define IMX415_INCKSEL5 CCI_REG8(0x311e)
+#define IMX415_DIG_CLP_MODE CCI_REG8(0x32c8)
+#define IMX415_WRJ_OPEN CCI_REG8(0x3390)
+#define IMX415_SENSOR_INFO CCI_REG16_LE(0x3f12)
+#define IMX415_SENSOR_INFO_MASK 0xfff
#define IMX415_CHIP_ID 0x514
-#define IMX415_LANEMODE IMX415_REG_16BIT(0x4001)
+#define IMX415_LANEMODE CCI_REG16_LE(0x4001)
#define IMX415_LANEMODE_2 1
#define IMX415_LANEMODE_4 3
-#define IMX415_TXCLKESC_FREQ IMX415_REG_16BIT(0x4004)
-#define IMX415_INCKSEL6 IMX415_REG_8BIT(0x400C)
-#define IMX415_TCLKPOST IMX415_REG_16BIT(0x4018)
-#define IMX415_TCLKPREPARE IMX415_REG_16BIT(0x401A)
-#define IMX415_TCLKTRAIL IMX415_REG_16BIT(0x401C)
-#define IMX415_TCLKZERO IMX415_REG_16BIT(0x401E)
-#define IMX415_THSPREPARE IMX415_REG_16BIT(0x4020)
-#define IMX415_THSZERO IMX415_REG_16BIT(0x4022)
-#define IMX415_THSTRAIL IMX415_REG_16BIT(0x4024)
-#define IMX415_THSEXIT IMX415_REG_16BIT(0x4026)
-#define IMX415_TLPX IMX415_REG_16BIT(0x4028)
-#define IMX415_INCKSEL7 IMX415_REG_8BIT(0x4074)
-
-struct imx415_reg {
- u32 address;
- u32 val;
-};
+#define IMX415_TXCLKESC_FREQ CCI_REG16_LE(0x4004)
+#define IMX415_INCKSEL6 CCI_REG8(0x400c)
+#define IMX415_TCLKPOST CCI_REG16_LE(0x4018)
+#define IMX415_TCLKPREPARE CCI_REG16_LE(0x401a)
+#define IMX415_TCLKTRAIL CCI_REG16_LE(0x401c)
+#define IMX415_TCLKZERO CCI_REG16_LE(0x401e)
+#define IMX415_THSPREPARE CCI_REG16_LE(0x4020)
+#define IMX415_THSZERO CCI_REG16_LE(0x4022)
+#define IMX415_THSTRAIL CCI_REG16_LE(0x4024)
+#define IMX415_THSEXIT CCI_REG16_LE(0x4026)
+#define IMX415_TLPX CCI_REG16_LE(0x4028)
+#define IMX415_INCKSEL7 CCI_REG8(0x4074)
static const char *const imx415_supply_names[] = {
"dvdd",
@@ -118,13 +108,13 @@ static const s64 link_freq_menu_items[] = {
struct imx415_clk_params {
u64 lane_rate;
u64 inck;
- struct imx415_reg regs[IMX415_NUM_CLK_PARAM_REGS];
+ struct cci_reg_sequence regs[IMX415_NUM_CLK_PARAM_REGS];
};
/* INCK Settings - includes all lane rate and INCK dependent registers */
static const struct imx415_clk_params imx415_clk_params[] = {
{
- .lane_rate = 594000000,
+ .lane_rate = 594000000UL,
.inck = 27000000,
.regs[0] = { IMX415_BCWAIT_TIME, 0x05D },
.regs[1] = { IMX415_CPWAIT_TIME, 0x042 },
@@ -139,7 +129,37 @@ static const struct imx415_clk_params imx415_clk_params[] = {
.regs[10] = { IMX415_TXCLKESC_FREQ, 0x06C0 },
},
{
- .lane_rate = 720000000,
+ .lane_rate = 594000000UL,
+ .inck = 37125000,
+ .regs[0] = { IMX415_BCWAIT_TIME, 0x07F },
+ .regs[1] = { IMX415_CPWAIT_TIME, 0x05B },
+ .regs[2] = { IMX415_SYS_MODE, 0x7 },
+ .regs[3] = { IMX415_INCKSEL1, 0x00 },
+ .regs[4] = { IMX415_INCKSEL2, 0x24 },
+ .regs[5] = { IMX415_INCKSEL3, 0x080 },
+ .regs[6] = { IMX415_INCKSEL4, 0x0E0 },
+ .regs[7] = { IMX415_INCKSEL5, 0x24 },
+ .regs[8] = { IMX415_INCKSEL6, 0x0 },
+ .regs[9] = { IMX415_INCKSEL7, 0x1 },
+ .regs[10] = { IMX415_TXCLKESC_FREQ, 0x0984 },
+ },
+ {
+ .lane_rate = 594000000UL,
+ .inck = 74250000,
+ .regs[0] = { IMX415_BCWAIT_TIME, 0x0FF },
+ .regs[1] = { IMX415_CPWAIT_TIME, 0x0B6 },
+ .regs[2] = { IMX415_SYS_MODE, 0x7 },
+ .regs[3] = { IMX415_INCKSEL1, 0x00 },
+ .regs[4] = { IMX415_INCKSEL2, 0x28 },
+ .regs[5] = { IMX415_INCKSEL3, 0x080 },
+ .regs[6] = { IMX415_INCKSEL4, 0x0E0 },
+ .regs[7] = { IMX415_INCKSEL5, 0x28 },
+ .regs[8] = { IMX415_INCKSEL6, 0x0 },
+ .regs[9] = { IMX415_INCKSEL7, 0x1 },
+ .regs[10] = { IMX415_TXCLKESC_FREQ, 0x1290 },
+ },
+ {
+ .lane_rate = 720000000UL,
.inck = 24000000,
.regs[0] = { IMX415_BCWAIT_TIME, 0x054 },
.regs[1] = { IMX415_CPWAIT_TIME, 0x03B },
@@ -154,7 +174,22 @@ static const struct imx415_clk_params imx415_clk_params[] = {
.regs[10] = { IMX415_TXCLKESC_FREQ, 0x0600 },
},
{
- .lane_rate = 891000000,
+ .lane_rate = 720000000UL,
+ .inck = 72000000,
+ .regs[0] = { IMX415_BCWAIT_TIME, 0x0F8 },
+ .regs[1] = { IMX415_CPWAIT_TIME, 0x0B0 },
+ .regs[2] = { IMX415_SYS_MODE, 0x9 },
+ .regs[3] = { IMX415_INCKSEL1, 0x00 },
+ .regs[4] = { IMX415_INCKSEL2, 0x28 },
+ .regs[5] = { IMX415_INCKSEL3, 0x0A0 },
+ .regs[6] = { IMX415_INCKSEL4, 0x0E0 },
+ .regs[7] = { IMX415_INCKSEL5, 0x28 },
+ .regs[8] = { IMX415_INCKSEL6, 0x0 },
+ .regs[9] = { IMX415_INCKSEL7, 0x1 },
+ .regs[10] = { IMX415_TXCLKESC_FREQ, 0x1200 },
+ },
+ {
+ .lane_rate = 891000000UL,
.inck = 27000000,
.regs[0] = { IMX415_BCWAIT_TIME, 0x05D },
.regs[1] = { IMX415_CPWAIT_TIME, 0x042 },
@@ -169,7 +204,37 @@ static const struct imx415_clk_params imx415_clk_params[] = {
.regs[10] = { IMX415_TXCLKESC_FREQ, 0x06C0 },
},
{
- .lane_rate = 1440000000,
+ .lane_rate = 891000000UL,
+ .inck = 37125000,
+ .regs[0] = { IMX415_BCWAIT_TIME, 0x07F },
+ .regs[1] = { IMX415_CPWAIT_TIME, 0x05B },
+ .regs[2] = { IMX415_SYS_MODE, 0x5 },
+ .regs[3] = { IMX415_INCKSEL1, 0x00 },
+ .regs[4] = { IMX415_INCKSEL2, 0x24 },
+ .regs[5] = { IMX415_INCKSEL3, 0x0C0 },
+ .regs[6] = { IMX415_INCKSEL4, 0x0E0 },
+ .regs[7] = { IMX415_INCKSEL5, 0x24 },
+ .regs[8] = { IMX415_INCKSEL6, 0x0 },
+ .regs[9] = { IMX415_INCKSEL7, 0x1 },
+ .regs[10] = { IMX415_TXCLKESC_FREQ, 0x0948 },
+ },
+ {
+ .lane_rate = 891000000UL,
+ .inck = 74250000,
+ .regs[0] = { IMX415_BCWAIT_TIME, 0x0FF },
+ .regs[1] = { IMX415_CPWAIT_TIME, 0x0B6 },
+ .regs[2] = { IMX415_SYS_MODE, 0x5 },
+ .regs[3] = { IMX415_INCKSEL1, 0x00 },
+ .regs[4] = { IMX415_INCKSEL2, 0x28 },
+ .regs[5] = { IMX415_INCKSEL3, 0x0C0 },
+ .regs[6] = { IMX415_INCKSEL4, 0x0E0 },
+ .regs[7] = { IMX415_INCKSEL5, 0x28 },
+ .regs[8] = { IMX415_INCKSEL6, 0x0 },
+ .regs[9] = { IMX415_INCKSEL7, 0x1 },
+ .regs[10] = { IMX415_TXCLKESC_FREQ, 0x1290 },
+ },
+ {
+ .lane_rate = 1440000000UL,
.inck = 24000000,
.regs[0] = { IMX415_BCWAIT_TIME, 0x054 },
.regs[1] = { IMX415_CPWAIT_TIME, 0x03B },
@@ -184,7 +249,22 @@ static const struct imx415_clk_params imx415_clk_params[] = {
.regs[10] = { IMX415_TXCLKESC_FREQ, 0x0600 },
},
{
- .lane_rate = 1485000000,
+ .lane_rate = 1440000000UL,
+ .inck = 72000000,
+ .regs[0] = { IMX415_BCWAIT_TIME, 0x0F8 },
+ .regs[1] = { IMX415_CPWAIT_TIME, 0x0B0 },
+ .regs[2] = { IMX415_SYS_MODE, 0x8 },
+ .regs[3] = { IMX415_INCKSEL1, 0x00 },
+ .regs[4] = { IMX415_INCKSEL2, 0x28 },
+ .regs[5] = { IMX415_INCKSEL3, 0x0A0 },
+ .regs[6] = { IMX415_INCKSEL4, 0x0E0 },
+ .regs[7] = { IMX415_INCKSEL5, 0x28 },
+ .regs[8] = { IMX415_INCKSEL6, 0x1 },
+ .regs[9] = { IMX415_INCKSEL7, 0x0 },
+ .regs[10] = { IMX415_TXCLKESC_FREQ, 0x1200 },
+ },
+ {
+ .lane_rate = 1485000000UL,
.inck = 27000000,
.regs[0] = { IMX415_BCWAIT_TIME, 0x05D },
.regs[1] = { IMX415_CPWAIT_TIME, 0x042 },
@@ -198,10 +278,175 @@ static const struct imx415_clk_params imx415_clk_params[] = {
.regs[9] = { IMX415_INCKSEL7, 0x0 },
.regs[10] = { IMX415_TXCLKESC_FREQ, 0x06C0 },
},
+ {
+ .lane_rate = 1485000000UL,
+ .inck = 37125000,
+ .regs[0] = { IMX415_BCWAIT_TIME, 0x07F },
+ .regs[1] = { IMX415_CPWAIT_TIME, 0x05B },
+ .regs[2] = { IMX415_SYS_MODE, 0x8 },
+ .regs[3] = { IMX415_INCKSEL1, 0x00 },
+ .regs[4] = { IMX415_INCKSEL2, 0x24 },
+ .regs[5] = { IMX415_INCKSEL3, 0x0A0 },
+ .regs[6] = { IMX415_INCKSEL4, 0x0E0 },
+ .regs[7] = { IMX415_INCKSEL5, 0x24 },
+ .regs[8] = { IMX415_INCKSEL6, 0x1 },
+ .regs[9] = { IMX415_INCKSEL7, 0x0 },
+ .regs[10] = { IMX415_TXCLKESC_FREQ, 0x0948 },
+ },
+ {
+ .lane_rate = 1485000000UL,
+ .inck = 74250000,
+ .regs[0] = { IMX415_BCWAIT_TIME, 0x0FF },
+ .regs[1] = { IMX415_CPWAIT_TIME, 0x0B6 },
+ .regs[2] = { IMX415_SYS_MODE, 0x8 },
+ .regs[3] = { IMX415_INCKSEL1, 0x00 },
+ .regs[4] = { IMX415_INCKSEL2, 0x28 },
+ .regs[5] = { IMX415_INCKSEL3, 0x0A0 },
+ .regs[6] = { IMX415_INCKSEL4, 0x0E0 },
+ .regs[7] = { IMX415_INCKSEL5, 0x28 },
+ .regs[8] = { IMX415_INCKSEL6, 0x1 },
+ .regs[9] = { IMX415_INCKSEL7, 0x0 },
+ .regs[10] = { IMX415_TXCLKESC_FREQ, 0x1290 },
+ },
+ {
+ .lane_rate = 1782000000UL,
+ .inck = 27000000,
+ .regs[0] = { IMX415_BCWAIT_TIME, 0x05D },
+ .regs[1] = { IMX415_CPWAIT_TIME, 0x042 },
+ .regs[2] = { IMX415_SYS_MODE, 0x4 },
+ .regs[3] = { IMX415_INCKSEL1, 0x00 },
+ .regs[4] = { IMX415_INCKSEL2, 0x23 },
+ .regs[5] = { IMX415_INCKSEL3, 0x0C6 },
+ .regs[6] = { IMX415_INCKSEL4, 0x0E7 },
+ .regs[7] = { IMX415_INCKSEL5, 0x23 },
+ .regs[8] = { IMX415_INCKSEL6, 0x1 },
+ .regs[9] = { IMX415_INCKSEL7, 0x0 },
+ .regs[10] = { IMX415_TXCLKESC_FREQ, 0x06C0 },
+ },
+ {
+ .lane_rate = 1782000000UL,
+ .inck = 37125000,
+ .regs[0] = { IMX415_BCWAIT_TIME, 0x07F },
+ .regs[1] = { IMX415_CPWAIT_TIME, 0x05B },
+ .regs[2] = { IMX415_SYS_MODE, 0x4 },
+ .regs[3] = { IMX415_INCKSEL1, 0x00 },
+ .regs[4] = { IMX415_INCKSEL2, 0x24 },
+ .regs[5] = { IMX415_INCKSEL3, 0x0C0 },
+ .regs[6] = { IMX415_INCKSEL4, 0x0E0 },
+ .regs[7] = { IMX415_INCKSEL5, 0x24 },
+ .regs[8] = { IMX415_INCKSEL6, 0x1 },
+ .regs[9] = { IMX415_INCKSEL7, 0x0 },
+ .regs[10] = { IMX415_TXCLKESC_FREQ, 0x0948 },
+ },
+ {
+ .lane_rate = 1782000000UL,
+ .inck = 74250000,
+ .regs[0] = { IMX415_BCWAIT_TIME, 0x0FF },
+ .regs[1] = { IMX415_CPWAIT_TIME, 0x0B6 },
+ .regs[2] = { IMX415_SYS_MODE, 0x4 },
+ .regs[3] = { IMX415_INCKSEL1, 0x00 },
+ .regs[4] = { IMX415_INCKSEL2, 0x28 },
+ .regs[5] = { IMX415_INCKSEL3, 0x0C0 },
+ .regs[6] = { IMX415_INCKSEL4, 0x0E0 },
+ .regs[7] = { IMX415_INCKSEL5, 0x28 },
+ .regs[8] = { IMX415_INCKSEL6, 0x1 },
+ .regs[9] = { IMX415_INCKSEL7, 0x0 },
+ .regs[10] = { IMX415_TXCLKESC_FREQ, 0x1290 },
+ },
+ {
+ .lane_rate = 2079000000UL,
+ .inck = 27000000,
+ .regs[0] = { IMX415_BCWAIT_TIME, 0x05D },
+ .regs[1] = { IMX415_CPWAIT_TIME, 0x042 },
+ .regs[2] = { IMX415_SYS_MODE, 0x2 },
+ .regs[3] = { IMX415_INCKSEL1, 0x00 },
+ .regs[4] = { IMX415_INCKSEL2, 0x23 },
+ .regs[5] = { IMX415_INCKSEL3, 0x0E7 },
+ .regs[6] = { IMX415_INCKSEL4, 0x0E7 },
+ .regs[7] = { IMX415_INCKSEL5, 0x23 },
+ .regs[8] = { IMX415_INCKSEL6, 0x1 },
+ .regs[9] = { IMX415_INCKSEL7, 0x0 },
+ .regs[10] = { IMX415_TXCLKESC_FREQ, 0x06C0 },
+ },
+ {
+ .lane_rate = 2079000000UL,
+ .inck = 37125000,
+ .regs[0] = { IMX415_BCWAIT_TIME, 0x07F },
+ .regs[1] = { IMX415_CPWAIT_TIME, 0x05B },
+ .regs[2] = { IMX415_SYS_MODE, 0x2 },
+ .regs[3] = { IMX415_INCKSEL1, 0x00 },
+ .regs[4] = { IMX415_INCKSEL2, 0x24 },
+ .regs[5] = { IMX415_INCKSEL3, 0x0E0 },
+ .regs[6] = { IMX415_INCKSEL4, 0x0E0 },
+ .regs[7] = { IMX415_INCKSEL5, 0x24 },
+ .regs[8] = { IMX415_INCKSEL6, 0x1 },
+ .regs[9] = { IMX415_INCKSEL7, 0x0 },
+ .regs[10] = { IMX415_TXCLKESC_FREQ, 0x0948 },
+ },
+ {
+ .lane_rate = 2079000000UL,
+ .inck = 74250000,
+ .regs[0] = { IMX415_BCWAIT_TIME, 0x0FF },
+ .regs[1] = { IMX415_CPWAIT_TIME, 0x0B6 },
+ .regs[2] = { IMX415_SYS_MODE, 0x2 },
+ .regs[3] = { IMX415_INCKSEL1, 0x00 },
+ .regs[4] = { IMX415_INCKSEL2, 0x28 },
+ .regs[5] = { IMX415_INCKSEL3, 0x0E0 },
+ .regs[6] = { IMX415_INCKSEL4, 0x0E0 },
+ .regs[7] = { IMX415_INCKSEL5, 0x28 },
+ .regs[8] = { IMX415_INCKSEL6, 0x1 },
+ .regs[9] = { IMX415_INCKSEL7, 0x0 },
+ .regs[10] = { IMX415_TXCLKESC_FREQ, 0x1290 },
+ },
+ {
+ .lane_rate = 2376000000UL,
+ .inck = 27000000,
+ .regs[0] = { IMX415_BCWAIT_TIME, 0x05D },
+ .regs[1] = { IMX415_CPWAIT_TIME, 0x042 },
+ .regs[2] = { IMX415_SYS_MODE, 0x0 },
+ .regs[3] = { IMX415_INCKSEL1, 0x00 },
+ .regs[4] = { IMX415_INCKSEL2, 0x23 },
+ .regs[5] = { IMX415_INCKSEL3, 0x108 },
+ .regs[6] = { IMX415_INCKSEL4, 0x0E7 },
+ .regs[7] = { IMX415_INCKSEL5, 0x23 },
+ .regs[8] = { IMX415_INCKSEL6, 0x1 },
+ .regs[9] = { IMX415_INCKSEL7, 0x0 },
+ .regs[10] = { IMX415_TXCLKESC_FREQ, 0x06C0 },
+ },
+ {
+ .lane_rate = 2376000000UL,
+ .inck = 37125000,
+ .regs[0] = { IMX415_BCWAIT_TIME, 0x07F },
+ .regs[1] = { IMX415_CPWAIT_TIME, 0x05B },
+ .regs[2] = { IMX415_SYS_MODE, 0x0 },
+ .regs[3] = { IMX415_INCKSEL1, 0x00 },
+ .regs[4] = { IMX415_INCKSEL2, 0x24 },
+ .regs[5] = { IMX415_INCKSEL3, 0x100 },
+ .regs[6] = { IMX415_INCKSEL4, 0x0E0 },
+ .regs[7] = { IMX415_INCKSEL5, 0x24 },
+ .regs[8] = { IMX415_INCKSEL6, 0x1 },
+ .regs[9] = { IMX415_INCKSEL7, 0x0 },
+ .regs[10] = { IMX415_TXCLKESC_FREQ, 0x0948 },
+ },
+ {
+ .lane_rate = 2376000000UL,
+ .inck = 74250000,
+ .regs[0] = { IMX415_BCWAIT_TIME, 0x0FF },
+ .regs[1] = { IMX415_CPWAIT_TIME, 0x0B6 },
+ .regs[2] = { IMX415_SYS_MODE, 0x0 },
+ .regs[3] = { IMX415_INCKSEL1, 0x00 },
+ .regs[4] = { IMX415_INCKSEL2, 0x28 },
+ .regs[5] = { IMX415_INCKSEL3, 0x100 },
+ .regs[6] = { IMX415_INCKSEL4, 0x0E0 },
+ .regs[7] = { IMX415_INCKSEL5, 0x28 },
+ .regs[8] = { IMX415_INCKSEL6, 0x1 },
+ .regs[9] = { IMX415_INCKSEL7, 0x0 },
+ .regs[10] = { IMX415_TXCLKESC_FREQ, 0x1290 },
+ },
};
/* all-pixel 2-lane 720 Mbps 15.74 Hz mode */
-static const struct imx415_reg imx415_mode_2_720[] = {
+static const struct cci_reg_sequence imx415_mode_2_720[] = {
{ IMX415_VMAX, 0x08CA },
{ IMX415_HMAX, 0x07F0 },
{ IMX415_LANEMODE, IMX415_LANEMODE_2 },
@@ -217,7 +462,7 @@ static const struct imx415_reg imx415_mode_2_720[] = {
};
/* all-pixel 2-lane 1440 Mbps 30.01 Hz mode */
-static const struct imx415_reg imx415_mode_2_1440[] = {
+static const struct cci_reg_sequence imx415_mode_2_1440[] = {
{ IMX415_VMAX, 0x08CA },
{ IMX415_HMAX, 0x042A },
{ IMX415_LANEMODE, IMX415_LANEMODE_2 },
@@ -233,7 +478,7 @@ static const struct imx415_reg imx415_mode_2_1440[] = {
};
/* all-pixel 4-lane 891 Mbps 30 Hz mode */
-static const struct imx415_reg imx415_mode_4_891[] = {
+static const struct cci_reg_sequence imx415_mode_4_891[] = {
{ IMX415_VMAX, 0x08CA },
{ IMX415_HMAX, 0x044C },
{ IMX415_LANEMODE, IMX415_LANEMODE_4 },
@@ -250,7 +495,7 @@ static const struct imx415_reg imx415_mode_4_891[] = {
struct imx415_mode_reg_list {
u32 num_of_regs;
- const struct imx415_reg *regs;
+ const struct cci_reg_sequence *regs;
};
/*
@@ -323,11 +568,6 @@ static const struct imx415_mode supported_modes[] = {
},
};
-static const struct regmap_config imx415_regmap_config = {
- .reg_bits = 16,
- .val_bits = 8,
-};
-
static const char *const imx415_test_pattern_menu[] = {
"disabled",
"solid black",
@@ -369,7 +609,7 @@ struct imx415 {
* This table includes fixed register settings and a bunch of undocumented
* registers that have to be set to another value than default.
*/
-static const struct imx415_reg imx415_init_table[] = {
+static const struct cci_reg_sequence imx415_init_table[] = {
/* use all-pixel readout mode, no flip */
{ IMX415_WINMODE, 0x00 },
{ IMX415_ADDMODE, 0x00 },
@@ -382,77 +622,77 @@ static const struct imx415_reg imx415_init_table[] = {
{ IMX415_DRV, 0x00 },
/* SONY magic registers */
- { IMX415_REG_8BIT(0x32D4), 0x21 },
- { IMX415_REG_8BIT(0x32EC), 0xA1 },
- { IMX415_REG_8BIT(0x3452), 0x7F },
- { IMX415_REG_8BIT(0x3453), 0x03 },
- { IMX415_REG_8BIT(0x358A), 0x04 },
- { IMX415_REG_8BIT(0x35A1), 0x02 },
- { IMX415_REG_8BIT(0x36BC), 0x0C },
- { IMX415_REG_8BIT(0x36CC), 0x53 },
- { IMX415_REG_8BIT(0x36CD), 0x00 },
- { IMX415_REG_8BIT(0x36CE), 0x3C },
- { IMX415_REG_8BIT(0x36D0), 0x8C },
- { IMX415_REG_8BIT(0x36D1), 0x00 },
- { IMX415_REG_8BIT(0x36D2), 0x71 },
- { IMX415_REG_8BIT(0x36D4), 0x3C },
- { IMX415_REG_8BIT(0x36D6), 0x53 },
- { IMX415_REG_8BIT(0x36D7), 0x00 },
- { IMX415_REG_8BIT(0x36D8), 0x71 },
- { IMX415_REG_8BIT(0x36DA), 0x8C },
- { IMX415_REG_8BIT(0x36DB), 0x00 },
- { IMX415_REG_8BIT(0x3724), 0x02 },
- { IMX415_REG_8BIT(0x3726), 0x02 },
- { IMX415_REG_8BIT(0x3732), 0x02 },
- { IMX415_REG_8BIT(0x3734), 0x03 },
- { IMX415_REG_8BIT(0x3736), 0x03 },
- { IMX415_REG_8BIT(0x3742), 0x03 },
- { IMX415_REG_8BIT(0x3862), 0xE0 },
- { IMX415_REG_8BIT(0x38CC), 0x30 },
- { IMX415_REG_8BIT(0x38CD), 0x2F },
- { IMX415_REG_8BIT(0x395C), 0x0C },
- { IMX415_REG_8BIT(0x3A42), 0xD1 },
- { IMX415_REG_8BIT(0x3A4C), 0x77 },
- { IMX415_REG_8BIT(0x3AE0), 0x02 },
- { IMX415_REG_8BIT(0x3AEC), 0x0C },
- { IMX415_REG_8BIT(0x3B00), 0x2E },
- { IMX415_REG_8BIT(0x3B06), 0x29 },
- { IMX415_REG_8BIT(0x3B98), 0x25 },
- { IMX415_REG_8BIT(0x3B99), 0x21 },
- { IMX415_REG_8BIT(0x3B9B), 0x13 },
- { IMX415_REG_8BIT(0x3B9C), 0x13 },
- { IMX415_REG_8BIT(0x3B9D), 0x13 },
- { IMX415_REG_8BIT(0x3B9E), 0x13 },
- { IMX415_REG_8BIT(0x3BA1), 0x00 },
- { IMX415_REG_8BIT(0x3BA2), 0x06 },
- { IMX415_REG_8BIT(0x3BA3), 0x0B },
- { IMX415_REG_8BIT(0x3BA4), 0x10 },
- { IMX415_REG_8BIT(0x3BA5), 0x14 },
- { IMX415_REG_8BIT(0x3BA6), 0x18 },
- { IMX415_REG_8BIT(0x3BA7), 0x1A },
- { IMX415_REG_8BIT(0x3BA8), 0x1A },
- { IMX415_REG_8BIT(0x3BA9), 0x1A },
- { IMX415_REG_8BIT(0x3BAC), 0xED },
- { IMX415_REG_8BIT(0x3BAD), 0x01 },
- { IMX415_REG_8BIT(0x3BAE), 0xF6 },
- { IMX415_REG_8BIT(0x3BAF), 0x02 },
- { IMX415_REG_8BIT(0x3BB0), 0xA2 },
- { IMX415_REG_8BIT(0x3BB1), 0x03 },
- { IMX415_REG_8BIT(0x3BB2), 0xE0 },
- { IMX415_REG_8BIT(0x3BB3), 0x03 },
- { IMX415_REG_8BIT(0x3BB4), 0xE0 },
- { IMX415_REG_8BIT(0x3BB5), 0x03 },
- { IMX415_REG_8BIT(0x3BB6), 0xE0 },
- { IMX415_REG_8BIT(0x3BB7), 0x03 },
- { IMX415_REG_8BIT(0x3BB8), 0xE0 },
- { IMX415_REG_8BIT(0x3BBA), 0xE0 },
- { IMX415_REG_8BIT(0x3BBC), 0xDA },
- { IMX415_REG_8BIT(0x3BBE), 0x88 },
- { IMX415_REG_8BIT(0x3BC0), 0x44 },
- { IMX415_REG_8BIT(0x3BC2), 0x7B },
- { IMX415_REG_8BIT(0x3BC4), 0xA2 },
- { IMX415_REG_8BIT(0x3BC8), 0xBD },
- { IMX415_REG_8BIT(0x3BCA), 0xBD },
+ { CCI_REG8(0x32D4), 0x21 },
+ { CCI_REG8(0x32EC), 0xA1 },
+ { CCI_REG8(0x3452), 0x7F },
+ { CCI_REG8(0x3453), 0x03 },
+ { CCI_REG8(0x358A), 0x04 },
+ { CCI_REG8(0x35A1), 0x02 },
+ { CCI_REG8(0x36BC), 0x0C },
+ { CCI_REG8(0x36CC), 0x53 },
+ { CCI_REG8(0x36CD), 0x00 },
+ { CCI_REG8(0x36CE), 0x3C },
+ { CCI_REG8(0x36D0), 0x8C },
+ { CCI_REG8(0x36D1), 0x00 },
+ { CCI_REG8(0x36D2), 0x71 },
+ { CCI_REG8(0x36D4), 0x3C },
+ { CCI_REG8(0x36D6), 0x53 },
+ { CCI_REG8(0x36D7), 0x00 },
+ { CCI_REG8(0x36D8), 0x71 },
+ { CCI_REG8(0x36DA), 0x8C },
+ { CCI_REG8(0x36DB), 0x00 },
+ { CCI_REG8(0x3724), 0x02 },
+ { CCI_REG8(0x3726), 0x02 },
+ { CCI_REG8(0x3732), 0x02 },
+ { CCI_REG8(0x3734), 0x03 },
+ { CCI_REG8(0x3736), 0x03 },
+ { CCI_REG8(0x3742), 0x03 },
+ { CCI_REG8(0x3862), 0xE0 },
+ { CCI_REG8(0x38CC), 0x30 },
+ { CCI_REG8(0x38CD), 0x2F },
+ { CCI_REG8(0x395C), 0x0C },
+ { CCI_REG8(0x3A42), 0xD1 },
+ { CCI_REG8(0x3A4C), 0x77 },
+ { CCI_REG8(0x3AE0), 0x02 },
+ { CCI_REG8(0x3AEC), 0x0C },
+ { CCI_REG8(0x3B00), 0x2E },
+ { CCI_REG8(0x3B06), 0x29 },
+ { CCI_REG8(0x3B98), 0x25 },
+ { CCI_REG8(0x3B99), 0x21 },
+ { CCI_REG8(0x3B9B), 0x13 },
+ { CCI_REG8(0x3B9C), 0x13 },
+ { CCI_REG8(0x3B9D), 0x13 },
+ { CCI_REG8(0x3B9E), 0x13 },
+ { CCI_REG8(0x3BA1), 0x00 },
+ { CCI_REG8(0x3BA2), 0x06 },
+ { CCI_REG8(0x3BA3), 0x0B },
+ { CCI_REG8(0x3BA4), 0x10 },
+ { CCI_REG8(0x3BA5), 0x14 },
+ { CCI_REG8(0x3BA6), 0x18 },
+ { CCI_REG8(0x3BA7), 0x1A },
+ { CCI_REG8(0x3BA8), 0x1A },
+ { CCI_REG8(0x3BA9), 0x1A },
+ { CCI_REG8(0x3BAC), 0xED },
+ { CCI_REG8(0x3BAD), 0x01 },
+ { CCI_REG8(0x3BAE), 0xF6 },
+ { CCI_REG8(0x3BAF), 0x02 },
+ { CCI_REG8(0x3BB0), 0xA2 },
+ { CCI_REG8(0x3BB1), 0x03 },
+ { CCI_REG8(0x3BB2), 0xE0 },
+ { CCI_REG8(0x3BB3), 0x03 },
+ { CCI_REG8(0x3BB4), 0xE0 },
+ { CCI_REG8(0x3BB5), 0x03 },
+ { CCI_REG8(0x3BB6), 0xE0 },
+ { CCI_REG8(0x3BB7), 0x03 },
+ { CCI_REG8(0x3BB8), 0xE0 },
+ { CCI_REG8(0x3BBA), 0xE0 },
+ { CCI_REG8(0x3BBC), 0xDA },
+ { CCI_REG8(0x3BBE), 0x88 },
+ { CCI_REG8(0x3BC0), 0x44 },
+ { CCI_REG8(0x3BC2), 0x7B },
+ { CCI_REG8(0x3BC4), 0xA2 },
+ { CCI_REG8(0x3BC8), 0xBD },
+ { CCI_REG8(0x3BCA), 0xBD },
};
static inline struct imx415 *to_imx415(struct v4l2_subdev *sd)
@@ -460,74 +700,26 @@ static inline struct imx415 *to_imx415(struct v4l2_subdev *sd)
return container_of(sd, struct imx415, subdev);
}
-static int imx415_read(struct imx415 *sensor, u32 addr)
-{
- u8 data[3] = { 0 };
- int ret;
-
- ret = regmap_raw_read(sensor->regmap, addr & IMX415_REG_ADDR_MASK, data,
- (addr >> IMX415_REG_SIZE_SHIFT) & 3);
- if (ret < 0)
- return ret;
-
- return (data[2] << 16) | (data[1] << 8) | data[0];
-}
-
-static int imx415_write(struct imx415 *sensor, u32 addr, u32 value)
-{
- u8 data[3] = { value & 0xff, (value >> 8) & 0xff, value >> 16 };
- int ret;
-
- ret = regmap_raw_write(sensor->regmap, addr & IMX415_REG_ADDR_MASK,
- data, (addr >> IMX415_REG_SIZE_SHIFT) & 3);
- if (ret < 0)
- dev_err_ratelimited(sensor->dev,
- "%u-bit write to 0x%04x failed: %d\n",
- ((addr >> IMX415_REG_SIZE_SHIFT) & 3) * 8,
- addr & IMX415_REG_ADDR_MASK, ret);
-
- return 0;
-}
-
static int imx415_set_testpattern(struct imx415 *sensor, int val)
{
- int ret;
+ int ret = 0;
if (val) {
- ret = imx415_write(sensor, IMX415_BLKLEVEL, 0x00);
- if (ret)
- return ret;
- ret = imx415_write(sensor, IMX415_TPG_EN_DUOUT, 0x01);
- if (ret)
- return ret;
- ret = imx415_write(sensor, IMX415_TPG_PATSEL_DUOUT, val - 1);
- if (ret)
- return ret;
- ret = imx415_write(sensor, IMX415_TPG_COLORWIDTH, 0x01);
- if (ret)
- return ret;
- ret = imx415_write(sensor, IMX415_TESTCLKEN_MIPI, 0x20);
- if (ret)
- return ret;
- ret = imx415_write(sensor, IMX415_DIG_CLP_MODE, 0x00);
- if (ret)
- return ret;
- ret = imx415_write(sensor, IMX415_WRJ_OPEN, 0x00);
+ cci_write(sensor->regmap, IMX415_BLKLEVEL, 0x00, &ret);
+ cci_write(sensor->regmap, IMX415_TPG_EN_DUOUT, 0x01, &ret);
+ cci_write(sensor->regmap, IMX415_TPG_PATSEL_DUOUT,
+ val - 1, &ret);
+ cci_write(sensor->regmap, IMX415_TPG_COLORWIDTH, 0x01, &ret);
+ cci_write(sensor->regmap, IMX415_TESTCLKEN_MIPI, 0x20, &ret);
+ cci_write(sensor->regmap, IMX415_DIG_CLP_MODE, 0x00, &ret);
+ cci_write(sensor->regmap, IMX415_WRJ_OPEN, 0x00, &ret);
} else {
- ret = imx415_write(sensor, IMX415_BLKLEVEL,
- IMX415_BLKLEVEL_DEFAULT);
- if (ret)
- return ret;
- ret = imx415_write(sensor, IMX415_TPG_EN_DUOUT, 0x00);
- if (ret)
- return ret;
- ret = imx415_write(sensor, IMX415_TESTCLKEN_MIPI, 0x00);
- if (ret)
- return ret;
- ret = imx415_write(sensor, IMX415_DIG_CLP_MODE, 0x01);
- if (ret)
- return ret;
- ret = imx415_write(sensor, IMX415_WRJ_OPEN, 0x01);
+ cci_write(sensor->regmap, IMX415_BLKLEVEL,
+ IMX415_BLKLEVEL_DEFAULT, &ret);
+ cci_write(sensor->regmap, IMX415_TPG_EN_DUOUT, 0x00, &ret);
+ cci_write(sensor->regmap, IMX415_TESTCLKEN_MIPI, 0x00, &ret);
+ cci_write(sensor->regmap, IMX415_DIG_CLP_MODE, 0x01, &ret);
+ cci_write(sensor->regmap, IMX415_WRJ_OPEN, 0x01, &ret);
}
return 0;
}
@@ -553,19 +745,21 @@ static int imx415_s_ctrl(struct v4l2_ctrl *ctrl)
/* clamp the exposure value to VMAX. */
vmax = format->height + sensor->vblank->cur.val;
ctrl->val = min_t(int, ctrl->val, vmax);
- ret = imx415_write(sensor, IMX415_SHR0, vmax - ctrl->val);
+ ret = cci_write(sensor->regmap, IMX415_SHR0,
+ vmax - ctrl->val, NULL);
break;
case V4L2_CID_ANALOGUE_GAIN:
/* analogue gain in 0.3 dB step size */
- ret = imx415_write(sensor, IMX415_GAIN_PCG_0, ctrl->val);
+ ret = cci_write(sensor->regmap, IMX415_GAIN_PCG_0,
+ ctrl->val, NULL);
break;
case V4L2_CID_HFLIP:
case V4L2_CID_VFLIP:
flip = (sensor->hflip->val << IMX415_HREVERSE_SHIFT) |
(sensor->vflip->val << IMX415_VREVERSE_SHIFT);
- ret = imx415_write(sensor, IMX415_REVERSE, flip);
+ ret = cci_write(sensor->regmap, IMX415_REVERSE, flip, NULL);
break;
case V4L2_CID_TEST_PATTERN:
@@ -679,8 +873,6 @@ static int imx415_ctrls_init(struct imx415 *sensor)
static int imx415_set_mode(struct imx415 *sensor, int mode)
{
- const struct imx415_reg *reg;
- unsigned int i;
int ret = 0;
if (mode >= ARRAY_SIZE(supported_modes)) {
@@ -688,34 +880,29 @@ static int imx415_set_mode(struct imx415 *sensor, int mode)
return -EINVAL;
}
- for (i = 0; i < supported_modes[mode].reg_list.num_of_regs; ++i) {
- reg = &supported_modes[mode].reg_list.regs[i];
- ret = imx415_write(sensor, reg->address, reg->val);
- if (ret)
- return ret;
- }
+ cci_multi_reg_write(sensor->regmap,
+ supported_modes[mode].reg_list.regs,
+ supported_modes[mode].reg_list.num_of_regs,
+ &ret);
- for (i = 0; i < IMX415_NUM_CLK_PARAM_REGS; ++i) {
- reg = &sensor->clk_params->regs[i];
- ret = imx415_write(sensor, reg->address, reg->val);
- if (ret)
- return ret;
- }
+ cci_multi_reg_write(sensor->regmap,
+ sensor->clk_params->regs,
+ IMX415_NUM_CLK_PARAM_REGS,
+ &ret);
return 0;
}
static int imx415_setup(struct imx415 *sensor, struct v4l2_subdev_state *state)
{
- unsigned int i;
int ret;
- for (i = 0; i < ARRAY_SIZE(imx415_init_table); ++i) {
- ret = imx415_write(sensor, imx415_init_table[i].address,
- imx415_init_table[i].val);
- if (ret)
- return ret;
- }
+ ret = cci_multi_reg_write(sensor->regmap,
+ imx415_init_table,
+ ARRAY_SIZE(imx415_init_table),
+ NULL);
+ if (ret)
+ return ret;
return imx415_set_mode(sensor, sensor->cur_mode);
}
@@ -724,7 +911,8 @@ static int imx415_wakeup(struct imx415 *sensor)
{
int ret;
- ret = imx415_write(sensor, IMX415_MODE, IMX415_MODE_OPERATING);
+ ret = cci_write(sensor->regmap, IMX415_MODE,
+ IMX415_MODE_OPERATING, NULL);
if (ret)
return ret;
@@ -743,21 +931,18 @@ static int imx415_stream_on(struct imx415 *sensor)
int ret;
ret = imx415_wakeup(sensor);
- if (ret)
- return ret;
-
- return imx415_write(sensor, IMX415_XMSTA, IMX415_XMSTA_START);
+ return cci_write(sensor->regmap, IMX415_XMSTA,
+ IMX415_XMSTA_START, &ret);
}
static int imx415_stream_off(struct imx415 *sensor)
{
int ret;
- ret = imx415_write(sensor, IMX415_XMSTA, IMX415_XMSTA_STOP);
- if (ret)
- return ret;
-
- return imx415_write(sensor, IMX415_MODE, IMX415_MODE_STANDBY);
+ ret = cci_write(sensor->regmap, IMX415_XMSTA,
+ IMX415_XMSTA_STOP, NULL);
+ return cci_write(sensor->regmap, IMX415_MODE,
+ IMX415_MODE_STANDBY, &ret);
}
static int imx415_s_stream(struct v4l2_subdev *sd, int enable)
@@ -992,6 +1177,7 @@ static void imx415_power_off(struct imx415 *sensor)
static int imx415_identify_model(struct imx415 *sensor)
{
int model, ret;
+ u64 chip_id;
/*
* While most registers can be read when the sensor is in standby, this
@@ -1002,14 +1188,14 @@ static int imx415_identify_model(struct imx415 *sensor)
return dev_err_probe(sensor->dev, ret,
"failed to get sensor out of standby\n");
- ret = imx415_read(sensor, IMX415_SENSOR_INFO);
+ ret = cci_read(sensor->regmap, IMX415_SENSOR_INFO, &chip_id, NULL);
if (ret < 0) {
dev_err_probe(sensor->dev, ret,
"failed to read sensor information\n");
goto done;
}
- model = ret & IMX415_SENSOR_INFO_MASK;
+ model = chip_id & IMX415_SENSOR_INFO_MASK;
switch (model) {
case IMX415_CHIP_ID:
@@ -1024,7 +1210,7 @@ static int imx415_identify_model(struct imx415 *sensor)
ret = 0;
done:
- imx415_write(sensor, IMX415_MODE, IMX415_MODE_STANDBY);
+ cci_write(sensor->regmap, IMX415_MODE, IMX415_MODE_STANDBY, &ret);
return ret;
}
@@ -1173,7 +1359,7 @@ static int imx415_probe(struct i2c_client *client)
if (ret)
return ret;
- sensor->regmap = devm_regmap_init_i2c(client, &imx415_regmap_config);
+ sensor->regmap = devm_cci_regmap_init_i2c(client, 16);
if (IS_ERR(sensor->regmap))
return PTR_ERR(sensor->regmap);
diff --git a/drivers/media/i2c/isl7998x.c b/drivers/media/i2c/isl7998x.c
index 89e13ebbce0c2..c7089035bbc10 100644
--- a/drivers/media/i2c/isl7998x.c
+++ b/drivers/media/i2c/isl7998x.c
@@ -1337,7 +1337,7 @@ static const struct regmap_config isl7998x_regmap = {
.rd_table = &isl7998x_readable_table,
.wr_table = &isl7998x_writeable_table,
.volatile_table = &isl7998x_volatile_table,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static int isl7998x_mc_init(struct isl7998x *isl7998x)
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
index 70c2a2948fd4e..cd73d2096ae45 100644
--- a/drivers/media/i2c/max2175.c
+++ b/drivers/media/i2c/max2175.c
@@ -257,7 +257,7 @@ static const struct regmap_config max2175_regmap_config = {
.reg_defaults = max2175_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(max2175_reg_defaults),
.volatile_table = &max2175_volatile_regs,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
struct max2175 {
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index 0ed8561edfee6..599a5bc7cbb35 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -309,23 +309,15 @@ static void msp_wake_thread(struct i2c_client *client)
wake_up_interruptible(&state->wq);
}
-int msp_sleep(struct msp_state *state, int timeout)
+int msp_sleep(struct msp_state *state, int msec)
{
- DECLARE_WAITQUEUE(wait, current);
-
- add_wait_queue(&state->wq, &wait);
- if (!kthread_should_stop()) {
- if (timeout < 0) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- } else {
- schedule_timeout_interruptible
- (msecs_to_jiffies(timeout));
- }
- }
+ long timeout;
+
+ timeout = msec < 0 ? MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(msec);
+
+ wait_event_freezable_timeout(state->wq, kthread_should_stop() ||
+ state->restart, timeout);
- remove_wait_queue(&state->wq, &wait);
- try_to_freeze();
return state->restart;
}
diff --git a/drivers/media/i2c/msp3400-driver.h b/drivers/media/i2c/msp3400-driver.h
index 2bb9d5ff1bbde..7d391714ea52f 100644
--- a/drivers/media/i2c/msp3400-driver.h
+++ b/drivers/media/i2c/msp3400-driver.h
@@ -134,7 +134,7 @@ int msp_read_dsp(struct i2c_client *client, int addr);
int msp_reset(struct i2c_client *client);
void msp_set_scart(struct i2c_client *client, int in, int out);
void msp_update_volume(struct msp_state *state);
-int msp_sleep(struct msp_state *state, int timeout);
+int msp_sleep(struct msp_state *state, int msec);
/* msp3400-kthreads.c */
const char *msp_standard_std_name(int std);
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 596200d0248cf..f4b4812123563 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -1078,7 +1078,7 @@ mt9p031_get_pdata(struct i2c_client *client)
if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
return client->dev.platform_data;
- np = of_graph_get_next_endpoint(client->dev.of_node, NULL);
+ np = of_graph_get_endpoint_by_regs(client->dev.of_node, 0, -1);
if (!np)
return NULL;
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 3ca76eeae7ffd..302120ff125e0 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -988,7 +988,7 @@ static const struct regmap_config mt9v032_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
.max_register = 0xff,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
/* -----------------------------------------------------------------------------
@@ -1006,7 +1006,7 @@ mt9v032_get_pdata(struct i2c_client *client)
if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
return client->dev.platform_data;
- np = of_graph_get_next_endpoint(client->dev.of_node, NULL);
+ np = of_graph_get_endpoint_by_regs(client->dev.of_node, 0, -1);
if (!np)
return NULL;
diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c
index abbb0b774d43f..48df077522ad0 100644
--- a/drivers/media/i2c/ov08x40.c
+++ b/drivers/media/i2c/ov08x40.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022 Intel Corporation.
+#include <asm-generic/unaligned.h>
#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/module.h>
@@ -34,7 +35,7 @@
/* V_TIMING internal */
#define OV08X40_REG_VTS 0x380e
-#define OV08X40_VTS_30FPS 0x1388
+#define OV08X40_VTS_30FPS 0x09c4 /* VTS needs to be halved in normal mode */
#define OV08X40_VTS_BIN_30FPS 0x115c
#define OV08X40_VTS_MAX 0x7fff
@@ -44,8 +45,9 @@
/* Exposure control */
#define OV08X40_REG_EXPOSURE 0x3500
-#define OV08X40_EXPOSURE_MAX_MARGIN 31
-#define OV08X40_EXPOSURE_MIN 1
+#define OV08X40_EXPOSURE_MAX_MARGIN 8
+#define OV08X40_EXPOSURE_BIN_MAX_MARGIN 2
+#define OV08X40_EXPOSURE_MIN 4
#define OV08X40_EXPOSURE_STEP 1
#define OV08X40_EXPOSURE_DEFAULT 0x40
@@ -94,6 +96,12 @@
/* Vertical Window Offset */
#define OV08X40_REG_V_WIN_OFFSET 0x3813
+/* Burst Register */
+#define OV08X40_REG_XTALK_FIRST_A 0x5a80
+#define OV08X40_REG_XTALK_LAST_A 0x5b9f
+#define OV08X40_REG_XTALK_FIRST_B 0x5bc0
+#define OV08X40_REG_XTALK_LAST_B 0x5f1f
+
enum {
OV08X40_LINK_FREQ_400MHZ_INDEX,
};
@@ -126,13 +134,17 @@ struct ov08x40_mode {
u32 vts_def;
u32 vts_min;
- /* HTS */
- u32 hts;
+ /* Line Length Pixels */
+ u32 llp;
/* Index of Link frequency config to be used */
u32 link_freq_index;
/* Default register values */
struct ov08x40_reg_list reg_list;
+
+ /* Exposure calculation */
+ u16 exposure_margin;
+ u16 exposure_shift;
};
static const struct ov08x40_reg mipi_data_rate_800mbps[] = {
@@ -665,1158 +677,6 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x3502, 0x10},
{0x3508, 0x0f},
{0x3509, 0x80},
- {0x5a80, 0x75},
- {0x5a81, 0x75},
- {0x5a82, 0x75},
- {0x5a83, 0x75},
- {0x5a84, 0x75},
- {0x5a85, 0x75},
- {0x5a86, 0x75},
- {0x5a87, 0x75},
- {0x5a88, 0x75},
- {0x5a89, 0x75},
- {0x5a8a, 0x75},
- {0x5a8b, 0x75},
- {0x5a8c, 0x75},
- {0x5a8d, 0x75},
- {0x5a8e, 0x75},
- {0x5a8f, 0x75},
- {0x5a90, 0x75},
- {0x5a91, 0x75},
- {0x5a92, 0x75},
- {0x5a93, 0x75},
- {0x5a94, 0x75},
- {0x5a95, 0x75},
- {0x5a96, 0x75},
- {0x5a97, 0x75},
- {0x5a98, 0x75},
- {0x5a99, 0x75},
- {0x5a9a, 0x75},
- {0x5a9b, 0x75},
- {0x5a9c, 0x75},
- {0x5a9d, 0x75},
- {0x5a9e, 0x75},
- {0x5a9f, 0x75},
- {0x5aa0, 0x75},
- {0x5aa1, 0x75},
- {0x5aa2, 0x75},
- {0x5aa3, 0x75},
- {0x5aa4, 0x75},
- {0x5aa5, 0x75},
- {0x5aa6, 0x75},
- {0x5aa7, 0x75},
- {0x5aa8, 0x75},
- {0x5aa9, 0x75},
- {0x5aaa, 0x75},
- {0x5aab, 0x75},
- {0x5aac, 0x75},
- {0x5aad, 0x75},
- {0x5aae, 0x75},
- {0x5aaf, 0x75},
- {0x5ab0, 0x75},
- {0x5ab1, 0x75},
- {0x5ab2, 0x75},
- {0x5ab3, 0x75},
- {0x5ab4, 0x75},
- {0x5ab5, 0x75},
- {0x5ab6, 0x75},
- {0x5ab7, 0x75},
- {0x5ab8, 0x75},
- {0x5ab9, 0x75},
- {0x5aba, 0x75},
- {0x5abb, 0x75},
- {0x5abc, 0x75},
- {0x5abd, 0x75},
- {0x5abe, 0x75},
- {0x5abf, 0x75},
- {0x5ac0, 0x75},
- {0x5ac1, 0x75},
- {0x5ac2, 0x75},
- {0x5ac3, 0x75},
- {0x5ac4, 0x75},
- {0x5ac5, 0x75},
- {0x5ac6, 0x75},
- {0x5ac7, 0x75},
- {0x5ac8, 0x75},
- {0x5ac9, 0x75},
- {0x5aca, 0x75},
- {0x5acb, 0x75},
- {0x5acc, 0x75},
- {0x5acd, 0x75},
- {0x5ace, 0x75},
- {0x5acf, 0x75},
- {0x5ad0, 0x75},
- {0x5ad1, 0x75},
- {0x5ad2, 0x75},
- {0x5ad3, 0x75},
- {0x5ad4, 0x75},
- {0x5ad5, 0x75},
- {0x5ad6, 0x75},
- {0x5ad7, 0x75},
- {0x5ad8, 0x75},
- {0x5ad9, 0x75},
- {0x5ada, 0x75},
- {0x5adb, 0x75},
- {0x5adc, 0x75},
- {0x5add, 0x75},
- {0x5ade, 0x75},
- {0x5adf, 0x75},
- {0x5ae0, 0x75},
- {0x5ae1, 0x75},
- {0x5ae2, 0x75},
- {0x5ae3, 0x75},
- {0x5ae4, 0x75},
- {0x5ae5, 0x75},
- {0x5ae6, 0x75},
- {0x5ae7, 0x75},
- {0x5ae8, 0x75},
- {0x5ae9, 0x75},
- {0x5aea, 0x75},
- {0x5aeb, 0x75},
- {0x5aec, 0x75},
- {0x5aed, 0x75},
- {0x5aee, 0x75},
- {0x5aef, 0x75},
- {0x5af0, 0x75},
- {0x5af1, 0x75},
- {0x5af2, 0x75},
- {0x5af3, 0x75},
- {0x5af4, 0x75},
- {0x5af5, 0x75},
- {0x5af6, 0x75},
- {0x5af7, 0x75},
- {0x5af8, 0x75},
- {0x5af9, 0x75},
- {0x5afa, 0x75},
- {0x5afb, 0x75},
- {0x5afc, 0x75},
- {0x5afd, 0x75},
- {0x5afe, 0x75},
- {0x5aff, 0x75},
- {0x5b00, 0x75},
- {0x5b01, 0x75},
- {0x5b02, 0x75},
- {0x5b03, 0x75},
- {0x5b04, 0x75},
- {0x5b05, 0x75},
- {0x5b06, 0x75},
- {0x5b07, 0x75},
- {0x5b08, 0x75},
- {0x5b09, 0x75},
- {0x5b0a, 0x75},
- {0x5b0b, 0x75},
- {0x5b0c, 0x75},
- {0x5b0d, 0x75},
- {0x5b0e, 0x75},
- {0x5b0f, 0x75},
- {0x5b10, 0x75},
- {0x5b11, 0x75},
- {0x5b12, 0x75},
- {0x5b13, 0x75},
- {0x5b14, 0x75},
- {0x5b15, 0x75},
- {0x5b16, 0x75},
- {0x5b17, 0x75},
- {0x5b18, 0x75},
- {0x5b19, 0x75},
- {0x5b1a, 0x75},
- {0x5b1b, 0x75},
- {0x5b1c, 0x75},
- {0x5b1d, 0x75},
- {0x5b1e, 0x75},
- {0x5b1f, 0x75},
- {0x5b20, 0x75},
- {0x5b21, 0x75},
- {0x5b22, 0x75},
- {0x5b23, 0x75},
- {0x5b24, 0x75},
- {0x5b25, 0x75},
- {0x5b26, 0x75},
- {0x5b27, 0x75},
- {0x5b28, 0x75},
- {0x5b29, 0x75},
- {0x5b2a, 0x75},
- {0x5b2b, 0x75},
- {0x5b2c, 0x75},
- {0x5b2d, 0x75},
- {0x5b2e, 0x75},
- {0x5b2f, 0x75},
- {0x5b30, 0x75},
- {0x5b31, 0x75},
- {0x5b32, 0x75},
- {0x5b33, 0x75},
- {0x5b34, 0x75},
- {0x5b35, 0x75},
- {0x5b36, 0x75},
- {0x5b37, 0x75},
- {0x5b38, 0x75},
- {0x5b39, 0x75},
- {0x5b3a, 0x75},
- {0x5b3b, 0x75},
- {0x5b3c, 0x75},
- {0x5b3d, 0x75},
- {0x5b3e, 0x75},
- {0x5b3f, 0x75},
- {0x5b40, 0x75},
- {0x5b41, 0x75},
- {0x5b42, 0x75},
- {0x5b43, 0x75},
- {0x5b44, 0x75},
- {0x5b45, 0x75},
- {0x5b46, 0x75},
- {0x5b47, 0x75},
- {0x5b48, 0x75},
- {0x5b49, 0x75},
- {0x5b4a, 0x75},
- {0x5b4b, 0x75},
- {0x5b4c, 0x75},
- {0x5b4d, 0x75},
- {0x5b4e, 0x75},
- {0x5b4f, 0x75},
- {0x5b50, 0x75},
- {0x5b51, 0x75},
- {0x5b52, 0x75},
- {0x5b53, 0x75},
- {0x5b54, 0x75},
- {0x5b55, 0x75},
- {0x5b56, 0x75},
- {0x5b57, 0x75},
- {0x5b58, 0x75},
- {0x5b59, 0x75},
- {0x5b5a, 0x75},
- {0x5b5b, 0x75},
- {0x5b5c, 0x75},
- {0x5b5d, 0x75},
- {0x5b5e, 0x75},
- {0x5b5f, 0x75},
- {0x5b60, 0x75},
- {0x5b61, 0x75},
- {0x5b62, 0x75},
- {0x5b63, 0x75},
- {0x5b64, 0x75},
- {0x5b65, 0x75},
- {0x5b66, 0x75},
- {0x5b67, 0x75},
- {0x5b68, 0x75},
- {0x5b69, 0x75},
- {0x5b6a, 0x75},
- {0x5b6b, 0x75},
- {0x5b6c, 0x75},
- {0x5b6d, 0x75},
- {0x5b6e, 0x75},
- {0x5b6f, 0x75},
- {0x5b70, 0x75},
- {0x5b71, 0x75},
- {0x5b72, 0x75},
- {0x5b73, 0x75},
- {0x5b74, 0x75},
- {0x5b75, 0x75},
- {0x5b76, 0x75},
- {0x5b77, 0x75},
- {0x5b78, 0x75},
- {0x5b79, 0x75},
- {0x5b7a, 0x75},
- {0x5b7b, 0x75},
- {0x5b7c, 0x75},
- {0x5b7d, 0x75},
- {0x5b7e, 0x75},
- {0x5b7f, 0x75},
- {0x5b80, 0x75},
- {0x5b81, 0x75},
- {0x5b82, 0x75},
- {0x5b83, 0x75},
- {0x5b84, 0x75},
- {0x5b85, 0x75},
- {0x5b86, 0x75},
- {0x5b87, 0x75},
- {0x5b88, 0x75},
- {0x5b89, 0x75},
- {0x5b8a, 0x75},
- {0x5b8b, 0x75},
- {0x5b8c, 0x75},
- {0x5b8d, 0x75},
- {0x5b8e, 0x75},
- {0x5b8f, 0x75},
- {0x5b90, 0x75},
- {0x5b91, 0x75},
- {0x5b92, 0x75},
- {0x5b93, 0x75},
- {0x5b94, 0x75},
- {0x5b95, 0x75},
- {0x5b96, 0x75},
- {0x5b97, 0x75},
- {0x5b98, 0x75},
- {0x5b99, 0x75},
- {0x5b9a, 0x75},
- {0x5b9b, 0x75},
- {0x5b9c, 0x75},
- {0x5b9d, 0x75},
- {0x5b9e, 0x75},
- {0x5b9f, 0x75},
- {0x5bc0, 0x75},
- {0x5bc1, 0x75},
- {0x5bc2, 0x75},
- {0x5bc3, 0x75},
- {0x5bc4, 0x75},
- {0x5bc5, 0x75},
- {0x5bc6, 0x75},
- {0x5bc7, 0x75},
- {0x5bc8, 0x75},
- {0x5bc9, 0x75},
- {0x5bca, 0x75},
- {0x5bcb, 0x75},
- {0x5bcc, 0x75},
- {0x5bcd, 0x75},
- {0x5bce, 0x75},
- {0x5bcf, 0x75},
- {0x5bd0, 0x75},
- {0x5bd1, 0x75},
- {0x5bd2, 0x75},
- {0x5bd3, 0x75},
- {0x5bd4, 0x75},
- {0x5bd5, 0x75},
- {0x5bd6, 0x75},
- {0x5bd7, 0x75},
- {0x5bd8, 0x75},
- {0x5bd9, 0x75},
- {0x5bda, 0x75},
- {0x5bdb, 0x75},
- {0x5bdc, 0x75},
- {0x5bdd, 0x75},
- {0x5bde, 0x75},
- {0x5bdf, 0x75},
- {0x5be0, 0x75},
- {0x5be1, 0x75},
- {0x5be2, 0x75},
- {0x5be3, 0x75},
- {0x5be4, 0x75},
- {0x5be5, 0x75},
- {0x5be6, 0x75},
- {0x5be7, 0x75},
- {0x5be8, 0x75},
- {0x5be9, 0x75},
- {0x5bea, 0x75},
- {0x5beb, 0x75},
- {0x5bec, 0x75},
- {0x5bed, 0x75},
- {0x5bee, 0x75},
- {0x5bef, 0x75},
- {0x5bf0, 0x75},
- {0x5bf1, 0x75},
- {0x5bf2, 0x75},
- {0x5bf3, 0x75},
- {0x5bf4, 0x75},
- {0x5bf5, 0x75},
- {0x5bf6, 0x75},
- {0x5bf7, 0x75},
- {0x5bf8, 0x75},
- {0x5bf9, 0x75},
- {0x5bfa, 0x75},
- {0x5bfb, 0x75},
- {0x5bfc, 0x75},
- {0x5bfd, 0x75},
- {0x5bfe, 0x75},
- {0x5bff, 0x75},
- {0x5c00, 0x75},
- {0x5c01, 0x75},
- {0x5c02, 0x75},
- {0x5c03, 0x75},
- {0x5c04, 0x75},
- {0x5c05, 0x75},
- {0x5c06, 0x75},
- {0x5c07, 0x75},
- {0x5c08, 0x75},
- {0x5c09, 0x75},
- {0x5c0a, 0x75},
- {0x5c0b, 0x75},
- {0x5c0c, 0x75},
- {0x5c0d, 0x75},
- {0x5c0e, 0x75},
- {0x5c0f, 0x75},
- {0x5c10, 0x75},
- {0x5c11, 0x75},
- {0x5c12, 0x75},
- {0x5c13, 0x75},
- {0x5c14, 0x75},
- {0x5c15, 0x75},
- {0x5c16, 0x75},
- {0x5c17, 0x75},
- {0x5c18, 0x75},
- {0x5c19, 0x75},
- {0x5c1a, 0x75},
- {0x5c1b, 0x75},
- {0x5c1c, 0x75},
- {0x5c1d, 0x75},
- {0x5c1e, 0x75},
- {0x5c1f, 0x75},
- {0x5c20, 0x75},
- {0x5c21, 0x75},
- {0x5c22, 0x75},
- {0x5c23, 0x75},
- {0x5c24, 0x75},
- {0x5c25, 0x75},
- {0x5c26, 0x75},
- {0x5c27, 0x75},
- {0x5c28, 0x75},
- {0x5c29, 0x75},
- {0x5c2a, 0x75},
- {0x5c2b, 0x75},
- {0x5c2c, 0x75},
- {0x5c2d, 0x75},
- {0x5c2e, 0x75},
- {0x5c2f, 0x75},
- {0x5c30, 0x75},
- {0x5c31, 0x75},
- {0x5c32, 0x75},
- {0x5c33, 0x75},
- {0x5c34, 0x75},
- {0x5c35, 0x75},
- {0x5c36, 0x75},
- {0x5c37, 0x75},
- {0x5c38, 0x75},
- {0x5c39, 0x75},
- {0x5c3a, 0x75},
- {0x5c3b, 0x75},
- {0x5c3c, 0x75},
- {0x5c3d, 0x75},
- {0x5c3e, 0x75},
- {0x5c3f, 0x75},
- {0x5c40, 0x75},
- {0x5c41, 0x75},
- {0x5c42, 0x75},
- {0x5c43, 0x75},
- {0x5c44, 0x75},
- {0x5c45, 0x75},
- {0x5c46, 0x75},
- {0x5c47, 0x75},
- {0x5c48, 0x75},
- {0x5c49, 0x75},
- {0x5c4a, 0x75},
- {0x5c4b, 0x75},
- {0x5c4c, 0x75},
- {0x5c4d, 0x75},
- {0x5c4e, 0x75},
- {0x5c4f, 0x75},
- {0x5c50, 0x75},
- {0x5c51, 0x75},
- {0x5c52, 0x75},
- {0x5c53, 0x75},
- {0x5c54, 0x75},
- {0x5c55, 0x75},
- {0x5c56, 0x75},
- {0x5c57, 0x75},
- {0x5c58, 0x75},
- {0x5c59, 0x75},
- {0x5c5a, 0x75},
- {0x5c5b, 0x75},
- {0x5c5c, 0x75},
- {0x5c5d, 0x75},
- {0x5c5e, 0x75},
- {0x5c5f, 0x75},
- {0x5c60, 0x75},
- {0x5c61, 0x75},
- {0x5c62, 0x75},
- {0x5c63, 0x75},
- {0x5c64, 0x75},
- {0x5c65, 0x75},
- {0x5c66, 0x75},
- {0x5c67, 0x75},
- {0x5c68, 0x75},
- {0x5c69, 0x75},
- {0x5c6a, 0x75},
- {0x5c6b, 0x75},
- {0x5c6c, 0x75},
- {0x5c6d, 0x75},
- {0x5c6e, 0x75},
- {0x5c6f, 0x75},
- {0x5c70, 0x75},
- {0x5c71, 0x75},
- {0x5c72, 0x75},
- {0x5c73, 0x75},
- {0x5c74, 0x75},
- {0x5c75, 0x75},
- {0x5c76, 0x75},
- {0x5c77, 0x75},
- {0x5c78, 0x75},
- {0x5c79, 0x75},
- {0x5c7a, 0x75},
- {0x5c7b, 0x75},
- {0x5c7c, 0x75},
- {0x5c7d, 0x75},
- {0x5c7e, 0x75},
- {0x5c7f, 0x75},
- {0x5c80, 0x75},
- {0x5c81, 0x75},
- {0x5c82, 0x75},
- {0x5c83, 0x75},
- {0x5c84, 0x75},
- {0x5c85, 0x75},
- {0x5c86, 0x75},
- {0x5c87, 0x75},
- {0x5c88, 0x75},
- {0x5c89, 0x75},
- {0x5c8a, 0x75},
- {0x5c8b, 0x75},
- {0x5c8c, 0x75},
- {0x5c8d, 0x75},
- {0x5c8e, 0x75},
- {0x5c8f, 0x75},
- {0x5c90, 0x75},
- {0x5c91, 0x75},
- {0x5c92, 0x75},
- {0x5c93, 0x75},
- {0x5c94, 0x75},
- {0x5c95, 0x75},
- {0x5c96, 0x75},
- {0x5c97, 0x75},
- {0x5c98, 0x75},
- {0x5c99, 0x75},
- {0x5c9a, 0x75},
- {0x5c9b, 0x75},
- {0x5c9c, 0x75},
- {0x5c9d, 0x75},
- {0x5c9e, 0x75},
- {0x5c9f, 0x75},
- {0x5ca0, 0x75},
- {0x5ca1, 0x75},
- {0x5ca2, 0x75},
- {0x5ca3, 0x75},
- {0x5ca4, 0x75},
- {0x5ca5, 0x75},
- {0x5ca6, 0x75},
- {0x5ca7, 0x75},
- {0x5ca8, 0x75},
- {0x5ca9, 0x75},
- {0x5caa, 0x75},
- {0x5cab, 0x75},
- {0x5cac, 0x75},
- {0x5cad, 0x75},
- {0x5cae, 0x75},
- {0x5caf, 0x75},
- {0x5cb0, 0x75},
- {0x5cb1, 0x75},
- {0x5cb2, 0x75},
- {0x5cb3, 0x75},
- {0x5cb4, 0x75},
- {0x5cb5, 0x75},
- {0x5cb6, 0x75},
- {0x5cb7, 0x75},
- {0x5cb8, 0x75},
- {0x5cb9, 0x75},
- {0x5cba, 0x75},
- {0x5cbb, 0x75},
- {0x5cbc, 0x75},
- {0x5cbd, 0x75},
- {0x5cbe, 0x75},
- {0x5cbf, 0x75},
- {0x5cc0, 0x75},
- {0x5cc1, 0x75},
- {0x5cc2, 0x75},
- {0x5cc3, 0x75},
- {0x5cc4, 0x75},
- {0x5cc5, 0x75},
- {0x5cc6, 0x75},
- {0x5cc7, 0x75},
- {0x5cc8, 0x75},
- {0x5cc9, 0x75},
- {0x5cca, 0x75},
- {0x5ccb, 0x75},
- {0x5ccc, 0x75},
- {0x5ccd, 0x75},
- {0x5cce, 0x75},
- {0x5ccf, 0x75},
- {0x5cd0, 0x75},
- {0x5cd1, 0x75},
- {0x5cd2, 0x75},
- {0x5cd3, 0x75},
- {0x5cd4, 0x75},
- {0x5cd5, 0x75},
- {0x5cd6, 0x75},
- {0x5cd7, 0x75},
- {0x5cd8, 0x75},
- {0x5cd9, 0x75},
- {0x5cda, 0x75},
- {0x5cdb, 0x75},
- {0x5cdc, 0x75},
- {0x5cdd, 0x75},
- {0x5cde, 0x75},
- {0x5cdf, 0x75},
- {0x5ce0, 0x75},
- {0x5ce1, 0x75},
- {0x5ce2, 0x75},
- {0x5ce3, 0x75},
- {0x5ce4, 0x75},
- {0x5ce5, 0x75},
- {0x5ce6, 0x75},
- {0x5ce7, 0x75},
- {0x5ce8, 0x75},
- {0x5ce9, 0x75},
- {0x5cea, 0x75},
- {0x5ceb, 0x75},
- {0x5cec, 0x75},
- {0x5ced, 0x75},
- {0x5cee, 0x75},
- {0x5cef, 0x75},
- {0x5cf0, 0x75},
- {0x5cf1, 0x75},
- {0x5cf2, 0x75},
- {0x5cf3, 0x75},
- {0x5cf4, 0x75},
- {0x5cf5, 0x75},
- {0x5cf6, 0x75},
- {0x5cf7, 0x75},
- {0x5cf8, 0x75},
- {0x5cf9, 0x75},
- {0x5cfa, 0x75},
- {0x5cfb, 0x75},
- {0x5cfc, 0x75},
- {0x5cfd, 0x75},
- {0x5cfe, 0x75},
- {0x5cff, 0x75},
- {0x5d00, 0x75},
- {0x5d01, 0x75},
- {0x5d02, 0x75},
- {0x5d03, 0x75},
- {0x5d04, 0x75},
- {0x5d05, 0x75},
- {0x5d06, 0x75},
- {0x5d07, 0x75},
- {0x5d08, 0x75},
- {0x5d09, 0x75},
- {0x5d0a, 0x75},
- {0x5d0b, 0x75},
- {0x5d0c, 0x75},
- {0x5d0d, 0x75},
- {0x5d0e, 0x75},
- {0x5d0f, 0x75},
- {0x5d10, 0x75},
- {0x5d11, 0x75},
- {0x5d12, 0x75},
- {0x5d13, 0x75},
- {0x5d14, 0x75},
- {0x5d15, 0x75},
- {0x5d16, 0x75},
- {0x5d17, 0x75},
- {0x5d18, 0x75},
- {0x5d19, 0x75},
- {0x5d1a, 0x75},
- {0x5d1b, 0x75},
- {0x5d1c, 0x75},
- {0x5d1d, 0x75},
- {0x5d1e, 0x75},
- {0x5d1f, 0x75},
- {0x5d20, 0x75},
- {0x5d21, 0x75},
- {0x5d22, 0x75},
- {0x5d23, 0x75},
- {0x5d24, 0x75},
- {0x5d25, 0x75},
- {0x5d26, 0x75},
- {0x5d27, 0x75},
- {0x5d28, 0x75},
- {0x5d29, 0x75},
- {0x5d2a, 0x75},
- {0x5d2b, 0x75},
- {0x5d2c, 0x75},
- {0x5d2d, 0x75},
- {0x5d2e, 0x75},
- {0x5d2f, 0x75},
- {0x5d30, 0x75},
- {0x5d31, 0x75},
- {0x5d32, 0x75},
- {0x5d33, 0x75},
- {0x5d34, 0x75},
- {0x5d35, 0x75},
- {0x5d36, 0x75},
- {0x5d37, 0x75},
- {0x5d38, 0x75},
- {0x5d39, 0x75},
- {0x5d3a, 0x75},
- {0x5d3b, 0x75},
- {0x5d3c, 0x75},
- {0x5d3d, 0x75},
- {0x5d3e, 0x75},
- {0x5d3f, 0x75},
- {0x5d40, 0x75},
- {0x5d41, 0x75},
- {0x5d42, 0x75},
- {0x5d43, 0x75},
- {0x5d44, 0x75},
- {0x5d45, 0x75},
- {0x5d46, 0x75},
- {0x5d47, 0x75},
- {0x5d48, 0x75},
- {0x5d49, 0x75},
- {0x5d4a, 0x75},
- {0x5d4b, 0x75},
- {0x5d4c, 0x75},
- {0x5d4d, 0x75},
- {0x5d4e, 0x75},
- {0x5d4f, 0x75},
- {0x5d50, 0x75},
- {0x5d51, 0x75},
- {0x5d52, 0x75},
- {0x5d53, 0x75},
- {0x5d54, 0x75},
- {0x5d55, 0x75},
- {0x5d56, 0x75},
- {0x5d57, 0x75},
- {0x5d58, 0x75},
- {0x5d59, 0x75},
- {0x5d5a, 0x75},
- {0x5d5b, 0x75},
- {0x5d5c, 0x75},
- {0x5d5d, 0x75},
- {0x5d5e, 0x75},
- {0x5d5f, 0x75},
- {0x5d60, 0x75},
- {0x5d61, 0x75},
- {0x5d62, 0x75},
- {0x5d63, 0x75},
- {0x5d64, 0x75},
- {0x5d65, 0x75},
- {0x5d66, 0x75},
- {0x5d67, 0x75},
- {0x5d68, 0x75},
- {0x5d69, 0x75},
- {0x5d6a, 0x75},
- {0x5d6b, 0x75},
- {0x5d6c, 0x75},
- {0x5d6d, 0x75},
- {0x5d6e, 0x75},
- {0x5d6f, 0x75},
- {0x5d70, 0x75},
- {0x5d71, 0x75},
- {0x5d72, 0x75},
- {0x5d73, 0x75},
- {0x5d74, 0x75},
- {0x5d75, 0x75},
- {0x5d76, 0x75},
- {0x5d77, 0x75},
- {0x5d78, 0x75},
- {0x5d79, 0x75},
- {0x5d7a, 0x75},
- {0x5d7b, 0x75},
- {0x5d7c, 0x75},
- {0x5d7d, 0x75},
- {0x5d7e, 0x75},
- {0x5d7f, 0x75},
- {0x5d80, 0x75},
- {0x5d81, 0x75},
- {0x5d82, 0x75},
- {0x5d83, 0x75},
- {0x5d84, 0x75},
- {0x5d85, 0x75},
- {0x5d86, 0x75},
- {0x5d87, 0x75},
- {0x5d88, 0x75},
- {0x5d89, 0x75},
- {0x5d8a, 0x75},
- {0x5d8b, 0x75},
- {0x5d8c, 0x75},
- {0x5d8d, 0x75},
- {0x5d8e, 0x75},
- {0x5d8f, 0x75},
- {0x5d90, 0x75},
- {0x5d91, 0x75},
- {0x5d92, 0x75},
- {0x5d93, 0x75},
- {0x5d94, 0x75},
- {0x5d95, 0x75},
- {0x5d96, 0x75},
- {0x5d97, 0x75},
- {0x5d98, 0x75},
- {0x5d99, 0x75},
- {0x5d9a, 0x75},
- {0x5d9b, 0x75},
- {0x5d9c, 0x75},
- {0x5d9d, 0x75},
- {0x5d9e, 0x75},
- {0x5d9f, 0x75},
- {0x5da0, 0x75},
- {0x5da1, 0x75},
- {0x5da2, 0x75},
- {0x5da3, 0x75},
- {0x5da4, 0x75},
- {0x5da5, 0x75},
- {0x5da6, 0x75},
- {0x5da7, 0x75},
- {0x5da8, 0x75},
- {0x5da9, 0x75},
- {0x5daa, 0x75},
- {0x5dab, 0x75},
- {0x5dac, 0x75},
- {0x5dad, 0x75},
- {0x5dae, 0x75},
- {0x5daf, 0x75},
- {0x5db0, 0x75},
- {0x5db1, 0x75},
- {0x5db2, 0x75},
- {0x5db3, 0x75},
- {0x5db4, 0x75},
- {0x5db5, 0x75},
- {0x5db6, 0x75},
- {0x5db7, 0x75},
- {0x5db8, 0x75},
- {0x5db9, 0x75},
- {0x5dba, 0x75},
- {0x5dbb, 0x75},
- {0x5dbc, 0x75},
- {0x5dbd, 0x75},
- {0x5dbe, 0x75},
- {0x5dbf, 0x75},
- {0x5dc0, 0x75},
- {0x5dc1, 0x75},
- {0x5dc2, 0x75},
- {0x5dc3, 0x75},
- {0x5dc4, 0x75},
- {0x5dc5, 0x75},
- {0x5dc6, 0x75},
- {0x5dc7, 0x75},
- {0x5dc8, 0x75},
- {0x5dc9, 0x75},
- {0x5dca, 0x75},
- {0x5dcb, 0x75},
- {0x5dcc, 0x75},
- {0x5dcd, 0x75},
- {0x5dce, 0x75},
- {0x5dcf, 0x75},
- {0x5dd0, 0x75},
- {0x5dd1, 0x75},
- {0x5dd2, 0x75},
- {0x5dd3, 0x75},
- {0x5dd4, 0x75},
- {0x5dd5, 0x75},
- {0x5dd6, 0x75},
- {0x5dd7, 0x75},
- {0x5dd8, 0x75},
- {0x5dd9, 0x75},
- {0x5dda, 0x75},
- {0x5ddb, 0x75},
- {0x5ddc, 0x75},
- {0x5ddd, 0x75},
- {0x5dde, 0x75},
- {0x5ddf, 0x75},
- {0x5de0, 0x75},
- {0x5de1, 0x75},
- {0x5de2, 0x75},
- {0x5de3, 0x75},
- {0x5de4, 0x75},
- {0x5de5, 0x75},
- {0x5de6, 0x75},
- {0x5de7, 0x75},
- {0x5de8, 0x75},
- {0x5de9, 0x75},
- {0x5dea, 0x75},
- {0x5deb, 0x75},
- {0x5dec, 0x75},
- {0x5ded, 0x75},
- {0x5dee, 0x75},
- {0x5def, 0x75},
- {0x5df0, 0x75},
- {0x5df1, 0x75},
- {0x5df2, 0x75},
- {0x5df3, 0x75},
- {0x5df4, 0x75},
- {0x5df5, 0x75},
- {0x5df6, 0x75},
- {0x5df7, 0x75},
- {0x5df8, 0x75},
- {0x5df9, 0x75},
- {0x5dfa, 0x75},
- {0x5dfb, 0x75},
- {0x5dfc, 0x75},
- {0x5dfd, 0x75},
- {0x5dfe, 0x75},
- {0x5dff, 0x75},
- {0x5e00, 0x75},
- {0x5e01, 0x75},
- {0x5e02, 0x75},
- {0x5e03, 0x75},
- {0x5e04, 0x75},
- {0x5e05, 0x75},
- {0x5e06, 0x75},
- {0x5e07, 0x75},
- {0x5e08, 0x75},
- {0x5e09, 0x75},
- {0x5e0a, 0x75},
- {0x5e0b, 0x75},
- {0x5e0c, 0x75},
- {0x5e0d, 0x75},
- {0x5e0e, 0x75},
- {0x5e0f, 0x75},
- {0x5e10, 0x75},
- {0x5e11, 0x75},
- {0x5e12, 0x75},
- {0x5e13, 0x75},
- {0x5e14, 0x75},
- {0x5e15, 0x75},
- {0x5e16, 0x75},
- {0x5e17, 0x75},
- {0x5e18, 0x75},
- {0x5e19, 0x75},
- {0x5e1a, 0x75},
- {0x5e1b, 0x75},
- {0x5e1c, 0x75},
- {0x5e1d, 0x75},
- {0x5e1e, 0x75},
- {0x5e1f, 0x75},
- {0x5e20, 0x75},
- {0x5e21, 0x75},
- {0x5e22, 0x75},
- {0x5e23, 0x75},
- {0x5e24, 0x75},
- {0x5e25, 0x75},
- {0x5e26, 0x75},
- {0x5e27, 0x75},
- {0x5e28, 0x75},
- {0x5e29, 0x75},
- {0x5e2a, 0x75},
- {0x5e2b, 0x75},
- {0x5e2c, 0x75},
- {0x5e2d, 0x75},
- {0x5e2e, 0x75},
- {0x5e2f, 0x75},
- {0x5e30, 0x75},
- {0x5e31, 0x75},
- {0x5e32, 0x75},
- {0x5e33, 0x75},
- {0x5e34, 0x75},
- {0x5e35, 0x75},
- {0x5e36, 0x75},
- {0x5e37, 0x75},
- {0x5e38, 0x75},
- {0x5e39, 0x75},
- {0x5e3a, 0x75},
- {0x5e3b, 0x75},
- {0x5e3c, 0x75},
- {0x5e3d, 0x75},
- {0x5e3e, 0x75},
- {0x5e3f, 0x75},
- {0x5e40, 0x75},
- {0x5e41, 0x75},
- {0x5e42, 0x75},
- {0x5e43, 0x75},
- {0x5e44, 0x75},
- {0x5e45, 0x75},
- {0x5e46, 0x75},
- {0x5e47, 0x75},
- {0x5e48, 0x75},
- {0x5e49, 0x75},
- {0x5e4a, 0x75},
- {0x5e4b, 0x75},
- {0x5e4c, 0x75},
- {0x5e4d, 0x75},
- {0x5e4e, 0x75},
- {0x5e4f, 0x75},
- {0x5e50, 0x75},
- {0x5e51, 0x75},
- {0x5e52, 0x75},
- {0x5e53, 0x75},
- {0x5e54, 0x75},
- {0x5e55, 0x75},
- {0x5e56, 0x75},
- {0x5e57, 0x75},
- {0x5e58, 0x75},
- {0x5e59, 0x75},
- {0x5e5a, 0x75},
- {0x5e5b, 0x75},
- {0x5e5c, 0x75},
- {0x5e5d, 0x75},
- {0x5e5e, 0x75},
- {0x5e5f, 0x75},
- {0x5e60, 0x75},
- {0x5e61, 0x75},
- {0x5e62, 0x75},
- {0x5e63, 0x75},
- {0x5e64, 0x75},
- {0x5e65, 0x75},
- {0x5e66, 0x75},
- {0x5e67, 0x75},
- {0x5e68, 0x75},
- {0x5e69, 0x75},
- {0x5e6a, 0x75},
- {0x5e6b, 0x75},
- {0x5e6c, 0x75},
- {0x5e6d, 0x75},
- {0x5e6e, 0x75},
- {0x5e6f, 0x75},
- {0x5e70, 0x75},
- {0x5e71, 0x75},
- {0x5e72, 0x75},
- {0x5e73, 0x75},
- {0x5e74, 0x75},
- {0x5e75, 0x75},
- {0x5e76, 0x75},
- {0x5e77, 0x75},
- {0x5e78, 0x75},
- {0x5e79, 0x75},
- {0x5e7a, 0x75},
- {0x5e7b, 0x75},
- {0x5e7c, 0x75},
- {0x5e7d, 0x75},
- {0x5e7e, 0x75},
- {0x5e7f, 0x75},
- {0x5e80, 0x75},
- {0x5e81, 0x75},
- {0x5e82, 0x75},
- {0x5e83, 0x75},
- {0x5e84, 0x75},
- {0x5e85, 0x75},
- {0x5e86, 0x75},
- {0x5e87, 0x75},
- {0x5e88, 0x75},
- {0x5e89, 0x75},
- {0x5e8a, 0x75},
- {0x5e8b, 0x75},
- {0x5e8c, 0x75},
- {0x5e8d, 0x75},
- {0x5e8e, 0x75},
- {0x5e8f, 0x75},
- {0x5e90, 0x75},
- {0x5e91, 0x75},
- {0x5e92, 0x75},
- {0x5e93, 0x75},
- {0x5e94, 0x75},
- {0x5e95, 0x75},
- {0x5e96, 0x75},
- {0x5e97, 0x75},
- {0x5e98, 0x75},
- {0x5e99, 0x75},
- {0x5e9a, 0x75},
- {0x5e9b, 0x75},
- {0x5e9c, 0x75},
- {0x5e9d, 0x75},
- {0x5e9e, 0x75},
- {0x5e9f, 0x75},
- {0x5ea0, 0x75},
- {0x5ea1, 0x75},
- {0x5ea2, 0x75},
- {0x5ea3, 0x75},
- {0x5ea4, 0x75},
- {0x5ea5, 0x75},
- {0x5ea6, 0x75},
- {0x5ea7, 0x75},
- {0x5ea8, 0x75},
- {0x5ea9, 0x75},
- {0x5eaa, 0x75},
- {0x5eab, 0x75},
- {0x5eac, 0x75},
- {0x5ead, 0x75},
- {0x5eae, 0x75},
- {0x5eaf, 0x75},
- {0x5eb0, 0x75},
- {0x5eb1, 0x75},
- {0x5eb2, 0x75},
- {0x5eb3, 0x75},
- {0x5eb4, 0x75},
- {0x5eb5, 0x75},
- {0x5eb6, 0x75},
- {0x5eb7, 0x75},
- {0x5eb8, 0x75},
- {0x5eb9, 0x75},
- {0x5eba, 0x75},
- {0x5ebb, 0x75},
- {0x5ebc, 0x75},
- {0x5ebd, 0x75},
- {0x5ebe, 0x75},
- {0x5ebf, 0x75},
- {0x5ec0, 0x75},
- {0x5ec1, 0x75},
- {0x5ec2, 0x75},
- {0x5ec3, 0x75},
- {0x5ec4, 0x75},
- {0x5ec5, 0x75},
- {0x5ec6, 0x75},
- {0x5ec7, 0x75},
- {0x5ec8, 0x75},
- {0x5ec9, 0x75},
- {0x5eca, 0x75},
- {0x5ecb, 0x75},
- {0x5ecc, 0x75},
- {0x5ecd, 0x75},
- {0x5ece, 0x75},
- {0x5ecf, 0x75},
- {0x5ed0, 0x75},
- {0x5ed1, 0x75},
- {0x5ed2, 0x75},
- {0x5ed3, 0x75},
- {0x5ed4, 0x75},
- {0x5ed5, 0x75},
- {0x5ed6, 0x75},
- {0x5ed7, 0x75},
- {0x5ed8, 0x75},
- {0x5ed9, 0x75},
- {0x5eda, 0x75},
- {0x5edb, 0x75},
- {0x5edc, 0x75},
- {0x5edd, 0x75},
- {0x5ede, 0x75},
- {0x5edf, 0x75},
- {0x5ee0, 0x75},
- {0x5ee1, 0x75},
- {0x5ee2, 0x75},
- {0x5ee3, 0x75},
- {0x5ee4, 0x75},
- {0x5ee5, 0x75},
- {0x5ee6, 0x75},
- {0x5ee7, 0x75},
- {0x5ee8, 0x75},
- {0x5ee9, 0x75},
- {0x5eea, 0x75},
- {0x5eeb, 0x75},
- {0x5eec, 0x75},
- {0x5eed, 0x75},
- {0x5eee, 0x75},
- {0x5eef, 0x75},
- {0x5ef0, 0x75},
- {0x5ef1, 0x75},
- {0x5ef2, 0x75},
- {0x5ef3, 0x75},
- {0x5ef4, 0x75},
- {0x5ef5, 0x75},
- {0x5ef6, 0x75},
- {0x5ef7, 0x75},
- {0x5ef8, 0x75},
- {0x5ef9, 0x75},
- {0x5efa, 0x75},
- {0x5efb, 0x75},
- {0x5efc, 0x75},
- {0x5efd, 0x75},
- {0x5efe, 0x75},
- {0x5eff, 0x75},
- {0x5f00, 0x75},
- {0x5f01, 0x75},
- {0x5f02, 0x75},
- {0x5f03, 0x75},
- {0x5f04, 0x75},
- {0x5f05, 0x75},
- {0x5f06, 0x75},
- {0x5f07, 0x75},
- {0x5f08, 0x75},
- {0x5f09, 0x75},
- {0x5f0a, 0x75},
- {0x5f0b, 0x75},
- {0x5f0c, 0x75},
- {0x5f0d, 0x75},
- {0x5f0e, 0x75},
- {0x5f0f, 0x75},
- {0x5f10, 0x75},
- {0x5f11, 0x75},
- {0x5f12, 0x75},
- {0x5f13, 0x75},
- {0x5f14, 0x75},
- {0x5f15, 0x75},
- {0x5f16, 0x75},
- {0x5f17, 0x75},
- {0x5f18, 0x75},
- {0x5f19, 0x75},
- {0x5f1a, 0x75},
- {0x5f1b, 0x75},
- {0x5f1c, 0x75},
- {0x5f1d, 0x75},
- {0x5f1e, 0x75},
- {0x5f1f, 0x75},
};
static const struct ov08x40_reg mode_1928x1208_regs[] = {
@@ -2354,7 +1214,7 @@ static const char * const ov08x40_test_pattern_menu[] = {
/* Configurations for supported link frequencies */
#define OV08X40_LINK_FREQ_400MHZ 400000000ULL
-
+#define OV08X40_SCLK_96MHZ 96000000ULL
#define OV08X40_EXT_CLK 19200000
#define OV08X40_DATA_LANES 4
@@ -2392,26 +1252,30 @@ static const struct ov08x40_mode supported_modes[] = {
.height = 2416,
.vts_def = OV08X40_VTS_30FPS,
.vts_min = OV08X40_VTS_30FPS,
- .hts = 640,
+ .llp = 0x10aa, /* in normal mode, tline time = 2 * HTS / SCLK */
.lanes = 4,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_3856x2416_regs),
.regs = mode_3856x2416_regs,
},
.link_freq_index = OV08X40_LINK_FREQ_400MHZ_INDEX,
+ .exposure_shift = 1,
+ .exposure_margin = OV08X40_EXPOSURE_MAX_MARGIN,
},
{
.width = 1928,
.height = 1208,
.vts_def = OV08X40_VTS_BIN_30FPS,
.vts_min = OV08X40_VTS_BIN_30FPS,
- .hts = 720,
+ .llp = 0x960,
.lanes = 4,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_1928x1208_regs),
.regs = mode_1928x1208_regs,
},
.link_freq_index = OV08X40_LINK_FREQ_400MHZ_INDEX,
+ .exposure_shift = 0,
+ .exposure_margin = OV08X40_EXPOSURE_BIN_MAX_MARGIN,
},
};
@@ -2432,6 +1296,9 @@ struct ov08x40 {
/* Mutex for serialized access */
struct mutex mutex;
+
+ /* True if the device has been identified */
+ bool identified;
};
#define to_ov08x40(_sd) container_of(_sd, struct ov08x40, sd)
@@ -2472,6 +1339,40 @@ static int ov08x40_read_reg(struct ov08x40 *ov08x,
return 0;
}
+static int ov08x40_burst_fill_regs(struct ov08x40 *ov08x, u16 first_reg,
+ u16 last_reg, u8 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
+ struct i2c_msg msgs;
+ size_t i, num_regs;
+ int ret;
+
+ num_regs = last_reg - first_reg + 1;
+ msgs.addr = client->addr;
+ msgs.flags = 0;
+ msgs.len = 2 + num_regs;
+ msgs.buf = kmalloc(msgs.len, GFP_KERNEL);
+
+ if (!msgs.buf)
+ return -ENOMEM;
+
+ put_unaligned_be16(first_reg, msgs.buf);
+
+ for (i = 0; i < num_regs; ++i)
+ msgs.buf[2 + i] = val;
+
+ ret = i2c_transfer(client->adapter, &msgs, 1);
+
+ kfree(msgs.buf);
+
+ if (ret != 1) {
+ dev_err(&client->dev, "Failed regs transferred: %d\n", ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/* Write registers up to 4 at a time */
static int ov08x40_write_reg(struct ov08x40 *ov08x,
u16 reg, u32 len, u32 __val)
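The new ov08x40_burst_fill_regs() above packs one I2C message whose payload is the 16-bit big-endian start address followed by one copy of the fill value per register. A minimal user-space sketch of that buffer layout (the start address and range below are made up for illustration; they are not the driver's real XTALK window):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative only: mirrors the buffer built for the burst write. */
static uint8_t *build_burst(uint16_t first_reg, uint16_t last_reg,
			    uint8_t val, size_t *len)
{
	size_t num_regs = last_reg - first_reg + 1;
	uint8_t *buf = malloc(2 + num_regs);

	if (!buf)
		return NULL;
	buf[0] = first_reg >> 8;	/* put_unaligned_be16() equivalent */
	buf[1] = first_reg & 0xff;
	memset(buf + 2, val, num_regs);	/* one byte of 'val' per register */
	*len = 2 + num_regs;
	return buf;
}

int main(void)
{
	size_t len;
	/* 0x5d00..0x5d03 is a made-up range, not the real XTALK window. */
	uint8_t *buf = build_burst(0x5d00, 0x5d03, 0x75, &len);

	if (!buf)
		return 1;
	for (size_t i = 0; i < len; i++)
		printf("%02x ", buf[i]);
	printf("\n");			/* prints: 5d 00 75 75 75 75 */
	free(buf);
	return 0;
}

The single message with an in-line fill value is what lets the driver drop the several-thousand-entry {reg, 0x75} table removed at the top of this patch.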
@@ -2664,13 +1565,23 @@ static int ov08x40_set_ctrl(struct v4l2_ctrl *ctrl)
struct ov08x40, ctrl_handler);
struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
s64 max;
+ int exp;
+ int fll;
int ret = 0;
/* Propagate change of current control to all related controls */
switch (ctrl->id) {
case V4L2_CID_VBLANK:
/* Update max exposure while meeting expected vblanking */
- max = ov08x->cur_mode->height + ctrl->val - OV08X40_EXPOSURE_MAX_MARGIN;
+ /*
+ * Because in normal mode 1 HTS = 0.5 tline and
+ * fps = sclk / hts / vts, the VTS value needs
+ * to be doubled.
+ */
+ max = ((ov08x->cur_mode->height + ctrl->val) <<
+ ov08x->cur_mode->exposure_shift) -
+ ov08x->cur_mode->exposure_margin;
+
__v4l2_ctrl_modify_range(ov08x->exposure,
ov08x->exposure->minimum,
max, ov08x->exposure->step, max);
@@ -2694,15 +1605,20 @@ static int ov08x40_set_ctrl(struct v4l2_ctrl *ctrl)
ret = ov08x40_update_digital_gain(ov08x, ctrl->val);
break;
case V4L2_CID_EXPOSURE:
+ exp = (ctrl->val << ov08x->cur_mode->exposure_shift) -
+ ov08x->cur_mode->exposure_margin;
+
ret = ov08x40_write_reg(ov08x, OV08X40_REG_EXPOSURE,
OV08X40_REG_VALUE_24BIT,
- ctrl->val);
+ exp);
break;
case V4L2_CID_VBLANK:
+ fll = ((ov08x->cur_mode->height + ctrl->val) <<
+ ov08x->cur_mode->exposure_shift);
+
ret = ov08x40_write_reg(ov08x, OV08X40_REG_VTS,
OV08X40_REG_VALUE_16BIT,
- ov08x->cur_mode->height
- + ctrl->val);
+ fll);
break;
case V4L2_CID_TEST_PATTERN:
ret = ov08x40_enable_test_pattern(ov08x, ctrl->val);
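With the exposure_shift handling above, both the exposure and the frame length line (VTS) are programmed in half-line units in full-resolution mode (shift = 1) and passed through unchanged in binned mode (shift = 0). A standalone arithmetic sketch, using hypothetical control values only (the real margin constant is not shown in this hunk):

#include <stdio.h>

/* Illustration of the shift/margin arithmetic; all values hypothetical. */
int main(void)
{
	unsigned int height = 2416;	/* full-resolution mode height */
	unsigned int shift = 1;		/* exposure_shift in normal mode */
	unsigned int margin = 8;	/* placeholder, not the real MAX_MARGIN */
	unsigned int vblank = 40;	/* hypothetical V4L2_CID_VBLANK value */
	unsigned int expo = 1200;	/* hypothetical V4L2_CID_EXPOSURE value */

	/* What gets written to OV08X40_REG_VTS (frame length line). */
	unsigned int fll = (height + vblank) << shift;
	/* What gets written to OV08X40_REG_EXPOSURE. */
	unsigned int exp = (expo << shift) - margin;
	/* New upper bound passed to __v4l2_ctrl_modify_range(). */
	unsigned int max = ((height + vblank) << shift) - margin;

	printf("fll=%u exp=%u max=%u\n", fll, exp, max);	/* 4912 2392 4904 */
	return 0;
}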
@@ -2812,6 +1728,7 @@ ov08x40_set_pad_format(struct v4l2_subdev *sd,
s64 h_blank;
s64 pixel_rate;
s64 link_freq;
+ u64 steps;
mutex_lock(&ov08x->mutex);
@@ -2839,13 +1756,22 @@ ov08x40_set_pad_format(struct v4l2_subdev *sd,
ov08x->cur_mode->height;
vblank_min = ov08x->cur_mode->vts_min -
ov08x->cur_mode->height;
+
+ /*
+ * In normal mode, the frame length line should be aligned to a
+ * multiple of 4, per the sensor vendor's guidance.
+ */
+ steps = mode->exposure_shift == 1 ? 4 : 1;
+
__v4l2_ctrl_modify_range(ov08x->vblank, vblank_min,
OV08X40_VTS_MAX
- ov08x->cur_mode->height,
- 1,
+ steps,
vblank_def);
__v4l2_ctrl_s_ctrl(ov08x->vblank, vblank_def);
- h_blank = ov08x->cur_mode->hts;
+
+ h_blank = ov08x->cur_mode->llp - ov08x->cur_mode->width;
+
__v4l2_ctrl_modify_range(ov08x->hblank, h_blank,
h_blank, 1, h_blank);
}
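Horizontal blanking is now derived from the new llp field instead of a fixed hts value. Using the two llp values from supported_modes[] in this patch, the resulting h_blank works out as follows:

#include <stdio.h>

/* h_blank as now computed in ov08x40_set_pad_format(). */
int main(void)
{
	/* Values taken from supported_modes[] in this patch. */
	unsigned int llp_full = 0x10aa, width_full = 3856;
	unsigned int llp_bin  = 0x960,  width_bin  = 1928;

	printf("full: h_blank = %u\n", llp_full - width_full);	/* 410 */
	printf("bin:  h_blank = %u\n", llp_bin - width_bin);	/* 472 */
	return 0;
}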
@@ -2887,6 +1813,22 @@ static int ov08x40_start_streaming(struct ov08x40 *ov08x)
return ret;
}
+ /* Use an I2C burst write to fill the XTALK registers in full-size mode */
+ if (ov08x->cur_mode->exposure_shift == 1) {
+ ret = ov08x40_burst_fill_regs(ov08x, OV08X40_REG_XTALK_FIRST_A,
+ OV08X40_REG_XTALK_LAST_A, 0x75);
+ if (ret == 0)
+ ret = ov08x40_burst_fill_regs(ov08x,
+ OV08X40_REG_XTALK_FIRST_B,
+ OV08X40_REG_XTALK_LAST_B,
+ 0x75);
+ }
+
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set regs\n", __func__);
+ return ret;
+ }
+
/* Apply customized values from user */
ret = __v4l2_ctrl_handler_setup(ov08x->sd.ctrl_handler);
if (ret)
@@ -2948,6 +1890,9 @@ static int ov08x40_identify_module(struct ov08x40 *ov08x)
int ret;
u32 val;
+ if (ov08x->identified)
+ return 0;
+
ret = ov08x40_read_reg(ov08x, OV08X40_REG_CHIP_ID,
OV08X40_REG_VALUE_24BIT, &val);
if (ret)
@@ -2956,9 +1901,11 @@ static int ov08x40_identify_module(struct ov08x40 *ov08x)
if (val != OV08X40_CHIP_ID) {
dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
OV08X40_CHIP_ID, val);
- return -EIO;
+ return -ENXIO;
}
+ ov08x->identified = true;
+
return 0;
}
@@ -3035,7 +1982,8 @@ static int ov08x40_init_controls(struct ov08x40 *ov08x)
OV08X40_VTS_MAX - mode->height, 1,
vblank_def);
- hblank = ov08x->cur_mode->hts;
+ hblank = ov08x->cur_mode->llp - ov08x->cur_mode->width;
+
ov08x->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov08x40_ctrl_ops,
V4L2_CID_HBLANK,
hblank, hblank, 1, hblank);
@@ -3175,6 +2123,7 @@ static int ov08x40_probe(struct i2c_client *client)
{
struct ov08x40 *ov08x;
int ret;
+ bool full_power;
/* Check HW config */
ret = ov08x40_check_hwcfg(&client->dev);
@@ -3190,11 +2139,14 @@ static int ov08x40_probe(struct i2c_client *client)
/* Initialize subdev */
v4l2_i2c_subdev_init(&ov08x->sd, client, &ov08x40_subdev_ops);
- /* Check module identity */
- ret = ov08x40_identify_module(ov08x);
- if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d\n", ret);
- return ret;
+ full_power = acpi_dev_state_d0(&client->dev);
+ if (full_power) {
+ /* Check module identity */
+ ret = ov08x40_identify_module(ov08x);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ return ret;
+ }
}
/* Set default mode to max resolution */
@@ -3222,11 +2174,8 @@ static int ov08x40_probe(struct i2c_client *client)
if (ret < 0)
goto error_media_entity;
- /*
- * Device is already turned on by i2c-core with ACPI domain PM.
- * Enable runtime PM and turn off the device.
- */
- pm_runtime_set_active(&client->dev);
+ if (full_power)
+ pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
pm_runtime_idle(&client->dev);
@@ -3270,11 +2219,13 @@ static struct i2c_driver ov08x40_i2c_driver = {
},
.probe = ov08x40_probe,
.remove = ov08x40_remove,
+ .flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};
module_i2c_driver(ov08x40_i2c_driver);
MODULE_AUTHOR("Jason Chen <jason.z.chen@intel.com>");
+MODULE_AUTHOR("Qingwu Zhang <qingwu.zhang@intel.com>");
MODULE_AUTHOR("Shawn Tu");
MODULE_DESCRIPTION("OmniVision OV08X40 sensor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 1d0ef72a64036..d1653d7431d0e 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1388,7 +1388,7 @@ ov2659_get_pdata(struct i2c_client *client)
if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
return client->dev.platform_data;
- endpoint = of_graph_get_next_endpoint(client->dev.of_node, NULL);
+ endpoint = of_graph_get_endpoint_by_regs(client->dev.of_node, 0, -1);
if (!endpoint)
return NULL;
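This and several of the following sensor patches replace of_graph_get_next_endpoint(node, NULL) with of_graph_get_endpoint_by_regs(node, 0, -1), which looks the endpoint up by explicit port/endpoint reg numbers (-1 meaning "any") rather than relying on iteration order. A hedged sketch of the resulting DT parsing pattern, assuming the usual <linux/of_graph.h> helpers:

#include <linux/of.h>
#include <linux/of_graph.h>

/* Sketch only: fetch an endpoint under 'port@0' of a sensor node. */
static struct device_node *get_sensor_endpoint(struct device_node *node)
{
	struct device_node *ep;

	/* port 0, any endpoint; either ID may be -1 to mean "don't care" */
	ep = of_graph_get_endpoint_by_regs(node, 0, -1);
	if (!ep)
		return NULL;

	/* ... endpoint properties parsed by the caller ... */
	return ep;	/* caller must of_node_put() when done */
}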
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index a26ac11c989d7..3b22b9e127873 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -118,7 +118,6 @@ static inline struct ov5645 *to_ov5645(struct v4l2_subdev *sd)
static const struct reg_value ov5645_global_init_setting[] = {
{ 0x3103, 0x11 },
- { 0x3008, 0x82 },
{ 0x3008, 0x42 },
{ 0x3103, 0x03 },
{ 0x3503, 0x07 },
@@ -627,6 +626,10 @@ static int ov5645_set_register_array(struct ov5645 *ov5645,
ret = ov5645_write_reg(ov5645, settings->reg, settings->val);
if (ret < 0)
return ret;
+
+ if (settings->reg == OV5645_SYSTEM_CTRL0 &&
+ settings->val == OV5645_SYSTEM_CTRL0_START)
+ usleep_range(1000, 2000);
}
return 0;
@@ -1056,7 +1059,7 @@ static int ov5645_probe(struct i2c_client *client)
ov5645->i2c_client = client;
ov5645->dev = dev;
- endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
if (!endpoint) {
dev_err(dev, "endpoint node not found\n");
return -EINVAL;
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index 96c0fd4ff5abf..7e1ecdf2485f7 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -1363,7 +1363,7 @@ static int ov5647_parse_dt(struct ov5647 *sensor, struct device_node *np)
struct device_node *ep;
int ret;
- ep = of_graph_get_next_endpoint(np, NULL);
+ ep = of_graph_get_endpoint_by_regs(np, 0, -1);
if (!ep)
return -EINVAL;
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index af8d01f78c32a..cf6be509af33b 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1568,7 +1568,7 @@ static int s5c73m3_get_dt_data(struct s5c73m3 *state)
"failed to request gpio S5C73M3_RST\n");
gpiod_set_consumer_name(state->reset, "S5C73M3_RST");
- node_ep = of_graph_get_next_endpoint(node, NULL);
+ node_ep = of_graph_get_endpoint_by_regs(node, 0, -1);
if (!node_ep) {
dev_warn(dev, "no endpoint defined for node: %pOF\n", node);
return 0;
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index de079d2c9282b..6b11039c35798 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -1849,7 +1849,7 @@ static int s5k5baf_parse_device_node(struct s5k5baf *state, struct device *dev)
state->mclk_frequency);
}
- node_ep = of_graph_get_next_endpoint(node, NULL);
+ node_ep = of_graph_get_endpoint_by_regs(node, 0, -1);
if (!node_ep) {
dev_err(dev, "no endpoint defined at node %pOF\n", node);
return -EINVAL;
diff --git a/drivers/media/i2c/st-vgxy61.c b/drivers/media/i2c/st-vgxy61.c
index e4d37a1977240..b9e7c57027b1b 100644
--- a/drivers/media/i2c/st-vgxy61.c
+++ b/drivers/media/i2c/st-vgxy61.c
@@ -12,6 +12,7 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/units.h>
@@ -19,79 +20,74 @@
#include <media/mipi-csi2.h>
#include <media/v4l2-async.h>
+#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
-#define VGXY61_REG_8BIT(n) ((1 << 16) | (n))
-#define VGXY61_REG_16BIT(n) ((2 << 16) | (n))
-#define VGXY61_REG_32BIT(n) ((4 << 16) | (n))
-#define VGXY61_REG_SIZE_SHIFT 16
-#define VGXY61_REG_ADDR_MASK 0xffff
-
-#define VGXY61_REG_MODEL_ID VGXY61_REG_16BIT(0x0000)
+#define VGXY61_REG_MODEL_ID CCI_REG16_LE(0x0000)
#define VG5661_MODEL_ID 0x5661
#define VG5761_MODEL_ID 0x5761
-#define VGXY61_REG_REVISION VGXY61_REG_16BIT(0x0002)
-#define VGXY61_REG_FWPATCH_REVISION VGXY61_REG_16BIT(0x0014)
-#define VGXY61_REG_FWPATCH_START_ADDR VGXY61_REG_8BIT(0x2000)
-#define VGXY61_REG_SYSTEM_FSM VGXY61_REG_8BIT(0x0020)
+#define VGXY61_REG_REVISION CCI_REG16_LE(0x0002)
+#define VGXY61_REG_FWPATCH_REVISION CCI_REG16_LE(0x0014)
+#define VGXY61_REG_FWPATCH_START_ADDR CCI_REG8(0x2000)
+#define VGXY61_REG_SYSTEM_FSM CCI_REG8(0x0020)
#define VGXY61_SYSTEM_FSM_SW_STBY 0x03
#define VGXY61_SYSTEM_FSM_STREAMING 0x04
-#define VGXY61_REG_NVM VGXY61_REG_8BIT(0x0023)
+#define VGXY61_REG_NVM CCI_REG8(0x0023)
#define VGXY61_NVM_OK 0x04
-#define VGXY61_REG_STBY VGXY61_REG_8BIT(0x0201)
+#define VGXY61_REG_STBY CCI_REG8(0x0201)
#define VGXY61_STBY_NO_REQ 0
#define VGXY61_STBY_REQ_TMP_READ BIT(2)
-#define VGXY61_REG_STREAMING VGXY61_REG_8BIT(0x0202)
+#define VGXY61_REG_STREAMING CCI_REG8(0x0202)
#define VGXY61_STREAMING_NO_REQ 0
#define VGXY61_STREAMING_REQ_STOP BIT(0)
#define VGXY61_STREAMING_REQ_START BIT(1)
-#define VGXY61_REG_EXT_CLOCK VGXY61_REG_32BIT(0x0220)
-#define VGXY61_REG_CLK_PLL_PREDIV VGXY61_REG_8BIT(0x0224)
-#define VGXY61_REG_CLK_SYS_PLL_MULT VGXY61_REG_8BIT(0x0225)
-#define VGXY61_REG_GPIO_0_CTRL VGXY61_REG_8BIT(0x0236)
-#define VGXY61_REG_GPIO_1_CTRL VGXY61_REG_8BIT(0x0237)
-#define VGXY61_REG_GPIO_2_CTRL VGXY61_REG_8BIT(0x0238)
-#define VGXY61_REG_GPIO_3_CTRL VGXY61_REG_8BIT(0x0239)
-#define VGXY61_REG_SIGNALS_POLARITY_CTRL VGXY61_REG_8BIT(0x023b)
-#define VGXY61_REG_LINE_LENGTH VGXY61_REG_16BIT(0x0300)
-#define VGXY61_REG_ORIENTATION VGXY61_REG_8BIT(0x0302)
-#define VGXY61_REG_VT_CTRL VGXY61_REG_8BIT(0x0304)
-#define VGXY61_REG_FORMAT_CTRL VGXY61_REG_8BIT(0x0305)
-#define VGXY61_REG_OIF_CTRL VGXY61_REG_16BIT(0x0306)
-#define VGXY61_REG_OIF_ROI0_CTRL VGXY61_REG_8BIT(0x030a)
-#define VGXY61_REG_ROI0_START_H VGXY61_REG_16BIT(0x0400)
-#define VGXY61_REG_ROI0_START_V VGXY61_REG_16BIT(0x0402)
-#define VGXY61_REG_ROI0_END_H VGXY61_REG_16BIT(0x0404)
-#define VGXY61_REG_ROI0_END_V VGXY61_REG_16BIT(0x0406)
-#define VGXY61_REG_PATGEN_CTRL VGXY61_REG_32BIT(0x0440)
+#define VGXY61_REG_EXT_CLOCK CCI_REG32_LE(0x0220)
+#define VGXY61_REG_CLK_PLL_PREDIV CCI_REG8(0x0224)
+#define VGXY61_REG_CLK_SYS_PLL_MULT CCI_REG8(0x0225)
+#define VGXY61_REG_GPIO_0_CTRL CCI_REG8(0x0236)
+#define VGXY61_REG_GPIO_1_CTRL CCI_REG8(0x0237)
+#define VGXY61_REG_GPIO_2_CTRL CCI_REG8(0x0238)
+#define VGXY61_REG_GPIO_3_CTRL CCI_REG8(0x0239)
+#define VGXY61_REG_SIGNALS_POLARITY_CTRL CCI_REG8(0x023b)
+#define VGXY61_REG_LINE_LENGTH CCI_REG16_LE(0x0300)
+#define VGXY61_REG_ORIENTATION CCI_REG8(0x0302)
+#define VGXY61_REG_VT_CTRL CCI_REG8(0x0304)
+#define VGXY61_REG_FORMAT_CTRL CCI_REG8(0x0305)
+#define VGXY61_REG_OIF_CTRL CCI_REG16_LE(0x0306)
+#define VGXY61_REG_OIF_ROI0_CTRL CCI_REG8(0x030a)
+#define VGXY61_REG_ROI0_START_H CCI_REG16_LE(0x0400)
+#define VGXY61_REG_ROI0_START_V CCI_REG16_LE(0x0402)
+#define VGXY61_REG_ROI0_END_H CCI_REG16_LE(0x0404)
+#define VGXY61_REG_ROI0_END_V CCI_REG16_LE(0x0406)
+#define VGXY61_REG_PATGEN_CTRL CCI_REG32_LE(0x0440)
#define VGXY61_PATGEN_LONG_ENABLE BIT(16)
#define VGXY61_PATGEN_SHORT_ENABLE BIT(0)
#define VGXY61_PATGEN_LONG_TYPE_SHIFT 18
#define VGXY61_PATGEN_SHORT_TYPE_SHIFT 4
-#define VGXY61_REG_FRAME_CONTENT_CTRL VGXY61_REG_8BIT(0x0478)
-#define VGXY61_REG_COARSE_EXPOSURE_LONG VGXY61_REG_16BIT(0x0500)
-#define VGXY61_REG_COARSE_EXPOSURE_SHORT VGXY61_REG_16BIT(0x0504)
-#define VGXY61_REG_ANALOG_GAIN VGXY61_REG_8BIT(0x0508)
-#define VGXY61_REG_DIGITAL_GAIN_LONG VGXY61_REG_16BIT(0x050a)
-#define VGXY61_REG_DIGITAL_GAIN_SHORT VGXY61_REG_16BIT(0x0512)
-#define VGXY61_REG_FRAME_LENGTH VGXY61_REG_16BIT(0x051a)
-#define VGXY61_REG_SIGNALS_CTRL VGXY61_REG_16BIT(0x0522)
+#define VGXY61_REG_FRAME_CONTENT_CTRL CCI_REG8(0x0478)
+#define VGXY61_REG_COARSE_EXPOSURE_LONG CCI_REG16_LE(0x0500)
+#define VGXY61_REG_COARSE_EXPOSURE_SHORT CCI_REG16_LE(0x0504)
+#define VGXY61_REG_ANALOG_GAIN CCI_REG8(0x0508)
+#define VGXY61_REG_DIGITAL_GAIN_LONG CCI_REG16_LE(0x050a)
+#define VGXY61_REG_DIGITAL_GAIN_SHORT CCI_REG16_LE(0x0512)
+#define VGXY61_REG_FRAME_LENGTH CCI_REG16_LE(0x051a)
+#define VGXY61_REG_SIGNALS_CTRL CCI_REG16_LE(0x0522)
#define VGXY61_SIGNALS_GPIO_ID_SHIFT 4
-#define VGXY61_REG_READOUT_CTRL VGXY61_REG_8BIT(0x0530)
-#define VGXY61_REG_HDR_CTRL VGXY61_REG_8BIT(0x0532)
-#define VGXY61_REG_PATGEN_LONG_DATA_GR VGXY61_REG_16BIT(0x092c)
-#define VGXY61_REG_PATGEN_LONG_DATA_R VGXY61_REG_16BIT(0x092e)
-#define VGXY61_REG_PATGEN_LONG_DATA_B VGXY61_REG_16BIT(0x0930)
-#define VGXY61_REG_PATGEN_LONG_DATA_GB VGXY61_REG_16BIT(0x0932)
-#define VGXY61_REG_PATGEN_SHORT_DATA_GR VGXY61_REG_16BIT(0x0950)
-#define VGXY61_REG_PATGEN_SHORT_DATA_R VGXY61_REG_16BIT(0x0952)
-#define VGXY61_REG_PATGEN_SHORT_DATA_B VGXY61_REG_16BIT(0x0954)
-#define VGXY61_REG_PATGEN_SHORT_DATA_GB VGXY61_REG_16BIT(0x0956)
-#define VGXY61_REG_BYPASS_CTRL VGXY61_REG_8BIT(0x0a60)
+#define VGXY61_REG_READOUT_CTRL CCI_REG8(0x0530)
+#define VGXY61_REG_HDR_CTRL CCI_REG8(0x0532)
+#define VGXY61_REG_PATGEN_LONG_DATA_GR CCI_REG16_LE(0x092c)
+#define VGXY61_REG_PATGEN_LONG_DATA_R CCI_REG16_LE(0x092e)
+#define VGXY61_REG_PATGEN_LONG_DATA_B CCI_REG16_LE(0x0930)
+#define VGXY61_REG_PATGEN_LONG_DATA_GB CCI_REG16_LE(0x0932)
+#define VGXY61_REG_PATGEN_SHORT_DATA_GR CCI_REG16_LE(0x0950)
+#define VGXY61_REG_PATGEN_SHORT_DATA_R CCI_REG16_LE(0x0952)
+#define VGXY61_REG_PATGEN_SHORT_DATA_B CCI_REG16_LE(0x0954)
+#define VGXY61_REG_PATGEN_SHORT_DATA_GB CCI_REG16_LE(0x0956)
+#define VGXY61_REG_BYPASS_CTRL CCI_REG8(0x0a60)
#define VGX661_WIDTH 1464
#define VGX661_HEIGHT 1104
@@ -384,6 +380,7 @@ static const struct vgxy61_mode_info vgx761_mode_data[] = {
struct vgxy61_dev {
struct i2c_client *i2c_client;
+ struct regmap *regmap;
struct v4l2_subdev sd;
struct media_pad pad;
struct regulator_bulk_data supplies[ARRAY_SIZE(vgxy61_supply_name)];
@@ -510,82 +507,6 @@ static unsigned int get_chunk_size(struct vgxy61_dev *sensor)
return max(max_write_len, 1);
}
-static int vgxy61_read_multiple(struct vgxy61_dev *sensor, u32 reg,
- unsigned int len)
-{
- struct i2c_client *client = sensor->i2c_client;
- struct i2c_msg msg[2];
- u8 buf[2];
- u8 val[sizeof(u32)] = {0};
- int ret;
-
- if (len > sizeof(u32))
- return -EINVAL;
- buf[0] = reg >> 8;
- buf[1] = reg & 0xff;
-
- msg[0].addr = client->addr;
- msg[0].flags = client->flags;
- msg[0].buf = buf;
- msg[0].len = sizeof(buf);
-
- msg[1].addr = client->addr;
- msg[1].flags = client->flags | I2C_M_RD;
- msg[1].buf = val;
- msg[1].len = len;
-
- ret = i2c_transfer(client->adapter, msg, 2);
- if (ret < 0) {
- dev_dbg(&client->dev, "%s: %x i2c_transfer, reg: %x => %d\n",
- __func__, client->addr, reg, ret);
- return ret;
- }
-
- return get_unaligned_le32(val);
-}
-
-static inline int vgxy61_read_reg(struct vgxy61_dev *sensor, u32 reg)
-{
- return vgxy61_read_multiple(sensor, reg & VGXY61_REG_ADDR_MASK,
- (reg >> VGXY61_REG_SIZE_SHIFT) & 7);
-}
-
-static int vgxy61_write_multiple(struct vgxy61_dev *sensor, u32 reg,
- const u8 *data, unsigned int len, int *err)
-{
- struct i2c_client *client = sensor->i2c_client;
- struct i2c_msg msg;
- u8 buf[VGXY61_WRITE_MULTIPLE_CHUNK_MAX + 2];
- unsigned int i;
- int ret;
-
- if (err && *err)
- return *err;
-
- if (len > VGXY61_WRITE_MULTIPLE_CHUNK_MAX)
- return -EINVAL;
- buf[0] = reg >> 8;
- buf[1] = reg & 0xff;
- for (i = 0; i < len; i++)
- buf[i + 2] = data[i];
-
- msg.addr = client->addr;
- msg.flags = client->flags;
- msg.buf = buf;
- msg.len = len + 2;
-
- ret = i2c_transfer(client->adapter, &msg, 1);
- if (ret < 0) {
- dev_dbg(&client->dev, "%s: i2c_transfer, reg: %x => %d\n",
- __func__, reg, ret);
- if (err)
- *err = ret;
- return ret;
- }
-
- return 0;
-}
-
static int vgxy61_write_array(struct vgxy61_dev *sensor, u32 reg,
unsigned int nb, const u8 *array)
{
@@ -595,7 +516,8 @@ static int vgxy61_write_array(struct vgxy61_dev *sensor, u32 reg,
while (nb) {
sz = min(nb, chunk_size);
- ret = vgxy61_write_multiple(sensor, reg, array, sz, NULL);
+ ret = regmap_bulk_write(sensor->regmap, CCI_REG_ADDR(reg),
+ array, sz);
if (ret < 0)
return ret;
nb -= sz;
@@ -606,24 +528,17 @@ static int vgxy61_write_array(struct vgxy61_dev *sensor, u32 reg,
return 0;
}
-static inline int vgxy61_write_reg(struct vgxy61_dev *sensor, u32 reg, u32 val,
- int *err)
-{
- return vgxy61_write_multiple(sensor, reg & VGXY61_REG_ADDR_MASK,
- (u8 *)&val,
- (reg >> VGXY61_REG_SIZE_SHIFT) & 7, err);
-}
-
static int vgxy61_poll_reg(struct vgxy61_dev *sensor, u32 reg, u8 poll_val,
unsigned int timeout_ms)
{
const unsigned int loop_delay_ms = 10;
+ u64 val;
int ret;
- return read_poll_timeout(vgxy61_read_reg, ret,
- ((ret < 0) || (ret == poll_val)),
+ return read_poll_timeout(cci_read, ret,
+ ((ret < 0) || (val == poll_val)),
loop_delay_ms * 1000, timeout_ms * 1000,
- false, sensor, reg);
+ false, sensor->regmap, reg, &val, NULL);
}
static int vgxy61_wait_state(struct vgxy61_dev *sensor, int state,
@@ -662,11 +577,11 @@ static int vgxy61_apply_exposure(struct vgxy61_dev *sensor)
int ret = 0;
/* We first set expo to zero to avoid forbidden parameters couple */
- vgxy61_write_reg(sensor, VGXY61_REG_COARSE_EXPOSURE_SHORT, 0, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_COARSE_EXPOSURE_LONG,
- sensor->expo_long, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_COARSE_EXPOSURE_SHORT,
- sensor->expo_short, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_COARSE_EXPOSURE_SHORT, 0, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_COARSE_EXPOSURE_LONG,
+ sensor->expo_long, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_COARSE_EXPOSURE_SHORT,
+ sensor->expo_short, &ret);
return ret;
}
@@ -714,7 +629,7 @@ static int vgxy61_try_fmt_internal(struct v4l2_subdev *sd,
const struct vgxy61_mode_info **new_mode)
{
struct vgxy61_dev *sensor = to_vgxy61_dev(sd);
- const struct vgxy61_mode_info *mode = sensor->sensor_modes;
+ const struct vgxy61_mode_info *mode;
unsigned int index;
for (index = 0; index < ARRAY_SIZE(vgxy61_supported_codes); index++) {
@@ -827,8 +742,8 @@ static int vgxy61_update_analog_gain(struct vgxy61_dev *sensor, u32 target)
sensor->analog_gain = target;
if (sensor->streaming)
- return vgxy61_write_reg(sensor, VGXY61_REG_ANALOG_GAIN, target,
- NULL);
+ return cci_write(sensor->regmap, VGXY61_REG_ANALOG_GAIN, target,
+ NULL);
return 0;
}
@@ -842,10 +757,10 @@ static int vgxy61_apply_digital_gain(struct vgxy61_dev *sensor,
* DIGITAL_GAIN_SHORT_CH0 is enough to configure the gain of all
* four sub pixels.
*/
- vgxy61_write_reg(sensor, VGXY61_REG_DIGITAL_GAIN_LONG, digital_gain,
- &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_DIGITAL_GAIN_SHORT, digital_gain,
- &ret);
+ cci_write(sensor->regmap, VGXY61_REG_DIGITAL_GAIN_LONG, digital_gain,
+ &ret);
+ cci_write(sensor->regmap, VGXY61_REG_DIGITAL_GAIN_SHORT, digital_gain,
+ &ret);
return ret;
}
@@ -870,7 +785,7 @@ static int vgxy61_apply_patgen(struct vgxy61_dev *sensor, u32 index)
if (pattern)
reg |= VGXY61_PATGEN_LONG_ENABLE | VGXY61_PATGEN_SHORT_ENABLE;
- return vgxy61_write_reg(sensor, VGXY61_REG_PATGEN_CTRL, reg, NULL);
+ return cci_write(sensor->regmap, VGXY61_REG_PATGEN_CTRL, reg, NULL);
}
static int vgxy61_update_patgen(struct vgxy61_dev *sensor, u32 pattern)
@@ -887,15 +802,13 @@ static int vgxy61_apply_gpiox_strobe_mode(struct vgxy61_dev *sensor,
unsigned int idx)
{
static const u8 index2val[] = {0x0, 0x1, 0x3};
- int reg;
+ u16 mask, val;
- reg = vgxy61_read_reg(sensor, VGXY61_REG_SIGNALS_CTRL);
- if (reg < 0)
- return reg;
- reg &= ~(0xf << (idx * VGXY61_SIGNALS_GPIO_ID_SHIFT));
- reg |= index2val[mode] << (idx * VGXY61_SIGNALS_GPIO_ID_SHIFT);
+ mask = 0xf << (idx * VGXY61_SIGNALS_GPIO_ID_SHIFT);
+ val = index2val[mode] << (idx * VGXY61_SIGNALS_GPIO_ID_SHIFT);
- return vgxy61_write_reg(sensor, VGXY61_REG_SIGNALS_CTRL, reg, NULL);
+ return cci_update_bits(sensor->regmap, VGXY61_REG_SIGNALS_CTRL,
+ mask, val, NULL);
}
static int vgxy61_update_gpios_strobe_mode(struct vgxy61_dev *sensor,
@@ -940,12 +853,12 @@ static int vgxy61_update_gpios_strobe_polarity(struct vgxy61_dev *sensor,
if (sensor->streaming)
return -EBUSY;
- vgxy61_write_reg(sensor, VGXY61_REG_GPIO_0_CTRL, polarity << 1, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_GPIO_1_CTRL, polarity << 1, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_GPIO_2_CTRL, polarity << 1, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_GPIO_3_CTRL, polarity << 1, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_SIGNALS_POLARITY_CTRL, polarity,
- &ret);
+ cci_write(sensor->regmap, VGXY61_REG_GPIO_0_CTRL, polarity << 1, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_GPIO_1_CTRL, polarity << 1, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_GPIO_2_CTRL, polarity << 1, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_GPIO_3_CTRL, polarity << 1, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_SIGNALS_POLARITY_CTRL, polarity,
+ &ret);
return ret;
}
@@ -1057,8 +970,8 @@ static int vgxy61_update_exposure(struct vgxy61_dev *sensor, u16 new_expo_long,
static int vgxy61_apply_framelength(struct vgxy61_dev *sensor)
{
- return vgxy61_write_reg(sensor, VGXY61_REG_FRAME_LENGTH,
- sensor->frame_length, NULL);
+ return cci_write(sensor->regmap, VGXY61_REG_FRAME_LENGTH,
+ sensor->frame_length, NULL);
}
static int vgxy61_update_vblank(struct vgxy61_dev *sensor, u16 vblank,
@@ -1086,8 +999,8 @@ static int vgxy61_apply_hdr(struct vgxy61_dev *sensor,
{
static const u8 index2val[] = {0x1, 0x4, 0xa};
- return vgxy61_write_reg(sensor, VGXY61_REG_HDR_CTRL, index2val[index],
- NULL);
+ return cci_write(sensor->regmap, VGXY61_REG_HDR_CTRL, index2val[index],
+ NULL);
}
static int vgxy61_update_hdr(struct vgxy61_dev *sensor,
@@ -1133,16 +1046,16 @@ static int vgxy61_apply_settings(struct vgxy61_dev *sensor)
if (ret)
return ret;
- ret = vgxy61_write_reg(sensor, VGXY61_REG_ANALOG_GAIN,
- sensor->analog_gain, NULL);
+ ret = cci_write(sensor->regmap, VGXY61_REG_ANALOG_GAIN,
+ sensor->analog_gain, NULL);
if (ret)
return ret;
ret = vgxy61_apply_digital_gain(sensor, sensor->digital_gain);
if (ret)
return ret;
- ret = vgxy61_write_reg(sensor, VGXY61_REG_ORIENTATION,
- sensor->hflip | (sensor->vflip << 1), NULL);
+ ret = cci_write(sensor->regmap, VGXY61_REG_ORIENTATION,
+ sensor->hflip | (sensor->vflip << 1), NULL);
if (ret)
return ret;
@@ -1174,19 +1087,19 @@ static int vgxy61_stream_enable(struct vgxy61_dev *sensor)
if (ret)
return ret;
- vgxy61_write_reg(sensor, VGXY61_REG_FORMAT_CTRL,
- get_bpp_by_code(sensor->fmt.code), &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_OIF_ROI0_CTRL,
- get_data_type_by_code(sensor->fmt.code), &ret);
-
- vgxy61_write_reg(sensor, VGXY61_REG_READOUT_CTRL,
- sensor->current_mode->bin_mode, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_ROI0_START_H, crop->left, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_ROI0_END_H,
- crop->left + crop->width - 1, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_ROI0_START_V, crop->top, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_ROI0_END_V,
- crop->top + crop->height - 1, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_FORMAT_CTRL,
+ get_bpp_by_code(sensor->fmt.code), &ret);
+ cci_write(sensor->regmap, VGXY61_REG_OIF_ROI0_CTRL,
+ get_data_type_by_code(sensor->fmt.code), &ret);
+
+ cci_write(sensor->regmap, VGXY61_REG_READOUT_CTRL,
+ sensor->current_mode->bin_mode, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_ROI0_START_H, crop->left, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_ROI0_END_H,
+ crop->left + crop->width - 1, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_ROI0_START_V, crop->top, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_ROI0_END_V,
+ crop->top + crop->height - 1, &ret);
if (ret)
goto err_rpm_put;
@@ -1194,8 +1107,8 @@ static int vgxy61_stream_enable(struct vgxy61_dev *sensor)
if (ret)
goto err_rpm_put;
- ret = vgxy61_write_reg(sensor, VGXY61_REG_STREAMING,
- VGXY61_STREAMING_REQ_START, NULL);
+ ret = cci_write(sensor->regmap, VGXY61_REG_STREAMING,
+ VGXY61_STREAMING_REQ_START, NULL);
if (ret)
goto err_rpm_put;
@@ -1225,8 +1138,8 @@ static int vgxy61_stream_disable(struct vgxy61_dev *sensor)
struct i2c_client *client = v4l2_get_subdevdata(&sensor->sd);
int ret;
- ret = vgxy61_write_reg(sensor, VGXY61_REG_STREAMING,
- VGXY61_STREAMING_REQ_STOP, NULL);
+ ret = cci_write(sensor->regmap, VGXY61_REG_STREAMING,
+ VGXY61_STREAMING_REQ_STOP, NULL);
if (ret)
goto err_str_dis;
@@ -1582,7 +1495,7 @@ static int vgxy61_configure(struct vgxy61_dev *sensor)
{
u32 sensor_freq;
u8 prediv, mult;
- int line_length;
+ u64 line_length;
int ret = 0;
compute_pll_parameters_by_freq(sensor->clk_freq, &prediv, &mult);
@@ -1592,28 +1505,28 @@ static int vgxy61_configure(struct vgxy61_dev *sensor)
/* Video timing ISP path (pixel clock) requires 804/5 mhz = 160 mhz */
sensor->pclk = sensor_freq / 5;
- line_length = vgxy61_read_reg(sensor, VGXY61_REG_LINE_LENGTH);
- if (line_length < 0)
- return line_length;
- sensor->line_length = line_length;
- vgxy61_write_reg(sensor, VGXY61_REG_EXT_CLOCK, sensor->clk_freq, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_CLK_PLL_PREDIV, prediv, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_CLK_SYS_PLL_MULT, mult, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_OIF_CTRL, sensor->oif_ctrl, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_FRAME_CONTENT_CTRL, 0, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_BYPASS_CTRL, 4, &ret);
+ cci_read(sensor->regmap, VGXY61_REG_LINE_LENGTH, &line_length, &ret);
+ if (ret < 0)
+ return ret;
+ sensor->line_length = (u16)line_length;
+ cci_write(sensor->regmap, VGXY61_REG_EXT_CLOCK, sensor->clk_freq, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_CLK_PLL_PREDIV, prediv, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_CLK_SYS_PLL_MULT, mult, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_OIF_CTRL, sensor->oif_ctrl, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_FRAME_CONTENT_CTRL, 0, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_BYPASS_CTRL, 4, &ret);
if (ret)
return ret;
vgxy61_update_gpios_strobe_polarity(sensor, sensor->gpios_polarity);
/* Set pattern generator solid to middle value */
- vgxy61_write_reg(sensor, VGXY61_REG_PATGEN_LONG_DATA_GR, 0x800, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_PATGEN_LONG_DATA_R, 0x800, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_PATGEN_LONG_DATA_B, 0x800, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_PATGEN_LONG_DATA_GB, 0x800, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_PATGEN_SHORT_DATA_GR, 0x800, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_PATGEN_SHORT_DATA_R, 0x800, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_PATGEN_SHORT_DATA_B, 0x800, &ret);
- vgxy61_write_reg(sensor, VGXY61_REG_PATGEN_SHORT_DATA_GB, 0x800, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_PATGEN_LONG_DATA_GR, 0x800, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_PATGEN_LONG_DATA_R, 0x800, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_PATGEN_LONG_DATA_B, 0x800, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_PATGEN_LONG_DATA_GB, 0x800, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_PATGEN_SHORT_DATA_GR, 0x800, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_PATGEN_SHORT_DATA_R, 0x800, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_PATGEN_SHORT_DATA_B, 0x800, &ret);
+ cci_write(sensor->regmap, VGXY61_REG_PATGEN_SHORT_DATA_GB, 0x800, &ret);
if (ret)
return ret;
@@ -1623,37 +1536,33 @@ static int vgxy61_configure(struct vgxy61_dev *sensor)
static int vgxy61_patch(struct vgxy61_dev *sensor)
{
struct i2c_client *client = sensor->i2c_client;
- int patch, ret;
+ u64 patch;
+ int ret;
ret = vgxy61_write_array(sensor, VGXY61_REG_FWPATCH_START_ADDR,
sizeof(patch_array), patch_array);
- if (ret)
- return ret;
-
- ret = vgxy61_write_reg(sensor, VGXY61_REG_STBY, 0x10, NULL);
+ cci_write(sensor->regmap, VGXY61_REG_STBY, 0x10, &ret);
if (ret)
return ret;
ret = vgxy61_poll_reg(sensor, VGXY61_REG_STBY, 0, VGXY61_TIMEOUT_MS);
- if (ret)
+ cci_read(sensor->regmap, VGXY61_REG_FWPATCH_REVISION, &patch, &ret);
+ if (ret < 0)
return ret;
- patch = vgxy61_read_reg(sensor, VGXY61_REG_FWPATCH_REVISION);
- if (patch < 0)
- return patch;
-
if (patch != (VGXY61_FWPATCH_REVISION_MAJOR << 12) +
(VGXY61_FWPATCH_REVISION_MINOR << 8) +
VGXY61_FWPATCH_REVISION_MICRO) {
- dev_err(&client->dev, "bad patch version expected %d.%d.%d got %d.%d.%d\n",
+ dev_err(&client->dev,
+ "bad patch version expected %d.%d.%d got %u.%u.%u\n",
VGXY61_FWPATCH_REVISION_MAJOR,
VGXY61_FWPATCH_REVISION_MINOR,
VGXY61_FWPATCH_REVISION_MICRO,
- patch >> 12, (patch >> 8) & 0x0f, patch & 0xff);
+ (u16)patch >> 12, ((u16)patch >> 8) & 0x0f, (u16)patch & 0xff);
return -ENODEV;
}
- dev_dbg(&client->dev, "patch %d.%d.%d applied\n",
- patch >> 12, (patch >> 8) & 0x0f, patch & 0xff);
+ dev_dbg(&client->dev, "patch %u.%u.%u applied\n",
+ (u16)patch >> 12, ((u16)patch >> 8) & 0x0f, (u16)patch & 0xff);
return 0;
}
@@ -1661,11 +1570,12 @@ static int vgxy61_patch(struct vgxy61_dev *sensor)
static int vgxy61_detect_cut_version(struct vgxy61_dev *sensor)
{
struct i2c_client *client = sensor->i2c_client;
- int device_rev;
+ u64 device_rev;
+ int ret;
- device_rev = vgxy61_read_reg(sensor, VGXY61_REG_REVISION);
- if (device_rev < 0)
- return device_rev;
+ ret = cci_read(sensor->regmap, VGXY61_REG_REVISION, &device_rev, NULL);
+ if (ret < 0)
+ return ret;
switch (device_rev >> 8) {
case 0xA:
@@ -1687,17 +1597,17 @@ static int vgxy61_detect_cut_version(struct vgxy61_dev *sensor)
static int vgxy61_detect(struct vgxy61_dev *sensor)
{
struct i2c_client *client = sensor->i2c_client;
- int id = 0;
- int ret, st;
+ u64 st, id = 0;
+ int ret;
- id = vgxy61_read_reg(sensor, VGXY61_REG_MODEL_ID);
- if (id < 0)
- return id;
+ ret = cci_read(sensor->regmap, VGXY61_REG_MODEL_ID, &id, NULL);
+ if (ret < 0)
+ return ret;
if (id != VG5661_MODEL_ID && id != VG5761_MODEL_ID) {
- dev_warn(&client->dev, "Unsupported sensor id %x\n", id);
+ dev_warn(&client->dev, "Unsupported sensor id %x\n", (u16)id);
return -ENODEV;
}
- dev_dbg(&client->dev, "detected sensor id = 0x%04x\n", id);
+ dev_dbg(&client->dev, "detected sensor id = 0x%04x\n", (u16)id);
sensor->id = id;
ret = vgxy61_wait_state(sensor, VGXY61_SYSTEM_FSM_SW_STBY,
@@ -1705,11 +1615,11 @@ static int vgxy61_detect(struct vgxy61_dev *sensor)
if (ret)
return ret;
- st = vgxy61_read_reg(sensor, VGXY61_REG_NVM);
- if (st < 0)
+ ret = cci_read(sensor->regmap, VGXY61_REG_NVM, &st, NULL);
+ if (ret < 0)
return st;
if (st != VGXY61_NVM_OK)
- dev_warn(&client->dev, "Bad nvm state got %d\n", st);
+ dev_warn(&client->dev, "Bad nvm state got %u\n", (u8)st);
ret = vgxy61_detect_cut_version(sensor);
if (ret)
@@ -1832,6 +1742,12 @@ static int vgxy61_probe(struct i2c_client *client)
sensor->analog_gain = 0;
sensor->digital_gain = 256;
+ sensor->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(sensor->regmap)) {
+ ret = PTR_ERR(sensor->regmap);
+ return dev_err_probe(dev, ret, "Failed to init regmap\n");
+ }
+
handle = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0, 0);
if (!handle) {
dev_err(dev, "handle node not found\n");
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 558152575d102..3192a334aaab5 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -1895,7 +1895,7 @@ static int tc358743_probe_of(struct tc358743_state *state)
return dev_err_probe(dev, PTR_ERR(refclk),
"failed to get refclk\n");
- ep = of_graph_get_next_endpoint(dev->of_node, NULL);
+ ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
if (!ep) {
dev_err(dev, "missing endpoint node\n");
return -EINVAL;
diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
index 106de4271d2ef..d676adc4401bb 100644
--- a/drivers/media/i2c/tc358746.c
+++ b/drivers/media/i2c/tc358746.c
@@ -843,14 +843,14 @@ static unsigned long tc358746_find_pll_settings(struct tc358746 *tc358746,
if (fin < 4 * HZ_PER_MHZ || fin > 40 * HZ_PER_MHZ)
continue;
- tmp = fout * p * postdiv;
+ tmp = fout * postdiv;
do_div(tmp, fin);
mul = tmp;
if (mul > 511)
continue;
tmp = mul * fin;
- do_div(tmp, p * postdiv);
+ do_div(tmp, postdiv);
delta = abs(fout - tmp);
if (delta < min_delta) {
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 1ea703a9909f5..8e4a0718c4b6b 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -2310,7 +2310,7 @@ static int tda1997x_parse_dt(struct tda1997x_state *state)
pdata->vidout_sel_de = DE_FREF_SEL_DE_VHREF;
np = state->client->dev.of_node;
- ep = of_graph_get_next_endpoint(np, NULL);
+ ep = of_graph_get_endpoint_by_regs(np, 0, -1);
if (!ep)
return -EINVAL;
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index 5a561e5bf6598..f9c9c80c33ac1 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -987,7 +987,7 @@ tvp514x_get_pdata(struct i2c_client *client)
if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
return client->dev.platform_data;
- endpoint = of_graph_get_next_endpoint(client->dev.of_node, NULL);
+ endpoint = of_graph_get_endpoint_by_regs(client->dev.of_node, 0, -1);
if (!endpoint)
return NULL;
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 9fc586cfdcd87..64b91aa3c82a8 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -1817,7 +1817,7 @@ static struct regmap_config tvp5150_config = {
.val_bits = 8,
.max_register = 0xff,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.rd_table = &tvp5150_readable_table,
.volatile_reg = tvp5150_volatile_reg,
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 30831b4b56d6b..6a04ffae53432 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -893,7 +893,7 @@ tvp7002_get_pdata(struct i2c_client *client)
if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
return client->dev.platform_data;
- endpoint = of_graph_get_next_endpoint(client->dev.of_node, NULL);
+ endpoint = of_graph_get_endpoint_by_regs(client->dev.of_node, 0, -1);
if (!endpoint)
return NULL;
diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c
index 680fbb3a93402..7f67825c8757f 100644
--- a/drivers/media/mc/mc-devnode.c
+++ b/drivers/media/mc/mc-devnode.c
@@ -63,7 +63,7 @@ static void media_devnode_release(struct device *cd)
pr_debug("%s: Media Devnode Deallocated\n", __func__);
}
-static struct bus_type media_bus_type = {
+static const struct bus_type media_bus_type = {
.name = MEDIA_NAME,
};
@@ -190,7 +190,6 @@ static int media_release(struct inode *inode, struct file *filp)
return value is ignored. */
put_device(&devnode->dev);
- pr_debug("%s: Media Release\n", __func__);
return 0;
}
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index 543a392f86357..0e28b9a7936ef 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -535,14 +535,15 @@ static int media_pipeline_walk_push(struct media_pipeline_walk *walk,
/*
* Move the top entry link cursor to the next link. If all links of the entry
- * have been visited, pop the entry itself.
+ * have been visited, pop the entry itself. Return true if the entry has been
+ * popped.
*/
-static void media_pipeline_walk_pop(struct media_pipeline_walk *walk)
+static bool media_pipeline_walk_pop(struct media_pipeline_walk *walk)
{
struct media_pipeline_walk_entry *entry;
if (WARN_ON(walk->stack.top < 0))
- return;
+ return false;
entry = media_pipeline_walk_top(walk);
@@ -552,7 +553,7 @@ static void media_pipeline_walk_pop(struct media_pipeline_walk *walk)
walk->stack.top);
walk->stack.top--;
- return;
+ return true;
}
entry->links = entry->links->next;
@@ -560,6 +561,8 @@ static void media_pipeline_walk_pop(struct media_pipeline_walk *walk)
dev_dbg(walk->mdev->dev,
"media pipeline: moved entry %u to next link\n",
walk->stack.top);
+
+ return false;
}
/* Free all memory allocated while walking the pipeline. */
@@ -605,30 +608,24 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
struct media_pipeline_walk *walk)
{
struct media_pipeline_walk_entry *entry = media_pipeline_walk_top(walk);
- struct media_pad *pad;
+ struct media_pad *origin;
struct media_link *link;
struct media_pad *local;
struct media_pad *remote;
+ bool last_link;
int ret;
- pad = entry->pad;
+ origin = entry->pad;
link = list_entry(entry->links, typeof(*link), list);
- media_pipeline_walk_pop(walk);
+ last_link = media_pipeline_walk_pop(walk);
dev_dbg(walk->mdev->dev,
"media pipeline: exploring link '%s':%u -> '%s':%u\n",
link->source->entity->name, link->source->index,
link->sink->entity->name, link->sink->index);
- /* Skip links that are not enabled. */
- if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
- dev_dbg(walk->mdev->dev,
- "media pipeline: skipping link (disabled)\n");
- return 0;
- }
-
/* Get the local pad and remote pad. */
- if (link->source->entity == pad->entity) {
+ if (link->source->entity == origin->entity) {
local = link->source;
remote = link->sink;
} else {
@@ -640,25 +637,64 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
* Skip links that originate from a different pad than the incoming pad
* that is not connected internally in the entity to the incoming pad.
*/
- if (pad != local &&
- !media_entity_has_pad_interdep(pad->entity, pad->index, local->index)) {
+ if (origin != local &&
+ !media_entity_has_pad_interdep(origin->entity, origin->index,
+ local->index)) {
dev_dbg(walk->mdev->dev,
"media pipeline: skipping link (no route)\n");
- return 0;
+ goto done;
}
/*
- * Add the local and remote pads of the link to the pipeline and push
- * them to the stack, if they're not already present.
+ * Add the local pad of the link to the pipeline and push it to the
+ * stack, if not already present.
*/
ret = media_pipeline_add_pad(pipe, walk, local);
if (ret)
return ret;
+ /* Similarly, add the remote pad, but only if the link is enabled. */
+ if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: skipping link (disabled)\n");
+ goto done;
+ }
+
ret = media_pipeline_add_pad(pipe, walk, remote);
if (ret)
return ret;
+done:
+ /*
+ * If we're done iterating over links, iterate over pads of the entity.
+ * This is necessary to discover pads that are not connected with any
+ * link. Those are dead ends from a pipeline exploration point of view,
+ * but are still part of the pipeline and need to be added to enable
+ * proper validation.
+ */
+ if (!last_link)
+ return 0;
+
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: adding unconnected pads of '%s'\n",
+ local->entity->name);
+
+ media_entity_for_each_pad(origin->entity, local) {
+ /*
+ * Skip the origin pad (already handled), pad that have links
+ * (already discovered through iterating over links) and pads
+ * not internally connected.
+ */
+ if (origin == local || !local->num_links ||
+ !media_entity_has_pad_interdep(origin->entity, origin->index,
+ local->index))
+ continue;
+
+ ret = media_pipeline_add_pad(pipe, walk, local);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -770,7 +806,6 @@ __must_check int __media_pipeline_start(struct media_pad *pad,
struct media_pad *pad = ppad->pad;
struct media_entity *entity = pad->entity;
bool has_enabled_link = false;
- bool has_link = false;
struct media_link *link;
dev_dbg(mdev->dev, "Validating pad '%s':%u\n", pad->entity->name,
@@ -800,7 +835,6 @@ __must_check int __media_pipeline_start(struct media_pad *pad,
/* Record if the pad has links and enabled links. */
if (link->flags & MEDIA_LNK_FL_ENABLED)
has_enabled_link = true;
- has_link = true;
/*
* Validate the link if it's enabled and has the
@@ -838,7 +872,7 @@ __must_check int __media_pipeline_start(struct media_pad *pad,
* 3. If the pad has the MEDIA_PAD_FL_MUST_CONNECT flag set,
* ensure that it has either no link or an enabled link.
*/
- if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) && has_link &&
+ if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) &&
!has_enabled_link) {
dev_dbg(mdev->dev,
"Pad '%s':%u must be connected by an enabled link\n",
@@ -1038,6 +1072,9 @@ static void __media_entity_remove_link(struct media_entity *entity,
/* Remove the reverse links for a data link. */
if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) == MEDIA_LNK_FL_DATA_LINK) {
+ link->source->num_links--;
+ link->sink->num_links--;
+
if (link->source->entity == entity)
remote = link->sink->entity;
else
@@ -1092,6 +1129,11 @@ media_create_pad_link(struct media_entity *source, u16 source_pad,
struct media_link *link;
struct media_link *backlink;
+ if (flags & MEDIA_LNK_FL_LINK_TYPE)
+ return -EINVAL;
+
+ flags |= MEDIA_LNK_FL_DATA_LINK;
+
if (WARN_ON(!source || !sink) ||
WARN_ON(source_pad >= source->num_pads) ||
WARN_ON(sink_pad >= sink->num_pads))
@@ -1107,7 +1149,7 @@ media_create_pad_link(struct media_entity *source, u16 source_pad,
link->source = &source->pads[source_pad];
link->sink = &sink->pads[sink_pad];
- link->flags = flags & ~MEDIA_LNK_FL_INTERFACE_LINK;
+ link->flags = flags;
/* Initialize graph object embedded at the new link */
media_gobj_create(source->graph_obj.mdev, MEDIA_GRAPH_LINK,
@@ -1138,6 +1180,9 @@ media_create_pad_link(struct media_entity *source, u16 source_pad,
sink->num_links++;
source->num_links++;
+ link->source->num_links++;
+ link->sink->num_links++;
+
return 0;
}
EXPORT_SYMBOL_GPL(media_create_pad_link);
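media_pipeline_walk_pop() now reports whether it removed the whole stack entry, i.e. whether the link just taken was the last one for that pad, and media_pipeline_explore_next_link() uses that as the trigger to also add the entity's routed-but-unconnected pads. A simplified standalone sketch of that "pop tells the caller this was the last item" control flow (the entry type below is invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct entry {
	const char *links[4];	/* remaining links for this pad */
	int next;		/* cursor into links[] */
	int count;
};

/* Advance the cursor; return true when the entry is exhausted ("popped"). */
static bool walk_pop(struct entry *e)
{
	if (e->next + 1 >= e->count)
		return true;	/* last link consumed: caller finishes entity */
	e->next++;
	return false;
}

int main(void)
{
	struct entry e = { { "link A", "link B" }, 0, 2 };
	bool last;

	do {
		const char *link = e.links[e.next];

		last = walk_pop(&e);
		printf("exploring %s\n", link);
		if (last)
			printf("last link: add unconnected pads now\n");
	} while (!last);

	return 0;
}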
diff --git a/drivers/media/pci/bt8xx/bttv-gpio.c b/drivers/media/pci/bt8xx/bttv-gpio.c
index a2b18e2bed1b0..6b7fea50328c2 100644
--- a/drivers/media/pci/bt8xx/bttv-gpio.c
+++ b/drivers/media/pci/bt8xx/bttv-gpio.c
@@ -55,7 +55,7 @@ static void bttv_sub_remove(struct device *dev)
sub->remove(sdev);
}
-struct bus_type bttv_sub_bus_type = {
+const struct bus_type bttv_sub_bus_type = {
.name = "bttv-sub",
.match = &bttv_sub_bus_match,
.probe = bttv_sub_probe,
diff --git a/drivers/media/pci/bt8xx/bttvp.h b/drivers/media/pci/bt8xx/bttvp.h
index 0368a583cf077..a534e63b9a37b 100644
--- a/drivers/media/pci/bt8xx/bttvp.h
+++ b/drivers/media/pci/bt8xx/bttvp.h
@@ -234,7 +234,7 @@ int bttv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f);
/* ---------------------------------------------------------- */
/* bttv-gpio.c */
-extern struct bus_type bttv_sub_bus_type;
+extern const struct bus_type bttv_sub_bus_type;
int bttv_sub_add_device(struct bttv_core *core, char *name);
int bttv_sub_del_devices(struct bttv_core *core);
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 42fdcf992e48b..7d4a409c433e2 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -1354,6 +1354,10 @@ int cx23885_video_register(struct cx23885_dev *dev)
/* register Video device */
dev->video_dev = cx23885_vdev_init(dev, dev->pci,
&cx23885_video_template, "video");
+ if (!dev->video_dev) {
+ err = -ENOMEM;
+ goto fail_unreg;
+ }
dev->video_dev->queue = &dev->vb2_vidq;
dev->video_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
V4L2_CAP_AUDIO | V4L2_CAP_VIDEO_CAPTURE;
@@ -1382,6 +1386,10 @@ int cx23885_video_register(struct cx23885_dev *dev)
/* register VBI device */
dev->vbi_dev = cx23885_vdev_init(dev, dev->pci,
&cx23885_vbi_template, "vbi");
+ if (!dev->vbi_dev) {
+ err = -ENOMEM;
+ goto fail_unreg;
+ }
dev->vbi_dev->queue = &dev->vb2_vbiq;
dev->vbi_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
V4L2_CAP_AUDIO | V4L2_CAP_VBI_CAPTURE;
diff --git a/drivers/media/pci/dt3155/dt3155.h b/drivers/media/pci/dt3155/dt3155.h
index c9ce79cb55668..ce1835d9691ec 100644
--- a/drivers/media/pci/dt3155/dt3155.h
+++ b/drivers/media/pci/dt3155/dt3155.h
@@ -162,7 +162,6 @@
* @height: frame height
* @input: current input
* @sequence: frame counter
- * @stats: statistics structure
* @regs: local copy of mmio base register
* @csr2: local copy of csr2 register
* @config: local copy of config register
diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
index f980e3125a7b9..e994db4f4d914 100644
--- a/drivers/media/pci/intel/ipu-bridge.c
+++ b/drivers/media/pci/intel/ipu-bridge.c
@@ -2,6 +2,7 @@
/* Author: Dan Scally <djrscally@gmail.com> */
#include <linux/acpi.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/mei_cl_bus.h>
@@ -60,6 +61,8 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = {
IPU_SENSOR_CONFIG("OVTIDB10", 1, 560000000),
/* GalaxyCore GC0310 */
IPU_SENSOR_CONFIG("INT0310", 0),
+ /* Omnivision ov01a10 */
+ IPU_SENSOR_CONFIG("OVTI01A0", 1, 400000000),
};
static const struct ipu_property_names prop_names = {
@@ -747,6 +750,24 @@ static int ipu_bridge_ivsc_is_ready(void)
return ready;
}
+static int ipu_bridge_check_fwnode_graph(struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *endpoint;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return -EINVAL;
+
+ endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (endpoint) {
+ fwnode_handle_put(endpoint);
+ return 0;
+ }
+
+ return ipu_bridge_check_fwnode_graph(fwnode->secondary);
+}
+
+static DEFINE_MUTEX(ipu_bridge_mutex);
+
int ipu_bridge_init(struct device *dev,
ipu_parse_sensor_fwnode_t parse_sensor_fwnode)
{
@@ -755,6 +776,11 @@ int ipu_bridge_init(struct device *dev,
unsigned int i;
int ret;
+ guard(mutex)(&ipu_bridge_mutex);
+
+ if (!ipu_bridge_check_fwnode_graph(dev_fwnode(dev)))
+ return 0;
+
if (!ipu_bridge_ivsc_is_ready())
return -EPROBE_DEFER;
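ipu_bridge_check_fwnode_graph(), moved here from ipu3-cio2, checks the primary fwnode for a graph endpoint and otherwise recurses into fwnode->secondary, which is where software-node graphs built from SSDB end up. A tiny standalone sketch of that "try primary, then fall back to secondary" recursion (the node type below is invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct fake_node {
	bool has_endpoint;		/* does this node carry a graph endpoint? */
	struct fake_node *secondary;	/* e.g. software nodes built from SSDB */
};

static int check_graph(const struct fake_node *node)
{
	if (!node)
		return -22;		/* -EINVAL: nothing left to inspect */
	if (node->has_endpoint)
		return 0;		/* endpoint found, graph is usable */
	return check_graph(node->secondary);	/* fall back to the secondary */
}

int main(void)
{
	struct fake_node swnodes = { .has_endpoint = true };
	struct fake_node acpi = { .has_endpoint = false, .secondary = &swnodes };

	printf("acpi node: %d\n", check_graph(&acpi));			/* 0 */
	printf("bare node: %d\n", check_graph(&(struct fake_node){0}));	/* -22 */
	return 0;
}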
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index ed08bf4178f08..c42adc5a408db 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -28,6 +28,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-sg.h>
@@ -1407,7 +1408,6 @@ static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
{
struct cio2_device *cio2 = to_cio2_device(notifier);
- struct device *dev = &cio2->pci_dev->dev;
struct sensor_async_subdev *s_asd;
struct v4l2_async_connection *asd;
struct cio2_queue *q;
@@ -1417,23 +1417,10 @@ static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
s_asd = to_sensor_asd(asd);
q = &cio2->queue[s_asd->csi2.port];
- ret = media_entity_get_fwnode_pad(&q->sensor->entity,
- s_asd->asd.match.fwnode,
- MEDIA_PAD_FL_SOURCE);
- if (ret < 0) {
- dev_err(dev, "no pad for endpoint %pfw (%d)\n",
- s_asd->asd.match.fwnode, ret);
- return ret;
- }
-
- ret = media_create_pad_link(&q->sensor->entity, ret,
- &q->subdev.entity, CIO2_PAD_SINK,
- 0);
- if (ret) {
- dev_err(dev, "failed to create link for %s (endpoint %pfw, error %d)\n",
- q->sensor->name, s_asd->asd.match.fwnode, ret);
+ ret = v4l2_create_fwnode_links_to_pad(asd->sd,
+ &q->subdev_pads[CIO2_PAD_SINK], 0);
+ if (ret)
return ret;
- }
}
return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
@@ -1572,6 +1559,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
v4l2_subdev_init(subdev, &cio2_subdev_ops);
subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
subdev->owner = THIS_MODULE;
+ subdev->dev = dev;
snprintf(subdev->name, sizeof(subdev->name),
CIO2_ENTITY_NAME " %td", q - cio2->queue);
subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
@@ -1679,29 +1667,12 @@ static void cio2_queues_exit(struct cio2_device *cio2)
cio2_queue_exit(cio2, &cio2->queue[i]);
}
-static int cio2_check_fwnode_graph(struct fwnode_handle *fwnode)
-{
- struct fwnode_handle *endpoint;
-
- if (IS_ERR_OR_NULL(fwnode))
- return -EINVAL;
-
- endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
- if (endpoint) {
- fwnode_handle_put(endpoint);
- return 0;
- }
-
- return cio2_check_fwnode_graph(fwnode->secondary);
-}
-
/**************** PCI interface ****************/
static int cio2_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
struct device *dev = &pci_dev->dev;
- struct fwnode_handle *fwnode = dev_fwnode(dev);
struct cio2_device *cio2;
int r;
@@ -1710,17 +1681,9 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
* if the device has no endpoints then we can try to build those as
* software_nodes parsed from SSDB.
*/
- r = cio2_check_fwnode_graph(fwnode);
- if (r) {
- if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
- dev_err(dev, "fwnode graph has no endpoints connected\n");
- return -EINVAL;
- }
-
- r = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
- if (r)
- return r;
- }
+ r = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
+ if (r)
+ return r;
cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
if (!cio2)
diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c
index 15b905f66ab72..55e0c60c420cd 100644
--- a/drivers/media/pci/intel/ivsc/mei_csi.c
+++ b/drivers/media/pci/intel/ivsc/mei_csi.c
@@ -71,8 +71,8 @@ enum ivsc_privacy_status {
};
enum csi_pads {
- CSI_PAD_SOURCE,
CSI_PAD_SINK,
+ CSI_PAD_SOURCE,
CSI_NUM_PADS
};
@@ -128,7 +128,6 @@ struct mei_csi {
int streaming;
struct media_pad pads[CSI_NUM_PADS];
- struct v4l2_mbus_framefmt format_mbus[CSI_NUM_PADS];
/* number of data lanes used on the CSI-2 link */
u32 nr_of_lanes;
@@ -329,58 +328,17 @@ err:
return ret;
}
-static struct v4l2_mbus_framefmt *
-mei_csi_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- unsigned int pad, u32 which)
-{
- struct mei_csi *csi = sd_to_csi(sd);
-
- switch (which) {
- case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_state_get_format(sd_state, pad);
- case V4L2_SUBDEV_FORMAT_ACTIVE:
- return &csi->format_mbus[pad];
- default:
- return NULL;
- }
-}
-
static int mei_csi_init_state(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *mbusformat;
- struct mei_csi *csi = sd_to_csi(sd);
unsigned int i;
- mutex_lock(&csi->lock);
-
for (i = 0; i < sd->entity.num_pads; i++) {
mbusformat = v4l2_subdev_state_get_format(sd_state, i);
*mbusformat = mei_csi_format_mbus_default;
}
- mutex_unlock(&csi->lock);
-
- return 0;
-}
-
-static int mei_csi_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *mbusformat;
- struct mei_csi *csi = sd_to_csi(sd);
-
- mutex_lock(&csi->lock);
-
- mbusformat = mei_csi_get_pad_format(sd, sd_state, format->pad,
- format->which);
- if (mbusformat)
- format->format = *mbusformat;
-
- mutex_unlock(&csi->lock);
-
return 0;
}
@@ -388,20 +346,17 @@ static int mei_csi_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
- struct v4l2_mbus_framefmt *source_mbusformat;
- struct v4l2_mbus_framefmt *mbusformat;
- struct mei_csi *csi = sd_to_csi(sd);
- struct media_pad *pad;
+ struct v4l2_mbus_framefmt *source_fmt;
+ struct v4l2_mbus_framefmt *sink_fmt;
- mbusformat = mei_csi_get_pad_format(sd, sd_state, format->pad,
- format->which);
- if (!mbusformat)
- return -EINVAL;
+ sink_fmt = v4l2_subdev_state_get_format(sd_state, CSI_PAD_SINK);
+ source_fmt = v4l2_subdev_state_get_format(sd_state, CSI_PAD_SOURCE);
- source_mbusformat = mei_csi_get_pad_format(sd, sd_state, CSI_PAD_SOURCE,
- format->which);
- if (!source_mbusformat)
- return -EINVAL;
+ if (format->pad) {
+ *source_fmt = *sink_fmt;
+
+ return 0;
+ }
v4l_bound_align_image(&format->format.width, 1, 65536, 0,
&format->format.height, 1, 65536, 0, 0);
@@ -504,18 +459,8 @@ static int mei_csi_set_fmt(struct v4l2_subdev *sd,
if (format->format.field == V4L2_FIELD_ANY)
format->format.field = V4L2_FIELD_NONE;
- mutex_lock(&csi->lock);
-
- pad = &csi->pads[format->pad];
- if (pad->flags & MEDIA_PAD_FL_SOURCE)
- format->format = csi->format_mbus[CSI_PAD_SINK];
-
- *mbusformat = format->format;
-
- if (pad->flags & MEDIA_PAD_FL_SINK)
- *source_mbusformat = format->format;
-
- mutex_unlock(&csi->lock);
+ *sink_fmt = format->format;
+ *source_fmt = *sink_fmt;
return 0;
}
@@ -554,7 +499,7 @@ static const struct v4l2_subdev_video_ops mei_csi_video_ops = {
};
static const struct v4l2_subdev_pad_ops mei_csi_pad_ops = {
- .get_fmt = mei_csi_get_fmt,
+ .get_fmt = v4l2_subdev_get_fmt,
.set_fmt = mei_csi_set_fmt,
};
@@ -587,7 +532,7 @@ static int mei_csi_notify_bound(struct v4l2_async_notifier *notifier,
csi->remote_pad = pad;
return media_create_pad_link(&subdev->entity, pad,
- &csi->subdev.entity, 1,
+ &csi->subdev.entity, CSI_PAD_SINK,
MEDIA_LNK_FL_ENABLED |
MEDIA_LNK_FL_IMMUTABLE);
}
@@ -749,6 +694,7 @@ static int mei_csi_probe(struct mei_cl_device *cldev,
goto err_disable;
csi->subdev.dev = &cldev->dev;
+ csi->subdev.state_lock = &csi->lock;
v4l2_subdev_init(&csi->subdev, &mei_csi_subdev_ops);
csi->subdev.internal_ops = &mei_csi_internal_ops;
v4l2_set_subdevdata(&csi->subdev, csi);
@@ -764,9 +710,6 @@ static int mei_csi_probe(struct mei_cl_device *cldev,
if (ret)
goto err_ctrl_handler;
- csi->format_mbus[CSI_PAD_SOURCE] = mei_csi_format_mbus_default;
- csi->format_mbus[CSI_PAD_SINK] = mei_csi_format_mbus_default;
-
csi->pads[CSI_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
csi->pads[CSI_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&csi->subdev.entity, CSI_NUM_PADS,
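
Editorial note (not part of the patch): the mei_csi changes drop the driver-private format cache and serve formats from the v4l2 subdev active state, with v4l2_subdev_get_fmt handling .get_fmt and the driver mutex reused as subdev.state_lock. A hedged sketch of that pattern, assuming a two-pad subdev (sink pad 0, source pad 1); the my_csi_* names are illustrative, not from this driver:

	static int my_csi_set_fmt(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *sd_state,
				  struct v4l2_subdev_format *format)
	{
		struct v4l2_mbus_framefmt *sink_fmt;
		struct v4l2_mbus_framefmt *source_fmt;

		sink_fmt = v4l2_subdev_state_get_format(sd_state, 0);
		source_fmt = v4l2_subdev_state_get_format(sd_state, 1);

		/* The source pad only mirrors whatever the sink pad carries. */
		if (format->pad == 1) {
			format->format = *sink_fmt;
			return 0;
		}

		*sink_fmt = format->format;
		*source_fmt = *sink_fmt;
		return 0;
	}

	static const struct v4l2_subdev_pad_ops my_csi_pad_ops = {
		.get_fmt = v4l2_subdev_get_fmt,	/* read straight from the state */
		.set_fmt = my_csi_set_fmt,
	};

Because the state already holds the active formats under state_lock, no driver-side locking or per-pad cache is needed in the format ops.
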
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index e4cf9d63e926d..364ce9e570182 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -757,7 +757,7 @@ static const struct video_device video_dev_template = {
/**
* vip_irq - interrupt routine
* @irq: Number of interrupt ( not used, correct number is assumed )
- * @vip: local data structure containing all information
+ * @data: local data structure containing all information
*
* check for both frame interrupts set ( top and bottom ).
* check FIFO overflow, but limit number of log messages after open.
@@ -767,8 +767,9 @@ static const struct video_device video_dev_template = {
*
* IRQ_HANDLED, interrupt done.
*/
-static irqreturn_t vip_irq(int irq, struct sta2x11_vip *vip)
+static irqreturn_t vip_irq(int irq, void *data)
{
+ struct sta2x11_vip *vip = data;
unsigned int status;
status = reg_read(vip, DVP_ITS);
@@ -1053,9 +1054,7 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
spin_lock_init(&vip->slock);
- ret = request_irq(pdev->irq,
- (irq_handler_t) vip_irq,
- IRQF_SHARED, KBUILD_MODNAME, vip);
+ ret = request_irq(pdev->irq, vip_irq, IRQF_SHARED, KBUILD_MODNAME, vip);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
ret = -ENODEV;
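
Editorial note (not part of the patch): the sta2x11_vip hunk removes a function-pointer cast by giving the handler the canonical irq_handler_t signature and recovering the device pointer from the opaque cookie. A hedged, generic sketch of that convention (the my_dev names are illustrative):

	#include <linux/interrupt.h>
	#include <linux/io.h>

	struct my_dev {
		void __iomem *regs;
	};

	static irqreturn_t my_dev_irq(int irq, void *data)
	{
		struct my_dev *priv = data;	/* cookie passed to request_irq() */

		if (!readl(priv->regs))		/* not ours on a shared line */
			return IRQ_NONE;

		return IRQ_HANDLED;
	}

	static int my_dev_setup_irq(struct my_dev *priv, int irq)
	{
		/* No cast needed once the prototype already matches. */
		return request_irq(irq, my_dev_irq, IRQF_SHARED, "my_dev", priv);
	}

Casting a mismatched handler to irq_handler_t defeats type checking and trips CFI, which is why conversions like this one keep appearing across drivers.
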
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index 230b104a7cdf0..a47c5850ef875 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -1463,7 +1463,8 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
budget_av->has_saa7113 = 1;
err = saa7146_vv_init(dev, &vv_data);
if (err != 0) {
- /* fixme: proper cleanup here */
+ ttpci_budget_deinit(&budget_av->budget);
+ kfree(budget_av);
ERR("cannot init vv subsystem\n");
return err;
}
@@ -1472,9 +1473,10 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_VIDEO))) {
- /* fixme: proper cleanup here */
- ERR("cannot register capture v4l2 device\n");
saa7146_vv_release(dev);
+ ttpci_budget_deinit(&budget_av->budget);
+ kfree(budget_av);
+ ERR("cannot register capture v4l2 device\n");
return err;
}
diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
index 133d77d1ea0c3..a57f9f4f3b876 100644
--- a/drivers/media/platform/amphion/vdec.c
+++ b/drivers/media/platform/amphion/vdec.c
@@ -1595,9 +1595,11 @@ static int vdec_stop_session(struct vpu_inst *inst, u32 type)
if (V4L2_TYPE_IS_OUTPUT(type)) {
vdec_update_state(inst, VPU_CODEC_STATE_SEEK, 0);
vdec->drain = 0;
+ vdec_abort(inst);
} else {
if (inst->state != VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE) {
- vdec_abort(inst);
+ if (vb2_is_streaming(v4l2_m2m_get_src_vq(inst->fh.m2m_ctx)))
+ vdec_abort(inst);
vdec->eos_received = 0;
}
vdec_clear_slots(inst);
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index f8450a8ccda62..c1108df72dd51 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -834,7 +834,7 @@ static int atmel_isi_parse_dt(struct atmel_isi *isi,
isi->pdata.full_mode = 1;
isi->pdata.frate = ISI_CFG1_FRATE_CAPTURE_ALL;
- np = of_graph_get_next_endpoint(np, NULL);
+ np = of_graph_get_endpoint_by_regs(np, 0, -1);
if (!np) {
dev_err(&pdev->dev, "Could not find the endpoint\n");
return -EINVAL;
@@ -1158,7 +1158,7 @@ static int isi_graph_init(struct atmel_isi *isi)
struct device_node *ep;
int ret;
- ep = of_graph_get_next_endpoint(isi->dev->of_node, NULL);
+ ep = of_graph_get_endpoint_by_regs(isi->dev->of_node, 0, -1);
if (!ep)
return -EINVAL;
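
Editorial note (not part of the patch): this hunk, like the pxa_camera one further down, replaces of_graph_get_next_endpoint() with of_graph_get_endpoint_by_regs(), which looks the endpoint up by its port/endpoint reg values, with -1 acting as a wildcard. A hedged sketch of typical usage (the function name is illustrative):

	#include <linux/of.h>
	#include <linux/of_graph.h>

	static int my_parse_port0(struct device_node *np)
	{
		struct device_node *ep;

		/* First endpoint of port 0; -1 matches any endpoint reg. */
		ep = of_graph_get_endpoint_by_regs(np, 0, -1);
		if (!ep)
			return -EINVAL;

		/* ... parse the endpoint properties here ... */

		of_node_put(ep);	/* the lookup returns a reference */
		return 0;
	}

Asking for an explicit port makes the lookup independent of endpoint ordering in the device tree, which is the point of these conversions.
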
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index fead5426830e8..2d7b0508cc9af 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -114,10 +114,14 @@ static const struct csi2rx_fmt formats[] = {
{ .code = MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, },
{ .code = MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, },
{ .code = MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, },
+ { .code = MEDIA_BUS_FMT_Y8_1X8, .bpp = 8, },
{ .code = MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, },
{ .code = MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, },
{ .code = MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, },
{ .code = MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, },
+ { .code = MEDIA_BUS_FMT_RGB565_1X16, .bpp = 16, },
+ { .code = MEDIA_BUS_FMT_RGB888_1X24, .bpp = 24, },
+ { .code = MEDIA_BUS_FMT_BGR888_1X24, .bpp = 24, },
};
static const struct csi2rx_fmt *csi2rx_get_fmt_by_code(u32 code)
@@ -389,6 +393,18 @@ out:
return ret;
}
+static int csi2rx_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code_enum)
+{
+ if (code_enum->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ code_enum->code = formats[code_enum->index].code;
+
+ return 0;
+}
+
static int csi2rx_set_fmt(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
@@ -439,6 +455,7 @@ static int csi2rx_init_state(struct v4l2_subdev *subdev,
}
static const struct v4l2_subdev_pad_ops csi2rx_pad_ops = {
+ .enum_mbus_code = csi2rx_enum_mbus_code,
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = csi2rx_set_fmt,
};
@@ -468,7 +485,7 @@ static int csi2rx_async_bound(struct v4l2_async_notifier *notifier,
struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
csi2rx->source_pad = media_entity_get_fwnode_pad(&s_subdev->entity,
- s_subdev->fwnode,
+ asd->match.fwnode,
MEDIA_PAD_FL_SOURCE);
if (csi2rx->source_pad < 0) {
dev_err(csi2rx->dev, "Couldn't find output pad for subdev %s\n",
diff --git a/drivers/media/platform/chips-media/wave5/wave5-hw.c b/drivers/media/platform/chips-media/wave5/wave5-hw.c
index f1e022fb148ea..2d82791f575e0 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-hw.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-hw.c
@@ -2315,7 +2315,7 @@ static bool wave5_vpu_enc_check_common_param_valid(struct vpu_instance *inst,
param->intra_refresh_mode);
return false;
}
- };
+ }
return true;
invalid_refresh_argument:
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
index f29cfa3af94ac..8bbf9d10b4677 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
@@ -92,7 +92,7 @@ static int switch_state(struct vpu_instance *inst, enum vpu_instance_state state
break;
case VPU_INST_STATE_STOP:
break;
- };
+ }
dev_dbg(inst->dev->dev, "Switch state from %s to %s.\n",
state_to_str(inst->state), state_to_str(state));
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu.c b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
index 0d90b5820bef7..1b3df5b04249a 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
@@ -250,7 +250,7 @@ err_clk_dis:
return ret;
}
-static int wave5_vpu_remove(struct platform_device *pdev)
+static void wave5_vpu_remove(struct platform_device *pdev)
{
struct vpu_device *dev = dev_get_drvdata(&pdev->dev);
@@ -262,8 +262,6 @@ static int wave5_vpu_remove(struct platform_device *pdev)
v4l2_device_unregister(&dev->v4l2_dev);
wave5_vdi_release(&pdev->dev);
ida_destroy(&dev->inst_ida);
-
- return 0;
}
static const struct wave5_match_data ti_wave521c_data = {
@@ -283,7 +281,7 @@ static struct platform_driver wave5_vpu_driver = {
.of_match_table = of_match_ptr(wave5_dt_ids),
},
.probe = wave5_vpu_probe,
- .remove = wave5_vpu_remove,
+ .remove_new = wave5_vpu_remove,
};
module_platform_driver(wave5_vpu_driver);
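
Editorial note (not part of the patch): the wave5 hunk converts the driver to the void-returning removal callback (.remove_new in this kernel series), since an error returned from remove is ignored by the driver core anyway. A hedged sketch of the resulting driver shape (all names illustrative):

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int my_probe(struct platform_device *pdev)
	{
		return 0;
	}

	static void my_remove(struct platform_device *pdev)
	{
		/* Tear down unconditionally; there is no error to report. */
	}

	static struct platform_driver my_driver = {
		.driver		= { .name = "my-driver" },
		.probe		= my_probe,
		.remove_new	= my_remove,
	};
	module_platform_driver(my_driver);

	MODULE_LICENSE("GPL");

Dropping the "return 0;" tail, as the hunk above does, is the whole mechanical content of such conversions.
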
diff --git a/drivers/media/platform/intel/pxa_camera.c b/drivers/media/platform/intel/pxa_camera.c
index 59b89e421dc28..d904952bf00e3 100644
--- a/drivers/media/platform/intel/pxa_camera.c
+++ b/drivers/media/platform/intel/pxa_camera.c
@@ -2207,7 +2207,7 @@ static int pxa_camera_pdata_from_dt(struct device *dev,
pcdev->mclk = mclk_rate;
}
- np = of_graph_get_next_endpoint(np, NULL);
+ np = of_graph_get_endpoint_by_regs(np, 0, -1);
if (!np) {
dev_err(dev, "could not find endpoint\n");
return -EINVAL;
diff --git a/drivers/media/platform/marvell/Kconfig b/drivers/media/platform/marvell/Kconfig
index d6499ffe30e8b..d31f4730f2a38 100644
--- a/drivers/media/platform/marvell/Kconfig
+++ b/drivers/media/platform/marvell/Kconfig
@@ -7,6 +7,7 @@ config VIDEO_CAFE_CCIC
depends on V4L_PLATFORM_DRIVERS
depends on PCI && I2C && VIDEO_DEV
depends on COMMON_CLK
+ select V4L2_ASYNC
select VIDEO_OV7670 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
select VIDEOBUF2_VMALLOC
select VIDEOBUF2_DMA_CONTIG
@@ -24,6 +25,7 @@ config VIDEO_MMP_CAMERA
depends on COMMON_CLK
select VIDEO_OV7670 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
select I2C_GPIO
+ select V4L2_ASYNC
select VIDEOBUF2_VMALLOC
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_DMA_SG
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h
index 8ba6e757e11aa..8877eb39e8071 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h
@@ -144,7 +144,6 @@ struct mtk_jpegdec_clk {
* @jpegenc_irq: jpeg encode irq num
* @job_timeout_work: encode timeout workqueue
* @hw_param: jpeg encode hw parameters
- * @hw_rdy: record hw ready
* @hw_state: record hw state
* @hw_lock: spinlock protecting the hw device resource
*/
diff --git a/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c
index b065ccd069140..378a1cba0144f 100644
--- a/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c
@@ -26,7 +26,7 @@ static void mtk_mdp_vpu_handle_init_ack(const struct mdp_ipi_comm_ack *msg)
vpu->inst_addr = msg->vpu_inst_addr;
}
-static void mtk_mdp_vpu_ipi_handler(const void *data, unsigned int len,
+static void mtk_mdp_vpu_ipi_handler(void *data, unsigned int len,
void *priv)
{
const struct mdp_ipi_comm_ack *msg = data;
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_cfg_data.c b/drivers/media/platform/mediatek/mdp3/mdp_cfg_data.c
index 502eeae0bfdc4..ecca52b45307c 100644
--- a/drivers/media/platform/mediatek/mdp3/mdp_cfg_data.c
+++ b/drivers/media/platform/mediatek/mdp3/mdp_cfg_data.c
@@ -46,18 +46,114 @@ enum mt8183_mdp_comp_id {
MT8183_MDP_COMP_WROT1, /* 25 */
};
+enum mt8195_mdp_comp_id {
+ /* MT8195 Comp id */
+ /* ISP */
+ MT8195_MDP_COMP_WPEI = 0,
+ MT8195_MDP_COMP_WPEO, /* 1 */
+ MT8195_MDP_COMP_WPEI2, /* 2 */
+ MT8195_MDP_COMP_WPEO2, /* 3 */
+
+ /* MDP */
+ MT8195_MDP_COMP_CAMIN, /* 4 */
+ MT8195_MDP_COMP_CAMIN2, /* 5 */
+ MT8195_MDP_COMP_SPLIT, /* 6 */
+ MT8195_MDP_COMP_SPLIT2, /* 7 */
+ MT8195_MDP_COMP_RDMA0, /* 8 */
+ MT8195_MDP_COMP_RDMA1, /* 9 */
+ MT8195_MDP_COMP_RDMA2, /* 10 */
+ MT8195_MDP_COMP_RDMA3, /* 11 */
+ MT8195_MDP_COMP_STITCH, /* 12 */
+ MT8195_MDP_COMP_FG0, /* 13 */
+ MT8195_MDP_COMP_FG1, /* 14 */
+ MT8195_MDP_COMP_FG2, /* 15 */
+ MT8195_MDP_COMP_FG3, /* 16 */
+ MT8195_MDP_COMP_TO_SVPP2MOUT, /* 17 */
+ MT8195_MDP_COMP_TO_SVPP3MOUT, /* 18 */
+ MT8195_MDP_COMP_TO_WARP0MOUT, /* 19 */
+ MT8195_MDP_COMP_TO_WARP1MOUT, /* 20 */
+ MT8195_MDP_COMP_VPP0_SOUT, /* 21 */
+ MT8195_MDP_COMP_VPP1_SOUT, /* 22 */
+ MT8195_MDP_COMP_PQ0_SOUT, /* 23 */
+ MT8195_MDP_COMP_PQ1_SOUT, /* 24 */
+ MT8195_MDP_COMP_HDR0, /* 25 */
+ MT8195_MDP_COMP_HDR1, /* 26 */
+ MT8195_MDP_COMP_HDR2, /* 27 */
+ MT8195_MDP_COMP_HDR3, /* 28 */
+ MT8195_MDP_COMP_AAL0, /* 29 */
+ MT8195_MDP_COMP_AAL1, /* 30 */
+ MT8195_MDP_COMP_AAL2, /* 31 */
+ MT8195_MDP_COMP_AAL3, /* 32 */
+ MT8195_MDP_COMP_RSZ0, /* 33 */
+ MT8195_MDP_COMP_RSZ1, /* 34 */
+ MT8195_MDP_COMP_RSZ2, /* 35 */
+ MT8195_MDP_COMP_RSZ3, /* 36 */
+ MT8195_MDP_COMP_TDSHP0, /* 37 */
+ MT8195_MDP_COMP_TDSHP1, /* 38 */
+ MT8195_MDP_COMP_TDSHP2, /* 39 */
+ MT8195_MDP_COMP_TDSHP3, /* 40 */
+ MT8195_MDP_COMP_COLOR0, /* 41 */
+ MT8195_MDP_COMP_COLOR1, /* 42 */
+ MT8195_MDP_COMP_COLOR2, /* 43 */
+ MT8195_MDP_COMP_COLOR3, /* 44 */
+ MT8195_MDP_COMP_OVL0, /* 45 */
+ MT8195_MDP_COMP_OVL1, /* 46 */
+ MT8195_MDP_COMP_PAD0, /* 47 */
+ MT8195_MDP_COMP_PAD1, /* 48 */
+ MT8195_MDP_COMP_PAD2, /* 49 */
+ MT8195_MDP_COMP_PAD3, /* 50 */
+ MT8195_MDP_COMP_TCC0, /* 51 */
+ MT8195_MDP_COMP_TCC1, /* 52 */
+ MT8195_MDP_COMP_WROT0, /* 53 */
+ MT8195_MDP_COMP_WROT1, /* 54 */
+ MT8195_MDP_COMP_WROT2, /* 55 */
+ MT8195_MDP_COMP_WROT3, /* 56 */
+ MT8195_MDP_COMP_MERGE2, /* 57 */
+ MT8195_MDP_COMP_MERGE3, /* 58 */
+
+ MT8195_MDP_COMP_VDO0DL0, /* 59 */
+ MT8195_MDP_COMP_VDO1DL0, /* 60 */
+ MT8195_MDP_COMP_VDO0DL1, /* 61 */
+ MT8195_MDP_COMP_VDO1DL1, /* 62 */
+};
+
static const struct of_device_id mt8183_mdp_probe_infra[MDP_INFRA_MAX] = {
[MDP_INFRA_MMSYS] = { .compatible = "mediatek,mt8183-mmsys" },
[MDP_INFRA_MUTEX] = { .compatible = "mediatek,mt8183-disp-mutex" },
[MDP_INFRA_SCP] = { .compatible = "mediatek,mt8183-scp" }
};
+static const struct of_device_id mt8195_mdp_probe_infra[MDP_INFRA_MAX] = {
+ [MDP_INFRA_MMSYS] = { .compatible = "mediatek,mt8195-vppsys0" },
+ [MDP_INFRA_MMSYS2] = { .compatible = "mediatek,mt8195-vppsys1" },
+ [MDP_INFRA_MUTEX] = { .compatible = "mediatek,mt8195-vpp-mutex" },
+ [MDP_INFRA_MUTEX2] = { .compatible = "mediatek,mt8195-vpp-mutex" },
+ [MDP_INFRA_SCP] = { .compatible = "mediatek,mt8195-scp" }
+};
+
static const struct mdp_platform_config mt8183_plat_cfg = {
.rdma_support_10bit = true,
.rdma_rsz1_sram_sharing = true,
.rdma_upsample_repeat_only = true,
+ .rdma_event_num = 1,
.rsz_disable_dcm_small_sample = false,
.wrot_filter_constraint = false,
+ .wrot_event_num = 1,
+};
+
+static const struct mdp_platform_config mt8195_plat_cfg = {
+ .rdma_support_10bit = true,
+ .rdma_rsz1_sram_sharing = false,
+ .rdma_upsample_repeat_only = false,
+ .rdma_esl_setting = true,
+ .rdma_event_num = 4,
+ .rsz_disable_dcm_small_sample = false,
+ .rsz_etc_control = true,
+ .wrot_filter_constraint = false,
+ .wrot_event_num = 4,
+ .tdshp_hist_num = 17,
+ .tdshp_constrain = true,
+ .tdshp_contour = true,
};
static const u32 mt8183_mutex_idx[MDP_MAX_COMP_COUNT] = {
@@ -71,81 +167,384 @@ static const u32 mt8183_mutex_idx[MDP_MAX_COMP_COUNT] = {
[MDP_COMP_CCORR0] = MUTEX_MOD_IDX_MDP_CCORR0,
};
+static const u32 mt8195_mutex_idx[MDP_MAX_COMP_COUNT] = {
+ [MDP_COMP_RDMA0] = MUTEX_MOD_IDX_MDP_RDMA0,
+ [MDP_COMP_RDMA1] = MUTEX_MOD_IDX_MDP_RDMA1,
+ [MDP_COMP_RDMA2] = MUTEX_MOD_IDX_MDP_RDMA2,
+ [MDP_COMP_RDMA3] = MUTEX_MOD_IDX_MDP_RDMA3,
+ [MDP_COMP_STITCH] = MUTEX_MOD_IDX_MDP_STITCH0,
+ [MDP_COMP_FG0] = MUTEX_MOD_IDX_MDP_FG0,
+ [MDP_COMP_FG1] = MUTEX_MOD_IDX_MDP_FG1,
+ [MDP_COMP_FG2] = MUTEX_MOD_IDX_MDP_FG2,
+ [MDP_COMP_FG3] = MUTEX_MOD_IDX_MDP_FG3,
+ [MDP_COMP_HDR0] = MUTEX_MOD_IDX_MDP_HDR0,
+ [MDP_COMP_HDR1] = MUTEX_MOD_IDX_MDP_HDR1,
+ [MDP_COMP_HDR2] = MUTEX_MOD_IDX_MDP_HDR2,
+ [MDP_COMP_HDR3] = MUTEX_MOD_IDX_MDP_HDR3,
+ [MDP_COMP_AAL0] = MUTEX_MOD_IDX_MDP_AAL0,
+ [MDP_COMP_AAL1] = MUTEX_MOD_IDX_MDP_AAL1,
+ [MDP_COMP_AAL2] = MUTEX_MOD_IDX_MDP_AAL2,
+ [MDP_COMP_AAL3] = MUTEX_MOD_IDX_MDP_AAL3,
+ [MDP_COMP_RSZ0] = MUTEX_MOD_IDX_MDP_RSZ0,
+ [MDP_COMP_RSZ1] = MUTEX_MOD_IDX_MDP_RSZ1,
+ [MDP_COMP_RSZ2] = MUTEX_MOD_IDX_MDP_RSZ2,
+ [MDP_COMP_RSZ3] = MUTEX_MOD_IDX_MDP_RSZ3,
+ [MDP_COMP_MERGE2] = MUTEX_MOD_IDX_MDP_MERGE2,
+ [MDP_COMP_MERGE3] = MUTEX_MOD_IDX_MDP_MERGE3,
+ [MDP_COMP_TDSHP0] = MUTEX_MOD_IDX_MDP_TDSHP0,
+ [MDP_COMP_TDSHP1] = MUTEX_MOD_IDX_MDP_TDSHP1,
+ [MDP_COMP_TDSHP2] = MUTEX_MOD_IDX_MDP_TDSHP2,
+ [MDP_COMP_TDSHP3] = MUTEX_MOD_IDX_MDP_TDSHP3,
+ [MDP_COMP_COLOR0] = MUTEX_MOD_IDX_MDP_COLOR0,
+ [MDP_COMP_COLOR1] = MUTEX_MOD_IDX_MDP_COLOR1,
+ [MDP_COMP_COLOR2] = MUTEX_MOD_IDX_MDP_COLOR2,
+ [MDP_COMP_COLOR3] = MUTEX_MOD_IDX_MDP_COLOR3,
+ [MDP_COMP_OVL0] = MUTEX_MOD_IDX_MDP_OVL0,
+ [MDP_COMP_OVL1] = MUTEX_MOD_IDX_MDP_OVL1,
+ [MDP_COMP_PAD0] = MUTEX_MOD_IDX_MDP_PAD0,
+ [MDP_COMP_PAD1] = MUTEX_MOD_IDX_MDP_PAD1,
+ [MDP_COMP_PAD2] = MUTEX_MOD_IDX_MDP_PAD2,
+ [MDP_COMP_PAD3] = MUTEX_MOD_IDX_MDP_PAD3,
+ [MDP_COMP_TCC0] = MUTEX_MOD_IDX_MDP_TCC0,
+ [MDP_COMP_TCC1] = MUTEX_MOD_IDX_MDP_TCC1,
+ [MDP_COMP_WROT0] = MUTEX_MOD_IDX_MDP_WROT0,
+ [MDP_COMP_WROT1] = MUTEX_MOD_IDX_MDP_WROT1,
+ [MDP_COMP_WROT2] = MUTEX_MOD_IDX_MDP_WROT2,
+ [MDP_COMP_WROT3] = MUTEX_MOD_IDX_MDP_WROT3,
+};
+
static const struct mdp_comp_data mt8183_mdp_comp_data[MDP_MAX_COMP_COUNT] = {
[MDP_COMP_WPEI] = {
- {MDP_COMP_TYPE_WPEI, 0, MT8183_MDP_COMP_WPEI},
+ {MDP_COMP_TYPE_WPEI, 0, MT8183_MDP_COMP_WPEI, MDP_MM_SUBSYS_0},
{0, 0, 0}
},
[MDP_COMP_WPEO] = {
- {MDP_COMP_TYPE_EXTO, 2, MT8183_MDP_COMP_WPEO},
+ {MDP_COMP_TYPE_EXTO, 2, MT8183_MDP_COMP_WPEO, MDP_MM_SUBSYS_0},
{0, 0, 0}
},
[MDP_COMP_WPEI2] = {
- {MDP_COMP_TYPE_WPEI, 1, MT8183_MDP_COMP_WPEI2},
+ {MDP_COMP_TYPE_WPEI, 1, MT8183_MDP_COMP_WPEI2, MDP_MM_SUBSYS_0},
{0, 0, 0}
},
[MDP_COMP_WPEO2] = {
- {MDP_COMP_TYPE_EXTO, 3, MT8183_MDP_COMP_WPEO2},
+ {MDP_COMP_TYPE_EXTO, 3, MT8183_MDP_COMP_WPEO2, MDP_MM_SUBSYS_0},
{0, 0, 0}
},
[MDP_COMP_ISP_IMGI] = {
- {MDP_COMP_TYPE_IMGI, 0, MT8183_MDP_COMP_ISP_IMGI},
+ {MDP_COMP_TYPE_IMGI, 0, MT8183_MDP_COMP_ISP_IMGI, MDP_MM_SUBSYS_0},
{0, 0, 4}
},
[MDP_COMP_ISP_IMGO] = {
- {MDP_COMP_TYPE_EXTO, 0, MT8183_MDP_COMP_ISP_IMGO},
+ {MDP_COMP_TYPE_EXTO, 0, MT8183_MDP_COMP_ISP_IMGO, MDP_MM_SUBSYS_0},
{0, 0, 4}
},
[MDP_COMP_ISP_IMG2O] = {
- {MDP_COMP_TYPE_EXTO, 1, MT8183_MDP_COMP_ISP_IMG2O},
+ {MDP_COMP_TYPE_EXTO, 1, MT8183_MDP_COMP_ISP_IMG2O, MDP_MM_SUBSYS_0},
{0, 0, 0}
},
[MDP_COMP_CAMIN] = {
- {MDP_COMP_TYPE_DL_PATH, 0, MT8183_MDP_COMP_CAMIN},
+ {MDP_COMP_TYPE_DL_PATH, 0, MT8183_MDP_COMP_CAMIN, MDP_MM_SUBSYS_0},
{2, 2, 1}
},
[MDP_COMP_CAMIN2] = {
- {MDP_COMP_TYPE_DL_PATH, 1, MT8183_MDP_COMP_CAMIN2},
+ {MDP_COMP_TYPE_DL_PATH, 1, MT8183_MDP_COMP_CAMIN2, MDP_MM_SUBSYS_0},
{2, 4, 1}
},
[MDP_COMP_RDMA0] = {
- {MDP_COMP_TYPE_RDMA, 0, MT8183_MDP_COMP_RDMA0},
+ {MDP_COMP_TYPE_RDMA, 0, MT8183_MDP_COMP_RDMA0, MDP_MM_SUBSYS_0},
{2, 0, 0}
},
[MDP_COMP_CCORR0] = {
- {MDP_COMP_TYPE_CCORR, 0, MT8183_MDP_COMP_CCORR0},
+ {MDP_COMP_TYPE_CCORR, 0, MT8183_MDP_COMP_CCORR0, MDP_MM_SUBSYS_0},
{1, 0, 0}
},
[MDP_COMP_RSZ0] = {
- {MDP_COMP_TYPE_RSZ, 0, MT8183_MDP_COMP_RSZ0},
+ {MDP_COMP_TYPE_RSZ, 0, MT8183_MDP_COMP_RSZ0, MDP_MM_SUBSYS_0},
{1, 0, 0}
},
[MDP_COMP_RSZ1] = {
- {MDP_COMP_TYPE_RSZ, 1, MT8183_MDP_COMP_RSZ1},
+ {MDP_COMP_TYPE_RSZ, 1, MT8183_MDP_COMP_RSZ1, MDP_MM_SUBSYS_0},
{1, 0, 0}
},
[MDP_COMP_TDSHP0] = {
- {MDP_COMP_TYPE_TDSHP, 0, MT8183_MDP_COMP_TDSHP0},
+ {MDP_COMP_TYPE_TDSHP, 0, MT8183_MDP_COMP_TDSHP0, MDP_MM_SUBSYS_0},
{0, 0, 0}
},
[MDP_COMP_PATH0_SOUT] = {
- {MDP_COMP_TYPE_PATH, 0, MT8183_MDP_COMP_PATH0_SOUT},
+ {MDP_COMP_TYPE_PATH, 0, MT8183_MDP_COMP_PATH0_SOUT, MDP_MM_SUBSYS_0},
{0, 0, 0}
},
[MDP_COMP_PATH1_SOUT] = {
- {MDP_COMP_TYPE_PATH, 1, MT8183_MDP_COMP_PATH1_SOUT},
+ {MDP_COMP_TYPE_PATH, 1, MT8183_MDP_COMP_PATH1_SOUT, MDP_MM_SUBSYS_0},
{0, 0, 0}
},
[MDP_COMP_WROT0] = {
- {MDP_COMP_TYPE_WROT, 0, MT8183_MDP_COMP_WROT0},
+ {MDP_COMP_TYPE_WROT, 0, MT8183_MDP_COMP_WROT0, MDP_MM_SUBSYS_0},
{1, 0, 0}
},
[MDP_COMP_WDMA] = {
- {MDP_COMP_TYPE_WDMA, 0, MT8183_MDP_COMP_WDMA},
+ {MDP_COMP_TYPE_WDMA, 0, MT8183_MDP_COMP_WDMA, MDP_MM_SUBSYS_0},
{1, 0, 0}
},
};
+static const struct mdp_comp_data mt8195_mdp_comp_data[MDP_MAX_COMP_COUNT] = {
+ [MDP_COMP_WPEI] = {
+ {MDP_COMP_TYPE_WPEI, 0, MT8195_MDP_COMP_WPEI, MDP_MM_SUBSYS_0},
+ {0, 0, 0}
+ },
+ [MDP_COMP_WPEO] = {
+ {MDP_COMP_TYPE_EXTO, 2, MT8195_MDP_COMP_WPEO, MDP_MM_SUBSYS_0},
+ {0, 0, 0}
+ },
+ [MDP_COMP_WPEI2] = {
+ {MDP_COMP_TYPE_WPEI, 1, MT8195_MDP_COMP_WPEI2, MDP_MM_SUBSYS_0},
+ {0, 0, 0}
+ },
+ [MDP_COMP_WPEO2] = {
+ {MDP_COMP_TYPE_EXTO, 3, MT8195_MDP_COMP_WPEO2, MDP_MM_SUBSYS_0},
+ {0, 0, 0}
+ },
+ [MDP_COMP_CAMIN] = {
+ {MDP_COMP_TYPE_DL_PATH, 0, MT8195_MDP_COMP_CAMIN, MDP_MM_SUBSYS_0},
+ {3, 3, 0}
+ },
+ [MDP_COMP_CAMIN2] = {
+ {MDP_COMP_TYPE_DL_PATH, 1, MT8195_MDP_COMP_CAMIN2, MDP_MM_SUBSYS_0},
+ {3, 6, 0}
+ },
+ [MDP_COMP_SPLIT] = {
+ {MDP_COMP_TYPE_SPLIT, 0, MT8195_MDP_COMP_SPLIT, MDP_MM_SUBSYS_1},
+ {7, 0, 0}
+ },
+ [MDP_COMP_SPLIT2] = {
+ {MDP_COMP_TYPE_SPLIT, 1, MT8195_MDP_COMP_SPLIT2, MDP_MM_SUBSYS_1},
+ {7, 0, 0}
+ },
+ [MDP_COMP_RDMA0] = {
+ {MDP_COMP_TYPE_RDMA, 0, MT8195_MDP_COMP_RDMA0, MDP_MM_SUBSYS_0},
+ {3, 0, 0}
+ },
+ [MDP_COMP_RDMA1] = {
+ {MDP_COMP_TYPE_RDMA, 1, MT8195_MDP_COMP_RDMA1, MDP_MM_SUBSYS_1},
+ {3, 0, 0}
+ },
+ [MDP_COMP_RDMA2] = {
+ {MDP_COMP_TYPE_RDMA, 2, MT8195_MDP_COMP_RDMA2, MDP_MM_SUBSYS_1},
+ {3, 0, 0}
+ },
+ [MDP_COMP_RDMA3] = {
+ {MDP_COMP_TYPE_RDMA, 3, MT8195_MDP_COMP_RDMA3, MDP_MM_SUBSYS_1},
+ {3, 0, 0}
+ },
+ [MDP_COMP_STITCH] = {
+ {MDP_COMP_TYPE_STITCH, 0, MT8195_MDP_COMP_STITCH, MDP_MM_SUBSYS_0},
+ {1, 0, 0}
+ },
+ [MDP_COMP_FG0] = {
+ {MDP_COMP_TYPE_FG, 0, MT8195_MDP_COMP_FG0, MDP_MM_SUBSYS_0},
+ {1, 0, 0}
+ },
+ [MDP_COMP_FG1] = {
+ {MDP_COMP_TYPE_FG, 1, MT8195_MDP_COMP_FG1, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_FG2] = {
+ {MDP_COMP_TYPE_FG, 2, MT8195_MDP_COMP_FG2, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_FG3] = {
+ {MDP_COMP_TYPE_FG, 3, MT8195_MDP_COMP_FG3, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_HDR0] = {
+ {MDP_COMP_TYPE_HDR, 0, MT8195_MDP_COMP_HDR0, MDP_MM_SUBSYS_0},
+ {1, 0, 0}
+ },
+ [MDP_COMP_HDR1] = {
+ {MDP_COMP_TYPE_HDR, 1, MT8195_MDP_COMP_HDR1, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_HDR2] = {
+ {MDP_COMP_TYPE_HDR, 2, MT8195_MDP_COMP_HDR2, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_HDR3] = {
+ {MDP_COMP_TYPE_HDR, 3, MT8195_MDP_COMP_HDR3, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_AAL0] = {
+ {MDP_COMP_TYPE_AAL, 0, MT8195_MDP_COMP_AAL0, MDP_MM_SUBSYS_0},
+ {1, 0, 0}
+ },
+ [MDP_COMP_AAL1] = {
+ {MDP_COMP_TYPE_AAL, 1, MT8195_MDP_COMP_AAL1, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_AAL2] = {
+ {MDP_COMP_TYPE_AAL, 2, MT8195_MDP_COMP_AAL2, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_AAL3] = {
+ {MDP_COMP_TYPE_AAL, 3, MT8195_MDP_COMP_AAL3, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_RSZ0] = {
+ {MDP_COMP_TYPE_RSZ, 0, MT8195_MDP_COMP_RSZ0, MDP_MM_SUBSYS_0},
+ {1, 0, 0}
+ },
+ [MDP_COMP_RSZ1] = {
+ {MDP_COMP_TYPE_RSZ, 1, MT8195_MDP_COMP_RSZ1, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_RSZ2] = {
+ {MDP_COMP_TYPE_RSZ, 2, MT8195_MDP_COMP_RSZ2, MDP_MM_SUBSYS_1},
+ {2, 0, 0},
+ {MDP_COMP_MERGE2, true, true}
+ },
+ [MDP_COMP_RSZ3] = {
+ {MDP_COMP_TYPE_RSZ, 3, MT8195_MDP_COMP_RSZ3, MDP_MM_SUBSYS_1},
+ {2, 0, 0},
+ {MDP_COMP_MERGE3, true, true}
+ },
+ [MDP_COMP_TDSHP0] = {
+ {MDP_COMP_TYPE_TDSHP, 0, MT8195_MDP_COMP_TDSHP0, MDP_MM_SUBSYS_0},
+ {1, 0, 0}
+ },
+ [MDP_COMP_TDSHP1] = {
+ {MDP_COMP_TYPE_TDSHP, 1, MT8195_MDP_COMP_TDSHP1, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_TDSHP2] = {
+ {MDP_COMP_TYPE_TDSHP, 2, MT8195_MDP_COMP_TDSHP2, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_TDSHP3] = {
+ {MDP_COMP_TYPE_TDSHP, 3, MT8195_MDP_COMP_TDSHP3, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_COLOR0] = {
+ {MDP_COMP_TYPE_COLOR, 0, MT8195_MDP_COMP_COLOR0, MDP_MM_SUBSYS_0},
+ {1, 0, 0}
+ },
+ [MDP_COMP_COLOR1] = {
+ {MDP_COMP_TYPE_COLOR, 1, MT8195_MDP_COMP_COLOR1, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_COLOR2] = {
+ {MDP_COMP_TYPE_COLOR, 2, MT8195_MDP_COMP_COLOR2, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_COLOR3] = {
+ {MDP_COMP_TYPE_COLOR, 3, MT8195_MDP_COMP_COLOR3, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_OVL0] = {
+ {MDP_COMP_TYPE_OVL, 0, MT8195_MDP_COMP_OVL0, MDP_MM_SUBSYS_0},
+ {1, 0, 0}
+ },
+ [MDP_COMP_OVL1] = {
+ {MDP_COMP_TYPE_OVL, 1, MT8195_MDP_COMP_OVL1, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_PAD0] = {
+ {MDP_COMP_TYPE_PAD, 0, MT8195_MDP_COMP_PAD0, MDP_MM_SUBSYS_0},
+ {1, 0, 0}
+ },
+ [MDP_COMP_PAD1] = {
+ {MDP_COMP_TYPE_PAD, 1, MT8195_MDP_COMP_PAD1, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_PAD2] = {
+ {MDP_COMP_TYPE_PAD, 2, MT8195_MDP_COMP_PAD2, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_PAD3] = {
+ {MDP_COMP_TYPE_PAD, 3, MT8195_MDP_COMP_PAD3, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_TCC0] = {
+ {MDP_COMP_TYPE_TCC, 0, MT8195_MDP_COMP_TCC0, MDP_MM_SUBSYS_0},
+ {1, 0, 0}
+ },
+ [MDP_COMP_TCC1] = {
+ {MDP_COMP_TYPE_TCC, 1, MT8195_MDP_COMP_TCC1, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_WROT0] = {
+ {MDP_COMP_TYPE_WROT, 0, MT8195_MDP_COMP_WROT0, MDP_MM_SUBSYS_0},
+ {1, 0, 0}
+ },
+ [MDP_COMP_WROT1] = {
+ {MDP_COMP_TYPE_WROT, 1, MT8195_MDP_COMP_WROT1, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_WROT2] = {
+ {MDP_COMP_TYPE_WROT, 2, MT8195_MDP_COMP_WROT2, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_WROT3] = {
+ {MDP_COMP_TYPE_WROT, 3, MT8195_MDP_COMP_WROT3, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_MERGE2] = {
+ {MDP_COMP_TYPE_MERGE, 0, MT8195_MDP_COMP_MERGE2, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_MERGE3] = {
+ {MDP_COMP_TYPE_MERGE, 1, MT8195_MDP_COMP_MERGE3, MDP_MM_SUBSYS_1},
+ {1, 0, 0}
+ },
+ [MDP_COMP_PQ0_SOUT] = {
+ {MDP_COMP_TYPE_DUMMY, 0, MT8195_MDP_COMP_PQ0_SOUT, MDP_MM_SUBSYS_0},
+ {0, 0, 0}
+ },
+ [MDP_COMP_PQ1_SOUT] = {
+ {MDP_COMP_TYPE_DUMMY, 1, MT8195_MDP_COMP_PQ1_SOUT, MDP_MM_SUBSYS_1},
+ {0, 0, 0}
+ },
+ [MDP_COMP_TO_WARP0MOUT] = {
+ {MDP_COMP_TYPE_DUMMY, 2, MT8195_MDP_COMP_TO_WARP0MOUT, MDP_MM_SUBSYS_0},
+ {0, 0, 0}
+ },
+ [MDP_COMP_TO_WARP1MOUT] = {
+ {MDP_COMP_TYPE_DUMMY, 3, MT8195_MDP_COMP_TO_WARP1MOUT, MDP_MM_SUBSYS_0},
+ {0, 0, 0}
+ },
+ [MDP_COMP_TO_SVPP2MOUT] = {
+ {MDP_COMP_TYPE_DUMMY, 4, MT8195_MDP_COMP_TO_SVPP2MOUT, MDP_MM_SUBSYS_1},
+ {0, 0, 0}
+ },
+ [MDP_COMP_TO_SVPP3MOUT] = {
+ {MDP_COMP_TYPE_DUMMY, 5, MT8195_MDP_COMP_TO_SVPP3MOUT, MDP_MM_SUBSYS_1},
+ {0, 0, 0}
+ },
+ [MDP_COMP_VPP0_SOUT] = {
+ {MDP_COMP_TYPE_PATH, 0, MT8195_MDP_COMP_VPP0_SOUT, MDP_MM_SUBSYS_1},
+ {4, 9, 0}
+ },
+ [MDP_COMP_VPP1_SOUT] = {
+ {MDP_COMP_TYPE_PATH, 1, MT8195_MDP_COMP_VPP1_SOUT, MDP_MM_SUBSYS_0},
+ {2, 13, 0}
+ },
+ [MDP_COMP_VDO0DL0] = {
+ {MDP_COMP_TYPE_DL_PATH, 0, MT8195_MDP_COMP_VDO0DL0, MDP_MM_SUBSYS_1},
+ {1, 15, 0}
+ },
+ [MDP_COMP_VDO1DL0] = {
+ {MDP_COMP_TYPE_DL_PATH, 0, MT8195_MDP_COMP_VDO1DL0, MDP_MM_SUBSYS_1},
+ {1, 17, 0}
+ },
+ [MDP_COMP_VDO0DL1] = {
+ {MDP_COMP_TYPE_DL_PATH, 0, MT8195_MDP_COMP_VDO0DL1, MDP_MM_SUBSYS_1},
+ {1, 18, 0}
+ },
+ [MDP_COMP_VDO1DL1] = {
+ {MDP_COMP_TYPE_DL_PATH, 0, MT8195_MDP_COMP_VDO1DL1, MDP_MM_SUBSYS_1},
+ {1, 16, 0}
+ },
+};
+
static const struct of_device_id mt8183_sub_comp_dt_ids[] = {
{
.compatible = "mediatek,mt8183-mdp3-wdma",
@@ -157,6 +556,10 @@ static const struct of_device_id mt8183_sub_comp_dt_ids[] = {
{}
};
+static const struct of_device_id mt8195_sub_comp_dt_ids[] = {
+ {}
+};
+
/*
* All 10-bit related formats are not added in the basic format list,
* please add the corresponding format settings before use.
@@ -382,6 +785,222 @@ static const struct mdp_format mt8183_formats[] = {
}
};
+static const struct mdp_format mt8195_formats[] = {
+ {
+ .pixelformat = V4L2_PIX_FMT_GREY,
+ .mdp_color = MDP_COLOR_GREY,
+ .depth = { 8 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_RGB565X,
+ .mdp_color = MDP_COLOR_BGR565,
+ .depth = { 16 },
+ .row_depth = { 16 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .mdp_color = MDP_COLOR_RGB565,
+ .depth = { 16 },
+ .row_depth = { 16 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_RGB24,
+ .mdp_color = MDP_COLOR_RGB888,
+ .depth = { 24 },
+ .row_depth = { 24 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_BGR24,
+ .mdp_color = MDP_COLOR_BGR888,
+ .depth = { 24 },
+ .row_depth = { 24 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_ABGR32,
+ .mdp_color = MDP_COLOR_BGRA8888,
+ .depth = { 32 },
+ .row_depth = { 32 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_ARGB32,
+ .mdp_color = MDP_COLOR_ARGB8888,
+ .depth = { 32 },
+ .row_depth = { 32 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .mdp_color = MDP_COLOR_UYVY,
+ .depth = { 16 },
+ .row_depth = { 16 },
+ .num_planes = 1,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_VYUY,
+ .mdp_color = MDP_COLOR_VYUY,
+ .depth = { 16 },
+ .row_depth = { 16 },
+ .num_planes = 1,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .mdp_color = MDP_COLOR_YUYV,
+ .depth = { 16 },
+ .row_depth = { 16 },
+ .num_planes = 1,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YVYU,
+ .mdp_color = MDP_COLOR_YVYU,
+ .depth = { 16 },
+ .row_depth = { 16 },
+ .num_planes = 1,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUV420,
+ .mdp_color = MDP_COLOR_I420,
+ .depth = { 12 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YVU420,
+ .mdp_color = MDP_COLOR_YV12,
+ .depth = { 12 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .mdp_color = MDP_COLOR_NV12,
+ .depth = { 12 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV21,
+ .mdp_color = MDP_COLOR_NV21,
+ .depth = { 12 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV16,
+ .mdp_color = MDP_COLOR_NV16,
+ .depth = { 16 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV61,
+ .mdp_color = MDP_COLOR_NV61,
+ .depth = { 16 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV12M,
+ .mdp_color = MDP_COLOR_NV12,
+ .depth = { 8, 4 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_MM21,
+ .mdp_color = MDP_COLOR_420_BLK,
+ .depth = { 8, 4 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .walign = 6,
+ .halign = 6,
+ .flags = MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV21M,
+ .mdp_color = MDP_COLOR_NV21,
+ .depth = { 8, 4 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV16M,
+ .mdp_color = MDP_COLOR_NV16,
+ .depth = { 8, 8 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV61M,
+ .mdp_color = MDP_COLOR_NV61,
+ .depth = { 8, 8 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUV420M,
+ .mdp_color = MDP_COLOR_I420,
+ .depth = { 8, 2, 2 },
+ .row_depth = { 8, 4, 4 },
+ .num_planes = 3,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YVU420M,
+ .mdp_color = MDP_COLOR_YV12,
+ .depth = { 8, 2, 2 },
+ .row_depth = { 8, 4, 4 },
+ .num_planes = 3,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUV422M,
+ .mdp_color = MDP_COLOR_I422,
+ .depth = { 8, 4, 4 },
+ .row_depth = { 8, 4, 4 },
+ .num_planes = 3,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YVU422M,
+ .mdp_color = MDP_COLOR_YV16,
+ .depth = { 8, 4, 4 },
+ .row_depth = { 8, 4, 4 },
+ .num_planes = 3,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }
+};
+
static const struct mdp_limit mt8183_mdp_def_limit = {
.out_limit = {
.wmin = 16,
@@ -401,15 +1020,54 @@ static const struct mdp_limit mt8183_mdp_def_limit = {
.v_scale_down_max = 128,
};
+static const struct mdp_limit mt8195_mdp_def_limit = {
+ .out_limit = {
+ .wmin = 64,
+ .hmin = 64,
+ .wmax = 8192,
+ .hmax = 8192,
+ },
+ .cap_limit = {
+ .wmin = 64,
+ .hmin = 64,
+ .wmax = 8192,
+ .hmax = 8192,
+ },
+ .h_scale_up_max = 64,
+ .v_scale_up_max = 64,
+ .h_scale_down_max = 128,
+ .v_scale_down_max = 128,
+};
+
static const struct mdp_pipe_info mt8183_pipe_info[] = {
- [MDP_PIPE_WPEI] = {MDP_PIPE_WPEI, 0},
- [MDP_PIPE_WPEI2] = {MDP_PIPE_WPEI2, 1},
- [MDP_PIPE_IMGI] = {MDP_PIPE_IMGI, 2},
- [MDP_PIPE_RDMA0] = {MDP_PIPE_RDMA0, 3}
+ [MDP_PIPE_WPEI] = {MDP_PIPE_WPEI, MDP_MM_SUBSYS_0, 0},
+ [MDP_PIPE_WPEI2] = {MDP_PIPE_WPEI2, MDP_MM_SUBSYS_0, 1},
+ [MDP_PIPE_IMGI] = {MDP_PIPE_IMGI, MDP_MM_SUBSYS_0, 2},
+ [MDP_PIPE_RDMA0] = {MDP_PIPE_RDMA0, MDP_MM_SUBSYS_0, 3}
+};
+
+static const struct mdp_pipe_info mt8195_pipe_info[] = {
+ [MDP_PIPE_WPEI] = {MDP_PIPE_WPEI, MDP_MM_SUBSYS_0, 0},
+ [MDP_PIPE_WPEI2] = {MDP_PIPE_WPEI2, MDP_MM_SUBSYS_0, 1},
+ [MDP_PIPE_IMGI] = {MDP_PIPE_IMGI, MDP_MM_SUBSYS_0, 2},
+ [MDP_PIPE_RDMA0] = {MDP_PIPE_RDMA0, MDP_MM_SUBSYS_0, 3},
+ [MDP_PIPE_RDMA1] = {MDP_PIPE_RDMA1, MDP_MM_SUBSYS_1, 0},
+ [MDP_PIPE_RDMA2] = {MDP_PIPE_RDMA2, MDP_MM_SUBSYS_1, 1},
+ [MDP_PIPE_RDMA3] = {MDP_PIPE_RDMA3, MDP_MM_SUBSYS_1, 2},
+ [MDP_PIPE_SPLIT] = {MDP_PIPE_SPLIT, MDP_MM_SUBSYS_1, 3},
+ [MDP_PIPE_SPLIT2] = {MDP_PIPE_SPLIT2, MDP_MM_SUBSYS_1, 4},
+ [MDP_PIPE_VPP1_SOUT] = {MDP_PIPE_VPP1_SOUT, MDP_MM_SUBSYS_0, 4},
+ [MDP_PIPE_VPP0_SOUT] = {MDP_PIPE_VPP0_SOUT, MDP_MM_SUBSYS_1, 5},
+};
+
+static const struct v4l2_rect mt8195_mdp_pp_criteria = {
+ .width = 1920,
+ .height = 1080,
};
const struct mtk_mdp_driver_data mt8183_mdp_driver_data = {
.mdp_plat_id = MT8183,
+ .mdp_con_res = 0x14001000,
.mdp_probe_infra = mt8183_mdp_probe_infra,
.mdp_cfg = &mt8183_plat_cfg,
.mdp_mutex_table_idx = mt8183_mutex_idx,
@@ -421,6 +1079,25 @@ const struct mtk_mdp_driver_data mt8183_mdp_driver_data = {
.def_limit = &mt8183_mdp_def_limit,
.pipe_info = mt8183_pipe_info,
.pipe_info_len = ARRAY_SIZE(mt8183_pipe_info),
+ .pp_used = MDP_PP_USED_1,
+};
+
+const struct mtk_mdp_driver_data mt8195_mdp_driver_data = {
+ .mdp_plat_id = MT8195,
+ .mdp_con_res = 0x14001000,
+ .mdp_probe_infra = mt8195_mdp_probe_infra,
+ .mdp_sub_comp_dt_ids = mt8195_sub_comp_dt_ids,
+ .mdp_cfg = &mt8195_plat_cfg,
+ .mdp_mutex_table_idx = mt8195_mutex_idx,
+ .comp_data = mt8195_mdp_comp_data,
+ .comp_data_len = ARRAY_SIZE(mt8195_mdp_comp_data),
+ .format = mt8195_formats,
+ .format_len = ARRAY_SIZE(mt8195_formats),
+ .def_limit = &mt8195_mdp_def_limit,
+ .pipe_info = mt8195_pipe_info,
+ .pipe_info_len = ARRAY_SIZE(mt8195_pipe_info),
+ .pp_criteria = &mt8195_mdp_pp_criteria,
+ .pp_used = MDP_PP_USED_2,
};
s32 mdp_cfg_get_id_inner(struct mdp_dev *mdp_dev, enum mtk_mdp_comp_id id)
@@ -451,3 +1128,11 @@ enum mtk_mdp_comp_id mdp_cfg_get_id_public(struct mdp_dev *mdp_dev, s32 inner_id
err_public_id:
return public_id;
}
+
+bool mdp_cfg_comp_is_dummy(struct mdp_dev *mdp_dev, s32 inner_id)
+{
+ enum mtk_mdp_comp_id id = mdp_cfg_get_id_public(mdp_dev, inner_id);
+ enum mdp_comp_type type = mdp_dev->mdp_data->comp_data[id].match.type;
+
+ return (type == MDP_COMP_TYPE_DUMMY);
+}
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_aal.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_aal.h
new file mode 100644
index 0000000000000..4b9513e545432
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_aal.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_REG_AAL_H__
+#define __MDP_REG_AAL_H__
+
+#define MDP_AAL_EN (0x000)
+#define MDP_AAL_CFG (0x020)
+#define MDP_AAL_SIZE (0x030)
+#define MDP_AAL_OUTPUT_SIZE (0x034)
+#define MDP_AAL_OUTPUT_OFFSET (0x038)
+#define MDP_AAL_CFG_MAIN (0x200)
+
+/* MASK */
+#define MDP_AAL_EN_MASK (0x01)
+#define MDP_AAL_CFG_MASK (0x70FF00B3)
+#define MDP_AAL_SIZE_MASK (0x1FFF1FFF)
+#define MDP_AAL_OUTPUT_SIZE_MASK (0x1FFF1FFF)
+#define MDP_AAL_OUTPUT_OFFSET_MASK (0x0FF00FF)
+#define MDP_AAL_CFG_MAIN_MASK (0x0FE)
+
+#endif // __MDP_REG_AAL_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_color.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_color.h
new file mode 100644
index 0000000000000..f72503975b240
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_color.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_REG_COLOR_H__
+#define __MDP_REG_COLOR_H__
+
+#define MDP_COLOR_WIN_X_MAIN (0x40C)
+#define MDP_COLOR_WIN_Y_MAIN (0x410)
+#define MDP_COLOR_START (0xC00)
+#define MDP_COLOR_INTEN (0xC04)
+#define MDP_COLOR_OUT_SEL (0xC0C)
+#define MDP_COLOR_INTERNAL_IP_WIDTH (0xC50)
+#define MDP_COLOR_INTERNAL_IP_HEIGHT (0xC54)
+#define MDP_COLOR_CM1_EN (0xC60)
+#define MDP_COLOR_CM2_EN (0xCA0)
+
+/* MASK */
+#define MDP_COLOR_WIN_X_MAIN_MASK (0xFFFFFFFF)
+#define MDP_COLOR_WIN_Y_MAIN_MASK (0xFFFFFFFF)
+#define MDP_COLOR_START_MASK (0x0FF013F)
+#define MDP_COLOR_INTEN_MASK (0x07)
+#define MDP_COLOR_OUT_SEL_MASK (0x0777)
+#define MDP_COLOR_INTERNAL_IP_WIDTH_MASK (0x03FFF)
+#define MDP_COLOR_INTERNAL_IP_HEIGHT_MASK (0x03FFF)
+#define MDP_COLOR_CM1_EN_MASK (0x03)
+#define MDP_COLOR_CM2_EN_MASK (0x017)
+
+#endif // __MDP_REG_COLOR_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_fg.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_fg.h
new file mode 100644
index 0000000000000..d90bcad33a59c
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_fg.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_REG_FG_H__
+#define __MDP_REG_FG_H__
+
+#define MDP_FG_TRIGGER (0x0)
+#define MDP_FG_FG_CTRL_0 (0x20)
+#define MDP_FG_FG_CK_EN (0x24)
+#define MDP_FG_TILE_INFO_0 (0x418)
+#define MDP_FG_TILE_INFO_1 (0x41c)
+
+/* MASK */
+#define MDP_FG_TRIGGER_MASK (0x00000007)
+#define MDP_FG_FG_CTRL_0_MASK (0x00000033)
+#define MDP_FG_FG_CK_EN_MASK (0x0000000F)
+#define MDP_FG_TILE_INFO_0_MASK (0xFFFFFFFF)
+#define MDP_FG_TILE_INFO_1_MASK (0xFFFFFFFF)
+
+#endif //__MDP_REG_FG_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_hdr.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_hdr.h
new file mode 100644
index 0000000000000..c19fbba39fc07
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_hdr.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_REG_HDR_H__
+#define __MDP_REG_HDR_H__
+
+#define MDP_HDR_TOP (0x000)
+#define MDP_HDR_RELAY (0x004)
+#define MDP_HDR_SIZE_0 (0x014)
+#define MDP_HDR_SIZE_1 (0x018)
+#define MDP_HDR_SIZE_2 (0x01C)
+#define MDP_HDR_HIST_CTRL_0 (0x020)
+#define MDP_HDR_HIST_CTRL_1 (0x024)
+#define MDP_HDR_HIST_ADDR (0x0DC)
+#define MDP_HDR_TILE_POS (0x118)
+
+/* MASK */
+#define MDP_HDR_RELAY_MASK (0x01)
+#define MDP_HDR_TOP_MASK (0xFF0FEB6D)
+#define MDP_HDR_SIZE_0_MASK (0x1FFF1FFF)
+#define MDP_HDR_SIZE_1_MASK (0x1FFF1FFF)
+#define MDP_HDR_SIZE_2_MASK (0x1FFF1FFF)
+#define MDP_HDR_HIST_CTRL_0_MASK (0x1FFF1FFF)
+#define MDP_HDR_HIST_CTRL_1_MASK (0x1FFF1FFF)
+#define MDP_HDR_HIST_ADDR_MASK (0xBF3F2F3F)
+#define MDP_HDR_TILE_POS_MASK (0x1FFF1FFF)
+
+#endif // __MDP_REG_HDR_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_merge.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_merge.h
new file mode 100644
index 0000000000000..46be27e2a6568
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_merge.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_REG_MERGE_H__
+#define __MDP_REG_MERGE_H__
+
+#define MDP_MERGE_ENABLE (0x000)
+#define MDP_MERGE_CFG_0 (0x010)
+#define MDP_MERGE_CFG_4 (0x020)
+#define MDP_MERGE_CFG_12 (0x040)
+#define MDP_MERGE_CFG_24 (0x070)
+#define MDP_MERGE_CFG_25 (0x074)
+
+/* MASK */
+#define MDP_MERGE_ENABLE_MASK (0xFFFFFFFF)
+#define MDP_MERGE_CFG_0_MASK (0xFFFFFFFF)
+#define MDP_MERGE_CFG_4_MASK (0xFFFFFFFF)
+#define MDP_MERGE_CFG_12_MASK (0xFFFFFFFF)
+#define MDP_MERGE_CFG_24_MASK (0xFFFFFFFF)
+#define MDP_MERGE_CFG_25_MASK (0xFFFFFFFF)
+
+#endif //__MDP_REG_MERGE_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_ovl.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_ovl.h
new file mode 100644
index 0000000000000..21d2d03232937
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_ovl.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_REG_OVL_H__
+#define __MDP_REG_OVL_H__
+
+#define MDP_OVL_EN (0x00c)
+#define MDP_OVL_ROI_SIZE (0x020)
+#define MDP_OVL_DP_CON (0x024)
+#define MDP_OVL_SRC_CON (0x02c)
+#define MDP_OVL_L0_CON (0x030)
+#define MDP_OVL_L0_SRC_SIZE (0x038)
+
+/* MASK */
+#define MDP_OVL_DP_CON_MASK (0x0FFFFFFF)
+#define MDP_OVL_EN_MASK (0xB07D07B1)
+#define MDP_OVL_L0_CON_MASK (0xFFFFFFFF)
+#define MDP_OVL_L0_SRC_SIZE_MASK (0x1FFF1FFF)
+#define MDP_OVL_ROI_SIZE_MASK (0x1FFF1FFF)
+#define MDP_OVL_SRC_CON_MASK (0x0000031F)
+
+#endif //__MDP_REG_OVL_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_pad.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_pad.h
new file mode 100644
index 0000000000000..0e89f1db19edb
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_pad.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_REG_PAD_H__
+#define __MDP_REG_PAD_H__
+
+#define MDP_PAD_CON (0x000)
+#define MDP_PAD_PIC_SIZE (0x004)
+#define MDP_PAD_W_SIZE (0x008)
+#define MDP_PAD_H_SIZE (0x00c)
+
+/* MASK */
+#define MDP_PAD_CON_MASK (0x00000007)
+#define MDP_PAD_PIC_SIZE_MASK (0xFFFFFFFF)
+#define MDP_PAD_W_SIZE_MASK (0x1FFF1FFF)
+#define MDP_PAD_H_SIZE_MASK (0x1FFF1FFF)
+
+#endif // __MDP_REG_PAD_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_rdma.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_rdma.h
index be4065e252d3d..0affb2a3b958a 100644
--- a/drivers/media/platform/mediatek/mdp3/mdp_reg_rdma.h
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_rdma.h
@@ -26,6 +26,18 @@
#define MDP_RDMA_SRC_OFFSET_2 0x128
#define MDP_RDMA_SRC_OFFSET_0_P 0x148
#define MDP_RDMA_TRANSFORM_0 0x200
+#define MDP_RDMA_DMABUF_CON_0 0x240
+#define MDP_RDMA_ULTRA_TH_HIGH_CON_0 0x248
+#define MDP_RDMA_ULTRA_TH_LOW_CON_0 0x250
+#define MDP_RDMA_DMABUF_CON_1 0x258
+#define MDP_RDMA_ULTRA_TH_HIGH_CON_1 0x260
+#define MDP_RDMA_ULTRA_TH_LOW_CON_1 0x268
+#define MDP_RDMA_DMABUF_CON_2 0x270
+#define MDP_RDMA_ULTRA_TH_HIGH_CON_2 0x278
+#define MDP_RDMA_ULTRA_TH_LOW_CON_2 0x280
+#define MDP_RDMA_DMABUF_CON_3 0x288
+#define MDP_RDMA_ULTRA_TH_HIGH_CON_3 0x290
+#define MDP_RDMA_ULTRA_TH_LOW_CON_3 0x298
#define MDP_RDMA_RESV_DUMMY_0 0x2a0
#define MDP_RDMA_MON_STA_1 0x408
#define MDP_RDMA_SRC_BASE_0 0xf00
@@ -54,6 +66,18 @@
#define MDP_RDMA_SRC_OFFSET_2_MASK 0xffffffff
#define MDP_RDMA_SRC_OFFSET_0_P_MASK 0xffffffff
#define MDP_RDMA_TRANSFORM_0_MASK 0xff110777
+#define MDP_RDMA_DMABUF_CON_0_MASK 0x0fff00ff
+#define MDP_RDMA_ULTRA_TH_HIGH_CON_0_MASK 0x3fffffff
+#define MDP_RDMA_ULTRA_TH_LOW_CON_0_MASK 0x3fffffff
+#define MDP_RDMA_DMABUF_CON_1_MASK 0x0f7f007f
+#define MDP_RDMA_ULTRA_TH_HIGH_CON_1_MASK 0x3fffffff
+#define MDP_RDMA_ULTRA_TH_LOW_CON_1_MASK 0x3fffffff
+#define MDP_RDMA_DMABUF_CON_2_MASK 0x0f3f003f
+#define MDP_RDMA_ULTRA_TH_HIGH_CON_2_MASK 0x3fffffff
+#define MDP_RDMA_ULTRA_TH_LOW_CON_2_MASK 0x3fffffff
+#define MDP_RDMA_DMABUF_CON_3_MASK 0x0f3f003f
+#define MDP_RDMA_ULTRA_TH_HIGH_CON_3_MASK 0x3fffffff
+#define MDP_RDMA_ULTRA_TH_LOW_CON_3_MASK 0x3fffffff
#define MDP_RDMA_RESV_DUMMY_0_MASK 0xffffffff
#define MDP_RDMA_MON_STA_1_MASK 0xffffffff
#define MDP_RDMA_SRC_BASE_0_MASK 0xffffffff
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_rsz.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_rsz.h
index 484f6d60641ff..187531db8e3bd 100644
--- a/drivers/media/platform/mediatek/mdp3/mdp_reg_rsz.h
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_rsz.h
@@ -20,6 +20,7 @@
#define PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET 0x02c
#define PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET 0x030
#define PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET 0x034
+#define RSZ_ETC_CONTROL 0x22c
/* MASK */
#define PRZ_ENABLE_MASK 0x00010001
@@ -35,5 +36,6 @@
#define PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET_MASK 0x001fffff
#define PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET_MASK 0x0000ffff
#define PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET_MASK 0x001fffff
+#define RSZ_ETC_CONTROL_MASK 0xff770000
#endif // __MDP_REG_RSZ_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_tdshp.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_tdshp.h
new file mode 100644
index 0000000000000..83b5f9b432d84
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_tdshp.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_REG_TDSHP_H__
+#define __MDP_REG_TDSHP_H__
+
+#define MDP_HIST_CFG_00 (0x064)
+#define MDP_HIST_CFG_01 (0x068)
+#define MDP_TDSHP_CTRL (0x100)
+#define MDP_TDSHP_CFG (0x110)
+#define MDP_TDSHP_INPUT_SIZE (0x120)
+#define MDP_TDSHP_OUTPUT_OFFSET (0x124)
+#define MDP_TDSHP_OUTPUT_SIZE (0x128)
+#define MDP_LUMA_HIST_INIT (0x200)
+#define MDP_DC_TWO_D_W1_RESULT_INIT (0x260)
+#define MDP_CONTOUR_HIST_INIT (0x398)
+
+/* MASK */
+#define MDP_HIST_CFG_00_MASK (0xFFFFFFFF)
+#define MDP_HIST_CFG_01_MASK (0xFFFFFFFF)
+#define MDP_LUMA_HIST_MASK (0xFFFFFFFF)
+#define MDP_TDSHP_CTRL_MASK (0x07)
+#define MDP_TDSHP_CFG_MASK (0x03F7)
+#define MDP_TDSHP_INPUT_SIZE_MASK (0x1FFF1FFF)
+#define MDP_TDSHP_OUTPUT_OFFSET_MASK (0x0FF00FF)
+#define MDP_TDSHP_OUTPUT_SIZE_MASK (0x1FFF1FFF)
+#define MDP_LUMA_HIST_INIT_MASK (0xFFFFFFFF)
+#define MDP_DC_TWO_D_W1_RESULT_INIT_MASK (0x007FFFFF)
+#define MDP_CONTOUR_HIST_INIT_MASK (0xFFFFFFFF)
+
+#endif // __MDP_REG_TDSHP_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_wrot.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_wrot.h
index 6d3ff0e2b6720..b6f016d2c29da 100644
--- a/drivers/media/platform/mediatek/mdp3/mdp_reg_wrot.h
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_wrot.h
@@ -17,14 +17,18 @@
#define VIDO_STRIDE 0x030
#define VIDO_OFST_ADDR_C 0x038
#define VIDO_STRIDE_C 0x03c
+#define VIDO_CTRL_2 0x048
#define VIDO_DITHER 0x054
#define VIDO_STRIDE_V 0x06c
#define VIDO_OFST_ADDR_V 0x068
#define VIDO_RSV_1 0x070
+#define VIDO_DMA_PREULTRA 0x074
#define VIDO_IN_SIZE 0x078
#define VIDO_ROT_EN 0x07c
#define VIDO_FIFO_TEST 0x080
#define VIDO_MAT_CTRL 0x084
+#define VIDO_SCAN_10BIT 0x0dc
+#define VIDO_PENDING_ZERO 0x0e0
#define VIDO_BASE_ADDR 0xf00
#define VIDO_BASE_ADDR_C 0xf04
#define VIDO_BASE_ADDR_V 0xf08
@@ -40,14 +44,18 @@
#define VIDO_STRIDE_MASK 0x0000ffff
#define VIDO_OFST_ADDR_C_MASK 0x0fffffff
#define VIDO_STRIDE_C_MASK 0x0000ffff
+#define VIDO_CTRL_2_MASK 0x0000000f
#define VIDO_DITHER_MASK 0xff000001
#define VIDO_STRIDE_V_MASK 0x0000ffff
#define VIDO_OFST_ADDR_V_MASK 0x0fffffff
#define VIDO_RSV_1_MASK 0xffffffff
+#define VIDO_DMA_PREULTRA_MASK 0x00ffffff
#define VIDO_IN_SIZE_MASK 0x1fff1fff
#define VIDO_ROT_EN_MASK 0x00000001
#define VIDO_FIFO_TEST_MASK 0x00000fff
#define VIDO_MAT_CTRL_MASK 0x000000f3
+#define VIDO_SCAN_10BIT_MASK 0x0000000f
+#define VIDO_PENDING_ZERO_MASK 0x07ffffff
#define VIDO_BASE_ADDR_MASK 0xffffffff
#define VIDO_BASE_ADDR_C_MASK 0xffffffff
#define VIDO_BASE_ADDR_V_MASK 0xffffffff
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_sm_mt8195.h b/drivers/media/platform/mediatek/mdp3/mdp_sm_mt8195.h
new file mode 100644
index 0000000000000..b09f48222d243
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_sm_mt8195.h
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_SM_MT8195_H__
+#define __MDP_SM_MT8195_H__
+
+#include "mtk-mdp3-type.h"
+
+/*
+ * ISP-MDP generic output information
+ * MD5 of the target SCP prebuild:
+ * a49ec487e458b5971880f1b63dc2a9d5
+ */
+
+#define IMG_MAX_SUBFRAMES_8195 20
+
+struct img_comp_frame_8195 {
+ u32 output_disable;
+ u32 bypass;
+ u32 in_width;
+ u32 in_height;
+ u32 out_width;
+ u32 out_height;
+ struct img_crop crop;
+ u32 in_total_width;
+ u32 out_total_width;
+} __packed;
+
+struct img_comp_subfrm_8195 {
+ u32 tile_disable;
+ struct img_region in;
+ struct img_region out;
+ struct img_offset luma;
+ struct img_offset chroma;
+ s32 out_vertical; /* Output vertical index */
+ s32 out_horizontal; /* Output horizontal index */
+} __packed;
+
+struct mdp_rdma_subfrm_8195 {
+ u32 offset[IMG_MAX_PLANES];
+ u32 offset_0_p;
+ u32 src;
+ u32 clip;
+ u32 clip_ofst;
+ u32 in_tile_xleft;
+ u32 in_tile_ytop;
+} __packed;
+
+struct mdp_rdma_data_8195 {
+ u32 src_ctrl;
+ u32 comp_ctrl;
+ u32 control;
+ u32 iova[IMG_MAX_PLANES];
+ u32 iova_end[IMG_MAX_PLANES];
+ u32 mf_bkgd;
+ u32 mf_bkgd_in_pxl;
+ u32 sf_bkgd;
+ u32 ufo_dec_y;
+ u32 ufo_dec_c;
+ u32 transform;
+ u32 dmabuf_con0;
+ u32 ultra_th_high_con0;
+ u32 ultra_th_low_con0;
+ u32 dmabuf_con1;
+ u32 ultra_th_high_con1;
+ u32 ultra_th_low_con1;
+ u32 dmabuf_con2;
+ u32 ultra_th_high_con2;
+ u32 ultra_th_low_con2;
+ u32 dmabuf_con3;
+ struct mdp_rdma_subfrm_8195 subfrms[IMG_MAX_SUBFRAMES_8195];
+} __packed;
+
+struct mdp_fg_subfrm_8195 {
+ u32 info_0;
+ u32 info_1;
+} __packed;
+
+struct mdp_fg_data_8195 {
+ u32 ctrl_0;
+ u32 ck_en;
+ struct mdp_fg_subfrm_8195 subfrms[IMG_MAX_SUBFRAMES_8195];
+} __packed;
+
+struct mdp_hdr_subfrm_8195 {
+ u32 win_size;
+ u32 src;
+ u32 clip_ofst0;
+ u32 clip_ofst1;
+ u32 hist_ctrl_0;
+ u32 hist_ctrl_1;
+ u32 hdr_top;
+ u32 hist_addr;
+} __packed;
+
+struct mdp_hdr_data_8195 {
+ u32 top;
+ u32 relay;
+ struct mdp_hdr_subfrm_8195 subfrms[IMG_MAX_SUBFRAMES_8195];
+} __packed;
+
+struct mdp_aal_subfrm_8195 {
+ u32 src;
+ u32 clip;
+ u32 clip_ofst;
+} __packed;
+
+struct mdp_aal_data_8195 {
+ u32 cfg_main;
+ u32 cfg;
+ struct mdp_aal_subfrm_8195 subfrms[IMG_MAX_SUBFRAMES_8195];
+} __packed;
+
+struct mdp_rsz_subfrm_8195 {
+ u32 control2;
+ u32 src;
+ u32 clip;
+ u32 hdmirx_en;
+ u32 luma_h_int_ofst;
+ u32 luma_h_sub_ofst;
+ u32 luma_v_int_ofst;
+ u32 luma_v_sub_ofst;
+ u32 chroma_h_int_ofst;
+ u32 chroma_h_sub_ofst;
+ u32 rsz_switch;
+ u32 merge_cfg;
+} __packed;
+
+struct mdp_rsz_data_8195 {
+ u32 coeff_step_x;
+ u32 coeff_step_y;
+ u32 control1;
+ u32 control2;
+ u32 etc_control;
+ u32 prz_enable;
+ u32 ibse_softclip;
+ u32 tap_adapt;
+ u32 ibse_gaincontrol1;
+ u32 ibse_gaincontrol2;
+ u32 ibse_ylevel_1;
+ u32 ibse_ylevel_2;
+ u32 ibse_ylevel_3;
+ u32 ibse_ylevel_4;
+ u32 ibse_ylevel_5;
+ struct mdp_rsz_subfrm_8195 subfrms[IMG_MAX_SUBFRAMES_8195];
+} __packed;
+
+struct mdp_tdshp_subfrm_8195 {
+ u32 src;
+ u32 clip;
+ u32 clip_ofst;
+ u32 hist_cfg_0;
+ u32 hist_cfg_1;
+} __packed;
+
+struct mdp_tdshp_data_8195 {
+ u32 cfg;
+ struct mdp_tdshp_subfrm_8195 subfrms[IMG_MAX_SUBFRAMES_8195];
+} __packed;
+
+struct mdp_color_subfrm_8195 {
+ u32 in_hsize;
+ u32 in_vsize;
+} __packed;
+
+struct mdp_color_data_8195 {
+ u32 start;
+ struct mdp_color_subfrm_8195 subfrms[IMG_MAX_SUBFRAMES_8195];
+} __packed;
+
+struct mdp_ovl_subfrm_8195 {
+ u32 L0_src_size;
+ u32 roi_size;
+} __packed;
+
+struct mdp_ovl_data_8195 {
+ u32 L0_con;
+ u32 src_con;
+ struct mdp_ovl_subfrm_8195 subfrms[IMG_MAX_SUBFRAMES_8195];
+} __packed;
+
+struct mdp_pad_subfrm_8195 {
+ u32 pic_size;
+} __packed;
+
+struct mdp_pad_data_8195 {
+ struct mdp_pad_subfrm_8195 subfrms[IMG_MAX_SUBFRAMES_8195];
+} __packed;
+
+struct mdp_tcc_subfrm_8195 {
+ u32 pic_size;
+} __packed;
+
+struct mdp_tcc_data_8195 {
+ struct mdp_tcc_subfrm_8195 subfrms[IMG_MAX_SUBFRAMES_8195];
+} __packed;
+
+struct mdp_wrot_subfrm_8195 {
+ u32 offset[IMG_MAX_PLANES];
+ u32 src;
+ u32 clip;
+ u32 clip_ofst;
+ u32 main_buf;
+} __packed;
+
+struct mdp_wrot_data_8195 {
+ u32 iova[IMG_MAX_PLANES];
+ u32 control;
+ u32 stride[IMG_MAX_PLANES];
+ u32 mat_ctrl;
+ u32 fifo_test;
+ u32 filter;
+ u32 pre_ultra;
+ u32 framesize;
+ u32 afbc_yuvtrans;
+ u32 scan_10bit;
+ u32 pending_zero;
+ u32 bit_number;
+ u32 pvric;
+ u32 vpp02vpp1;
+ struct mdp_wrot_subfrm_8195 subfrms[IMG_MAX_SUBFRAMES_8195];
+} __packed;
+
+struct mdp_wdma_subfrm_8195 {
+ u32 offset[IMG_MAX_PLANES];
+ u32 src;
+ u32 clip;
+ u32 clip_ofst;
+} __packed;
+
+struct mdp_wdma_data_8195 {
+ u32 wdma_cfg;
+ u32 iova[IMG_MAX_PLANES];
+ u32 w_in_byte;
+ u32 uv_stride;
+ struct mdp_wdma_subfrm_8195 subfrms[IMG_MAX_SUBFRAMES_8195];
+} __packed;
+
+struct isp_data_8195 {
+ u64 dl_flags; /* 1 << (enum mdp_comp_type) */
+ u32 smxi_iova[4];
+ u32 cq_idx;
+ u32 cq_iova;
+ u32 tpipe_iova[IMG_MAX_SUBFRAMES_8195];
+} __packed;
+
+struct img_compparam_8195 {
+ u32 type; /* enum mdp_comp_id */
+ u32 id; /* engine alias_id */
+ u32 input;
+ u32 outputs[IMG_MAX_HW_OUTPUTS];
+ u32 num_outputs;
+ struct img_comp_frame_8195 frame;
+ struct img_comp_subfrm_8195 subfrms[IMG_MAX_SUBFRAMES_8195];
+ u32 num_subfrms;
+ union {
+ struct mdp_rdma_data_8195 rdma;
+ struct mdp_fg_data_8195 fg;
+ struct mdp_hdr_data_8195 hdr;
+ struct mdp_aal_data_8195 aal;
+ struct mdp_rsz_data_8195 rsz;
+ struct mdp_tdshp_data_8195 tdshp;
+ struct mdp_color_data_8195 color;
+ struct mdp_ovl_data_8195 ovl;
+ struct mdp_pad_data_8195 pad;
+ struct mdp_tcc_data_8195 tcc;
+ struct mdp_wrot_data_8195 wrot;
+ struct mdp_wdma_data_8195 wdma;
+ struct isp_data_8195 isp;
+ };
+} __packed;
+
+struct img_config_8195 {
+ struct img_compparam_8195 components[IMG_MAX_COMPONENTS];
+ u32 num_components;
+ struct img_mmsys_ctrl ctrls[IMG_MAX_SUBFRAMES_8195];
+ u32 num_subfrms;
+} __packed;
+
+#endif /* __MDP_SM_MT8195_H__ */
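The structures above are laid out for consumption by the SCP firmware as raw shared memory, which is why every field is a fixed-width type and every struct carries __packed. A minimal standalone sketch (not part of the driver; the names are illustrative) of how such a shared layout can be pinned down with a compile-time size check:

#include <assert.h>
#include <stdint.h>

/* Hypothetical two-field subframe record shared with remote firmware. */
struct demo_subfrm {
	uint32_t src;	/* packed source width/height */
	uint32_t clip;	/* packed clip width/height */
} __attribute__((packed));

/* Both sides assume 8 bytes; fail the build if the layout ever drifts. */
static_assert(sizeof(struct demo_subfrm) == 8, "demo_subfrm ABI size changed");

int main(void)
{
	struct demo_subfrm s = { .src = 0x043800F0u, .clip = 0x043800F0u };

	(void)s;
	return 0;
}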
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h b/drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h
index 22b8b9a10ef7f..f83ac408306ee 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h
+++ b/drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h
@@ -10,6 +10,7 @@
#include <linux/err.h>
#include "mdp_sm_mt8183.h"
+#include "mdp_sm_mt8195.h"
#include "mtk-mdp3-type.h"
/* ISP-MDP generic input information */
@@ -115,6 +116,7 @@ struct img_frameparam {
/* Platform config indicator */
#define MT8183 8183
+#define MT8195 8195
#define CFG_CHECK(plat, p_id) ((plat) == (p_id))
@@ -137,12 +139,14 @@ struct img_frameparam {
struct img_config {
union {
struct img_config_8183 config_8183;
+ struct img_config_8195 config_8195;
};
} __packed;
struct img_compparam {
union {
struct img_compparam_8183 comp_8183;
+ struct img_compparam_8195 comp_8195;
};
} __packed;
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cfg.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cfg.h
index dee57cc4a954f..49cdf45f6e59e 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cfg.h
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cfg.h
@@ -10,11 +10,13 @@
#include <linux/types.h>
extern const struct mtk_mdp_driver_data mt8183_mdp_driver_data;
+extern const struct mtk_mdp_driver_data mt8195_mdp_driver_data;
struct mdp_dev;
enum mtk_mdp_comp_id;
s32 mdp_cfg_get_id_inner(struct mdp_dev *mdp_dev, enum mtk_mdp_comp_id id);
enum mtk_mdp_comp_id mdp_cfg_get_id_public(struct mdp_dev *mdp_dev, s32 id);
+bool mdp_cfg_comp_is_dummy(struct mdp_dev *mdp_dev, s32 inner_id);
#endif /* __MTK_MDP3_CFG_H__ */
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
index 6adac857a4779..1d64bac34b90a 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
@@ -6,6 +6,7 @@
#include <linux/mailbox_controller.h>
#include <linux/platform_device.h>
+#include "mtk-mdp3-cfg.h"
#include "mtk-mdp3-cmdq.h"
#include "mtk-mdp3-comp.h"
#include "mtk-mdp3-core.h"
@@ -39,85 +40,192 @@ static bool is_output_disabled(int p_id, const struct img_compparam *param, u32
num = CFG_COMP(MT8183, param, num_subfrms);
dis_output = CFG_COMP(MT8183, param, frame.output_disable);
dis_tile = CFG_COMP(MT8183, param, frame.output_disable);
+ } else if (CFG_CHECK(MT8195, p_id)) {
+ num = CFG_COMP(MT8195, param, num_subfrms);
+ dis_output = CFG_COMP(MT8195, param, frame.output_disable);
+ dis_tile = CFG_COMP(MT8195, param, frame.output_disable);
}
return (count < num) ? (dis_output || dis_tile) : true;
}
-static int mdp_path_subfrm_require(const struct mdp_path *path,
- struct mdp_cmdq_cmd *cmd,
- s32 *mutex_id, u32 count)
+static struct mtk_mutex *__get_mutex(const struct mdp_dev *mdp_dev,
+ const struct mdp_pipe_info *p)
{
- const int p_id = path->mdp_dev->mdp_data->mdp_plat_id;
- const struct mdp_comp_ctx *ctx;
- const struct mtk_mdp_driver_data *data = path->mdp_dev->mdp_data;
- struct device *dev = &path->mdp_dev->pdev->dev;
- struct mtk_mutex **mutex = path->mdp_dev->mdp_mutex;
- int id, index;
- u32 num_comp = 0;
+ return mdp_dev->mm_subsys[p->sub_id].mdp_mutex[p->mutex_id];
+}
- if (CFG_CHECK(MT8183, p_id))
- num_comp = CFG_GET(MT8183, path->config, num_components);
+static u8 __get_pp_num(enum mdp_stream_type type)
+{
+ switch (type) {
+ case MDP_STREAM_TYPE_DUAL_BITBLT:
+ return MDP_PP_USED_2;
+ default:
+ return MDP_PP_USED_1;
+ }
+}
- /* Decide which mutex to use based on the current pipeline */
- switch (path->comps[0].comp->public_id) {
+static enum mdp_pipe_id __get_pipe(const struct mdp_dev *mdp_dev,
+ enum mtk_mdp_comp_id id)
+{
+ enum mdp_pipe_id pipe_id;
+
+ switch (id) {
case MDP_COMP_RDMA0:
- index = MDP_PIPE_RDMA0;
+ pipe_id = MDP_PIPE_RDMA0;
break;
case MDP_COMP_ISP_IMGI:
- index = MDP_PIPE_IMGI;
+ pipe_id = MDP_PIPE_IMGI;
break;
case MDP_COMP_WPEI:
- index = MDP_PIPE_WPEI;
+ pipe_id = MDP_PIPE_WPEI;
break;
case MDP_COMP_WPEI2:
- index = MDP_PIPE_WPEI2;
+ pipe_id = MDP_PIPE_WPEI2;
+ break;
+ case MDP_COMP_RDMA1:
+ pipe_id = MDP_PIPE_RDMA1;
+ break;
+ case MDP_COMP_RDMA2:
+ pipe_id = MDP_PIPE_RDMA2;
+ break;
+ case MDP_COMP_RDMA3:
+ pipe_id = MDP_PIPE_RDMA3;
break;
default:
- dev_err(dev, "Unknown pipeline and no mutex is assigned");
- return -EINVAL;
+ /* Avoid exceptions when operating MUTEX */
+ pipe_id = MDP_PIPE_RDMA0;
+ dev_err(&mdp_dev->pdev->dev, "Unknown pipeline id %d", id);
+ break;
+ }
+
+ return pipe_id;
+}
+
+static struct img_config *__get_config_offset(struct mdp_dev *mdp,
+ struct mdp_cmdq_param *param,
+ u8 pp_idx)
+{
+ const int p_id = mdp->mdp_data->mdp_plat_id;
+ struct device *dev = &mdp->pdev->dev;
+ void *cfg_c, *cfg_n;
+ long bound = mdp->vpu.config_size;
+
+ if (pp_idx >= mdp->mdp_data->pp_used)
+ goto err_param;
+
+ if (CFG_CHECK(MT8183, p_id))
+ cfg_c = CFG_OFST(MT8183, param->config, pp_idx);
+ else if (CFG_CHECK(MT8195, p_id))
+ cfg_c = CFG_OFST(MT8195, param->config, pp_idx);
+ else
+ goto err_param;
+
+ if (CFG_CHECK(MT8183, p_id))
+ cfg_n = CFG_OFST(MT8183, param->config, pp_idx + 1);
+ else if (CFG_CHECK(MT8195, p_id))
+ cfg_n = CFG_OFST(MT8195, param->config, pp_idx + 1);
+ else
+ goto err_param;
+
+ if ((long)cfg_n - (long)mdp->vpu.config > bound) {
+ dev_err(dev, "config offset %ld OOB %ld\n", (long)cfg_n, bound);
+ cfg_c = ERR_PTR(-EFAULT);
}
- *mutex_id = data->pipe_info[index].mutex_id;
+
+ return (struct img_config *)cfg_c;
+
+err_param:
+ cfg_c = ERR_PTR(-EINVAL);
+ return (struct img_config *)cfg_c;
+}
+
+static int mdp_path_subfrm_require(const struct mdp_path *path,
+ struct mdp_cmdq_cmd *cmd,
+ struct mdp_pipe_info *p, u32 count)
+{
+ const int p_id = path->mdp_dev->mdp_data->mdp_plat_id;
+ const struct mdp_comp_ctx *ctx;
+ const struct mtk_mdp_driver_data *data = path->mdp_dev->mdp_data;
+ struct mtk_mutex *mutex;
+ int id, index;
+ u32 num_comp = 0;
+
+ if (CFG_CHECK(MT8183, p_id))
+ num_comp = CFG_GET(MT8183, path->config, num_components);
+ else if (CFG_CHECK(MT8195, p_id))
+ num_comp = CFG_GET(MT8195, path->config, num_components);
+
+ /* Decide which mutex to use based on the current pipeline */
+ index = __get_pipe(path->mdp_dev, path->comps[0].comp->public_id);
+ memcpy(p, &data->pipe_info[index], sizeof(struct mdp_pipe_info));
+ mutex = __get_mutex(path->mdp_dev, p);
/* Set mutex mod */
for (index = 0; index < num_comp; index++) {
+ s32 inner_id = MDP_COMP_NONE;
+ const u32 *mutex_idx;
+ const struct mdp_comp_blend *b;
+
+ if (CFG_CHECK(MT8183, p_id))
+ inner_id = CFG_GET(MT8183, path->config, components[index].type);
+ else if (CFG_CHECK(MT8195, p_id))
+ inner_id = CFG_GET(MT8195, path->config, components[index].type);
+
+ if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
+ continue;
+
ctx = &path->comps[index];
if (is_output_disabled(p_id, ctx->param, count))
continue;
+
+ mutex_idx = data->mdp_mutex_table_idx;
id = ctx->comp->public_id;
- mtk_mutex_write_mod(mutex[*mutex_id],
- data->mdp_mutex_table_idx[id], false);
+ mtk_mutex_write_mod(mutex, mutex_idx[id], false);
+
+ b = &data->comp_data[id].blend;
+ if (b && b->aid_mod)
+ mtk_mutex_write_mod(mutex, mutex_idx[b->b_id], false);
}
- mtk_mutex_write_sof(mutex[*mutex_id],
- MUTEX_SOF_IDX_SINGLE_MODE);
+ mtk_mutex_write_sof(mutex, MUTEX_SOF_IDX_SINGLE_MODE);
return 0;
}
static int mdp_path_subfrm_run(const struct mdp_path *path,
struct mdp_cmdq_cmd *cmd,
- s32 *mutex_id, u32 count)
+ struct mdp_pipe_info *p, u32 count)
{
const int p_id = path->mdp_dev->mdp_data->mdp_plat_id;
const struct mdp_comp_ctx *ctx;
struct device *dev = &path->mdp_dev->pdev->dev;
- struct mtk_mutex **mutex = path->mdp_dev->mdp_mutex;
+ struct mtk_mutex *mutex;
int index;
u32 num_comp = 0;
s32 event;
+ s32 inner_id = MDP_COMP_NONE;
- if (-1 == *mutex_id) {
+ if (-1 == p->mutex_id) {
dev_err(dev, "Incorrect mutex id");
return -EINVAL;
}
if (CFG_CHECK(MT8183, p_id))
num_comp = CFG_GET(MT8183, path->config, num_components);
+ else if (CFG_CHECK(MT8195, p_id))
+ num_comp = CFG_GET(MT8195, path->config, num_components);
/* Wait WROT SRAM shared to DISP RDMA */
/* Clear SOF event for each engine */
for (index = 0; index < num_comp; index++) {
+ if (CFG_CHECK(MT8183, p_id))
+ inner_id = CFG_GET(MT8183, path->config, components[index].type);
+ else if (CFG_CHECK(MT8195, p_id))
+ inner_id = CFG_GET(MT8195, path->config, components[index].type);
+
+ if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
+ continue;
ctx = &path->comps[index];
if (is_output_disabled(p_id, ctx->param, count))
continue;
@@ -127,10 +235,18 @@ static int mdp_path_subfrm_run(const struct mdp_path *path,
}
/* Enable the mutex */
- mtk_mutex_enable_by_cmdq(mutex[*mutex_id], (void *)&cmd->pkt);
+ mutex = __get_mutex(path->mdp_dev, p);
+ mtk_mutex_enable_by_cmdq(mutex, (void *)&cmd->pkt);
/* Wait SOF events and clear mutex modules (optional) */
for (index = 0; index < num_comp; index++) {
+ if (CFG_CHECK(MT8183, p_id))
+ inner_id = CFG_GET(MT8183, path->config, components[index].type);
+ else if (CFG_CHECK(MT8195, p_id))
+ inner_id = CFG_GET(MT8195, path->config, components[index].type);
+
+ if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
+ continue;
ctx = &path->comps[index];
if (is_output_disabled(p_id, ctx->param, count))
continue;
@@ -151,13 +267,26 @@ static int mdp_path_ctx_init(struct mdp_dev *mdp, struct mdp_path *path)
if (CFG_CHECK(MT8183, p_id))
num_comp = CFG_GET(MT8183, path->config, num_components);
+ else if (CFG_CHECK(MT8195, p_id))
+ num_comp = CFG_GET(MT8195, path->config, num_components);
if (num_comp < 1)
return -EINVAL;
for (index = 0; index < num_comp; index++) {
+ s32 inner_id = MDP_COMP_NONE;
+
+ if (CFG_CHECK(MT8183, p_id))
+ inner_id = CFG_GET(MT8183, path->config, components[index].type);
+ else if (CFG_CHECK(MT8195, p_id))
+ inner_id = CFG_GET(MT8195, path->config, components[index].type);
+
+ if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
+ continue;
if (CFG_CHECK(MT8183, p_id))
param = (void *)CFG_ADDR(MT8183, path->config, components[index]);
+ else if (CFG_CHECK(MT8195, p_id))
+ param = (void *)CFG_ADDR(MT8195, path->config, components[index]);
ret = mdp_comp_ctx_config(mdp, &path->comps[index],
param, path->param);
if (ret)
@@ -174,18 +303,23 @@ static int mdp_path_config_subfrm(struct mdp_cmdq_cmd *cmd,
const struct img_mmsys_ctrl *ctrl = NULL;
const struct img_mux *set;
struct mdp_comp_ctx *ctx;
- s32 mutex_id;
+ struct mdp_pipe_info pipe;
int index, ret;
u32 num_comp = 0;
+ s32 inner_id = MDP_COMP_NONE;
if (CFG_CHECK(MT8183, p_id))
num_comp = CFG_GET(MT8183, path->config, num_components);
+ else if (CFG_CHECK(MT8195, p_id))
+ num_comp = CFG_GET(MT8195, path->config, num_components);
if (CFG_CHECK(MT8183, p_id))
ctrl = CFG_ADDR(MT8183, path->config, ctrls[count]);
+ else if (CFG_CHECK(MT8195, p_id))
+ ctrl = CFG_ADDR(MT8195, path->config, ctrls[count]);
/* Acquire components */
- ret = mdp_path_subfrm_require(path, cmd, &mutex_id, count);
+ ret = mdp_path_subfrm_require(path, cmd, &pipe, count);
if (ret)
return ret;
/* Enable mux settings */
@@ -196,6 +330,13 @@ static int mdp_path_config_subfrm(struct mdp_cmdq_cmd *cmd,
}
/* Config sub-frame information */
for (index = (num_comp - 1); index >= 0; index--) {
+ if (CFG_CHECK(MT8183, p_id))
+ inner_id = CFG_GET(MT8183, path->config, components[index].type);
+ else if (CFG_CHECK(MT8195, p_id))
+ inner_id = CFG_GET(MT8195, path->config, components[index].type);
+
+ if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
+ continue;
ctx = &path->comps[index];
if (is_output_disabled(p_id, ctx->param, count))
continue;
@@ -204,11 +345,18 @@ static int mdp_path_config_subfrm(struct mdp_cmdq_cmd *cmd,
return ret;
}
/* Run components */
- ret = mdp_path_subfrm_run(path, cmd, &mutex_id, count);
+ ret = mdp_path_subfrm_run(path, cmd, &pipe, count);
if (ret)
return ret;
/* Wait components done */
for (index = 0; index < num_comp; index++) {
+ if (CFG_CHECK(MT8183, p_id))
+ inner_id = CFG_GET(MT8183, path->config, components[index].type);
+ else if (CFG_CHECK(MT8195, p_id))
+ inner_id = CFG_GET(MT8195, path->config, components[index].type);
+
+ if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
+ continue;
ctx = &path->comps[index];
if (is_output_disabled(p_id, ctx->param, count))
continue;
@@ -218,6 +366,13 @@ static int mdp_path_config_subfrm(struct mdp_cmdq_cmd *cmd,
}
/* Advance to the next sub-frame */
for (index = 0; index < num_comp; index++) {
+ if (CFG_CHECK(MT8183, p_id))
+ inner_id = CFG_GET(MT8183, path->config, components[index].type);
+ else if (CFG_CHECK(MT8195, p_id))
+ inner_id = CFG_GET(MT8195, path->config, components[index].type);
+
+ if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
+ continue;
ctx = &path->comps[index];
ret = call_op(ctx, advance_subfrm, cmd, count);
if (ret)
@@ -241,16 +396,28 @@ static int mdp_path_config(struct mdp_dev *mdp, struct mdp_cmdq_cmd *cmd,
int index, count, ret;
u32 num_comp = 0;
u32 num_sub = 0;
+ s32 inner_id = MDP_COMP_NONE;
if (CFG_CHECK(MT8183, p_id))
num_comp = CFG_GET(MT8183, path->config, num_components);
+ else if (CFG_CHECK(MT8195, p_id))
+ num_comp = CFG_GET(MT8195, path->config, num_components);
if (CFG_CHECK(MT8183, p_id))
num_sub = CFG_GET(MT8183, path->config, num_subfrms);
+ else if (CFG_CHECK(MT8195, p_id))
+ num_sub = CFG_GET(MT8195, path->config, num_subfrms);
/* Config path frame */
/* Reset components */
for (index = 0; index < num_comp; index++) {
+ if (CFG_CHECK(MT8183, p_id))
+ inner_id = CFG_GET(MT8183, path->config, components[index].type);
+ else if (CFG_CHECK(MT8195, p_id))
+ inner_id = CFG_GET(MT8195, path->config, components[index].type);
+
+ if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
+ continue;
ctx = &path->comps[index];
ret = call_op(ctx, init_comp, cmd);
if (ret)
@@ -263,7 +430,17 @@ static int mdp_path_config(struct mdp_dev *mdp, struct mdp_cmdq_cmd *cmd,
ctx = &path->comps[index];
if (CFG_CHECK(MT8183, p_id))
+ inner_id = CFG_GET(MT8183, path->config, components[index].type);
+ else if (CFG_CHECK(MT8195, p_id))
+ inner_id = CFG_GET(MT8195, path->config, components[index].type);
+
+ if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
+ continue;
+
+ if (CFG_CHECK(MT8183, p_id))
out = CFG_COMP(MT8183, ctx->param, outputs[0]);
+ else if (CFG_CHECK(MT8195, p_id))
+ out = CFG_COMP(MT8195, ctx->param, outputs[0]);
compose = path->composes[out];
ret = call_op(ctx, config_frame, cmd, compose);
@@ -279,6 +456,13 @@ static int mdp_path_config(struct mdp_dev *mdp, struct mdp_cmdq_cmd *cmd,
}
/* Post processing information */
for (index = 0; index < num_comp; index++) {
+ if (CFG_CHECK(MT8183, p_id))
+ inner_id = CFG_GET(MT8183, path->config, components[index].type);
+ else if (CFG_CHECK(MT8195, p_id))
+ inner_id = CFG_GET(MT8195, path->config, components[index].type);
+
+ if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
+ continue;
ctx = &path->comps[index];
ret = call_op(ctx, post_process, cmd);
if (ret)
@@ -328,18 +512,31 @@ static void mdp_auto_release_work(struct work_struct *work)
{
struct mdp_cmdq_cmd *cmd;
struct mdp_dev *mdp;
- int id;
+ struct mtk_mutex *mutex;
+ enum mdp_pipe_id pipe_id;
cmd = container_of(work, struct mdp_cmdq_cmd, auto_release_work);
mdp = cmd->mdp;
- id = mdp->mdp_data->pipe_info[MDP_PIPE_RDMA0].mutex_id;
- mtk_mutex_unprepare(mdp->mdp_mutex[id]);
+ pipe_id = __get_pipe(mdp, cmd->comps[0].public_id);
+ mutex = __get_mutex(mdp, &mdp->mdp_data->pipe_info[pipe_id]);
+ mtk_mutex_unprepare(mutex);
mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
cmd->num_comps);
- atomic_dec(&mdp->job_count);
- wake_up(&mdp->callback_wq);
+ if (atomic_dec_and_test(&mdp->job_count)) {
+ if (cmd->mdp_ctx)
+ mdp_m2m_job_finish(cmd->mdp_ctx);
+
+ if (cmd->user_cmdq_cb) {
+ struct cmdq_cb_data user_cb_data;
+
+ user_cb_data.sta = cmd->data->sta;
+ user_cb_data.pkt = cmd->data->pkt;
+ cmd->user_cmdq_cb(user_cb_data);
+ }
+ wake_up(&mdp->callback_wq);
+ }
mdp_cmdq_pkt_destroy(&cmd->pkt);
kfree(cmd->comps);
@@ -354,7 +551,7 @@ static void mdp_handle_cmdq_callback(struct mbox_client *cl, void *mssg)
struct cmdq_cb_data *data;
struct mdp_dev *mdp;
struct device *dev;
- int id;
+ enum mdp_pipe_id pipe_id;
if (!mssg) {
pr_info("%s:no callback data\n", __func__);
@@ -363,30 +560,23 @@ static void mdp_handle_cmdq_callback(struct mbox_client *cl, void *mssg)
data = (struct cmdq_cb_data *)mssg;
cmd = container_of(data->pkt, struct mdp_cmdq_cmd, pkt);
+ cmd->data = data;
mdp = cmd->mdp;
dev = &mdp->pdev->dev;
- if (cmd->mdp_ctx)
- mdp_m2m_job_finish(cmd->mdp_ctx);
-
- if (cmd->user_cmdq_cb) {
- struct cmdq_cb_data user_cb_data;
-
- user_cb_data.sta = data->sta;
- user_cb_data.pkt = data->pkt;
- cmd->user_cmdq_cb(user_cb_data);
- }
-
INIT_WORK(&cmd->auto_release_work, mdp_auto_release_work);
if (!queue_work(mdp->clock_wq, &cmd->auto_release_work)) {
+ struct mtk_mutex *mutex;
+
dev_err(dev, "%s:queue_work fail!\n", __func__);
- id = mdp->mdp_data->pipe_info[MDP_PIPE_RDMA0].mutex_id;
- mtk_mutex_unprepare(mdp->mdp_mutex[id]);
+ pipe_id = __get_pipe(mdp, cmd->comps[0].public_id);
+ mutex = __get_mutex(mdp, &mdp->mdp_data->pipe_info[pipe_id]);
+ mtk_mutex_unprepare(mutex);
mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
cmd->num_comps);
- atomic_dec(&mdp->job_count);
- wake_up(&mdp->callback_wq);
+ if (atomic_dec_and_test(&mdp->job_count))
+ wake_up(&mdp->callback_wq);
mdp_cmdq_pkt_destroy(&cmd->pkt);
kfree(cmd->comps);
@@ -396,34 +586,48 @@ static void mdp_handle_cmdq_callback(struct mbox_client *cl, void *mssg)
}
}
-int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
+static struct mdp_cmdq_cmd *mdp_cmdq_prepare(struct mdp_dev *mdp,
+ struct mdp_cmdq_param *param,
+ u8 pp_idx)
{
struct mdp_path *path = NULL;
struct mdp_cmdq_cmd *cmd = NULL;
struct mdp_comp *comps = NULL;
struct device *dev = &mdp->pdev->dev;
const int p_id = mdp->mdp_data->mdp_plat_id;
- int i, ret;
- u32 num_comp = 0;
-
- atomic_inc(&mdp->job_count);
- if (atomic_read(&mdp->suspended)) {
- atomic_dec(&mdp->job_count);
- return -ECANCELED;
+ struct img_config *config;
+ struct mtk_mutex *mutex = NULL;
+ enum mdp_pipe_id pipe_id;
+ int i, ret = -ECANCELED;
+ u32 num_comp;
+
+ config = __get_config_offset(mdp, param, pp_idx);
+ if (IS_ERR(config)) {
+ ret = PTR_ERR(config);
+ goto err_uninit;
}
+ if (CFG_CHECK(MT8183, p_id))
+ num_comp = CFG_GET(MT8183, config, num_components);
+ else if (CFG_CHECK(MT8195, p_id))
+ num_comp = CFG_GET(MT8195, config, num_components);
+ else
+ goto err_uninit;
+
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
- goto err_cancel_job;
+ goto err_uninit;
}
- ret = mdp_cmdq_pkt_create(mdp->cmdq_clt, &cmd->pkt, SZ_16K);
+ ret = mdp_cmdq_pkt_create(mdp->cmdq_clt[pp_idx], &cmd->pkt, SZ_16K);
if (ret)
goto err_free_cmd;
if (CFG_CHECK(MT8183, p_id)) {
num_comp = CFG_GET(MT8183, param->config, num_components);
+ } else if (CFG_CHECK(MT8195, p_id)) {
+ num_comp = CFG_GET(MT8195, param->config, num_components);
} else {
ret = -EINVAL;
goto err_destroy_pkt;
@@ -440,15 +644,8 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
goto err_free_comps;
}
- i = mdp->mdp_data->pipe_info[MDP_PIPE_RDMA0].mutex_id;
- ret = mtk_mutex_prepare(mdp->mdp_mutex[i]);
- if (ret) {
- dev_err(dev, "Fail to enable mutex clk\n");
- goto err_free_path;
- }
-
path->mdp_dev = mdp;
- path->config = param->config;
+ path->config = config;
path->param = param->param;
for (i = 0; i < param->param->num_outputs; i++) {
path->bounds[i].left = 0;
@@ -462,22 +659,40 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
}
ret = mdp_path_ctx_init(mdp, path);
if (ret) {
- dev_err(dev, "mdp_path_ctx_init error\n");
+ dev_err(dev, "mdp_path_ctx_init error %d\n", pp_idx);
+ goto err_free_path;
+ }
+
+ pipe_id = __get_pipe(mdp, path->comps[0].comp->public_id);
+ mutex = __get_mutex(mdp, &mdp->mdp_data->pipe_info[pipe_id]);
+ ret = mtk_mutex_prepare(mutex);
+ if (ret) {
+ dev_err(dev, "Fail to enable mutex %d clk\n", pp_idx);
goto err_free_path;
}
ret = mdp_path_config(mdp, cmd, path);
if (ret) {
- dev_err(dev, "mdp_path_config error\n");
+ dev_err(dev, "mdp_path_config error %d\n", pp_idx);
goto err_free_path;
}
cmdq_pkt_finalize(&cmd->pkt);
- for (i = 0; i < num_comp; i++)
+ for (i = 0; i < num_comp; i++) {
+ s32 inner_id = MDP_COMP_NONE;
+
+ if (CFG_CHECK(MT8183, p_id))
+ inner_id = CFG_GET(MT8183, path->config, components[i].type);
+ else if (CFG_CHECK(MT8195, p_id))
+ inner_id = CFG_GET(MT8195, path->config, components[i].type);
+
+ if (mdp_cfg_comp_is_dummy(mdp, inner_id))
+ continue;
memcpy(&comps[i], path->comps[i].comp,
sizeof(struct mdp_comp));
+ }
- mdp->cmdq_clt->client.rx_callback = mdp_handle_cmdq_callback;
+ mdp->cmdq_clt[pp_idx]->client.rx_callback = mdp_handle_cmdq_callback;
cmd->mdp = mdp;
cmd->user_cmdq_cb = param->cmdq_cb;
cmd->user_cb_data = param->cb_data;
@@ -485,29 +700,12 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
cmd->num_comps = num_comp;
cmd->mdp_ctx = param->mdp_ctx;
- ret = mdp_comp_clocks_on(&mdp->pdev->dev, cmd->comps, cmd->num_comps);
- if (ret)
- goto err_free_path;
-
- dma_sync_single_for_device(mdp->cmdq_clt->chan->mbox->dev,
- cmd->pkt.pa_base, cmd->pkt.cmd_buf_size,
- DMA_TO_DEVICE);
- ret = mbox_send_message(mdp->cmdq_clt->chan, &cmd->pkt);
- if (ret < 0) {
- dev_err(dev, "mbox send message fail %d!\n", ret);
- goto err_clock_off;
- }
- mbox_client_txdone(mdp->cmdq_clt->chan, 0);
-
kfree(path);
- return 0;
+ return cmd;
-err_clock_off:
- mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
- cmd->num_comps);
err_free_path:
- i = mdp->mdp_data->pipe_info[MDP_PIPE_RDMA0].mutex_id;
- mtk_mutex_unprepare(mdp->mdp_mutex[i]);
+ if (mutex)
+ mtk_mutex_unprepare(mutex);
kfree(path);
err_free_comps:
kfree(comps);
@@ -515,8 +713,58 @@ err_destroy_pkt:
mdp_cmdq_pkt_destroy(&cmd->pkt);
err_free_cmd:
kfree(cmd);
+err_uninit:
+ return ERR_PTR(ret);
+}
+
+int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
+{
+ struct mdp_cmdq_cmd *cmd[MDP_PP_MAX] = {NULL};
+ struct device *dev = &mdp->pdev->dev;
+ int i, ret;
+ u8 pp_used = __get_pp_num(param->param->type);
+
+ atomic_set(&mdp->job_count, pp_used);
+ if (atomic_read(&mdp->suspended)) {
+ atomic_set(&mdp->job_count, 0);
+ return -ECANCELED;
+ }
+
+ for (i = 0; i < pp_used; i++) {
+ cmd[i] = mdp_cmdq_prepare(mdp, param, i);
+ if (IS_ERR_OR_NULL(cmd[i])) {
+ ret = PTR_ERR(cmd[i]);
+ goto err_cancel_job;
+ }
+ }
+
+ for (i = 0; i < pp_used; i++) {
+ ret = mdp_comp_clocks_on(&mdp->pdev->dev, cmd[i]->comps, cmd[i]->num_comps);
+ if (ret)
+ goto err_clock_off;
+ }
+
+ for (i = 0; i < pp_used; i++) {
+ dma_sync_single_for_device(mdp->cmdq_clt[i]->chan->mbox->dev,
+ cmd[i]->pkt.pa_base, cmd[i]->pkt.cmd_buf_size,
+ DMA_TO_DEVICE);
+
+ ret = mbox_send_message(mdp->cmdq_clt[i]->chan, &cmd[i]->pkt);
+ if (ret < 0) {
+ dev_err(dev, "mbox send message fail %d!\n", ret);
+ i = pp_used;
+ goto err_clock_off;
+ }
+ mbox_client_txdone(mdp->cmdq_clt[i]->chan, 0);
+ }
+ return 0;
+
+err_clock_off:
+ while (--i >= 0)
+ mdp_comp_clocks_off(&mdp->pdev->dev, cmd[i]->comps,
+ cmd[i]->num_comps);
err_cancel_job:
- atomic_dec(&mdp->job_count);
+ atomic_set(&mdp->job_count, 0);
return ret;
}
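The reworked mdp_cmdq_send() above fans one request out to pp_used command queues: every packet is prepared first, then per-pipe clocks are enabled and the packets are handed to their mailbox channels, and a failure part-way through unwinds only what was already started. A condensed standalone model of that submit/rollback shape (illustrative only, assuming two pipes and stubbed helpers):

#include <stdio.h>

#define PP_USED 2

/* Stubs standing in for per-pipe prepare/clock-on/send steps. */
static int prepare_pipe(int i) { printf("prepare pipe %d\n", i); return 0; }
static int start_pipe(int i)   { printf("start pipe %d\n", i);   return 0; }
static void stop_pipe(int i)   { printf("stop pipe %d\n", i); }

static int submit_all(void)
{
	int i, ret;

	for (i = 0; i < PP_USED; i++) {
		ret = prepare_pipe(i);
		if (ret)
			return ret;	/* nothing started yet, nothing to undo */
	}

	for (i = 0; i < PP_USED; i++) {
		ret = start_pipe(i);
		if (ret)
			goto rollback;
	}
	return 0;

rollback:
	while (--i >= 0)		/* undo only the pipes already started */
		stop_pipe(i);
	return ret;
}

int main(void)
{
	return submit_all() ? 1 : 0;
}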
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
index 43475b862ddb9..53a30ad7e0b09 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
@@ -29,6 +29,7 @@ struct mdp_cmdq_cmd {
struct cmdq_pkt pkt;
s32 *event;
struct mdp_dev *mdp;
+ struct cmdq_cb_data *data;
void (*user_cmdq_cb)(struct cmdq_cb_data data);
void *user_cb_data;
struct mdp_comp *comps;
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
index 575c8d52acd19..8f62fb167156d 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
@@ -13,11 +13,19 @@
#include "mtk-mdp3-core.h"
#include "mtk-mdp3-regs.h"
-#include "mdp_reg_rdma.h"
+#include "mdp_reg_aal.h"
#include "mdp_reg_ccorr.h"
+#include "mdp_reg_color.h"
+#include "mdp_reg_fg.h"
+#include "mdp_reg_hdr.h"
+#include "mdp_reg_merge.h"
+#include "mdp_reg_ovl.h"
+#include "mdp_reg_pad.h"
+#include "mdp_reg_rdma.h"
#include "mdp_reg_rsz.h"
-#include "mdp_reg_wrot.h"
+#include "mdp_reg_tdshp.h"
#include "mdp_reg_wdma.h"
+#include "mdp_reg_wrot.h"
static u32 mdp_comp_alias_id[MDP_COMP_TYPE_COUNT];
static int p_id;
@@ -85,6 +93,7 @@ static int config_rdma_frame(struct mdp_comp_ctx *ctx,
bool en_ufo = MDP_COLOR_IS_UFP(colorformat);
phys_addr_t base = ctx->comp->reg_base;
u8 subsys_id = ctx->comp->subsys_id;
+ u32 rdma_con_mask = 0;
u32 reg = 0;
if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
@@ -105,6 +114,8 @@ static int config_rdma_frame(struct mdp_comp_ctx *ctx,
/* Setup source frame info */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.src_ctrl);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.src_ctrl);
MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_CON, reg,
0x03C8FE0F);
@@ -113,69 +124,163 @@ static int config_rdma_frame(struct mdp_comp_ctx *ctx,
/* Setup source buffer base */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.ufo_dec_y);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.ufo_dec_y);
MM_REG_WRITE(cmd, subsys_id,
base, MDP_RDMA_UFO_DEC_LENGTH_BASE_Y,
reg, 0xFFFFFFFF);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.ufo_dec_c);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.ufo_dec_c);
MM_REG_WRITE(cmd, subsys_id,
base, MDP_RDMA_UFO_DEC_LENGTH_BASE_C,
reg, 0xFFFFFFFF);
+
/* Set 10bit source frame pitch */
if (block10bit) {
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.mf_bkgd_in_pxl);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.mf_bkgd_in_pxl);
MM_REG_WRITE(cmd, subsys_id,
base, MDP_RDMA_MF_BKGD_SIZE_IN_PXL,
reg, 0x001FFFFF);
}
}
- if (CFG_CHECK(MT8183, p_id))
+ if (CFG_CHECK(MT8183, p_id)) {
reg = CFG_COMP(MT8183, ctx->param, rdma.control);
+ rdma_con_mask = 0x1110;
+ } else if (CFG_CHECK(MT8195, p_id)) {
+ reg = CFG_COMP(MT8195, ctx->param, rdma.control);
+ rdma_con_mask = 0x1130;
+ }
MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_CON, reg,
- 0x1110);
+ rdma_con_mask);
+
/* Setup source buffer base */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.iova[0]);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.iova[0]);
MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_0, reg,
0xFFFFFFFF);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.iova[1]);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.iova[1]);
MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_1, reg,
0xFFFFFFFF);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.iova[2]);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.iova[2]);
MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_2, reg,
0xFFFFFFFF);
+
/* Setup source buffer end */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[0]);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.iova_end[0]);
MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_0,
reg, 0xFFFFFFFF);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[1]);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.iova_end[1]);
MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_1,
reg, 0xFFFFFFFF);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[2]);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.iova_end[2]);
MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_2,
reg, 0xFFFFFFFF);
+
/* Setup source frame pitch */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.mf_bkgd);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.mf_bkgd);
MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_BKGD_SIZE_IN_BYTE,
reg, 0x001FFFFF);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.sf_bkgd);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.sf_bkgd);
MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SF_BKGD_SIZE_IN_BYTE,
reg, 0x001FFFFF);
+
/* Setup color transform */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.transform);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.transform);
MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_TRANSFORM_0,
reg, 0x0F110000);
+ if (!mdp_cfg || !mdp_cfg->rdma_esl_setting)
+ goto rdma_config_done;
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con0);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_0,
+ reg, 0x0FFF00FF);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_high_con0);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_0,
+ reg, 0x3FFFFFFF);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_low_con0);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_0,
+ reg, 0x3FFFFFFF);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con1);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_1,
+ reg, 0x0F7F007F);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_high_con1);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_1,
+ reg, 0x3FFFFFFF);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_low_con1);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_1,
+ reg, 0x3FFFFFFF);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con2);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_2,
+ reg, 0x0F3F003F);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_high_con2);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_2,
+ reg, 0x3FFFFFFF);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_low_con2);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_2,
+ reg, 0x3FFFFFFF);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con3);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_3,
+ reg, 0x0F3F003F);
+
+rdma_config_done:
return 0;
}
@@ -197,6 +302,8 @@ static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
/* Set Y pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[0]);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset[0]);
MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_0,
reg, 0xFFFFFFFF);
@@ -205,6 +312,8 @@ static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
if (mdp_cfg->rdma_support_10bit && block10bit && en_ufo) {
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset_0_p);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset_0_p);
MM_REG_WRITE(cmd, subsys_id, base,
MDP_RDMA_SRC_OFFSET_0_P,
reg, 0xFFFFFFFF);
@@ -214,32 +323,49 @@ static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
/* Set U pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[1]);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset[1]);
MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_1,
reg, 0xFFFFFFFF);
+
/* Set V pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[2]);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset[2]);
MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_2,
reg, 0xFFFFFFFF);
+
/* Set source size */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].src);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].src);
MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_SRC_SIZE, reg,
0x1FFF1FFF);
+
/* Set target size */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].clip);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].clip);
MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_CLIP_SIZE,
reg, 0x1FFF1FFF);
+
/* Set crop offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].clip_ofst);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].clip_ofst);
MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_OFFSET_1,
reg, 0x003F001F);
if (CFG_CHECK(MT8183, p_id)) {
csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
+ } else if (CFG_CHECK(MT8195, p_id)) {
+ csf_l = CFG_COMP(MT8195, ctx->param, subfrms[index].in.left);
+ csf_r = CFG_COMP(MT8195, ctx->param, subfrms[index].in.right);
}
if (mdp_cfg && mdp_cfg->rdma_upsample_repeat_only)
if ((csf_r - csf_l + 1) > 320)
@@ -251,14 +377,20 @@ static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
static int wait_rdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
struct device *dev = &ctx->comp->mdp_dev->pdev->dev;
phys_addr_t base = ctx->comp->reg_base;
u8 subsys_id = ctx->comp->subsys_id;
- if (ctx->comp->alias_id == 0)
- MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
- else
- dev_err(dev, "Do not support RDMA1_DONE event\n");
+ if (!mdp_cfg)
+ return -EINVAL;
+
+ if (ctx->comp->alias_id >= mdp_cfg->rdma_event_num) {
+ dev_err(dev, "Invalid RDMA event %d\n", ctx->comp->alias_id);
+ return -EINVAL;
+ }
+
+ MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
/* Disable RDMA */
MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, 0x0, BIT(0));
@@ -283,6 +415,14 @@ static int init_rsz(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(16));
/* Enable RSZ */
MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, BIT(0), BIT(0));
+
+ if (CFG_CHECK(MT8195, p_id)) {
+ struct device *dev;
+
+ dev = ctx->comp->mdp_dev->mm_subsys[MDP_MM_SUBSYS_1].mmsys;
+ mtk_mmsys_vpp_rsz_dcm_config(dev, true, NULL);
+ }
+
return 0;
}
@@ -290,13 +430,19 @@ static int config_rsz_frame(struct mdp_comp_ctx *ctx,
struct mdp_cmdq_cmd *cmd,
const struct v4l2_rect *compose)
{
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
phys_addr_t base = ctx->comp->reg_base;
u8 subsys_id = ctx->comp->subsys_id;
bool bypass = FALSE;
u32 reg = 0;
+ if (mdp_cfg && mdp_cfg->rsz_etc_control)
+ MM_REG_WRITE(cmd, subsys_id, base, RSZ_ETC_CONTROL, 0x0, 0xFFFFFFFF);
+
if (CFG_CHECK(MT8183, p_id))
bypass = CFG_COMP(MT8183, ctx->param, frame.bypass);
+ else if (CFG_CHECK(MT8195, p_id))
+ bypass = CFG_COMP(MT8195, ctx->param, frame.bypass);
if (bypass) {
/* Disable RSZ */
@@ -306,20 +452,32 @@ static int config_rsz_frame(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rsz.control1);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rsz.control1);
MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, reg,
0x03FFFDF3);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rsz.control2);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rsz.control2);
MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, reg,
0x0FFFC290);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rsz.coeff_step_x);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rsz.coeff_step_x);
MM_REG_WRITE(cmd, subsys_id, base, PRZ_HORIZONTAL_COEFF_STEP,
reg, 0x007FFFFF);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rsz.coeff_step_y);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rsz.coeff_step_y);
MM_REG_WRITE(cmd, subsys_id, base, PRZ_VERTICAL_COEFF_STEP,
reg, 0x007FFFFF);
+
return 0;
}
@@ -331,19 +489,28 @@ static int config_rsz_subfrm(struct mdp_comp_ctx *ctx,
u8 subsys_id = ctx->comp->subsys_id;
u32 csf_l = 0, csf_r = 0;
u32 reg = 0;
+ u32 id;
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].control2);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].control2);
MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, reg,
0x00003800);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].src);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].src);
MM_REG_WRITE(cmd, subsys_id, base, PRZ_INPUT_IMAGE, reg,
0xFFFFFFFF);
if (CFG_CHECK(MT8183, p_id)) {
csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
+ } else if (CFG_CHECK(MT8195, p_id)) {
+ csf_l = CFG_COMP(MT8195, ctx->param, subfrms[index].in.left);
+ csf_r = CFG_COMP(MT8195, ctx->param, subfrms[index].in.right);
}
if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample)
if ((csf_r - csf_l + 1) <= 16)
@@ -352,37 +519,99 @@ static int config_rsz_subfrm(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.left);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.left);
MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_HORIZONTAL_INTEGER_OFFSET,
reg, 0xFFFF);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.left_subpix);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.left_subpix);
MM_REG_WRITE(cmd, subsys_id,
base, PRZ_LUMA_HORIZONTAL_SUBPIXEL_OFFSET,
reg, 0x1FFFFF);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.top);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.top);
MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_INTEGER_OFFSET,
reg, 0xFFFF);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.top_subpix);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.top_subpix);
MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET,
reg, 0x1FFFFF);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, subfrms[index].chroma.left);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, subfrms[index].chroma.left);
MM_REG_WRITE(cmd, subsys_id,
base, PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET,
reg, 0xFFFF);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, subfrms[index].chroma.left_subpix);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, subfrms[index].chroma.left_subpix);
MM_REG_WRITE(cmd, subsys_id,
base, PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET,
reg, 0x1FFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].clip);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].clip);
MM_REG_WRITE(cmd, subsys_id, base, PRZ_OUTPUT_IMAGE, reg,
0xFFFFFFFF);
+ if (CFG_CHECK(MT8195, p_id)) {
+ struct device *dev;
+ struct mdp_comp *merge;
+ const struct mtk_mdp_driver_data *data = ctx->comp->mdp_dev->mdp_data;
+ enum mtk_mdp_comp_id public_id = ctx->comp->public_id;
+
+ switch (public_id) {
+ case MDP_COMP_RSZ2:
+ merge = ctx->comp->mdp_dev->comp[MDP_COMP_MERGE2];
+ break;
+ case MDP_COMP_RSZ3:
+ merge = ctx->comp->mdp_dev->comp[MDP_COMP_MERGE3];
+ break;
+ default:
+ goto rsz_subfrm_done;
+ }
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].rsz_switch);
+
+ id = data->comp_data[public_id].match.alias_id;
+ dev = ctx->comp->mdp_dev->mm_subsys[MDP_MM_SUBSYS_1].mmsys;
+ mtk_mmsys_vpp_rsz_merge_config(dev, id, reg, NULL);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].merge_cfg);
+ MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
+ MDP_MERGE_CFG_0, reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
+ MDP_MERGE_CFG_4, reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
+ MDP_MERGE_CFG_24, reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
+ MDP_MERGE_CFG_25, reg, 0xFFFFFFFF);
+
+ /* Bypass mode */
+ MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
+ MDP_MERGE_CFG_12, BIT(0), 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
+ MDP_MERGE_ENABLE, BIT(0), 0xFFFFFFFF);
+ }
+
+rsz_subfrm_done:
return 0;
}
@@ -399,6 +628,9 @@ static int advance_rsz_subfrm(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8183, p_id)) {
csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
+ } else if (CFG_CHECK(MT8195, p_id)) {
+ csf_l = CFG_COMP(MT8195, ctx->param, subfrms[index].in.left);
+ csf_r = CFG_COMP(MT8195, ctx->param, subfrms[index].in.right);
}
if ((csf_r - csf_l + 1) <= 16)
@@ -425,6 +657,11 @@ static int init_wrot(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
/* Reset WROT */
MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, BIT(0), BIT(0));
MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, BIT(0), BIT(0));
+
+ /* Reset setting */
+ if (CFG_CHECK(MT8195, p_id))
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL, 0x0, 0xFFFFFFFF);
+
MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, 0x0, BIT(0));
MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, 0x0, BIT(0));
return 0;
@@ -442,57 +679,118 @@ static int config_wrot_frame(struct mdp_comp_ctx *ctx,
/* Write frame base address */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.iova[0]);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.iova[0]);
MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR, reg,
0xFFFFFFFF);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.iova[1]);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.iova[1]);
MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_C, reg,
0xFFFFFFFF);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.iova[2]);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.iova[2]);
MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_V, reg,
0xFFFFFFFF);
+
+ if (mdp_cfg && mdp_cfg->wrot_support_10bit) {
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.scan_10bit);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_SCAN_10BIT,
+ reg, 0x0000000F);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.pending_zero);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_PENDING_ZERO,
+ reg, 0x04000000);
+ }
+
+ if (CFG_CHECK(MT8195, p_id)) {
+ reg = CFG_COMP(MT8195, ctx->param, wrot.bit_number);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL_2,
+ reg, 0x00000007);
+ }
+
/* Write frame related registers */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.control);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.control);
MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL, reg,
0xF131510F);
+
+ /* Write pre-ultra threshold */
+ if (CFG_CHECK(MT8195, p_id)) {
+ reg = CFG_COMP(MT8195, ctx->param, wrot.pre_ultra);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_DMA_PREULTRA, reg,
+ 0x00FFFFFF);
+ }
+
/* Write frame Y pitch */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.stride[0]);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.stride[0]);
MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE, reg,
0x0000FFFF);
+
/* Write frame UV pitch */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.stride[1]);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.stride[1]);
MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_C, reg,
0xFFFF);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.stride[2]);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.stride[2]);
MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_V, reg,
0xFFFF);
+
/* Write matrix control */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.mat_ctrl);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.mat_ctrl);
MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAT_CTRL, reg, 0xF3);
/* Set the fixed ALPHA as 0xFF */
MM_REG_WRITE(cmd, subsys_id, base, VIDO_DITHER, 0xFF000000,
0xFF000000);
+
/* Set VIDO_EOL_SEL */
MM_REG_WRITE(cmd, subsys_id, base, VIDO_RSV_1, BIT(31), BIT(31));
+
/* Set VIDO_FIFO_TEST */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.fifo_test);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.fifo_test);
+
if (reg != 0)
MM_REG_WRITE(cmd, subsys_id, base, VIDO_FIFO_TEST,
reg, 0xFFF);
+
/* Filter enable */
if (mdp_cfg && mdp_cfg->wrot_filter_constraint) {
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.filter);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.filter);
MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
reg, 0x77);
+
+ /* Turn off WROT DMA DCM */
+ if (CFG_CHECK(MT8195, p_id))
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN,
+ (0x1 << 23) + (0x1 << 20), 0x900000);
}
return 0;
@@ -508,35 +806,54 @@ static int config_wrot_subfrm(struct mdp_comp_ctx *ctx,
/* Write Y pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[0]);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].offset[0]);
MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR,
reg, 0x0FFFFFFF);
+
/* Write U pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[1]);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].offset[1]);
MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_C,
reg, 0x0FFFFFFF);
+
/* Write V pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[2]);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].offset[2]);
MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_V,
reg, 0x0FFFFFFF);
+
/* Write source size */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].src);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].src);
MM_REG_WRITE(cmd, subsys_id, base, VIDO_IN_SIZE, reg,
0x1FFF1FFF);
+
/* Write target size */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].clip);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].clip);
MM_REG_WRITE(cmd, subsys_id, base, VIDO_TAR_SIZE, reg,
0x1FFF1FFF);
+
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].clip_ofst);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].clip_ofst);
MM_REG_WRITE(cmd, subsys_id, base, VIDO_CROP_OFST, reg,
0x1FFF1FFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].main_buf);
+ else if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].main_buf);
MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
reg, 0x1FFF7F00);
@@ -553,10 +870,15 @@ static int wait_wrot_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
phys_addr_t base = ctx->comp->reg_base;
u8 subsys_id = ctx->comp->subsys_id;
- if (ctx->comp->alias_id == 0)
- MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
- else
- dev_err(dev, "Do not support WROT1_DONE event\n");
+ if (!mdp_cfg)
+ return -EINVAL;
+
+ if (ctx->comp->alias_id >= mdp_cfg->wrot_event_num) {
+ dev_err(dev, "Invalid WROT event %d!\n", ctx->comp->alias_id);
+ return -EINVAL;
+ }
+
+ MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
if (mdp_cfg && mdp_cfg->wrot_filter_constraint)
MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, 0x0,
@@ -697,6 +1019,171 @@ static const struct mdp_comp_ops wdma_ops = {
.wait_comp_event = wait_wdma_event,
};
+static int reset_luma_hist(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+ u32 hist_num, i;
+
+ if (!mdp_cfg)
+ return -EINVAL;
+
+ hist_num = mdp_cfg->tdshp_hist_num;
+
+ /* Reset histogram */
+ for (i = 0; i <= hist_num; i++)
+ MM_REG_WRITE_MASK(cmd, subsys_id, base,
+ (MDP_LUMA_HIST_INIT + (i << 2)),
+ 0, 0xFFFFFFFF);
+
+ if (mdp_cfg->tdshp_constrain)
+ MM_REG_WRITE(cmd, subsys_id, base,
+ MDP_DC_TWO_D_W1_RESULT_INIT, 0, 0xFFFFFFFF);
+
+ if (mdp_cfg->tdshp_contour)
+ for (i = 0; i < hist_num; i++)
+ MM_REG_WRITE_MASK(cmd, subsys_id, base,
+ (MDP_CONTOUR_HIST_INIT + (i << 2)),
+ 0, 0xFFFFFFFF);
+
+ return 0;
+}
+
+static int init_tdshp(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_CTRL, BIT(0), BIT(0));
+ /* Enable FIFO */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_CFG, BIT(1), BIT(1));
+
+ return reset_luma_hist(ctx, cmd);
+}
+
+static int config_tdshp_frame(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd,
+ const struct v4l2_rect *compose)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+ u32 reg = 0;
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, tdshp.cfg);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_CFG, reg, BIT(0));
+
+ return 0;
+}
+
+static int config_tdshp_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+ u32 reg = 0;
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].src);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_INPUT_SIZE,
+ reg, MDP_TDSHP_INPUT_SIZE_MASK);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].clip_ofst);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_OUTPUT_OFFSET,
+ reg, 0x00FF00FF);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].clip);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_OUTPUT_SIZE,
+ reg, MDP_TDSHP_OUTPUT_SIZE_MASK);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].hist_cfg_0);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HIST_CFG_00, reg, 0xFFFFFFFF);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].hist_cfg_1);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HIST_CFG_01, reg, 0xFFFFFFFF);
+
+ return 0;
+}
+
+static const struct mdp_comp_ops tdshp_ops = {
+ .get_comp_flag = get_comp_flag,
+ .init_comp = init_tdshp,
+ .config_frame = config_tdshp_frame,
+ .config_subfrm = config_tdshp_subfrm,
+};
+
+static int init_color(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+
+ MM_REG_WRITE(cmd, subsys_id, base,
+ MDP_COLOR_START, 0x1, BIT(1) | BIT(0));
+ MM_REG_WRITE(cmd, subsys_id, base,
+ MDP_COLOR_WIN_X_MAIN, 0xFFFF0000, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base,
+ MDP_COLOR_WIN_Y_MAIN, 0xFFFF0000, 0xFFFFFFFF);
+
+ /* Reset color matrix */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_CM1_EN, 0x0, BIT(0));
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_CM2_EN, 0x0, BIT(0));
+
+ /* Enable interrupt */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_INTEN, 0x7, 0x7);
+
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_OUT_SEL, 0x333, 0x333);
+
+ return 0;
+}
+
+static int config_color_frame(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd,
+ const struct v4l2_rect *compose)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+ u32 reg = 0;
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, color.start);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_START,
+ reg, MDP_COLOR_START_MASK);
+
+ return 0;
+}
+
+static int config_color_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+ u32 reg = 0;
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, color.subfrms[index].in_hsize);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_INTERNAL_IP_WIDTH,
+ reg, 0x00003FFF);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, color.subfrms[index].in_vsize);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_INTERNAL_IP_HEIGHT,
+ reg, 0x00003FFF);
+
+ return 0;
+}
+
+static const struct mdp_comp_ops color_ops = {
+ .get_comp_flag = get_comp_flag,
+ .init_comp = init_color,
+ .config_frame = config_color_frame,
+ .config_subfrm = config_color_subfrm,
+};
+
static int init_ccorr(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
phys_addr_t base = ctx->comp->reg_base;
@@ -738,12 +1225,318 @@ static const struct mdp_comp_ops ccorr_ops = {
.config_subfrm = config_ccorr_subfrm,
};
+static int init_aal(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+
+ /* Always set MDP_AAL enable to 1 */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_EN, BIT(0), BIT(0));
+
+ return 0;
+}
+
+static int config_aal_frame(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd,
+ const struct v4l2_rect *compose)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+ u32 reg = 0;
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, aal.cfg_main);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_CFG_MAIN, reg, BIT(7));
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, aal.cfg);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_CFG, reg, BIT(0));
+
+ return 0;
+}
+
+static int config_aal_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+ u32 reg = 0;
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, aal.subfrms[index].src);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_SIZE,
+ reg, MDP_AAL_SIZE_MASK);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, aal.subfrms[index].clip_ofst);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_OUTPUT_OFFSET,
+ reg, 0x00FF00FF);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, aal.subfrms[index].clip);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_OUTPUT_SIZE,
+ reg, MDP_AAL_OUTPUT_SIZE_MASK);
+
+ return 0;
+}
+
+static const struct mdp_comp_ops aal_ops = {
+ .get_comp_flag = get_comp_flag,
+ .init_comp = init_aal,
+ .config_frame = config_aal_frame,
+ .config_subfrm = config_aal_subfrm,
+};
+
+static int init_hdr(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+
+ /* Always set MDP_HDR enable to 1 */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TOP, BIT(0), BIT(0));
+
+ return 0;
+}
+
+static int config_hdr_frame(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd,
+ const struct v4l2_rect *compose)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+ u32 reg = 0;
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, hdr.top);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TOP, reg, BIT(29) | BIT(28));
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, hdr.relay);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_RELAY, reg, BIT(0));
+
+ return 0;
+}
+
+static int config_hdr_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+ u32 reg = 0;
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].win_size);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TILE_POS,
+ reg, MDP_HDR_TILE_POS_MASK);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].src);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_SIZE_0, reg, 0x1FFF1FFF);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].clip_ofst0);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_SIZE_1, reg, 0x1FFF1FFF);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].clip_ofst1);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_SIZE_2, reg, 0x1FFF1FFF);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hist_ctrl_0);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_HIST_CTRL_0, reg, 0x00003FFF);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hist_ctrl_1);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_HIST_CTRL_1, reg, 0x00003FFF);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hdr_top);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TOP, reg, BIT(6) | BIT(5));
+
+ /* Enable histogram */
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hist_addr);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_HIST_ADDR, reg, BIT(9));
+
+ return 0;
+}
+
+static const struct mdp_comp_ops hdr_ops = {
+ .get_comp_flag = get_comp_flag,
+ .init_comp = init_hdr,
+ .config_frame = config_hdr_frame,
+ .config_subfrm = config_hdr_subfrm,
+};
+
+static int init_fg(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TRIGGER, BIT(2), BIT(2));
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TRIGGER, 0x0, BIT(2));
+
+ return 0;
+}
+
+static int config_fg_frame(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd,
+ const struct v4l2_rect *compose)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+ u32 reg = 0;
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, fg.ctrl_0);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_FG_CTRL_0, reg, BIT(0));
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, fg.ck_en);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_FG_CK_EN, reg, 0x7);
+
+ return 0;
+}
+
+static int config_fg_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+ u32 reg = 0;
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, fg.subfrms[index].info_0);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TILE_INFO_0, reg, 0xFFFFFFFF);
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, fg.subfrms[index].info_1);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TILE_INFO_1, reg, 0xFFFFFFFF);
+
+ return 0;
+}
+
+static const struct mdp_comp_ops fg_ops = {
+ .get_comp_flag = get_comp_flag,
+ .init_comp = init_fg,
+ .config_frame = config_fg_frame,
+ .config_subfrm = config_fg_subfrm,
+};
+
+static int init_ovl(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_EN,
+ BIT(0), MDP_OVL_EN_MASK);
+
+ /* Set to relay mode */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_SRC_CON,
+ BIT(9), MDP_OVL_SRC_CON_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_DP_CON,
+ BIT(0), MDP_OVL_DP_CON_MASK);
+
+ return 0;
+}
+
+static int config_ovl_frame(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd,
+ const struct v4l2_rect *compose)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+ u32 reg = 0;
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, ovl.L0_con);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_L0_CON, reg, BIT(29) | BIT(28));
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, ovl.src_con);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_SRC_CON, reg, BIT(0));
+
+ return 0;
+}
+
+static int config_ovl_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+ u32 reg = 0;
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, ovl.subfrms[index].L0_src_size);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_L0_SRC_SIZE,
+ reg, MDP_OVL_L0_SRC_SIZE_MASK);
+
+ /* Setup output size */
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, ovl.subfrms[index].roi_size);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_ROI_SIZE,
+ reg, MDP_OVL_ROI_SIZE_MASK);
+
+ return 0;
+}
+
+static const struct mdp_comp_ops ovl_ops = {
+ .get_comp_flag = get_comp_flag,
+ .init_comp = init_ovl,
+ .config_frame = config_ovl_frame,
+ .config_subfrm = config_ovl_subfrm,
+};
+
+static int init_pad(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_CON,
+ BIT(1), MDP_PAD_CON_MASK);
+ /* Reset */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_W_SIZE,
+ 0, MDP_PAD_W_SIZE_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_H_SIZE,
+ 0, MDP_PAD_H_SIZE_MASK);
+
+ return 0;
+}
+
+static int config_pad_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u16 subsys_id = ctx->comp->subsys_id;
+ u32 reg = 0;
+
+ if (CFG_CHECK(MT8195, p_id))
+ reg = CFG_COMP(MT8195, ctx->param, pad.subfrms[index].pic_size);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_PIC_SIZE,
+ reg, MDP_PAD_PIC_SIZE_MASK);
+
+ return 0;
+}
+
+static const struct mdp_comp_ops pad_ops = {
+ .get_comp_flag = get_comp_flag,
+ .init_comp = init_pad,
+ .config_subfrm = config_pad_subfrm,
+};
+
static const struct mdp_comp_ops *mdp_comp_ops[MDP_COMP_TYPE_COUNT] = {
[MDP_COMP_TYPE_RDMA] = &rdma_ops,
[MDP_COMP_TYPE_RSZ] = &rsz_ops,
[MDP_COMP_TYPE_WROT] = &wrot_ops,
[MDP_COMP_TYPE_WDMA] = &wdma_ops,
+ [MDP_COMP_TYPE_TDSHP] = &tdshp_ops,
+ [MDP_COMP_TYPE_COLOR] = &color_ops,
[MDP_COMP_TYPE_CCORR] = &ccorr_ops,
+ [MDP_COMP_TYPE_AAL] = &aal_ops,
+ [MDP_COMP_TYPE_HDR] = &hdr_ops,
+ [MDP_COMP_TYPE_FG] = &fg_ops,
+ [MDP_COMP_TYPE_OVL] = &ovl_ops,
+ [MDP_COMP_TYPE_PAD] = &pad_ops,
};
static const struct of_device_id mdp_comp_dt_ids[] __maybe_unused = {
@@ -762,6 +1555,42 @@ static const struct of_device_id mdp_comp_dt_ids[] __maybe_unused = {
}, {
.compatible = "mediatek,mt8183-mdp3-wdma",
.data = (void *)MDP_COMP_TYPE_WDMA,
+ }, {
+ .compatible = "mediatek,mt8195-mdp3-rdma",
+ .data = (void *)MDP_COMP_TYPE_RDMA,
+ }, {
+ .compatible = "mediatek,mt8195-mdp3-split",
+ .data = (void *)MDP_COMP_TYPE_SPLIT,
+ }, {
+ .compatible = "mediatek,mt8195-mdp3-stitch",
+ .data = (void *)MDP_COMP_TYPE_STITCH,
+ }, {
+ .compatible = "mediatek,mt8195-mdp3-fg",
+ .data = (void *)MDP_COMP_TYPE_FG,
+ }, {
+ .compatible = "mediatek,mt8195-mdp3-hdr",
+ .data = (void *)MDP_COMP_TYPE_HDR,
+ }, {
+ .compatible = "mediatek,mt8195-mdp3-aal",
+ .data = (void *)MDP_COMP_TYPE_AAL,
+ }, {
+ .compatible = "mediatek,mt8195-mdp3-merge",
+ .data = (void *)MDP_COMP_TYPE_MERGE,
+ }, {
+ .compatible = "mediatek,mt8195-mdp3-tdshp",
+ .data = (void *)MDP_COMP_TYPE_TDSHP,
+ }, {
+ .compatible = "mediatek,mt8195-mdp3-color",
+ .data = (void *)MDP_COMP_TYPE_COLOR,
+ }, {
+ .compatible = "mediatek,mt8195-mdp3-ovl",
+ .data = (void *)MDP_COMP_TYPE_OVL,
+ }, {
+ .compatible = "mediatek,mt8195-mdp3-padding",
+ .data = (void *)MDP_COMP_TYPE_PAD,
+ }, {
+ .compatible = "mediatek,mt8195-mdp3-tcc",
+ .data = (void *)MDP_COMP_TYPE_TCC,
},
{}
};
@@ -853,9 +1682,26 @@ int mdp_comp_clocks_on(struct device *dev, struct mdp_comp *comps, int num)
int i, ret;
for (i = 0; i < num; i++) {
+ struct mdp_dev *m = comps[i].mdp_dev;
+ enum mtk_mdp_comp_id id;
+ const struct mdp_comp_blend *b;
+
+ /* Bypass the dummy component */
+ if (!m)
+ continue;
+
ret = mdp_comp_clock_on(dev, &comps[i]);
if (ret)
return ret;
+
+ id = comps[i].public_id;
+ b = &m->mdp_data->comp_data[id].blend;
+
+ if (b && b->aid_clk) {
+ ret = mdp_comp_clock_on(dev, m->comp[b->b_id]);
+ if (ret)
+ return ret;
+ }
}
return 0;
@@ -865,8 +1711,23 @@ void mdp_comp_clocks_off(struct device *dev, struct mdp_comp *comps, int num)
{
int i;
- for (i = 0; i < num; i++)
+ for (i = 0; i < num; i++) {
+ struct mdp_dev *m = comps[i].mdp_dev;
+ enum mtk_mdp_comp_id id;
+ const struct mdp_comp_blend *b;
+
+ /* Bypass the dummy component */
+ if (!m)
+ continue;
+
mdp_comp_clock_off(dev, &comps[i]);
+
+ id = comps[i].public_id;
+ b = &m->mdp_data->comp_data[id].blend;
+
+ if (b && b->aid_clk)
+ mdp_comp_clock_off(dev, m->comp[b->b_id]);
+ }
}
static int mdp_get_subsys_id(struct mdp_dev *mdp, struct device *dev,
@@ -1174,6 +2035,8 @@ int mdp_comp_ctx_config(struct mdp_dev *mdp, struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8183, p_id))
arg = CFG_COMP(MT8183, param, type);
+ else if (CFG_CHECK(MT8195, p_id))
+ arg = CFG_COMP(MT8195, param, type);
else
return -EINVAL;
public_id = mdp_cfg_get_id_public(mdp, arg);
@@ -1191,16 +2054,22 @@ int mdp_comp_ctx_config(struct mdp_dev *mdp, struct mdp_comp_ctx *ctx,
ctx->param = param;
if (CFG_CHECK(MT8183, p_id))
arg = CFG_COMP(MT8183, param, input);
+ else if (CFG_CHECK(MT8195, p_id))
+ arg = CFG_COMP(MT8195, param, input);
else
return -EINVAL;
ctx->input = &frame->inputs[arg];
if (CFG_CHECK(MT8183, p_id))
idx = CFG_COMP(MT8183, param, num_outputs);
+ else if (CFG_CHECK(MT8195, p_id))
+ idx = CFG_COMP(MT8195, param, num_outputs);
else
return -EINVAL;
for (i = 0; i < idx; i++) {
if (CFG_CHECK(MT8183, p_id))
arg = CFG_COMP(MT8183, param, outputs[i]);
+ else if (CFG_CHECK(MT8195, p_id))
+ arg = CFG_COMP(MT8195, param, outputs[i]);
else
return -EINVAL;
ctx->outputs[i] = &frame->outputs[arg];
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h
index 20d2bcb77ef93..3e5d2da1c8076 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h
@@ -84,22 +84,66 @@ enum mtk_mdp_comp_id {
MDP_COMP_CAMIN, /* 9 */
MDP_COMP_CAMIN2, /* 10 */
MDP_COMP_RDMA0, /* 11 */
- MDP_COMP_AAL0, /* 12 */
- MDP_COMP_CCORR0, /* 13 */
- MDP_COMP_RSZ0, /* 14 */
- MDP_COMP_RSZ1, /* 15 */
- MDP_COMP_TDSHP0, /* 16 */
- MDP_COMP_COLOR0, /* 17 */
- MDP_COMP_PATH0_SOUT, /* 18 */
- MDP_COMP_PATH1_SOUT, /* 19 */
- MDP_COMP_WROT0, /* 20 */
- MDP_COMP_WDMA, /* 21 */
-
- /* Dummy Engine */
- MDP_COMP_RDMA1, /* 22 */
- MDP_COMP_RSZ2, /* 23 */
- MDP_COMP_TDSHP1, /* 24 */
- MDP_COMP_WROT1, /* 25 */
+ MDP_COMP_RDMA1, /* 12 */
+ MDP_COMP_RDMA2, /* 13 */
+ MDP_COMP_RDMA3, /* 14 */
+ MDP_COMP_AAL0, /* 15 */
+ MDP_COMP_AAL1, /* 16 */
+ MDP_COMP_AAL2, /* 17 */
+ MDP_COMP_AAL3, /* 18 */
+ MDP_COMP_CCORR0, /* 19 */
+ MDP_COMP_RSZ0, /* 20 */
+ MDP_COMP_RSZ1, /* 21 */
+ MDP_COMP_RSZ2, /* 22 */
+ MDP_COMP_RSZ3, /* 23 */
+ MDP_COMP_TDSHP0, /* 24 */
+ MDP_COMP_TDSHP1, /* 25 */
+ MDP_COMP_TDSHP2, /* 26 */
+ MDP_COMP_TDSHP3, /* 27 */
+ MDP_COMP_COLOR0, /* 28 */
+ MDP_COMP_COLOR1, /* 29 */
+ MDP_COMP_COLOR2, /* 30 */
+ MDP_COMP_COLOR3, /* 31 */
+ MDP_COMP_PATH0_SOUT, /* 32 */
+ MDP_COMP_PATH1_SOUT, /* 33 */
+ MDP_COMP_WROT0, /* 34 */
+ MDP_COMP_WROT1, /* 35 */
+ MDP_COMP_WROT2, /* 36 */
+ MDP_COMP_WROT3, /* 37 */
+ MDP_COMP_WDMA, /* 38 */
+ MDP_COMP_SPLIT, /* 39 */
+ MDP_COMP_SPLIT2, /* 40 */
+ MDP_COMP_STITCH, /* 41 */
+ MDP_COMP_FG0, /* 42 */
+ MDP_COMP_FG1, /* 43 */
+ MDP_COMP_FG2, /* 44 */
+ MDP_COMP_FG3, /* 45 */
+ MDP_COMP_TO_SVPP2MOUT, /* 46 */
+ MDP_COMP_TO_SVPP3MOUT, /* 47 */
+ MDP_COMP_TO_WARP0MOUT, /* 48 */
+ MDP_COMP_TO_WARP1MOUT, /* 49 */
+ MDP_COMP_VPP0_SOUT, /* 50 */
+ MDP_COMP_VPP1_SOUT, /* 51 */
+ MDP_COMP_PQ0_SOUT, /* 52 */
+ MDP_COMP_PQ1_SOUT, /* 53 */
+ MDP_COMP_HDR0, /* 54 */
+ MDP_COMP_HDR1, /* 55 */
+ MDP_COMP_HDR2, /* 56 */
+ MDP_COMP_HDR3, /* 57 */
+ MDP_COMP_OVL0, /* 58 */
+ MDP_COMP_OVL1, /* 59 */
+ MDP_COMP_PAD0, /* 60 */
+ MDP_COMP_PAD1, /* 61 */
+ MDP_COMP_PAD2, /* 62 */
+ MDP_COMP_PAD3, /* 63 */
+ MDP_COMP_TCC0, /* 64 */
+ MDP_COMP_TCC1, /* 65 */
+ MDP_COMP_MERGE2, /* 66 */
+ MDP_COMP_MERGE3, /* 67 */
+ MDP_COMP_VDO0DL0, /* 68 */
+ MDP_COMP_VDO1DL0, /* 69 */
+ MDP_COMP_VDO0DL1, /* 70 */
+ MDP_COMP_VDO1DL1, /* 71 */
MDP_MAX_COMP_COUNT /* ALWAYS keep at the end */
};
@@ -117,12 +161,21 @@ enum mdp_comp_type {
MDP_COMP_TYPE_COLOR,
MDP_COMP_TYPE_DRE,
MDP_COMP_TYPE_CCORR,
+ MDP_COMP_TYPE_AAL,
+ MDP_COMP_TYPE_TCC,
MDP_COMP_TYPE_HDR,
+ MDP_COMP_TYPE_SPLIT,
+ MDP_COMP_TYPE_STITCH,
+ MDP_COMP_TYPE_FG,
+ MDP_COMP_TYPE_OVL,
+ MDP_COMP_TYPE_PAD,
+ MDP_COMP_TYPE_MERGE,
MDP_COMP_TYPE_IMGI,
MDP_COMP_TYPE_WPEI,
MDP_COMP_TYPE_EXTO, /* External path */
MDP_COMP_TYPE_DL_PATH, /* Direct-link path */
+ MDP_COMP_TYPE_DUMMY,
MDP_COMP_TYPE_COUNT /* ALWAYS keep at the end */
};
@@ -138,6 +191,7 @@ struct mdp_comp_match {
enum mdp_comp_type type;
u32 alias_id;
s32 inner_id;
+ s32 subsys_id;
};
/* Used to describe the item order in MDP property */
@@ -147,9 +201,16 @@ struct mdp_comp_info {
u32 dts_reg_ofst;
};
+struct mdp_comp_blend {
+ enum mtk_mdp_comp_id b_id;
+ bool aid_mod;
+ bool aid_clk;
+};
+
struct mdp_comp_data {
struct mdp_comp_match match;
struct mdp_comp_info info;
+ struct mdp_comp_blend blend;
};
struct mdp_comp_ops;
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
index 94f4ed78523bb..5209f531ef8d0 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
@@ -21,14 +21,21 @@ static const struct of_device_id mdp_of_ids[] = {
{ .compatible = "mediatek,mt8183-mdp3-rdma",
.data = &mt8183_mdp_driver_data,
},
+ { .compatible = "mediatek,mt8195-mdp3-rdma",
+ .data = &mt8195_mdp_driver_data,
+ },
+ { .compatible = "mediatek,mt8195-mdp3-wrot",
+ .data = &mt8195_mdp_driver_data,
+ },
{},
};
MODULE_DEVICE_TABLE(of, mdp_of_ids);
static struct platform_device *__get_pdev_by_id(struct platform_device *pdev,
+ struct platform_device *from,
enum mdp_infra_id id)
{
- struct device_node *node;
+ struct device_node *node, *f = NULL;
struct platform_device *mdp_pdev = NULL;
const struct mtk_mdp_driver_data *mdp_data;
const char *compat;
@@ -46,9 +53,14 @@ static struct platform_device *__get_pdev_by_id(struct platform_device *pdev,
dev_err(&pdev->dev, "have no driver data to find node\n");
return NULL;
}
+
compat = mdp_data->mdp_probe_infra[id].compatible;
+ if (strlen(compat) == 0)
+ return NULL;
- node = of_find_compatible_node(NULL, NULL, compat);
+ if (from)
+ f = from->dev.of_node;
+ node = of_find_compatible_node(f, NULL, compat);
if (WARN_ON(!node)) {
dev_err(&pdev->dev, "find node from id %d failed\n", id);
return NULL;
@@ -130,6 +142,10 @@ void mdp_video_device_release(struct video_device *vdev)
struct mdp_dev *mdp = (struct mdp_dev *)video_get_drvdata(vdev);
int i;
+ for (i = 0; i < mdp->mdp_data->pp_used; i++)
+ if (mdp->cmdq_clt[i])
+ cmdq_mbox_destroy(mdp->cmdq_clt[i]);
+
scp_put(mdp->scp);
destroy_workqueue(mdp->job_wq);
@@ -140,19 +156,72 @@ void mdp_video_device_release(struct video_device *vdev)
vb2_dma_contig_clear_max_seg_size(&mdp->pdev->dev);
mdp_comp_destroy(mdp);
- for (i = 0; i < MDP_PIPE_MAX; i++)
- mtk_mutex_put(mdp->mdp_mutex[i]);
+ for (i = 0; i < mdp->mdp_data->pipe_info_len; i++) {
+ enum mdp_mm_subsys_id idx;
+ struct mtk_mutex *m;
+ u32 m_id;
+
+ idx = mdp->mdp_data->pipe_info[i].sub_id;
+ m_id = mdp->mdp_data->pipe_info[i].mutex_id;
+ m = mdp->mm_subsys[idx].mdp_mutex[m_id];
+ if (!IS_ERR_OR_NULL(m))
+ mtk_mutex_put(m);
+ }
mdp_vpu_shared_mem_free(&mdp->vpu);
v4l2_m2m_release(mdp->m2m_dev);
kfree(mdp);
}
+static int mdp_mm_subsys_deploy(struct mdp_dev *mdp, enum mdp_infra_id id)
+{
+ struct platform_device *mm_pdev = NULL;
+ struct device **dev;
+ int i;
+
+ if (!mdp)
+ return -EINVAL;
+
+ for (i = 0; i < MDP_MM_SUBSYS_MAX; i++) {
+ const char *compat;
+ enum mdp_infra_id sub_id = id + i;
+
+ switch (id) {
+ case MDP_INFRA_MMSYS:
+ dev = &mdp->mm_subsys[i].mmsys;
+ break;
+ case MDP_INFRA_MUTEX:
+ dev = &mdp->mm_subsys[i].mutex;
+ break;
+ default:
+ dev_err(&mdp->pdev->dev, "Unknown infra id %d", id);
+ return -EINVAL;
+ }
+
+ /*
+ * Not every chip has multiple multimedia subsystems, so
+ * the config may be NULL.
+ */
+ compat = mdp->mdp_data->mdp_probe_infra[sub_id].compatible;
+ if (strlen(compat) == 0)
+ continue;
+
+ mm_pdev = __get_pdev_by_id(mdp->pdev, mm_pdev, sub_id);
+ if (WARN_ON(!mm_pdev))
+ return -ENODEV;
+
+ *dev = &mm_pdev->dev;
+ }
+
+ return 0;
+}
+
static int mdp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mdp_dev *mdp;
struct platform_device *mm_pdev;
+ struct resource *res;
int ret, i, mutex_id;
mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
@@ -164,25 +233,34 @@ static int mdp_probe(struct platform_device *pdev)
mdp->pdev = pdev;
mdp->mdp_data = of_device_get_match_data(&pdev->dev);
- mm_pdev = __get_pdev_by_id(pdev, MDP_INFRA_MMSYS);
- if (!mm_pdev) {
- ret = -ENODEV;
- goto err_destroy_device;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res->start != mdp->mdp_data->mdp_con_res) {
+ platform_set_drvdata(pdev, mdp);
+ goto success_return;
}
- mdp->mdp_mmsys = &mm_pdev->dev;
- mm_pdev = __get_pdev_by_id(pdev, MDP_INFRA_MUTEX);
- if (WARN_ON(!mm_pdev)) {
- ret = -ENODEV;
+ ret = mdp_mm_subsys_deploy(mdp, MDP_INFRA_MMSYS);
+ if (ret)
goto err_destroy_device;
- }
+
+ ret = mdp_mm_subsys_deploy(mdp, MDP_INFRA_MUTEX);
+ if (ret)
+ goto err_destroy_device;
+
for (i = 0; i < mdp->mdp_data->pipe_info_len; i++) {
+ enum mdp_mm_subsys_id idx;
+ struct mtk_mutex **m;
+
+ idx = mdp->mdp_data->pipe_info[i].sub_id;
mutex_id = mdp->mdp_data->pipe_info[i].mutex_id;
- if (!IS_ERR_OR_NULL(mdp->mdp_mutex[mutex_id]))
+ m = &mdp->mm_subsys[idx].mdp_mutex[mutex_id];
+
+ if (!IS_ERR_OR_NULL(*m))
continue;
- mdp->mdp_mutex[mutex_id] = mtk_mutex_get(&mm_pdev->dev);
- if (IS_ERR(mdp->mdp_mutex[mutex_id])) {
- ret = PTR_ERR(mdp->mdp_mutex[mutex_id]);
+
+ *m = mtk_mutex_get(mdp->mm_subsys[idx].mutex);
+ if (IS_ERR(*m)) {
+ ret = PTR_ERR(*m);
goto err_free_mutex;
}
}
@@ -210,7 +288,7 @@ static int mdp_probe(struct platform_device *pdev)
mdp->scp = scp_get(pdev);
if (!mdp->scp) {
- mm_pdev = __get_pdev_by_id(pdev, MDP_INFRA_SCP);
+ mm_pdev = __get_pdev_by_id(pdev, NULL, MDP_INFRA_SCP);
if (WARN_ON(!mm_pdev)) {
dev_err(&pdev->dev, "Could not get scp device\n");
ret = -ENODEV;
@@ -225,10 +303,12 @@ static int mdp_probe(struct platform_device *pdev)
mutex_init(&mdp->vpu_lock);
mutex_init(&mdp->m2m_lock);
- mdp->cmdq_clt = cmdq_mbox_create(dev, 0);
- if (IS_ERR(mdp->cmdq_clt)) {
- ret = PTR_ERR(mdp->cmdq_clt);
- goto err_put_scp;
+ for (i = 0; i < mdp->mdp_data->pp_used; i++) {
+ mdp->cmdq_clt[i] = cmdq_mbox_create(dev, i);
+ if (IS_ERR(mdp->cmdq_clt[i])) {
+ ret = PTR_ERR(mdp->cmdq_clt[i]);
+ goto err_mbox_destroy;
+ }
}
init_waitqueue_head(&mdp->callback_wq);
@@ -250,14 +330,15 @@ static int mdp_probe(struct platform_device *pdev)
goto err_unregister_device;
}
+success_return:
dev_dbg(dev, "mdp-%d registered successfully\n", pdev->id);
return 0;
err_unregister_device:
v4l2_device_unregister(&mdp->v4l2_dev);
err_mbox_destroy:
- cmdq_mbox_destroy(mdp->cmdq_clt);
-err_put_scp:
+ while (--i >= 0)
+ cmdq_mbox_destroy(mdp->cmdq_clt[i]);
scp_put(mdp->scp);
err_destroy_clock_wq:
destroy_workqueue(mdp->clock_wq);
@@ -266,9 +347,16 @@ err_destroy_job_wq:
err_deinit_comp:
mdp_comp_destroy(mdp);
err_free_mutex:
- for (i = 0; i < mdp->mdp_data->pipe_info_len; i++)
- if (!IS_ERR_OR_NULL(mdp->mdp_mutex[i]))
- mtk_mutex_put(mdp->mdp_mutex[i]);
+ for (i = 0; i < mdp->mdp_data->pipe_info_len; i++) {
+ enum mdp_mm_subsys_id idx;
+ struct mtk_mutex *m;
+
+ idx = mdp->mdp_data->pipe_info[i].sub_id;
+ mutex_id = mdp->mdp_data->pipe_info[i].mutex_id;
+ m = mdp->mm_subsys[idx].mdp_mutex[mutex_id];
+ if (!IS_ERR_OR_NULL(m))
+ mtk_mutex_put(m);
+ }
err_destroy_device:
kfree(mdp);
err_return:
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h
index 7e21d226ceb81..8c09e984fd016 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h
@@ -19,12 +19,24 @@
#define MDP_PHANDLE_NAME "mediatek,mdp3"
enum mdp_infra_id {
+ /*
+ * Due to the sequential nature of function "mdp_mm_subsys_deploy",
+ * adding a new enum value necessitates careful consideration.
+ */
MDP_INFRA_MMSYS,
+ MDP_INFRA_MMSYS2,
MDP_INFRA_MUTEX,
+ MDP_INFRA_MUTEX2,
MDP_INFRA_SCP,
MDP_INFRA_MAX
};
+enum mdp_mm_subsys_id {
+ MDP_MM_SUBSYS_0,
+ MDP_MM_SUBSYS_1,
+ MDP_MM_SUBSYS_MAX,
+};
+
enum mdp_buffer_usage {
MDP_BUFFER_USAGE_HW_READ,
MDP_BUFFER_USAGE_MDP,
@@ -37,8 +49,16 @@ struct mdp_platform_config {
bool rdma_support_10bit;
bool rdma_rsz1_sram_sharing;
bool rdma_upsample_repeat_only;
+ bool rdma_esl_setting;
+ u32 rdma_event_num;
bool rsz_disable_dcm_small_sample;
+ bool rsz_etc_control;
bool wrot_filter_constraint;
+ bool wrot_support_10bit;
+ u32 wrot_event_num;
+ u32 tdshp_hist_num;
+ bool tdshp_constrain;
+ bool tdshp_contour;
};
/* indicate which mutex is used by each pipeline */
@@ -47,11 +67,27 @@ enum mdp_pipe_id {
MDP_PIPE_WPEI2,
MDP_PIPE_IMGI,
MDP_PIPE_RDMA0,
+ MDP_PIPE_RDMA1,
+ MDP_PIPE_RDMA2,
+ MDP_PIPE_RDMA3,
+ MDP_PIPE_SPLIT,
+ MDP_PIPE_SPLIT2,
+ MDP_PIPE_VPP0_SOUT,
+ MDP_PIPE_VPP1_SOUT,
MDP_PIPE_MAX
};
+/* MDP parallel pipe control */
+enum {
+ MDP_PP_USED_1 = 1,
+ MDP_PP_USED_2 = 2,
+};
+
+#define MDP_PP_MAX MDP_PP_USED_2
+
struct mtk_mdp_driver_data {
const int mdp_plat_id;
+ const resource_size_t mdp_con_res;
const struct of_device_id *mdp_probe_infra;
const struct mdp_platform_config *mdp_cfg;
const u32 *mdp_mutex_table_idx;
@@ -63,12 +99,19 @@ struct mtk_mdp_driver_data {
const struct mdp_limit *def_limit;
const struct mdp_pipe_info *pipe_info;
unsigned int pipe_info_len;
+ const struct v4l2_rect *pp_criteria;
+ const u8 pp_used;
+};
+
+struct mdp_mm_subsys {
+ struct device *mmsys;
+ struct device *mutex;
+ struct mtk_mutex *mdp_mutex[MDP_PIPE_MAX];
};
struct mdp_dev {
struct platform_device *pdev;
- struct device *mdp_mmsys;
- struct mtk_mutex *mdp_mutex[MDP_PIPE_MAX];
+ struct mdp_mm_subsys mm_subsys[MDP_MM_SUBSYS_MAX];
struct mdp_comp *comp[MDP_MAX_COMP_COUNT];
const struct mtk_mdp_driver_data *mdp_data;
@@ -82,7 +125,7 @@ struct mdp_dev {
s32 vpu_count;
u32 id_count;
struct ida mdp_ida;
- struct cmdq_client *cmdq_clt;
+ struct cmdq_client *cmdq_clt[MDP_PP_MAX];
wait_queue_head_t callback_wq;
struct v4l2_device v4l2_dev;
@@ -96,6 +139,7 @@ struct mdp_dev {
struct mdp_pipe_info {
enum mdp_pipe_id pipe_id;
+ enum mdp_mm_subsys_id sub_id;
u32 mutex_id;
};
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
index a298c1b15b9ea..35a8b059bde5d 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
@@ -87,6 +87,9 @@ static void mdp_m2m_device_run(void *priv)
dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
mdp_set_dst_config(&param.outputs[0], frame, &dst_vb->vb2_buf);
+ if (mdp_check_pp_enable(ctx->mdp_dev, frame))
+ param.type = MDP_STREAM_TYPE_DUAL_BITBLT;
+
ret = mdp_vpu_process(&ctx->mdp_dev->vpu, &param);
if (ret) {
dev_err(&ctx->mdp_dev->pdev->dev,
@@ -101,6 +104,18 @@ static void mdp_m2m_device_run(void *priv)
task.cb_data = NULL;
task.mdp_ctx = ctx;
+ if (atomic_read(&ctx->mdp_dev->job_count)) {
+ ret = wait_event_timeout(ctx->mdp_dev->callback_wq,
+ !atomic_read(&ctx->mdp_dev->job_count),
+ 2 * HZ);
+ if (ret == 0) {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "%d jobs not yet done\n",
+ atomic_read(&ctx->mdp_dev->job_count));
+ goto worker_end;
+ }
+ }
+
ret = mdp_cmdq_send(ctx->mdp_dev, &task);
if (ret) {
dev_err(&ctx->mdp_dev->pdev->dev,
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
index 9b436b911d92d..657356f87743e 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
@@ -304,6 +304,24 @@ int mdp_check_scaling_ratio(const struct v4l2_rect *crop,
return 0;
}
+bool mdp_check_pp_enable(struct mdp_dev *mdp, struct mdp_frame *frame)
+{
+ u32 s, r1, r2;
+
+ if (!mdp || !frame)
+ return false;
+
+ if (!mdp->mdp_data->pp_criteria)
+ return false;
+
+ s = mdp->mdp_data->pp_criteria->width *
+ mdp->mdp_data->pp_criteria->height;
+ r1 = frame->crop.c.width * frame->crop.c.height;
+ r2 = frame->compose.width * frame->compose.height;
+
+ return (r1 >= s || r2 >= s);
+}
+
/* Stride that is accepted by MDP HW */
static u32 mdp_fmt_get_stride(const struct mdp_format *fmt,
u32 bytesperline, unsigned int plane)
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.h
index e9ab8ac2c0e87..b0c8f9f008202 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.h
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.h
@@ -368,6 +368,7 @@ int mdp_try_crop(struct mdp_m2m_ctx *ctx, struct v4l2_rect *r,
int mdp_check_scaling_ratio(const struct v4l2_rect *crop,
const struct v4l2_rect *compose, s32 rotation,
const struct mdp_limit *limit);
+bool mdp_check_pp_enable(struct mdp_dev *mdp, struct mdp_frame *frame);
void mdp_set_src_config(struct img_input *in,
struct mdp_frame *frame, struct vb2_buffer *vb);
void mdp_set_dst_config(struct img_output *out,
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
index 49fc2e9d45dd5..da3a892ad867e 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
@@ -198,6 +198,7 @@ int mdp_vpu_dev_init(struct mdp_vpu_dev *vpu, struct mtk_scp *scp,
};
struct mdp_dev *mdp = vpu_to_mdp(vpu);
int err;
+ u8 pp_num = mdp->mdp_data->pp_used;
init_completion(&vpu->ipi_acked);
vpu->scp = scp;
@@ -211,7 +212,7 @@ int mdp_vpu_dev_init(struct mdp_vpu_dev *vpu, struct mtk_scp *scp,
mutex_lock(vpu->lock);
vpu->work_size = ALIGN(vpu->work_size, 64);
vpu->param_size = ALIGN(sizeof(struct img_ipi_frameparam), 64);
- vpu->config_size = ALIGN(sizeof(struct img_config), 64);
+ vpu->config_size = ALIGN(sizeof(struct img_config) * pp_num, 64);
err = mdp_vpu_shared_mem_alloc(vpu);
mutex_unlock(vpu->lock);
if (err) {
diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
index 9f6e4b59455da..d7027d600208f 100644
--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
@@ -29,15 +29,7 @@ static int mtk_vcodec_vpu_set_ipi_register(struct mtk_vcodec_fw *fw, int id,
mtk_vcodec_ipi_handler handler,
const char *name, void *priv)
{
- /*
- * The handler we receive takes a void * as its first argument. We
- * cannot change this because it needs to be passed down to the rproc
- * subsystem when SCP is used. VPU takes a const argument, which is
- * more constrained, so the conversion below is safe.
- */
- ipi_handler_t handler_const = (ipi_handler_t)handler;
-
- return vpu_ipi_register(fw->pdev, id, handler_const, name, priv);
+ return vpu_ipi_register(fw->pdev, id, handler, name, priv);
}
static int mtk_vcodec_vpu_ipi_send(struct mtk_vcodec_fw *fw, int id, void *buf,
@@ -58,12 +50,12 @@ static void mtk_vcodec_vpu_reset_dec_handler(void *priv)
dev_err(&dev->plat_dev->dev, "Watchdog timeout!!");
- mutex_lock(&dev->dev_mutex);
+ mutex_lock(&dev->dev_ctx_lock);
list_for_each_entry(ctx, &dev->ctx_list, list) {
ctx->state = MTK_STATE_ABORT;
mtk_v4l2_vdec_dbg(0, ctx, "[%d] Change to state MTK_STATE_ABORT", ctx->id);
}
- mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&dev->dev_ctx_lock);
}
static void mtk_vcodec_vpu_reset_enc_handler(void *priv)
@@ -73,12 +65,12 @@ static void mtk_vcodec_vpu_reset_enc_handler(void *priv)
dev_err(&dev->plat_dev->dev, "Watchdog timeout!!");
- mutex_lock(&dev->dev_mutex);
+ mutex_lock(&dev->dev_ctx_lock);
list_for_each_entry(ctx, &dev->ctx_list, list) {
ctx->state = MTK_STATE_ABORT;
mtk_v4l2_vdec_dbg(0, ctx, "[%d] Change to state MTK_STATE_ABORT", ctx->id);
}
- mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&dev->dev_ctx_lock);
}
static const struct mtk_vcodec_fw_ops mtk_vcodec_vpu_msg = {
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.h b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.h
index ece27c880e50c..1af075fc01940 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.h
@@ -39,7 +39,6 @@ struct vdec_fb {
/**
* struct mtk_video_dec_buf - Private data related to each VB2 buffer.
* @m2m_buf: M2M buffer
- * @list: link list
* @used: Capture buffer contain decoded frame data and keep in
* codec data structure
* @queued_in_vb2: Capture buffer is queue in vb2
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
index f47c98faf068b..2073781ccadb1 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
@@ -268,7 +268,9 @@ static int fops_vcodec_open(struct file *file)
ctx->dev->vdec_pdata->init_vdec_params(ctx);
+ mutex_lock(&dev->dev_ctx_lock);
list_add(&ctx->list, &dev->ctx_list);
+ mutex_unlock(&dev->dev_ctx_lock);
mtk_vcodec_dbgfs_create(ctx);
mutex_unlock(&dev->dev_mutex);
@@ -311,7 +313,9 @@ static int fops_vcodec_release(struct file *file)
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
mtk_vcodec_dbgfs_remove(dev, ctx->id);
+ mutex_lock(&dev->dev_ctx_lock);
list_del_init(&ctx->list);
+ mutex_unlock(&dev->dev_ctx_lock);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
return 0;
@@ -404,6 +408,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
for (i = 0; i < MTK_VDEC_HW_MAX; i++)
mutex_init(&dev->dec_mutex[i]);
mutex_init(&dev->dev_mutex);
+ mutex_init(&dev->dev_ctx_lock);
spin_lock_init(&dev->irqlock);
snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
index 849b89dd205c2..85b2c0d3d8bcd 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
@@ -241,6 +241,7 @@ struct mtk_vcodec_dec_ctx {
*
* @dec_mutex: decoder hardware lock
* @dev_mutex: video_device lock
+ * @dev_ctx_lock: lock protecting the context list
* @decode_workqueue: decode work queue
*
* @irqlock: protect data access by irq handler and work thread
@@ -282,6 +283,7 @@ struct mtk_vcodec_dec_dev {
/* decoder hardware mutex lock */
struct mutex dec_mutex[MTK_VDEC_HW_MAX];
struct mutex dev_mutex;
+ struct mutex dev_ctx_lock;
struct workqueue_struct *decode_workqueue;
spinlock_t irqlock;
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
index d54b3833790d1..b903e39fee892 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
@@ -566,7 +566,7 @@ static void mtk_vcodec_dec_fill_h264_level(struct v4l2_ctrl_config *cfg,
default:
cfg->max = V4L2_MPEG_VIDEO_H264_LEVEL_4_1;
break;
- };
+ }
}
static void mtk_vcodec_dec_fill_h264_profile(struct v4l2_ctrl_config *cfg,
@@ -580,7 +580,7 @@ static void mtk_vcodec_dec_fill_h264_profile(struct v4l2_ctrl_config *cfg,
default:
cfg->max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH;
break;
- };
+ }
}
static void mtk_vcodec_dec_fill_h265_level(struct v4l2_ctrl_config *cfg,
@@ -596,7 +596,7 @@ static void mtk_vcodec_dec_fill_h265_level(struct v4l2_ctrl_config *cfg,
default:
cfg->max = V4L2_MPEG_VIDEO_HEVC_LEVEL_4;
break;
- };
+ }
}
static void mtk_vcodec_dec_fill_h265_profile(struct v4l2_ctrl_config *cfg,
@@ -610,7 +610,7 @@ static void mtk_vcodec_dec_fill_h265_profile(struct v4l2_ctrl_config *cfg,
default:
cfg->max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE;
break;
- };
+ }
}
static void mtk_vcodec_dec_fill_vp9_level(struct v4l2_ctrl_config *cfg,
@@ -630,7 +630,7 @@ static void mtk_vcodec_dec_fill_vp9_level(struct v4l2_ctrl_config *cfg,
default:
cfg->max = V4L2_MPEG_VIDEO_VP9_LEVEL_4_0;
break;
- };
+ }
}
static void mtk_vcodec_dec_fill_vp9_profile(struct v4l2_ctrl_config *cfg,
@@ -644,7 +644,7 @@ static void mtk_vcodec_dec_fill_vp9_profile(struct v4l2_ctrl_config *cfg,
default:
cfg->max = V4L2_MPEG_VIDEO_VP9_PROFILE_1;
break;
- };
+ }
}
static void mtk_vcodec_dec_reset_controls(struct v4l2_ctrl_config *cfg,
@@ -680,7 +680,7 @@ static void mtk_vcodec_dec_reset_controls(struct v4l2_ctrl_config *cfg,
break;
default:
break;
- };
+ }
}
static int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_dec_ctx *ctx)
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
index 06ed47df693bf..21836dd6ef85a 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
@@ -869,7 +869,6 @@ static int vdec_hevc_slice_init(struct mtk_vcodec_dec_ctx *ctx)
inst->vpu.codec_type = ctx->current_codec;
inst->vpu.capture_type = ctx->capture_fourcc;
- ctx->drv_handle = inst;
err = vpu_dec_init(&inst->vpu);
if (err) {
mtk_vdec_err(ctx, "vdec_hevc init err=%d", err);
@@ -898,6 +897,7 @@ static int vdec_hevc_slice_init(struct mtk_vcodec_dec_ctx *ctx)
mtk_vdec_debug(ctx, "lat hevc instance >> %p, codec_type = 0x%x",
inst, inst->vpu.codec_type);
+ ctx->drv_handle = inst;
return 0;
error_free_inst:
kfree(inst);
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
index 19407f9bc773c..987b3d71b662a 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
@@ -449,7 +449,7 @@ static int vdec_vp8_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
inst->frm_cnt, y_fb_dma, c_fb_dma, fb);
inst->cur_fb = fb;
- dec->bs_dma = (unsigned long)bs->dma_addr;
+ dec->bs_dma = (uint64_t)bs->dma_addr;
dec->bs_sz = bs->size;
dec->cur_y_fb_dma = y_fb_dma;
dec->cur_c_fb_dma = c_fb_dma;
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
index f64b21c071696..f677e499fefab 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
@@ -37,7 +37,6 @@
* @bs_sz: bitstream size
* @resolution_changed:resolution change flag 1 - changed, 0 - not change
* @frame_header_type: current frame header type
- * @wait_key_frame: wait key frame coming
* @crc: used to check whether hardware's status is right
* @reserved: reserved, currently unused
*/
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
index 55355fa700908..039082f600c81 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
@@ -16,6 +16,7 @@
#include "../vdec_drv_base.h"
#include "../vdec_vpu_if.h"
+#define VP9_MAX_SUPER_FRAMES_NUM 8
#define VP9_SUPER_FRAME_BS_SZ 64
#define MAX_VP9_DPB_SIZE 9
@@ -133,11 +134,11 @@ struct vp9_sf_ref_fb {
*/
struct vdec_vp9_vsi {
unsigned char sf_bs_buf[VP9_SUPER_FRAME_BS_SZ];
- struct vp9_sf_ref_fb sf_ref_fb[VP9_MAX_FRM_BUF_NUM-1];
+ struct vp9_sf_ref_fb sf_ref_fb[VP9_MAX_SUPER_FRAMES_NUM];
int sf_next_ref_fb_idx;
unsigned int sf_frm_cnt;
- unsigned int sf_frm_offset[VP9_MAX_FRM_BUF_NUM-1];
- unsigned int sf_frm_sz[VP9_MAX_FRM_BUF_NUM-1];
+ unsigned int sf_frm_offset[VP9_MAX_SUPER_FRAMES_NUM];
+ unsigned int sf_frm_sz[VP9_MAX_SUPER_FRAMES_NUM];
unsigned int sf_frm_idx;
unsigned int sf_init;
struct vdec_fb fb;
@@ -526,7 +527,7 @@ static void vp9_swap_frm_bufs(struct vdec_vp9_inst *inst)
/* if this super frame and it is not last sub-frame, get next fb for
* sub-frame decode
*/
- if (vsi->sf_frm_cnt > 0 && vsi->sf_frm_idx != vsi->sf_frm_cnt - 1)
+ if (vsi->sf_frm_cnt > 0 && vsi->sf_frm_idx != vsi->sf_frm_cnt)
vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst);
}
@@ -735,7 +736,7 @@ static void get_free_fb(struct vdec_vp9_inst *inst, struct vdec_fb **out_fb)
static int validate_vsi_array_indexes(struct vdec_vp9_inst *inst,
struct vdec_vp9_vsi *vsi) {
- if (vsi->sf_frm_idx >= VP9_MAX_FRM_BUF_NUM - 1) {
+ if (vsi->sf_frm_idx > VP9_MAX_SUPER_FRAMES_NUM) {
mtk_vdec_err(inst->ctx, "Invalid vsi->sf_frm_idx=%u.", vsi->sf_frm_idx);
return -EIO;
}
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
index 69d37b93bd358..eea709d938209 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
@@ -141,7 +141,6 @@ struct vdec_vp9_slice_frame_counts {
* @skip: skip counts.
* @y_mode: Y prediction mode counts.
* @filter: interpolation filter counts.
- * @mv_joint: motion vector joint counts.
* @sign: motion vector sign counts.
* @classes: motion vector class counts.
* @class0: motion vector class0 bit counts.
@@ -1075,7 +1074,7 @@ static int vdec_vp9_slice_setup_tile_buffer(struct vdec_vp9_slice_instance *inst
unsigned int mi_row;
unsigned int mi_col;
unsigned int offset;
- unsigned int pa;
+ dma_addr_t pa;
unsigned int size;
struct vdec_vp9_slice_tiles *tiles;
unsigned char *pos;
@@ -1110,7 +1109,7 @@ static int vdec_vp9_slice_setup_tile_buffer(struct vdec_vp9_slice_instance *inst
pos = va + offset;
end = va + bs->size;
/* truncated */
- pa = (unsigned int)bs->dma_addr + offset;
+ pa = bs->dma_addr + offset;
tb = instance->tile.va;
for (i = 0; i < rows; i++) {
for (j = 0; j < cols; j++) {
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
index 82e57ae983d55..da6be556727bb 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
@@ -77,12 +77,14 @@ static bool vpu_dec_check_ap_inst(struct mtk_vcodec_dec_dev *dec_dev, struct vde
struct mtk_vcodec_dec_ctx *ctx;
int ret = false;
+ mutex_lock(&dec_dev->dev_ctx_lock);
list_for_each_entry(ctx, &dec_dev->ctx_list, list) {
if (!IS_ERR_OR_NULL(ctx) && ctx->vpu_inst == vpu) {
ret = true;
break;
}
}
+ mutex_unlock(&dec_dev->dev_ctx_lock);
return ret;
}
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.h b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.h
index fbb3f34a73f05..aa7d08afc2f48 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.h
@@ -22,7 +22,6 @@ struct mtk_vcodec_dec_ctx;
* in place of inst_addr in messages.
* @signaled : 1 - Host has received ack message from VPU, 0 - not received
* @ctx : context for v4l2 layer integration
- * @dev : platform device of VPU
* @wq : wait queue to wait VPU message ack
* @handler : ipi handler for each decoder
* @codec_type : use codec type to separate different codecs
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.h
index 82246401ed4a3..908d8179b2d25 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.h
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.h
@@ -26,7 +26,6 @@
/**
* struct mtk_video_enc_buf - Private data related to each VB2 buffer.
* @m2m_buf: M2M buffer
- * @list: list that buffer link to
* @param_change: Types of encode parameter change before encoding this
* buffer
* @enc_params: Encode parameters changed before encode this buffer
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
index 6319f24bc714b..3cb8a16222220 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
@@ -177,7 +177,9 @@ static int fops_vcodec_open(struct file *file)
mtk_v4l2_venc_dbg(2, ctx, "Create instance [%d]@%p m2m_ctx=%p ",
ctx->id, ctx, ctx->m2m_ctx);
+ mutex_lock(&dev->dev_ctx_lock);
list_add(&ctx->list, &dev->ctx_list);
+ mutex_unlock(&dev->dev_ctx_lock);
mutex_unlock(&dev->dev_mutex);
mtk_v4l2_venc_dbg(0, ctx, "%s encoder [%d]", dev_name(&dev->plat_dev->dev),
@@ -212,7 +214,9 @@ static int fops_vcodec_release(struct file *file)
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+ mutex_lock(&dev->dev_ctx_lock);
list_del_init(&ctx->list);
+ mutex_unlock(&dev->dev_ctx_lock);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
return 0;
@@ -294,6 +298,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
mutex_init(&dev->enc_mutex);
mutex_init(&dev->dev_mutex);
+ mutex_init(&dev->dev_ctx_lock);
spin_lock_init(&dev->irqlock);
snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
index a042f607ed8d1..0bd85d0fb379a 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
@@ -178,6 +178,7 @@ struct mtk_vcodec_enc_ctx {
*
* @enc_mutex: encoder hardware lock.
* @dev_mutex: video_device lock
+ * @dev_ctx_lock: lock protecting the context list
* @encode_workqueue: encode work queue
*
* @enc_irq: h264 encoder irq resource
@@ -205,6 +206,7 @@ struct mtk_vcodec_enc_dev {
/* encoder hardware mutex lock */
struct mutex enc_mutex;
struct mutex dev_mutex;
+ struct mutex dev_ctx_lock;
struct workqueue_struct *encode_workqueue;
int enc_irq;
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
index 84ad1cc6ad171..51bb7ee141b9e 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
@@ -47,12 +47,14 @@ static bool vpu_enc_check_ap_inst(struct mtk_vcodec_enc_dev *enc_dev, struct ven
struct mtk_vcodec_enc_ctx *ctx;
int ret = false;
+ mutex_lock(&enc_dev->dev_ctx_lock);
list_for_each_entry(ctx, &enc_dev->ctx_list, list) {
if (!IS_ERR_OR_NULL(ctx) && ctx->vpu_inst == vpu) {
ret = true;
break;
}
}
+ mutex_unlock(&enc_dev->dev_ctx_lock);
return ret;
}
diff --git a/drivers/media/platform/mediatek/vpu/mtk_vpu.c b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
index 7243604a82a5b..724ae7c2ab3ba 100644
--- a/drivers/media/platform/mediatek/vpu/mtk_vpu.c
+++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
@@ -635,7 +635,7 @@ OUT_LOAD_FW:
}
EXPORT_SYMBOL_GPL(vpu_load_firmware);
-static void vpu_init_ipi_handler(const void *data, unsigned int len, void *priv)
+static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv)
{
struct mtk_vpu *vpu = priv;
const struct vpu_run *run = data;
diff --git a/drivers/media/platform/mediatek/vpu/mtk_vpu.h b/drivers/media/platform/mediatek/vpu/mtk_vpu.h
index a56053ff135af..da05f3e740810 100644
--- a/drivers/media/platform/mediatek/vpu/mtk_vpu.h
+++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.h
@@ -17,7 +17,7 @@
* VPU interfaces with other blocks by share memory and interrupt.
*/
-typedef void (*ipi_handler_t) (const void *data,
+typedef void (*ipi_handler_t) (void *data,
unsigned int len,
void *priv);
diff --git a/drivers/media/platform/nuvoton/npcm-video.c b/drivers/media/platform/nuvoton/npcm-video.c
index a1fcb616b256a..60fbb91400355 100644
--- a/drivers/media/platform/nuvoton/npcm-video.c
+++ b/drivers/media/platform/nuvoton/npcm-video.c
@@ -1785,7 +1785,7 @@ static int npcm_video_probe(struct platform_device *pdev)
return 0;
}
-static int npcm_video_remove(struct platform_device *pdev)
+static void npcm_video_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
@@ -1798,8 +1798,6 @@ static int npcm_video_remove(struct platform_device *pdev)
if (video->ece.enable)
npcm_video_ece_stop(video);
of_reserved_mem_device_release(dev);
-
- return 0;
}
static const struct of_device_id npcm_video_match[] = {
@@ -1816,7 +1814,7 @@ static struct platform_driver npcm_video_driver = {
.of_match_table = npcm_video_match,
},
.probe = npcm_video_probe,
- .remove = npcm_video_remove,
+ .remove_new = npcm_video_remove,
};
module_platform_driver(npcm_video_driver);
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
index 64112b63298ca..cc97790ed30f6 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
@@ -1373,6 +1373,8 @@ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
q_data_cap->crop.top = 0;
q_data_cap->crop.width = jpeg_src_buf->w;
q_data_cap->crop.height = jpeg_src_buf->h;
+ q_data_cap->bytesperline[0] = 0;
+ q_data_cap->bytesperline[1] = 0;
/*
* align up the resolution for CAST IP,
@@ -1752,6 +1754,14 @@ static u32 mxc_jpeg_get_image_format(struct device *dev,
static void mxc_jpeg_bytesperline(struct mxc_jpeg_q_data *q, u32 precision)
{
+ u32 bytesperline[2];
+
+ bytesperline[0] = q->bytesperline[0];
+ bytesperline[1] = q->bytesperline[0]; /* imx-jpeg only supports the same line pitch */
+ v4l_bound_align_image(&bytesperline[0], 0, MXC_JPEG_MAX_LINE, 2,
+ &bytesperline[1], 0, MXC_JPEG_MAX_LINE, 2,
+ 0);
+
/* Bytes distance between the leftmost pixels in two adjacent lines */
if (q->fmt->fourcc == V4L2_PIX_FMT_JPEG) {
/* bytesperline unused for compressed formats */
@@ -1775,6 +1785,12 @@ static void mxc_jpeg_bytesperline(struct mxc_jpeg_q_data *q, u32 precision)
q->bytesperline[0] = q->w_adjusted * DIV_ROUND_UP(precision, 8);
q->bytesperline[1] = 0;
}
+
+ if (q->fmt->fourcc != V4L2_PIX_FMT_JPEG) {
+ q->bytesperline[0] = max(q->bytesperline[0], bytesperline[0]);
+ if (q->fmt->mem_planes > 1)
+ q->bytesperline[1] = max(q->bytesperline[1], bytesperline[1]);
+ }
}
static void mxc_jpeg_sizeimage(struct mxc_jpeg_q_data *q)
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
index dc4afeeff5b65..86e324b21aed2 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
@@ -22,6 +22,7 @@
#define MXC_JPEG_MIN_HEIGHT 64
#define MXC_JPEG_MAX_WIDTH 0x2000
#define MXC_JPEG_MAX_HEIGHT 0x2000
+#define MXC_JPEG_MAX_LINE 0x8000
#define MXC_JPEG_MAX_CFG_STREAM 0x1000
#define MXC_JPEG_H_ALIGN 3
#define MXC_JPEG_W_ALIGN 3
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
index f73facb97dc50..c2013995049c6 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
@@ -506,7 +506,7 @@ err_pm:
return ret;
}
-static int mxc_isi_remove(struct platform_device *pdev)
+static void mxc_isi_remove(struct platform_device *pdev)
{
struct mxc_isi_dev *isi = platform_get_drvdata(pdev);
unsigned int i;
@@ -523,8 +523,6 @@ static int mxc_isi_remove(struct platform_device *pdev)
mxc_isi_v4l2_cleanup(isi);
pm_runtime_disable(isi->dev);
-
- return 0;
}
static const struct of_device_id mxc_isi_of_match[] = {
@@ -537,7 +535,7 @@ MODULE_DEVICE_TABLE(of, mxc_isi_of_match);
static struct platform_driver mxc_isi_driver = {
.probe = mxc_isi_probe,
- .remove = mxc_isi_remove,
+ .remove_new = mxc_isi_remove,
.driver = {
.of_match_table = mxc_isi_of_match,
.name = MXC_ISI_DRIVER_NAME,
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
index 575f173373887..93a55c97cd173 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
@@ -161,7 +161,6 @@ mxc_isi_crossbar_xlate_streams(struct mxc_isi_crossbar *xbar,
pad = media_pad_remote_pad_first(&xbar->pads[sink_pad]);
sd = media_entity_to_v4l2_subdev(pad->entity);
-
if (!sd) {
dev_dbg(xbar->isi->dev,
"no entity connected to crossbar input %u\n",
@@ -469,7 +468,8 @@ int mxc_isi_crossbar_init(struct mxc_isi_dev *isi)
}
for (i = 0; i < xbar->num_sinks; ++i)
- xbar->pads[i].flags = MEDIA_PAD_FL_SINK;
+ xbar->pads[i].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
for (i = 0; i < xbar->num_sources; ++i)
xbar->pads[i + xbar->num_sinks].flags = MEDIA_PAD_FL_SOURCE;
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
index 19e80b95ffeaa..5623914f95e64 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
@@ -215,8 +215,7 @@ static void mxc_isi_channel_set_csc(struct mxc_isi_pipe *pipe,
[MXC_ISI_ENC_RGB] = "RGB",
[MXC_ISI_ENC_YUV] = "YUV",
};
- const u32 *coeffs;
- bool cscen = true;
+ const u32 *coeffs = NULL;
u32 val;
val = mxc_isi_read(pipe, CHNL_IMG_CTRL);
@@ -235,14 +234,13 @@ static void mxc_isi_channel_set_csc(struct mxc_isi_pipe *pipe,
val |= CHNL_IMG_CTRL_CSC_MODE(CHNL_IMG_CTRL_CSC_MODE_RGB2YCBCR);
} else {
/* Bypass CSC */
- cscen = false;
val |= CHNL_IMG_CTRL_CSC_BYPASS;
}
dev_dbg(pipe->isi->dev, "CSC: %s -> %s\n",
encodings[in_encoding], encodings[out_encoding]);
- if (cscen) {
+ if (coeffs) {
mxc_isi_write(pipe, CHNL_CSC_COEFF0, coeffs[0]);
mxc_isi_write(pipe, CHNL_CSC_COEFF1, coeffs[1]);
mxc_isi_write(pipe, CHNL_CSC_COEFF2, coeffs[2]);
@@ -253,7 +251,7 @@ static void mxc_isi_channel_set_csc(struct mxc_isi_pipe *pipe,
mxc_isi_write(pipe, CHNL_IMG_CTRL, val);
- *bypass = !cscen;
+ *bypass = !coeffs;
}
void mxc_isi_channel_set_alpha(struct mxc_isi_pipe *pipe, u8 alpha)
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 7ef341bf21cc8..6a77de3744543 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -427,7 +427,6 @@ enum venus_inst_modes {
* @error: an error returned during last HFI sync operation
* @session_error: a flag rised by HFI interface in case of session error
* @ops: HFI operations
- * @priv: a private for HFI operations callbacks
* @session_type: the type of the session (decoder or encoder)
* @hprop: a union used as a holder by get property
* @core_acquired: the Core has been acquired
diff --git a/drivers/media/platform/renesas/Kconfig b/drivers/media/platform/renesas/Kconfig
index ed788e991f74b..c7fc718a30a5e 100644
--- a/drivers/media/platform/renesas/Kconfig
+++ b/drivers/media/platform/renesas/Kconfig
@@ -14,6 +14,22 @@ config VIDEO_RENESAS_CEU
help
This is a v4l2 driver for the Renesas CEU Interface
+config VIDEO_RCAR_CSI2
+ tristate "R-Car MIPI CSI-2 Receiver"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select RESET_CONTROLLER
+ select V4L2_FWNODE
+ help
+ Support for Renesas R-Car MIPI CSI-2 receiver.
+ Supports R-Car Gen3 and RZ/G2 SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rcar-csi2.
+
config VIDEO_RCAR_ISP
tristate "R-Car Image Signal Processor (ISP)"
depends on V4L_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/renesas/Makefile b/drivers/media/platform/renesas/Makefile
index 55854e8688870..50774a20330c9 100644
--- a/drivers/media/platform/renesas/Makefile
+++ b/drivers/media/platform/renesas/Makefile
@@ -7,6 +7,7 @@ obj-y += rcar-vin/
obj-y += rzg2l-cru/
obj-y += vsp1/
+obj-$(CONFIG_VIDEO_RCAR_CSI2) += rcar-csi2.o
obj-$(CONFIG_VIDEO_RCAR_DRIF) += rcar_drif.o
obj-$(CONFIG_VIDEO_RCAR_ISP) += rcar-isp.o
obj-$(CONFIG_VIDEO_RENESAS_CEU) += renesas-ceu.o
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c b/drivers/media/platform/renesas/rcar-csi2.c
index 582d5e35db0e5..582d5e35db0e5 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/renesas/rcar-csi2.c
diff --git a/drivers/media/platform/renesas/rcar-isp.c b/drivers/media/platform/renesas/rcar-isp.c
index 530d65fc546bc..4512ac338ca53 100644
--- a/drivers/media/platform/renesas/rcar-isp.c
+++ b/drivers/media/platform/renesas/rcar-isp.c
@@ -518,6 +518,7 @@ static void risp_remove(struct platform_device *pdev)
static struct platform_driver rcar_isp_driver = {
.driver = {
.name = "rcar-isp",
+ .suppress_bind_attrs = true,
.of_match_table = risp_of_id_table,
},
.probe = risp_probe,
diff --git a/drivers/media/platform/renesas/rcar-vin/Kconfig b/drivers/media/platform/renesas/rcar-vin/Kconfig
index de55fe63d84cf..2ec857ab83cb4 100644
--- a/drivers/media/platform/renesas/rcar-vin/Kconfig
+++ b/drivers/media/platform/renesas/rcar-vin/Kconfig
@@ -1,20 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-config VIDEO_RCAR_CSI2
- tristate "R-Car MIPI CSI-2 Receiver"
- depends on V4L_PLATFORM_DRIVERS
- depends on VIDEO_DEV && OF
- depends on ARCH_RENESAS || COMPILE_TEST
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select RESET_CONTROLLER
- select V4L2_FWNODE
- help
- Support for Renesas R-Car MIPI CSI-2 receiver.
- Supports R-Car Gen3 and RZ/G2 SoCs.
-
- To compile this driver as a module, choose M here: the
- module will be called rcar-csi2.
-
config VIDEO_RCAR_VIN
tristate "R-Car Video Input (VIN) Driver"
depends on V4L_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/renesas/rcar-vin/Makefile b/drivers/media/platform/renesas/rcar-vin/Makefile
index 00d809f5d2c10..5938ad6290c84 100644
--- a/drivers/media/platform/renesas/rcar-vin/Makefile
+++ b/drivers/media/platform/renesas/rcar-vin/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
rcar-vin-objs = rcar-core.o rcar-dma.o rcar-v4l2.o
-obj-$(CONFIG_VIDEO_RCAR_CSI2) += rcar-csi2.o
obj-$(CONFIG_VIDEO_RCAR_VIN) += rcar-vin.o
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
index 811603f18af09..a5a99b004322b 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
@@ -133,9 +133,6 @@ struct rzg2l_cru_dev {
struct v4l2_pix_format format;
};
-void rzg2l_cru_vclk_unprepare(struct rzg2l_cru_dev *cru);
-int rzg2l_cru_vclk_prepare(struct rzg2l_cru_dev *cru);
-
int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru);
void rzg2l_cru_stop_image_processing(struct rzg2l_cru_dev *cru);
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
index d20f4eff93a42..e68fcdaea207a 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
@@ -108,6 +108,7 @@ struct rzg2l_csi2 {
struct reset_control *presetn;
struct reset_control *cmn_rstb;
struct clk *sysclk;
+ struct clk *vclk;
unsigned long vclk_rate;
struct v4l2_subdev subdev;
@@ -361,7 +362,7 @@ static int rzg2l_csi2_dphy_setting(struct v4l2_subdev *sd, bool on)
return rzg2l_csi2_dphy_disable(csi2);
}
-static void rzg2l_csi2_mipi_link_enable(struct rzg2l_csi2 *csi2)
+static int rzg2l_csi2_mipi_link_enable(struct rzg2l_csi2 *csi2)
{
unsigned long vclk_rate = csi2->vclk_rate / HZ_PER_MHZ;
u32 frrskw, frrclk, frrskw_coeff, frrclk_coeff;
@@ -386,11 +387,15 @@ static void rzg2l_csi2_mipi_link_enable(struct rzg2l_csi2 *csi2)
rzg2l_csi2_write(csi2, CSI2nDTEL, 0xf778ff0f);
rzg2l_csi2_write(csi2, CSI2nDTEH, 0x00ffff1f);
+ clk_disable_unprepare(csi2->vclk);
+
/* Enable LINK reception */
rzg2l_csi2_write(csi2, CSI2nMCT3, CSI2nMCT3_RXEN);
+
+ return clk_prepare_enable(csi2->vclk);
}
-static void rzg2l_csi2_mipi_link_disable(struct rzg2l_csi2 *csi2)
+static int rzg2l_csi2_mipi_link_disable(struct rzg2l_csi2 *csi2)
{
unsigned int timeout = VSRSTS_RETRIES;
@@ -409,18 +414,21 @@ static void rzg2l_csi2_mipi_link_disable(struct rzg2l_csi2 *csi2)
if (!timeout)
dev_err(csi2->dev, "Clearing CSI2nRTST.VSRSTS timed out\n");
+
+ return 0;
}
static int rzg2l_csi2_mipi_link_setting(struct v4l2_subdev *sd, bool on)
{
struct rzg2l_csi2 *csi2 = sd_to_csi2(sd);
+ int ret;
if (on)
- rzg2l_csi2_mipi_link_enable(csi2);
+ ret = rzg2l_csi2_mipi_link_enable(csi2);
else
- rzg2l_csi2_mipi_link_disable(csi2);
+ ret = rzg2l_csi2_mipi_link_disable(csi2);
- return 0;
+ return ret;
}
static int rzg2l_csi2_s_stream(struct v4l2_subdev *sd, int enable)
@@ -731,7 +739,6 @@ static const struct media_entity_operations rzg2l_csi2_entity_ops = {
static int rzg2l_csi2_probe(struct platform_device *pdev)
{
struct rzg2l_csi2 *csi2;
- struct clk *vclk;
int ret;
csi2 = devm_kzalloc(&pdev->dev, sizeof(*csi2), GFP_KERNEL);
@@ -757,12 +764,11 @@ static int rzg2l_csi2_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(csi2->sysclk),
"Failed to get system clk\n");
- vclk = clk_get(&pdev->dev, "video");
- if (IS_ERR(vclk))
- return dev_err_probe(&pdev->dev, PTR_ERR(vclk),
+ csi2->vclk = devm_clk_get(&pdev->dev, "video");
+ if (IS_ERR(csi2->vclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(csi2->vclk),
"Failed to get video clock\n");
- csi2->vclk_rate = clk_get_rate(vclk);
- clk_put(vclk);
+ csi2->vclk_rate = clk_get_rate(csi2->vclk);
csi2->dev = &pdev->dev;
@@ -834,7 +840,7 @@ static void rzg2l_csi2_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-static int __maybe_unused rzg2l_csi2_pm_runtime_suspend(struct device *dev)
+static int rzg2l_csi2_pm_runtime_suspend(struct device *dev)
{
struct rzg2l_csi2 *csi2 = dev_get_drvdata(dev);
@@ -843,7 +849,7 @@ static int __maybe_unused rzg2l_csi2_pm_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused rzg2l_csi2_pm_runtime_resume(struct device *dev)
+static int rzg2l_csi2_pm_runtime_resume(struct device *dev)
{
struct rzg2l_csi2 *csi2 = dev_get_drvdata(dev);
@@ -851,7 +857,8 @@ static int __maybe_unused rzg2l_csi2_pm_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops rzg2l_csi2_pm_ops = {
- SET_RUNTIME_PM_OPS(rzg2l_csi2_pm_runtime_suspend, rzg2l_csi2_pm_runtime_resume, NULL)
+ RUNTIME_PM_OPS(rzg2l_csi2_pm_runtime_suspend,
+ rzg2l_csi2_pm_runtime_resume, NULL)
};
static const struct of_device_id rzg2l_csi2_of_table[] = {
@@ -865,7 +872,7 @@ static struct platform_driver rzg2l_csi2_pdrv = {
.driver = {
.name = "rzg2l-csi2",
.of_match_table = rzg2l_csi2_of_table,
- .pm = &rzg2l_csi2_pm_ops,
+ .pm = pm_ptr(&rzg2l_csi2_pm_ops),
},
};
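For reference, the RUNTIME_PM_OPS()/pm_ptr() pairing used above lets the compiler discard both the dev_pm_ops structure and its callbacks when CONFIG_PM is disabled, which is why the __maybe_unused annotations can go. A minimal sketch of the idiom with hypothetical names (not part of the patch):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* Gate clocks, assert resets, etc. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Ungate clocks, deassert resets, etc. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= pm_ptr(&foo_pm_ops),
	},
};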
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c
index 9f351a05893e6..ac8ebae4ed079 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c
@@ -5,6 +5,7 @@
* Copyright (C) 2022 Renesas Electronics Corp.
*/
+#include <linux/delay.h>
#include "rzg2l-cru.h"
struct rzg2l_cru_ip_format {
@@ -71,26 +72,17 @@ static int rzg2l_cru_ip_s_stream(struct v4l2_subdev *sd, int enable)
if (ret)
return ret;
+ fsleep(1000);
+
ret = rzg2l_cru_start_image_processing(cru);
if (ret) {
v4l2_subdev_call(cru->ip.remote, video, post_streamoff);
return ret;
}
- rzg2l_cru_vclk_unprepare(cru);
-
ret = v4l2_subdev_call(cru->ip.remote, video, s_stream, enable);
- if (ret == -ENOIOCTLCMD)
- ret = 0;
- if (!ret) {
- ret = rzg2l_cru_vclk_prepare(cru);
- if (!ret)
- return 0;
- } else {
- /* enable back vclk so that s_stream in error path disables it */
- if (rzg2l_cru_vclk_prepare(cru))
- dev_err(cru->dev, "Failed to enable vclk\n");
- }
+ if (!ret || ret == -ENOIOCTLCMD)
+ return 0;
s_stream_ret = ret;
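The fsleep(1000) added above comes from <linux/delay.h>: it takes a delay in microseconds and internally selects udelay(), usleep_range() or msleep() depending on the requested length, so a roughly 1 ms settling delay needs no manual choice of primitive. A usage sketch:

#include <linux/delay.h>

fsleep(1000);	/* ~1 ms; fsleep() picks the appropriate sleep backend */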
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
index d0ffa90bc6567..b16b8af6e8f8c 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
@@ -430,13 +430,6 @@ int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru)
spin_lock_irqsave(&cru->qlock, flags);
- /* Initialize image convert */
- ret = rzg2l_cru_initialize_image_conv(cru, fmt);
- if (ret) {
- spin_unlock_irqrestore(&cru->qlock, flags);
- return ret;
- }
-
/* Select a video input */
rzg2l_cru_write(cru, CRUnCTRL, CRUnCTRL_VINSEL(0));
@@ -450,6 +443,13 @@ int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru)
/* Initialize the AXI master */
rzg2l_cru_initialize_axi(cru);
+ /* Initialize image convert */
+ ret = rzg2l_cru_initialize_image_conv(cru, fmt);
+ if (ret) {
+ spin_unlock_irqrestore(&cru->qlock, flags);
+ return ret;
+ }
+
/* Enable interrupt */
rzg2l_cru_write(cru, CRUnIE, CRUnIE_EFE);
@@ -461,16 +461,6 @@ int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru)
return 0;
}
-void rzg2l_cru_vclk_unprepare(struct rzg2l_cru_dev *cru)
-{
- clk_disable_unprepare(cru->vclk);
-}
-
-int rzg2l_cru_vclk_prepare(struct rzg2l_cru_dev *cru)
-{
- return clk_prepare_enable(cru->vclk);
-}
-
static int rzg2l_cru_set_stream(struct rzg2l_cru_dev *cru, int on)
{
struct media_pipeline *pipe;
@@ -499,39 +489,24 @@ static int rzg2l_cru_set_stream(struct rzg2l_cru_dev *cru, int on)
video_device_pipeline_stop(&cru->vdev);
- pm_runtime_put_sync(cru->dev);
- clk_disable_unprepare(cru->vclk);
-
return stream_off_ret;
}
- ret = pm_runtime_resume_and_get(cru->dev);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(cru->vclk);
- if (ret)
- goto err_pm_put;
-
ret = rzg2l_cru_mc_validate_format(cru, sd, pad);
if (ret)
- goto err_vclk_disable;
+ return ret;
pipe = media_entity_pipeline(&sd->entity) ? : &cru->vdev.pipe;
ret = video_device_pipeline_start(&cru->vdev, pipe);
if (ret)
- goto err_vclk_disable;
+ return ret;
ret = v4l2_subdev_call(sd, video, pre_streamon, 0);
- if (ret == -ENOIOCTLCMD)
- ret = 0;
- if (ret)
+ if (ret && ret != -ENOIOCTLCMD)
goto pipe_line_stop;
ret = v4l2_subdev_call(sd, video, s_stream, 1);
- if (ret == -ENOIOCTLCMD)
- ret = 0;
- if (ret)
+ if (ret && ret != -ENOIOCTLCMD)
goto err_s_stream;
return 0;
@@ -542,12 +517,6 @@ err_s_stream:
pipe_line_stop:
video_device_pipeline_stop(&cru->vdev);
-err_vclk_disable:
- clk_disable_unprepare(cru->vclk);
-
-err_pm_put:
- pm_runtime_put_sync(cru->dev);
-
return ret;
}
@@ -646,25 +615,33 @@ static int rzg2l_cru_start_streaming_vq(struct vb2_queue *vq, unsigned int count
struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq);
int ret;
+ ret = pm_runtime_resume_and_get(cru->dev);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(cru->vclk);
+ if (ret)
+ goto err_pm_put;
+
/* Release reset state */
ret = reset_control_deassert(cru->aresetn);
if (ret) {
dev_err(cru->dev, "failed to deassert aresetn\n");
- return ret;
+ goto err_vclk_disable;
}
ret = reset_control_deassert(cru->presetn);
if (ret) {
reset_control_assert(cru->aresetn);
dev_err(cru->dev, "failed to deassert presetn\n");
- return ret;
+ goto assert_aresetn;
}
ret = request_irq(cru->image_conv_irq, rzg2l_cru_irq,
IRQF_SHARED, KBUILD_MODNAME, cru);
if (ret) {
dev_err(cru->dev, "failed to request irq\n");
- goto assert_resets;
+ goto assert_presetn;
}
/* Allocate scratch buffer. */
@@ -696,10 +673,18 @@ out:
free_image_conv_irq:
free_irq(cru->image_conv_irq, cru);
-assert_resets:
+assert_presetn:
reset_control_assert(cru->presetn);
+
+assert_aresetn:
reset_control_assert(cru->aresetn);
+err_vclk_disable:
+ clk_disable_unprepare(cru->vclk);
+
+err_pm_put:
+ pm_runtime_put_sync(cru->dev);
+
return ret;
}
@@ -714,9 +699,11 @@ static void rzg2l_cru_stop_streaming_vq(struct vb2_queue *vq)
cru->scratch, cru->scratch_phys);
free_irq(cru->image_conv_irq, cru);
- reset_control_assert(cru->presetn);
-
return_unused_buffers(cru, VB2_BUF_STATE_ERROR);
+
+ reset_control_assert(cru->presetn);
+ clk_disable_unprepare(cru->vclk);
+ pm_runtime_put_sync(cru->dev);
}
static const struct vb2_ops rzg2l_cru_qops = {
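The reworked start/stop streaming paths above acquire resources in order (runtime PM, vclk, aresetn, presetn, IRQ) and release them in reverse on both the error and teardown paths. A condensed sketch of the acquisition ladder, simplified from the code above and not a drop-in replacement:

static int start_streaming_sketch(struct rzg2l_cru_dev *cru)
{
	int ret;

	ret = pm_runtime_resume_and_get(cru->dev);
	if (ret)
		return ret;

	ret = clk_prepare_enable(cru->vclk);
	if (ret)
		goto err_pm_put;

	ret = reset_control_deassert(cru->aresetn);
	if (ret)
		goto err_vclk_disable;

	ret = reset_control_deassert(cru->presetn);
	if (ret)
		goto err_assert_aresetn;

	return 0;

err_assert_aresetn:
	reset_control_assert(cru->aresetn);
err_vclk_disable:
	clk_disable_unprepare(cru->vclk);
err_pm_put:
	pm_runtime_put_sync(cru->dev);
	return ret;
}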
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
index c381c22135a21..2bddb4fa8a5cd 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
@@ -47,13 +47,18 @@ enum rkisp1_plane {
* @fourcc: pixel format
 * @fmt_type: helper field for pixel format
* @uv_swap: if cb cr swapped, for yuv
+ * @yc_swap: if y and cb/cr swapped, for yuv
+ * @byte_swap: if byte pairs are swapped, for raw
* @write_format: defines how YCbCr self picture data is written to memory
- * @output_format: defines sp output format
+ * @output_format: defines the output format (RKISP1_CIF_MI_INIT_MP_OUTPUT_* for
+ * the main path and RKISP1_MI_CTRL_SP_OUTPUT_* for the self path)
* @mbus: the mbus code on the src resizer pad that matches the pixel format
*/
struct rkisp1_capture_fmt_cfg {
u32 fourcc;
- u8 uv_swap;
+ u32 uv_swap : 1;
+ u32 yc_swap : 1;
+ u32 byte_swap : 1;
u32 write_format;
u32 output_format;
u32 mbus;
@@ -94,36 +99,50 @@ static const struct rkisp1_capture_fmt_cfg rkisp1_mp_fmts[] = {
.fourcc = V4L2_PIX_FMT_YUYV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV422,
+ .mbus = MEDIA_BUS_FMT_YUYV8_2X8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .uv_swap = 0,
+ .yc_swap = 1,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV422,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_YUV422P,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV422,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_NV16,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV422,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_NV61,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV422,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_NV16M,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV422,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_NV61M,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV422,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_YVU422M,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV422,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
},
/* yuv400 */
@@ -131,6 +150,7 @@ static const struct rkisp1_capture_fmt_cfg rkisp1_mp_fmts[] = {
.fourcc = V4L2_PIX_FMT_GREY,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV400,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
},
/* yuv420 */
@@ -138,81 +158,107 @@ static const struct rkisp1_capture_fmt_cfg rkisp1_mp_fmts[] = {
.fourcc = V4L2_PIX_FMT_NV21,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV420,
.mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_NV12,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV420,
.mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_NV21M,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV420,
.mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_NV12M,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV420,
.mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_YUV420,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV420,
.mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_YVU420,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV420,
.mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
},
/* raw */
{
.fourcc = V4L2_PIX_FMT_SRGGB8,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_RAW8,
.mbus = MEDIA_BUS_FMT_SRGGB8_1X8,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG8,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_RAW8,
.mbus = MEDIA_BUS_FMT_SGRBG8_1X8,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG8,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_RAW8,
.mbus = MEDIA_BUS_FMT_SGBRG8_1X8,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR8,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_RAW8,
.mbus = MEDIA_BUS_FMT_SBGGR8_1X8,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB10,
+ .byte_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_RAW10,
.mbus = MEDIA_BUS_FMT_SRGGB10_1X10,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG10,
+ .byte_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_RAW10,
.mbus = MEDIA_BUS_FMT_SGRBG10_1X10,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG10,
+ .byte_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_RAW10,
.mbus = MEDIA_BUS_FMT_SGBRG10_1X10,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR10,
+ .byte_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_RAW10,
.mbus = MEDIA_BUS_FMT_SBGGR10_1X10,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB12,
+ .byte_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_RAW12,
.mbus = MEDIA_BUS_FMT_SRGGB12_1X12,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG12,
+ .byte_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_RAW12,
.mbus = MEDIA_BUS_FMT_SGRBG12_1X12,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG12,
+ .byte_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_RAW12,
.mbus = MEDIA_BUS_FMT_SGBRG12_1X12,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR12,
+ .byte_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ .output_format = RKISP1_CIF_MI_INIT_MP_OUTPUT_RAW12,
.mbus = MEDIA_BUS_FMT_SBGGR12_1X12,
},
};
@@ -230,6 +276,13 @@ static const struct rkisp1_capture_fmt_cfg rkisp1_sp_fmts[] = {
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .uv_swap = 0,
+ .yc_swap = 1,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_INT,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+ .mbus = MEDIA_BUS_FMT_YUYV8_2X8,
+ }, {
.fourcc = V4L2_PIX_FMT_YUV422P,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
@@ -442,6 +495,14 @@ static void rkisp1_mp_config(struct rkisp1_capture *cap)
rkisp1_write(rkisp1, cap->config->mi.cr_size_init,
rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CR));
+ if (rkisp1_has_feature(rkisp1, MAIN_STRIDE)) {
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_MP_Y_LLENGTH, cap->stride);
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_MP_Y_PIC_WIDTH, pixm->width);
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_MP_Y_PIC_HEIGHT, pixm->height);
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_MP_Y_PIC_SIZE,
+ cap->stride * pixm->height);
+ }
+
rkisp1_irq_frame_end_enable(cap);
/* set uv swapping for semiplanar formats */
@@ -454,6 +515,25 @@ static void rkisp1_mp_config(struct rkisp1_capture *cap)
rkisp1_write(rkisp1, RKISP1_CIF_MI_XTD_FORMAT_CTRL, reg);
}
+ /*
+ * U/V swapping with the MI_XTD_FORMAT_CTRL register only works for
+ * NV12/NV21 and NV16/NV61, so instead use byte swap to support UYVY.
+ * YVYU and VYUY cannot be supported with this method.
+ */
+ if (rkisp1_has_feature(rkisp1, MAIN_STRIDE)) {
+ reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_OUTPUT_ALIGN_FORMAT);
+ if (cap->pix.cfg->yc_swap || cap->pix.cfg->byte_swap)
+ reg |= RKISP1_CIF_OUTPUT_ALIGN_FORMAT_MP_BYTE_SWAP_BYTES;
+ else
+ reg &= ~RKISP1_CIF_OUTPUT_ALIGN_FORMAT_MP_BYTE_SWAP_BYTES;
+
+ reg |= RKISP1_CIF_OUTPUT_ALIGN_FORMAT_MP_LSB_ALIGNMENT;
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_OUTPUT_ALIGN_FORMAT, reg);
+
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_INIT,
+ cap->pix.cfg->output_format);
+ }
+
rkisp1_mi_config_ctrl(cap);
reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_CTRL);
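As the comment above notes, UYVY is derived from the ISP's YUYV output purely by swapping the two bytes inside each 16-bit word. For two horizontally adjacent pixels the memory layout becomes:

	YUYV in memory:   Y0 U0 Y1 V0
	after byte swap:  U0 Y0 V0 Y1   (= UYVY)

A byte swap alone cannot reorder the chroma samples, which is why YVYU and VYUY remain unsupported on this path.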
@@ -479,11 +559,11 @@ static void rkisp1_sp_config(struct rkisp1_capture *cap)
rkisp1_write(rkisp1, cap->config->mi.cr_size_init,
rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CR));
- rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_LLENGTH, cap->sp_y_stride);
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_LLENGTH, cap->stride);
rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_PIC_WIDTH, pixm->width);
rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_PIC_HEIGHT, pixm->height);
rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_PIC_SIZE,
- cap->sp_y_stride * pixm->height);
+ cap->stride * pixm->height);
rkisp1_irq_frame_end_enable(cap);
@@ -497,6 +577,20 @@ static void rkisp1_sp_config(struct rkisp1_capture *cap)
rkisp1_write(rkisp1, RKISP1_CIF_MI_XTD_FORMAT_CTRL, reg);
}
+ /*
+ * U/V swapping with the MI_XTD_FORMAT_CTRL register only works for
+ * NV12/NV21 and NV16/NV61, so instead use byte swap to support UYVY.
+ * YVYU and VYUY cannot be supported with this method.
+ */
+ if (rkisp1_has_feature(rkisp1, MAIN_STRIDE)) {
+ reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_OUTPUT_ALIGN_FORMAT);
+ if (cap->pix.cfg->yc_swap)
+ reg |= RKISP1_CIF_OUTPUT_ALIGN_FORMAT_SP_BYTE_SWAP_BYTES;
+ else
+ reg &= ~RKISP1_CIF_OUTPUT_ALIGN_FORMAT_SP_BYTE_SWAP_BYTES;
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_OUTPUT_ALIGN_FORMAT, reg);
+ }
+
rkisp1_mi_config_ctrl(cap);
mi_ctrl = rkisp1_read(rkisp1, RKISP1_CIF_MI_CTRL);
@@ -640,11 +734,13 @@ static void rkisp1_dummy_buf_destroy(struct rkisp1_capture *cap)
static void rkisp1_set_next_buf(struct rkisp1_capture *cap)
{
+ u8 shift = rkisp1_has_feature(cap->rkisp1, DMA_34BIT) ? 2 : 0;
+
cap->buf.curr = cap->buf.next;
cap->buf.next = NULL;
if (!list_empty(&cap->buf.queue)) {
- u32 *buff_addr;
+ dma_addr_t *buff_addr;
cap->buf.next = list_first_entry(&cap->buf.queue, struct rkisp1_buffer, queue);
list_del(&cap->buf.next->queue);
@@ -652,7 +748,7 @@ static void rkisp1_set_next_buf(struct rkisp1_capture *cap)
buff_addr = cap->buf.next->buff_addr;
rkisp1_write(cap->rkisp1, cap->config->mi.y_base_ad_init,
- buff_addr[RKISP1_PLANE_Y]);
+ buff_addr[RKISP1_PLANE_Y] >> shift);
/*
* In order to support grey format we capture
* YUV422 planar format from the camera and
@@ -661,17 +757,17 @@ static void rkisp1_set_next_buf(struct rkisp1_capture *cap)
if (cap->pix.cfg->fourcc == V4L2_PIX_FMT_GREY) {
rkisp1_write(cap->rkisp1,
cap->config->mi.cb_base_ad_init,
- cap->buf.dummy.dma_addr);
+ cap->buf.dummy.dma_addr >> shift);
rkisp1_write(cap->rkisp1,
cap->config->mi.cr_base_ad_init,
- cap->buf.dummy.dma_addr);
+ cap->buf.dummy.dma_addr >> shift);
} else {
rkisp1_write(cap->rkisp1,
cap->config->mi.cb_base_ad_init,
- buff_addr[RKISP1_PLANE_CB]);
+ buff_addr[RKISP1_PLANE_CB] >> shift);
rkisp1_write(cap->rkisp1,
cap->config->mi.cr_base_ad_init,
- buff_addr[RKISP1_PLANE_CR]);
+ buff_addr[RKISP1_PLANE_CR] >> shift);
}
} else {
/*
@@ -679,11 +775,11 @@ static void rkisp1_set_next_buf(struct rkisp1_capture *cap)
* throw data if there is no available buffer.
*/
rkisp1_write(cap->rkisp1, cap->config->mi.y_base_ad_init,
- cap->buf.dummy.dma_addr);
+ cap->buf.dummy.dma_addr >> shift);
rkisp1_write(cap->rkisp1, cap->config->mi.cb_base_ad_init,
- cap->buf.dummy.dma_addr);
+ cap->buf.dummy.dma_addr >> shift);
rkisp1_write(cap->rkisp1, cap->config->mi.cr_base_ad_init,
- cap->buf.dummy.dma_addr);
+ cap->buf.dummy.dma_addr >> shift);
}
/* Set plane offsets */
@@ -722,6 +818,7 @@ irqreturn_t rkisp1_capture_isr(int irq, void *ctx)
{
struct device *dev = ctx;
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
+ unsigned int dev_count = rkisp1_path_count(rkisp1);
unsigned int i;
u32 status;
@@ -734,7 +831,7 @@ irqreturn_t rkisp1_capture_isr(int irq, void *ctx)
rkisp1_write(rkisp1, RKISP1_CIF_MI_ICR, status);
- for (i = 0; i < ARRAY_SIZE(rkisp1->capture_devs); ++i) {
+ for (i = 0; i < dev_count; ++i) {
struct rkisp1_capture *cap = &rkisp1->capture_devs[i];
if (!(status & RKISP1_CIF_MI_FRAME(cap)))
@@ -891,6 +988,7 @@ static void rkisp1_cap_stream_enable(struct rkisp1_capture *cap)
{
struct rkisp1_device *rkisp1 = cap->rkisp1;
struct rkisp1_capture *other = &rkisp1->capture_devs[cap->id ^ 1];
+ bool has_self_path = rkisp1_has_feature(rkisp1, SELF_PATH);
cap->ops->set_data_path(cap);
cap->ops->config(cap);
@@ -899,19 +997,40 @@ static void rkisp1_cap_stream_enable(struct rkisp1_capture *cap)
spin_lock_irq(&cap->buf.lock);
rkisp1_set_next_buf(cap);
cap->ops->enable(cap);
- /* It's safe to configure ACTIVE and SHADOW registers for the
- * first stream. While when the second is starting, do NOT
- * force update because it also updates the first one.
+
+ /*
+ * It's safe to configure ACTIVE and SHADOW registers for the first
+	 * stream. When the second one is starting, however, do NOT force an
+	 * update, because it also updates the first one.
*
- * The latter case would drop one more buffer(that is 2) since
- * there's no buffer in a shadow register when the second FE received.
- * This's also required because the second FE maybe corrupt
- * especially when run at 120fps.
+	 * The latter case would drop one more buffer (that is, two) since
+	 * there's no buffer in a shadow register when the second FE is
+	 * received. This is also required because the second FE may be
+	 * corrupted, especially when running at 120fps.
*/
- if (!other->is_streaming) {
- /* force cfg update */
- rkisp1_write(rkisp1, RKISP1_CIF_MI_INIT,
- RKISP1_CIF_MI_INIT_SOFT_UPD);
+ if (!has_self_path || !other->is_streaming) {
+ u32 reg;
+
+ /*
+ * Force cfg update.
+ *
+			 * The ISP8000 (implementing the MAIN_STRIDE feature) has a
+ * mp_output_format field in the CIF_MI_INIT register that must
+ * be preserved. It can be read back, but it is not clear what
+ * other register bits will return. Mask them out.
+ *
+ * On Rockchip platforms, the CIF_MI_INIT register is marked as
+ * write-only and reads as zeros. We can skip reading it.
+ */
+ if (rkisp1_has_feature(rkisp1, MAIN_STRIDE))
+ reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_INIT)
+ & RKISP1_CIF_MI_INIT_MP_OUTPUT_MASK;
+ else
+ reg = 0;
+
+ reg |= RKISP1_CIF_MI_INIT_SOFT_UPD;
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_INIT, reg);
+
rkisp1_set_next_buf(cap);
}
spin_unlock_irq(&cap->buf.lock);
@@ -1095,8 +1214,8 @@ static const struct vb2_ops rkisp1_vb2_ops = {
*/
static const struct v4l2_format_info *
-rkisp1_fill_pixfmt(struct v4l2_pix_format_mplane *pixm,
- enum rkisp1_stream_id id)
+rkisp1_fill_pixfmt(const struct rkisp1_capture *cap,
+ struct v4l2_pix_format_mplane *pixm)
{
struct v4l2_plane_pix_format *plane_y = &pixm->plane_fmt[0];
const struct v4l2_format_info *info;
@@ -1109,10 +1228,13 @@ rkisp1_fill_pixfmt(struct v4l2_pix_format_mplane *pixm,
/*
* The SP supports custom strides, expressed as a number of pixels for
- * the Y plane. Clamp the stride to a reasonable value to avoid integer
- * overflows when calculating the bytesperline and sizeimage values.
+ * the Y plane, and so does the MP in ISP versions that have the
+ * MAIN_STRIDE feature. Clamp the stride to a reasonable value to avoid
+ * integer overflows when calculating the bytesperline and sizeimage
+ * values.
*/
- if (id == RKISP1_SELFPATH)
+ if (cap->id == RKISP1_SELFPATH ||
+ rkisp1_has_feature(cap->rkisp1, MAIN_STRIDE))
stride = clamp(DIV_ROUND_UP(plane_y->bytesperline, info->bpp[0]),
pixm->width, 65536U);
else
@@ -1147,10 +1269,14 @@ rkisp1_fill_pixfmt(struct v4l2_pix_format_mplane *pixm,
static const struct rkisp1_capture_fmt_cfg *
rkisp1_find_fmt_cfg(const struct rkisp1_capture *cap, const u32 pixelfmt)
{
+ bool yc_swap_support = rkisp1_has_feature(cap->rkisp1, MAIN_STRIDE);
unsigned int i;
for (i = 0; i < cap->config->fmt_size; i++) {
- if (cap->config->fmts[i].fourcc == pixelfmt)
+ const struct rkisp1_capture_fmt_cfg *fmt = &cap->config->fmts[i];
+
+ if (fmt->fourcc == pixelfmt &&
+ (!fmt->yc_swap || yc_swap_support))
return &cap->config->fmts[i];
}
return NULL;
@@ -1187,7 +1313,7 @@ static void rkisp1_try_fmt(const struct rkisp1_capture *cap,
pixm->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
pixm->quantization = V4L2_QUANTIZATION_DEFAULT;
- info = rkisp1_fill_pixfmt(pixm, cap->id);
+ info = rkisp1_fill_pixfmt(cap, pixm);
if (fmt_cfg)
*fmt_cfg = fmt;
@@ -1199,12 +1325,9 @@ static void rkisp1_set_fmt(struct rkisp1_capture *cap,
struct v4l2_pix_format_mplane *pixm)
{
rkisp1_try_fmt(cap, pixm, &cap->pix.cfg, &cap->pix.info);
- cap->pix.fmt = *pixm;
- /* SP supports custom stride in number of pixels of the Y plane */
- if (cap->id == RKISP1_SELFPATH)
- cap->sp_y_stride = pixm->plane_fmt[0].bytesperline /
- cap->pix.info->bpp[0];
+ cap->pix.fmt = *pixm;
+ cap->stride = pixm->plane_fmt[0].bytesperline / cap->pix.info->bpp[0];
}
static int rkisp1_try_fmt_vid_cap_mplane(struct file *file, void *fh,
@@ -1222,23 +1345,29 @@ static int rkisp1_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
{
struct rkisp1_capture *cap = video_drvdata(file);
const struct rkisp1_capture_fmt_cfg *fmt = NULL;
+ bool yc_swap_support = rkisp1_has_feature(cap->rkisp1, MAIN_STRIDE);
unsigned int i, n = 0;
- if (!f->mbus_code) {
- if (f->index >= cap->config->fmt_size)
- return -EINVAL;
+ if (f->index >= cap->config->fmt_size)
+ return -EINVAL;
+ if (!f->mbus_code && yc_swap_support) {
fmt = &cap->config->fmts[f->index];
f->pixelformat = fmt->fourcc;
return 0;
}
for (i = 0; i < cap->config->fmt_size; i++) {
- if (cap->config->fmts[i].mbus != f->mbus_code)
+ fmt = &cap->config->fmts[i];
+
+ if (f->mbus_code && fmt->mbus != f->mbus_code)
+ continue;
+
+ if (!yc_swap_support && fmt->yc_swap)
continue;
if (n++ == f->index) {
- f->pixelformat = cap->config->fmts[i].fourcc;
+ f->pixelformat = fmt->fourcc;
return 0;
}
}
@@ -1501,10 +1630,11 @@ rkisp1_capture_init(struct rkisp1_device *rkisp1, enum rkisp1_stream_id id)
int rkisp1_capture_devs_register(struct rkisp1_device *rkisp1)
{
+ unsigned int dev_count = rkisp1_path_count(rkisp1);
unsigned int i;
int ret;
- for (i = 0; i < ARRAY_SIZE(rkisp1->capture_devs); i++) {
+ for (i = 0; i < dev_count; i++) {
struct rkisp1_capture *cap = &rkisp1->capture_devs[i];
rkisp1_capture_init(rkisp1, i);
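The DMA_34BIT handling in rkisp1_set_next_buf() above programs buffer addresses right-shifted by two, so a 34-bit bus address fits the 32-bit base-address registers provided it is 4-byte aligned; the device is assumed to append the two zero bits again when it uses the value. A worked example of the arithmetic:

	dma_addr_t addr = 0x3d0001000ULL;	/* needs 34 bits */
	u32 reg = addr >> 2;			/* 0xf4000400, fits in 32 bits */
	/* the hardware reconstructs (u64)reg << 2 == 0x3d0001000 */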
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
index b757f75edecf7..26573f6ae5755 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
@@ -24,6 +24,7 @@
#include "rkisp1-regs.h"
struct dentry;
+struct regmap;
/*
* flags on the 'direction' field in struct rkisp1_mbus_info' that indicate
@@ -110,6 +111,10 @@ enum rkisp1_isp_pad {
* enum rkisp1_feature - ISP features
*
* @RKISP1_FEATURE_MIPI_CSI2: The ISP has an internal MIPI CSI-2 receiver
+ * @RKISP1_FEATURE_MAIN_STRIDE: The ISP supports configurable stride on the main path
+ * @RKISP1_FEATURE_SELF_PATH: The ISP has a self path
+ * @RKISP1_FEATURE_DUAL_CROP: The ISP has the dual crop block at the resizer input
+ * @RKISP1_FEATURE_DMA_34BIT: The ISP uses 34-bit DMA addresses
*
* The ISP features are stored in a bitmask in &rkisp1_info.features and allow
* the driver to implement support for features present in some ISP versions
@@ -117,8 +122,15 @@ enum rkisp1_isp_pad {
*/
enum rkisp1_feature {
RKISP1_FEATURE_MIPI_CSI2 = BIT(0),
+ RKISP1_FEATURE_MAIN_STRIDE = BIT(1),
+ RKISP1_FEATURE_SELF_PATH = BIT(2),
+ RKISP1_FEATURE_DUAL_CROP = BIT(3),
+ RKISP1_FEATURE_DMA_34BIT = BIT(4),
};
+#define rkisp1_has_feature(rkisp1, feature) \
+ ((rkisp1)->info->features & RKISP1_FEATURE_##feature)
+
/*
* struct rkisp1_info - Model-specific ISP Information
*
@@ -229,7 +241,7 @@ struct rkisp1_vdev_node {
struct rkisp1_buffer {
struct vb2_v4l2_buffer vb;
struct list_head queue;
- u32 buff_addr[VIDEO_MAX_PLANES];
+ dma_addr_t buff_addr[VIDEO_MAX_PLANES];
};
/*
@@ -263,7 +275,7 @@ struct rkisp1_device;
* handler to stop the streaming by waiting on the 'done' wait queue.
* If the irq handler is not called, the stream is stopped by the callback
* after timeout.
- * @sp_y_stride: the selfpath allows to configure a y stride that is longer than the image width.
+ * @stride: the line stride for the first plane, in pixel units
* @buf.lock: lock to protect buf.queue
* @buf.queue: queued buffer list
* @buf.dummy: dummy space to store dropped data
@@ -284,7 +296,7 @@ struct rkisp1_capture {
bool is_streaming;
bool is_stopping;
wait_queue_head_t done;
- unsigned int sp_y_stride;
+ unsigned int stride;
struct {
/* protects queue, curr and next */
spinlock_t lock;
@@ -435,6 +447,8 @@ struct rkisp1_debug {
* @dev: a pointer to the struct device
* @clk_size: number of clocks
* @clks: array of clocks
+ * @gasket: the gasket - i.MX8MP only
+ * @gasket_id: the gasket ID (0 or 1) - i.MX8MP only
* @v4l2_dev: v4l2_device variable
* @media_dev: media_device variable
* @notifier: a notifier to register on the v4l2-async API to be notified on the sensor
@@ -457,6 +471,8 @@ struct rkisp1_device {
struct device *dev;
unsigned int clk_size;
struct clk_bulk_data clks[RKISP1_MAX_BUS_CLK];
+ struct regmap *gasket;
+ unsigned int gasket_id;
struct v4l2_device v4l2_dev;
struct media_device media_dev;
struct v4l2_async_notifier notifier;
@@ -527,6 +543,19 @@ int rkisp1_cap_enum_mbus_codes(struct rkisp1_capture *cap,
const struct rkisp1_mbus_info *rkisp1_mbus_info_get_by_index(unsigned int index);
/*
+ * rkisp1_path_count - Return the number of paths supported by the device
+ *
+ * Some devices only have a main path, while other devices have both a main path
+ * and a self path. This function returns the number of paths that this device
+ * has, based on the feature flags. It should be used instead of checking
+ * ARRAY_SIZE of capture_devs/resizer_devs.
+ */
+static inline unsigned int rkisp1_path_count(struct rkisp1_device *rkisp1)
+{
+ return rkisp1_has_feature(rkisp1, SELF_PATH) ? 2 : 1;
+}
+
+/*
* rkisp1_sd_adjust_crop_rect - adjust a rectangle to fit into another rectangle.
*
* @crop: rectangle to adjust.
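The rkisp1_has_feature() helper defined in this header is a token-pasting macro, so its second argument is a feature-name fragment rather than a full constant. For one call site the expansion is simply:

	/* rkisp1_has_feature(rkisp1, SELF_PATH) expands to */
	((rkisp1)->info->features & RKISP1_FEATURE_SELF_PATH)

which is exactly what rkisp1_path_count() tests before returning 2 or 1.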
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index 73cf08a740118..bb0202386c701 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
@@ -207,7 +208,7 @@ static int rkisp1_subdev_notifier_register(struct rkisp1_device *rkisp1)
switch (reg) {
case 0:
/* MIPI CSI-2 port */
- if (!(rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2)) {
+ if (!rkisp1_has_feature(rkisp1, MIPI_CSI2)) {
dev_err(rkisp1->dev,
"internal CSI must be available for port 0\n");
ret = -EINVAL;
@@ -358,10 +359,11 @@ static const struct dev_pm_ops rkisp1_pm_ops = {
static int rkisp1_create_links(struct rkisp1_device *rkisp1)
{
+ unsigned int dev_count = rkisp1_path_count(rkisp1);
unsigned int i;
int ret;
- if (rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2) {
+ if (rkisp1_has_feature(rkisp1, MIPI_CSI2)) {
/* Link the CSI receiver to the ISP. */
ret = media_create_pad_link(&rkisp1->csi.sd.entity,
RKISP1_CSI_PAD_SRC,
@@ -373,7 +375,7 @@ static int rkisp1_create_links(struct rkisp1_device *rkisp1)
}
/* create ISP->RSZ->CAP links */
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < dev_count; i++) {
struct media_entity *resizer =
&rkisp1->resizer_devs[i].sd.entity;
struct media_entity *capture =
@@ -413,7 +415,7 @@ static int rkisp1_create_links(struct rkisp1_device *rkisp1)
static void rkisp1_entities_unregister(struct rkisp1_device *rkisp1)
{
- if (rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2)
+ if (rkisp1_has_feature(rkisp1, MIPI_CSI2))
rkisp1_csi_unregister(rkisp1);
rkisp1_params_unregister(rkisp1);
rkisp1_stats_unregister(rkisp1);
@@ -446,7 +448,7 @@ static int rkisp1_entities_register(struct rkisp1_device *rkisp1)
if (ret)
goto error;
- if (rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2) {
+ if (rkisp1_has_feature(rkisp1, MIPI_CSI2)) {
ret = rkisp1_csi_register(rkisp1);
if (ret)
goto error;
@@ -505,7 +507,9 @@ static const struct rkisp1_info px30_isp_info = {
.isrs = px30_isp_isrs,
.isr_size = ARRAY_SIZE(px30_isp_isrs),
.isp_ver = RKISP1_V12,
- .features = RKISP1_FEATURE_MIPI_CSI2,
+ .features = RKISP1_FEATURE_MIPI_CSI2
+ | RKISP1_FEATURE_SELF_PATH
+ | RKISP1_FEATURE_DUAL_CROP,
};
static const char * const rk3399_isp_clks[] = {
@@ -524,7 +528,29 @@ static const struct rkisp1_info rk3399_isp_info = {
.isrs = rk3399_isp_isrs,
.isr_size = ARRAY_SIZE(rk3399_isp_isrs),
.isp_ver = RKISP1_V10,
- .features = RKISP1_FEATURE_MIPI_CSI2,
+ .features = RKISP1_FEATURE_MIPI_CSI2
+ | RKISP1_FEATURE_SELF_PATH
+ | RKISP1_FEATURE_DUAL_CROP,
+};
+
+static const char * const imx8mp_isp_clks[] = {
+ "isp",
+ "hclk",
+ "aclk",
+};
+
+static const struct rkisp1_isr_data imx8mp_isp_isrs[] = {
+ { NULL, rkisp1_isr, BIT(RKISP1_IRQ_ISP) | BIT(RKISP1_IRQ_MI) },
+};
+
+static const struct rkisp1_info imx8mp_isp_info = {
+ .clks = imx8mp_isp_clks,
+ .clk_size = ARRAY_SIZE(imx8mp_isp_clks),
+ .isrs = imx8mp_isp_isrs,
+ .isr_size = ARRAY_SIZE(imx8mp_isp_isrs),
+ .isp_ver = RKISP1_V_IMX8MP,
+ .features = RKISP1_FEATURE_MAIN_STRIDE
+ | RKISP1_FEATURE_DMA_34BIT,
};
static const struct of_device_id rkisp1_of_match[] = {
@@ -536,6 +562,10 @@ static const struct of_device_id rkisp1_of_match[] = {
.compatible = "rockchip,rk3399-cif-isp",
.data = &rk3399_isp_info,
},
+ {
+ .compatible = "fsl,imx8mp-isp",
+ .data = &imx8mp_isp_info,
+ },
{},
};
MODULE_DEVICE_TABLE(of, rkisp1_of_match);
@@ -547,6 +577,7 @@ static int rkisp1_probe(struct platform_device *pdev)
struct rkisp1_device *rkisp1;
struct v4l2_device *v4l2_dev;
unsigned int i;
+ u64 dma_mask;
int ret, irq;
u32 cif_id;
@@ -560,6 +591,13 @@ static int rkisp1_probe(struct platform_device *pdev)
dev_set_drvdata(dev, rkisp1);
rkisp1->dev = dev;
+ dma_mask = rkisp1_has_feature(rkisp1, DMA_34BIT) ? DMA_BIT_MASK(34) :
+ DMA_BIT_MASK(32);
+
+ ret = dma_set_mask_and_coherent(dev, dma_mask);
+ if (ret)
+ return ret;
+
mutex_init(&rkisp1->stream_lock);
rkisp1->base_addr = devm_platform_ioremap_resource(pdev, 0);
@@ -596,6 +634,21 @@ static int rkisp1_probe(struct platform_device *pdev)
return ret;
rkisp1->clk_size = info->clk_size;
+ if (info->isp_ver == RKISP1_V_IMX8MP) {
+ unsigned int id;
+
+ rkisp1->gasket = syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "fsl,blk-ctrl",
+ 1, &id);
+ if (IS_ERR(rkisp1->gasket)) {
+ ret = PTR_ERR(rkisp1->gasket);
+ dev_err(dev, "failed to get gasket: %d\n", ret);
+ return ret;
+ }
+
+ rkisp1->gasket_id = id;
+ }
+
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
@@ -650,7 +703,7 @@ static int rkisp1_probe(struct platform_device *pdev)
err_unreg_entities:
rkisp1_entities_unregister(rkisp1);
err_cleanup_csi:
- if (rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2)
+ if (rkisp1_has_feature(rkisp1, MIPI_CSI2))
rkisp1_csi_cleanup(rkisp1);
err_unreg_media_dev:
media_device_unregister(&rkisp1->media_dev);
@@ -671,7 +724,7 @@ static void rkisp1_remove(struct platform_device *pdev)
v4l2_async_nf_cleanup(&rkisp1->notifier);
rkisp1_entities_unregister(rkisp1);
- if (rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2)
+ if (rkisp1_has_feature(rkisp1, MIPI_CSI2))
rkisp1_csi_cleanup(rkisp1);
rkisp1_debug_cleanup(rkisp1);
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
index 78a1f7a1499be..e45a213baf497 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
@@ -10,6 +10,7 @@
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
@@ -53,6 +54,115 @@
* +---------------------------------------------------------+
*/
+/* -----------------------------------------------------------------------------
+ * Media block control (i.MX8MP only)
+ */
+
+#define ISP_DEWARP_CONTROL 0x0138
+
+#define ISP_DEWARP_CONTROL_MIPI_CSI2_HS_POLARITY BIT(22)
+#define ISP_DEWARP_CONTROL_MIPI_CSI2_VS_SEL_RISING (0 << 20)
+#define ISP_DEWARP_CONTROL_MIPI_CSI2_VS_SEL_NEGATIVE (1 << 20)
+#define ISP_DEWARP_CONTROL_MIPI_CSI2_VS_SEL_POSITIVE (2 << 20)
+#define ISP_DEWARP_CONTROL_MIPI_CSI2_VS_SEL_FALLING (3 << 20)
+#define ISP_DEWARP_CONTROL_MIPI_CSI2_VS_SEL_MASK GENMASK(21, 20)
+#define ISP_DEWARP_CONTROL_MIPI_ISP2_LEFT_JUST_MODE BIT(19)
+#define ISP_DEWARP_CONTROL_MIPI_ISP2_DATA_TYPE(dt) ((dt) << 13)
+#define ISP_DEWARP_CONTROL_MIPI_ISP2_DATA_TYPE_MASK GENMASK(18, 13)
+
+#define ISP_DEWARP_CONTROL_MIPI_CSI1_HS_POLARITY BIT(12)
+#define ISP_DEWARP_CONTROL_MIPI_CSI1_VS_SEL_RISING (0 << 10)
+#define ISP_DEWARP_CONTROL_MIPI_CSI1_VS_SEL_NEGATIVE (1 << 10)
+#define ISP_DEWARP_CONTROL_MIPI_CSI1_VS_SEL_POSITIVE (2 << 10)
+#define ISP_DEWARP_CONTROL_MIPI_CSI1_VS_SEL_FALLING (3 << 10)
+#define ISP_DEWARP_CONTROL_MIPI_CSI1_VS_SEL_MASK GENMASK(11, 10)
+#define ISP_DEWARP_CONTROL_MIPI_ISP1_LEFT_JUST_MODE BIT(9)
+#define ISP_DEWARP_CONTROL_MIPI_ISP1_DATA_TYPE(dt) ((dt) << 3)
+#define ISP_DEWARP_CONTROL_MIPI_ISP1_DATA_TYPE_MASK GENMASK(8, 3)
+
+#define ISP_DEWARP_CONTROL_GPR_ISP_1_DISABLE BIT(1)
+#define ISP_DEWARP_CONTROL_GPR_ISP_0_DISABLE BIT(0)
+
+static int rkisp1_gasket_enable(struct rkisp1_device *rkisp1,
+ struct media_pad *source)
+{
+ struct v4l2_subdev *source_sd;
+ struct v4l2_mbus_frame_desc fd;
+ unsigned int dt;
+ u32 mask;
+ u32 val;
+ int ret;
+
+ /*
+ * Configure and enable the gasket with the CSI-2 data type. Set the
+ * vsync polarity as active high, as that is what the ISP is configured
+ * to expect in ISP_ACQ_PROP. Enable left justification, as the i.MX8MP
+ * ISP has a 16-bit wide input and expects data to be left-aligned.
+ */
+
+ source_sd = media_entity_to_v4l2_subdev(source->entity);
+ ret = v4l2_subdev_call(source_sd, pad, get_frame_desc,
+ source->index, &fd);
+ if (ret) {
+ dev_err(rkisp1->dev,
+ "failed to get frame descriptor from '%s':%u: %d\n",
+ source_sd->name, 0, ret);
+ return ret;
+ }
+
+ if (fd.num_entries != 1) {
+ dev_err(rkisp1->dev, "invalid frame descriptor for '%s':%u\n",
+ source_sd->name, 0);
+ return -EINVAL;
+ }
+
+ dt = fd.entry[0].bus.csi2.dt;
+
+ if (rkisp1->gasket_id == 0) {
+ mask = ISP_DEWARP_CONTROL_MIPI_CSI1_HS_POLARITY
+ | ISP_DEWARP_CONTROL_MIPI_CSI1_VS_SEL_MASK
+ | ISP_DEWARP_CONTROL_MIPI_ISP1_LEFT_JUST_MODE
+ | ISP_DEWARP_CONTROL_MIPI_ISP1_DATA_TYPE_MASK
+ | ISP_DEWARP_CONTROL_GPR_ISP_0_DISABLE;
+ val = ISP_DEWARP_CONTROL_MIPI_CSI1_VS_SEL_POSITIVE
+ | ISP_DEWARP_CONTROL_MIPI_ISP1_LEFT_JUST_MODE
+ | ISP_DEWARP_CONTROL_MIPI_ISP1_DATA_TYPE(dt);
+ } else {
+ mask = ISP_DEWARP_CONTROL_MIPI_CSI2_HS_POLARITY
+ | ISP_DEWARP_CONTROL_MIPI_CSI2_VS_SEL_MASK
+ | ISP_DEWARP_CONTROL_MIPI_ISP2_LEFT_JUST_MODE
+ | ISP_DEWARP_CONTROL_MIPI_ISP2_DATA_TYPE_MASK
+ | ISP_DEWARP_CONTROL_GPR_ISP_1_DISABLE;
+ val = ISP_DEWARP_CONTROL_MIPI_CSI2_VS_SEL_POSITIVE
+ | ISP_DEWARP_CONTROL_MIPI_ISP2_LEFT_JUST_MODE
+ | ISP_DEWARP_CONTROL_MIPI_ISP2_DATA_TYPE(dt);
+ }
+
+ regmap_update_bits(rkisp1->gasket, ISP_DEWARP_CONTROL, mask, val);
+
+ return 0;
+}
+
+static void rkisp1_gasket_disable(struct rkisp1_device *rkisp1)
+{
+ u32 mask;
+ u32 val;
+
+ if (rkisp1->gasket_id == 1) {
+ mask = ISP_DEWARP_CONTROL_MIPI_ISP2_LEFT_JUST_MODE
+ | ISP_DEWARP_CONTROL_MIPI_ISP2_DATA_TYPE_MASK
+ | ISP_DEWARP_CONTROL_GPR_ISP_1_DISABLE;
+ val = ISP_DEWARP_CONTROL_GPR_ISP_1_DISABLE;
+ } else {
+ mask = ISP_DEWARP_CONTROL_MIPI_ISP1_LEFT_JUST_MODE
+ | ISP_DEWARP_CONTROL_MIPI_ISP1_DATA_TYPE_MASK
+ | ISP_DEWARP_CONTROL_GPR_ISP_0_DISABLE;
+ val = ISP_DEWARP_CONTROL_GPR_ISP_0_DISABLE;
+ }
+
+ regmap_update_bits(rkisp1->gasket, ISP_DEWARP_CONTROL, mask, val);
+}
+
/* ----------------------------------------------------------------------------
* Camera Interface registers configurations
*/
@@ -291,6 +401,9 @@ static void rkisp1_isp_stop(struct rkisp1_isp *isp)
RKISP1_CIF_VI_IRCL_MIPI_SW_RST |
RKISP1_CIF_VI_IRCL_ISP_SW_RST);
rkisp1_write(rkisp1, RKISP1_CIF_VI_IRCL, 0x0);
+
+ if (rkisp1->info->isp_ver == RKISP1_V_IMX8MP)
+ rkisp1_gasket_disable(rkisp1);
}
static void rkisp1_config_clk(struct rkisp1_isp *isp)
@@ -315,16 +428,24 @@ static void rkisp1_config_clk(struct rkisp1_isp *isp)
}
}
-static void rkisp1_isp_start(struct rkisp1_isp *isp,
- struct v4l2_subdev_state *sd_state)
+static int rkisp1_isp_start(struct rkisp1_isp *isp,
+ struct v4l2_subdev_state *sd_state,
+ struct media_pad *source)
{
struct rkisp1_device *rkisp1 = isp->rkisp1;
const struct v4l2_mbus_framefmt *src_fmt;
const struct rkisp1_mbus_info *src_info;
u32 val;
+ int ret;
rkisp1_config_clk(isp);
+ if (rkisp1->info->isp_ver == RKISP1_V_IMX8MP) {
+ ret = rkisp1_gasket_enable(rkisp1, source);
+ if (ret)
+ return ret;
+ }
+
/* Activate ISP */
val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_CTRL);
val |= RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD |
@@ -338,6 +459,8 @@ static void rkisp1_isp_start(struct rkisp1_isp *isp,
if (src_info->pixel_enc != V4L2_PIXEL_ENC_BAYER)
rkisp1_params_post_configure(&rkisp1->params);
+
+ return 0;
}
/* ----------------------------------------------------------------------------
@@ -848,7 +971,9 @@ static int rkisp1_isp_s_stream(struct v4l2_subdev *sd, int enable)
if (ret)
goto out_unlock;
- rkisp1_isp_start(isp, sd_state);
+ ret = rkisp1_isp_start(isp, sd_state, source_pad);
+ if (ret)
+ goto out_unlock;
ret = v4l2_subdev_call(rkisp1->source, video, s_stream, true);
if (ret) {
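The gasket helpers above program ISP_DEWARP_CONTROL through regmap_update_bits(), a locked read-modify-write that touches only the masked bits. Conceptually, ignoring regmap's locking and cache layers, the call reduces to:

	unsigned int tmp;

	regmap_read(map, reg, &tmp);
	tmp = (tmp & ~mask) | (val & mask);
	regmap_write(map, reg, tmp);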
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
index bea69a0d766ac..fccf4c17ee8d0 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
@@ -144,6 +144,15 @@
/* MI_INIT */
#define RKISP1_CIF_MI_INIT_SKIP BIT(2)
#define RKISP1_CIF_MI_INIT_SOFT_UPD BIT(4)
+#define RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV400 (0 << 5)
+#define RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV420 (1 << 5)
+#define RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV422 (2 << 5)
+#define RKISP1_CIF_MI_INIT_MP_OUTPUT_YUV444 (3 << 5)
+#define RKISP1_CIF_MI_INIT_MP_OUTPUT_RAW12 (4 << 5)
+#define RKISP1_CIF_MI_INIT_MP_OUTPUT_RAW8 (5 << 5)
+#define RKISP1_CIF_MI_INIT_MP_OUTPUT_JPEG (6 << 5)
+#define RKISP1_CIF_MI_INIT_MP_OUTPUT_RAW10 (7 << 5)
+#define RKISP1_CIF_MI_INIT_MP_OUTPUT_MASK (15 << 5)
/* MI_CTRL_SHD */
#define RKISP1_CIF_MI_CTRL_SHD_MP_IN_ENABLED BIT(0)
@@ -207,6 +216,24 @@
#define RKISP1_CIF_MI_XTD_FMT_CTRL_SP_CB_CR_SWAP BIT(1)
#define RKISP1_CIF_MI_XTD_FMT_CTRL_DMA_CB_CR_SWAP BIT(2)
+/* MI_OUTPUT_ALIGN_FORMAT */
+#define RKISP1_CIF_OUTPUT_ALIGN_FORMAT_MP_LSB_ALIGNMENT BIT(0)
+#define RKISP1_CIF_OUTPUT_ALIGN_FORMAT_MP_BYTE_SWAP_BYTES BIT(1)
+#define RKISP1_CIF_OUTPUT_ALIGN_FORMAT_MP_BYTE_SWAP_WORDS BIT(2)
+#define RKISP1_CIF_OUTPUT_ALIGN_FORMAT_MP_BYTE_SWAP_DWORDS BIT(3)
+#define RKISP1_CIF_OUTPUT_ALIGN_FORMAT_SP_BYTE_SWAP_BYTES BIT(4)
+#define RKISP1_CIF_OUTPUT_ALIGN_FORMAT_SP_BYTE_SWAP_WORDS BIT(5)
+#define RKISP1_CIF_OUTPUT_ALIGN_FORMAT_SP_BYTE_SWAP_DWORDS BIT(6)
+#define RKISP1_CIF_OUTPUT_ALIGN_FORMAT_DMA_BYTE_SWAP_BYTES BIT(7)
+#define RKISP1_CIF_OUTPUT_ALIGN_FORMAT_DMA_BYTE_SWAP_WORDS BIT(8)
+#define RKISP1_CIF_OUTPUT_ALIGN_FORMAT_DMA_BYTE_SWAP_DWORDS BIT(9)
+
+/* MI_MP_OUTPUT_FIFO_SIZE */
+#define RKISP1_CIF_MI_MP_OUTPUT_FIFO_SIZE_OUTPUT_FIFO_DEPTH_FULL (0 << 0)
+#define RKISP1_CIF_MI_MP_OUTPUT_FIFO_SIZE_OUTPUT_FIFO_DEPTH_HALF (1 << 0)
+#define RKISP1_CIF_MI_MP_OUTPUT_FIFO_SIZE_OUTPUT_FIFO_DEPTH_QUARTER (2 << 0)
+#define RKISP1_CIF_MI_MP_OUTPUT_FIFO_SIZE_OUTPUT_FIFO_DEPTH_EIGHT (3 << 0)
+
/* VI_CCL */
#define RKISP1_CIF_CCL_CIF_CLK_DIS BIT(2)
/* VI_ISP_CLK_CTRL */
@@ -1000,6 +1027,15 @@
#define RKISP1_CIF_MI_SP_CB_BASE_AD_INIT2 (RKISP1_CIF_MI_BASE + 0x00000140)
#define RKISP1_CIF_MI_SP_CR_BASE_AD_INIT2 (RKISP1_CIF_MI_BASE + 0x00000144)
#define RKISP1_CIF_MI_XTD_FORMAT_CTRL (RKISP1_CIF_MI_BASE + 0x00000148)
+#define RKISP1_CIF_MI_MP_HANDSHAKE_0 (RKISP1_CIF_MI_BASE + 0x0000014C)
+#define RKISP1_CIF_MI_MP_Y_LLENGTH (RKISP1_CIF_MI_BASE + 0x00000150)
+#define RKISP1_CIF_MI_MP_Y_SLICE_OFFSET (RKISP1_CIF_MI_BASE + 0x00000154)
+#define RKISP1_CIF_MI_MP_C_SLICE_OFFSET (RKISP1_CIF_MI_BASE + 0x00000158)
+#define RKISP1_CIF_MI_OUTPUT_ALIGN_FORMAT (RKISP1_CIF_MI_BASE + 0x0000015C)
+#define RKISP1_CIF_MI_MP_OUTPUT_FIFO_SIZE (RKISP1_CIF_MI_BASE + 0x00000160)
+#define RKISP1_CIF_MI_MP_Y_PIC_WIDTH (RKISP1_CIF_MI_BASE + 0x00000164)
+#define RKISP1_CIF_MI_MP_Y_PIC_HEIGHT (RKISP1_CIF_MI_BASE + 0x00000168)
+#define RKISP1_CIF_MI_MP_Y_PIC_SIZE (RKISP1_CIF_MI_BASE + 0x0000016C)
#define RKISP1_CIF_SMIA_BASE 0x00001a00
#define RKISP1_CIF_SMIA_CTRL (RKISP1_CIF_SMIA_BASE + 0x00000000)
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
index a8e3777013023..6f3931ca5b51a 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
@@ -444,11 +444,12 @@ static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
sink_fmt = v4l2_subdev_state_get_format(sd_state, RKISP1_RSZ_PAD_SINK);
sink_crop = v4l2_subdev_state_get_crop(sd_state, RKISP1_RSZ_PAD_SINK);
- /* Not crop for MP bayer raw data */
+	/* Do not crop for MP bayer raw data, or for devices lacking dual crop. */
mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
- if (rsz->id == RKISP1_MAINPATH &&
- mbus_info->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
+ if ((rsz->id == RKISP1_MAINPATH &&
+ mbus_info->pixel_enc == V4L2_PIXEL_ENC_BAYER) ||
+ !rkisp1_has_feature(rsz->rkisp1, DUAL_CROP)) {
sink_crop->left = 0;
sink_crop->top = 0;
sink_crop->width = sink_fmt->width;
@@ -631,21 +632,24 @@ static int rkisp1_rsz_s_stream(struct v4l2_subdev *sd, int enable)
struct rkisp1_device *rkisp1 = rsz->rkisp1;
struct rkisp1_capture *other = &rkisp1->capture_devs[rsz->id ^ 1];
enum rkisp1_shadow_regs_when when = RKISP1_SHADOW_REGS_SYNC;
+ bool has_self_path = rkisp1_has_feature(rkisp1, SELF_PATH);
struct v4l2_subdev_state *sd_state;
if (!enable) {
- rkisp1_dcrop_disable(rsz, RKISP1_SHADOW_REGS_ASYNC);
+ if (rkisp1_has_feature(rkisp1, DUAL_CROP))
+ rkisp1_dcrop_disable(rsz, RKISP1_SHADOW_REGS_ASYNC);
rkisp1_rsz_disable(rsz, RKISP1_SHADOW_REGS_ASYNC);
return 0;
}
- if (other->is_streaming)
+ if (has_self_path && other->is_streaming)
when = RKISP1_SHADOW_REGS_ASYNC;
sd_state = v4l2_subdev_lock_and_get_active_state(sd);
rkisp1_rsz_config(rsz, sd_state, when);
- rkisp1_dcrop_config(rsz, sd_state);
+ if (rkisp1_has_feature(rkisp1, DUAL_CROP))
+ rkisp1_dcrop_config(rsz, sd_state);
v4l2_subdev_unlock_state(sd_state);
@@ -731,10 +735,11 @@ err_entity_cleanup:
int rkisp1_resizer_devs_register(struct rkisp1_device *rkisp1)
{
+ unsigned int dev_count = rkisp1_path_count(rkisp1);
unsigned int i;
int ret;
- for (i = 0; i < ARRAY_SIZE(rkisp1->resizer_devs); i++) {
+ for (i = 0; i < dev_count; i++) {
struct rkisp1_resizer *rsz = &rkisp1->resizer_devs[i];
rsz->rkisp1 = rkisp1;
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-capture.c b/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
index 05cafba1c7287..ffa4ea21387da 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
@@ -180,7 +180,7 @@ void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf)
struct fimc_vid_cap *cap = &fimc->vid_cap;
struct fimc_pipeline *p = to_fimc_pipeline(cap->ve.pipe);
struct v4l2_subdev *csis = p->subdevs[IDX_CSIS];
- struct fimc_frame *f = &cap->ctx->d_frame;
+ const struct fimc_frame *f = &cap->ctx->d_frame;
struct fimc_vid_buffer *v_buf;
if (test_and_clear_bit(ST_CAPT_SHUT, &fimc->state)) {
@@ -342,8 +342,8 @@ static int queue_setup(struct vb2_queue *vq,
unsigned int sizes[], struct device *alloc_devs[])
{
struct fimc_ctx *ctx = vq->drv_priv;
- struct fimc_frame *frame = &ctx->d_frame;
- struct fimc_fmt *fmt = frame->fmt;
+ const struct fimc_frame *frame = &ctx->d_frame;
+ const struct fimc_fmt *fmt = frame->fmt;
unsigned long wh = frame->f_width * frame->f_height;
int i;
@@ -559,18 +559,18 @@ static const struct v4l2_file_operations fimc_capture_fops = {
* Format and crop negotiation helpers
*/
-static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
- u32 *width, u32 *height,
- u32 *code, u32 *fourcc, int pad)
+static const struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
+ u32 *width, u32 *height,
+ u32 *code, u32 *fourcc, int pad)
{
bool rotation = ctx->rotation == 90 || ctx->rotation == 270;
struct fimc_dev *fimc = ctx->fimc_dev;
const struct fimc_variant *var = fimc->variant;
const struct fimc_pix_limit *pl = var->pix_limit;
- struct fimc_frame *dst = &ctx->d_frame;
+ const struct fimc_frame *dst = &ctx->d_frame;
u32 depth, min_w, max_w, min_h, align_h = 3;
+ const struct fimc_fmt *ffmt;
u32 mask = FMT_FLAGS_CAM;
- struct fimc_fmt *ffmt;
/* Conversion from/to JPEG or User Defined format is not supported */
if (code && ctx->s_frame.fmt && pad == FIMC_SD_PAD_SOURCE &&
@@ -644,7 +644,7 @@ static void fimc_capture_try_selection(struct fimc_ctx *ctx,
struct fimc_dev *fimc = ctx->fimc_dev;
const struct fimc_variant *var = fimc->variant;
const struct fimc_pix_limit *pl = var->pix_limit;
- struct fimc_frame *sink = &ctx->s_frame;
+ const struct fimc_frame *sink = &ctx->s_frame;
u32 max_w, max_h, min_w = 0, min_h = 0, min_sz;
u32 align_sz = 0, align_h = 4;
u32 max_sc_h, max_sc_v;
@@ -722,7 +722,7 @@ static int fimc_cap_querycap(struct file *file, void *priv,
static int fimc_cap_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct fimc_fmt *fmt;
+ const struct fimc_fmt *fmt;
fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM | FMT_FLAGS_M2M,
f->index);
@@ -757,7 +757,7 @@ static struct media_entity *fimc_pipeline_get_head(struct media_entity *me)
*/
static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
struct v4l2_mbus_framefmt *tfmt,
- struct fimc_fmt **fmt_id,
+ const struct fimc_fmt **fmt_id,
bool set)
{
struct fimc_dev *fimc = ctx->fimc_dev;
@@ -768,8 +768,8 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
: V4L2_SUBDEV_FORMAT_TRY,
};
struct v4l2_mbus_framefmt *mf = &sfmt.format;
+ const struct fimc_fmt *ffmt;
struct media_entity *me;
- struct fimc_fmt *ffmt;
struct media_pad *pad;
int ret, i = 1;
u32 fcc;
@@ -903,8 +903,8 @@ static int fimc_cap_g_fmt_mplane(struct file *file, void *fh,
*/
static int __video_try_or_set_format(struct fimc_dev *fimc,
struct v4l2_format *f, bool try,
- struct fimc_fmt **inp_fmt,
- struct fimc_fmt **out_fmt)
+ const struct fimc_fmt **inp_fmt,
+ const struct fimc_fmt **out_fmt)
{
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
struct fimc_vid_cap *vc = &fimc->vid_cap;
@@ -986,7 +986,7 @@ static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
struct fimc_dev *fimc = video_drvdata(file);
- struct fimc_fmt *out_fmt = NULL, *inp_fmt = NULL;
+ const struct fimc_fmt *out_fmt = NULL, *inp_fmt = NULL;
return __video_try_or_set_format(fimc, f, true, &inp_fmt, &out_fmt);
}
@@ -1010,9 +1010,9 @@ static int __fimc_capture_set_format(struct fimc_dev *fimc,
{
struct fimc_vid_cap *vc = &fimc->vid_cap;
struct fimc_ctx *ctx = vc->ctx;
- struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ const struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
struct fimc_frame *ff = &ctx->d_frame;
- struct fimc_fmt *inp_fmt = NULL;
+ const struct fimc_fmt *inp_fmt = NULL;
int ret, i;
if (vb2_is_busy(&fimc->vid_cap.vbq))
@@ -1132,7 +1132,7 @@ static int fimc_pipeline_validate(struct fimc_dev *fimc)
/* Don't call FIMC subdev operation to avoid nested locking */
if (sd == &vc->subdev) {
- struct fimc_frame *ff = &vc->ctx->s_frame;
+ const struct fimc_frame *ff = &vc->ctx->s_frame;
sink_fmt.format.width = ff->f_width;
sink_fmt.format.height = ff->f_height;
sink_fmt.format.code = ff->fmt ? ff->fmt->mbus_code : 0;
@@ -1158,7 +1158,7 @@ static int fimc_pipeline_validate(struct fimc_dev *fimc)
if (sd == p->subdevs[IDX_SENSOR] &&
fimc_user_defined_mbus_fmt(src_fmt.format.code)) {
struct v4l2_plane_pix_format plane_fmt[FIMC_MAX_PLANES];
- struct fimc_frame *frame = &vc->ctx->d_frame;
+ const struct fimc_frame *frame = &vc->ctx->d_frame;
unsigned int i;
ret = fimc_get_sensor_frame_desc(sd, plane_fmt,
@@ -1263,7 +1263,7 @@ static int fimc_cap_g_selection(struct file *file, void *fh,
{
struct fimc_dev *fimc = video_drvdata(file);
struct fimc_ctx *ctx = fimc->vid_cap.ctx;
- struct fimc_frame *f = &ctx->s_frame;
+ const struct fimc_frame *f = &ctx->s_frame;
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1460,7 +1460,7 @@ static int fimc_subdev_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- struct fimc_fmt *fmt;
+ const struct fimc_fmt *fmt;
fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, code->index);
if (!fmt)
@@ -1475,7 +1475,7 @@ static int fimc_subdev_get_fmt(struct v4l2_subdev *sd,
{
struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
struct fimc_ctx *ctx = fimc->vid_cap.ctx;
- struct fimc_frame *ff = &ctx->s_frame;
+ const struct fimc_frame *ff = &ctx->s_frame;
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
@@ -1519,7 +1519,7 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
struct fimc_vid_cap *vc = &fimc->vid_cap;
struct fimc_ctx *ctx = vc->ctx;
struct fimc_frame *ff;
- struct fimc_fmt *ffmt;
+ const struct fimc_fmt *ffmt;
dbg("pad%d: code: 0x%x, %dx%d",
fmt->pad, mf->code, mf->width, mf->height);
@@ -1582,7 +1582,7 @@ static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
{
struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
struct fimc_ctx *ctx = fimc->vid_cap.ctx;
- struct fimc_frame *f = &ctx->s_frame;
+ const struct fimc_frame *f = &ctx->s_frame;
struct v4l2_rect *r = &sel->r;
struct v4l2_rect *try_sel;
@@ -1715,9 +1715,9 @@ static int fimc_register_capture_device(struct fimc_dev *fimc,
{
struct video_device *vfd = &fimc->vid_cap.ve.vdev;
struct vb2_queue *q = &fimc->vid_cap.vbq;
- struct fimc_ctx *ctx;
struct fimc_vid_cap *vid_cap;
- struct fimc_fmt *fmt;
+ const struct fimc_fmt *fmt;
+ struct fimc_ctx *ctx;
int ret = -ENOMEM;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
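The hunks above constify the out-parameters of __video_try_or_set_format() together with the callers' locals, and both halves have to change at once: C provides no implicit conversion from "struct fimc_fmt **" to "const struct fimc_fmt **". A minimal, self-contained sketch of that rule, using stand-in types and hypothetical helper names rather than the driver code:

#include <stddef.h>

struct fmt { unsigned int fourcc; };

static const struct fmt table[] = { { 1 }, { 2 } };

/* Finder returning a pointer into a read-only table, as fimc_find_format() now does. */
static const struct fmt *find_fmt(unsigned int idx)
{
	return idx < 2 ? &table[idx] : NULL;
}

/* The out-parameter must be "const struct fmt **"; passing a plain
 * "struct fmt **" here is an incompatible-pointer-type assignment in C,
 * which is why the callers' locals are constified in the same patch. */
static int try_fmt(const struct fmt **out)
{
	*out = find_fmt(0);
	return *out ? 0 : -1;
}

int main(void)
{
	const struct fmt *fmt = NULL;	/* local must be const-qualified too */
	return try_fmt(&fmt);
}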
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-core.c b/drivers/media/platform/samsung/exynos4-is/fimc-core.c
index 0be687b01ce5a..aae74b501a42d 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-core.c
@@ -29,11 +29,11 @@
#include "fimc-reg.h"
#include "media-dev.h"
-static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
+static const char *fimc_clocks[MAX_FIMC_CLOCKS] = {
"sclk_fimc", "fimc"
};
-static struct fimc_fmt fimc_formats[] = {
+static const struct fimc_fmt fimc_formats[] = {
{
.fourcc = V4L2_PIX_FMT_RGB565,
.depth = { 16 },
@@ -180,7 +180,7 @@ static struct fimc_fmt fimc_formats[] = {
},
};
-struct fimc_fmt *fimc_get_format(unsigned int index)
+const struct fimc_fmt *fimc_get_format(unsigned int index)
{
if (index >= ARRAY_SIZE(fimc_formats))
return NULL;
@@ -228,8 +228,8 @@ int fimc_set_scaler_info(struct fimc_ctx *ctx)
const struct fimc_variant *variant = ctx->fimc_dev->variant;
struct device *dev = &ctx->fimc_dev->pdev->dev;
struct fimc_scaler *sc = &ctx->scaler;
- struct fimc_frame *s_frame = &ctx->s_frame;
- struct fimc_frame *d_frame = &ctx->d_frame;
+ const struct fimc_frame *s_frame = &ctx->s_frame;
+ const struct fimc_frame *d_frame = &ctx->d_frame;
int tx, ty, sx, sy;
int ret;
@@ -326,7 +326,7 @@ out:
/* The color format (colplanes, memplanes) must be already configured. */
int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
- struct fimc_frame *frame, struct fimc_addr *addr)
+ const struct fimc_frame *frame, struct fimc_addr *addr)
{
int ret = 0;
u32 pix_size;
@@ -670,7 +670,7 @@ void fimc_alpha_ctrl_update(struct fimc_ctx *ctx)
v4l2_ctrl_unlock(ctrl);
}
-void __fimc_get_format(struct fimc_frame *frame, struct v4l2_format *f)
+void __fimc_get_format(const struct fimc_frame *frame, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
int i;
@@ -695,7 +695,7 @@ void __fimc_get_format(struct fimc_frame *frame, struct v4l2_format *f)
* @height: requested pixel height
* @pix: multi-plane format to adjust
*/
-void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
+void fimc_adjust_mplane_format(const struct fimc_fmt *fmt, u32 width, u32 height,
struct v4l2_pix_format_mplane *pix)
{
u32 bytesperline = 0;
@@ -752,10 +752,11 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
* @mask: the color flags to match
* @index: offset in the fimc_formats array, ignored if negative
*/
-struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
- unsigned int mask, int index)
+const struct fimc_fmt *fimc_find_format(const u32 *pixelformat,
+ const u32 *mbus_code,
+ unsigned int mask, int index)
{
- struct fimc_fmt *fmt, *def_fmt = NULL;
+ const struct fimc_fmt *fmt, *def_fmt = NULL;
unsigned int i;
int id = 0;
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-core.h b/drivers/media/platform/samsung/exynos4-is/fimc-core.h
index 2b0760add0929..63385152a2ffe 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-core.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-core.h
@@ -257,7 +257,7 @@ struct fimc_frame {
unsigned int bytesperline[VIDEO_MAX_PLANES];
struct fimc_addr addr;
struct fimc_dma_offset dma_offset;
- struct fimc_fmt *fmt;
+ const struct fimc_fmt *fmt;
u8 alpha;
};
@@ -515,7 +515,7 @@ static inline void set_frame_crop(struct fimc_frame *f,
f->height = height;
}
-static inline u32 fimc_get_format_depth(struct fimc_fmt *ff)
+static inline u32 fimc_get_format_depth(const struct fimc_fmt *ff)
{
u32 i, depth = 0;
@@ -557,7 +557,7 @@ static inline bool fimc_ctx_state_is_set(u32 mask, struct fimc_ctx *ctx)
return ret;
}
-static inline int tiled_fmt(struct fimc_fmt *fmt)
+static inline int tiled_fmt(const struct fimc_fmt *fmt)
{
return fmt->fourcc == V4L2_PIX_FMT_NV12MT;
}
@@ -575,7 +575,7 @@ static inline bool fimc_user_defined_mbus_fmt(u32 code)
}
/* Return the alpha component bit mask */
-static inline int fimc_get_alpha_mask(struct fimc_fmt *fmt)
+static inline int fimc_get_alpha_mask(const struct fimc_fmt *fmt)
{
switch (fmt->color) {
case FIMC_FMT_RGB444: return 0x0f;
@@ -610,25 +610,24 @@ static inline struct fimc_frame *ctx_get_frame(struct fimc_ctx *ctx,
/* -----------------------------------------------------*/
/* fimc-core.c */
-int fimc_vidioc_enum_fmt_mplane(struct file *file, void *priv,
- struct v4l2_fmtdesc *f);
int fimc_ctrls_create(struct fimc_ctx *ctx);
void fimc_ctrls_delete(struct fimc_ctx *ctx);
void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active);
void fimc_alpha_ctrl_update(struct fimc_ctx *ctx);
-void __fimc_get_format(struct fimc_frame *frame, struct v4l2_format *f);
-void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
+void __fimc_get_format(const struct fimc_frame *frame, struct v4l2_format *f);
+void fimc_adjust_mplane_format(const struct fimc_fmt *fmt, u32 width, u32 height,
struct v4l2_pix_format_mplane *pix);
-struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
- unsigned int mask, int index);
-struct fimc_fmt *fimc_get_format(unsigned int index);
+const struct fimc_fmt *fimc_find_format(const u32 *pixelformat,
+ const u32 *mbus_code,
+ unsigned int mask, int index);
+const struct fimc_fmt *fimc_get_format(unsigned int index);
int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,
int dw, int dh, int rotation);
int fimc_set_scaler_info(struct fimc_ctx *ctx);
int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags);
int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
- struct fimc_frame *frame, struct fimc_addr *addr);
+ const struct fimc_frame *frame, struct fimc_addr *addr);
void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f);
void fimc_set_yuv_order(struct fimc_ctx *ctx);
void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf);
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is.c b/drivers/media/platform/samsung/exynos4-is/fimc-is.c
index a08c87ef6e2d8..39aab667910de 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is.c
@@ -175,7 +175,7 @@ static int fimc_is_parse_sensor_config(struct fimc_is *is, unsigned int index,
return -EINVAL;
}
- ep = of_graph_get_next_endpoint(node, NULL);
+ ep = of_graph_get_endpoint_by_regs(node, 0, -1);
if (!ep)
return -ENXIO;
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c b/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
index 8fa26969c4111..06c4352562b38 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
@@ -40,7 +40,7 @@ static int isp_video_capture_queue_setup(struct vb2_queue *vq,
unsigned int sizes[], struct device *alloc_devs[])
{
struct fimc_isp *isp = vb2_get_drv_priv(vq);
- struct v4l2_pix_format_mplane *vid_fmt = &isp->video_capture.pixfmt;
+ const struct v4l2_pix_format_mplane *vid_fmt = &isp->video_capture.pixfmt;
const struct fimc_fmt *fmt = isp->video_capture.format;
unsigned int wh, i;
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-lite-reg.c b/drivers/media/platform/samsung/exynos4-is/fimc-lite-reg.c
index 57996b4104b46..2483277a6cb0e 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-lite-reg.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-lite-reg.c
@@ -124,7 +124,7 @@ static const u32 src_pixfmt_map[8][3] = {
};
/* Set camera input pixel format and resolution */
-void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f)
+void flite_hw_set_source_format(struct fimc_lite *dev, const struct flite_frame *f)
{
u32 pixelcode = f->fmt->mbus_code;
int i = ARRAY_SIZE(src_pixfmt_map);
@@ -155,7 +155,7 @@ void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f)
}
/* Set the camera host input window offsets (cropping) */
-void flite_hw_set_window_offset(struct fimc_lite *dev, struct flite_frame *f)
+void flite_hw_set_window_offset(struct fimc_lite *dev, const struct flite_frame *f)
{
u32 hoff2, voff2;
u32 cfg;
@@ -186,7 +186,7 @@ static void flite_hw_set_camera_port(struct fimc_lite *dev, int id)
/* Select serial or parallel bus, camera port (A,B) and set signals polarity */
void flite_hw_set_camera_bus(struct fimc_lite *dev,
- struct fimc_source_info *si)
+ const struct fimc_source_info *si)
{
u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
unsigned int flags = si->flags;
@@ -226,7 +226,8 @@ static void flite_hw_set_pack12(struct fimc_lite *dev, int on)
writel(cfg, dev->regs + FLITE_REG_CIODMAFMT);
}
-static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
+static void flite_hw_set_out_order(struct fimc_lite *dev,
+ const struct flite_frame *f)
{
static const u32 pixcode[4][2] = {
{ MEDIA_BUS_FMT_YUYV8_2X8, FLITE_REG_CIODMAFMT_YCBYCR },
@@ -244,7 +245,7 @@ static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
writel(cfg | pixcode[i][1], dev->regs + FLITE_REG_CIODMAFMT);
}
-void flite_hw_set_dma_window(struct fimc_lite *dev, struct flite_frame *f)
+void flite_hw_set_dma_window(struct fimc_lite *dev, const struct flite_frame *f)
{
u32 cfg;
@@ -294,7 +295,7 @@ void flite_hw_mask_dma_buffer(struct fimc_lite *dev, u32 index)
}
/* Enable/disable output DMA, set output pixel size and offsets (composition) */
-void flite_hw_set_output_dma(struct fimc_lite *dev, struct flite_frame *f,
+void flite_hw_set_output_dma(struct fimc_lite *dev, const struct flite_frame *f,
bool enable)
{
u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-lite-reg.h b/drivers/media/platform/samsung/exynos4-is/fimc-lite-reg.h
index c5656e902750c..c5ec36dfb2f9f 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-lite-reg.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-lite-reg.h
@@ -133,15 +133,13 @@ void flite_hw_set_interrupt_mask(struct fimc_lite *dev);
void flite_hw_capture_start(struct fimc_lite *dev);
void flite_hw_capture_stop(struct fimc_lite *dev);
void flite_hw_set_camera_bus(struct fimc_lite *dev,
- struct fimc_source_info *s_info);
-void flite_hw_set_camera_polarity(struct fimc_lite *dev,
- struct fimc_source_info *cam);
-void flite_hw_set_window_offset(struct fimc_lite *dev, struct flite_frame *f);
-void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f);
+ const struct fimc_source_info *s_info);
+void flite_hw_set_window_offset(struct fimc_lite *dev, const struct flite_frame *f);
+void flite_hw_set_source_format(struct fimc_lite *dev, const struct flite_frame *f);
-void flite_hw_set_output_dma(struct fimc_lite *dev, struct flite_frame *f,
+void flite_hw_set_output_dma(struct fimc_lite *dev, const struct flite_frame *f,
bool enable);
-void flite_hw_set_dma_window(struct fimc_lite *dev, struct flite_frame *f);
+void flite_hw_set_dma_window(struct fimc_lite *dev, const struct flite_frame *f);
void flite_hw_set_test_pattern(struct fimc_lite *dev, bool on);
void flite_hw_dump_regs(struct fimc_lite *dev, const char *label);
void flite_hw_set_dma_buffer(struct fimc_lite *dev, struct flite_buffer *buf);
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-lite.c b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
index 7898c9bebb046..d1d860fa3454d 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
@@ -738,7 +738,7 @@ static int fimc_lite_try_fmt_mplane(struct file *file, void *fh,
static int fimc_lite_s_fmt_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ const struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
struct fimc_lite *fimc = video_drvdata(file);
struct flite_frame *frame = &fimc->out_frame;
const struct fimc_fmt *fmt = NULL;
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-lite.h b/drivers/media/platform/samsung/exynos4-is/fimc-lite.h
index ddf29e0b5b1cd..2d96fb00a5c68 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-lite.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-lite.h
@@ -117,8 +117,6 @@ struct flite_buffer {
* @ctrl_handler: v4l2 control handler
* @test_pattern: test pattern controls
* @index: FIMC-LITE platform device index
- * @pipeline: video capture pipeline data structure
- * @pipeline_ops: media pipeline ops for the video node driver
* @slock: spinlock protecting this data structure and the hw registers
* @lock: mutex serializing video device and the subdev operations
* @clock: FIMC-LITE gate clock
@@ -134,7 +132,6 @@ struct flite_buffer {
* @active_buf_q: the queue head of buffers scheduled in hardware
* @vb_queue: vb2 buffers queue
* @buf_index: helps to keep track of the DMA start address register index
- * @active_buf_count: number of video buffers scheduled in hardware
* @frame_count: the captured frames counter
* @reqbufs_count: the number of buffers requested with REQBUFS ioctl
* @events: event info
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-m2m.c b/drivers/media/platform/samsung/exynos4-is/fimc-m2m.c
index df8e2aa454d8f..199997eec1cc2 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-m2m.c
@@ -170,7 +170,7 @@ static int fimc_queue_setup(struct vb2_queue *vq,
unsigned int sizes[], struct device *alloc_devs[])
{
struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
- struct fimc_frame *f;
+ const struct fimc_frame *f;
int i;
f = ctx_get_frame(ctx, vq->type);
@@ -192,7 +192,7 @@ static int fimc_queue_setup(struct vb2_queue *vq,
static int fimc_buf_prepare(struct vb2_buffer *vb)
{
struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- struct fimc_frame *frame;
+ const struct fimc_frame *frame;
int i;
frame = ctx_get_frame(ctx, vb->vb2_queue->type);
@@ -237,7 +237,7 @@ static int fimc_m2m_querycap(struct file *file, void *fh,
static int fimc_m2m_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct fimc_fmt *fmt;
+ const struct fimc_fmt *fmt;
fmt = fimc_find_format(NULL, NULL, get_m2m_fmt_flags(f->type),
f->index);
@@ -252,7 +252,7 @@ static int fimc_m2m_g_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
- struct fimc_frame *frame = ctx_get_frame(ctx, f->type);
+ const struct fimc_frame *frame = ctx_get_frame(ctx, f->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
@@ -266,7 +266,7 @@ static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
struct fimc_dev *fimc = ctx->fimc_dev;
const struct fimc_variant *variant = fimc->variant;
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
- struct fimc_fmt *fmt;
+ const struct fimc_fmt *fmt;
u32 max_w, mod_x, mod_y;
if (!IS_M2M(f->type))
@@ -314,8 +314,9 @@ static int fimc_m2m_try_fmt_mplane(struct file *file, void *fh,
return fimc_try_fmt_mplane(ctx, f);
}
-static void __set_frame_format(struct fimc_frame *frame, struct fimc_fmt *fmt,
- struct v4l2_pix_format_mplane *pixm)
+static void __set_frame_format(struct fimc_frame *frame,
+ const struct fimc_fmt *fmt,
+ const struct v4l2_pix_format_mplane *pixm)
{
int i;
@@ -340,7 +341,7 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_fmt *fmt;
+ const struct fimc_fmt *fmt;
struct vb2_queue *vq;
struct fimc_frame *frame;
int ret;
@@ -378,7 +379,7 @@ static int fimc_m2m_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
- struct fimc_frame *frame;
+ const struct fimc_frame *frame;
frame = ctx_get_frame(ctx, s->type);
if (IS_ERR(frame))
@@ -428,7 +429,7 @@ static int fimc_m2m_try_selection(struct fimc_ctx *ctx,
struct v4l2_selection *s)
{
struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_frame *f;
+ const struct fimc_frame *f;
u32 min_size, halign, depth = 0;
int i;
@@ -588,7 +589,7 @@ static int fimc_m2m_set_default_format(struct fimc_ctx *ctx)
.sizeimage = 800 * 4 * 600,
},
};
- struct fimc_fmt *fmt;
+ const struct fimc_fmt *fmt;
fmt = fimc_find_format(&pixm.pixelformat, NULL, FMT_FLAGS_M2M, 0);
if (!fmt)
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-reg.c b/drivers/media/platform/samsung/exynos4-is/fimc-reg.c
index 95165a2cc7d1c..b4ee39e471e71 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-reg.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-reg.c
@@ -105,7 +105,7 @@ void fimc_hw_set_target_format(struct fimc_ctx *ctx)
{
u32 cfg;
struct fimc_dev *dev = ctx->fimc_dev;
- struct fimc_frame *frame = &ctx->d_frame;
+ const struct fimc_frame *frame = &ctx->d_frame;
dbg("w= %d, h= %d color: %d", frame->width,
frame->height, frame->fmt->color);
@@ -147,7 +147,7 @@ void fimc_hw_set_target_format(struct fimc_ctx *ctx)
static void fimc_hw_set_out_dma_size(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
- struct fimc_frame *frame = &ctx->d_frame;
+ const struct fimc_frame *frame = &ctx->d_frame;
u32 cfg;
cfg = (frame->f_height << 16) | frame->f_width;
@@ -166,9 +166,9 @@ static void fimc_hw_set_out_dma_size(struct fimc_ctx *ctx)
void fimc_hw_set_out_dma(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
- struct fimc_frame *frame = &ctx->d_frame;
- struct fimc_dma_offset *offset = &frame->dma_offset;
- struct fimc_fmt *fmt = frame->fmt;
+ const struct fimc_frame *frame = &ctx->d_frame;
+ const struct fimc_dma_offset *offset = &frame->dma_offset;
+ const struct fimc_fmt *fmt = frame->fmt;
u32 cfg;
/* Set the input dma offsets. */
@@ -248,8 +248,8 @@ static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
struct fimc_scaler *sc = &ctx->scaler;
- struct fimc_frame *src_frame = &ctx->s_frame;
- struct fimc_frame *dst_frame = &ctx->d_frame;
+ const struct fimc_frame *src_frame = &ctx->s_frame;
+ const struct fimc_frame *dst_frame = &ctx->d_frame;
u32 cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
@@ -388,7 +388,7 @@ void fimc_hw_set_effect(struct fimc_ctx *ctx)
void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
- struct fimc_frame *frame = &ctx->d_frame;
+ const struct fimc_frame *frame = &ctx->d_frame;
u32 cfg;
if (!(frame->fmt->flags & FMT_HAS_ALPHA))
@@ -403,7 +403,7 @@ void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx)
static void fimc_hw_set_in_dma_size(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
- struct fimc_frame *frame = &ctx->s_frame;
+ const struct fimc_frame *frame = &ctx->s_frame;
u32 cfg_o = 0;
u32 cfg_r = 0;
@@ -420,8 +420,8 @@ static void fimc_hw_set_in_dma_size(struct fimc_ctx *ctx)
void fimc_hw_set_in_dma(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
- struct fimc_frame *frame = &ctx->s_frame;
- struct fimc_dma_offset *offset = &frame->dma_offset;
+ const struct fimc_frame *frame = &ctx->s_frame;
+ const struct fimc_dma_offset *offset = &frame->dma_offset;
u32 cfg;
/* Set the pixel offsets. */
@@ -526,7 +526,7 @@ void fimc_hw_set_output_path(struct fimc_ctx *ctx)
writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
}
-void fimc_hw_set_input_addr(struct fimc_dev *dev, struct fimc_addr *addr)
+void fimc_hw_set_input_addr(struct fimc_dev *dev, const struct fimc_addr *addr)
{
u32 cfg = readl(dev->regs + FIMC_REG_CIREAL_ISIZE);
cfg |= FIMC_REG_CIREAL_ISIZE_ADDR_CH_DIS;
@@ -541,7 +541,7 @@ void fimc_hw_set_input_addr(struct fimc_dev *dev, struct fimc_addr *addr)
}
void fimc_hw_set_output_addr(struct fimc_dev *dev,
- struct fimc_addr *addr, int index)
+ const struct fimc_addr *addr, int index)
{
int i = (index == -1) ? 0 : index;
do {
@@ -554,7 +554,7 @@ void fimc_hw_set_output_addr(struct fimc_dev *dev,
}
int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
- struct fimc_source_info *cam)
+ const struct fimc_source_info *cam)
{
u32 cfg = readl(fimc->regs + FIMC_REG_CIGCTRL);
@@ -598,8 +598,8 @@ static const struct mbus_pixfmt_desc pix_desc[] = {
int fimc_hw_set_camera_source(struct fimc_dev *fimc,
struct fimc_source_info *source)
{
- struct fimc_vid_cap *vc = &fimc->vid_cap;
- struct fimc_frame *f = &vc->ctx->s_frame;
+ const struct fimc_vid_cap *vc = &fimc->vid_cap;
+ const struct fimc_frame *f = &vc->ctx->s_frame;
u32 bus_width, cfg = 0;
int i;
@@ -648,7 +648,7 @@ int fimc_hw_set_camera_source(struct fimc_dev *fimc,
return 0;
}
-void fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f)
+void fimc_hw_set_camera_offset(struct fimc_dev *fimc, const struct fimc_frame *f)
{
u32 hoff2, voff2;
@@ -668,9 +668,9 @@ void fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f)
}
int fimc_hw_set_camera_type(struct fimc_dev *fimc,
- struct fimc_source_info *source)
+ const struct fimc_source_info *source)
{
- struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ const struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
u32 csis_data_alignment = 32;
u32 cfg, tmp;
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-reg.h b/drivers/media/platform/samsung/exynos4-is/fimc-reg.h
index b9b33aa1f12fe..9714f4309655c 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-reg.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-reg.h
@@ -302,16 +302,16 @@ void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx);
void fimc_hw_set_in_dma(struct fimc_ctx *ctx);
void fimc_hw_set_input_path(struct fimc_ctx *ctx);
void fimc_hw_set_output_path(struct fimc_ctx *ctx);
-void fimc_hw_set_input_addr(struct fimc_dev *fimc, struct fimc_addr *addr);
-void fimc_hw_set_output_addr(struct fimc_dev *fimc, struct fimc_addr *addr,
+void fimc_hw_set_input_addr(struct fimc_dev *fimc, const struct fimc_addr *addr);
+void fimc_hw_set_output_addr(struct fimc_dev *fimc, const struct fimc_addr *addr,
int index);
int fimc_hw_set_camera_source(struct fimc_dev *fimc,
struct fimc_source_info *cam);
-void fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f);
+void fimc_hw_set_camera_offset(struct fimc_dev *fimc, const struct fimc_frame *f);
int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
- struct fimc_source_info *cam);
+ const struct fimc_source_info *cam);
int fimc_hw_set_camera_type(struct fimc_dev *fimc,
- struct fimc_source_info *cam);
+ const struct fimc_source_info *cam);
void fimc_hw_clear_irq(struct fimc_dev *dev);
void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on);
void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on);
diff --git a/drivers/media/platform/samsung/exynos4-is/mipi-csis.c b/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
index aae8a8b2c0f4c..4b9b20ba35041 100644
--- a/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
@@ -727,7 +727,8 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
&state->max_num_lanes))
return -EINVAL;
- node = of_graph_get_next_endpoint(node, NULL);
+ /* from port@3 or port@4 */
+ node = of_graph_get_endpoint_by_regs(node, -1, -1);
if (!node) {
dev_err(&pdev->dev, "No port node at %pOF\n",
pdev->dev.of_node);
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
index fbb047eadf5af..50451984d59f7 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
@@ -183,7 +183,7 @@ static void s5p_mfc_watchdog_worker(struct work_struct *work)
mfc_err("Error: some instance may be closing/opening\n");
spin_lock_irqsave(&dev->irqlock, flags);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
ctx = dev->ctx[i];
@@ -211,9 +211,9 @@ static void s5p_mfc_watchdog_worker(struct work_struct *work)
mfc_err("Failed to reload FW\n");
goto unlock;
}
- s5p_mfc_clock_on();
+ s5p_mfc_clock_on(dev);
ret = s5p_mfc_init_hw(dev);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
if (ret)
mfc_err("Failed to reinit FW\n");
}
@@ -393,7 +393,7 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
wake_up_ctx(ctx, reason, err);
WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
return;
}
@@ -465,7 +465,7 @@ leave_handle_frame:
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
wake_up_ctx(ctx, reason, err);
WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
/* if suspending, wake up device and do not try_run again*/
if (test_bit(0, &dev->enter_suspend))
wake_up_dev(dev, reason, err);
@@ -509,7 +509,7 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
}
WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
wake_up_dev(dev, reason, err);
}
@@ -565,7 +565,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
clear_work_bit(ctx);
WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
wake_up_ctx(ctx, reason, err);
}
@@ -601,7 +601,7 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
}
WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
wake_up(&ctx->queue);
if (ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1)
@@ -610,7 +610,7 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
} else {
WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
wake_up(&ctx->queue);
}
@@ -638,7 +638,7 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx)
WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
wake_up(&ctx->queue);
s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
}
@@ -690,7 +690,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
}
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
wake_up_ctx(ctx, reason, err);
s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
} else {
@@ -754,7 +754,7 @@ irq_cleanup_hw:
if (test_and_clear_bit(0, &dev->hw_lock) == 0)
mfc_err("Failed to unlock hw\n");
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
clear_work_bit(ctx);
wake_up(&ctx->queue);
@@ -841,20 +841,20 @@ static int s5p_mfc_open(struct file *file)
dev->watchdog_timer.expires = jiffies +
msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
add_timer(&dev->watchdog_timer);
- ret = s5p_mfc_power_on();
+ ret = s5p_mfc_power_on(dev);
if (ret < 0) {
mfc_err("power on failed\n");
goto err_pwr_enable;
}
- s5p_mfc_clock_on();
+ s5p_mfc_clock_on(dev);
ret = s5p_mfc_load_firmware(dev);
if (ret) {
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
goto err_load_fw;
}
/* Init the FW */
ret = s5p_mfc_init_hw(dev);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
if (ret)
goto err_init_hw;
}
@@ -931,7 +931,7 @@ err_init_hw:
err_load_fw:
err_pwr_enable:
if (dev->num_inst == 1) {
- if (s5p_mfc_power_off() < 0)
+ if (s5p_mfc_power_off(dev) < 0)
mfc_err("power off failed\n");
del_timer_sync(&dev->watchdog_timer);
}
@@ -963,7 +963,7 @@ static int s5p_mfc_release(struct file *file)
vb2_queue_release(&ctx->vq_src);
vb2_queue_release(&ctx->vq_dst);
if (dev) {
- s5p_mfc_clock_on();
+ s5p_mfc_clock_on(dev);
/* Mark context as idle */
clear_work_bit_irqsave(ctx);
@@ -983,12 +983,12 @@ static int s5p_mfc_release(struct file *file)
mfc_debug(2, "Last instance\n");
s5p_mfc_deinit_hw(dev);
del_timer_sync(&dev->watchdog_timer);
- s5p_mfc_clock_off();
- if (s5p_mfc_power_off() < 0)
+ s5p_mfc_clock_off(dev);
+ if (s5p_mfc_power_off(dev) < 0)
mfc_err("Power off failed\n");
} else {
mfc_debug(2, "Shutting down clock\n");
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
}
}
if (dev)
@@ -1520,20 +1520,20 @@ static const struct dev_pm_ops s5p_mfc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(s5p_mfc_suspend, s5p_mfc_resume)
};
-static struct s5p_mfc_buf_size_v5 mfc_buf_size_v5 = {
+static const struct s5p_mfc_buf_size_v5 mfc_buf_size_v5 = {
.h264_ctx = MFC_H264_CTX_BUF_SIZE,
.non_h264_ctx = MFC_CTX_BUF_SIZE,
.dsc = DESC_BUF_SIZE,
.shm = SHARED_BUF_SIZE,
};
-static struct s5p_mfc_buf_size buf_size_v5 = {
+static const struct s5p_mfc_buf_size buf_size_v5 = {
.fw = MAX_FW_SIZE,
.cpb = MAX_CPB_SIZE,
.priv = &mfc_buf_size_v5,
};
-static struct s5p_mfc_variant mfc_drvdata_v5 = {
+static const struct s5p_mfc_variant mfc_drvdata_v5 = {
.version = MFC_VERSION,
.version_bit = MFC_V5_BIT,
.port_num = MFC_NUM_PORTS,
@@ -1544,7 +1544,7 @@ static struct s5p_mfc_variant mfc_drvdata_v5 = {
.use_clock_gating = true,
};
-static struct s5p_mfc_buf_size_v6 mfc_buf_size_v6 = {
+static const struct s5p_mfc_buf_size_v6 mfc_buf_size_v6 = {
.dev_ctx = MFC_CTX_BUF_SIZE_V6,
.h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V6,
.other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V6,
@@ -1552,13 +1552,13 @@ static struct s5p_mfc_buf_size_v6 mfc_buf_size_v6 = {
.other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V6,
};
-static struct s5p_mfc_buf_size buf_size_v6 = {
+static const struct s5p_mfc_buf_size buf_size_v6 = {
.fw = MAX_FW_SIZE_V6,
.cpb = MAX_CPB_SIZE_V6,
.priv = &mfc_buf_size_v6,
};
-static struct s5p_mfc_variant mfc_drvdata_v6 = {
+static const struct s5p_mfc_variant mfc_drvdata_v6 = {
.version = MFC_VERSION_V6,
.version_bit = MFC_V6_BIT,
.port_num = MFC_NUM_PORTS_V6,
@@ -1573,7 +1573,7 @@ static struct s5p_mfc_variant mfc_drvdata_v6 = {
.num_clocks = 1,
};
-static struct s5p_mfc_buf_size_v6 mfc_buf_size_v7 = {
+static const struct s5p_mfc_buf_size_v6 mfc_buf_size_v7 = {
.dev_ctx = MFC_CTX_BUF_SIZE_V7,
.h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V7,
.other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V7,
@@ -1581,13 +1581,13 @@ static struct s5p_mfc_buf_size_v6 mfc_buf_size_v7 = {
.other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V7,
};
-static struct s5p_mfc_buf_size buf_size_v7 = {
+static const struct s5p_mfc_buf_size buf_size_v7 = {
.fw = MAX_FW_SIZE_V7,
.cpb = MAX_CPB_SIZE_V7,
.priv = &mfc_buf_size_v7,
};
-static struct s5p_mfc_variant mfc_drvdata_v7 = {
+static const struct s5p_mfc_variant mfc_drvdata_v7 = {
.version = MFC_VERSION_V7,
.version_bit = MFC_V7_BIT,
.port_num = MFC_NUM_PORTS_V7,
@@ -1597,7 +1597,7 @@ static struct s5p_mfc_variant mfc_drvdata_v7 = {
.num_clocks = 1,
};
-static struct s5p_mfc_variant mfc_drvdata_v7_3250 = {
+static const struct s5p_mfc_variant mfc_drvdata_v7_3250 = {
.version = MFC_VERSION_V7,
.version_bit = MFC_V7_BIT,
.port_num = MFC_NUM_PORTS_V7,
@@ -1607,7 +1607,7 @@ static struct s5p_mfc_variant mfc_drvdata_v7_3250 = {
.num_clocks = 2,
};
-static struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = {
+static const struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = {
.dev_ctx = MFC_CTX_BUF_SIZE_V8,
.h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V8,
.other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V8,
@@ -1615,13 +1615,13 @@ static struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = {
.other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V8,
};
-static struct s5p_mfc_buf_size buf_size_v8 = {
+static const struct s5p_mfc_buf_size buf_size_v8 = {
.fw = MAX_FW_SIZE_V8,
.cpb = MAX_CPB_SIZE_V8,
.priv = &mfc_buf_size_v8,
};
-static struct s5p_mfc_variant mfc_drvdata_v8 = {
+static const struct s5p_mfc_variant mfc_drvdata_v8 = {
.version = MFC_VERSION_V8,
.version_bit = MFC_V8_BIT,
.port_num = MFC_NUM_PORTS_V8,
@@ -1631,7 +1631,7 @@ static struct s5p_mfc_variant mfc_drvdata_v8 = {
.num_clocks = 1,
};
-static struct s5p_mfc_variant mfc_drvdata_v8_5433 = {
+static const struct s5p_mfc_variant mfc_drvdata_v8_5433 = {
.version = MFC_VERSION_V8,
.version_bit = MFC_V8_BIT,
.port_num = MFC_NUM_PORTS_V8,
@@ -1641,7 +1641,7 @@ static struct s5p_mfc_variant mfc_drvdata_v8_5433 = {
.num_clocks = 3,
};
-static struct s5p_mfc_buf_size_v6 mfc_buf_size_v10 = {
+static const struct s5p_mfc_buf_size_v6 mfc_buf_size_v10 = {
.dev_ctx = MFC_CTX_BUF_SIZE_V10,
.h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V10,
.other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V10,
@@ -1650,13 +1650,13 @@ static struct s5p_mfc_buf_size_v6 mfc_buf_size_v10 = {
.other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V10,
};
-static struct s5p_mfc_buf_size buf_size_v10 = {
+static const struct s5p_mfc_buf_size buf_size_v10 = {
.fw = MAX_FW_SIZE_V10,
.cpb = MAX_CPB_SIZE_V10,
.priv = &mfc_buf_size_v10,
};
-static struct s5p_mfc_variant mfc_drvdata_v10 = {
+static const struct s5p_mfc_variant mfc_drvdata_v10 = {
.version = MFC_VERSION_V10,
.version_bit = MFC_V10_BIT,
.port_num = MFC_NUM_PORTS_V10,
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd.c
index 774c573dc075b..196d8c99647b2 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd.c
@@ -12,14 +12,10 @@
#include "s5p_mfc_cmd_v5.h"
#include "s5p_mfc_cmd_v6.h"
-static struct s5p_mfc_hw_cmds *s5p_mfc_cmds;
-
void s5p_mfc_init_hw_cmds(struct s5p_mfc_dev *dev)
{
if (IS_MFCV6_PLUS(dev))
- s5p_mfc_cmds = s5p_mfc_init_hw_cmds_v6();
+ dev->mfc_cmds = s5p_mfc_init_hw_cmds_v6();
else
- s5p_mfc_cmds = s5p_mfc_init_hw_cmds_v5();
-
- dev->mfc_cmds = s5p_mfc_cmds;
+ dev->mfc_cmds = s5p_mfc_init_hw_cmds_v5();
}
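The s5p_mfc_cmd.c hunk above drops the file-scope s5p_mfc_cmds variable and writes the selected command table straight into dev->mfc_cmds, so each device instance carries its own pointer to a const table. A self-contained sketch of that shape, with stand-in types and hypothetical names (the real tables come from s5p_mfc_init_hw_cmds_v5()/_v6()):

#include <stdbool.h>
#include <stddef.h>

struct hw_cmds { int (*sys_init_cmd)(void); };

static const struct hw_cmds cmds_v5 = { .sys_init_cmd = NULL };
static const struct hw_cmds cmds_v6 = { .sys_init_cmd = NULL };

struct mfc_dev { bool is_v6_plus; const struct hw_cmds *mfc_cmds; };

static void init_hw_cmds(struct mfc_dev *dev)
{
	/* Select the const table and store it per device; no file-scope
	 * intermediate pointer is needed. */
	dev->mfc_cmds = dev->is_v6_plus ? &cmds_v6 : &cmds_v5;
}

int main(void)
{
	struct mfc_dev dev = { .is_v6_plus = true };

	init_hw_cmds(&dev);
	return dev.mfc_cmds == &cmds_v6 ? 0 : 1;
}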
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd.h
index 945d12fdceb7d..172c5a63b58ea 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd.h
@@ -19,7 +19,7 @@ struct s5p_mfc_cmd_args {
struct s5p_mfc_hw_cmds {
int (*cmd_host2risc)(struct s5p_mfc_dev *dev, int cmd,
- struct s5p_mfc_cmd_args *args);
+ const struct s5p_mfc_cmd_args *args);
int (*sys_init_cmd)(struct s5p_mfc_dev *dev);
int (*sleep_cmd)(struct s5p_mfc_dev *dev);
int (*wakeup_cmd)(struct s5p_mfc_dev *dev);
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v5.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v5.c
index 327e54e706114..82ee6d300c738 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v5.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v5.c
@@ -14,7 +14,7 @@
/* This function is used to send a command to the MFC */
static int s5p_mfc_cmd_host2risc_v5(struct s5p_mfc_dev *dev, int cmd,
- struct s5p_mfc_cmd_args *args)
+ const struct s5p_mfc_cmd_args *args)
{
int cur_cmd;
unsigned long timeout;
@@ -148,7 +148,7 @@ static int s5p_mfc_close_inst_cmd_v5(struct s5p_mfc_ctx *ctx)
}
/* Initialize cmd function pointers for MFC v5 */
-static struct s5p_mfc_hw_cmds s5p_mfc_cmds_v5 = {
+static const struct s5p_mfc_hw_cmds s5p_mfc_cmds_v5 = {
.cmd_host2risc = s5p_mfc_cmd_host2risc_v5,
.sys_init_cmd = s5p_mfc_sys_init_cmd_v5,
.sleep_cmd = s5p_mfc_sleep_cmd_v5,
@@ -157,7 +157,7 @@ static struct s5p_mfc_hw_cmds s5p_mfc_cmds_v5 = {
.close_inst_cmd = s5p_mfc_close_inst_cmd_v5,
};
-struct s5p_mfc_hw_cmds *s5p_mfc_init_hw_cmds_v5(void)
+const struct s5p_mfc_hw_cmds *s5p_mfc_init_hw_cmds_v5(void)
{
return &s5p_mfc_cmds_v5;
}
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v5.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v5.h
index 6eafa514aebca..c626376053c45 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v5.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v5.h
@@ -11,6 +11,6 @@
#include "s5p_mfc_common.h"
-struct s5p_mfc_hw_cmds *s5p_mfc_init_hw_cmds_v5(void);
+const struct s5p_mfc_hw_cmds *s5p_mfc_init_hw_cmds_v5(void);
#endif /* S5P_MFC_CMD_H_ */
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c
index f8588e52dfc82..47bc3014b5d8b 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c
@@ -15,7 +15,7 @@
#include "s5p_mfc_cmd_v6.h"
static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd,
- struct s5p_mfc_cmd_args *args)
+ const struct s5p_mfc_cmd_args *args)
{
mfc_debug(2, "Issue the command: %d\n", cmd);
@@ -32,7 +32,7 @@ static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd,
static int s5p_mfc_sys_init_cmd_v6(struct s5p_mfc_dev *dev)
{
struct s5p_mfc_cmd_args h2r_args;
- struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
+ const struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
int ret;
ret = s5p_mfc_hw_call(dev->mfc_ops, alloc_dev_context_buffer, dev);
@@ -154,7 +154,7 @@ static int s5p_mfc_close_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
}
/* Initialize cmd function pointers for MFC v6 */
-static struct s5p_mfc_hw_cmds s5p_mfc_cmds_v6 = {
+static const struct s5p_mfc_hw_cmds s5p_mfc_cmds_v6 = {
.cmd_host2risc = s5p_mfc_cmd_host2risc_v6,
.sys_init_cmd = s5p_mfc_sys_init_cmd_v6,
.sleep_cmd = s5p_mfc_sleep_cmd_v6,
@@ -163,7 +163,7 @@ static struct s5p_mfc_hw_cmds s5p_mfc_cmds_v6 = {
.close_inst_cmd = s5p_mfc_close_inst_cmd_v6,
};
-struct s5p_mfc_hw_cmds *s5p_mfc_init_hw_cmds_v6(void)
+const struct s5p_mfc_hw_cmds *s5p_mfc_init_hw_cmds_v6(void)
{
return &s5p_mfc_cmds_v6;
}
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.h
index 9dc44460cc38d..29083436f5173 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.h
@@ -11,6 +11,6 @@
#include "s5p_mfc_common.h"
-struct s5p_mfc_hw_cmds *s5p_mfc_init_hw_cmds_v6(void);
+const struct s5p_mfc_hw_cmds *s5p_mfc_init_hw_cmds_v6(void);
#endif /* S5P_MFC_CMD_H_ */
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
index 59450b324f7d0..3cc2a4f5c40a6 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
@@ -221,15 +221,15 @@ struct s5p_mfc_buf_size_v6 {
struct s5p_mfc_buf_size {
unsigned int fw;
unsigned int cpb;
- void *priv;
+ const void *priv;
};
struct s5p_mfc_variant {
unsigned int version;
unsigned int port_num;
u32 version_bit;
- struct s5p_mfc_buf_size *buf_size;
- char *fw_name[MFC_FW_MAX_VERSIONS];
+ const struct s5p_mfc_buf_size *buf_size;
+ const char *fw_name[MFC_FW_MAX_VERSIONS];
const char *clk_names[MFC_MAX_CLOCKS];
int num_clocks;
bool use_clock_gating;
@@ -340,8 +340,8 @@ struct s5p_mfc_dev {
struct s5p_mfc_priv_buf ctx_buf;
int warn_start;
- struct s5p_mfc_hw_ops *mfc_ops;
- struct s5p_mfc_hw_cmds *mfc_cmds;
+ const struct s5p_mfc_hw_ops *mfc_ops;
+ const struct s5p_mfc_hw_cmds *mfc_cmds;
const struct s5p_mfc_regs *mfc_regs;
enum s5p_mfc_fw_ver fw_ver;
bool fw_get_done;
@@ -612,7 +612,6 @@ struct s5p_mfc_codec_ops {
* @chroma_dpb_size: dpb buffer size for chroma
* @me_buffer_size: size of the motion estimation buffer
* @tmv_buffer_size: size of temporal predictor motion vector buffer
- * @frame_type: used to force the type of the next encoded frame
* @ref_queue: list of the reference buffers for encoding
* @force_frame_type: encoder's frame type forcing control
* @ref_queue_cnt: number of the buffers in the reference list
@@ -639,8 +638,8 @@ struct s5p_mfc_ctx {
unsigned int int_err;
wait_queue_head_t queue;
- struct s5p_mfc_fmt *src_fmt;
- struct s5p_mfc_fmt *dst_fmt;
+ const struct s5p_mfc_fmt *src_fmt;
+ const struct s5p_mfc_fmt *dst_fmt;
struct vb2_queue vq_src;
struct vb2_queue vq_dst;
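With mfc_buf_size_v5/v6 and the related tables now declared const, their addresses can only be stored through a const-qualified pointer, which is why priv in struct s5p_mfc_buf_size becomes const void *; reads such as "const struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;" continue to work unchanged. A self-contained sketch of the pattern, with stand-in types and hypothetical names:

struct sizes_v6 { unsigned int dev_ctx; };

static const struct sizes_v6 sizes_v6 = { .dev_ctx = 64 };

struct buf_size {
	unsigned int fw;
	const void *priv;	/* must be const: it stores &sizes_v6 */
};

static const struct buf_size buf_size_v6 = { .fw = 4096, .priv = &sizes_v6 };

static unsigned int dev_ctx_size(const struct buf_size *bs)
{
	/* reading back through a pointer-to-const needs no cast */
	const struct sizes_v6 *v6 = bs->priv;

	return v6->dev_ctx;
}

int main(void)
{
	return dev_ctx_size(&buf_size_v6) == 64 ? 0 : 1;
}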
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c
index 503487f34a800..625d77b2be0ff 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c
@@ -221,7 +221,7 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
/* 0. MFC reset */
mfc_debug(2, "MFC reset..\n");
- s5p_mfc_clock_on();
+ s5p_mfc_clock_on(dev);
dev->risc_on = 0;
ret = s5p_mfc_reset(dev);
if (ret) {
@@ -249,7 +249,7 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_FW_STATUS_RET)) {
mfc_err("Failed to load firmware\n");
s5p_mfc_reset(dev);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
return -EIO;
}
s5p_mfc_clean_dev_int_flags(dev);
@@ -258,14 +258,14 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
if (ret) {
mfc_err("Failed to send command to MFC - timeout\n");
s5p_mfc_reset(dev);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
return ret;
}
mfc_debug(2, "Ok, now will wait for completion of hardware init\n");
if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_SYS_INIT_RET)) {
mfc_err("Failed to init hardware\n");
s5p_mfc_reset(dev);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
return -EIO;
}
dev->int_cond = 0;
@@ -275,7 +275,7 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
mfc_err("Failed to init firmware - error: %d int: %d\n",
dev->int_err, dev->int_type);
s5p_mfc_reset(dev);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
return -EIO;
}
if (IS_MFCV6_PLUS(dev))
@@ -285,7 +285,7 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
mfc_debug(2, "MFC F/W version : %02xyy, %02xmm, %02xdd\n",
(ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
mfc_debug_leave();
return 0;
}
@@ -294,12 +294,12 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
/* Deinitialize hardware */
void s5p_mfc_deinit_hw(struct s5p_mfc_dev *dev)
{
- s5p_mfc_clock_on();
+ s5p_mfc_clock_on(dev);
s5p_mfc_reset(dev);
s5p_mfc_hw_call(dev->mfc_ops, release_dev_context_buffer, dev);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
}
int s5p_mfc_sleep(struct s5p_mfc_dev *dev)
@@ -307,7 +307,7 @@ int s5p_mfc_sleep(struct s5p_mfc_dev *dev)
int ret;
mfc_debug_enter();
- s5p_mfc_clock_on();
+ s5p_mfc_clock_on(dev);
s5p_mfc_clean_dev_int_flags(dev);
ret = s5p_mfc_hw_call(dev->mfc_cmds, sleep_cmd, dev);
if (ret) {
@@ -318,7 +318,7 @@ int s5p_mfc_sleep(struct s5p_mfc_dev *dev)
mfc_err("Failed to sleep\n");
return -EIO;
}
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
dev->int_cond = 0;
if (dev->int_err != 0 || dev->int_type !=
S5P_MFC_R2H_CMD_SLEEP_RET) {
@@ -390,12 +390,12 @@ int s5p_mfc_wakeup(struct s5p_mfc_dev *dev)
mfc_debug_enter();
/* 0. MFC reset */
mfc_debug(2, "MFC reset..\n");
- s5p_mfc_clock_on();
+ s5p_mfc_clock_on(dev);
dev->risc_on = 0;
ret = s5p_mfc_reset(dev);
if (ret) {
mfc_err("Failed to reset MFC - timeout\n");
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
return ret;
}
mfc_debug(2, "Done MFC reset..\n");
@@ -410,7 +410,7 @@ int s5p_mfc_wakeup(struct s5p_mfc_dev *dev)
else
ret = s5p_mfc_wait_wakeup(dev);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
if (ret)
return ret;
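In the s5p_mfc_ctrl.c hunks above every clock call now passes dev explicitly, and each error return still pairs its s5p_mfc_clock_on(dev) with an s5p_mfc_clock_off(dev). A self-contained sketch of that calling pattern, with stand-in types and purely illustrative bodies:

struct mfc_dev { int clk_refcnt; };

static void clock_on(struct mfc_dev *dev)  { dev->clk_refcnt++; }
static void clock_off(struct mfc_dev *dev) { dev->clk_refcnt--; }

static int reset_hw(struct mfc_dev *dev)   { (void)dev; return 0; }	/* hypothetical */

static int init_hw(struct mfc_dev *dev)
{
	clock_on(dev);
	if (reset_hw(dev)) {
		clock_off(dev);		/* balance the gate on the error path */
		return -1;
	}
	clock_off(dev);
	return 0;
}

int main(void)
{
	struct mfc_dev dev = { .clk_refcnt = 0 };

	return init_hw(&dev);
}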
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
index 3957f28d4547c..91e102d4ec4e2 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
@@ -27,7 +27,7 @@
#include "s5p_mfc_opr.h"
#include "s5p_mfc_pm.h"
-static struct s5p_mfc_fmt formats[] = {
+static const struct s5p_mfc_fmt formats[] = {
{
.fourcc = V4L2_PIX_FMT_NV12MT_16X16,
.codec_mode = S5P_MFC_CODEC_NONE,
@@ -177,7 +177,7 @@ static struct s5p_mfc_fmt formats[] = {
#define NUM_FORMATS ARRAY_SIZE(formats)
/* Find selected format description */
-static struct s5p_mfc_fmt *find_format(struct v4l2_format *f, unsigned int t)
+static const struct s5p_mfc_fmt *find_format(struct v4l2_format *f, unsigned int t)
{
unsigned int i;
@@ -406,7 +406,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct s5p_mfc_dev *dev = video_drvdata(file);
- struct s5p_mfc_fmt *fmt;
+ const struct s5p_mfc_fmt *fmt;
mfc_debug(2, "Type is %d\n", f->type);
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
@@ -445,7 +445,7 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
int ret = 0;
struct v4l2_pix_format_mplane *pix_mp;
- struct s5p_mfc_buf_size *buf_size = dev->variant->buf_size;
+ const struct s5p_mfc_buf_size *buf_size = dev->variant->buf_size;
mfc_debug_enter();
ret = vidioc_try_fmt(file, priv, f);
@@ -496,7 +496,7 @@ static int reqbufs_output(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx,
{
int ret = 0;
- s5p_mfc_clock_on();
+ s5p_mfc_clock_on(dev);
if (reqbufs->count == 0) {
mfc_debug(2, "Freeing buffers\n");
@@ -533,7 +533,7 @@ static int reqbufs_output(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx,
ret = -EINVAL;
}
out:
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
if (ret)
mfc_err("Failed allocating buffers for OUTPUT queue\n");
return ret;
@@ -544,7 +544,7 @@ static int reqbufs_capture(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx,
{
int ret = 0;
- s5p_mfc_clock_on();
+ s5p_mfc_clock_on(dev);
if (reqbufs->count == 0) {
mfc_debug(2, "Freeing buffers\n");
@@ -587,7 +587,7 @@ static int reqbufs_capture(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx,
ret = -EINVAL;
}
out:
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
if (ret)
mfc_err("Failed allocating buffers for CAPTURE queue\n");
return ret;
@@ -1159,7 +1159,7 @@ static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
}
-static struct vb2_ops s5p_mfc_dec_qops = {
+static const struct vb2_ops s5p_mfc_dec_qops = {
.queue_setup = s5p_mfc_queue_setup,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
@@ -1174,7 +1174,7 @@ const struct s5p_mfc_codec_ops *get_dec_codec_ops(void)
return &decoder_codec_ops;
}
-struct vb2_ops *get_dec_queue_ops(void)
+const struct vb2_ops *get_dec_queue_ops(void)
{
return &s5p_mfc_dec_qops;
}
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.h
index 0c52ab46cff73..47a6eb9a8fc0b 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.h
@@ -10,9 +10,8 @@
#define S5P_MFC_DEC_H_
const struct s5p_mfc_codec_ops *get_dec_codec_ops(void);
-struct vb2_ops *get_dec_queue_ops(void);
+const struct vb2_ops *get_dec_queue_ops(void);
const struct v4l2_ioctl_ops *get_dec_v4l2_ioctl_ops(void);
-struct s5p_mfc_fmt *get_dec_def_fmt(bool src);
int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx);
void s5p_mfc_dec_ctrls_delete(struct s5p_mfc_ctx *ctx);
void s5p_mfc_dec_init(struct s5p_mfc_ctx *ctx);
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
index ef8bb40b9712e..81cbb36fb382c 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
@@ -30,7 +30,7 @@
#define DEF_SRC_FMT_ENC V4L2_PIX_FMT_NV12M
#define DEF_DST_FMT_ENC V4L2_PIX_FMT_H264
-static struct s5p_mfc_fmt formats[] = {
+static const struct s5p_mfc_fmt formats[] = {
{
.fourcc = V4L2_PIX_FMT_NV12MT_16X16,
.codec_mode = S5P_MFC_CODEC_NONE,
@@ -111,7 +111,7 @@ static struct s5p_mfc_fmt formats[] = {
};
#define NUM_FORMATS ARRAY_SIZE(formats)
-static struct s5p_mfc_fmt *find_format(struct v4l2_format *f, unsigned int t)
+static const struct s5p_mfc_fmt *find_format(struct v4l2_format *f, unsigned int t)
{
unsigned int i;
@@ -1431,7 +1431,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct s5p_mfc_dev *dev = video_drvdata(file);
- struct s5p_mfc_fmt *fmt;
+ const struct s5p_mfc_fmt *fmt;
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
@@ -2392,7 +2392,7 @@ static const struct v4l2_ioctl_ops s5p_mfc_enc_ioctl_ops = {
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
-static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb)
+static int check_vb_with_fmt(const struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb)
{
int i;
@@ -2650,7 +2650,7 @@ static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
}
-static struct vb2_ops s5p_mfc_enc_qops = {
+static const struct vb2_ops s5p_mfc_enc_qops = {
.queue_setup = s5p_mfc_queue_setup,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
@@ -2666,7 +2666,7 @@ const struct s5p_mfc_codec_ops *get_enc_codec_ops(void)
return &encoder_codec_ops;
}
-struct vb2_ops *get_enc_queue_ops(void)
+const struct vb2_ops *get_enc_queue_ops(void)
{
return &s5p_mfc_enc_qops;
}
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.h
index 3f1b1a037a4f5..62d6db67fd914 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.h
@@ -10,9 +10,8 @@
#define S5P_MFC_ENC_H_
const struct s5p_mfc_codec_ops *get_enc_codec_ops(void);
-struct vb2_ops *get_enc_queue_ops(void);
+const struct vb2_ops *get_enc_queue_ops(void);
const struct v4l2_ioctl_ops *get_enc_v4l2_ioctl_ops(void);
-struct s5p_mfc_fmt *get_enc_def_fmt(bool src);
int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx);
void s5p_mfc_enc_ctrls_delete(struct s5p_mfc_ctx *ctx);
void s5p_mfc_enc_init(struct s5p_mfc_ctx *ctx);
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.c
index 673962301173c..5ba791fa36763 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.c
@@ -14,18 +14,15 @@
#include "s5p_mfc_opr_v5.h"
#include "s5p_mfc_opr_v6.h"
-static struct s5p_mfc_hw_ops *s5p_mfc_ops;
-
void s5p_mfc_init_hw_ops(struct s5p_mfc_dev *dev)
{
if (IS_MFCV6_PLUS(dev)) {
- s5p_mfc_ops = s5p_mfc_init_hw_ops_v6();
+ dev->mfc_ops = s5p_mfc_init_hw_ops_v6();
dev->warn_start = S5P_FIMV_ERR_WARNINGS_START_V6;
} else {
- s5p_mfc_ops = s5p_mfc_init_hw_ops_v5();
+ dev->mfc_ops = s5p_mfc_init_hw_ops_v5();
dev->warn_start = S5P_FIMV_ERR_WARNINGS_START;
}
- dev->mfc_ops = s5p_mfc_ops;
}
void s5p_mfc_init_regs(struct s5p_mfc_dev *dev)
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.c
index fcfaf125a5a1d..365f552e604bd 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.c
@@ -34,7 +34,7 @@
static int s5p_mfc_alloc_dec_temp_buffers_v5(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;
+ const struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;
int ret;
ctx->dsc.size = buf_size->dsc;
@@ -200,7 +200,7 @@ static void s5p_mfc_release_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
static int s5p_mfc_alloc_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;
+ const struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;
int ret;
if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
@@ -345,7 +345,7 @@ static void s5p_mfc_enc_calc_src_size_v5(struct s5p_mfc_ctx *ctx)
static void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;
+ const struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;
mfc_write(dev, OFFSETA(ctx->dsc.dma), S5P_FIMV_SI_CH0_DESC_ADR);
mfc_write(dev, buf_size->dsc, S5P_FIMV_SI_CH0_DESC_SIZE);
@@ -676,7 +676,7 @@ static int s5p_mfc_set_enc_ref_buffer_v5(struct s5p_mfc_ctx *ctx)
static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ const struct s5p_mfc_enc_params *p = &ctx->enc_params;
unsigned int reg;
unsigned int shm;
@@ -759,8 +759,8 @@ static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- struct s5p_mfc_enc_params *p = &ctx->enc_params;
- struct s5p_mfc_h264_enc_params *p_264 = &p->codec.h264;
+ const struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ const struct s5p_mfc_h264_enc_params *p_264 = &p->codec.h264;
unsigned int reg;
unsigned int shm;
@@ -916,8 +916,8 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- struct s5p_mfc_enc_params *p = &ctx->enc_params;
- struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
+ const struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ const struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
unsigned int reg;
unsigned int shm;
unsigned int framerate;
@@ -995,8 +995,8 @@ static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- struct s5p_mfc_enc_params *p = &ctx->enc_params;
- struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
+ const struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ const struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
unsigned int reg;
unsigned int shm;
@@ -1348,7 +1348,7 @@ static void s5p_mfc_try_run_v5(struct s5p_mfc_dev *dev)
* Last frame has already been sent to MFC.
* Now obtaining frames from MFC buffer
*/
- s5p_mfc_clock_on();
+ s5p_mfc_clock_on(dev);
s5p_mfc_clean_ctx_int_flags(ctx);
if (ctx->type == MFCINST_DECODER) {
@@ -1424,7 +1424,7 @@ static void s5p_mfc_try_run_v5(struct s5p_mfc_dev *dev)
* scheduled, reduce the clock count as no one will
* ever do this, because no interrupt related to this try_run
* will ever come from hardware. */
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
}
}
@@ -1593,7 +1593,7 @@ static unsigned int s5p_mfc_get_crop_info_v_v5(struct s5p_mfc_ctx *ctx)
}
/* Initialize opr function pointers for MFC v5 */
-static struct s5p_mfc_hw_ops s5p_mfc_ops_v5 = {
+static const struct s5p_mfc_hw_ops s5p_mfc_ops_v5 = {
.alloc_dec_temp_buffers = s5p_mfc_alloc_dec_temp_buffers_v5,
.release_dec_desc_buffer = s5p_mfc_release_dec_desc_buffer_v5,
.alloc_codec_buffers = s5p_mfc_alloc_codec_buffers_v5,
@@ -1633,7 +1633,7 @@ static struct s5p_mfc_hw_ops s5p_mfc_ops_v5 = {
.get_crop_info_v = s5p_mfc_get_crop_info_v_v5,
};
-struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v5(void)
+const struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v5(void)
{
return &s5p_mfc_ops_v5;
}
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.h
index b53d376ead603..0b98c619676e4 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.h
@@ -78,5 +78,5 @@ enum MFC_SHM_OFS {
FRAME_PACK_SEI_INFO = 0x17c, /* E */
};
-struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v5(void);
+const struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v5(void);
#endif /* S5P_MFC_OPR_H_ */
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
index fd945211d28e7..73f7af674c01b 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
@@ -383,7 +383,7 @@ static void s5p_mfc_release_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
static int s5p_mfc_alloc_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
+ const struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
int ret;
mfc_debug_enter();
@@ -443,7 +443,7 @@ static void s5p_mfc_release_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
/* Allocate context buffers for SYS_INIT */
static int s5p_mfc_alloc_dev_context_buffer_v6(struct s5p_mfc_dev *dev)
{
- struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
+ const struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
int ret;
mfc_debug_enter();
@@ -587,7 +587,7 @@ static int s5p_mfc_set_dec_stream_buffer_v6(struct s5p_mfc_ctx *ctx,
{
struct s5p_mfc_dev *dev = ctx->dev;
const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
- struct s5p_mfc_buf_size *buf_size = dev->variant->buf_size;
+ const struct s5p_mfc_buf_size *buf_size = dev->variant->buf_size;
mfc_debug_enter();
mfc_debug(2, "inst_no: %d, buf_addr: 0x%08x,\n"
@@ -863,7 +863,7 @@ static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
- struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ const struct s5p_mfc_enc_params *p = &ctx->enc_params;
unsigned int reg = 0;
mfc_debug_enter();
@@ -1349,8 +1349,8 @@ static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
- struct s5p_mfc_enc_params *p = &ctx->enc_params;
- struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
+ const struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ const struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
unsigned int reg = 0;
mfc_debug_enter();
@@ -1431,8 +1431,8 @@ static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
- struct s5p_mfc_enc_params *p = &ctx->enc_params;
- struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
+ const struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ const struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
unsigned int reg = 0;
mfc_debug_enter();
@@ -1501,8 +1501,8 @@ static int s5p_mfc_set_enc_params_vp8(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
- struct s5p_mfc_enc_params *p = &ctx->enc_params;
- struct s5p_mfc_vp8_enc_params *p_vp8 = &p->codec.vp8;
+ const struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ const struct s5p_mfc_vp8_enc_params *p_vp8 = &p->codec.vp8;
unsigned int reg = 0;
unsigned int val = 0;
@@ -1897,8 +1897,8 @@ static int s5p_mfc_h264_set_aso_slice_order_v6(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
- struct s5p_mfc_enc_params *p = &ctx->enc_params;
- struct s5p_mfc_h264_enc_params *p_h264 = &p->codec.h264;
+ const struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ const struct s5p_mfc_h264_enc_params *p_h264 = &p->codec.h264;
int i;
if (p_h264->aso) {
@@ -2165,7 +2165,7 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
/* Last frame has already been sent to MFC
* Now obtaining frames from MFC buffer */
- s5p_mfc_clock_on();
+ s5p_mfc_clock_on(dev);
s5p_mfc_clean_ctx_int_flags(ctx);
if (ctx->type == MFCINST_DECODER) {
@@ -2245,7 +2245,7 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
* scheduled, reduce the clock count as no one will
* ever do this, because no interrupt related to this try_run
* will ever come from hardware. */
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(dev);
}
}
@@ -2261,9 +2261,9 @@ s5p_mfc_read_info_v6(struct s5p_mfc_ctx *ctx, unsigned long ofs)
{
int ret;
- s5p_mfc_clock_on();
+ s5p_mfc_clock_on(ctx->dev);
ret = readl((void __iomem *)ofs);
- s5p_mfc_clock_off();
+ s5p_mfc_clock_off(ctx->dev);
return ret;
}
@@ -2657,7 +2657,7 @@ done:
}
/* Initialize opr function pointers for MFC v6 */
-static struct s5p_mfc_hw_ops s5p_mfc_ops_v6 = {
+static const struct s5p_mfc_hw_ops s5p_mfc_ops_v6 = {
.alloc_dec_temp_buffers = s5p_mfc_alloc_dec_temp_buffers_v6,
.release_dec_desc_buffer = s5p_mfc_release_dec_desc_buffer_v6,
.alloc_codec_buffers = s5p_mfc_alloc_codec_buffers_v6,
@@ -2701,7 +2701,7 @@ static struct s5p_mfc_hw_ops s5p_mfc_ops_v6 = {
.get_e_min_scratch_buf_size = s5p_mfc_get_e_min_scratch_buf_size,
};
-struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v6(void)
+const struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v6(void)
{
return &s5p_mfc_ops_v6;
}
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.h
index 94ecb0e6e7c73..7fc1307675d8d 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.h
@@ -51,6 +51,6 @@
#define FRAME_DELTA_DEFAULT 1
-struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v6(void);
+const struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v6(void);
const struct s5p_mfc_regs *s5p_mfc_init_regs_v6_plus(struct s5p_mfc_dev *dev);
#endif /* S5P_MFC_OPR_V6_H_ */
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.c
index 187849841a28b..ae42414083831 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.c
@@ -14,17 +14,11 @@
#include "s5p_mfc_debug.h"
#include "s5p_mfc_pm.h"
-static struct s5p_mfc_pm *pm;
-static struct s5p_mfc_dev *p_dev;
-static atomic_t clk_ref;
-
int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
{
+ struct s5p_mfc_pm *pm = &dev->pm;
int i;
- pm = &dev->pm;
- p_dev = dev;
-
pm->num_clocks = dev->variant->num_clocks;
pm->clk_names = dev->variant->clk_names;
pm->device = &dev->plat_dev->dev;
@@ -49,70 +43,63 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
pm->clock_gate = pm->clocks[0];
pm_runtime_enable(pm->device);
- atomic_set(&clk_ref, 0);
return 0;
}
void s5p_mfc_final_pm(struct s5p_mfc_dev *dev)
{
- pm_runtime_disable(pm->device);
+ pm_runtime_disable(dev->pm.device);
}
-int s5p_mfc_clock_on(void)
+int s5p_mfc_clock_on(struct s5p_mfc_dev *dev)
{
- atomic_inc(&clk_ref);
- mfc_debug(3, "+ %d\n", atomic_read(&clk_ref));
-
- return clk_enable(pm->clock_gate);
+ return clk_enable(dev->pm.clock_gate);
}
-void s5p_mfc_clock_off(void)
+void s5p_mfc_clock_off(struct s5p_mfc_dev *dev)
{
- atomic_dec(&clk_ref);
- mfc_debug(3, "- %d\n", atomic_read(&clk_ref));
-
- clk_disable(pm->clock_gate);
+ clk_disable(dev->pm.clock_gate);
}
-int s5p_mfc_power_on(void)
+int s5p_mfc_power_on(struct s5p_mfc_dev *dev)
{
int i, ret = 0;
- ret = pm_runtime_resume_and_get(pm->device);
+ ret = pm_runtime_resume_and_get(dev->pm.device);
if (ret < 0)
return ret;
/* clock control */
- for (i = 0; i < pm->num_clocks; i++) {
- ret = clk_prepare_enable(pm->clocks[i]);
+ for (i = 0; i < dev->pm.num_clocks; i++) {
+ ret = clk_prepare_enable(dev->pm.clocks[i]);
if (ret < 0) {
mfc_err("clock prepare failed for clock: %s\n",
- pm->clk_names[i]);
+ dev->pm.clk_names[i]);
goto err;
}
}
/* prepare for software clock gating */
- clk_disable(pm->clock_gate);
+ clk_disable(dev->pm.clock_gate);
return 0;
err:
while (--i >= 0)
- clk_disable_unprepare(pm->clocks[i]);
- pm_runtime_put(pm->device);
+ clk_disable_unprepare(dev->pm.clocks[i]);
+ pm_runtime_put(dev->pm.device);
return ret;
}
-int s5p_mfc_power_off(void)
+int s5p_mfc_power_off(struct s5p_mfc_dev *dev)
{
int i;
/* finish software clock gating */
- clk_enable(pm->clock_gate);
+ clk_enable(dev->pm.clock_gate);
- for (i = 0; i < pm->num_clocks; i++)
- clk_disable_unprepare(pm->clocks[i]);
+ for (i = 0; i < dev->pm.num_clocks; i++)
+ clk_disable_unprepare(dev->pm.clocks[i]);
- return pm_runtime_put_sync(pm->device);
+ return pm_runtime_put_sync(dev->pm.device);
}
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.h
index 4159d2364e872..9c71036f03853 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_pm.h
@@ -12,9 +12,9 @@
int s5p_mfc_init_pm(struct s5p_mfc_dev *dev);
void s5p_mfc_final_pm(struct s5p_mfc_dev *dev);
-int s5p_mfc_clock_on(void);
-void s5p_mfc_clock_off(void);
-int s5p_mfc_power_on(void);
-int s5p_mfc_power_off(void);
+int s5p_mfc_clock_on(struct s5p_mfc_dev *dev);
+void s5p_mfc_clock_off(struct s5p_mfc_dev *dev);
+int s5p_mfc_power_on(struct s5p_mfc_dev *dev);
+int s5p_mfc_power_off(struct s5p_mfc_dev *dev);
#endif /* S5P_MFC_PM_H_ */
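The s5p-mfc PM hunks above drop the file-scope pm, p_dev and clk_ref statics and instead reach the PM state through the struct s5p_mfc_dev that every caller already holds. A minimal sketch of the same pattern, assuming hypothetical my_dev/my_pm names rather than the driver's real structures:

#include <linux/clk.h>

struct my_pm {
        struct clk *clock_gate;
};

struct my_dev {
        struct my_pm pm;        /* per-device state, no file-scope globals */
};

/* Callers pass the device explicitly; nothing is hidden in statics. */
static int my_clock_on(struct my_dev *dev)
{
        return clk_enable(dev->pm.clock_gate);
}

static void my_clock_off(struct my_dev *dev)
{
        clk_disable(dev->pm.clock_gate);
}

Threading the device through also makes multiple MFC instances safe, since no two devices can stomp on shared static state.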
diff --git a/drivers/media/platform/st/stm32/stm32-dcmi.c b/drivers/media/platform/st/stm32/stm32-dcmi.c
index c4610e3055461..ff3331af94068 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmi.c
@@ -1855,7 +1855,7 @@ static int dcmi_graph_init(struct stm32_dcmi *dcmi)
struct device_node *ep;
int ret;
- ep = of_graph_get_next_endpoint(dcmi->dev->of_node, NULL);
+ ep = of_graph_get_endpoint_by_regs(dcmi->dev->of_node, 0, -1);
if (!ep) {
dev_err(dcmi->dev, "Failed to get next endpoint\n");
return -EINVAL;
@@ -1907,7 +1907,7 @@ static int dcmi_probe(struct platform_device *pdev)
"Could not get reset control\n");
/* Get bus characteristics from devicetree */
- np = of_graph_get_next_endpoint(np, NULL);
+ np = of_graph_get_endpoint_by_regs(np, 0, -1);
if (!np) {
dev_err(&pdev->dev, "Could not find the endpoint\n");
return -ENODEV;
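Both DCMI hunks replace of_graph_get_next_endpoint(node, NULL) with of_graph_get_endpoint_by_regs(node, 0, -1), which names the port explicitly instead of relying on iteration order. A hedged usage sketch, with a placeholder function name:

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_graph.h>

static int get_sensor_endpoint(struct device *dev)
{
        /* port 0, any endpoint reg (-1 acts as a wildcard) */
        struct device_node *ep =
                of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);

        if (!ep)
                return -ENODEV;

        /* ... parse bus properties from ep here ... */

        of_node_put(ep);        /* the helper returns a refcounted node */
        return 0;
}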
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
index 32c6619be9a26..bce821eb71cec 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
@@ -517,7 +517,7 @@ static int dcmipp_probe(struct platform_device *pdev)
return 0;
}
-static int dcmipp_remove(struct platform_device *pdev)
+static void dcmipp_remove(struct platform_device *pdev)
{
struct dcmipp_device *dcmipp = platform_get_drvdata(pdev);
unsigned int i;
@@ -534,8 +534,6 @@ static int dcmipp_remove(struct platform_device *pdev)
media_device_cleanup(&dcmipp->mdev);
v4l2_device_unregister(&dcmipp->v4l2_dev);
-
- return 0;
}
static int dcmipp_runtime_suspend(struct device *dev)
@@ -588,7 +586,7 @@ static const struct dev_pm_ops dcmipp_pm_ops = {
static struct platform_driver dcmipp_pdrv = {
.probe = dcmipp_probe,
- .remove = dcmipp_remove,
+ .remove_new = dcmipp_remove,
.driver = {
.name = DCMIPP_PDEV_NAME,
.of_match_table = dcmipp_of_match,
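dcmipp_remove() (like ti_csi2rx_remove() further down) now returns void and is wired up through .remove_new, since a non-zero return from a platform remove callback was only ever warned about by the driver core and the teardown proceeded regardless. A minimal sketch of the converted shape, using a hypothetical foo driver:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        return 0;
}

static void foo_remove(struct platform_device *pdev)
{
        /* tear down unconditionally; there is no error to report */
}

static struct platform_driver foo_driver = {
        .probe      = foo_probe,
        .remove_new = foo_remove,       /* void-returning remove callback */
        .driver     = {
                .name = "foo",
        },
};
module_platform_driver(foo_driver);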
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
index 954fabec27f63..a1c35a2b68ed9 100644
--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -66,6 +66,7 @@ static void deinterlace_device_run(void *priv)
struct vb2_v4l2_buffer *src, *dst;
unsigned int hstep, vstep;
dma_addr_t addr;
+ int i;
src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
@@ -160,6 +161,26 @@ static void deinterlace_device_run(void *priv)
deinterlace_write(dev, DEINTERLACE_CH1_HORZ_FACT, hstep);
deinterlace_write(dev, DEINTERLACE_CH1_VERT_FACT, vstep);
+ /* neutral filter coefficients */
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_COEF_ACCESS);
+ readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val,
+ val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40);
+
+ for (i = 0; i < 32; i++) {
+ deinterlace_write(dev, DEINTERLACE_CH0_HORZ_COEF0 + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ }
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0);
+
deinterlace_clr_set_bits(dev, DEINTERLACE_FIELD_CTRL,
DEINTERLACE_FIELD_CTRL_FIELD_CNT_MSK,
DEINTERLACE_FIELD_CTRL_FIELD_CNT(ctx->field));
@@ -248,7 +269,6 @@ static irqreturn_t deinterlace_irq(int irq, void *data)
static void deinterlace_init(struct deinterlace_dev *dev)
{
u32 val;
- int i;
deinterlace_write(dev, DEINTERLACE_BYPASS,
DEINTERLACE_BYPASS_CSC);
@@ -284,27 +304,7 @@ static void deinterlace_init(struct deinterlace_dev *dev)
deinterlace_clr_set_bits(dev, DEINTERLACE_CHROMA_DIFF,
DEINTERLACE_CHROMA_DIFF_TH_MSK,
- DEINTERLACE_CHROMA_DIFF_TH(5));
-
- /* neutral filter coefficients */
- deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
- DEINTERLACE_FRM_CTRL_COEF_ACCESS);
- readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val,
- val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40);
-
- for (i = 0; i < 32; i++) {
- deinterlace_write(dev, DEINTERLACE_CH0_HORZ_COEF0 + i * 4,
- DEINTERLACE_IDENTITY_COEF);
- deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4,
- DEINTERLACE_IDENTITY_COEF);
- deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4,
- DEINTERLACE_IDENTITY_COEF);
- deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4,
- DEINTERLACE_IDENTITY_COEF);
- }
-
- deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL,
- DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0);
+ DEINTERLACE_CHROMA_DIFF_TH(31));
}
static inline struct deinterlace_ctx *deinterlace_file2ctx(struct file *file)
@@ -929,11 +929,18 @@ static int deinterlace_runtime_resume(struct device *device)
return ret;
}
+ ret = reset_control_deassert(dev->rstc);
+ if (ret) {
+ dev_err(dev->dev, "Failed to apply reset\n");
+
+ goto err_exclusive_rate;
+ }
+
ret = clk_prepare_enable(dev->bus_clk);
if (ret) {
dev_err(dev->dev, "Failed to enable bus clock\n");
- goto err_exclusive_rate;
+ goto err_rst;
}
ret = clk_prepare_enable(dev->mod_clk);
@@ -950,23 +957,16 @@ static int deinterlace_runtime_resume(struct device *device)
goto err_mod_clk;
}
- ret = reset_control_deassert(dev->rstc);
- if (ret) {
- dev_err(dev->dev, "Failed to apply reset\n");
-
- goto err_ram_clk;
- }
-
deinterlace_init(dev);
return 0;
-err_ram_clk:
- clk_disable_unprepare(dev->ram_clk);
err_mod_clk:
clk_disable_unprepare(dev->mod_clk);
err_bus_clk:
clk_disable_unprepare(dev->bus_clk);
+err_rst:
+ reset_control_assert(dev->rstc);
err_exclusive_rate:
clk_rate_exclusive_put(dev->mod_clk);
@@ -977,11 +977,12 @@ static int deinterlace_runtime_suspend(struct device *device)
{
struct deinterlace_dev *dev = dev_get_drvdata(device);
- reset_control_assert(dev->rstc);
-
clk_disable_unprepare(dev->ram_clk);
clk_disable_unprepare(dev->mod_clk);
clk_disable_unprepare(dev->bus_clk);
+
+ reset_control_assert(dev->rstc);
+
clk_rate_exclusive_put(dev->mod_clk);
return 0;
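The sun8i-di runtime-PM hunks move the reset deassert ahead of the clock enables on resume and the assert after the clock disables on suspend, so the block is never clocked while held in reset. A rough sketch of the resulting ordering with error unwinding; the struct and field names are illustrative only:

#include <linux/clk.h>
#include <linux/reset.h>

struct di_dev {
        struct reset_control *rstc;
        struct clk *bus_clk;
        struct clk *mod_clk;
};

static int di_runtime_resume(struct di_dev *dev)
{
        int ret;

        ret = reset_control_deassert(dev->rstc);        /* reset first */
        if (ret)
                return ret;

        ret = clk_prepare_enable(dev->bus_clk);
        if (ret)
                goto err_rst;

        ret = clk_prepare_enable(dev->mod_clk);
        if (ret)
                goto err_bus;

        return 0;

err_bus:
        clk_disable_unprepare(dev->bus_clk);
err_rst:
        reset_control_assert(dev->rstc);
        return ret;
}

static int di_runtime_suspend(struct di_dev *dev)
{
        clk_disable_unprepare(dev->mod_clk);
        clk_disable_unprepare(dev->bus_clk);
        reset_control_assert(dev->rstc);        /* reset last, mirroring resume */
        return 0;
}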
diff --git a/drivers/media/platform/ti/davinci/vpif.c b/drivers/media/platform/ti/davinci/vpif.c
index 63cdfed37bc9b..f4e1fa76bf372 100644
--- a/drivers/media/platform/ti/davinci/vpif.c
+++ b/drivers/media/platform/ti/davinci/vpif.c
@@ -465,8 +465,7 @@ static int vpif_probe(struct platform_device *pdev)
* so their devices need to be registered manually here
* for their legacy platform_drivers to work.
*/
- endpoint = of_graph_get_next_endpoint(pdev->dev.of_node,
- endpoint);
+ endpoint = of_graph_get_endpoint_by_regs(pdev->dev.of_node, 0, -1);
if (!endpoint)
return 0;
of_node_put(endpoint);
diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
index 59b30fc431440..6da83d0cffaae 100644
--- a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
+++ b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
@@ -159,6 +159,12 @@ static const struct ti_csi2rx_fmt ti_csi2rx_formats[] = {
.bpp = 8,
.size = SHIM_DMACNTX_SIZE_8,
}, {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .csi_dt = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ .size = SHIM_DMACNTX_SIZE_8,
+ }, {
.fourcc = V4L2_PIX_FMT_SBGGR10,
.code = MEDIA_BUS_FMT_SBGGR10_1X10,
.csi_dt = MIPI_CSI2_DT_RAW10,
@@ -182,6 +188,24 @@ static const struct ti_csi2rx_fmt ti_csi2rx_formats[] = {
.csi_dt = MIPI_CSI2_DT_RAW10,
.bpp = 16,
.size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .code = MEDIA_BUS_FMT_RGB565_1X16,
+ .csi_dt = MIPI_CSI2_DT_RGB565,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .csi_dt = MIPI_CSI2_DT_RGB888,
+ .bpp = 32,
+ .size = SHIM_DMACNTX_SIZE_32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGBX32,
+ .code = MEDIA_BUS_FMT_BGR888_1X24,
+ .csi_dt = MIPI_CSI2_DT_RGB888,
+ .bpp = 32,
+ .size = SHIM_DMACNTX_SIZE_32,
},
/* More formats can be supported but they are not listed for now. */
@@ -1065,7 +1089,6 @@ static void ti_csi2rx_cleanup_vb2q(struct ti_csi2rx_dev *csi)
static int ti_csi2rx_probe(struct platform_device *pdev)
{
struct ti_csi2rx_dev *csi;
- struct resource *res;
int ret;
csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
@@ -1076,9 +1099,7 @@ static int ti_csi2rx_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, csi);
mutex_init(&csi->mutex);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- csi->shim = devm_ioremap_resource(&pdev->dev, res);
+ csi->shim = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(csi->shim)) {
ret = PTR_ERR(csi->shim);
goto err_mutex;
@@ -1121,7 +1142,7 @@ err_mutex:
return ret;
}
-static int ti_csi2rx_remove(struct platform_device *pdev)
+static void ti_csi2rx_remove(struct platform_device *pdev)
{
struct ti_csi2rx_dev *csi = platform_get_drvdata(pdev);
@@ -1133,8 +1154,6 @@ static int ti_csi2rx_remove(struct platform_device *pdev)
ti_csi2rx_cleanup_dma(csi);
mutex_destroy(&csi->mutex);
-
- return 0;
}
static const struct of_device_id ti_csi2rx_of_match[] = {
@@ -1145,7 +1164,7 @@ MODULE_DEVICE_TABLE(of, ti_csi2rx_of_match);
static struct platform_driver ti_csi2rx_pdrv = {
.probe = ti_csi2rx_probe,
- .remove = ti_csi2rx_remove,
+ .remove_new = ti_csi2rx_remove,
.driver = {
.name = TI_CSI2RX_MODULE_NAME,
.of_match_table = ti_csi2rx_of_match,
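In ti_csi2rx_probe() the platform_get_resource() + devm_ioremap_resource() pair collapses into devm_platform_ioremap_resource(), which performs the lookup, the ioremap and the error checking in one devres-managed call. A short sketch; the function name is a placeholder:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *csi_map_regs(struct platform_device *pdev)
{
        void __iomem *shim;

        /* looks up IORESOURCE_MEM index 0 and maps it, freed automatically */
        shim = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(shim))
                return shim;    /* caller checks with IS_ERR()/PTR_ERR() */

        return shim;
}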
diff --git a/drivers/media/platform/verisilicon/Kconfig b/drivers/media/platform/verisilicon/Kconfig
index 24b927d8f182e..9a34d14c6e404 100644
--- a/drivers/media/platform/verisilicon/Kconfig
+++ b/drivers/media/platform/verisilicon/Kconfig
@@ -4,7 +4,7 @@ comment "Verisilicon media platform drivers"
config VIDEO_HANTRO
tristate "Hantro VPU driver"
- depends on ARCH_MXC || ARCH_ROCKCHIP || ARCH_AT91 || ARCH_SUNXI || COMPILE_TEST
+ depends on ARCH_MXC || ARCH_ROCKCHIP || ARCH_AT91 || ARCH_SUNXI || ARCH_STM32 || COMPILE_TEST
depends on V4L_MEM2MEM_DRIVERS
depends on VIDEO_DEV
select MEDIA_CONTROLLER
@@ -15,8 +15,8 @@ config VIDEO_HANTRO
select V4L2_VP9
help
Support for the Hantro IP based Video Processing Units present on
- Rockchip and NXP i.MX8M SoCs, which accelerate video and image
- encoding and decoding.
+ Rockchip, NXP i.MX8M and STM32MP25 SoCs, which accelerate video
+ and image encoding and decoding.
To compile this driver as a module, choose M here: the module
will be called hantro-vpu.
@@ -51,3 +51,11 @@ config VIDEO_HANTRO_SUNXI
default y
help
Enable support for H6 SoC.
+
+config VIDEO_HANTRO_STM32MP25
+ bool "Hantro STM32MP25 support"
+ depends on VIDEO_HANTRO
+ depends on ARCH_STM32 || COMPILE_TEST
+ default y
+ help
+ Enable support for STM32MP25 SoCs.
diff --git a/drivers/media/platform/verisilicon/Makefile b/drivers/media/platform/verisilicon/Makefile
index 6ad2ef885920b..eb38a1833b02f 100644
--- a/drivers/media/platform/verisilicon/Makefile
+++ b/drivers/media/platform/verisilicon/Makefile
@@ -39,3 +39,6 @@ hantro-vpu-$(CONFIG_VIDEO_HANTRO_ROCKCHIP) += \
hantro-vpu-$(CONFIG_VIDEO_HANTRO_SUNXI) += \
sunxi_vpu_hw.o
+
+hantro-vpu-$(CONFIG_VIDEO_HANTRO_STM32MP25) += \
+ stm32mp25_vpu_hw.o
diff --git a/drivers/media/platform/verisilicon/hantro.h b/drivers/media/platform/verisilicon/hantro.h
index 6f5eb975d0e33..811260dc3c777 100644
--- a/drivers/media/platform/verisilicon/hantro.h
+++ b/drivers/media/platform/verisilicon/hantro.h
@@ -237,7 +237,6 @@ struct hantro_dev {
* @codec_ops: Set of operations related to codec mode.
* @postproc: Post-processing context.
* @h264_dec: H.264-decoding context.
- * @jpeg_enc: JPEG-encoding context.
* @mpeg2_dec: MPEG-2-decoding context.
* @vp8_dec: VP8-decoding context.
* @hevc_dec: HEVC-decoding context.
diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
index db3df6cc4513b..34b123dafd890 100644
--- a/drivers/media/platform/verisilicon/hantro_drv.c
+++ b/drivers/media/platform/verisilicon/hantro_drv.c
@@ -736,6 +736,10 @@ static const struct of_device_id of_hantro_match[] = {
#ifdef CONFIG_VIDEO_HANTRO_SUNXI
{ .compatible = "allwinner,sun50i-h6-vpu-g2", .data = &sunxi_vpu_variant, },
#endif
+#ifdef CONFIG_VIDEO_HANTRO_STM32MP25
+ { .compatible = "st,stm32mp25-vdec", .data = &stm32mp25_vdec_variant, },
+ { .compatible = "st,stm32mp25-venc", .data = &stm32mp25_venc_variant, },
+#endif
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_hantro_match);
diff --git a/drivers/media/platform/verisilicon/hantro_g1_h264_dec.c b/drivers/media/platform/verisilicon/hantro_g1_h264_dec.c
index 9de7f05eff2a6..ad5c1a6634f5c 100644
--- a/drivers/media/platform/verisilicon/hantro_g1_h264_dec.c
+++ b/drivers/media/platform/verisilicon/hantro_g1_h264_dec.c
@@ -243,7 +243,7 @@ static void set_buffers(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *src_buf)
vdpu_write_relaxed(vpu, dst_dma + offset, G1_REG_ADDR_DIR_MV);
}
- /* Auxiliary buffer prepared in hantro_g1_h264_dec_prepare_table(). */
+ /* Auxiliary buffer prepared in hantro_h264_dec_init(). */
vdpu_write_relaxed(vpu, ctx->h264_dec.priv.dma, G1_REG_ADDR_QTABLE);
}
diff --git a/drivers/media/platform/verisilicon/hantro_hw.h b/drivers/media/platform/verisilicon/hantro_hw.h
index 9aec8a79acdca..7737320cc8cc6 100644
--- a/drivers/media/platform/verisilicon/hantro_hw.h
+++ b/drivers/media/platform/verisilicon/hantro_hw.h
@@ -408,6 +408,8 @@ extern const struct hantro_variant rk3568_vpu_variant;
extern const struct hantro_variant rk3588_vpu981_variant;
extern const struct hantro_variant sama5d4_vdec_variant;
extern const struct hantro_variant sunxi_vpu_variant;
+extern const struct hantro_variant stm32mp25_vdec_variant;
+extern const struct hantro_variant stm32mp25_venc_variant;
extern const struct hantro_postproc_ops hantro_g1_postproc_ops;
extern const struct hantro_postproc_ops hantro_g2_postproc_ops;
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu2_hw_h264_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_h264_dec.c
index 46c1a83bcc4e0..6da87f5184bcb 100644
--- a/drivers/media/platform/verisilicon/rockchip_vpu2_hw_h264_dec.c
+++ b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_h264_dec.c
@@ -460,7 +460,7 @@ static void set_buffers(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *src_buf)
vdpu_write_relaxed(vpu, dst_dma + offset, VDPU_REG_DIR_MV_BASE);
}
- /* Auxiliary buffer prepared in hantro_g1_h264_dec_prepare_table(). */
+ /* Auxiliary buffer prepared in hantro_h264_dec_init(). */
vdpu_write_relaxed(vpu, ctx->h264_dec.priv.dma, VDPU_REG_QTABLE_BASE);
}
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h b/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h
index 182e6c830ff69..850ff0f844248 100644
--- a/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h
+++ b/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h
@@ -118,7 +118,7 @@
#define av1_mcomp_filt_type AV1_DEC_REG(11, 8, 0x7)
#define av1_multicore_expect_context_update AV1_DEC_REG(11, 11, 0x1)
#define av1_multicore_sbx_offset AV1_DEC_REG(11, 12, 0x7f)
-#define av1_ulticore_tile_col AV1_DEC_REG(11, 19, 0x7f)
+#define av1_multicore_tile_col AV1_DEC_REG(11, 19, 0x7f)
#define av1_transform_mode AV1_DEC_REG(11, 27, 0x7)
#define av1_dec_tile_size_mag AV1_DEC_REG(11, 30, 0x3)
diff --git a/drivers/media/platform/verisilicon/stm32mp25_vpu_hw.c b/drivers/media/platform/verisilicon/stm32mp25_vpu_hw.c
new file mode 100644
index 0000000000000..833821120b201
--- /dev/null
+++ b/drivers/media/platform/verisilicon/stm32mp25_vpu_hw.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * STM32MP25 video codec driver
+ *
+ * Copyright (C) STMicroelectronics SA 2024
+ * Authors: Hugues Fruchet <hugues.fruchet@foss.st.com>
+ * for STMicroelectronics.
+ *
+ */
+
+#include "hantro.h"
+#include "hantro_jpeg.h"
+#include "hantro_h1_regs.h"
+
+/*
+ * Supported formats.
+ */
+
+static const struct hantro_fmt stm32mp25_vdec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .codec_mode = HANTRO_MODE_H264_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt stm32mp25_venc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = ROCKCHIP_VPU_ENC_FMT_YUV420P,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = ROCKCHIP_VPU_ENC_FMT_YUV420SP,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = ROCKCHIP_VPU_ENC_FMT_YUYV422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = ROCKCHIP_VPU_ENC_FMT_UYVY422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .codec_mode = HANTRO_MODE_JPEG_ENC,
+ .max_depth = 2,
+ .header_size = JPEG_HEADER_SIZE,
+ .frmsize = {
+ .min_width = 96,
+ .max_width = FMT_4K_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = 96,
+ .max_height = FMT_4K_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static irqreturn_t stm32mp25_venc_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vepu_read(vpu, H1_REG_INTERRUPT);
+ state = (status & H1_REG_INTERRUPT_FRAME_RDY) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vepu_write(vpu, H1_REG_INTERRUPT_BIT, H1_REG_INTERRUPT);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
+
+static void stm32mp25_venc_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ reset_control_reset(vpu->resets);
+}
+
+/*
+ * Supported codec ops.
+ */
+
+static const struct hantro_codec_ops stm32mp25_vdec_codec_ops[] = {
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+};
+
+static const struct hantro_codec_ops stm32mp25_venc_codec_ops[] = {
+ [HANTRO_MODE_JPEG_ENC] = {
+ .run = hantro_h1_jpeg_enc_run,
+ .reset = stm32mp25_venc_reset,
+ .done = hantro_h1_jpeg_enc_done,
+ },
+};
+
+/*
+ * Variants.
+ */
+
+static const struct hantro_irq stm32mp25_vdec_irqs[] = {
+ { "vdec", hantro_g1_irq },
+};
+
+static const char * const stm32mp25_vdec_clk_names[] = { "vdec-clk" };
+
+const struct hantro_variant stm32mp25_vdec_variant = {
+ .dec_fmts = stm32mp25_vdec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(stm32mp25_vdec_fmts),
+ .codec = HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
+ .codec_ops = stm32mp25_vdec_codec_ops,
+ .irqs = stm32mp25_vdec_irqs,
+ .num_irqs = ARRAY_SIZE(stm32mp25_vdec_irqs),
+ .clk_names = stm32mp25_vdec_clk_names,
+ .num_clocks = ARRAY_SIZE(stm32mp25_vdec_clk_names),
+};
+
+static const struct hantro_irq stm32mp25_venc_irqs[] = {
+ { "venc", stm32mp25_venc_irq },
+};
+
+static const char * const stm32mp25_venc_clk_names[] = {
+ "venc-clk"
+};
+
+const struct hantro_variant stm32mp25_venc_variant = {
+ .enc_fmts = stm32mp25_venc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(stm32mp25_venc_fmts),
+ .codec = HANTRO_JPEG_ENCODER,
+ .codec_ops = stm32mp25_venc_codec_ops,
+ .irqs = stm32mp25_venc_irqs,
+ .num_irqs = ARRAY_SIZE(stm32mp25_venc_irqs),
+ .clk_names = stm32mp25_venc_clk_names,
+ .num_clocks = ARRAY_SIZE(stm32mp25_venc_clk_names)
+};
diff --git a/drivers/media/platform/xilinx/Kconfig b/drivers/media/platform/xilinx/Kconfig
index 93ef78bf62e6d..601edd9acd5b9 100644
--- a/drivers/media/platform/xilinx/Kconfig
+++ b/drivers/media/platform/xilinx/Kconfig
@@ -26,10 +26,10 @@ config VIDEO_XILINX_TPG
depends on VIDEO_XILINX
select VIDEO_XILINX_VTC
help
- Driver for the Xilinx Video Test Pattern Generator
+ Driver for the Xilinx Video Test Pattern Generator
config VIDEO_XILINX_VTC
tristate "Xilinx Video Timing Controller"
depends on VIDEO_XILINX
help
- Driver for the Xilinx Video Timing Controller
+ Driver for the Xilinx Video Timing Controller
diff --git a/drivers/media/test-drivers/vicodec/codec-fwht.c b/drivers/media/test-drivers/vicodec/codec-fwht.c
index 1ce682e1b85c3..fd75457d03b20 100644
--- a/drivers/media/test-drivers/vicodec/codec-fwht.c
+++ b/drivers/media/test-drivers/vicodec/codec-fwht.c
@@ -49,7 +49,7 @@ static const uint8_t zigzag[64] = {
/*
* noinline_for_stack to work around
- * https://bugs.llvm.org/show_bug.cgi?id=38809
+ * https://llvm.org/pr38809
*/
static int noinline_for_stack
rlc(const s16 *in, __be16 *output, int blocktype)
diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
index 8b04e12af286c..613949df897d3 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
@@ -45,28 +45,28 @@
#define LNB_HIGH_FREQ 10600000 /* transition frequency */
static unsigned int drop_tslock_prob_on_low_snr;
-module_param(drop_tslock_prob_on_low_snr, uint, 0);
+module_param(drop_tslock_prob_on_low_snr, uint, 0444);
MODULE_PARM_DESC(drop_tslock_prob_on_low_snr,
"Probability of losing the TS lock if the signal quality is bad");
static unsigned int recover_tslock_prob_on_good_snr;
-module_param(recover_tslock_prob_on_good_snr, uint, 0);
+module_param(recover_tslock_prob_on_good_snr, uint, 0444);
MODULE_PARM_DESC(recover_tslock_prob_on_good_snr,
"Probability recovering the TS lock when the signal improves");
static unsigned int mock_power_up_delay_msec;
-module_param(mock_power_up_delay_msec, uint, 0);
+module_param(mock_power_up_delay_msec, uint, 0444);
MODULE_PARM_DESC(mock_power_up_delay_msec, "Simulate a power up delay");
static unsigned int mock_tune_delay_msec;
-module_param(mock_tune_delay_msec, uint, 0);
+module_param(mock_tune_delay_msec, uint, 0444);
MODULE_PARM_DESC(mock_tune_delay_msec, "Simulate a tune delay");
static unsigned int vidtv_valid_dvb_t_freqs[NUM_VALID_TUNER_FREQS] = {
474000000
};
-module_param_array(vidtv_valid_dvb_t_freqs, uint, NULL, 0);
+module_param_array(vidtv_valid_dvb_t_freqs, uint, NULL, 0444);
MODULE_PARM_DESC(vidtv_valid_dvb_t_freqs,
"Valid DVB-T frequencies to simulate, in Hz");
@@ -74,19 +74,19 @@ static unsigned int vidtv_valid_dvb_c_freqs[NUM_VALID_TUNER_FREQS] = {
474000000
};
-module_param_array(vidtv_valid_dvb_c_freqs, uint, NULL, 0);
+module_param_array(vidtv_valid_dvb_c_freqs, uint, NULL, 0444);
MODULE_PARM_DESC(vidtv_valid_dvb_c_freqs,
"Valid DVB-C frequencies to simulate, in Hz");
static unsigned int vidtv_valid_dvb_s_freqs[NUM_VALID_TUNER_FREQS] = {
11362000
};
-module_param_array(vidtv_valid_dvb_s_freqs, uint, NULL, 0);
+module_param_array(vidtv_valid_dvb_s_freqs, uint, NULL, 0444);
MODULE_PARM_DESC(vidtv_valid_dvb_s_freqs,
"Valid DVB-S/S2 frequencies to simulate at Ku-Band, in kHz");
static unsigned int max_frequency_shift_hz;
-module_param(max_frequency_shift_hz, uint, 0);
+module_param(max_frequency_shift_hz, uint, 0444);
MODULE_PARM_DESC(max_frequency_shift_hz,
"Maximum shift in HZ allowed when tuning in a channel");
@@ -96,24 +96,24 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nums);
* Influences the signal acquisition time. See ISO/IEC 13818-1 : 2000. p. 113.
*/
static unsigned int si_period_msec = 40;
-module_param(si_period_msec, uint, 0);
+module_param(si_period_msec, uint, 0444);
MODULE_PARM_DESC(si_period_msec, "How often to send SI packets. Default: 40ms");
static unsigned int pcr_period_msec = 40;
-module_param(pcr_period_msec, uint, 0);
+module_param(pcr_period_msec, uint, 0444);
MODULE_PARM_DESC(pcr_period_msec,
"How often to send PCR packets. Default: 40ms");
static unsigned int mux_rate_kbytes_sec = 4096;
-module_param(mux_rate_kbytes_sec, uint, 0);
+module_param(mux_rate_kbytes_sec, uint, 0444);
MODULE_PARM_DESC(mux_rate_kbytes_sec, "Mux rate: will pad stream if below");
static unsigned int pcr_pid = 0x200;
-module_param(pcr_pid, uint, 0);
+module_param(pcr_pid, uint, 0444);
MODULE_PARM_DESC(pcr_pid, "PCR PID for all channels: defaults to 0x200");
static unsigned int mux_buf_sz_pkts;
-module_param(mux_buf_sz_pkts, uint, 0);
+module_param(mux_buf_sz_pkts, uint, 0444);
MODULE_PARM_DESC(mux_buf_sz_pkts,
"Size for the internal mux buffer in multiples of 188 bytes");
diff --git a/drivers/media/test-drivers/visl/visl-core.c b/drivers/media/test-drivers/visl/visl-core.c
index 68dac896277b1..c46464bcaf2e1 100644
--- a/drivers/media/test-drivers/visl/visl-core.c
+++ b/drivers/media/test-drivers/visl/visl-core.c
@@ -64,30 +64,35 @@ MODULE_PARM_DESC(visl_transtime_ms, " simulated process time in milliseconds.");
* particular number of frames
*/
int visl_dprintk_frame_start = -1;
-module_param(visl_dprintk_frame_start, int, 0);
+module_param(visl_dprintk_frame_start, int, 0444);
MODULE_PARM_DESC(visl_dprintk_frame_start,
" a frame number to start tracing with dprintk");
unsigned int visl_dprintk_nframes;
-module_param(visl_dprintk_nframes, uint, 0);
+module_param(visl_dprintk_nframes, uint, 0444);
MODULE_PARM_DESC(visl_dprintk_nframes,
" the number of frames to trace with dprintk");
bool keep_bitstream_buffers;
-module_param(keep_bitstream_buffers, bool, false);
+module_param(keep_bitstream_buffers, bool, 0444);
MODULE_PARM_DESC(keep_bitstream_buffers,
" keep bitstream buffers in debugfs after streaming is stopped");
int bitstream_trace_frame_start = -1;
-module_param(bitstream_trace_frame_start, int, 0);
+module_param(bitstream_trace_frame_start, int, 0444);
MODULE_PARM_DESC(bitstream_trace_frame_start,
" a frame number to start dumping the bitstream through debugfs");
unsigned int bitstream_trace_nframes;
-module_param(bitstream_trace_nframes, uint, 0);
+module_param(bitstream_trace_nframes, uint, 0444);
MODULE_PARM_DESC(bitstream_trace_nframes,
" the number of frames to dump the bitstream through debugfs");
+bool tpg_verbose;
+module_param(tpg_verbose, bool, 0644);
+MODULE_PARM_DESC(tpg_verbose,
+ " add more verbose information on the generated output frames");
+
static const struct visl_ctrl_desc visl_fwht_ctrl_descs[] = {
{
.cfg.id = V4L2_CID_STATELESS_FWHT_PARAMS,
diff --git a/drivers/media/test-drivers/visl/visl-dec.c b/drivers/media/test-drivers/visl/visl-dec.c
index f21260054e0fc..6a9639bd4d61a 100644
--- a/drivers/media/test-drivers/visl/visl-dec.c
+++ b/drivers/media/test-drivers/visl/visl-dec.c
@@ -42,6 +42,22 @@ static void *plane_vaddr(struct tpg_data *tpg, struct vb2_buffer *buf,
return vbuf;
}
+static void visl_print_ts_idx(u8 **buf, __kernel_size_t *buflen, const char *name,
+ u64 ts, struct vb2_buffer *vb2_buf)
+{
+ u32 len;
+
+ if (tpg_verbose && vb2_buf) {
+ len = scnprintf(*buf, *buflen, "%s: %lld, vb2_idx: %d\n", name,
+ ts, vb2_buf->index);
+ } else {
+ len = scnprintf(*buf, *buflen, "%s: %lld\n", name, ts);
+ }
+
+ *buf += len;
+ *buflen -= len;
+}
+
static void visl_get_ref_frames(struct visl_ctx *ctx, u8 *buf,
__kernel_size_t buflen, struct visl_run *run)
{
@@ -63,9 +79,9 @@ static void visl_get_ref_frames(struct visl_ctx *ctx, u8 *buf,
vb2_buf = vb2_find_buffer(cap_q, run->fwht.params->backward_ref_ts);
- scnprintf(buf, buflen, "backwards_ref_ts: %lld, vb2_idx: %d",
- run->fwht.params->backward_ref_ts,
- vb2_buf ? vb2_buf->index : -1);
+ visl_print_ts_idx(&buf, &buflen, "backwards_ref_ts",
+ run->fwht.params->backward_ref_ts, vb2_buf);
+
break;
}
@@ -76,13 +92,11 @@ static void visl_get_ref_frames(struct visl_ctx *ctx, u8 *buf,
b_ref = vb2_find_buffer(cap_q, run->mpeg2.pic->backward_ref_ts);
f_ref = vb2_find_buffer(cap_q, run->mpeg2.pic->forward_ref_ts);
- scnprintf(buf, buflen,
- "backward_ref_ts: %llu, vb2_idx: %d\n"
- "forward_ref_ts: %llu, vb2_idx: %d\n",
- run->mpeg2.pic->backward_ref_ts,
- b_ref ? b_ref->index : -1,
- run->mpeg2.pic->forward_ref_ts,
- f_ref ? f_ref->index : -1);
+ visl_print_ts_idx(&buf, &buflen, "backward_ref_ts",
+ run->mpeg2.pic->backward_ref_ts, b_ref);
+ visl_print_ts_idx(&buf, &buflen, "forward_ref_ts",
+ run->mpeg2.pic->forward_ref_ts, f_ref);
+
break;
}
@@ -95,16 +109,13 @@ static void visl_get_ref_frames(struct visl_ctx *ctx, u8 *buf,
golden = vb2_find_buffer(cap_q, run->vp8.frame->golden_frame_ts);
alt = vb2_find_buffer(cap_q, run->vp8.frame->alt_frame_ts);
- scnprintf(buf, buflen,
- "last_ref_ts: %llu, vb2_idx: %d\n"
- "golden_ref_ts: %llu, vb2_idx: %d\n"
- "alt_ref_ts: %llu, vb2_idx: %d\n",
- run->vp8.frame->last_frame_ts,
- last ? last->index : -1,
- run->vp8.frame->golden_frame_ts,
- golden ? golden->index : -1,
- run->vp8.frame->alt_frame_ts,
- alt ? alt->index : -1);
+ visl_print_ts_idx(&buf, &buflen, "last_ref_ts",
+ run->vp8.frame->last_frame_ts, last);
+ visl_print_ts_idx(&buf, &buflen, "golden_ref_ts",
+ run->vp8.frame->golden_frame_ts, golden);
+ visl_print_ts_idx(&buf, &buflen, "alt_ref_ts",
+ run->vp8.frame->alt_frame_ts, alt);
+
break;
}
@@ -117,28 +128,32 @@ static void visl_get_ref_frames(struct visl_ctx *ctx, u8 *buf,
golden = vb2_find_buffer(cap_q, run->vp9.frame->golden_frame_ts);
alt = vb2_find_buffer(cap_q, run->vp9.frame->alt_frame_ts);
- scnprintf(buf, buflen,
- "last_ref_ts: %llu, vb2_idx: %d\n"
- "golden_ref_ts: %llu, vb2_idx: %d\n"
- "alt_ref_ts: %llu, vb2_idx: %d\n",
- run->vp9.frame->last_frame_ts,
- last ? last->index : -1,
- run->vp9.frame->golden_frame_ts,
- golden ? golden->index : -1,
- run->vp9.frame->alt_frame_ts,
- alt ? alt->index : -1);
+ visl_print_ts_idx(&buf, &buflen, "last_ref_ts",
+ run->vp9.frame->last_frame_ts, last);
+ visl_print_ts_idx(&buf, &buflen, "golden_ref_ts",
+ run->vp9.frame->golden_frame_ts, golden);
+ visl_print_ts_idx(&buf, &buflen, "alt_ref_ts",
+ run->vp9.frame->alt_frame_ts, alt);
+
break;
}
case VISL_CODEC_H264: {
char entry[] = "dpb[%d]:%u, vb2_index: %d\n";
+ char entry_stable[] = "dpb[%d]:%u\n";
struct vb2_buffer *vb2_buf;
for (i = 0; i < ARRAY_SIZE(run->h264.dpram->dpb); i++) {
- vb2_buf = vb2_find_buffer(cap_q, run->h264.dpram->dpb[i].reference_ts);
- len = scnprintf(buf, buflen, entry, i,
- run->h264.dpram->dpb[i].reference_ts,
- vb2_buf ? vb2_buf->index : -1);
+ vb2_buf = vb2_find_buffer(cap_q,
+ run->h264.dpram->dpb[i].reference_ts);
+ if (tpg_verbose && vb2_buf) {
+ len = scnprintf(buf, buflen, entry, i,
+ run->h264.dpram->dpb[i].reference_ts,
+ vb2_buf->index);
+ } else {
+ len = scnprintf(buf, buflen, entry_stable, i,
+ run->h264.dpram->dpb[i].reference_ts);
+ }
buf += len;
buflen -= len;
}
@@ -148,13 +163,20 @@ static void visl_get_ref_frames(struct visl_ctx *ctx, u8 *buf,
case VISL_CODEC_HEVC: {
char entry[] = "dpb[%d]:%u, vb2_index: %d\n";
+ char entry_stable[] = "dpb[%d]:%u\n";
struct vb2_buffer *vb2_buf;
for (i = 0; i < ARRAY_SIZE(run->hevc.dpram->dpb); i++) {
vb2_buf = vb2_find_buffer(cap_q, run->hevc.dpram->dpb[i].timestamp);
- len = scnprintf(buf, buflen, entry, i,
- run->hevc.dpram->dpb[i].timestamp,
- vb2_buf ? vb2_buf->index : -1);
+ if (tpg_verbose && vb2_buf) {
+ len = scnprintf(buf, buflen, entry, i,
+ run->hevc.dpram->dpb[i].timestamp,
+ vb2_buf->index);
+ } else {
+ len = scnprintf(buf, buflen, entry_stable, i,
+ run->hevc.dpram->dpb[i].timestamp);
+ }
+
buf += len;
buflen -= len;
}
@@ -171,43 +193,38 @@ static void visl_get_ref_frames(struct visl_ctx *ctx, u8 *buf,
int idx_alt2 = run->av1.frame->ref_frame_idx[ALT2_BUF_IDX];
int idx_alt = run->av1.frame->ref_frame_idx[ALT_BUF_IDX];
+ const u64 *reference_frame_ts = run->av1.frame->reference_frame_ts;
+
struct vb2_buffer *ref_last =
- vb2_find_buffer(cap_q, run->av1.frame->reference_frame_ts[idx_last]);
+ vb2_find_buffer(cap_q, reference_frame_ts[idx_last]);
struct vb2_buffer *ref_last2 =
- vb2_find_buffer(cap_q, run->av1.frame->reference_frame_ts[idx_last2]);
+ vb2_find_buffer(cap_q, reference_frame_ts[idx_last2]);
struct vb2_buffer *ref_last3 =
- vb2_find_buffer(cap_q, run->av1.frame->reference_frame_ts[idx_last3]);
+ vb2_find_buffer(cap_q, reference_frame_ts[idx_last3]);
struct vb2_buffer *ref_golden =
- vb2_find_buffer(cap_q, run->av1.frame->reference_frame_ts[idx_golden]);
+ vb2_find_buffer(cap_q, reference_frame_ts[idx_golden]);
struct vb2_buffer *ref_bwd =
- vb2_find_buffer(cap_q, run->av1.frame->reference_frame_ts[idx_bwd]);
+ vb2_find_buffer(cap_q, reference_frame_ts[idx_bwd]);
struct vb2_buffer *ref_alt2 =
- vb2_find_buffer(cap_q, run->av1.frame->reference_frame_ts[idx_alt2]);
+ vb2_find_buffer(cap_q, reference_frame_ts[idx_alt2]);
struct vb2_buffer *ref_alt =
- vb2_find_buffer(cap_q, run->av1.frame->reference_frame_ts[idx_alt]);
-
- scnprintf(buf, buflen,
- "ref_last_ts: %llu, vb2_idx: %d\n"
- "ref_last2_ts: %llu, vb2_idx: %d\n"
- "ref_last3_ts: %llu, vb2_idx: %d\n"
- "ref_golden_ts: %llu, vb2_idx: %d\n"
- "ref_bwd_ts: %llu, vb2_idx: %d\n"
- "ref_alt2_ts: %llu, vb2_idx: %d\n"
- "ref_alt_ts: %llu, vb2_idx: %d\n",
- run->av1.frame->reference_frame_ts[idx_last],
- ref_last ? ref_last->index : -1,
- run->av1.frame->reference_frame_ts[idx_last2],
- ref_last2 ? ref_last2->index : -1,
- run->av1.frame->reference_frame_ts[idx_last3],
- ref_last3 ? ref_last3->index : -1,
- run->av1.frame->reference_frame_ts[idx_golden],
- ref_golden ? ref_golden->index : -1,
- run->av1.frame->reference_frame_ts[idx_bwd],
- ref_bwd ? ref_bwd->index : -1,
- run->av1.frame->reference_frame_ts[idx_alt2],
- ref_alt2 ? ref_alt2->index : -1,
- run->av1.frame->reference_frame_ts[idx_alt],
- ref_alt ? ref_alt->index : -1);
+ vb2_find_buffer(cap_q, reference_frame_ts[idx_alt]);
+
+ visl_print_ts_idx(&buf, &buflen, "ref_last_ts",
+ reference_frame_ts[idx_last], ref_last);
+ visl_print_ts_idx(&buf, &buflen, "ref_last2_ts",
+ reference_frame_ts[idx_last2], ref_last2);
+ visl_print_ts_idx(&buf, &buflen, "ref_last3_ts",
+ reference_frame_ts[idx_last3], ref_last3);
+ visl_print_ts_idx(&buf, &buflen, "ref_golden_ts",
+ reference_frame_ts[idx_golden], ref_golden);
+ visl_print_ts_idx(&buf, &buflen, "ref_bwd_ts",
+ reference_frame_ts[idx_bwd], ref_bwd);
+ visl_print_ts_idx(&buf, &buflen, "ref_alt2_ts",
+ reference_frame_ts[idx_alt2], ref_alt2);
+ visl_print_ts_idx(&buf, &buflen, "ref_alt_ts",
+ reference_frame_ts[idx_alt], ref_alt);
+
break;
}
}
@@ -254,15 +271,23 @@ static void visl_tpg_fill_sequence(struct visl_ctx *ctx,
struct visl_run *run, char buf[], size_t bufsz)
{
u32 stream_ms;
-
- stream_ms = jiffies_to_msecs(get_jiffies_64() - ctx->capture_streamon_jiffies);
+ int len;
+
+ if (tpg_verbose) {
+ stream_ms = jiffies_to_msecs(get_jiffies_64() - ctx->capture_streamon_jiffies);
+
+ len = scnprintf(buf, bufsz,
+ "stream time: %02d:%02d:%02d:%03d ",
+ (stream_ms / (60 * 60 * 1000)) % 24,
+ (stream_ms / (60 * 1000)) % 60,
+ (stream_ms / 1000) % 60,
+ stream_ms % 1000);
+ buf += len;
+ bufsz -= len;
+ }
scnprintf(buf, bufsz,
- "stream time: %02d:%02d:%02d:%03d sequence:%u timestamp:%lld field:%s",
- (stream_ms / (60 * 60 * 1000)) % 24,
- (stream_ms / (60 * 1000)) % 60,
- (stream_ms / 1000) % 60,
- stream_ms % 1000,
+ "sequence:%u timestamp:%lld field:%s",
run->dst->sequence,
run->dst->vb2_buf.timestamp,
(run->dst->field == V4L2_FIELD_ALTERNATE) ?
@@ -270,6 +295,35 @@ static void visl_tpg_fill_sequence(struct visl_ctx *ctx,
" top" : " bottom") : "none");
}
+static bool visl_tpg_fill_codec_specific(struct visl_ctx *ctx,
+ struct visl_run *run,
+ char buf[], size_t bufsz)
+{
+ /*
+ * To add variability, we need a value that is stable for a given
+ * input but is different than already shown fields.
+ * The pic order count value defines the display order of the frames
+ * (which can be different than the decoding order that is shown with
+ * the sequence number).
+ * Therefore it is stable for a given input and will add a different
+ * value that is more specific to the way the input is encoded.
+ */
+ switch (ctx->current_codec) {
+ case VISL_CODEC_H264:
+ scnprintf(buf, bufsz,
+ "H264: %u", run->h264.dpram->pic_order_cnt_lsb);
+ break;
+ case VISL_CODEC_HEVC:
+ scnprintf(buf, bufsz,
+ "HEVC: %d", run->hevc.dpram->pic_order_cnt_val);
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
static void visl_tpg_fill(struct visl_ctx *ctx, struct visl_run *run)
{
u8 *basep[TPG_MAX_PLANES][2];
@@ -302,6 +356,13 @@ static void visl_tpg_fill(struct visl_ctx *ctx, struct visl_run *run)
frame_dprintk(ctx->dev, run->dst->sequence, "");
line++;
+ if (visl_tpg_fill_codec_specific(ctx, run, buf, TPG_STR_BUF_SZ)) {
+ tpg_gen_text(&ctx->tpg, basep, line++ * line_height, 16, buf);
+ frame_dprintk(ctx->dev, run->dst->sequence, "%s\n", buf);
+ frame_dprintk(ctx->dev, run->dst->sequence, "");
+ line++;
+ }
+
visl_get_ref_frames(ctx, buf, TPG_STR_BUF_SZ, run);
while ((line_str = strsep(&tmp, "\n")) && strlen(line_str)) {
@@ -338,35 +399,37 @@ static void visl_tpg_fill(struct visl_ctx *ctx, struct visl_run *run)
frame_dprintk(ctx->dev, run->dst->sequence, "%s\n", buf);
}
- line++;
- frame_dprintk(ctx->dev, run->dst->sequence, "");
- scnprintf(buf, TPG_STR_BUF_SZ, "Output queue status:");
- tpg_gen_text(&ctx->tpg, basep, line++ * line_height, 16, buf);
- frame_dprintk(ctx->dev, run->dst->sequence, "%s\n", buf);
+ if (tpg_verbose) {
+ line++;
+ frame_dprintk(ctx->dev, run->dst->sequence, "");
+ scnprintf(buf, TPG_STR_BUF_SZ, "Output queue status:");
+ tpg_gen_text(&ctx->tpg, basep, line++ * line_height, 16, buf);
+ frame_dprintk(ctx->dev, run->dst->sequence, "%s\n", buf);
- len = 0;
- for (i = 0; i < vb2_get_num_buffers(out_q); i++) {
- char entry[] = "index: %u, state: %s, request_fd: %d, ";
- u32 old_len = len;
- struct vb2_buffer *vb2;
- char *q_status;
+ len = 0;
+ for (i = 0; i < vb2_get_num_buffers(out_q); i++) {
+ char entry[] = "index: %u, state: %s, request_fd: %d, ";
+ u32 old_len = len;
+ struct vb2_buffer *vb2;
+ char *q_status;
- vb2 = vb2_get_buffer(out_q, i);
- if (!vb2)
- continue;
+ vb2 = vb2_get_buffer(out_q, i);
+ if (!vb2)
+ continue;
- q_status = visl_get_vb2_state(vb2->state);
+ q_status = visl_get_vb2_state(vb2->state);
- len += scnprintf(&buf[len], TPG_STR_BUF_SZ - len,
- entry, i, q_status,
- to_vb2_v4l2_buffer(vb2)->request_fd);
+ len += scnprintf(&buf[len], TPG_STR_BUF_SZ - len,
+ entry, i, q_status,
+ to_vb2_v4l2_buffer(vb2)->request_fd);
- len += visl_fill_bytesused(to_vb2_v4l2_buffer(vb2),
- &buf[len],
- TPG_STR_BUF_SZ - len);
+ len += visl_fill_bytesused(to_vb2_v4l2_buffer(vb2),
+ &buf[len],
+ TPG_STR_BUF_SZ - len);
- tpg_gen_text(&ctx->tpg, basep, line++ * line_height, 16, &buf[old_len]);
- frame_dprintk(ctx->dev, run->dst->sequence, "%s", &buf[old_len]);
+ tpg_gen_text(&ctx->tpg, basep, line++ * line_height, 16, &buf[old_len]);
+ frame_dprintk(ctx->dev, run->dst->sequence, "%s", &buf[old_len]);
+ }
}
line++;
@@ -398,32 +461,34 @@ static void visl_tpg_fill(struct visl_ctx *ctx, struct visl_run *run)
frame_dprintk(ctx->dev, run->dst->sequence, "%s\n", buf);
}
- line++;
- frame_dprintk(ctx->dev, run->dst->sequence, "");
- scnprintf(buf, TPG_STR_BUF_SZ, "Capture queue status:");
- tpg_gen_text(&ctx->tpg, basep, line++ * line_height, 16, buf);
- frame_dprintk(ctx->dev, run->dst->sequence, "%s\n", buf);
+ if (tpg_verbose) {
+ line++;
+ frame_dprintk(ctx->dev, run->dst->sequence, "");
+ scnprintf(buf, TPG_STR_BUF_SZ, "Capture queue status:");
+ tpg_gen_text(&ctx->tpg, basep, line++ * line_height, 16, buf);
+ frame_dprintk(ctx->dev, run->dst->sequence, "%s\n", buf);
- len = 0;
- for (i = 0; i < vb2_get_num_buffers(cap_q); i++) {
- u32 old_len = len;
- struct vb2_buffer *vb2;
- char *q_status;
+ len = 0;
+ for (i = 0; i < vb2_get_num_buffers(cap_q); i++) {
+ u32 old_len = len;
+ struct vb2_buffer *vb2;
+ char *q_status;
- vb2 = vb2_get_buffer(cap_q, i);
- if (!vb2)
- continue;
+ vb2 = vb2_get_buffer(cap_q, i);
+ if (!vb2)
+ continue;
- q_status = visl_get_vb2_state(vb2->state);
+ q_status = visl_get_vb2_state(vb2->state);
- len += scnprintf(&buf[len], TPG_STR_BUF_SZ - len,
- "index: %u, status: %s, timestamp: %llu, is_held: %d",
- vb2->index, q_status,
- vb2->timestamp,
- to_vb2_v4l2_buffer(vb2)->is_held);
+ len += scnprintf(&buf[len], TPG_STR_BUF_SZ - len,
+ "index: %u, status: %s, timestamp: %llu, is_held: %d",
+ vb2->index, q_status,
+ vb2->timestamp,
+ to_vb2_v4l2_buffer(vb2)->is_held);
- tpg_gen_text(&ctx->tpg, basep, line++ * line_height, 16, &buf[old_len]);
- frame_dprintk(ctx->dev, run->dst->sequence, "%s", &buf[old_len]);
+ tpg_gen_text(&ctx->tpg, basep, line++ * line_height, 16, &buf[old_len]);
+ frame_dprintk(ctx->dev, run->dst->sequence, "%s", &buf[old_len]);
+ }
}
}
diff --git a/drivers/media/test-drivers/visl/visl.h b/drivers/media/test-drivers/visl/visl.h
index c593b1337f116..434e9efbf9b21 100644
--- a/drivers/media/test-drivers/visl/visl.h
+++ b/drivers/media/test-drivers/visl/visl.h
@@ -85,6 +85,7 @@ extern unsigned int visl_dprintk_nframes;
extern bool keep_bitstream_buffers;
extern int bitstream_trace_frame_start;
extern unsigned int bitstream_trace_nframes;
+extern bool tpg_verbose;
#define frame_dprintk(dev, current, fmt, arg...) \
do { \
diff --git a/drivers/media/tuners/tda18271-fe.c b/drivers/media/tuners/tda18271-fe.c
index f0371d004b36d..a7e721baaa997 100644
--- a/drivers/media/tuners/tda18271-fe.c
+++ b/drivers/media/tuners/tda18271-fe.c
@@ -470,7 +470,6 @@ static int tda18271_powerscan(struct dvb_frontend *fe,
/* algorithm initialization */
sgn = 1;
*freq_out = *freq_in;
- bcal = 0;
count = 0;
wait = false;
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index 57ded9ff3f043..29bc63021c5aa 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -1515,10 +1515,10 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
{
struct xc4000_priv *priv = fe->tuner_priv;
+ mutex_lock(&priv->lock);
*freq = priv->freq_hz + priv->freq_offset;
if (debug) {
- mutex_lock(&priv->lock);
if ((priv->cur_fw.type
& (BASE | FM | DTV6 | DTV7 | DTV78 | DTV8)) == BASE) {
u16 snr = 0;
@@ -1529,8 +1529,8 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
return 0;
}
}
- mutex_unlock(&priv->lock);
}
+ mutex_unlock(&priv->lock);
dprintk(1, "%s()\n", __func__);
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index 3b75d062e6025..343a4433ed24c 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1759,7 +1759,7 @@ int cx231xx_417_register(struct cx231xx *dev)
dev->mpeg_ctrl_handler.ops = &cx231xx_ops;
if (dev->sd_cx25840)
v4l2_ctrl_add_handler(&dev->mpeg_ctrl_handler.hdl,
- dev->sd_cx25840->ctrl_handler, NULL, false);
+ dev->sd_cx25840->ctrl_handler, NULL, true);
if (dev->mpeg_ctrl_handler.hdl.error) {
err = dev->mpeg_ctrl_handler.hdl.error;
dprintk(3, "%s: can't add cx25840 controls\n", dev->name);
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index 0990aa4a17bb9..cbb0541d4dc1f 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -126,8 +126,6 @@ struct usb_data_stream_properties {
* @caps: capabilities of the DVB USB device.
* @pid_filter_count: number of PID filter position in the optional hardware
* PID-filter.
- * @num_frontends: number of frontends of the DVB USB adapter.
- * @frontend_ctrl: called to power on/off active frontend.
* @streaming_ctrl: called to start and stop the MPEG2-TS streaming of the
* device (not URB submitting/killing).
* This callback will be called without data URBs being active - data URBs
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 4d037c92af7c5..bae76023cf71d 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -4094,6 +4094,10 @@ static int em28xx_usb_probe(struct usb_interface *intf,
* topology will likely change after the load of the em28xx subdrivers.
*/
#ifdef CONFIG_MEDIA_CONTROLLER
+ /*
+ * No need to check the return value, the device will still be
+ * usable without media controller API.
+ */
retval = media_device_register(dev->media_dev);
#endif
diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c
index 0c24e29843048..eb03f98b2ef11 100644
--- a/drivers/media/usb/go7007/go7007-driver.c
+++ b/drivers/media/usb/go7007/go7007-driver.c
@@ -80,7 +80,7 @@ static int go7007_load_encoder(struct go7007 *go)
const struct firmware *fw_entry;
char fw_name[] = "go7007/go7007fw.bin";
void *bounce;
- int fw_len, rv = 0;
+ int fw_len;
u16 intr_val, intr_data;
if (go->boot_fw == NULL) {
@@ -109,9 +109,11 @@ static int go7007_load_encoder(struct go7007 *go)
go7007_read_interrupt(go, &intr_val, &intr_data) < 0 ||
(intr_val & ~0x1) != 0x5a5a) {
v4l2_err(go, "error transferring firmware\n");
- rv = -1;
+ kfree(go->boot_fw);
+ go->boot_fw = NULL;
+ return -1;
}
- return rv;
+ return 0;
}
MODULE_FIRMWARE("go7007/go7007fw.bin");
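The go7007_load_encoder() hunk frees and clears go->boot_fw before bailing out, so a failed transfer does not leave a stale cached copy behind for a later retry. A minimal sketch of that free-and-NULL error path, assuming a hypothetical enc_state structure:

#include <linux/slab.h>
#include <linux/types.h>

struct enc_state {
        u8 *boot_fw;    /* cached firmware copy, reloaded on demand */
};

static int enc_finish_load(struct enc_state *st, bool transfer_ok)
{
        if (!transfer_ok) {
                /* free and clear so the next attempt starts from scratch */
                kfree(st->boot_fw);
                st->boot_fw = NULL;
                return -1;
        }
        return 0;
}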
diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
index eeb85981e02b6..762c13e49bfa5 100644
--- a/drivers/media/usb/go7007/go7007-usb.c
+++ b/drivers/media/usb/go7007/go7007-usb.c
@@ -1201,7 +1201,9 @@ static int go7007_usb_probe(struct usb_interface *intf,
u16 channel;
/* read channel number from GPIO[1:0] */
- go7007_read_addr(go, 0x3c81, &channel);
+ if (go7007_read_addr(go, 0x3c81, &channel))
+ goto allocfail;
+
channel &= 0x3;
go->board_id = GO7007_BOARDID_ADLINK_MPG24;
usb->board = board = &board_adlink_mpg24;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c
index 1764674de98bc..73c95ba2328a4 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-context.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c
@@ -90,8 +90,10 @@ static void pvr2_context_destroy(struct pvr2_context *mp)
}
-static void pvr2_context_notify(struct pvr2_context *mp)
+static void pvr2_context_notify(void *ptr)
{
+ struct pvr2_context *mp = ptr;
+
pvr2_context_set_notify(mp,!0);
}
@@ -106,9 +108,7 @@ static void pvr2_context_check(struct pvr2_context *mp)
pvr2_trace(PVR2_TRACE_CTXT,
"pvr2_context %p (initialize)", mp);
/* Finish hardware initialization */
- if (pvr2_hdw_initialize(mp->hdw,
- (void (*)(void *))pvr2_context_notify,
- mp)) {
+ if (pvr2_hdw_initialize(mp->hdw, pvr2_context_notify, mp)) {
mp->video_stream.stream =
pvr2_hdw_get_video_stream(mp->hdw);
/* Trigger interface initialization. By doing this
@@ -267,9 +267,9 @@ static void pvr2_context_exit(struct pvr2_context *mp)
void pvr2_context_disconnect(struct pvr2_context *mp)
{
pvr2_hdw_disconnect(mp->hdw);
- mp->disconnect_flag = !0;
if (!pvr2_context_shutok())
pvr2_context_notify(mp);
+ mp->disconnect_flag = !0;
}
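pvr2_context_notify() (and the DVB/V4L2 notifiers below) now take a void * and cast inside the function, instead of having the function pointer itself cast at the registration site; calling through a mismatched function-pointer type is undefined behaviour and trips kernel CFI. A minimal sketch of the pattern, with illustrative names:

typedef void (*stream_callback)(void *priv);

struct consumer {
        int pending;
};

/* Prototype matches stream_callback exactly; cast only the data inside. */
static void consumer_notify(void *ptr)
{
        struct consumer *c = ptr;

        c->pending = 1;
}

/* Registration then needs no function-pointer cast, e.g.:          */
/*      pvr2_stream_set_callback(stream, consumer_notify, c);       */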
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
index 26811efe0fb58..3610139fb9ad7 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
@@ -33,9 +33,6 @@ static int pvr2_dvb_feed_func(struct pvr2_dvb_adapter *adap)
for (;;) {
if (kthread_should_stop()) break;
- /* Not sure about this... */
- try_to_freeze();
-
bp = pvr2_stream_get_ready_buffer(stream);
if (bp != NULL) {
count = pvr2_buffer_get_count(bp);
@@ -62,8 +59,7 @@ static int pvr2_dvb_feed_func(struct pvr2_dvb_adapter *adap)
/* Wait until more buffers become available or we're
told not to wait any longer. */
- ret = wait_event_interruptible(
- adap->buffer_wait_data,
+ ret = wait_event_freezable(adap->buffer_wait_data,
(pvr2_stream_get_ready_count(stream) > 0) ||
kthread_should_stop());
if (ret < 0) break;
@@ -88,8 +84,10 @@ static int pvr2_dvb_feed_thread(void *data)
return stat;
}
-static void pvr2_dvb_notify(struct pvr2_dvb_adapter *adap)
+static void pvr2_dvb_notify(void *ptr)
{
+ struct pvr2_dvb_adapter *adap = ptr;
+
wake_up(&adap->buffer_wait_data);
}
@@ -149,7 +147,7 @@ static int pvr2_dvb_stream_do_start(struct pvr2_dvb_adapter *adap)
}
pvr2_stream_set_callback(pvr->video_stream.stream,
- (pvr2_stream_callback) pvr2_dvb_notify, adap);
+ pvr2_dvb_notify, adap);
ret = pvr2_stream_set_buffer_count(stream, PVR2_DVB_BUFFER_COUNT);
if (ret < 0) return ret;
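The feed-thread hunk drops the open-coded try_to_freeze() and waits with wait_event_freezable() instead, which lets the kthread be frozen while it sleeps on the condition. A hedged sketch of the loop shape; the wait queue and condition are stand-ins for the driver's real state:

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static int feed_thread(void *data)
{
        wait_queue_head_t *wq = data;
        bool work_ready = false;        /* stands in for the real condition */

        while (!kthread_should_stop()) {
                int ret;

                /* sleeps interruptibly and cooperates with the freezer */
                ret = wait_event_freezable(*wq,
                                           work_ready || kthread_should_stop());
                if (ret < 0)
                        break;

                /* ... consume whatever buffers are ready ... */
        }
        return 0;
}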
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index c04ab7258d645..d608b793fa847 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -1033,8 +1033,10 @@ static int pvr2_v4l2_open(struct file *file)
}
-static void pvr2_v4l2_notify(struct pvr2_v4l2_fh *fhp)
+static void pvr2_v4l2_notify(void *ptr)
{
+ struct pvr2_v4l2_fh *fhp = ptr;
+
wake_up(&fhp->wait_data);
}
@@ -1067,7 +1069,7 @@ static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh)
hdw = fh->channel.mc_head->hdw;
sp = fh->pdi->stream->stream;
- pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh);
+ pvr2_stream_set_callback(sp, pvr2_v4l2_notify, fh);
pvr2_hdw_set_stream_type(hdw,fh->pdi->config);
if ((ret = pvr2_hdw_set_streaming(hdw,!0)) < 0) return ret;
return pvr2_ioread_set_enabled(fh->rhp,!0);
@@ -1198,11 +1200,6 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
dip->minor_type = pvr2_v4l_type_video;
nr_ptr = video_nr;
caps |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO;
- if (!dip->stream) {
- pr_err(KBUILD_MODNAME
- ": Failed to set up pvrusb2 v4l video dev due to missing stream instance\n");
- return;
- }
break;
case VFL_TYPE_VBI:
dip->config = pvr2_config_vbi;
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 3c2627712fe9d..8e1de1e8bd127 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -1906,9 +1906,10 @@ static int s2255_get_fx2fw(struct s2255_dev *dev)
{
int fw;
int ret;
- unsigned char transBuffer[64];
- ret = s2255_vendor_req(dev, S2255_VR_FW, 0, 0, transBuffer, 2,
- S2255_VR_IN);
+ u8 transBuffer[2] = {};
+
+ ret = s2255_vendor_req(dev, S2255_VR_FW, 0, 0, transBuffer,
+ sizeof(transBuffer), S2255_VR_IN);
if (ret < 0)
dprintk(dev, 2, "get fw error: %x\n", ret);
fw = transBuffer[0] + (transBuffer[1] << 8);
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 9d9e14c858e67..723510520d092 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -724,5 +724,5 @@ static struct usb_driver smsusb_driver = {
module_usb_driver(smsusb_driver);
MODULE_DESCRIPTION("Driver for the Siano SMS1xxx USB dongle");
-MODULE_AUTHOR("Siano Mobile Silicon, INC. (uris@siano-ms.com)");
+MODULE_AUTHOR("Siano Mobile Silicon, Inc. <uris@siano-ms.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index 62a583040cd48..702f1c8bd2ab3 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -963,15 +963,8 @@ ctrl_fail:
void usbtv_video_free(struct usbtv *usbtv)
{
- mutex_lock(&usbtv->vb2q_lock);
- mutex_lock(&usbtv->v4l2_lock);
-
- usbtv_stop(usbtv);
vb2_video_unregister_device(&usbtv->vdev);
v4l2_device_disconnect(&usbtv->v4l2_dev);
- mutex_unlock(&usbtv->v4l2_lock);
- mutex_unlock(&usbtv->vb2q_lock);
-
v4l2_device_put(&usbtv->v4l2_dev);
}
diff --git a/drivers/media/v4l2-core/v4l2-cci.c b/drivers/media/v4l2-core/v4l2-cci.c
index 10005c80f43b5..ee3475bed37fa 100644
--- a/drivers/media/v4l2-core/v4l2-cci.c
+++ b/drivers/media/v4l2-core/v4l2-cci.c
@@ -32,7 +32,7 @@ int cci_read(struct regmap *map, u32 reg, u64 *val, int *err)
ret = regmap_bulk_read(map, reg, buf, len);
if (ret) {
- dev_err(regmap_get_device(map), "Error reading reg 0x%4x: %d\n",
+ dev_err(regmap_get_device(map), "Error reading reg 0x%04x: %d\n",
reg, ret);
goto out;
}
@@ -131,7 +131,7 @@ int cci_write(struct regmap *map, u32 reg, u64 val, int *err)
ret = regmap_bulk_write(map, reg, buf, len);
if (ret)
- dev_err(regmap_get_device(map), "Error writing reg 0x%4x: %d\n",
+ dev_err(regmap_get_device(map), "Error writing reg 0x%04x: %d\n",
reg, ret);
out:
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 273d83de2a876..d34d210908d96 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -585,3 +585,50 @@ u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator)
return denominator ? numerator * multiplier / denominator : 0;
}
EXPORT_SYMBOL_GPL(v4l2_fraction_to_interval);
+
+int v4l2_link_freq_to_bitmap(struct device *dev, const u64 *fw_link_freqs,
+ unsigned int num_of_fw_link_freqs,
+ const s64 *driver_link_freqs,
+ unsigned int num_of_driver_link_freqs,
+ unsigned long *bitmap)
+{
+ unsigned int i;
+
+ *bitmap = 0;
+
+ if (!num_of_fw_link_freqs) {
+ dev_err(dev, "no link frequencies in firmware\n");
+ return -ENODATA;
+ }
+
+ for (i = 0; i < num_of_fw_link_freqs; i++) {
+ unsigned int j;
+
+ for (j = 0; j < num_of_driver_link_freqs; j++) {
+ if (fw_link_freqs[i] != driver_link_freqs[j])
+ continue;
+
+ dev_dbg(dev, "enabling link frequency %lld Hz\n",
+ driver_link_freqs[j]);
+ *bitmap |= BIT(j);
+ break;
+ }
+ }
+
+ if (!*bitmap) {
+ dev_err(dev, "no matching link frequencies found\n");
+
+ dev_dbg(dev, "specified in firmware:\n");
+ for (i = 0; i < num_of_fw_link_freqs; i++)
+ dev_dbg(dev, "\t%llu Hz\n", fw_link_freqs[i]);
+
+ dev_dbg(dev, "driver supported:\n");
+ for (i = 0; i < num_of_driver_link_freqs; i++)
+ dev_dbg(dev, "\t%lld Hz\n", driver_link_freqs[i]);
+
+ return -ENOENT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_link_freq_to_bitmap);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c
index 002ea6588edf1..d9a422017bd9d 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-api.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c
@@ -1179,7 +1179,7 @@ int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
return -EINVAL;
/* Use mask to see if this menu item should be skipped */
- if (ctrl->menu_skip_mask & (1ULL << i))
+ if (i < BITS_PER_LONG_LONG && (ctrl->menu_skip_mask & BIT_ULL(i)))
return -EINVAL;
/* Empty menu items should also be skipped */
if (ctrl->type == V4L2_CTRL_TYPE_MENU) {
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
index a662fb60f73f4..c4d995f32191c 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
@@ -1504,11 +1504,12 @@ int check_range(enum v4l2_ctrl_type type,
return 0;
case V4L2_CTRL_TYPE_MENU:
case V4L2_CTRL_TYPE_INTEGER_MENU:
- if (min > max || def < min || def > max)
+ if (min > max || def < min || def > max ||
+ min < 0 || (step && max >= BITS_PER_LONG_LONG))
return -ERANGE;
/* Note: step == menu_skip_mask for menu controls.
So here we check if the default value is masked out. */
- if (step && ((1 << def) & step))
+ if (def < BITS_PER_LONG_LONG && (step & BIT_ULL(def)))
return -EINVAL;
return 0;
case V4L2_CTRL_TYPE_STRING:
@@ -2503,7 +2504,8 @@ int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
EXPORT_SYMBOL(v4l2_ctrl_handler_setup);
/* Log the control name and value */
-static void log_ctrl(const struct v4l2_ctrl *ctrl,
+static void log_ctrl(const struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl *ctrl,
const char *prefix, const char *colon)
{
if (ctrl->flags & (V4L2_CTRL_FLAG_DISABLED | V4L2_CTRL_FLAG_WRITE_ONLY))
@@ -2513,7 +2515,11 @@ static void log_ctrl(const struct v4l2_ctrl *ctrl,
pr_info("%s%s%s: ", prefix, colon, ctrl->name);
+ if (ctrl->handler != hdl)
+ v4l2_ctrl_lock(ctrl);
ctrl->type_ops->log(ctrl);
+ if (ctrl->handler != hdl)
+ v4l2_ctrl_unlock(ctrl);
if (ctrl->flags & (V4L2_CTRL_FLAG_INACTIVE |
V4L2_CTRL_FLAG_GRABBED |
@@ -2532,7 +2538,7 @@ static void log_ctrl(const struct v4l2_ctrl *ctrl,
void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
const char *prefix)
{
- struct v4l2_ctrl *ctrl;
+ struct v4l2_ctrl_ref *ref;
const char *colon = "";
int len;
@@ -2544,9 +2550,12 @@ void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
if (len && prefix[len - 1] != ' ')
colon = ": ";
mutex_lock(hdl->lock);
- list_for_each_entry(ctrl, &hdl->ctrls, node)
- if (!(ctrl->flags & V4L2_CTRL_FLAG_DISABLED))
- log_ctrl(ctrl, prefix, colon);
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ if (ref->from_other_dev ||
+ (ref->ctrl->flags & V4L2_CTRL_FLAG_DISABLED))
+ continue;
+ log_ctrl(hdl, ref->ctrl, prefix, colon);
+ }
mutex_unlock(hdl->lock);
}
EXPORT_SYMBOL(v4l2_ctrl_handler_log_status);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 33076af4dfdbd..6e7b8b682d13f 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -3028,7 +3028,7 @@ static long __video_do_ioctl(struct file *file,
if (v4l2_is_known_ioctl(cmd)) {
info = &v4l2_ioctls[_IOC_NR(cmd)];
- if (!test_bit(_IOC_NR(cmd), vfd->valid_ioctls) &&
+ if (!is_valid_ioctl(vfd, cmd) &&
!((info->flags & INFO_FL_CTRL) && vfh && vfh->ctrl_handler))
goto done;
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
index 52d349e72b8ca..4bb91359e3a9a 100644
--- a/drivers/media/v4l2-core/v4l2-mc.c
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -337,12 +337,18 @@ int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd,
src_idx = media_entity_get_fwnode_pad(&src_sd->entity,
endpoint,
MEDIA_PAD_FL_SOURCE);
- if (src_idx < 0)
+ if (src_idx < 0) {
+ dev_dbg(src_sd->dev, "no source pad found for %pfw\n",
+ endpoint);
continue;
+ }
remote_ep = fwnode_graph_get_remote_endpoint(endpoint);
- if (!remote_ep)
+ if (!remote_ep) {
+ dev_dbg(src_sd->dev, "no remote ep found for %pfw\n",
+ endpoint);
continue;
+ }
/*
* ask the sink to verify it owns the remote endpoint,
@@ -353,8 +359,12 @@ int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd,
MEDIA_PAD_FL_SINK);
fwnode_handle_put(remote_ep);
- if (sink_idx < 0 || sink_idx != sink->index)
+ if (sink_idx < 0 || sink_idx != sink->index) {
+ dev_dbg(src_sd->dev,
+ "sink pad index mismatch or error (is %d, expected %u)\n",
+ sink_idx, sink->index);
continue;
+ }
/*
* the source endpoint corresponds to one of its source pads,
@@ -367,8 +377,13 @@ int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd,
src = &src_sd->entity.pads[src_idx];
/* skip if link already exists */
- if (media_entity_find_link(src, sink))
+ if (media_entity_find_link(src, sink)) {
+ dev_dbg(src_sd->dev,
+ "link %s:%d -> %s:%d already exists\n",
+ src_sd->entity.name, src_idx,
+ sink->entity->name, sink_idx);
continue;
+ }
dev_dbg(src_sd->dev, "creating link %s:%d -> %s:%d\n",
src_sd->entity.name, src_idx,
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 9e983176542be..75517134a5e94 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -1087,11 +1087,17 @@ static int v4l2_m2m_register_entity(struct media_device *mdev,
entity->function = function;
ret = media_entity_pads_init(entity, num_pads, pads);
- if (ret)
+ if (ret) {
+ kfree(entity->name);
+ entity->name = NULL;
return ret;
+ }
ret = media_device_register_entity(mdev, entity);
- if (ret)
+ if (ret) {
+ kfree(entity->name);
+ entity->name = NULL;
return ret;
+ }
return 0;
}
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index a083921a8968b..224b488794e5b 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -755,7 +755,7 @@ const char *const tegra_mc_error_names[8] = {
[6] = "SMMU translation error",
};
-struct icc_node *tegra_mc_icc_xlate(struct of_phandle_args *spec, void *data)
+struct icc_node *tegra_mc_icc_xlate(const struct of_phandle_args *spec, void *data)
{
struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
struct icc_node *node;
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 00ed2b6a0d1b2..47c0c19e13fd5 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -1285,7 +1285,7 @@ to_tegra_emc_provider(struct icc_provider *provider)
}
static struct icc_node_data *
-emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+emc_of_icc_xlate_extended(const struct of_phandle_args *spec, void *data)
{
struct icc_provider *provider = data;
struct icc_node_data *ndata;
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index 470b7dbab2c2f..9d7393e19f125 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -1170,7 +1170,7 @@ static int tegra124_mc_icc_aggreate(struct icc_node *node, u32 tag, u32 avg_bw,
}
static struct icc_node_data *
-tegra124_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+tegra124_mc_of_icc_xlate_extended(const struct of_phandle_args *spec, void *data)
{
struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
const struct tegra_mc_client *client;
diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
index fcd4aea48bda9..57d9ae12fcfe1 100644
--- a/drivers/memory/tegra/tegra186-emc.c
+++ b/drivers/memory/tegra/tegra186-emc.c
@@ -236,7 +236,7 @@ static int tegra_emc_icc_set_bw(struct icc_node *src, struct icc_node *dst)
}
static struct icc_node *
-tegra_emc_of_icc_xlate(struct of_phandle_args *spec, void *data)
+tegra_emc_of_icc_xlate(const struct of_phandle_args *spec, void *data)
{
struct icc_provider *provider = data;
struct icc_node *node;
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index fd595c851a278..97cf59523b0b1 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -950,7 +950,7 @@ to_tegra_emc_provider(struct icc_provider *provider)
}
static struct icc_node_data *
-emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+emc_of_icc_xlate_extended(const struct of_phandle_args *spec, void *data)
{
struct icc_provider *provider = data;
struct icc_node_data *ndata;
diff --git a/drivers/memory/tegra/tegra20.c b/drivers/memory/tegra/tegra20.c
index aa4b97d5e7323..a3022e715deef 100644
--- a/drivers/memory/tegra/tegra20.c
+++ b/drivers/memory/tegra/tegra20.c
@@ -390,7 +390,7 @@ static int tegra20_mc_icc_aggreate(struct icc_node *node, u32 tag, u32 avg_bw,
}
static struct icc_node_data *
-tegra20_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+tegra20_mc_of_icc_xlate_extended(const struct of_phandle_args *spec, void *data)
{
struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
unsigned int i, idx = spec->args[0];
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index 9eae25c57ec6e..d7b0a23c2d7db 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -1468,7 +1468,7 @@ to_tegra_emc_provider(struct icc_provider *provider)
}
static struct icc_node_data *
-emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+emc_of_icc_xlate_extended(const struct of_phandle_args *spec, void *data)
{
struct icc_provider *provider = data;
struct icc_node_data *ndata;
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
index 06f8b35e0a149..d3e685c8431ff 100644
--- a/drivers/memory/tegra/tegra30.c
+++ b/drivers/memory/tegra/tegra30.c
@@ -1332,7 +1332,7 @@ static int tegra30_mc_icc_aggreate(struct icc_node *node, u32 tag, u32 avg_bw,
}
static struct icc_node_data *
-tegra30_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+tegra30_mc_of_icc_xlate_extended(const struct of_phandle_args *spec, void *data)
{
struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
const struct tegra_mc_client *client;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 0581f855c72e8..c459f709107b7 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1401,7 +1401,6 @@ static struct pci_driver mptfc_driver = {
static int
mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
- MPT_SCSI_HOST *hd;
u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
unsigned long flags;
int rc=1;
@@ -1412,8 +1411,7 @@ mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
ioc->name, event));
- if (ioc->sh == NULL ||
- ((hd = shost_priv(ioc->sh)) == NULL))
+ if (ioc->sh == NULL || shost_priv(ioc->sh) == NULL)
return 1;
switch (event) {
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index e7a6e45b9fac2..4b023ee229cf1 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1773,6 +1773,7 @@ config TWL4030_CORE
bool "TI TWL4030/TWL5030/TWL6030/TPS659x0 Support"
depends on I2C=y
select IRQ_DOMAIN
+ select MFD_CORE
select REGMAP_I2C
help
Say yes here if you have TWL4030 / TWL6030 family chip on your board.
diff --git a/drivers/mfd/ac100.c b/drivers/mfd/ac100.c
index 6d49d7fb5f14e..8f47c392cbd12 100644
--- a/drivers/mfd/ac100.c
+++ b/drivers/mfd/ac100.c
@@ -72,7 +72,7 @@ static const struct regmap_config ac100_regmap_config = {
.wr_table = &ac100_writeable_table,
.volatile_table = &ac100_volatile_table,
.max_register = AC100_RTC_GP(15),
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static struct mfd_cell ac100_cells[] = {
diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c
index 0e52bd2ebd74b..fb5f988e61f37 100644
--- a/drivers/mfd/altera-sysmgr.c
+++ b/drivers/mfd/altera-sysmgr.c
@@ -109,7 +109,9 @@ struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
dev = driver_find_device_by_of_node(&altr_sysmgr_driver.driver,
(void *)sysmgr_np);
- of_node_put(sysmgr_np);
+ if (property)
+ of_node_put(sysmgr_np);
+
if (!dev)
return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
index c7e85ff380132..9741977031df0 100644
--- a/drivers/mfd/as3711.c
+++ b/drivers/mfd/as3711.c
@@ -106,7 +106,7 @@ static const struct regmap_config as3711_regmap_config = {
.precious_reg = as3711_precious_reg,
.max_register = AS3711_MAX_REG,
.num_reg_defaults_raw = AS3711_NUM_REGS,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
#ifdef CONFIG_OF
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
index a2bf68afc131d..bec047bdd0884 100644
--- a/drivers/mfd/as3722.c
+++ b/drivers/mfd/as3722.c
@@ -299,7 +299,7 @@ static const struct regmap_config as3722_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = AS3722_MAX_REGISTER,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.rd_table = &as3722_readable_table,
.wr_table = &as3722_writable_table,
.volatile_table = &as3722_volatile_table,
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index deaa969bab4e1..d8daa593ebd53 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -352,7 +352,7 @@ static const struct regmap_config axp192_regmap_config = {
.wr_table = &axp192_writeable_table,
.volatile_table = &axp192_volatile_table,
.max_register = AXP20X_CC_CTRL,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config axp20x_regmap_config = {
@@ -388,7 +388,7 @@ static const struct regmap_config axp313a_regmap_config = {
.wr_table = &axp313a_writeable_table,
.volatile_table = &axp313a_volatile_table,
.max_register = AXP313A_IRQ_STATE,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config axp806_regmap_config = {
diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c
index 92eede9a5e61b..8b56786d85d01 100644
--- a/drivers/mfd/bcm590xx.c
+++ b/drivers/mfd/bcm590xx.c
@@ -27,14 +27,14 @@ static const struct regmap_config bcm590xx_regmap_config_pri = {
.reg_bits = 8,
.val_bits = 8,
.max_register = BCM590XX_MAX_REGISTER_PRI,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config bcm590xx_regmap_config_sec = {
.reg_bits = 8,
.val_bits = 8,
.max_register = BCM590XX_MAX_REGISTER_SEC,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static int bcm590xx_i2c_probe(struct i2c_client *i2c_pri)
diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
index 819d09e4d1007..0a955178d4697 100644
--- a/drivers/mfd/bd9571mwv.c
+++ b/drivers/mfd/bd9571mwv.c
@@ -67,7 +67,7 @@ static const struct regmap_access_table bd9571mwv_volatile_table = {
static const struct regmap_config bd9571mwv_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.rd_table = &bd9571mwv_readable_table,
.wr_table = &bd9571mwv_writable_table,
.volatile_table = &bd9571mwv_volatile_table,
@@ -152,7 +152,7 @@ static const struct regmap_access_table bd9574mwf_volatile_table = {
static const struct regmap_config bd9574mwf_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.rd_table = &bd9574mwf_readable_table,
.wr_table = &bd9574mwf_writable_table,
.volatile_table = &bd9574mwf_volatile_table,
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 603b1cd527850..a52d59cc2b1ec 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -74,6 +74,10 @@ static const struct mfd_cell cros_ec_cec_cells[] = {
{ .name = "cros-ec-cec", },
};
+static const struct mfd_cell cros_ec_gpio_cells[] = {
+ { .name = "cros-ec-gpio", },
+};
+
static const struct mfd_cell cros_ec_rtc_cells[] = {
{ .name = "cros-ec-rtc", },
};
@@ -91,6 +95,10 @@ static const struct mfd_cell cros_usbpd_notify_cells[] = {
{ .name = "cros-usbpd-notify", },
};
+static const struct mfd_cell cros_ec_wdt_cells[] = {
+ { .name = "cros-ec-wdt", }
+};
+
static const struct cros_feature_to_cells cros_subdevices[] = {
{
.id = EC_FEATURE_CEC,
@@ -98,6 +106,11 @@ static const struct cros_feature_to_cells cros_subdevices[] = {
.num_cells = ARRAY_SIZE(cros_ec_cec_cells),
},
{
+ .id = EC_FEATURE_GPIO,
+ .mfd_cells = cros_ec_gpio_cells,
+ .num_cells = ARRAY_SIZE(cros_ec_gpio_cells),
+ },
+ {
.id = EC_FEATURE_RTC,
.mfd_cells = cros_ec_rtc_cells,
.num_cells = ARRAY_SIZE(cros_ec_rtc_cells),
@@ -107,6 +120,11 @@ static const struct cros_feature_to_cells cros_subdevices[] = {
.mfd_cells = cros_usbpd_charger_cells,
.num_cells = ARRAY_SIZE(cros_usbpd_charger_cells),
},
+ {
+ .id = EC_FEATURE_HANG_DETECT,
+ .mfd_cells = cros_ec_wdt_cells,
+ .num_cells = ARRAY_SIZE(cros_ec_wdt_cells),
+ },
};
static const struct mfd_cell cros_ec_platform_cells[] = {
diff --git a/drivers/mfd/cs42l43-i2c.c b/drivers/mfd/cs42l43-i2c.c
index 4922211680c96..c9e4ea76149a8 100644
--- a/drivers/mfd/cs42l43-i2c.c
+++ b/drivers/mfd/cs42l43-i2c.c
@@ -6,11 +6,15 @@
* Cirrus Logic International Semiconductor Ltd.
*/
+#include <linux/array_size.h>
#include <linux/err.h>
-#include <linux/errno.h>
#include <linux/i2c.h>
+#include <linux/mfd/cs42l43.h>
#include <linux/mfd/cs42l43-regs.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
#include "cs42l43.h"
@@ -34,7 +38,6 @@ static const struct regmap_config cs42l43_i2c_regmap = {
static int cs42l43_i2c_probe(struct i2c_client *i2c)
{
struct cs42l43 *cs42l43;
- int ret;
cs42l43 = devm_kzalloc(&i2c->dev, sizeof(*cs42l43), GFP_KERNEL);
if (!cs42l43)
@@ -46,11 +49,9 @@ static int cs42l43_i2c_probe(struct i2c_client *i2c)
cs42l43->attached = true;
cs42l43->regmap = devm_regmap_init_i2c(i2c, &cs42l43_i2c_regmap);
- if (IS_ERR(cs42l43->regmap)) {
- ret = PTR_ERR(cs42l43->regmap);
- dev_err(cs42l43->dev, "Failed to allocate regmap: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(cs42l43->regmap))
+ return dev_err_probe(cs42l43->dev, PTR_ERR(cs42l43->regmap),
+ "Failed to allocate regmap\n");
return cs42l43_dev_probe(cs42l43);
}
diff --git a/drivers/mfd/cs42l43-sdw.c b/drivers/mfd/cs42l43-sdw.c
index 1d85bbf8cdd5d..65f7b1d782486 100644
--- a/drivers/mfd/cs42l43-sdw.c
+++ b/drivers/mfd/cs42l43-sdw.c
@@ -6,11 +6,15 @@
* Cirrus Logic International Semiconductor Ltd.
*/
+#include <linux/array_size.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/errno.h>
+#include <linux/mfd/cs42l43.h>
#include <linux/mfd/cs42l43-regs.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw_type.h>
@@ -167,7 +171,6 @@ static int cs42l43_sdw_probe(struct sdw_slave *sdw, const struct sdw_device_id *
{
struct cs42l43 *cs42l43;
struct device *dev = &sdw->dev;
- int ret;
cs42l43 = devm_kzalloc(dev, sizeof(*cs42l43), GFP_KERNEL);
if (!cs42l43)
@@ -177,11 +180,9 @@ static int cs42l43_sdw_probe(struct sdw_slave *sdw, const struct sdw_device_id *
cs42l43->sdw = sdw;
cs42l43->regmap = devm_regmap_init_sdw(sdw, &cs42l43_sdw_regmap);
- if (IS_ERR(cs42l43->regmap)) {
- ret = PTR_ERR(cs42l43->regmap);
- dev_err(cs42l43->dev, "Failed to allocate regmap: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(cs42l43->regmap))
+ return dev_err_probe(cs42l43->dev, PTR_ERR(cs42l43->regmap),
+ "Failed to allocate regmap\n");
return cs42l43_dev_probe(cs42l43);
}
diff --git a/drivers/mfd/cs42l43.c b/drivers/mfd/cs42l43.c
index 7b6d07cbe6fc6..a0fb2dc6c3b25 100644
--- a/drivers/mfd/cs42l43.c
+++ b/drivers/mfd/cs42l43.c
@@ -6,51 +6,57 @@
* Cirrus Logic International Semiconductor Ltd.
*/
+#include <linux/array_size.h>
#include <linux/bitops.h>
#include <linux/build_bug.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/err.h>
-#include <linux/errno.h>
#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
#include <linux/jiffies.h>
#include <linux/mfd/core.h>
+#include <linux/mfd/cs42l43.h>
#include <linux/mfd/cs42l43-regs.h>
#include <linux/module.h>
+#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>
+#include <linux/types.h>
#include "cs42l43.h"
-#define CS42L43_RESET_DELAY 20
+#define CS42L43_RESET_DELAY_MS 20
-#define CS42L43_SDW_ATTACH_TIMEOUT 500
-#define CS42L43_SDW_DETACH_TIMEOUT 100
+#define CS42L43_SDW_ATTACH_TIMEOUT_MS 500
+#define CS42L43_SDW_DETACH_TIMEOUT_MS 100
#define CS42L43_MCU_BOOT_STAGE1 1
#define CS42L43_MCU_BOOT_STAGE2 2
#define CS42L43_MCU_BOOT_STAGE3 3
#define CS42L43_MCU_BOOT_STAGE4 4
-#define CS42L43_MCU_POLL 5000
-#define CS42L43_MCU_CMD_TIMEOUT 20000
+#define CS42L43_MCU_POLL_US 5000
+#define CS42L43_MCU_CMD_TIMEOUT_US 20000
#define CS42L43_MCU_UPDATE_FORMAT 3
#define CS42L43_MCU_UPDATE_OFFSET 0x100000
-#define CS42L43_MCU_UPDATE_TIMEOUT 500000
+#define CS42L43_MCU_UPDATE_TIMEOUT_US 500000
#define CS42L43_MCU_UPDATE_RETRIES 5
#define CS42L43_MCU_SUPPORTED_REV 0x2105
#define CS42L43_MCU_SHADOW_REGS_REQUIRED_REV 0x2200
#define CS42L43_MCU_SUPPORTED_BIOS_REV 0x0001
-#define CS42L43_VDDP_DELAY 50
-#define CS42L43_VDDD_DELAY 1000
+#define CS42L43_VDDP_DELAY_US 50
+#define CS42L43_VDDD_DELAY_US 1000
-#define CS42L43_AUTOSUSPEND_TIME 250
+#define CS42L43_AUTOSUSPEND_TIME_MS 250
struct cs42l43_patch_header {
__le16 version;
__le16 size;
- u8 reserved;
- u8 secure;
+ __u8 reserved;
+ __u8 secure;
__le16 bss_size;
__le32 apply_addr;
__le32 checksum;
@@ -84,7 +90,7 @@ const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS] = {
{ CS42L43_DRV_CTRL_5, 0x136C00C0 },
{ CS42L43_GPIO_CTRL1, 0x00000707 },
{ CS42L43_GPIO_CTRL2, 0x00000000 },
- { CS42L43_GPIO_FN_SEL, 0x00000000 },
+ { CS42L43_GPIO_FN_SEL, 0x00000004 },
{ CS42L43_MCLK_SRC_SEL, 0x00000000 },
{ CS42L43_SAMPLE_RATE1, 0x00000003 },
{ CS42L43_SAMPLE_RATE2, 0x00000003 },
@@ -131,38 +137,38 @@ const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS] = {
{ CS42L43_ASP_TX_CH4_CTRL, 0x00170091 },
{ CS42L43_ASP_TX_CH5_CTRL, 0x001700C1 },
{ CS42L43_ASP_TX_CH6_CTRL, 0x001700F1 },
- { CS42L43_ASPTX1_INPUT, 0x00800000 },
- { CS42L43_ASPTX2_INPUT, 0x00800000 },
- { CS42L43_ASPTX3_INPUT, 0x00800000 },
- { CS42L43_ASPTX4_INPUT, 0x00800000 },
- { CS42L43_ASPTX5_INPUT, 0x00800000 },
- { CS42L43_ASPTX6_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP1_CH1_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP1_CH2_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP1_CH3_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP1_CH4_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP2_CH1_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP2_CH2_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP3_CH1_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP3_CH2_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP4_CH1_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP4_CH2_INPUT, 0x00800000 },
- { CS42L43_ASRC_INT1_INPUT1, 0x00800000 },
- { CS42L43_ASRC_INT2_INPUT1, 0x00800000 },
- { CS42L43_ASRC_INT3_INPUT1, 0x00800000 },
- { CS42L43_ASRC_INT4_INPUT1, 0x00800000 },
- { CS42L43_ASRC_DEC1_INPUT1, 0x00800000 },
- { CS42L43_ASRC_DEC2_INPUT1, 0x00800000 },
- { CS42L43_ASRC_DEC3_INPUT1, 0x00800000 },
- { CS42L43_ASRC_DEC4_INPUT1, 0x00800000 },
- { CS42L43_ISRC1INT1_INPUT1, 0x00800000 },
- { CS42L43_ISRC1INT2_INPUT1, 0x00800000 },
- { CS42L43_ISRC1DEC1_INPUT1, 0x00800000 },
- { CS42L43_ISRC1DEC2_INPUT1, 0x00800000 },
- { CS42L43_ISRC2INT1_INPUT1, 0x00800000 },
- { CS42L43_ISRC2INT2_INPUT1, 0x00800000 },
- { CS42L43_ISRC2DEC1_INPUT1, 0x00800000 },
- { CS42L43_ISRC2DEC2_INPUT1, 0x00800000 },
+ { CS42L43_ASPTX1_INPUT, 0x00000000 },
+ { CS42L43_ASPTX2_INPUT, 0x00000000 },
+ { CS42L43_ASPTX3_INPUT, 0x00000000 },
+ { CS42L43_ASPTX4_INPUT, 0x00000000 },
+ { CS42L43_ASPTX5_INPUT, 0x00000000 },
+ { CS42L43_ASPTX6_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP1_CH1_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP1_CH2_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP1_CH3_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP1_CH4_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP2_CH1_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP2_CH2_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP3_CH1_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP3_CH2_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP4_CH1_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP4_CH2_INPUT, 0x00000000 },
+ { CS42L43_ASRC_INT1_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_INT2_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_INT3_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_INT4_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_DEC1_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_DEC2_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_DEC3_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_DEC4_INPUT1, 0x00000000 },
+ { CS42L43_ISRC1INT1_INPUT1, 0x00000000 },
+ { CS42L43_ISRC1INT2_INPUT1, 0x00000000 },
+ { CS42L43_ISRC1DEC1_INPUT1, 0x00000000 },
+ { CS42L43_ISRC1DEC2_INPUT1, 0x00000000 },
+ { CS42L43_ISRC2INT1_INPUT1, 0x00000000 },
+ { CS42L43_ISRC2INT2_INPUT1, 0x00000000 },
+ { CS42L43_ISRC2DEC1_INPUT1, 0x00000000 },
+ { CS42L43_ISRC2DEC2_INPUT1, 0x00000000 },
{ CS42L43_EQ1MIX_INPUT1, 0x00800000 },
{ CS42L43_EQ1MIX_INPUT2, 0x00800000 },
{ CS42L43_EQ1MIX_INPUT3, 0x00800000 },
@@ -171,8 +177,8 @@ const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS] = {
{ CS42L43_EQ2MIX_INPUT2, 0x00800000 },
{ CS42L43_EQ2MIX_INPUT3, 0x00800000 },
{ CS42L43_EQ2MIX_INPUT4, 0x00800000 },
- { CS42L43_SPDIF1_INPUT1, 0x00800000 },
- { CS42L43_SPDIF2_INPUT1, 0x00800000 },
+ { CS42L43_SPDIF1_INPUT1, 0x00000000 },
+ { CS42L43_SPDIF2_INPUT1, 0x00000000 },
{ CS42L43_AMP1MIX_INPUT1, 0x00800000 },
{ CS42L43_AMP1MIX_INPUT2, 0x00800000 },
{ CS42L43_AMP1MIX_INPUT3, 0x00800000 },
@@ -217,7 +223,7 @@ const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS] = {
{ CS42L43_CTRL_REG, 0x00000006 },
{ CS42L43_FDIV_FRAC, 0x40000000 },
{ CS42L43_CAL_RATIO, 0x00000080 },
- { CS42L43_SPI_CLK_CONFIG1, 0x00000000 },
+ { CS42L43_SPI_CLK_CONFIG1, 0x00000001 },
{ CS42L43_SPI_CONFIG1, 0x00000000 },
{ CS42L43_SPI_CONFIG2, 0x00000000 },
{ CS42L43_SPI_CONFIG3, 0x00000001 },
@@ -532,10 +538,10 @@ static int cs42l43_soft_reset(struct cs42l43 *cs42l43)
regcache_cache_only(cs42l43->regmap, true);
regmap_multi_reg_write_bypassed(cs42l43->regmap, reset, ARRAY_SIZE(reset));
- msleep(CS42L43_RESET_DELAY);
+ msleep(CS42L43_RESET_DELAY_MS);
if (cs42l43->sdw) {
- unsigned long timeout = msecs_to_jiffies(CS42L43_SDW_DETACH_TIMEOUT);
+ unsigned long timeout = msecs_to_jiffies(CS42L43_SDW_DETACH_TIMEOUT_MS);
unsigned long time;
time = wait_for_completion_timeout(&cs42l43->device_detach, timeout);
@@ -555,7 +561,7 @@ static int cs42l43_soft_reset(struct cs42l43 *cs42l43)
static int cs42l43_wait_for_attach(struct cs42l43 *cs42l43)
{
if (!cs42l43->attached) {
- unsigned long timeout = msecs_to_jiffies(CS42L43_SDW_ATTACH_TIMEOUT);
+ unsigned long timeout = msecs_to_jiffies(CS42L43_SDW_ATTACH_TIMEOUT_MS);
unsigned long time;
time = wait_for_completion_timeout(&cs42l43->device_attach, timeout);
@@ -597,7 +603,7 @@ static int cs42l43_mcu_stage_2_3(struct cs42l43 *cs42l43, bool shadow)
ret = regmap_read_poll_timeout(cs42l43->regmap, CS42L43_BOOT_STATUS,
val, (val == CS42L43_MCU_BOOT_STAGE3),
- CS42L43_MCU_POLL, CS42L43_MCU_CMD_TIMEOUT);
+ CS42L43_MCU_POLL_US, CS42L43_MCU_CMD_TIMEOUT_US);
if (ret) {
dev_err(cs42l43->dev, "Failed to move to stage 3: %d, 0x%x\n", ret, val);
return ret;
@@ -646,7 +652,7 @@ static int cs42l43_mcu_disable(struct cs42l43 *cs42l43)
ret = regmap_read_poll_timeout(cs42l43->regmap, CS42L43_SOFT_INT_SHADOW, val,
(val & CS42L43_CONTROL_APPLIED_INT_MASK),
- CS42L43_MCU_POLL, CS42L43_MCU_CMD_TIMEOUT);
+ CS42L43_MCU_POLL_US, CS42L43_MCU_CMD_TIMEOUT_US);
if (ret) {
dev_err(cs42l43->dev, "Failed to disable firmware: %d, 0x%x\n", ret, val);
return ret;
@@ -690,7 +696,7 @@ static void cs42l43_mcu_load_firmware(const struct firmware *firmware, void *con
ret = regmap_read_poll_timeout(cs42l43->regmap, CS42L43_SOFT_INT_SHADOW, val,
(val & CS42L43_PATCH_APPLIED_INT_MASK),
- CS42L43_MCU_POLL, CS42L43_MCU_UPDATE_TIMEOUT);
+ CS42L43_MCU_POLL_US, CS42L43_MCU_UPDATE_TIMEOUT_US);
if (ret) {
dev_err(cs42l43->dev, "Failed to update firmware: %d, 0x%x\n", ret, val);
cs42l43->firmware_error = ret;
@@ -951,7 +957,7 @@ static int cs42l43_power_up(struct cs42l43 *cs42l43)
}
/* vdd-p must be on for 50uS before any other supply */
- usleep_range(CS42L43_VDDP_DELAY, 2 * CS42L43_VDDP_DELAY);
+ usleep_range(CS42L43_VDDP_DELAY_US, 2 * CS42L43_VDDP_DELAY_US);
gpiod_set_value_cansleep(cs42l43->reset, 1);
@@ -967,7 +973,7 @@ static int cs42l43_power_up(struct cs42l43 *cs42l43)
goto err_core_supplies;
}
- usleep_range(CS42L43_VDDD_DELAY, 2 * CS42L43_VDDD_DELAY);
+ usleep_range(CS42L43_VDDD_DELAY_US, 2 * CS42L43_VDDD_DELAY_US);
return 0;
@@ -1051,7 +1057,7 @@ int cs42l43_dev_probe(struct cs42l43 *cs42l43)
if (ret)
return ret;
- pm_runtime_set_autosuspend_delay(cs42l43->dev, CS42L43_AUTOSUSPEND_TIME);
+ pm_runtime_set_autosuspend_delay(cs42l43->dev, CS42L43_AUTOSUSPEND_TIME_MS);
pm_runtime_use_autosuspend(cs42l43->dev);
pm_runtime_set_active(cs42l43->dev);
/*
@@ -1059,7 +1065,9 @@ int cs42l43_dev_probe(struct cs42l43 *cs42l43)
* the boot work runs.
*/
pm_runtime_get_noresume(cs42l43->dev);
- devm_pm_runtime_enable(cs42l43->dev);
+ ret = devm_pm_runtime_enable(cs42l43->dev);
+ if (ret)
+ return ret;
queue_work(system_long_wq, &cs42l43->boot_work);
diff --git a/drivers/mfd/cs42l43.h b/drivers/mfd/cs42l43.h
index eb4caf3938332..8d1b1b0f5a473 100644
--- a/drivers/mfd/cs42l43.h
+++ b/drivers/mfd/cs42l43.h
@@ -6,15 +6,17 @@
* Cirrus Logic International Semiconductor Ltd.
*/
-#include <linux/mfd/cs42l43.h>
-#include <linux/pm.h>
-#include <linux/regmap.h>
-
#ifndef CS42L43_CORE_INT_H
#define CS42L43_CORE_INT_H
#define CS42L43_N_DEFAULTS 176
+struct dev_pm_ops;
+struct device;
+struct reg_default;
+
+struct cs42l43;
+
extern const struct dev_pm_ops cs42l43_pm_ops;
extern const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS];
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index 150448cd2eb08..dc85801b9fa08 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -533,7 +533,7 @@ const struct regmap_config da9052_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.max_register = DA9052_PAGE1_CON_REG,
.readable_reg = da9052_reg_readable,
diff --git a/drivers/mfd/da9055-core.c b/drivers/mfd/da9055-core.c
index 768302e05baa1..1f727ef60d638 100644
--- a/drivers/mfd/da9055-core.c
+++ b/drivers/mfd/da9055-core.c
@@ -245,7 +245,7 @@ const struct regmap_config da9055_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.max_register = DA9055_MAX_REGISTER_CNT,
.readable_reg = da9055_register_readable,
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index 73a22107900c8..dbbc4779170a0 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -476,7 +476,7 @@ static struct regmap_config da9061_regmap_config = {
.ranges = da9061_range_cfg,
.num_ranges = ARRAY_SIZE(da9061_range_cfg),
.max_register = DA9062AA_CONFIG_ID,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.rd_table = &da9061_aa_readable_table,
.wr_table = &da9061_aa_writeable_table,
.volatile_table = &da9061_aa_volatile_table,
@@ -582,7 +582,7 @@ static struct regmap_config da9062_regmap_config = {
.ranges = da9062_range_cfg,
.num_ranges = ARRAY_SIZE(da9062_range_cfg),
.max_register = DA9062AA_CONFIG_ID,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.rd_table = &da9062_aa_readable_table,
.wr_table = &da9062_aa_writeable_table,
.volatile_table = &da9062_aa_volatile_table,
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index d715cf9a9e688..c6235cd0dbdc4 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -342,7 +342,7 @@ static struct regmap_config da9063_regmap_config = {
.num_ranges = ARRAY_SIZE(da9063_range_cfg),
.max_register = DA9063_REG_CONFIG_ID,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct of_device_id da9063_dt_ids[] = {
diff --git a/drivers/mfd/da9150-core.c b/drivers/mfd/da9150-core.c
index 94d621e20635d..5c59cc869fb3e 100644
--- a/drivers/mfd/da9150-core.c
+++ b/drivers/mfd/da9150-core.c
@@ -169,7 +169,7 @@ static const struct regmap_config da9150_regmap_config = {
.num_ranges = ARRAY_SIZE(da9150_range_cfg),
.max_register = DA9150_TBAT_RES_B,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = da9150_volatile_reg,
};
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 4621d3950b8f9..8c00e0c695c5b 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -23,12 +23,22 @@
#include "intel-lpss.h"
-/* Some DSDTs have an unused GEXP ACPI device conflicting with I2C4 resources */
-static const struct pci_device_id ignore_resource_conflicts_ids[] = {
- /* Microsoft Surface Go (version 1) I2C4 */
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, 0x9d64, 0x152d, 0x1182), },
- /* Microsoft Surface Go 2 I2C4 */
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, 0x9d64, 0x152d, 0x1237), },
+static const struct pci_device_id quirk_ids[] = {
+ {
+ /* Microsoft Surface Go (version 1) I2C4 */
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, 0x9d64, 0x152d, 0x1182),
+ .driver_data = QUIRK_IGNORE_RESOURCE_CONFLICTS,
+ },
+ {
+ /* Microsoft Surface Go 2 I2C4 */
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, 0x9d64, 0x152d, 0x1237),
+ .driver_data = QUIRK_IGNORE_RESOURCE_CONFLICTS,
+ },
+ {
+ /* Dell XPS 9530 (2023) */
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, 0x51fb, 0x1028, 0x0beb),
+ .driver_data = QUIRK_CLOCK_DIVIDER_UNITY,
+ },
{ }
};
@@ -36,6 +46,7 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
const struct intel_lpss_platform_info *data = (void *)id->driver_data;
+ const struct pci_device_id *quirk_pci_info;
struct intel_lpss_platform_info *info;
int ret;
@@ -55,8 +66,9 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
info->mem = pci_resource_n(pdev, 0);
info->irq = pci_irq_vector(pdev, 0);
- if (pci_match_id(ignore_resource_conflicts_ids, pdev))
- info->ignore_resource_conflicts = true;
+ quirk_pci_info = pci_match_id(quirk_ids, pdev);
+ if (quirk_pci_info)
+ info->quirks = quirk_pci_info->driver_data;
pdev->d3cold_delay = 0;
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index eff423f7dd284..2a9018112dfc8 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -300,6 +300,7 @@ static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
{
char name[32];
struct clk *tmp = *clk;
+ int ret;
snprintf(name, sizeof(name), "%s-enable", devname);
tmp = clk_register_gate(NULL, name, __clk_get_name(tmp), 0,
@@ -316,6 +317,12 @@ static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
return PTR_ERR(tmp);
*clk = tmp;
+ if (lpss->info->quirks & QUIRK_CLOCK_DIVIDER_UNITY) {
+ ret = clk_set_rate(tmp, lpss->info->clk_rate);
+ if (ret)
+ return ret;
+ }
+
snprintf(name, sizeof(name), "%s-update", devname);
tmp = clk_register_gate(NULL, name, __clk_get_name(tmp),
CLK_SET_RATE_PARENT, lpss->priv, 31, 0, NULL);
@@ -412,7 +419,7 @@ int intel_lpss_probe(struct device *dev,
return ret;
lpss->cell->swnode = info->swnode;
- lpss->cell->ignore_resource_conflicts = info->ignore_resource_conflicts;
+ lpss->cell->ignore_resource_conflicts = info->quirks & QUIRK_IGNORE_RESOURCE_CONFLICTS;
intel_lpss_init_dev(lpss);
diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h
index c1d72b117ed5e..6f8f668f4c6f0 100644
--- a/drivers/mfd/intel-lpss.h
+++ b/drivers/mfd/intel-lpss.h
@@ -11,16 +11,28 @@
#ifndef __MFD_INTEL_LPSS_H
#define __MFD_INTEL_LPSS_H
+#include <linux/bits.h>
#include <linux/pm.h>
+/*
+ * Some DSDTs have an unused GEXP ACPI device conflicting with I2C4 resources.
+ * Set to ignore resource conflicts with ACPI declared SystemMemory regions.
+ */
+#define QUIRK_IGNORE_RESOURCE_CONFLICTS BIT(0)
+/*
+ * Some devices have misconfigured clock divider due to a firmware bug.
+ * Set this to force the clock divider to 1:1 ratio.
+ */
+#define QUIRK_CLOCK_DIVIDER_UNITY BIT(1)
+
struct device;
struct resource;
struct software_node;
struct intel_lpss_platform_info {
struct resource *mem;
- bool ignore_resource_conflicts;
int irq;
+ unsigned int quirks;
unsigned long clk_rate;
const char *clk_con_id;
const struct software_node *swnode;
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 67af36a389136..5557f023a1734 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -428,50 +428,13 @@ static int kempld_detect_device(struct kempld_device_data *pld)
#ifdef CONFIG_ACPI
static int kempld_get_acpi_data(struct platform_device *pdev)
{
- struct list_head resource_list;
- struct resource *resources;
- struct resource_entry *rentry;
struct device *dev = &pdev->dev;
- struct acpi_device *acpi_dev = ACPI_COMPANION(dev);
const struct kempld_platform_data *pdata;
int ret;
- int count;
pdata = acpi_device_get_match_data(dev);
ret = platform_device_add_data(pdev, pdata,
sizeof(struct kempld_platform_data));
- if (ret)
- return ret;
-
- INIT_LIST_HEAD(&resource_list);
- ret = acpi_dev_get_resources(acpi_dev, &resource_list, NULL, NULL);
- if (ret < 0)
- goto out;
-
- count = ret;
-
- if (count == 0) {
- ret = platform_device_add_resources(pdev, pdata->ioresource, 1);
- goto out;
- }
-
- resources = devm_kcalloc(&acpi_dev->dev, count, sizeof(*resources),
- GFP_KERNEL);
- if (!resources) {
- ret = -ENOMEM;
- goto out;
- }
-
- count = 0;
- list_for_each_entry(rentry, &resource_list, node) {
- memcpy(&resources[count], rentry->res,
- sizeof(*resources));
- count++;
- }
- ret = platform_device_add_resources(pdev, resources, count);
-
-out:
- acpi_dev_free_resource_list(&resource_list);
return ret;
}
diff --git a/drivers/mfd/khadas-mcu.c b/drivers/mfd/khadas-mcu.c
index 61396d824f16d..ba981a7886921 100644
--- a/drivers/mfd/khadas-mcu.c
+++ b/drivers/mfd/khadas-mcu.c
@@ -72,7 +72,7 @@ static const struct regmap_config khadas_mcu_regmap_config = {
.max_register = KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG,
.volatile_reg = khadas_mcu_reg_volatile,
.writeable_reg = khadas_mcu_reg_writeable,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static struct mfd_cell khadas_mcu_fan_cells[] = {
diff --git a/drivers/mfd/lochnagar-i2c.c b/drivers/mfd/lochnagar-i2c.c
index 0b76fcccd0bda..6c930c57f2e23 100644
--- a/drivers/mfd/lochnagar-i2c.c
+++ b/drivers/mfd/lochnagar-i2c.c
@@ -70,7 +70,7 @@ static const struct regmap_config lochnagar1_i2c_regmap = {
.use_single_read = true,
.use_single_write = true,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct reg_sequence lochnagar1_patch[] = {
@@ -163,7 +163,7 @@ static const struct regmap_config lochnagar2_i2c_regmap = {
.readable_reg = lochnagar2_readable_register,
.volatile_reg = lochnagar2_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct reg_sequence lochnagar2_patch[] = {
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 73a0e7f9bd311..f14901660147f 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -38,6 +38,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
@@ -1321,7 +1322,7 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
case INTEL_SPI_BYT:
pci_read_config_dword(dev, SPIBASE_BYT, &spi_base);
if (spi_base & SPIBASE_BYT_EN) {
- res->start = spi_base & ~(SPIBASE_BYT_SZ - 1);
+ res->start = ALIGN_DOWN(spi_base, SPIBASE_BYT_SZ);
res->end = res->start + SPIBASE_BYT_SZ - 1;
info->set_writeable = lpc_ich_byt_set_writeable;
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 1000572761a84..920797b806ced 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -7,6 +7,7 @@
* Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -174,28 +175,27 @@ int mc13xxx_irq_free(struct mc13xxx *mc13xxx, int irq, void *dev)
}
EXPORT_SYMBOL(mc13xxx_irq_free);
-#define maskval(reg, mask) (((reg) & (mask)) >> __ffs(mask))
static void mc13xxx_print_revision(struct mc13xxx *mc13xxx, u32 revision)
{
dev_info(mc13xxx->dev, "%s: rev: %d.%d, "
"fin: %d, fab: %d, icid: %d/%d\n",
mc13xxx->variant->name,
- maskval(revision, MC13XXX_REVISION_REVFULL),
- maskval(revision, MC13XXX_REVISION_REVMETAL),
- maskval(revision, MC13XXX_REVISION_FIN),
- maskval(revision, MC13XXX_REVISION_FAB),
- maskval(revision, MC13XXX_REVISION_ICID),
- maskval(revision, MC13XXX_REVISION_ICIDCODE));
+ FIELD_GET(MC13XXX_REVISION_REVFULL, revision),
+ FIELD_GET(MC13XXX_REVISION_REVMETAL, revision),
+ FIELD_GET(MC13XXX_REVISION_FIN, revision),
+ FIELD_GET(MC13XXX_REVISION_FAB, revision),
+ FIELD_GET(MC13XXX_REVISION_ICID, revision),
+ FIELD_GET(MC13XXX_REVISION_ICIDCODE, revision));
}
static void mc34708_print_revision(struct mc13xxx *mc13xxx, u32 revision)
{
dev_info(mc13xxx->dev, "%s: rev %d.%d, fin: %d, fab: %d\n",
mc13xxx->variant->name,
- maskval(revision, MC34708_REVISION_REVFULL),
- maskval(revision, MC34708_REVISION_REVMETAL),
- maskval(revision, MC34708_REVISION_FIN),
- maskval(revision, MC34708_REVISION_FAB));
+ FIELD_GET(MC34708_REVISION_REVFULL, revision),
+ FIELD_GET(MC34708_REVISION_REVMETAL, revision),
+ FIELD_GET(MC34708_REVISION_FIN, revision),
+ FIELD_GET(MC34708_REVISION_FAB, revision));
}
/* These are only exported for mc13xxx-i2c and mc13xxx-spi */
diff --git a/drivers/mfd/mcp-core.c b/drivers/mfd/mcp-core.c
index 2fa592c37c6f0..16ca23311cab4 100644
--- a/drivers/mfd/mcp-core.c
+++ b/drivers/mfd/mcp-core.c
@@ -41,7 +41,7 @@ static void mcp_bus_remove(struct device *dev)
drv->remove(mcp);
}
-static struct bus_type mcp_bus_type = {
+static const struct bus_type mcp_bus_type = {
.name = "mcp",
.match = mcp_bus_match,
.probe = mcp_bus_probe,
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 2b85509a90fc2..6ad5c93027afc 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -29,7 +29,7 @@ struct mfd_of_node_entry {
struct device_node *np;
};
-static struct device_type mfd_dev_type = {
+static const struct device_type mfd_dev_type = {
.name = "mfd_device",
};
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 4449dde05021a..4fd4a2da5ad73 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -142,6 +142,9 @@ static const struct mfd_cell mt6357_devs[] = {
.resources = mt6357_rtc_resources,
.of_compatible = "mediatek,mt6357-rtc",
}, {
+ .name = "mt6357-sound",
+ .of_compatible = "mediatek,mt6357-sound"
+ }, {
.name = "mtk-pmic-keys",
.num_resources = ARRAY_SIZE(mt6357_keys_resources),
.resources = mt6357_keys_resources,
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index ebc62033db169..949feb03d4f8d 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -699,7 +699,7 @@ static int usbhs_omap_probe(struct platform_device *pdev)
}
for (i = 0; i < omap->nports; i++) {
- char clkname[30];
+ char clkname[40];
/* clock names are indexed from 1*/
snprintf(clkname, sizeof(clkname),
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
index 6ff84b2600c54..ef326d6d566e6 100644
--- a/drivers/mfd/rave-sp.c
+++ b/drivers/mfd/rave-sp.c
@@ -358,7 +358,7 @@ int rave_sp_exec(struct rave_sp *sp,
ackid = atomic_inc_return(&sp->ackid);
reply.ackid = ackid;
- reply.code = rave_sp_reply_code((u8)command),
+ reply.code = rave_sp_reply_code((u8)command);
mutex_lock(&sp->bus_lock);
@@ -471,8 +471,8 @@ static void rave_sp_receive_frame(struct rave_sp *sp,
rave_sp_receive_reply(sp, data, length);
}
-static ssize_t rave_sp_receive_buf(struct serdev_device *serdev,
- const u8 *buf, size_t size)
+static size_t rave_sp_receive_buf(struct serdev_device *serdev,
+ const u8 *buf, size_t size)
{
struct device *dev = &serdev->dev;
struct rave_sp *sp = dev_get_drvdata(dev);
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index 5e81f011363ff..2c0e8e9630f74 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -230,7 +230,7 @@ static const struct regmap_config rc5t583_regmap_config = {
.volatile_reg = volatile_reg,
.max_register = RC5T583_MAX_REG,
.num_reg_defaults_raw = RC5T583_NUM_REGS,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static int rc5t583_i2c_probe(struct i2c_client *i2c)
diff --git a/drivers/mfd/rk8xx-core.c b/drivers/mfd/rk8xx-core.c
index b1ffc3b9e2be7..e2261b68b844d 100644
--- a/drivers/mfd/rk8xx-core.c
+++ b/drivers/mfd/rk8xx-core.c
@@ -43,8 +43,8 @@ static struct resource rk806_pwrkey_resources[] = {
};
static const struct resource rk817_pwrkey_resources[] = {
- DEFINE_RES_IRQ(RK817_IRQ_PWRON_RISE),
DEFINE_RES_IRQ(RK817_IRQ_PWRON_FALL),
+ DEFINE_RES_IRQ(RK817_IRQ_PWRON_RISE),
};
static const struct resource rk817_charger_resources[] = {
diff --git a/drivers/mfd/rk8xx-spi.c b/drivers/mfd/rk8xx-spi.c
index fd137f38c2c4a..3405fb82ff9fb 100644
--- a/drivers/mfd/rk8xx-spi.c
+++ b/drivers/mfd/rk8xx-spi.c
@@ -34,7 +34,7 @@ static const struct regmap_config rk806_regmap_config_spi = {
.reg_bits = 16,
.val_bits = 8,
.max_register = RK806_BUCK_RSERVE_REG5,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_table = &rk806_volatile_table,
};
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index 7336e6d8a0013..23ca00d2c624d 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -62,7 +62,7 @@ static const struct regmap_config rn5t618_regmap_config = {
.val_bits = 8,
.volatile_reg = rn5t618_volatile_reg,
.max_register = RN5T618_MAX_REG,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_irq rc5t619_irqs[] = {
diff --git a/drivers/mfd/rohm-bd71828.c b/drivers/mfd/rohm-bd71828.c
index 594718f7e8e18..2f3826c7eef49 100644
--- a/drivers/mfd/rohm-bd71828.c
+++ b/drivers/mfd/rohm-bd71828.c
@@ -197,7 +197,7 @@ static const struct regmap_config bd71815_regmap = {
.val_bits = 8,
.volatile_table = &bd71815_volatile_regs,
.max_register = BD71815_MAX_REGISTER - 1,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config bd71828_regmap = {
@@ -205,7 +205,7 @@ static const struct regmap_config bd71828_regmap = {
.val_bits = 8,
.volatile_table = &bd71828_volatile_regs,
.max_register = BD71828_MAX_REGISTER,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
/*
diff --git a/drivers/mfd/rohm-bd718x7.c b/drivers/mfd/rohm-bd718x7.c
index 4798bdf27afb6..7755a4c073bfe 100644
--- a/drivers/mfd/rohm-bd718x7.c
+++ b/drivers/mfd/rohm-bd718x7.c
@@ -87,7 +87,7 @@ static const struct regmap_config bd718xx_regmap_config = {
.val_bits = 8,
.volatile_table = &volatile_regs,
.max_register = BD718XX_MAX_REGISTER - 1,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static int bd718xx_init_press_duration(struct regmap *regmap,
diff --git a/drivers/mfd/rohm-bd9576.c b/drivers/mfd/rohm-bd9576.c
index bceac7016740d..3a9f61961721b 100644
--- a/drivers/mfd/rohm-bd9576.c
+++ b/drivers/mfd/rohm-bd9576.c
@@ -62,7 +62,7 @@ static struct regmap_config bd957x_regmap = {
.val_bits = 8,
.volatile_table = &volatile_regs,
.max_register = BD957X_MAX_REGISTER,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static struct regmap_irq bd9576_irqs[] = {
diff --git a/drivers/mfd/rsmu_i2c.c b/drivers/mfd/rsmu_i2c.c
index 06d78a1cf1ccb..5711e512b6a2f 100644
--- a/drivers/mfd/rsmu_i2c.c
+++ b/drivers/mfd/rsmu_i2c.c
@@ -188,7 +188,7 @@ static const struct regmap_config rsmu_sabre_regmap_config = {
.ranges = rsmu_sabre_range_cfg,
.num_ranges = ARRAY_SIZE(rsmu_sabre_range_cfg),
.volatile_reg = rsmu_sabre_volatile_reg,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.can_multi_write = true,
};
diff --git a/drivers/mfd/si476x-prop.c b/drivers/mfd/si476x-prop.c
index f0608d138f02e..3d5c118888b26 100644
--- a/drivers/mfd/si476x-prop.c
+++ b/drivers/mfd/si476x-prop.c
@@ -222,7 +222,7 @@ static const struct regmap_config si476x_regmap_config = {
.reg_read = si476x_core_regmap_read,
.reg_write = si476x_core_regmap_write,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
struct regmap *devm_regmap_init_si476x(struct si476x_core *core)
diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c
index c02cbd9c2f5d7..f391c2ccaa72a 100644
--- a/drivers/mfd/stmfx.c
+++ b/drivers/mfd/stmfx.c
@@ -53,7 +53,7 @@ static const struct regmap_config stmfx_regmap_config = {
.max_register = STMFX_REG_MAX,
.volatile_reg = stmfx_reg_volatile,
.writeable_reg = stmfx_reg_writeable,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct resource stmfx_pinctrl_resources[] = {
diff --git a/drivers/mfd/stpmic1.c b/drivers/mfd/stpmic1.c
index c5128fe96cc78..d8a603d95aa66 100644
--- a/drivers/mfd/stpmic1.c
+++ b/drivers/mfd/stpmic1.c
@@ -63,7 +63,7 @@ static const struct regmap_access_table stpmic1_volatile_table = {
static const struct regmap_config stpmic1_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.max_register = PMIC_MAX_REGISTER_ADDRESS,
.rd_table = &stpmic1_readable_table,
.wr_table = &stpmic1_writeable_table,
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index c9550368d9ea5..7d0e91164cbaa 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -238,7 +238,9 @@ struct regmap *syscon_regmap_lookup_by_phandle(struct device_node *np,
return ERR_PTR(-ENODEV);
regmap = syscon_node_to_regmap(syscon_np);
- of_node_put(syscon_np);
+
+ if (property)
+ of_node_put(syscon_np);
return regmap;
}
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 6e384a79e3418..c130ffef182f1 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -124,6 +124,11 @@
#define TWL6030_BASEADD_RSV 0x0000
#define TWL6030_BASEADD_ZERO 0x0000
+/* Some fields in TWL6030_PHOENIX_DEV_ON */
+#define TWL6030_APP_DEVOFF BIT(0)
+#define TWL6030_CON_DEVOFF BIT(1)
+#define TWL6030_MOD_DEVOFF BIT(2)
+
/* Few power values */
#define R_CFG_BOOT 0x05
@@ -687,6 +692,20 @@ static void twl_remove(struct i2c_client *client)
twl_priv->ready = false;
}
+static void twl6030_power_off(void)
+{
+ int err;
+ u8 val;
+
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &val, TWL6030_PHOENIX_DEV_ON);
+ if (err)
+ return;
+
+ val |= TWL6030_APP_DEVOFF | TWL6030_CON_DEVOFF | TWL6030_MOD_DEVOFF;
+ twl_i2c_write_u8(TWL_MODULE_PM_MASTER, val, TWL6030_PHOENIX_DEV_ON);
+}
+
+
static struct of_dev_auxdata twl_auxdata_lookup[] = {
OF_DEV_AUXDATA("ti,twl4030-gpio", 0, "twl4030-gpio", NULL),
{ /* sentinel */ },
@@ -852,6 +871,15 @@ twl_probe(struct i2c_client *client)
goto free;
}
+ if (twl_class_is_6030()) {
+ if (of_device_is_system_power_controller(node)) {
+ if (!pm_power_off)
+ pm_power_off = twl6030_power_off;
+ else
+ dev_warn(&client->dev, "Poweroff callback already assigned\n");
+ }
+ }
+
status = of_platform_populate(node, NULL, twl_auxdata_lookup,
&client->dev);
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index 1595e9c76132d..0bca948ab6bae 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -686,6 +686,9 @@ static bool twl4030_power_use_poweroff(const struct twl4030_power_data *pdata,
if (of_property_read_bool(node, "ti,use_poweroff"))
return true;
+ if (of_device_is_system_power_controller(node->parent))
+ return true;
+
return false;
}
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index f77ecc635b6f1..6a8602c1c4ee7 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -1922,7 +1922,7 @@ const struct regmap_config wm5102_spi_regmap = {
.readable_reg = wm5102_readable_register,
.volatile_reg = wm5102_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm5102_reg_default,
.num_reg_defaults = ARRAY_SIZE(wm5102_reg_default),
};
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index eba324875afdb..6ff33a54a068a 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -3202,7 +3202,7 @@ const struct regmap_config wm5110_spi_regmap = {
.readable_reg = wm5110_readable_register,
.volatile_reg = wm5110_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm5110_reg_default,
.num_reg_defaults = ARRAY_SIZE(wm5110_reg_default),
};
diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c
index 65b98f3fbd929..18618a8f92062 100644
--- a/drivers/mfd/wm831x-auxadc.c
+++ b/drivers/mfd/wm831x-auxadc.c
@@ -152,7 +152,7 @@ static irqreturn_t wm831x_auxadc_irq(int irq, void *irq_data)
static int wm831x_auxadc_read_polled(struct wm831x *wm831x,
enum wm831x_auxadc input)
{
- int ret, src, timeout;
+ int ret, src;
mutex_lock(&wm831x->auxadc_lock);
@@ -179,32 +179,25 @@ static int wm831x_auxadc_read_polled(struct wm831x *wm831x,
goto disable;
}
- /* If we're not using interrupts then poll the
- * interrupt status register */
- timeout = 5;
- while (timeout) {
- msleep(1);
+ /* If we're not using interrupts then read the interrupt status register */
+ msleep(20);
- ret = wm831x_reg_read(wm831x,
- WM831X_INTERRUPT_STATUS_1);
- if (ret < 0) {
- dev_err(wm831x->dev,
- "ISR 1 read failed: %d\n", ret);
- goto disable;
- }
+ ret = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_1);
+ if (ret < 0) {
+ dev_err(wm831x->dev,
+ "ISR 1 read failed: %d\n", ret);
+ goto disable;
+ }
- /* Did it complete? */
- if (ret & WM831X_AUXADC_DATA_EINT) {
- wm831x_reg_write(wm831x,
- WM831X_INTERRUPT_STATUS_1,
- WM831X_AUXADC_DATA_EINT);
- break;
- } else {
- dev_err(wm831x->dev,
- "AUXADC conversion timeout\n");
- ret = -EBUSY;
- goto disable;
- }
+ /* Did it complete? */
+ if (ret & WM831X_AUXADC_DATA_EINT) {
+ wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1,
+ WM831X_AUXADC_DATA_EINT);
+ } else {
+ dev_err(wm831x->dev,
+ "AUXADC conversion timeout\n");
+ ret = -EBUSY;
+ goto disable;
}
ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA);
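One detail visible in the removed lines: the old loop could never actually retry, because its else branch bailed out with -EBUSY as soon as the first status read lacked WM831X_AUXADC_DATA_EINT, so at most one 1 ms sleep ever ran. The replacement makes the intent explicit: one sleep sized for a worst-case conversion, then a single completion check. Reduced to its shape (register names and timing taken from the hunk, labels as in the function):

	msleep(20);	/* assumed worst-case AUXADC conversion time */

	ret = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_1);
	if (ret < 0)
		goto disable;			/* bus error */
	if (!(ret & WM831X_AUXADC_DATA_EINT)) {
		ret = -EBUSY;			/* still not done: give up */
		goto disable;
	}
	wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1, WM831X_AUXADC_DATA_EINT);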
diff --git a/drivers/mfd/wm8350-regmap.c b/drivers/mfd/wm8350-regmap.c
index 5663b8b0b3ad5..3d0ebb004dbf1 100644
--- a/drivers/mfd/wm8350-regmap.c
+++ b/drivers/mfd/wm8350-regmap.c
@@ -325,7 +325,7 @@ const struct regmap_config wm8350_regmap = {
.reg_bits = 8,
.val_bits = 16,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.max_register = WM8350_MAX_REGISTER,
.readable_reg = wm8350_readable,
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 75483c9be0c4d..ddfb234849dd1 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -100,7 +100,7 @@ static const struct regmap_config wm8400_regmap_config = {
.volatile_reg = wm8400_volatile,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
/**
diff --git a/drivers/mfd/wm97xx-core.c b/drivers/mfd/wm97xx-core.c
index 663acbb1854c9..1566a9b04b6a0 100644
--- a/drivers/mfd/wm97xx-core.c
+++ b/drivers/mfd/wm97xx-core.c
@@ -95,7 +95,7 @@ static const struct regmap_config wm9705_regmap_config = {
.reg_stride = 2,
.val_bits = 16,
.max_register = 0x7e,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm9705_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(wm9705_reg_defaults),
@@ -163,7 +163,7 @@ static const struct regmap_config wm9712_regmap_config = {
.reg_stride = 2,
.val_bits = 16,
.max_register = 0x7e,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm9712_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(wm9712_reg_defaults),
@@ -234,7 +234,7 @@ static const struct regmap_config wm9713_regmap_config = {
.reg_stride = 2,
.val_bits = 16,
.max_register = 0x7e,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm9713_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(wm9713_reg_defaults),
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index ee590c4a15379..6eac0f3359152 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -251,7 +251,7 @@ static int ssc_probe(struct platform_device *pdev)
return 0;
}
-static int ssc_remove(struct platform_device *pdev)
+static void ssc_remove(struct platform_device *pdev)
{
struct ssc_device *ssc = platform_get_drvdata(pdev);
@@ -260,8 +260,6 @@ static int ssc_remove(struct platform_device *pdev)
mutex_lock(&user_lock);
list_del(&ssc->list);
mutex_unlock(&user_lock);
-
- return 0;
}
static struct platform_driver ssc_driver = {
@@ -271,7 +269,7 @@ static struct platform_driver ssc_driver = {
},
.id_table = atmel_ssc_devtypes,
.probe = ssc_probe,
- .remove = ssc_remove,
+ .remove_new = ssc_remove,
};
module_platform_driver(ssc_driver);
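A large share of the hunks in this section (atmel-ssc, cxl/of, fastrpc, hisi_hikey_usb, open-dice, sram, ti-st, vcpu_stall_detector, xilinx_sdfec, xilinx_tmr_inject, mei/platform-vsc) are the same mechanical conversion: platform remove callbacks that always returned 0 switch to the void-returning .remove_new hook, since the driver core never acted usefully on that return value. The shape, sketched once with hypothetical foo_* names:

#include <linux/platform_device.h>

static void foo_remove(struct platform_device *pdev)	/* was: static int foo_remove(...) */
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	foo_teardown(priv);	/* hypothetical per-driver teardown */
	/* no trailing "return 0;" - there is nothing left to return */
}

static struct platform_driver foo_driver = {
	.probe      = foo_probe,	/* hypothetical probe */
	.remove_new = foo_remove,	/* void-returning replacement for .remove */
	.driver     = { .name = "foo" },
};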
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 1a64364700eb0..0ad2ff9065aad 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1002,7 +1002,7 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
} else {
pcr->card_removed |= SD_EXIST;
pcr->card_inserted &= ~SD_EXIST;
- if (PCI_PID(pcr) == PID_5261) {
+ if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c
index 25ce725035e77..bcc005dff1c0f 100644
--- a/drivers/misc/cxl/of.c
+++ b/drivers/misc/cxl/of.c
@@ -431,7 +431,7 @@ int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np)
return 0;
}
-static int cxl_of_remove(struct platform_device *pdev)
+static void cxl_of_remove(struct platform_device *pdev)
{
struct cxl *adapter;
int afu;
@@ -441,7 +441,6 @@ static int cxl_of_remove(struct platform_device *pdev)
cxl_guest_remove_afu(adapter->afu[afu]);
cxl_guest_remove_adapter(adapter);
- return 0;
}
static void cxl_of_shutdown(struct platform_device *pdev)
@@ -501,6 +500,6 @@ struct platform_driver cxl_of_driver = {
.owner = THIS_MODULE
},
.probe = cxl_of_probe,
- .remove = cxl_of_remove,
+ .remove_new = cxl_of_remove,
.shutdown = cxl_of_shutdown,
};
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index b630625b3024b..e78a76d74ff4f 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -14,7 +14,6 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/nvmem-provider.h>
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index d807d08e26144..327afb866b218 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -129,7 +129,7 @@ struct idt_smb_seq {
struct idt_eeprom_seq {
u8 cmd;
u8 eeaddr;
- u16 memaddr;
+ __le16 memaddr;
u8 data;
} __packed;
@@ -141,8 +141,8 @@ struct idt_eeprom_seq {
*/
struct idt_csr_seq {
u8 cmd;
- u16 csraddr;
- u32 data;
+ __le16 csraddr;
+ __le32 data;
} __packed;
/*
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index dbd26c3b245bc..4c67e2c5a82e1 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -2186,7 +2186,7 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
return 0;
}
-static int fastrpc_cb_remove(struct platform_device *pdev)
+static void fastrpc_cb_remove(struct platform_device *pdev)
{
struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
@@ -2201,8 +2201,6 @@ static int fastrpc_cb_remove(struct platform_device *pdev)
}
}
spin_unlock_irqrestore(&cctx->lock, flags);
-
- return 0;
}
static const struct of_device_id fastrpc_match_table[] = {
@@ -2212,7 +2210,7 @@ static const struct of_device_id fastrpc_match_table[] = {
static struct platform_driver fastrpc_cb_driver = {
.probe = fastrpc_cb_probe,
- .remove = fastrpc_cb_remove,
+ .remove_new = fastrpc_cb_remove,
.driver = {
.name = "qcom,fastrpc-cb",
.of_match_table = fastrpc_match_table,
diff --git a/drivers/misc/hi6421v600-irq.c b/drivers/misc/hi6421v600-irq.c
index b075d803a2c20..69ee4f39af2a7 100644
--- a/drivers/misc/hi6421v600-irq.c
+++ b/drivers/misc/hi6421v600-irq.c
@@ -11,7 +11,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>
diff --git a/drivers/misc/hisi_hikey_usb.c b/drivers/misc/hisi_hikey_usb.c
index 2165ec35a3438..fb9be37057a8d 100644
--- a/drivers/misc/hisi_hikey_usb.c
+++ b/drivers/misc/hisi_hikey_usb.c
@@ -14,7 +14,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/notifier.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
@@ -239,7 +238,7 @@ static int hisi_hikey_usb_probe(struct platform_device *pdev)
return 0;
}
-static int hisi_hikey_usb_remove(struct platform_device *pdev)
+static void hisi_hikey_usb_remove(struct platform_device *pdev)
{
struct hisi_hikey_usb *hisi_hikey_usb = platform_get_drvdata(pdev);
@@ -251,8 +250,6 @@ static int hisi_hikey_usb_remove(struct platform_device *pdev)
} else {
hub_power_ctrl(hisi_hikey_usb, HUB_VBUS_POWER_OFF);
}
-
- return 0;
}
static const struct of_device_id id_table_hisi_hikey_usb[] = {
@@ -263,7 +260,7 @@ MODULE_DEVICE_TABLE(of, id_table_hisi_hikey_usb);
static struct platform_driver hisi_hikey_usb_driver = {
.probe = hisi_hikey_usb_probe,
- .remove = hisi_hikey_usb_remove,
+ .remove_new = hisi_hikey_usb_remove,
.driver = {
.name = DEVICE_DRIVER_NAME,
.of_match_table = id_table_hisi_hikey_usb,
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index f1b74d3f89586..04bd34c8c5069 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -770,7 +770,7 @@ static void ilo_remove(struct pci_dev *pdev)
static int ilo_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int devnum, minor, start, error = 0;
+ int devnum, slot, start, error = 0;
struct ilo_hwinfo *ilo_hw;
if (pci_match_id(ilo_blacklist, pdev)) {
@@ -839,11 +839,11 @@ static int ilo_probe(struct pci_dev *pdev,
goto remove_isr;
}
- for (minor = 0 ; minor < max_ccb; minor++) {
+ for (slot = 0; slot < max_ccb; slot++) {
struct device *dev;
dev = device_create(&ilo_class, &pdev->dev,
- MKDEV(ilo_major, minor), NULL,
- "hpilo!d%dccb%d", devnum, minor);
+ MKDEV(ilo_major, start + slot), NULL,
+ "hpilo!d%dccb%d", devnum, slot);
if (IS_ERR(dev))
dev_err(&pdev->dev, "Could not create files\n");
}
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index b92767d6bdd24..5178c02b21eba 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -417,7 +417,7 @@ static void lkdtm_FAM_BOUNDS(void)
pr_err("FAIL: survived access of invalid flexible array member index!\n");
if (!__has_attribute(__counted_by__))
- pr_warn("This is expected since this %s was built a compiler supporting __counted_by\n",
+ pr_warn("This is expected since this %s was built with a compiler that does not support __counted_by\n",
lkdtm_kernel_info);
else if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
pr_expected_config(CONFIG_UBSAN_TRAP);
diff --git a/drivers/misc/mei/gsc-me.c b/drivers/misc/mei/gsc-me.c
index 6be8f1cc052c1..5a8c26c3df13d 100644
--- a/drivers/misc/mei/gsc-me.c
+++ b/drivers/misc/mei/gsc-me.c
@@ -144,9 +144,6 @@ static void mei_gsc_remove(struct auxiliary_device *aux_dev)
struct mei_me_hw *hw;
dev = dev_get_drvdata(&aux_dev->dev);
- if (!dev)
- return;
-
hw = to_me_hw(dev);
mei_stop(dev);
@@ -168,9 +165,6 @@ static int __maybe_unused mei_gsc_pm_suspend(struct device *device)
{
struct mei_device *dev = dev_get_drvdata(device);
- if (!dev)
- return -ENODEV;
-
mei_stop(dev);
mei_disable_interrupts(dev);
@@ -186,9 +180,6 @@ static int __maybe_unused mei_gsc_pm_resume(struct device *device)
int err;
struct mei_me_hw *hw;
- if (!dev)
- return -ENODEV;
-
hw = to_me_hw(dev);
aux_dev = to_auxiliary_dev(device);
adev = auxiliary_dev_to_mei_aux_dev(aux_dev);
@@ -211,8 +202,6 @@ static int __maybe_unused mei_gsc_pm_runtime_idle(struct device *device)
{
struct mei_device *dev = dev_get_drvdata(device);
- if (!dev)
- return -ENODEV;
if (mei_write_is_idle(dev))
pm_runtime_autosuspend(device);
@@ -225,9 +214,6 @@ static int __maybe_unused mei_gsc_pm_runtime_suspend(struct device *device)
struct mei_me_hw *hw;
int ret;
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->device_lock);
if (mei_write_is_idle(dev)) {
@@ -252,9 +238,6 @@ static int __maybe_unused mei_gsc_pm_runtime_resume(struct device *device)
struct mei_me_hw *hw;
irqreturn_t irq_ret;
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->device_lock);
hw = to_me_hw(dev);
@@ -293,6 +276,10 @@ static const struct auxiliary_device_id mei_gsc_id_table[] = {
.driver_data = MEI_ME_GSCFI_CFG,
},
{
+ .name = "xe.mei-gscfi",
+ .driver_data = MEI_ME_GSCFI_CFG,
+ },
+ {
/* sentinel */
}
};
@@ -312,5 +299,6 @@ module_auxiliary_driver(mei_gsc_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_ALIAS("auxiliary:i915.mei-gsc");
MODULE_ALIAS("auxiliary:i915.mei-gscfi");
+MODULE_ALIAS("auxiliary:xe.mei-gscfi");
MODULE_DESCRIPTION("Intel(R) Graphics System Controller");
MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mei/hdcp/Kconfig b/drivers/misc/mei/hdcp/Kconfig
index 9be312ec798de..631dd9651d7c9 100644
--- a/drivers/misc/mei/hdcp/Kconfig
+++ b/drivers/misc/mei/hdcp/Kconfig
@@ -4,7 +4,7 @@
config INTEL_MEI_HDCP
tristate "Intel HDCP2.2 services of ME Interface"
depends on INTEL_MEI_ME
- depends on DRM_I915
+ depends on DRM_I915 || DRM_XE
help
MEI Support for HDCP2.2 Services on Intel platforms.
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index 51359cc5ece9a..f8759a6c9ed33 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -17,6 +17,7 @@
*/
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mei.h>
#include <linux/mei_cl_bus.h>
@@ -781,9 +782,18 @@ static int mei_hdcp_component_match(struct device *dev, int subcomponent,
void *data)
{
struct device *base = data;
+ struct pci_dev *pdev;
- if (!dev->driver || strcmp(dev->driver->name, "i915") ||
- subcomponent != I915_COMPONENT_HDCP)
+ if (!dev_is_pci(dev))
+ return 0;
+
+ pdev = to_pci_dev(dev);
+
+ if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8) ||
+ pdev->vendor != PCI_VENDOR_ID_INTEL)
+ return 0;
+
+ if (subcomponent != I915_COMPONENT_HDCP)
return 0;
base = base->parent;
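The reworked match binds on the Intel GPU's PCI identity instead of the "i915" driver name, so either i915 or xe can provide the display side. The shift in the class test exists because pdev->class holds the 24-bit class code (base class, sub-class, prog-if) while PCI_CLASS_DISPLAY_VGA only covers base and sub-class. A small helper-style sketch of the same check (helper name is illustrative):

#include <linux/pci.h>
#include <linux/pci_ids.h>

/* pdev->class is 0xBBSSPP: base class, sub-class, programming interface.
 * PCI_CLASS_DISPLAY_VGA is 0x0300, so (0x0300 << 8) == 0x030000 requires
 * a VGA-compatible display controller with prog-if 0. */
static bool is_intel_vga(struct pci_dev *pdev)
{
	return pdev->vendor == PCI_VENDOR_ID_INTEL &&
	       pdev->class == (PCI_CLASS_DISPLAY_VGA << 8);
}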
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 8cf636c540322..c39718042e2e0 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -116,7 +116,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
@@ -297,11 +297,7 @@ end:
*/
static void mei_me_shutdown(struct pci_dev *pdev)
{
- struct mei_device *dev;
-
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return;
+ struct mei_device *dev = pci_get_drvdata(pdev);
dev_dbg(&pdev->dev, "shutdown\n");
mei_stop(dev);
@@ -322,11 +318,7 @@ static void mei_me_shutdown(struct pci_dev *pdev)
*/
static void mei_me_remove(struct pci_dev *pdev)
{
- struct mei_device *dev;
-
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return;
+ struct mei_device *dev = pci_get_drvdata(pdev);
if (mei_pg_is_enabled(dev))
pm_runtime_get_noresume(&pdev->dev);
@@ -355,9 +347,6 @@ static int mei_me_pci_suspend(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev = pci_get_drvdata(pdev);
- if (!dev)
- return -ENODEV;
-
dev_dbg(&pdev->dev, "suspend\n");
mei_stop(dev);
@@ -373,14 +362,10 @@ static int mei_me_pci_suspend(struct device *device)
static int mei_me_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct mei_device *dev;
+ struct mei_device *dev = pci_get_drvdata(pdev);
unsigned int irqflags;
int err;
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return -ENODEV;
-
pci_enable_msi(pdev);
irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;
@@ -421,13 +406,10 @@ static void mei_me_pci_complete(struct device *device)
#ifdef CONFIG_PM
static int mei_me_pm_runtime_idle(struct device *device)
{
- struct mei_device *dev;
+ struct mei_device *dev = dev_get_drvdata(device);
dev_dbg(device, "rpm: me: runtime_idle\n");
- dev = dev_get_drvdata(device);
- if (!dev)
- return -ENODEV;
if (mei_write_is_idle(dev))
pm_runtime_autosuspend(device);
@@ -436,15 +418,11 @@ static int mei_me_pm_runtime_idle(struct device *device)
static int mei_me_pm_runtime_suspend(struct device *device)
{
- struct mei_device *dev;
+ struct mei_device *dev = dev_get_drvdata(device);
int ret;
dev_dbg(device, "rpm: me: runtime suspend\n");
- dev = dev_get_drvdata(device);
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->device_lock);
if (mei_write_is_idle(dev))
@@ -464,15 +442,11 @@ static int mei_me_pm_runtime_suspend(struct device *device)
static int mei_me_pm_runtime_resume(struct device *device)
{
- struct mei_device *dev;
+ struct mei_device *dev = dev_get_drvdata(device);
int ret;
dev_dbg(device, "rpm: me: runtime resume\n");
- dev = dev_get_drvdata(device);
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->device_lock);
ret = mei_me_pg_exit_sync(dev);
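The mei hunks in this area (gsc-me.c above, pci-me.c here, pci-txe.c below) all drop the same defensive "if (!dev) return ..." checks. The rationale, assumed from the surrounding code: drvdata is set during a successful probe, and remove, shutdown and the PM callbacks only run for a bound device, so the pointer cannot be NULL on those paths. Reduced example of the resulting shape:

static int mei_me_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	/* set by probe before the device is marked bound, so never NULL here */
	struct mei_device *dev = pci_get_drvdata(pdev);

	mei_stop(dev);
	return 0;
}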
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index fa20d9a27813b..2a584104ba388 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -166,11 +166,7 @@ end:
*/
static void mei_txe_shutdown(struct pci_dev *pdev)
{
- struct mei_device *dev;
-
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return;
+ struct mei_device *dev = pci_get_drvdata(pdev);
dev_dbg(&pdev->dev, "shutdown\n");
mei_stop(dev);
@@ -191,13 +187,7 @@ static void mei_txe_shutdown(struct pci_dev *pdev)
*/
static void mei_txe_remove(struct pci_dev *pdev)
{
- struct mei_device *dev;
-
- dev = pci_get_drvdata(pdev);
- if (!dev) {
- dev_err(&pdev->dev, "mei: dev == NULL\n");
- return;
- }
+ struct mei_device *dev = pci_get_drvdata(pdev);
pm_runtime_get_noresume(&pdev->dev);
@@ -218,9 +208,6 @@ static int mei_txe_pci_suspend(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev = pci_get_drvdata(pdev);
- if (!dev)
- return -ENODEV;
-
dev_dbg(&pdev->dev, "suspend\n");
mei_stop(dev);
@@ -236,13 +223,9 @@ static int mei_txe_pci_suspend(struct device *device)
static int mei_txe_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct mei_device *dev;
+ struct mei_device *dev = pci_get_drvdata(pdev);
int err;
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return -ENODEV;
-
pci_enable_msi(pdev);
mei_clear_interrupts(dev);
@@ -273,13 +256,10 @@ static int mei_txe_pci_resume(struct device *device)
#ifdef CONFIG_PM
static int mei_txe_pm_runtime_idle(struct device *device)
{
- struct mei_device *dev;
+ struct mei_device *dev = dev_get_drvdata(device);
dev_dbg(device, "rpm: txe: runtime_idle\n");
- dev = dev_get_drvdata(device);
- if (!dev)
- return -ENODEV;
if (mei_write_is_idle(dev))
pm_runtime_autosuspend(device);
@@ -287,15 +267,11 @@ static int mei_txe_pm_runtime_idle(struct device *device)
}
static int mei_txe_pm_runtime_suspend(struct device *device)
{
- struct mei_device *dev;
+ struct mei_device *dev = dev_get_drvdata(device);
int ret;
dev_dbg(device, "rpm: txe: runtime suspend\n");
- dev = dev_get_drvdata(device);
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->device_lock);
if (mei_write_is_idle(dev))
@@ -317,15 +293,11 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
static int mei_txe_pm_runtime_resume(struct device *device)
{
- struct mei_device *dev;
+ struct mei_device *dev = dev_get_drvdata(device);
int ret;
dev_dbg(device, "rpm: txe: runtime resume\n");
- dev = dev_get_drvdata(device);
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->device_lock);
mei_enable_interrupts(dev);
diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c
index 8d303c6c00006..b543e6b9f3cfd 100644
--- a/drivers/misc/mei/platform-vsc.c
+++ b/drivers/misc/mei/platform-vsc.c
@@ -384,7 +384,7 @@ err_cancel:
return ret;
}
-static int mei_vsc_remove(struct platform_device *pdev)
+static void mei_vsc_remove(struct platform_device *pdev)
{
struct mei_device *mei_dev = platform_get_drvdata(pdev);
@@ -395,32 +395,45 @@ static int mei_vsc_remove(struct platform_device *pdev)
mei_disable_interrupts(mei_dev);
mei_deregister(mei_dev);
-
- return 0;
}
static int mei_vsc_suspend(struct device *dev)
{
struct mei_device *mei_dev = dev_get_drvdata(dev);
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
mei_stop(mei_dev);
+ mei_disable_interrupts(mei_dev);
+
+ vsc_tp_free_irq(hw->tp);
+
return 0;
}
static int mei_vsc_resume(struct device *dev)
{
struct mei_device *mei_dev = dev_get_drvdata(dev);
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
int ret;
- ret = mei_restart(mei_dev);
+ ret = vsc_tp_request_irq(hw->tp);
if (ret)
return ret;
+ ret = mei_restart(mei_dev);
+ if (ret)
+ goto err_free;
+
/* start timer if stopped in suspend */
schedule_delayed_work(&mei_dev->timer_work, HZ);
return 0;
+
+err_free:
+ vsc_tp_free_irq(hw->tp);
+
+ return ret;
}
static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);
@@ -433,7 +446,7 @@ MODULE_DEVICE_TABLE(platform, mei_vsc_id_table);
static struct platform_driver mei_vsc_drv = {
.probe = mei_vsc_probe,
- .remove = mei_vsc_remove,
+ .remove_new = mei_vsc_remove,
.id_table = mei_vsc_id_table,
.driver = {
.name = MEI_VSC_DRV_NAME,
diff --git a/drivers/misc/mei/pxp/Kconfig b/drivers/misc/mei/pxp/Kconfig
index e9219b61cd92f..aa2dece4a927a 100644
--- a/drivers/misc/mei/pxp/Kconfig
+++ b/drivers/misc/mei/pxp/Kconfig
@@ -4,7 +4,7 @@
config INTEL_MEI_PXP
tristate "Intel PXP services of ME Interface"
depends on INTEL_MEI_ME
- depends on DRM_I915
+ depends on DRM_I915 || DRM_XE
help
MEI Support for PXP Services on Intel platforms.
diff --git a/drivers/misc/mei/pxp/mei_pxp.c b/drivers/misc/mei/pxp/mei_pxp.c
index 787c6a27a4be6..b1e4c23b31a32 100644
--- a/drivers/misc/mei/pxp/mei_pxp.c
+++ b/drivers/misc/mei/pxp/mei_pxp.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mei.h>
#include <linux/mei_cl_bus.h>
@@ -225,12 +226,21 @@ static int mei_pxp_component_match(struct device *dev, int subcomponent,
void *data)
{
struct device *base = data;
+ struct pci_dev *pdev;
if (!dev)
return 0;
- if (!dev->driver || strcmp(dev->driver->name, "i915") ||
- subcomponent != I915_COMPONENT_PXP)
+ if (!dev_is_pci(dev))
+ return 0;
+
+ pdev = to_pci_dev(dev);
+
+ if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8) ||
+ pdev->vendor != PCI_VENDOR_ID_INTEL)
+ return 0;
+
+ if (subcomponent != I915_COMPONENT_PXP)
return 0;
base = base->parent;
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
index 55f7db490d3bb..e6a98dba8a735 100644
--- a/drivers/misc/mei/vsc-tp.c
+++ b/drivers/misc/mei/vsc-tp.c
@@ -25,7 +25,8 @@
#define VSC_TP_ROM_BOOTUP_DELAY_MS 10
#define VSC_TP_ROM_XFER_POLL_TIMEOUT_US (500 * USEC_PER_MSEC)
#define VSC_TP_ROM_XFER_POLL_DELAY_US (20 * USEC_PER_MSEC)
-#define VSC_TP_WAIT_FW_ASSERTED_TIMEOUT (2 * HZ)
+#define VSC_TP_WAIT_FW_POLL_TIMEOUT (2 * HZ)
+#define VSC_TP_WAIT_FW_POLL_DELAY_US (20 * USEC_PER_MSEC)
#define VSC_TP_MAX_XFER_COUNT 5
#define VSC_TP_PACKET_SYNC 0x31
@@ -93,6 +94,27 @@ static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
{}
};
+static irqreturn_t vsc_tp_isr(int irq, void *data)
+{
+ struct vsc_tp *tp = data;
+
+ atomic_inc(&tp->assert_cnt);
+
+ wake_up(&tp->xfer_wait);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
+{
+ struct vsc_tp *tp = data;
+
+ if (tp->event_notify)
+ tp->event_notify(tp->event_notify_context);
+
+ return IRQ_HANDLED;
+}
+
/* wakeup firmware and wait for response */
static int vsc_tp_wakeup_request(struct vsc_tp *tp)
{
@@ -101,13 +123,15 @@ static int vsc_tp_wakeup_request(struct vsc_tp *tp)
gpiod_set_value_cansleep(tp->wakeupfw, 0);
ret = wait_event_timeout(tp->xfer_wait,
- atomic_read(&tp->assert_cnt) &&
- gpiod_get_value_cansleep(tp->wakeuphost),
- VSC_TP_WAIT_FW_ASSERTED_TIMEOUT);
+ atomic_read(&tp->assert_cnt),
+ VSC_TP_WAIT_FW_POLL_TIMEOUT);
if (!ret)
return -ETIMEDOUT;
- return 0;
+ return read_poll_timeout(gpiod_get_value_cansleep, ret, ret,
+ VSC_TP_WAIT_FW_POLL_DELAY_US,
+ VSC_TP_WAIT_FW_POLL_TIMEOUT, false,
+ tp->wakeuphost);
}
static void vsc_tp_wakeup_release(struct vsc_tp *tp)
@@ -381,6 +405,37 @@ int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP);
/**
+ * vsc_tp_request_irq - request irq for vsc_tp device
+ * @tp: vsc_tp device handle
+ */
+int vsc_tp_request_irq(struct vsc_tp *tp)
+{
+ struct spi_device *spi = tp->spi;
+ struct device *dev = &spi->dev;
+ int ret;
+
+ irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
+ ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(dev), tp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_request_irq, VSC_TP);
+
+/**
+ * vsc_tp_free_irq - free irq for vsc_tp device
+ * @tp: vsc_tp device handle
+ */
+void vsc_tp_free_irq(struct vsc_tp *tp)
+{
+ free_irq(tp->spi->irq, tp);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_free_irq, VSC_TP);
+
+/**
* vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
* @tp: vsc_tp device handle
*/
@@ -410,27 +465,6 @@ void vsc_tp_intr_disable(struct vsc_tp *tp)
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP);
-static irqreturn_t vsc_tp_isr(int irq, void *data)
-{
- struct vsc_tp *tp = data;
-
- atomic_inc(&tp->assert_cnt);
-
- wake_up(&tp->xfer_wait);
-
- return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
-{
- struct vsc_tp *tp = data;
-
- if (tp->event_notify)
- tp->event_notify(tp->event_notify_context);
-
- return IRQ_HANDLED;
-}
-
static int vsc_tp_match_any(struct acpi_device *adev, void *data)
{
struct acpi_device **__adev = data;
@@ -442,11 +476,16 @@ static int vsc_tp_match_any(struct acpi_device *adev, void *data)
static int vsc_tp_probe(struct spi_device *spi)
{
- struct platform_device_info pinfo = { 0 };
+ struct vsc_tp *tp;
+ struct platform_device_info pinfo = {
+ .name = "intel_vsc",
+ .data = &tp,
+ .size_data = sizeof(tp),
+ .id = PLATFORM_DEVID_NONE,
+ };
struct device *dev = &spi->dev;
struct platform_device *pdev;
struct acpi_device *adev;
- struct vsc_tp *tp;
int ret;
tp = devm_kzalloc(dev, sizeof(*tp), GFP_KERNEL);
@@ -482,10 +521,9 @@ static int vsc_tp_probe(struct spi_device *spi)
tp->spi = spi;
irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
- ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr,
- vsc_tp_thread_isr,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- dev_name(dev), tp);
+ ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(dev), tp);
if (ret)
return ret;
@@ -498,13 +536,8 @@ static int vsc_tp_probe(struct spi_device *spi)
ret = -ENODEV;
goto err_destroy_lock;
}
- pinfo.fwnode = acpi_fwnode_handle(adev);
-
- pinfo.name = "intel_vsc";
- pinfo.data = &tp;
- pinfo.size_data = sizeof(tp);
- pinfo.id = PLATFORM_DEVID_NONE;
+ pinfo.fwnode = acpi_fwnode_handle(adev);
pdev = platform_device_register_full(&pinfo);
if (IS_ERR(pdev)) {
ret = PTR_ERR(pdev);
@@ -519,6 +552,8 @@ static int vsc_tp_probe(struct spi_device *spi)
err_destroy_lock:
mutex_destroy(&tp->mutex);
+ free_irq(spi->irq, tp);
+
return ret;
}
@@ -529,6 +564,8 @@ static void vsc_tp_remove(struct spi_device *spi)
platform_device_unregister(tp->pdev);
mutex_destroy(&tp->mutex);
+
+ free_irq(spi->irq, tp);
}
static const struct acpi_device_id vsc_tp_acpi_ids[] = {
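The rewritten wakeup wait above is split into an interrupt-driven part (wait_event_timeout() on the assert counter) and a polled part using read_poll_timeout() from <linux/iopoll.h>, which repeatedly evaluates val = op(args...) until cond is true and returns 0 on success or -ETIMEDOUT. A self-contained sketch of that macro on a hypothetical "ready" GPIO; the delay and timeout values here are illustrative:

#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>

static int wait_for_ready_gpio(struct gpio_desc *ready)
{
	int val;

	/* sample every 20 ms, give up after 2 s, no sleep before the first read */
	return read_poll_timeout(gpiod_get_value_cansleep, val, val,
				 20 * USEC_PER_MSEC, 2 * USEC_PER_SEC,
				 false, ready);
}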
diff --git a/drivers/misc/mei/vsc-tp.h b/drivers/misc/mei/vsc-tp.h
index f9513ddc3e409..14ca195cbddcc 100644
--- a/drivers/misc/mei/vsc-tp.h
+++ b/drivers/misc/mei/vsc-tp.h
@@ -37,6 +37,9 @@ int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
void *context);
+int vsc_tp_request_irq(struct vsc_tp *tp);
+void vsc_tp_free_irq(struct vsc_tp *tp);
+
void vsc_tp_intr_enable(struct vsc_tp *tp);
void vsc_tp_intr_disable(struct vsc_tp *tp);
void vsc_tp_intr_synchronize(struct vsc_tp *tp);
diff --git a/drivers/misc/open-dice.c b/drivers/misc/open-dice.c
index d279a4f195e2a..1e3eb2aa44d9d 100644
--- a/drivers/misc/open-dice.c
+++ b/drivers/misc/open-dice.c
@@ -165,12 +165,11 @@ static int __init open_dice_probe(struct platform_device *pdev)
return 0;
}
-static int open_dice_remove(struct platform_device *pdev)
+static void open_dice_remove(struct platform_device *pdev)
{
struct open_dice_drvdata *drvdata = platform_get_drvdata(pdev);
misc_deregister(&drvdata->misc);
- return 0;
}
static const struct of_device_id open_dice_of_match[] = {
@@ -179,7 +178,7 @@ static const struct of_device_id open_dice_of_match[] = {
};
static struct platform_driver open_dice_driver = {
- .remove = open_dice_remove,
+ .remove_new = open_dice_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = open_dice_of_match,
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 629edb6486dea..3557d78ee47a2 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -227,7 +227,7 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
if (unlikely(pmd_none(*pmdp)))
goto err;
#ifdef CONFIG_X86_64
- if (unlikely(pmd_large(*pmdp)))
+ if (unlikely(pmd_leaf(*pmdp)))
pte = ptep_get((pte_t *)pmdp);
else
#endif
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index e248c0a8882f2..546eb06a40d04 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -435,7 +435,7 @@ err_free_partitions:
return ret;
}
-static int sram_remove(struct platform_device *pdev)
+static void sram_remove(struct platform_device *pdev)
{
struct sram_dev *sram = platform_get_drvdata(pdev);
@@ -443,8 +443,6 @@ static int sram_remove(struct platform_device *pdev)
if (sram->pool && gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
dev_err(sram->dev, "removed while SRAM allocated\n");
-
- return 0;
}
static struct platform_driver sram_driver = {
@@ -453,7 +451,7 @@ static struct platform_driver sram_driver = {
.of_match_table = sram_dt_ids,
},
.probe = sram_probe,
- .remove = sram_remove,
+ .remove_new = sram_remove,
};
static int __init sram_init(void)
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 4b1be0bb6ac09..47ebe80bf8499 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -774,7 +774,7 @@ err_core_init:
return err;
}
-static int kim_remove(struct platform_device *pdev)
+static void kim_remove(struct platform_device *pdev)
{
/* free the GPIOs requested */
struct ti_st_plat_data *pdata = pdev->dev.platform_data;
@@ -798,7 +798,6 @@ static int kim_remove(struct platform_device *pdev)
kfree(kim_gdata);
kim_gdata = NULL;
- return 0;
}
static int kim_suspend(struct platform_device *pdev, pm_message_t state)
@@ -825,7 +824,7 @@ static int kim_resume(struct platform_device *pdev)
/* entry point for ST KIM module, called in from ST Core */
static struct platform_driver kim_platform_driver = {
.probe = kim_probe,
- .remove = kim_remove,
+ .remove_new = kim_remove,
.suspend = kim_suspend,
.resume = kim_resume,
.driver = {
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index eee9b6581604e..d2eb31f39aa7d 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -166,7 +166,7 @@ static void tifm_free(struct device *dev)
kfree(fm);
}
-static struct class tifm_adapter_class = {
+static const struct class tifm_adapter_class = {
.name = "tifm_adapter",
.dev_release = tifm_free
};
diff --git a/drivers/misc/vcpu_stall_detector.c b/drivers/misc/vcpu_stall_detector.c
index 6479c962da1ac..e2015c87f03fc 100644
--- a/drivers/misc/vcpu_stall_detector.c
+++ b/drivers/misc/vcpu_stall_detector.c
@@ -187,7 +187,7 @@ err:
return ret;
}
-static int vcpu_stall_detect_remove(struct platform_device *pdev)
+static void vcpu_stall_detect_remove(struct platform_device *pdev)
{
int cpu;
@@ -195,8 +195,6 @@ static int vcpu_stall_detect_remove(struct platform_device *pdev)
for_each_possible_cpu(cpu)
stop_stall_detector_cpu(cpu);
-
- return 0;
}
static const struct of_device_id vcpu_stall_detect_of_match[] = {
@@ -208,7 +206,7 @@ MODULE_DEVICE_TABLE(of, vcpu_stall_detect_of_match);
static struct platform_driver vcpu_stall_detect_driver = {
.probe = vcpu_stall_detect_probe,
- .remove = vcpu_stall_detect_remove,
+ .remove_new = vcpu_stall_detect_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = vcpu_stall_detect_of_match,
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index 94a0ee19bf20a..ea433695f4c47 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -1420,7 +1420,7 @@ err_xsdfec_dev:
return err;
}
-static int xsdfec_remove(struct platform_device *pdev)
+static void xsdfec_remove(struct platform_device *pdev)
{
struct xsdfec_dev *xsdfec;
@@ -1428,7 +1428,6 @@ static int xsdfec_remove(struct platform_device *pdev)
misc_deregister(&xsdfec->miscdev);
ida_free(&dev_nrs, xsdfec->dev_id);
xsdfec_disable_all_clks(&xsdfec->clks);
- return 0;
}
static const struct of_device_id xsdfec_of_match[] = {
@@ -1445,7 +1444,7 @@ static struct platform_driver xsdfec_driver = {
.of_match_table = xsdfec_of_match,
},
.probe = xsdfec_probe,
- .remove = xsdfec_remove,
+ .remove_new = xsdfec_remove,
};
module_platform_driver(xsdfec_driver);
diff --git a/drivers/misc/xilinx_tmr_inject.c b/drivers/misc/xilinx_tmr_inject.c
index 9fc5835bfebc2..73c6da7d09631 100644
--- a/drivers/misc/xilinx_tmr_inject.c
+++ b/drivers/misc/xilinx_tmr_inject.c
@@ -143,11 +143,10 @@ static int xtmr_inject_probe(struct platform_device *pdev)
return 0;
}
-static int xtmr_inject_remove(struct platform_device *pdev)
+static void xtmr_inject_remove(struct platform_device *pdev)
{
debugfs_remove_recursive(dbgfs_root);
dbgfs_root = NULL;
- return 0;
}
static const struct of_device_id xtmr_inject_of_match[] = {
@@ -164,7 +163,7 @@ static struct platform_driver xtmr_inject_driver = {
.of_match_table = xtmr_inject_of_match,
},
.probe = xtmr_inject_probe,
- .remove = xtmr_inject_remove,
+ .remove_new = xtmr_inject_remove,
};
module_platform_driver(xtmr_inject_driver);
MODULE_AUTHOR("Advanced Micro Devices, Inc");
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 64a3492e8002f..90c51b12148e8 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -413,7 +413,7 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
struct mmc_blk_ioc_data *idata;
int err;
- idata = kmalloc(sizeof(*idata), GFP_KERNEL);
+ idata = kzalloc(sizeof(*idata), GFP_KERNEL);
if (!idata) {
err = -ENOMEM;
goto out;
@@ -488,7 +488,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
if (idata->flags & MMC_BLK_IOC_DROP)
return 0;
- if (idata->flags & MMC_BLK_IOC_SBC)
+ if (idata->flags & MMC_BLK_IOC_SBC && i > 0)
prev_idata = idatas[i - 1];
/*
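Two small hardenings in this file: kzalloc() starts the ioctl descriptor zeroed, so flag fields that are only written on some paths no longer read as heap garbage, and the added "i > 0" keeps the set-block-count lookup from reaching idatas[-1] when the first descriptor already carries MMC_BLK_IOC_SBC. In isolation:

	idata = kzalloc(sizeof(*idata), GFP_KERNEL);	/* zeroed, unlike kmalloc() */

	/* only look back when a previous descriptor actually exists */
	if ((idata->flags & MMC_BLK_IOC_SBC) && i > 0)
		prev_idata = idatas[i - 1];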
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 088f8ed4fdc46..a8ee0df471482 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1114,10 +1114,25 @@ static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
host = slot->host;
- if (slot->vsd)
- gpiod_set_value(slot->vsd, power_on);
- if (slot->vio)
- gpiod_set_value(slot->vio, power_on);
+ if (power_on) {
+ if (slot->vsd) {
+ gpiod_set_value(slot->vsd, power_on);
+ msleep(1);
+ }
+ if (slot->vio) {
+ gpiod_set_value(slot->vio, power_on);
+ msleep(1);
+ }
+ } else {
+ if (slot->vio) {
+ gpiod_set_value(slot->vio, power_on);
+ msleep(50);
+ }
+ if (slot->vsd) {
+ gpiod_set_value(slot->vsd, power_on);
+ msleep(50);
+ }
+ }
if (slot->pdata->set_power != NULL)
slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
@@ -1254,18 +1269,18 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
slot->pdata = &host->pdata->slots[id];
/* Check for some optional GPIO controls */
- slot->vsd = gpiod_get_index_optional(host->dev, "vsd",
- id, GPIOD_OUT_LOW);
+ slot->vsd = devm_gpiod_get_index_optional(host->dev, "vsd",
+ id, GPIOD_OUT_LOW);
if (IS_ERR(slot->vsd))
return dev_err_probe(host->dev, PTR_ERR(slot->vsd),
"error looking up VSD GPIO\n");
- slot->vio = gpiod_get_index_optional(host->dev, "vio",
- id, GPIOD_OUT_LOW);
+ slot->vio = devm_gpiod_get_index_optional(host->dev, "vio",
+ id, GPIOD_OUT_LOW);
if (IS_ERR(slot->vio))
return dev_err_probe(host->dev, PTR_ERR(slot->vio),
"error looking up VIO GPIO\n");
- slot->cover = gpiod_get_index_optional(host->dev, "cover",
- id, GPIOD_IN);
+ slot->cover = devm_gpiod_get_index_optional(host->dev, "cover",
+ id, GPIOD_IN);
if (IS_ERR(slot->cover))
return dev_err_probe(host->dev, PTR_ERR(slot->cover),
"error looking up cover switch GPIO\n");
@@ -1379,13 +1394,6 @@ static int mmc_omap_probe(struct platform_device *pdev)
if (IS_ERR(host->virt_base))
return PTR_ERR(host->virt_base);
- host->slot_switch = gpiod_get_optional(host->dev, "switch",
- GPIOD_OUT_LOW);
- if (IS_ERR(host->slot_switch))
- return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
- "error looking up slot switch GPIO\n");
-
-
INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
@@ -1404,6 +1412,12 @@ static int mmc_omap_probe(struct platform_device *pdev)
host->dev = &pdev->dev;
platform_set_drvdata(pdev, host);
+ host->slot_switch = devm_gpiod_get_optional(host->dev, "switch",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(host->slot_switch))
+ return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
+ "error looking up slot switch GPIO\n");
+
host->id = pdev->id;
host->irq = irq;
host->phys_base = res->start;
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index ab4b964d40584..1d8f5a76096ae 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -999,6 +999,17 @@ free_pltfm:
return err;
}
+static void dwcmshc_disable_card_clk(struct sdhci_host *host)
+{
+ u16 ctrl;
+
+ ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (ctrl & SDHCI_CLOCK_CARD_EN) {
+ ctrl &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
+ }
+}
+
static void dwcmshc_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
@@ -1006,8 +1017,14 @@ static void dwcmshc_remove(struct platform_device *pdev)
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
struct rk35xx_priv *rk_priv = priv->priv;
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
sdhci_remove_host(host, 0);
+ dwcmshc_disable_card_clk(host);
+
clk_disable_unprepare(pltfm_host->clk);
clk_disable_unprepare(priv->bus_clk);
if (rk_priv)
@@ -1099,17 +1116,6 @@ static void dwcmshc_enable_card_clk(struct sdhci_host *host)
}
}
-static void dwcmshc_disable_card_clk(struct sdhci_host *host)
-{
- u16 ctrl;
-
- ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
- if (ctrl & SDHCI_CLOCK_CARD_EN) {
- ctrl &= ~SDHCI_CLOCK_CARD_EN;
- sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
- }
-}
-
static int dwcmshc_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
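The dwcmshc remove path above now brackets teardown with runtime-PM calls so the controller is guaranteed to be powered while it is dismantled: pm_runtime_get_sync() resumes it, pm_runtime_disable() stops further runtime suspends, and pm_runtime_put_noidle() rebalances the usage count without triggering an idle transition; only then are the host and the card clock taken down. Ordering sketch (host obtained from drvdata as in the hunk):

	pm_runtime_get_sync(&pdev->dev);	/* make sure the device is resumed */
	pm_runtime_disable(&pdev->dev);		/* no more runtime suspends from here on */
	pm_runtime_put_noidle(&pdev->dev);	/* drop our count, stay powered */

	sdhci_remove_host(host, 0);
	dwcmshc_disable_card_clk(host);		/* gate the SDHCI card clock last */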
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index e78faef67d7ab..94076b0955719 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1439,6 +1439,9 @@ static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ if (host->tuning_mode != SDHCI_TUNING_MODE_3)
+ mmc_retune_needed(host->mmc);
+
if (omap_host->con != -EINVAL)
sdhci_runtime_suspend_host(host);
diff --git a/drivers/most/core.c b/drivers/most/core.c
index e4412c7d25b0e..f13d0e14a48b6 100644
--- a/drivers/most/core.c
+++ b/drivers/most/core.c
@@ -499,7 +499,7 @@ static int most_match(struct device *dev, struct device_driver *drv)
return 1;
}
-static struct bus_type mostbus = {
+static const struct bus_type mostbus = {
.name = "most",
.match = most_match,
};
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index df589d9b4d705..9f2223d3e8e11 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -2411,7 +2411,7 @@ static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
struct cfi_private *cfi = map->fldrv_priv;
- unsigned long timeo = jiffies + HZ;
+ unsigned long timeo;
unsigned long int adr;
DECLARE_WAITQUEUE(wait, current);
int ret;
@@ -2512,7 +2512,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
- unsigned long timeo = jiffies + HZ;
+ unsigned long timeo;
DECLARE_WAITQUEUE(wait, current);
int ret;
int retry_cnt = 0;
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 97a00ec9a4d48..caacdc0a38194 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -209,7 +209,7 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
if (dev->bdev_file) {
invalidate_mapping_pages(dev->bdev_file->f_mapping, 0, -1);
- fput(dev->bdev_file);
+ bdev_fput(dev->bdev_file);
}
kfree(dev);
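fput() becomes bdev_fput() here because the struct file was obtained through the block-layer open helpers, and bdev_fput() is their matching release: it also drops the block-device claim before putting the file. A hedged sketch of the pairing, assuming the newer bdev_file_open_by_path()/bdev_fput() helpers (path and holder are illustrative):

#include <linux/blkdev.h>
#include <linux/err.h>

static int open_backing_bdev_sketch(void *holder)
{
	struct file *bdev_file;

	bdev_file = bdev_file_open_by_path("/dev/hypothetical",
					   BLK_OPEN_READ | BLK_OPEN_WRITE,
					   holder, NULL);
	if (IS_ERR(bdev_file))
		return PTR_ERR(bdev_file);

	/* ... work against file_bdev(bdev_file) ... */

	bdev_fput(bdev_file);	/* not plain fput(): drops the bdev claim as well */
	return 0;
}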
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index e098ae937ce88..8a8b19874e239 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -341,13 +341,6 @@ config MTD_UCLINUX
help
Map driver to support image based filesystems for uClinux.
-config MTD_INTEL_VR_NOR
- tristate "NOR flash on Intel Vermilion Range Expansion Bus CS0"
- depends on PCI
- help
- Map driver for a NOR flash bank located on the Expansion Bus of the
- Intel Vermilion Range chipset.
-
config MTD_PLATRAM
tristate "Map driver for platform device RAM (mtd-ram)"
select MTD_RAM
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 094cfb2440865..a9083c888e3b8 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -40,6 +40,5 @@ obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
obj-$(CONFIG_MTD_NETtel) += nettel.o
obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
-obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
obj-$(CONFIG_MTD_VMU) += vmu-flash.o
obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
deleted file mode 100644
index d67b845b0e896..0000000000000
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * drivers/mtd/maps/intel_vr_nor.c
- *
- * An MTD map driver for a NOR flash bank on the Expansion Bus of the Intel
- * Vermilion Range chipset.
- *
- * The Vermilion Range Expansion Bus supports four chip selects, each of which
- * has 64MiB of address space. The 2nd BAR of the Expansion Bus PCI Device
- * is a 256MiB memory region containing the address spaces for all four of the
- * chip selects, with start addresses hardcoded on 64MiB boundaries.
- *
- * This map driver only supports NOR flash on chip select 0. The buswidth
- * (either 8 bits or 16 bits) is determined by reading the Expansion Bus Timing
- * and Control Register for Chip Select 0 (EXP_TIMING_CS0). This driver does
- * not modify the value in the EXP_TIMING_CS0 register except to enable writing
- * and disable boot acceleration. The timing parameters in the register are
- * assumed to have been properly initialized by the BIOS. The reset default
- * timing parameters are maximally conservative (slow), so access to the flash
- * will be slower than it should be if the BIOS has not initialized the timing
- * parameters.
- *
- * Author: Andy Lowe <alowe@mvista.com>
- *
- * 2006 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/cfi.h>
-#include <linux/mtd/flashchip.h>
-
-#define DRV_NAME "vr_nor"
-
-struct vr_nor_mtd {
- void __iomem *csr_base;
- struct map_info map;
- struct mtd_info *info;
- struct pci_dev *dev;
-};
-
-/* Expansion Bus Configuration and Status Registers are in BAR 0 */
-#define EXP_CSR_MBAR 0
-/* Expansion Bus Memory Window is BAR 1 */
-#define EXP_WIN_MBAR 1
-/* Maximum address space for Chip Select 0 is 64MiB */
-#define CS0_SIZE 0x04000000
-/* Chip Select 0 is at offset 0 in the Memory Window */
-#define CS0_START 0x0
-/* Chip Select 0 Timing Register is at offset 0 in CSR */
-#define EXP_TIMING_CS0 0x00
-#define TIMING_CS_EN (1 << 31) /* Chip Select Enable */
-#define TIMING_BOOT_ACCEL_DIS (1 << 8) /* Boot Acceleration Disable */
-#define TIMING_WR_EN (1 << 1) /* Write Enable */
-#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */
-#define TIMING_MASK 0x3FFF0000
-
-static void vr_nor_destroy_partitions(struct vr_nor_mtd *p)
-{
- mtd_device_unregister(p->info);
-}
-
-static int vr_nor_init_partitions(struct vr_nor_mtd *p)
-{
- /* register the flash bank */
- /* partition the flash bank */
- return mtd_device_register(p->info, NULL, 0);
-}
-
-static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
-{
- map_destroy(p->info);
-}
-
-static int vr_nor_mtd_setup(struct vr_nor_mtd *p)
-{
- static const char * const probe_types[] =
- { "cfi_probe", "jedec_probe", NULL };
- const char * const *type;
-
- for (type = probe_types; !p->info && *type; type++)
- p->info = do_map_probe(*type, &p->map);
- if (!p->info)
- return -ENODEV;
-
- p->info->dev.parent = &p->dev->dev;
-
- return 0;
-}
-
-static void vr_nor_destroy_maps(struct vr_nor_mtd *p)
-{
- unsigned int exp_timing_cs0;
-
- /* write-protect the flash bank */
- exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
- exp_timing_cs0 &= ~TIMING_WR_EN;
- writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
-
- /* unmap the flash window */
- iounmap(p->map.virt);
-
- /* unmap the csr window */
- iounmap(p->csr_base);
-}
-
-/*
- * Initialize the map_info structure and map the flash.
- * Returns 0 on success, nonzero otherwise.
- */
-static int vr_nor_init_maps(struct vr_nor_mtd *p)
-{
- unsigned long csr_phys, csr_len;
- unsigned long win_phys, win_len;
- unsigned int exp_timing_cs0;
- int err;
-
- csr_phys = pci_resource_start(p->dev, EXP_CSR_MBAR);
- csr_len = pci_resource_len(p->dev, EXP_CSR_MBAR);
- win_phys = pci_resource_start(p->dev, EXP_WIN_MBAR);
- win_len = pci_resource_len(p->dev, EXP_WIN_MBAR);
-
- if (!csr_phys || !csr_len || !win_phys || !win_len)
- return -ENODEV;
-
- if (win_len < (CS0_START + CS0_SIZE))
- return -ENXIO;
-
- p->csr_base = ioremap(csr_phys, csr_len);
- if (!p->csr_base)
- return -ENOMEM;
-
- exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
- if (!(exp_timing_cs0 & TIMING_CS_EN)) {
- dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
- "is disabled.\n");
- err = -ENODEV;
- goto release;
- }
- if ((exp_timing_cs0 & TIMING_MASK) == TIMING_MASK) {
- dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
- "is configured for maximally slow access times.\n");
- }
- p->map.name = DRV_NAME;
- p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2;
- p->map.phys = win_phys + CS0_START;
- p->map.size = CS0_SIZE;
- p->map.virt = ioremap(p->map.phys, p->map.size);
- if (!p->map.virt) {
- err = -ENOMEM;
- goto release;
- }
- simple_map_init(&p->map);
-
- /* Enable writes to flash bank */
- exp_timing_cs0 |= TIMING_BOOT_ACCEL_DIS | TIMING_WR_EN;
- writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
-
- return 0;
-
- release:
- iounmap(p->csr_base);
- return err;
-}
-
-static const struct pci_device_id vr_nor_pci_ids[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x500D)},
- {0,}
-};
-
-static void vr_nor_pci_remove(struct pci_dev *dev)
-{
- struct vr_nor_mtd *p = pci_get_drvdata(dev);
-
- vr_nor_destroy_partitions(p);
- vr_nor_destroy_mtd_setup(p);
- vr_nor_destroy_maps(p);
- kfree(p);
- pci_release_regions(dev);
- pci_disable_device(dev);
-}
-
-static int vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct vr_nor_mtd *p = NULL;
- unsigned int exp_timing_cs0;
- int err;
-
- err = pci_enable_device(dev);
- if (err)
- goto out;
-
- err = pci_request_regions(dev, DRV_NAME);
- if (err)
- goto disable_dev;
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- err = -ENOMEM;
- if (!p)
- goto release;
-
- p->dev = dev;
-
- err = vr_nor_init_maps(p);
- if (err)
- goto release;
-
- err = vr_nor_mtd_setup(p);
- if (err)
- goto destroy_maps;
-
- err = vr_nor_init_partitions(p);
- if (err)
- goto destroy_mtd_setup;
-
- pci_set_drvdata(dev, p);
-
- return 0;
-
- destroy_mtd_setup:
- map_destroy(p->info);
-
- destroy_maps:
- /* write-protect the flash bank */
- exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
- exp_timing_cs0 &= ~TIMING_WR_EN;
- writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
-
- /* unmap the flash window */
- iounmap(p->map.virt);
-
- /* unmap the csr window */
- iounmap(p->csr_base);
-
- release:
- kfree(p);
- pci_release_regions(dev);
-
- disable_dev:
- pci_disable_device(dev);
-
- out:
- return err;
-}
-
-static struct pci_driver vr_nor_pci_driver = {
- .name = DRV_NAME,
- .probe = vr_nor_pci_probe,
- .remove = vr_nor_pci_remove,
- .id_table = vr_nor_pci_ids,
-};
-
-module_pci_driver(vr_nor_pci_driver);
-
-MODULE_AUTHOR("Andy Lowe");
-MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, vr_nor_pci_ids);
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index 746a27d15d440..96eb2e782c382 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -518,7 +518,7 @@ static int physmap_flash_probe(struct platform_device *dev)
if (!info->maps[i].phys)
info->maps[i].phys = res->start;
- info->win_order = get_bitmask_order(resource_size(res)) - 1;
+ info->win_order = fls64(resource_size(res)) - 1;
info->maps[i].size = BIT(info->win_order +
(info->gpios ?
info->gpios->ndescs : 0));
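get_bitmask_order() wraps the 32-bit fls() and takes an unsigned int, so it would presumably truncate large resource_size_t values; fls64() handles the full 64-bit range. Worked example for a 64 MiB window, which the change leaves unaffected:

	/* e.g. a 64 MiB flash window */
	resource_size_t size = 0x4000000;		/* 2^26 */
	unsigned int win_order = fls64(size) - 1;	/* fls64 returns 27, so win_order = 26 */
	/* BIT(win_order) == 0x4000000 == size again */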
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index f58cfb15d6e85..b69dade3f7ad0 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -47,7 +47,7 @@ struct map_info uflash_map_templ = {
.bankwidth = UFLASH_BUSWIDTH,
};
-int uflash_devinit(struct platform_device *op, struct device_node *dp)
+static int uflash_devinit(struct platform_device *op, struct device_node *dp)
{
struct uflash_dev *up;
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 4cb478bbee4a4..dc75d50d52e84 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -1378,7 +1378,7 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
return ret;
/*
- * The write cycle timing is directly matching tWC, but is also
+ * The read cycle timing is directly matching tRC, but is also
* dependent on the setup and hold timings we calculated earlier,
* which gives:
*
diff --git a/drivers/mtd/nand/raw/brcmnand/Makefile b/drivers/mtd/nand/raw/brcmnand/Makefile
index 9907e3ec4bb2d..0536568c64672 100644
--- a/drivers/mtd/nand/raw/brcmnand/Makefile
+++ b/drivers/mtd/nand/raw/brcmnand/Makefile
@@ -2,7 +2,7 @@
# link order matters; don't link the more generic brcmstb_nand.o before the
# more specific iproc_nand.o, for instance
obj-$(CONFIG_MTD_NAND_BRCMNAND_IPROC) += iproc_nand.o
-obj-$(CONFIG_MTD_NAND_BRCMNAND_BCMBCA) += bcm63138_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND_BCMBCA) += bcmbca_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND_BCM63XX) += bcm6368_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND_BRCMSTB) += brcmstb_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand.o
diff --git a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
deleted file mode 100644
index 968c5b674b089..0000000000000
--- a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
+++ /dev/null
@@ -1,99 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright © 2015 Broadcom Corporation
- */
-
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "brcmnand.h"
-
-struct bcm63138_nand_soc {
- struct brcmnand_soc soc;
- void __iomem *base;
-};
-
-#define BCM63138_NAND_INT_STATUS 0x00
-#define BCM63138_NAND_INT_EN 0x04
-
-enum {
- BCM63138_CTLRDY = BIT(4),
-};
-
-static bool bcm63138_nand_intc_ack(struct brcmnand_soc *soc)
-{
- struct bcm63138_nand_soc *priv =
- container_of(soc, struct bcm63138_nand_soc, soc);
- void __iomem *mmio = priv->base + BCM63138_NAND_INT_STATUS;
- u32 val = brcmnand_readl(mmio);
-
- if (val & BCM63138_CTLRDY) {
- brcmnand_writel(val & ~BCM63138_CTLRDY, mmio);
- return true;
- }
-
- return false;
-}
-
-static void bcm63138_nand_intc_set(struct brcmnand_soc *soc, bool en)
-{
- struct bcm63138_nand_soc *priv =
- container_of(soc, struct bcm63138_nand_soc, soc);
- void __iomem *mmio = priv->base + BCM63138_NAND_INT_EN;
- u32 val = brcmnand_readl(mmio);
-
- if (en)
- val |= BCM63138_CTLRDY;
- else
- val &= ~BCM63138_CTLRDY;
-
- brcmnand_writel(val, mmio);
-}
-
-static int bcm63138_nand_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct bcm63138_nand_soc *priv;
- struct brcmnand_soc *soc;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- soc = &priv->soc;
-
- priv->base = devm_platform_ioremap_resource_byname(pdev, "nand-int-base");
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
-
- soc->ctlrdy_ack = bcm63138_nand_intc_ack;
- soc->ctlrdy_set_enabled = bcm63138_nand_intc_set;
-
- return brcmnand_probe(pdev, soc);
-}
-
-static const struct of_device_id bcm63138_nand_of_match[] = {
- { .compatible = "brcm,nand-bcm63138" },
- {},
-};
-MODULE_DEVICE_TABLE(of, bcm63138_nand_of_match);
-
-static struct platform_driver bcm63138_nand_driver = {
- .probe = bcm63138_nand_probe,
- .remove_new = brcmnand_remove,
- .driver = {
- .name = "bcm63138_nand",
- .pm = &brcmnand_pm_ops,
- .of_match_table = bcm63138_nand_of_match,
- }
-};
-module_platform_driver(bcm63138_nand_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Brian Norris");
-MODULE_DESCRIPTION("NAND driver for BCM63138");
diff --git a/drivers/mtd/nand/raw/brcmnand/bcmbca_nand.c b/drivers/mtd/nand/raw/brcmnand/bcmbca_nand.c
new file mode 100644
index 0000000000000..ea534850b97a3
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/bcmbca_nand.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2015 Broadcom Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "brcmnand.h"
+
+struct bcmbca_nand_soc {
+ struct brcmnand_soc soc;
+ void __iomem *base;
+};
+
+#define BCMBCA_NAND_INT_STATUS 0x00
+#define BCMBCA_NAND_INT_EN 0x04
+
+enum {
+ BCMBCA_CTLRDY = BIT(4),
+};
+
+#if defined(CONFIG_ARM64)
+#define ALIGN_REQ 8
+#else
+#define ALIGN_REQ 4
+#endif
+
+static inline bool bcmbca_nand_is_buf_aligned(void *flash_cache, void *buffer)
+{
+ return IS_ALIGNED((uintptr_t)buffer, ALIGN_REQ) &&
+ IS_ALIGNED((uintptr_t)flash_cache, ALIGN_REQ);
+}
+
+static bool bcmbca_nand_intc_ack(struct brcmnand_soc *soc)
+{
+ struct bcmbca_nand_soc *priv =
+ container_of(soc, struct bcmbca_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCMBCA_NAND_INT_STATUS;
+ u32 val = brcmnand_readl(mmio);
+
+ if (val & BCMBCA_CTLRDY) {
+ brcmnand_writel(val & ~BCMBCA_CTLRDY, mmio);
+ return true;
+ }
+
+ return false;
+}
+
+static void bcmbca_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+ struct bcmbca_nand_soc *priv =
+ container_of(soc, struct bcmbca_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCMBCA_NAND_INT_EN;
+ u32 val = brcmnand_readl(mmio);
+
+ if (en)
+ val |= BCMBCA_CTLRDY;
+ else
+ val &= ~BCMBCA_CTLRDY;
+
+ brcmnand_writel(val, mmio);
+}
+
+static void bcmbca_read_data_bus(struct brcmnand_soc *soc,
+ void __iomem *flash_cache, u32 *buffer, int fc_words)
+{
+ /*
+	 * memcpy may perform unaligned accesses depending on the source and
+	 * destination addresses, which is incompatible with the NAND flash
+	 * cache. Fall back to memcpy_fromio() in that case.
+ */
+ if (bcmbca_nand_is_buf_aligned((void __force *)flash_cache, buffer))
+ memcpy((void *)buffer, (void __force *)flash_cache, fc_words * 4);
+ else
+ memcpy_fromio((void *)buffer, flash_cache, fc_words * 4);
+}
+
+static int bcmbca_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcmbca_nand_soc *priv;
+ struct brcmnand_soc *soc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ soc = &priv->soc;
+
+ priv->base = devm_platform_ioremap_resource_byname(pdev, "nand-int-base");
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ soc->ctlrdy_ack = bcmbca_nand_intc_ack;
+ soc->ctlrdy_set_enabled = bcmbca_nand_intc_set;
+ soc->read_data_bus = bcmbca_read_data_bus;
+
+ return brcmnand_probe(pdev, soc);
+}
+
+static const struct of_device_id bcmbca_nand_of_match[] = {
+ { .compatible = "brcm,nand-bcm63138" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcmbca_nand_of_match);
+
+static struct platform_driver bcmbca_nand_driver = {
+ .probe = bcmbca_nand_probe,
+ .remove_new = brcmnand_remove,
+ .driver = {
+ .name = "bcmbca_nand",
+ .pm = &brcmnand_pm_ops,
+ .of_match_table = bcmbca_nand_of_match,
+ }
+};
+module_platform_driver(bcmbca_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Brian Norris");
+MODULE_DESCRIPTION("NAND driver for BCMBCA");
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 8faca43ae1ff9..a8d12c71f987b 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -625,7 +625,7 @@ enum {
/* Only for v7.2 */
#define ACC_CONTROL_ECC_EXT_SHIFT 13
-static u8 brcmnand_status(struct brcmnand_host *host);
+static int brcmnand_status(struct brcmnand_host *host);
static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl)
{
@@ -851,6 +851,20 @@ static inline u32 edu_readl(struct brcmnand_controller *ctrl,
return brcmnand_readl(ctrl->edu_base + offs);
}
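+/* Read the flash cache through the SoC-specific hook when available, otherwise word by word */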
+static inline void brcmnand_read_data_bus(struct brcmnand_controller *ctrl,
+ void __iomem *flash_cache, u32 *buffer, int fc_words)
+{
+ struct brcmnand_soc *soc = ctrl->soc;
+ int i;
+
+ if (soc->read_data_bus) {
+ soc->read_data_bus(soc, flash_cache, buffer, fc_words);
+ } else {
+ for (i = 0; i < fc_words; i++)
+ buffer[i] = brcmnand_read_fc(ctrl, i);
+ }
+}
+
static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
{
@@ -1024,6 +1038,22 @@ static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
return -1;
}
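+/* Return true if the 1KB sector-size bit is set in this chip select's ACC_CONTROL register */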
+static bool brcmnand_get_sector_size_1k(struct brcmnand_host *host)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int sector_size_bit = brcmnand_sector_1k_shift(ctrl);
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ u32 acc_control;
+
+ if (sector_size_bit < 0)
+ return false;
+
+ acc_control = nand_readreg(ctrl, acc_control_offs);
+
+ return ((acc_control & BIT(sector_size_bit)) != 0);
+}
+
static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
{
struct brcmnand_controller *ctrl = host->ctrl;
@@ -1041,6 +1071,43 @@ static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
nand_writereg(ctrl, acc_control_offs, tmp);
}
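+/* Read the configured spare area size from this chip select's ACC_CONTROL register */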
+static int brcmnand_get_spare_size(struct brcmnand_host *host)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ u32 acc = nand_readreg(ctrl, acc_control_offs);
+
+ return (acc & brcmnand_spare_area_mask(ctrl));
+}
+
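+/* Derive ECC strength and step size from the strap settings latched in ACC_CONTROL */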
+static void brcmnand_get_ecc_settings(struct brcmnand_host *host, struct nand_chip *chip)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ bool sector_size_1k = brcmnand_get_sector_size_1k(host);
+ int spare_area_size, ecc_level;
+ u32 acc;
+
+ spare_area_size = brcmnand_get_spare_size(host);
+ acc = nand_readreg(ctrl, acc_control_offs);
+ ecc_level = (acc & brcmnand_ecc_level_mask(ctrl)) >> ctrl->ecc_level_shift;
+ if (sector_size_1k)
+ chip->ecc.strength = ecc_level * 2;
+ else if (spare_area_size == 16 && ecc_level == 15)
+ chip->ecc.strength = 1; /* hamming */
+ else
+ chip->ecc.strength = ecc_level;
+
+ if (chip->ecc.size == 0) {
+ if (sector_size_1k)
+ chip->ecc.size = 1024;
+ else
+ chip->ecc.size = 512;
+ }
+}
+
/***********************************************************************
* CS_NAND_SELECT
***********************************************************************/
@@ -1084,8 +1151,8 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_host *host,
if ((val & mask) == expected_val)
return 0;
- dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
- expected_val, val & mask);
+ dev_err(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
+ expected_val, val & mask);
return -ETIMEDOUT;
}
@@ -1690,7 +1757,7 @@ static int brcmnand_waitfunc(struct nand_chip *chip)
INTFC_FLASH_STATUS;
}
-static u8 brcmnand_status(struct brcmnand_host *host)
+static int brcmnand_status(struct brcmnand_host *host)
{
struct nand_chip *chip = &host->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
@@ -1701,7 +1768,7 @@ static u8 brcmnand_status(struct brcmnand_host *host)
return brcmnand_waitfunc(chip);
}
-static u8 brcmnand_reset(struct brcmnand_host *host)
+static int brcmnand_reset(struct brcmnand_host *host)
{
struct nand_chip *chip = &host->chip;
@@ -1975,7 +2042,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
{
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
- int i, j, ret = 0;
+ int i, ret = 0;
brcmnand_clear_ecc_addr(ctrl);
@@ -1988,8 +2055,8 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
if (likely(buf)) {
brcmnand_soc_data_bus_prepare(ctrl->soc, false);
- for (j = 0; j < FC_WORDS; j++, buf++)
- *buf = brcmnand_read_fc(ctrl, j);
+ brcmnand_read_data_bus(ctrl, ctrl->nand_fc, buf, FC_WORDS);
+ buf += FC_WORDS;
brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
}
@@ -2137,7 +2204,7 @@ try_dmaread:
return err;
}
- dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
+ dev_err(ctrl->dev, "uncorrectable error at 0x%llx\n",
(unsigned long long)err_addr);
mtd->ecc_stats.failed++;
/* NAND layer expects zero on ECC errors */
@@ -2339,7 +2406,7 @@ static int brcmnand_write_oob_raw(struct nand_chip *chip, int page)
}
static int brcmnand_exec_instr(struct brcmnand_host *host, int i,
- const struct nand_operation *op)
+ const struct nand_operation *op)
{
const struct nand_op_instr *instr = &op->instrs[i];
struct brcmnand_controller *ctrl = host->ctrl;
@@ -2353,7 +2420,7 @@ static int brcmnand_exec_instr(struct brcmnand_host *host, int i,
* (WAITRDY excepted).
*/
last_op = ((i == (op->ninstrs - 1)) && (instr->type != NAND_OP_WAITRDY_INSTR)) ||
- ((i == (op->ninstrs - 2)) && (op->instrs[i+1].type == NAND_OP_WAITRDY_INSTR));
+ ((i == (op->ninstrs - 2)) && (op->instrs[i + 1].type == NAND_OP_WAITRDY_INSTR));
switch (instr->type) {
case NAND_OP_CMD_INSTR:
@@ -2398,10 +2465,10 @@ static int brcmnand_exec_instr(struct brcmnand_host *host, int i,
static int brcmnand_op_is_status(const struct nand_operation *op)
{
- if ((op->ninstrs == 2) &&
- (op->instrs[0].type == NAND_OP_CMD_INSTR) &&
- (op->instrs[0].ctx.cmd.opcode == NAND_CMD_STATUS) &&
- (op->instrs[1].type == NAND_OP_DATA_IN_INSTR))
+ if (op->ninstrs == 2 &&
+ op->instrs[0].type == NAND_OP_CMD_INSTR &&
+ op->instrs[0].ctx.cmd.opcode == NAND_CMD_STATUS &&
+ op->instrs[1].type == NAND_OP_DATA_IN_INSTR)
return 1;
return 0;
@@ -2409,10 +2476,10 @@ static int brcmnand_op_is_status(const struct nand_operation *op)
static int brcmnand_op_is_reset(const struct nand_operation *op)
{
- if ((op->ninstrs == 2) &&
- (op->instrs[0].type == NAND_OP_CMD_INSTR) &&
- (op->instrs[0].ctx.cmd.opcode == NAND_CMD_RESET) &&
- (op->instrs[1].type == NAND_OP_WAITRDY_INSTR))
+ if (op->ninstrs == 2 &&
+ op->instrs[0].type == NAND_OP_CMD_INSTR &&
+ op->instrs[0].ctx.cmd.opcode == NAND_CMD_RESET &&
+ op->instrs[1].type == NAND_OP_WAITRDY_INSTR)
return 1;
return 0;
@@ -2433,11 +2500,14 @@ static int brcmnand_exec_op(struct nand_chip *chip,
if (brcmnand_op_is_status(op)) {
status = op->instrs[1].ctx.data.buf.in;
- *status = brcmnand_status(host);
+ ret = brcmnand_status(host);
+ if (ret < 0)
+ return ret;
+
+ *status = ret & 0xFF;
return 0;
- }
- else if (brcmnand_op_is_reset(op)) {
+ } else if (brcmnand_op_is_reset(op)) {
ret = brcmnand_reset(host);
if (ret < 0)
return ret;
@@ -2608,19 +2678,37 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
nanddev_get_memorg(&chip->base);
struct brcmnand_controller *ctrl = host->ctrl;
struct brcmnand_cfg *cfg = &host->hwcfg;
- char msg[128];
+ struct device_node *np = nand_get_flash_node(chip);
u32 offs, tmp, oob_sector;
+ bool use_strap = false;
+ char msg[128];
int ret;
memset(cfg, 0, sizeof(*cfg));
+ use_strap = of_property_read_bool(np, "brcm,nand-ecc-use-strap");
+
+ /*
+ * Either nand-ecc-xxx or brcm,nand-ecc-use-strap can be set. Error out
+ * if both exist.
+ */
+ if (chip->ecc.strength && use_strap) {
+ dev_err(ctrl->dev,
+ "ECC strap and DT ECC configuration properties are mutually exclusive\n");
+ return -EINVAL;
+ }
+
+ if (use_strap)
+ brcmnand_get_ecc_settings(host, chip);
- ret = of_property_read_u32(nand_get_flash_node(chip),
- "brcm,nand-oob-sector-size",
+ ret = of_property_read_u32(np, "brcm,nand-oob-sector-size",
&oob_sector);
if (ret) {
- /* Use detected size */
- cfg->spare_area_size = mtd->oobsize /
- (mtd->writesize >> FC_SHIFT);
+ if (use_strap)
+ cfg->spare_area_size = brcmnand_get_spare_size(host);
+ else
+ /* Use detected size */
+ cfg->spare_area_size = mtd->oobsize /
+ (mtd->writesize >> FC_SHIFT);
} else {
cfg->spare_area_size = oob_sector;
}
@@ -3135,6 +3223,10 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
/* Disable XOR addressing */
brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
+ /* Check if the board connects the WP pin */
+ if (of_property_read_bool(dn, "brcm,wp-not-connected"))
+ wp_on = 0;
+
if (ctrl->features & BRCMNAND_HAS_WP) {
/* Permanently disable write protection */
if (wp_on == 2)
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.h b/drivers/mtd/nand/raw/brcmnand/brcmnand.h
index 928114c0be5ef..9f171252a2ae2 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.h
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.h
@@ -24,6 +24,8 @@ struct brcmnand_soc {
void (*ctlrdy_set_enabled)(struct brcmnand_soc *soc, bool en);
void (*prepare_data_bus)(struct brcmnand_soc *soc, bool prepare,
bool is_param);
+ void (*read_data_bus)(struct brcmnand_soc *soc, void __iomem *flash_cache,
+ u32 *buffer, int fc_words);
const struct brcmnand_io_ops *ops;
};
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index 1e3a80f06f330..df6a0d5c86bb3 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -869,7 +869,8 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
struct mtd_info *mtd;
if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
- return -ENODEV;
+ return dev_err_probe(&pdev->dev, -EPROBE_DEFER, "lbc_ctrl_dev missing\n");
+
lbc = fsl_lbc_ctrl_dev->regs;
dev = fsl_lbc_ctrl_dev->dev;
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index 488fd452611a6..677fcb03f9bef 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -303,8 +303,9 @@ static int lpc32xx_nand_device_ready(struct nand_chip *nand_chip)
return 0;
}
-static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
+static irqreturn_t lpc3xxx_nand_irq(int irq, void *data)
{
+ struct lpc32xx_nand_host *host = data;
uint8_t sr;
/* Clear interrupt flag by reading status */
@@ -780,7 +781,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
goto release_dma_chan;
}
- if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
+ if (request_irq(host->irq, &lpc3xxx_nand_irq,
IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
res = -ENXIO;
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index cdb58aca59c08..2a96a87cf79ce 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -63,7 +63,7 @@
#define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \
( \
(cmd_dir) | \
- ((ran) << 19) | \
+ (ran) | \
((bch) << 14) | \
((short_mode) << 13) | \
(((page_size) & 0x7f) << 6) | \
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 60198e33d2d55..17477bb2d48ff 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -1356,7 +1356,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
return -EINVAL;
}
- chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8),
+ chip = devm_kzalloc(dev, struct_size(chip, sels, nsels),
GFP_KERNEL);
if (!chip)
return -ENOMEM;
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 3b3ce2926f5d1..d7dbbd469b892 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -1211,21 +1211,36 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
return nand_exec_op(chip, &op);
}
+static unsigned int rawnand_last_page_of_lun(unsigned int pages_per_lun, unsigned int lun)
+{
+ /* lun is expected to be very small */
+ return (lun * pages_per_lun) + pages_per_lun - 1;
+}
+
static void rawnand_cap_cont_reads(struct nand_chip *chip)
{
struct nand_memory_organization *memorg;
- unsigned int pages_per_lun, first_lun, last_lun;
+ unsigned int ppl, first_lun, last_lun;
memorg = nanddev_get_memorg(&chip->base);
- pages_per_lun = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun;
- first_lun = chip->cont_read.first_page / pages_per_lun;
- last_lun = chip->cont_read.last_page / pages_per_lun;
+ ppl = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun;
+ first_lun = chip->cont_read.first_page / ppl;
+ last_lun = chip->cont_read.last_page / ppl;
/* Prevent sequential cache reads across LUN boundaries */
if (first_lun != last_lun)
- chip->cont_read.pause_page = first_lun * pages_per_lun + pages_per_lun - 1;
+ chip->cont_read.pause_page = rawnand_last_page_of_lun(ppl, first_lun);
else
chip->cont_read.pause_page = chip->cont_read.last_page;
+
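+ /*
+ * The read would start on the pause page: skip it and pause at the
+ * end of the next LUN (or at the last requested page).
+ */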
+ if (chip->cont_read.first_page == chip->cont_read.pause_page) {
+ chip->cont_read.first_page++;
+ chip->cont_read.pause_page = min(chip->cont_read.last_page,
+ rawnand_last_page_of_lun(ppl, first_lun + 1));
+ }
+
+ if (chip->cont_read.first_page >= chip->cont_read.last_page)
+ chip->cont_read.ongoing = false;
}
static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page,
@@ -1292,12 +1307,11 @@ static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int p
if (!chip->cont_read.ongoing)
return 0;
- if (page == chip->cont_read.pause_page &&
- page != chip->cont_read.last_page) {
- chip->cont_read.first_page = chip->cont_read.pause_page + 1;
- rawnand_cap_cont_reads(chip);
- } else if (page == chip->cont_read.last_page) {
+ if (page == chip->cont_read.last_page) {
chip->cont_read.ongoing = false;
+ } else if (page == chip->cont_read.pause_page) {
+ chip->cont_read.first_page++;
+ rawnand_cap_cont_reads(chip);
}
return 0;
@@ -3466,30 +3480,36 @@ static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page,
u32 readlen, int col)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- unsigned int end_page, end_col;
+ unsigned int first_page, last_page;
chip->cont_read.ongoing = false;
if (!chip->controller->supported_op.cont_read)
return;
- end_page = DIV_ROUND_UP(col + readlen, mtd->writesize);
- end_col = (col + readlen) % mtd->writesize;
+ /*
+ * Don't bother making any calculations if the length is too small.
+ * Side effect: avoids possible integer underflows below.
+ */
+ if (readlen < (2 * mtd->writesize))
+ return;
+ /* Derive the page where continuous read should start (the first full page read) */
+ first_page = page;
if (col)
- page++;
+ first_page++;
- if (end_col && end_page)
- end_page--;
+ /* Derive the page where continuous read should stop (the last full page read) */
+ last_page = page + ((col + readlen) / mtd->writesize) - 1;
- if (page + 1 > end_page)
- return;
-
- chip->cont_read.first_page = page;
- chip->cont_read.last_page = end_page;
- chip->cont_read.ongoing = true;
-
- rawnand_cap_cont_reads(chip);
+ /* Configure and enable continuous read when suitable */
+ if (first_page < last_page) {
+ chip->cont_read.first_page = first_page;
+ chip->cont_read.last_page = last_page;
+ chip->cont_read.ongoing = true;
+ /* May reset the ongoing flag */
+ rawnand_cap_cont_reads(chip);
+ }
}
static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned int page)
@@ -3498,10 +3518,7 @@ static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned i
return;
chip->cont_read.first_page++;
- if (chip->cont_read.first_page == chip->cont_read.pause_page)
- chip->cont_read.first_page++;
- if (chip->cont_read.first_page >= chip->cont_read.last_page)
- chip->cont_read.ongoing = false;
+ rawnand_cap_cont_reads(chip);
}
/**
@@ -3577,7 +3594,8 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
oob = ops->oobbuf;
oob_required = oob ? 1 : 0;
- rawnand_enable_cont_reads(chip, page, readlen, col);
+ if (likely(ops->mode != MTD_OPS_RAW))
+ rawnand_enable_cont_reads(chip, page, readlen, col);
while (1) {
struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
@@ -3710,6 +3728,9 @@ read_retry:
}
nand_deselect_target(chip);
+ if (WARN_ON_ONCE(chip->cont_read.ongoing))
+ chip->cont_read.ongoing = false;
+
ops->retlen = ops->len - (size_t) readlen;
if (oob)
ops->oobretlen = ops->ooblen - oobreadlen;
@@ -5195,6 +5216,15 @@ static void rawnand_late_check_supported_ops(struct nand_chip *chip)
if (!nand_has_exec_op(chip))
return;
+ /*
+ * For now, continuous reads can only be used with the core page helpers.
+ * This can be extended later.
+ */
+ if (!(chip->ecc.read_page == nand_read_page_hwecc ||
+ chip->ecc.read_page == nand_read_page_syndrome ||
+ chip->ecc.read_page == nand_read_page_swecc))
+ return;
+
rawnand_check_cont_read_support(chip);
}
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index e4664fa6fd9ef..a8fba5f39f591 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -576,7 +576,6 @@ static int search_bbt(struct nand_chip *this, uint8_t *buf,
startblock &= bbtblocks - 1;
} else {
chips = 1;
- bbtblocks = mtd->size >> this->bbt_erase_shift;
}
for (i = 0; i < chips; i++) {
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
index 39076735a3fbb..a74e64e0cfa32 100644
--- a/drivers/mtd/nand/raw/nand_hynix.c
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -31,7 +31,6 @@ struct hynix_read_retry {
/**
* struct hynix_nand - private Hynix NAND struct
- * @nand_technology: manufacturing process expressed in picometer
* @read_retry: read-retry information
*/
struct hynix_nand {
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 88811139aaf5b..264556939a00f 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -37,7 +38,7 @@
#define FMC2_MAX_SG 16
/* Max chip enable */
-#define FMC2_MAX_CE 2
+#define FMC2_MAX_CE 4
/* Max ECC buffer length */
#define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
@@ -243,6 +244,13 @@ static inline struct stm32_fmc2_nand *to_fmc2_nand(struct nand_chip *chip)
return container_of(chip, struct stm32_fmc2_nand, chip);
}
+struct stm32_fmc2_nfc;
+
+struct stm32_fmc2_nfc_data {
+ int max_ncs;
+ int (*set_cdev)(struct stm32_fmc2_nfc *nfc);
+};
+
struct stm32_fmc2_nfc {
struct nand_controller base;
struct stm32_fmc2_nand nand;
@@ -256,6 +264,7 @@ struct stm32_fmc2_nfc {
phys_addr_t data_phys_addr[FMC2_MAX_CE];
struct clk *clk;
u8 irq_state;
+ const struct stm32_fmc2_nfc_data *data;
struct dma_chan *dma_tx_ch;
struct dma_chan *dma_rx_ch;
@@ -264,6 +273,8 @@ struct stm32_fmc2_nfc {
struct sg_table dma_ecc_sg;
u8 *ecc_buf;
int dma_ecc_len;
+ u32 tx_dma_max_burst;
+ u32 rx_dma_max_burst;
struct completion complete;
struct completion dma_data_complete;
@@ -347,20 +358,26 @@ static int stm32_fmc2_nfc_select_chip(struct nand_chip *chip, int chipnr)
stm32_fmc2_nfc_setup(chip);
stm32_fmc2_nfc_timings_init(chip);
- if (nfc->dma_tx_ch && nfc->dma_rx_ch) {
+ if (nfc->dma_tx_ch) {
memset(&dma_cfg, 0, sizeof(dma_cfg));
- dma_cfg.src_addr = nfc->data_phys_addr[nfc->cs_sel];
dma_cfg.dst_addr = nfc->data_phys_addr[nfc->cs_sel];
- dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_cfg.src_maxburst = 32;
- dma_cfg.dst_maxburst = 32;
+ dma_cfg.dst_maxburst = nfc->tx_dma_max_burst /
+ dma_cfg.dst_addr_width;
ret = dmaengine_slave_config(nfc->dma_tx_ch, &dma_cfg);
if (ret) {
dev_err(nfc->dev, "tx DMA engine slave config failed\n");
return ret;
}
+ }
+
+ if (nfc->dma_rx_ch) {
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+ dma_cfg.src_addr = nfc->data_phys_addr[nfc->cs_sel];
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_cfg.src_maxburst = nfc->rx_dma_max_burst /
+ dma_cfg.src_addr_width;
ret = dmaengine_slave_config(nfc->dma_rx_ch, &dma_cfg);
if (ret) {
@@ -1545,6 +1562,7 @@ static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
{
+ struct dma_slave_caps caps;
int ret = 0;
nfc->dma_tx_ch = dma_request_chan(nfc->dev, "tx");
@@ -1557,6 +1575,11 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
goto err_dma;
}
+ ret = dma_get_slave_caps(nfc->dma_tx_ch, &caps);
+ if (ret)
+ return ret;
+ nfc->tx_dma_max_burst = caps.max_burst;
+
nfc->dma_rx_ch = dma_request_chan(nfc->dev, "rx");
if (IS_ERR(nfc->dma_rx_ch)) {
ret = PTR_ERR(nfc->dma_rx_ch);
@@ -1567,6 +1590,11 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
goto err_dma;
}
+ ret = dma_get_slave_caps(nfc->dma_rx_ch, &caps);
+ if (ret)
+ return ret;
+ nfc->rx_dma_max_burst = caps.max_burst;
+
nfc->dma_ecc_ch = dma_request_chan(nfc->dev, "ecc");
if (IS_ERR(nfc->dma_ecc_ch)) {
ret = PTR_ERR(nfc->dma_ecc_ch);
@@ -1790,7 +1818,7 @@ static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
return ret;
}
- if (cs >= FMC2_MAX_CE) {
+ if (cs >= nfc->data->max_ncs) {
dev_err(nfc->dev, "invalid reg value: %d\n", cs);
return -EINVAL;
}
@@ -1896,9 +1924,17 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
nand_controller_init(&nfc->base);
nfc->base.ops = &stm32_fmc2_nfc_controller_ops;
- ret = stm32_fmc2_nfc_set_cdev(nfc);
- if (ret)
- return ret;
+ nfc->data = of_device_get_match_data(dev);
+ if (!nfc->data)
+ return -EINVAL;
+
+ if (nfc->data->set_cdev) {
+ ret = nfc->data->set_cdev(nfc);
+ if (ret)
+ return ret;
+ } else {
+ nfc->cdev = dev->parent;
+ }
ret = stm32_fmc2_nfc_parse_dt(nfc);
if (ret)
@@ -1917,7 +1953,7 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
if (nfc->dev == nfc->cdev)
start_region = 1;
- for (chip_cs = 0, mem_region = start_region; chip_cs < FMC2_MAX_CE;
+ for (chip_cs = 0, mem_region = start_region; chip_cs < nfc->data->max_ncs;
chip_cs++, mem_region += 3) {
if (!(nfc->cs_assigned & BIT(chip_cs)))
continue;
@@ -2073,7 +2109,7 @@ static int __maybe_unused stm32_fmc2_nfc_resume(struct device *dev)
stm32_fmc2_nfc_wp_disable(nand);
- for (chip_cs = 0; chip_cs < FMC2_MAX_CE; chip_cs++) {
+ for (chip_cs = 0; chip_cs < nfc->data->max_ncs; chip_cs++) {
if (!(nfc->cs_assigned & BIT(chip_cs)))
continue;
@@ -2086,9 +2122,28 @@ static int __maybe_unused stm32_fmc2_nfc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(stm32_fmc2_nfc_pm_ops, stm32_fmc2_nfc_suspend,
stm32_fmc2_nfc_resume);
+static const struct stm32_fmc2_nfc_data stm32_fmc2_nfc_mp1_data = {
+ .max_ncs = 2,
+ .set_cdev = stm32_fmc2_nfc_set_cdev,
+};
+
+static const struct stm32_fmc2_nfc_data stm32_fmc2_nfc_mp25_data = {
+ .max_ncs = 4,
+};
+
static const struct of_device_id stm32_fmc2_nfc_match[] = {
- {.compatible = "st,stm32mp15-fmc2"},
- {.compatible = "st,stm32mp1-fmc2-nfc"},
+ {
+ .compatible = "st,stm32mp15-fmc2",
+ .data = &stm32_fmc2_nfc_mp1_data,
+ },
+ {
+ .compatible = "st,stm32mp1-fmc2-nfc",
+ .data = &stm32_fmc2_nfc_mp1_data,
+ },
+ {
+ .compatible = "st,stm32mp25-fmc2-nfc",
+ .data = &stm32_fmc2_nfc_mp25_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, stm32_fmc2_nfc_match);
diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c
index 31c439a557b18..4597a82de23a4 100644
--- a/drivers/mtd/nand/spi/esmt.c
+++ b/drivers/mtd/nand/spi/esmt.c
@@ -104,7 +104,8 @@ static const struct mtd_ooblayout_ops f50l1g41lb_ooblayout = {
static const struct spinand_info esmt_c8_spinand_table[] = {
SPINAND_INFO("F50L1G41LB",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01, 0x7f,
+ 0x7f, 0x7f),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -113,7 +114,8 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
0,
SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
SPINAND_INFO("F50D1G41LB",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f,
+ 0x7f, 0x7f),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -122,7 +124,8 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
0,
SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
SPINAND_INFO("F50D2G41KA",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51, 0x7f,
+ 0x7f, 0x7f),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index 1a473021cca51..ba7c813b9542b 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -15,6 +15,8 @@
#define WINBOND_CFG_BUF_READ BIT(3)
+#define W25N04KV_STATUS_ECC_5_8_BITFLIPS (3 << 4)
+
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -118,6 +120,7 @@ static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
return -EBADMSG;
case STATUS_ECC_HAS_BITFLIPS:
+ case W25N04KV_STATUS_ECC_5_8_BITFLIPS:
/*
* Let's try to retrieve the real maximum number of bitflips
* in order to avoid forcing the wear-leveling layer to move
@@ -214,6 +217,15 @@ static const struct spinand_info winbond_spinand_table[] = {
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25m02gv_ooblayout, w25n02kv_ecc_get_status)),
+ SPINAND_INFO("W25N04KV",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x23),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 40, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
};
static int winbond_spinand_init(struct spinand_device *spinand)
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 4129764fad8cf..3e1f1913536bf 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -1158,7 +1158,7 @@ static u8 spi_nor_convert_3to4_erase(u8 opcode)
static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
{
- return !!nor->params->erase_map.uniform_erase_type;
+ return !!nor->params->erase_map.uniform_region.erase_mask;
}
static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
@@ -1542,7 +1542,6 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
const struct spi_nor_erase_type *erase;
u32 rem;
int i;
- u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
/*
* Erase types are ordered by size, with the smallest erase type at
@@ -1550,7 +1549,7 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
*/
for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
/* Does the erase region support the tested erase type? */
- if (!(erase_mask & BIT(i)))
+ if (!(region->erase_mask & BIT(i)))
continue;
erase = &map->erase_type[i];
@@ -1558,8 +1557,7 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
continue;
/* Alignment is not mandatory for overlaid regions */
- if (region->offset & SNOR_OVERLAID_REGION &&
- region->size <= len)
+ if (region->overlaid && region->size <= len)
return erase;
/* Don't erase more than what the user has asked for. */
@@ -1574,59 +1572,6 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
return NULL;
}
-static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
-{
- return region->offset & SNOR_LAST_REGION;
-}
-
-static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
-{
- return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
-}
-
-/**
- * spi_nor_region_next() - get the next spi nor region
- * @region: pointer to a structure that describes a SPI NOR erase region
- *
- * Return: the next spi nor region or NULL if last region.
- */
-struct spi_nor_erase_region *
-spi_nor_region_next(struct spi_nor_erase_region *region)
-{
- if (spi_nor_region_is_last(region))
- return NULL;
- region++;
- return region;
-}
-
-/**
- * spi_nor_find_erase_region() - find the region of the serial flash memory in
- * which the offset fits
- * @map: the erase map of the SPI NOR
- * @addr: offset in the serial flash memory
- *
- * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
- * otherwise.
- */
-static struct spi_nor_erase_region *
-spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
-{
- struct spi_nor_erase_region *region = map->regions;
- u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
- u64 region_end = region_start + region->size;
-
- while (addr < region_start || addr >= region_end) {
- region = spi_nor_region_next(region);
- if (!region)
- return ERR_PTR(-EINVAL);
-
- region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
- region_end = region_start + region->size;
- }
-
- return region;
-}
-
/**
* spi_nor_init_erase_cmd() - initialize an erase command
* @region: pointer to a structure that describes a SPI NOR erase region
@@ -1649,7 +1594,7 @@ spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
cmd->opcode = erase->opcode;
cmd->count = 1;
- if (region->offset & SNOR_OVERLAID_REGION)
+ if (region->overlaid)
cmd->size = region->size;
else
cmd->size = erase->size;
@@ -1693,44 +1638,36 @@ static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
struct spi_nor_erase_region *region;
struct spi_nor_erase_command *cmd = NULL;
u64 region_end;
+ unsigned int i;
int ret = -EINVAL;
- region = spi_nor_find_erase_region(map, addr);
- if (IS_ERR(region))
- return PTR_ERR(region);
-
- region_end = spi_nor_region_end(region);
+ for (i = 0; i < map->n_regions && len; i++) {
+ region = &map->regions[i];
+ region_end = region->offset + region->size;
- while (len) {
- erase = spi_nor_find_best_erase_type(map, region, addr, len);
- if (!erase)
- goto destroy_erase_cmd_list;
-
- if (prev_erase != erase ||
- erase->size != cmd->size ||
- region->offset & SNOR_OVERLAID_REGION) {
- cmd = spi_nor_init_erase_cmd(region, erase);
- if (IS_ERR(cmd)) {
- ret = PTR_ERR(cmd);
+ while (len && addr >= region->offset && addr < region_end) {
+ erase = spi_nor_find_best_erase_type(map, region, addr,
+ len);
+ if (!erase)
goto destroy_erase_cmd_list;
- }
-
- list_add_tail(&cmd->list, erase_list);
- } else {
- cmd->count++;
- }
- addr += cmd->size;
- len -= cmd->size;
+ if (prev_erase != erase || erase->size != cmd->size ||
+ region->overlaid) {
+ cmd = spi_nor_init_erase_cmd(region, erase);
+ if (IS_ERR(cmd)) {
+ ret = PTR_ERR(cmd);
+ goto destroy_erase_cmd_list;
+ }
+
+ list_add_tail(&cmd->list, erase_list);
+ } else {
+ cmd->count++;
+ }
- if (len && addr >= region_end) {
- region = spi_nor_region_next(region);
- if (!region)
- goto destroy_erase_cmd_list;
- region_end = spi_nor_region_end(region);
+ len -= cmd->size;
+ addr += cmd->size;
+ prev_erase = erase;
}
-
- prev_erase = erase;
}
return 0;
@@ -2468,12 +2405,11 @@ void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase)
void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
u8 erase_mask, u64 flash_size)
{
- /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
- map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
- SNOR_LAST_REGION;
+ map->uniform_region.offset = 0;
map->uniform_region.size = flash_size;
+ map->uniform_region.erase_mask = erase_mask;
map->regions = &map->uniform_region;
- map->uniform_erase_type = erase_mask;
+ map->n_regions = 1;
}
int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
@@ -2560,7 +2496,7 @@ spi_nor_select_uniform_erase(struct spi_nor_erase_map *map)
{
const struct spi_nor_erase_type *tested_erase, *erase = NULL;
int i;
- u8 uniform_erase_type = map->uniform_erase_type;
+ u8 uniform_erase_type = map->uniform_region.erase_mask;
/*
* Search for the biggest erase size, except for when compiled
@@ -2599,8 +2535,7 @@ spi_nor_select_uniform_erase(struct spi_nor_erase_map *map)
return NULL;
/* Disable all other Sector Erase commands. */
- map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
- map->uniform_erase_type |= BIT(erase - map->erase_type);
+ map->uniform_region.erase_mask = BIT(erase - map->erase_type);
return erase;
}
@@ -3434,7 +3369,54 @@ static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
return info;
}
-static void spi_nor_set_mtd_info(struct spi_nor *nor)
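+/* Return the largest erase size supported in the region, or the full region size if it is overlaid */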
+static u32
+spi_nor_get_region_erasesize(const struct spi_nor_erase_region *region,
+ const struct spi_nor_erase_type *erase_type)
+{
+ int i;
+
+ if (region->overlaid)
+ return region->size;
+
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ if (region->erase_mask & BIT(i))
+ return erase_type[i].size;
+ }
+
+ return 0;
+}
+
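+/* Expose a non-uniform erase map to MTD as an array of mtd_erase_region_info entries */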
+static int spi_nor_set_mtd_eraseregions(struct spi_nor *nor)
+{
+ const struct spi_nor_erase_map *map = &nor->params->erase_map;
+ const struct spi_nor_erase_region *region = map->regions;
+ struct mtd_erase_region_info *mtd_region;
+ struct mtd_info *mtd = &nor->mtd;
+ u32 erasesize, i;
+
+ mtd_region = devm_kcalloc(nor->dev, map->n_regions, sizeof(*mtd_region),
+ GFP_KERNEL);
+ if (!mtd_region)
+ return -ENOMEM;
+
+ for (i = 0; i < map->n_regions; i++) {
+ erasesize = spi_nor_get_region_erasesize(&region[i],
+ map->erase_type);
+ if (!erasesize)
+ return -EINVAL;
+
+ mtd_region[i].erasesize = erasesize;
+ mtd_region[i].numblocks = div64_ul(region[i].size, erasesize);
+ mtd_region[i].offset = region[i].offset;
+ }
+
+ mtd->numeraseregions = map->n_regions;
+ mtd->eraseregions = mtd_region;
+
+ return 0;
+}
+
+static int spi_nor_set_mtd_info(struct spi_nor *nor)
{
struct mtd_info *mtd = &nor->mtd;
struct device *dev = nor->dev;
@@ -3465,6 +3447,11 @@ static void spi_nor_set_mtd_info(struct spi_nor *nor)
mtd->_resume = spi_nor_resume;
mtd->_get_device = spi_nor_get_device;
mtd->_put_device = spi_nor_put_device;
+
+ if (!spi_nor_has_uniform_erase(nor))
+ return spi_nor_set_mtd_eraseregions(nor);
+
+ return 0;
}
static int spi_nor_hw_reset(struct spi_nor *nor)
@@ -3555,7 +3542,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
return ret;
/* No mtd_info fields should be used up to this point. */
- spi_nor_set_mtd_info(nor);
+ ret = spi_nor_set_mtd_info(nor);
+ if (ret)
+ return ret;
dev_dbg(dev, "Manufacturer and device ID: %*phN\n",
SPI_NOR_MAX_ID_LEN, nor->id);
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index d36c0e0729548..4427866855158 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -240,27 +240,21 @@ struct spi_nor_erase_command {
/**
* struct spi_nor_erase_region - Structure to describe a SPI NOR erase region
* @offset: the offset in the data array of erase region start.
- * LSB bits are used as a bitmask encoding flags to
- * determine if this region is overlaid, if this region is
- * the last in the SPI NOR flash memory and to indicate
- * all the supported erase commands inside this region.
- * The erase types are sorted in ascending order with the
- * smallest Erase Type size being at BIT(0).
* @size: the size of the region in bytes.
+ * @erase_mask: bitmask to indicate all the supported erase commands
+ * inside this region. The erase types are sorted in
+ * ascending order with the smallest Erase Type size being
+ * at BIT(0).
+ * @overlaid: determine if this region is overlaid.
*/
struct spi_nor_erase_region {
u64 offset;
u64 size;
+ u8 erase_mask;
+ bool overlaid;
};
#define SNOR_ERASE_TYPE_MAX 4
-#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0)
-
-#define SNOR_LAST_REGION BIT(4)
-#define SNOR_OVERLAID_REGION BIT(5)
-
-#define SNOR_ERASE_FLAGS_MAX 6
-#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0)
/**
* struct spi_nor_erase_map - Structure to describe the SPI NOR erase map
@@ -273,17 +267,13 @@ struct spi_nor_erase_region {
* The erase types are sorted in ascending order, with the
* smallest Erase Type size being the first member in the
* erase_type array.
- * @uniform_erase_type: bitmask encoding erase types that can erase the
- * entire memory. This member is completed at init by
- * uniform and non-uniform SPI NOR flash memories if they
- * support at least one erase type that can erase the
- * entire memory.
+ * @n_regions: number of erase regions.
*/
struct spi_nor_erase_map {
struct spi_nor_erase_region *regions;
struct spi_nor_erase_region uniform_region;
struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX];
- u8 uniform_erase_type;
+ unsigned int n_regions;
};
/**
@@ -675,8 +665,6 @@ void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
u8 opcode);
void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase);
-struct spi_nor_erase_region *
-spi_nor_region_next(struct spi_nor_erase_region *region);
void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
u8 erase_mask, u64 flash_size);
diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c
index 2dbda6b6938ab..fa6956144d2e4 100644
--- a/drivers/mtd/spi-nor/debugfs.c
+++ b/drivers/mtd/spi-nor/debugfs.c
@@ -78,10 +78,10 @@ static int spi_nor_params_show(struct seq_file *s, void *data)
struct spi_nor *nor = s->private;
struct spi_nor_flash_parameter *params = nor->params;
struct spi_nor_erase_map *erase_map = &params->erase_map;
- struct spi_nor_erase_region *region;
+ struct spi_nor_erase_region *region = erase_map->regions;
const struct flash_info *info = nor->info;
char buf[16], *str;
- int i;
+ unsigned int i;
seq_printf(s, "name\t\t%s\n", info->name);
seq_printf(s, "id\t\t%*ph\n", SPI_NOR_MAX_ID_LEN, nor->id);
@@ -142,22 +142,20 @@ static int spi_nor_params_show(struct seq_file *s, void *data)
}
seq_puts(s, "\nsector map\n");
- seq_puts(s, " region (in hex) | erase mask | flags\n");
+ seq_puts(s, " region (in hex) | erase mask | overlaid\n");
seq_puts(s, " ------------------+------------+----------\n");
- for (region = erase_map->regions;
- region;
- region = spi_nor_region_next(region)) {
- u64 start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
- u64 flags = region->offset & SNOR_ERASE_FLAGS_MASK;
- u64 end = start + region->size - 1;
+ for (i = 0; i < erase_map->n_regions; i++) {
+ u64 start = region[i].offset;
+ u64 end = start + region[i].size - 1;
+ u8 erase_mask = region[i].erase_mask;
seq_printf(s, " %08llx-%08llx | [%c%c%c%c] | %s\n",
start, end,
- flags & BIT(0) ? '0' : ' ',
- flags & BIT(1) ? '1' : ' ',
- flags & BIT(2) ? '2' : ' ',
- flags & BIT(3) ? '3' : ' ',
- flags & SNOR_OVERLAID_REGION ? "overlaid" : "");
+ erase_mask & BIT(0) ? '0' : ' ',
+ erase_mask & BIT(1) ? '1' : ' ',
+ erase_mask & BIT(2) ? '2' : ' ',
+ erase_mask & BIT(3) ? '3' : ' ',
+ region[i].overlaid ? "yes" : "no");
}
return 0;
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
index 57713de328327..5b1117265bd28 100644
--- a/drivers/mtd/spi-nor/sfdp.c
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -389,19 +389,15 @@ static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
{
struct spi_nor_erase_region *region = map->regions;
- u8 region_erase_mask, sorted_erase_mask;
+ u8 sorted_erase_mask;
+ unsigned int i;
- while (region) {
- region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
-
- sorted_erase_mask = spi_nor_sort_erase_mask(map,
- region_erase_mask);
+ for (i = 0; i < map->n_regions; i++) {
+ sorted_erase_mask =
+ spi_nor_sort_erase_mask(map, region[i].erase_mask);
/* Overwrite erase mask. */
- region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
- sorted_erase_mask;
-
- region = spi_nor_region_next(region);
+ region[i].erase_mask = sorted_erase_mask;
}
}
@@ -554,8 +550,6 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
* selecting the uniform erase.
*/
spi_nor_regions_sort_erase_types(map);
- map->uniform_erase_type = map->uniform_region.offset &
- SNOR_ERASE_TYPE_MASK;
/* Stop here if not JESD216 rev A or later. */
if (bfpt_header->length == BFPT_DWORD_MAX_JESD216)
@@ -806,16 +800,6 @@ out:
return ret;
}
-static void spi_nor_region_mark_end(struct spi_nor_erase_region *region)
-{
- region->offset |= SNOR_LAST_REGION;
-}
-
-static void spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
-{
- region->offset |= SNOR_OVERLAID_REGION;
-}
-
/**
* spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
* @region: pointer to a structure that describes a SPI NOR erase region
@@ -833,7 +817,7 @@ spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
if (!(erase[i].size && erase_type & BIT(erase[i].idx)))
continue;
if (region->size & erase[i].size_mask) {
- spi_nor_region_mark_overlay(region);
+ region->overlaid = true;
return;
}
}
@@ -868,6 +852,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
if (!region)
return -ENOMEM;
map->regions = region;
+ map->n_regions = region_count;
uniform_erase_type = 0xff;
regions_erase_type = 0;
@@ -875,9 +860,10 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
/* Populate regions. */
for (i = 0; i < region_count; i++) {
j = i + 1; /* index for the region dword */
+ region[i].offset = offset;
region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
- region[i].offset = offset | erase_type;
+ region[i].erase_mask = erase_type;
spi_nor_region_check_overlay(&region[i], erase, erase_type);
@@ -893,21 +879,20 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
*/
regions_erase_type |= erase_type;
- offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
- region[i].size;
+ offset = region[i].offset + region[i].size;
}
- spi_nor_region_mark_end(&region[i - 1]);
- save_uniform_erase_type = map->uniform_erase_type;
- map->uniform_erase_type = spi_nor_sort_erase_mask(map,
- uniform_erase_type);
+ save_uniform_erase_type = map->uniform_region.erase_mask;
+ map->uniform_region.erase_mask =
+ spi_nor_sort_erase_mask(map,
+ uniform_erase_type);
if (!regions_erase_type) {
/*
* Roll back to the previous uniform_erase_type mask, SMPT is
* broken.
*/
- map->uniform_erase_type = save_uniform_erase_type;
+ map->uniform_region.erase_mask = save_uniform_erase_type;
return -EINVAL;
}
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 211f279a33a96..46c01fa2ec46f 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -295,7 +295,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
if (cis_sector == -1)
return;
- ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL);
+ ssfdc = kzalloc(sizeof(*ssfdc), GFP_KERNEL);
if (!ssfdc)
return;
@@ -332,7 +332,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
kmalloc_array(ssfdc->map_len,
sizeof(ssfdc->logic_block_map[0]), GFP_KERNEL);
if (!ssfdc->logic_block_map)
- goto out_err;
+ goto out_free_ssfdc;
memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) *
ssfdc->map_len);
@@ -350,7 +350,8 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
out_err:
kfree(ssfdc->logic_block_map);
- kfree(ssfdc);
+out_free_ssfdc:
+ kfree(ssfdc);
}
static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev)
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 7499a540121e8..e28a3af83c0eb 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -113,4 +113,17 @@ config MTD_UBI_FAULT_INJECTION
testing purposes.
If in doubt, say "N".
+
+config MTD_UBI_NVMEM
+ tristate "UBI virtual NVMEM"
+ default n
+ depends on NVMEM
+ help
+ This option enables an additional driver exposing UBI volumes as NVMEM
+ providers, intended for platforms where UBI is part of the firmware
+ specification and is also used to store e.g. MAC addresses or board-
+ specific Wi-Fi calibration data.
+
+ If in doubt, say "N".
+
endif # MTD_UBI
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index 543673605ca72..4b51aaf00d1a2 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -7,3 +7,4 @@ ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
ubi-$(CONFIG_MTD_UBI_BLOCK) += block.o
obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
+obj-$(CONFIG_MTD_UBI_NVMEM) += nvmem.o
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 5c8fdcc088a0d..f82e3423acb9f 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -65,10 +65,10 @@ struct ubiblock_pdu {
};
/* Numbers of elements set in the @ubiblock_param array */
-static int ubiblock_devs __initdata;
+static int ubiblock_devs;
/* MTD devices specification parameters */
-static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;
+static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES];
struct ubiblock {
struct ubi_volume_desc *desc;
@@ -536,6 +536,70 @@ static int ubiblock_resize(struct ubi_volume_info *vi)
return 0;
}
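+/* Match a volume against a 'block=' parameter given as a device path, a volume name, or numeric IDs */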
+static bool
+match_volume_desc(struct ubi_volume_info *vi, const char *name, int ubi_num, int vol_id)
+{
+ int err, len, cur_ubi_num, cur_vol_id;
+
+ if (ubi_num == -1) {
+ /* No ubi num, name must be a vol device path */
+ err = ubi_get_num_by_path(name, &cur_ubi_num, &cur_vol_id);
+ if (err || vi->ubi_num != cur_ubi_num || vi->vol_id != cur_vol_id)
+ return false;
+
+ return true;
+ }
+
+ if (vol_id == -1) {
+ /* Got ubi_num, but no vol_id, name must be volume name */
+ if (vi->ubi_num != ubi_num)
+ return false;
+
+ len = strnlen(name, UBI_VOL_NAME_MAX + 1);
+ if (len < 1 || vi->name_len != len)
+ return false;
+
+ if (strcmp(name, vi->name))
+ return false;
+
+ return true;
+ }
+
+ if (vi->ubi_num != ubi_num)
+ return false;
+
+ if (vi->vol_id != vol_id)
+ return false;
+
+ return true;
+}
+
+static void
+ubiblock_create_from_param(struct ubi_volume_info *vi)
+{
+ int i, ret = 0;
+ struct ubiblock_param *p;
+
+ /*
+ * Iterate over the ubiblock cmdline parameters. If a parameter matches
+ * the newly added volume, create the ubiblock device for it.
+ */
+ for (i = 0; i < ubiblock_devs; i++) {
+ p = &ubiblock_param[i];
+
+ if (!match_volume_desc(vi, p->name, p->ubi_num, p->vol_id))
+ continue;
+
+ ret = ubiblock_create(vi);
+ if (ret) {
+ pr_err(
+ "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
+ vi->name, p->ubi_num, p->vol_id, ret);
+ }
+ break;
+ }
+}
+
static int ubiblock_notify(struct notifier_block *nb,
unsigned long notification_type, void *ns_ptr)
{
@@ -543,10 +607,7 @@ static int ubiblock_notify(struct notifier_block *nb,
switch (notification_type) {
case UBI_VOLUME_ADDED:
- /*
- * We want to enforce explicit block device creation for
- * volumes, so when a volume is added we do nothing.
- */
+ ubiblock_create_from_param(&nt->vi);
break;
case UBI_VOLUME_REMOVED:
ubiblock_remove(&nt->vi);
@@ -572,56 +633,6 @@ static struct notifier_block ubiblock_notifier = {
.notifier_call = ubiblock_notify,
};
-static struct ubi_volume_desc * __init
-open_volume_desc(const char *name, int ubi_num, int vol_id)
-{
- if (ubi_num == -1)
- /* No ubi num, name must be a vol device path */
- return ubi_open_volume_path(name, UBI_READONLY);
- else if (vol_id == -1)
- /* No vol_id, must be vol_name */
- return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
- else
- return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
-}
-
-static void __init ubiblock_create_from_param(void)
-{
- int i, ret = 0;
- struct ubiblock_param *p;
- struct ubi_volume_desc *desc;
- struct ubi_volume_info vi;
-
- /*
- * If there is an error creating one of the ubiblocks, continue on to
- * create the following ubiblocks. This helps in a circumstance where
- * the kernel command-line specifies multiple block devices and some
- * may be broken, but we still want the working ones to come up.
- */
- for (i = 0; i < ubiblock_devs; i++) {
- p = &ubiblock_param[i];
-
- desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
- if (IS_ERR(desc)) {
- pr_err(
- "UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
- p->ubi_num, p->vol_id, PTR_ERR(desc));
- continue;
- }
-
- ubi_get_volume_info(desc, &vi);
- ubi_close_volume(desc);
-
- ret = ubiblock_create(&vi);
- if (ret) {
- pr_err(
- "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
- vi.name, p->ubi_num, p->vol_id, ret);
- continue;
- }
- }
-}
-
static void ubiblock_remove_all(void)
{
struct ubiblock *next;
@@ -647,18 +658,7 @@ int __init ubiblock_init(void)
if (ubiblock_major < 0)
return ubiblock_major;
- /*
- * Attach block devices from 'block=' module param.
- * Even if one block device in the param list fails to come up,
- * still allow the module to load and leave any others up.
- */
- ubiblock_create_from_param();
-
- /*
- * Block devices are only created upon user requests, so we ignore
- * existing volumes.
- */
- ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
+ ret = ubi_register_volume_notifier(&ubiblock_notifier, 0);
if (ret)
goto err_unreg;
return 0;
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 7d4ff1193db6f..a7e3a6246c0e9 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -27,6 +27,7 @@
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/slab.h>
#include <linux/major.h>
#include "ubi.h"
@@ -92,7 +93,7 @@ static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);
-/* Protects @ubi_devices and @ubi->ref_count */
+/* Protects @ubi_devices, @ubi->ref_count and @ubi->is_dead */
static DEFINE_SPINLOCK(ubi_devices_lock);
/* "Show" method for files in '/<sysfs>/class/ubi/' */
@@ -260,6 +261,9 @@ struct ubi_device *ubi_get_device(int ubi_num)
spin_lock(&ubi_devices_lock);
ubi = ubi_devices[ubi_num];
+ if (ubi && ubi->is_dead)
+ ubi = NULL;
+
if (ubi) {
ubi_assert(ubi->ref_count >= 0);
ubi->ref_count += 1;
@@ -297,7 +301,7 @@ struct ubi_device *ubi_get_by_major(int major)
spin_lock(&ubi_devices_lock);
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
- if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ if (ubi && !ubi->is_dead && MAJOR(ubi->cdev.dev) == major) {
ubi_assert(ubi->ref_count >= 0);
ubi->ref_count += 1;
get_device(&ubi->dev);
@@ -326,7 +330,7 @@ int ubi_major2num(int major)
for (i = 0; i < UBI_MAX_DEVICES; i++) {
struct ubi_device *ubi = ubi_devices[i];
- if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ if (ubi && !ubi->is_dead && MAJOR(ubi->cdev.dev) == major) {
ubi_num = ubi->ubi_num;
break;
}
@@ -513,7 +517,7 @@ static void ubi_free_volumes_from(struct ubi_device *ubi, int from)
int i;
for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
- if (!ubi->volumes[i])
+ if (!ubi->volumes[i] || ubi->volumes[i]->is_dead)
continue;
ubi_eba_replace_table(ubi->volumes[i], NULL);
ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
@@ -1098,7 +1102,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
return -EINVAL;
spin_lock(&ubi_devices_lock);
- put_device(&ubi->dev);
ubi->ref_count -= 1;
if (ubi->ref_count) {
if (!anyway) {
@@ -1109,6 +1112,13 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_err(ubi, "%s reference count %d, destroy anyway",
ubi->ubi_name, ubi->ref_count);
}
+ ubi->is_dead = true;
+ spin_unlock(&ubi_devices_lock);
+
+ ubi_notify_all(ubi, UBI_VOLUME_SHUTDOWN, NULL);
+
+ spin_lock(&ubi_devices_lock);
+ put_device(&ubi->dev);
ubi_devices[ubi_num] = NULL;
spin_unlock(&ubi_devices_lock);
@@ -1219,43 +1229,43 @@ static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
return mtd;
}
-static int __init ubi_init(void)
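+/* Automatically attach MTD devices whose device tree node is compatible with "linux,ubi" */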
+static void ubi_notify_add(struct mtd_info *mtd)
{
- int err, i, k;
+ struct device_node *np = mtd_get_of_node(mtd);
+ int err;
- /* Ensure that EC and VID headers have correct size */
- BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
- BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
+ if (!of_device_is_compatible(np, "linux,ubi"))
+ return;
- if (mtd_devs > UBI_MAX_DEVICES) {
- pr_err("UBI error: too many MTD devices, maximum is %d\n",
- UBI_MAX_DEVICES);
- return -EINVAL;
- }
+ /*
+ * We are already holding &mtd_table_mutex, but still need to bump the
+ * refcount.
+ */
+ err = __get_mtd_device(mtd);
+ if (err)
+ return;
- /* Create base sysfs directory and sysfs files */
- err = class_register(&ubi_class);
+ /* called while holding mtd_table_mutex */
+ mutex_lock_nested(&ubi_devices_mutex, SINGLE_DEPTH_NESTING);
+ err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0, false, false);
+ mutex_unlock(&ubi_devices_mutex);
if (err < 0)
- return err;
-
- err = misc_register(&ubi_ctrl_cdev);
- if (err) {
- pr_err("UBI error: cannot register device\n");
- goto out;
- }
+ __put_mtd_device(mtd);
+}
- ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
- sizeof(struct ubi_wl_entry),
- 0, 0, NULL);
- if (!ubi_wl_entry_slab) {
- err = -ENOMEM;
- goto out_dev_unreg;
- }
+static void ubi_notify_remove(struct mtd_info *mtd)
+{
+ /* do nothing for now */
+}
- err = ubi_debugfs_init();
- if (err)
- goto out_slab;
+static struct mtd_notifier ubi_mtd_notifier = {
+ .add = ubi_notify_add,
+ .remove = ubi_notify_remove,
+};
+static int __init ubi_init_attach(void)
+{
+ int err, i, k;
/* Attach MTD devices */
for (i = 0; i < mtd_devs; i++) {
@@ -1304,25 +1314,79 @@ static int __init ubi_init(void)
}
}
+ return 0;
+
+out_detach:
+ for (k = 0; k < i; k++)
+ if (ubi_devices[k]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+ return err;
+}
+#ifndef CONFIG_MTD_UBI_MODULE
+late_initcall(ubi_init_attach);
+#endif
+
+static int __init ubi_init(void)
+{
+ int err;
+
+ /* Ensure that EC and VID headers have correct size */
+ BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
+ BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
+
+ if (mtd_devs > UBI_MAX_DEVICES) {
+ pr_err("UBI error: too many MTD devices, maximum is %d\n",
+ UBI_MAX_DEVICES);
+ return -EINVAL;
+ }
+
+ /* Create base sysfs directory and sysfs files */
+ err = class_register(&ubi_class);
+ if (err < 0)
+ return err;
+
+ err = misc_register(&ubi_ctrl_cdev);
+ if (err) {
+ pr_err("UBI error: cannot register device\n");
+ goto out;
+ }
+
+ ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
+ sizeof(struct ubi_wl_entry),
+ 0, 0, NULL);
+ if (!ubi_wl_entry_slab) {
+ err = -ENOMEM;
+ goto out_dev_unreg;
+ }
+
+ err = ubi_debugfs_init();
+ if (err)
+ goto out_slab;
+
err = ubiblock_init();
if (err) {
pr_err("UBI error: block: cannot initialize, error %d\n", err);
/* See comment above re-ubi_is_module(). */
if (ubi_is_module())
- goto out_detach;
+ goto out_slab;
+ }
+
+ register_mtd_user(&ubi_mtd_notifier);
+
+ if (ubi_is_module()) {
+ err = ubi_init_attach();
+ if (err)
+ goto out_mtd_notifier;
}
return 0;
-out_detach:
- for (k = 0; k < i; k++)
- if (ubi_devices[k]) {
- mutex_lock(&ubi_devices_mutex);
- ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
- mutex_unlock(&ubi_devices_mutex);
- }
- ubi_debugfs_exit();
+out_mtd_notifier:
+ unregister_mtd_user(&ubi_mtd_notifier);
out_slab:
kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
@@ -1332,13 +1396,15 @@ out:
pr_err("UBI error: cannot initialize UBI, error %d\n", err);
return err;
}
-late_initcall(ubi_init);
+device_initcall(ubi_init);
+
static void __exit ubi_exit(void)
{
int i;
ubiblock_exit();
+ unregister_mtd_user(&ubi_mtd_notifier);
for (i = 0; i < UBI_MAX_DEVICES; i++)
if (ubi_devices[i]) {
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 8d1f0e05892c1..e5ac3cd0bbae6 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -1456,7 +1456,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
}
ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
+
+ /*
+ * Take volumes_lock here so that this update cannot race with
+ * ubi_resize_volume() copying and replacing the eba_tbl; otherwise the
+ * new pnum could be written into the expired old table and be lost.
+ */
+ spin_lock(&ubi->volumes_lock);
vol->eba_tbl->entries[lnum].pnum = to;
+ spin_unlock(&ubi->volumes_lock);
out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 2a728c31e6b85..9a4940874be5b 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -85,9 +85,10 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi)
sizeof(struct ubi_fm_scan_pool) +
sizeof(struct ubi_fm_scan_pool) +
(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
- (sizeof(struct ubi_fm_eba) +
- (ubi->peb_count * sizeof(__be32))) +
- sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
+ ((sizeof(struct ubi_fm_eba) +
+ sizeof(struct ubi_fm_volhdr)) *
+ (UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT)) +
+ (ubi->peb_count * sizeof(__be32));
return roundup(size, ubi->leb_size);
}
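For reference, the worst-case fastmap size computed above (before rounding up to the LEB size) is roughly:

    size = fixed scan-pool/header sizes
         + peb_count * sizeof(struct ubi_fm_ec)
         + (UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT) * (sizeof(struct ubi_fm_eba) + sizeof(struct ubi_fm_volhdr))
         + peb_count * sizeof(__be32)

i.e. one EBA header and one volume header are reserved per possible volume, internal volumes included, while the per-PEB __be32 EBA entries are counted once.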
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 5db653eacbd45..f1ea8677467fb 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -152,7 +152,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
spin_lock(&ubi->volumes_lock);
vol = ubi->volumes[vol_id];
- if (!vol)
+ if (!vol || vol->is_dead)
goto out_unlock;
err = -EBUSY;
@@ -280,6 +280,41 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
/**
+ * ubi_get_num_by_path - get UBI device and volume number from device path
+ * @pathname: volume character device node path
+ * @ubi_num: pointer to UBI device number to be set
+ * @vol_id: pointer to UBI volume ID to be set
+ *
+ * Returns 0 on success and sets @ubi_num and @vol_id, or a negative error
+ * code otherwise.
+ */
+int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id)
+{
+ int error;
+ struct path path;
+ struct kstat stat;
+
+ error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+ if (error)
+ return error;
+
+ error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
+ path_put(&path);
+ if (error)
+ return error;
+
+ if (!S_ISCHR(stat.mode))
+ return -EINVAL;
+
+ *ubi_num = ubi_major2num(MAJOR(stat.rdev));
+ *vol_id = MINOR(stat.rdev) - 1;
+
+ if (*vol_id < 0 || *ubi_num < 0)
+ return -ENODEV;
+
+ return 0;
+}
+
+/**
* ubi_open_volume_path - open UBI volume by its character device node path.
* @pathname: volume character device node path
* @mode: open mode
@@ -290,32 +325,17 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
{
int error, ubi_num, vol_id;
- struct path path;
- struct kstat stat;
dbg_gen("open volume %s, mode %d", pathname, mode);
if (!pathname || !*pathname)
return ERR_PTR(-EINVAL);
- error = kern_path(pathname, LOOKUP_FOLLOW, &path);
- if (error)
- return ERR_PTR(error);
-
- error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
- path_put(&path);
+ error = ubi_get_num_by_path(pathname, &ubi_num, &vol_id);
if (error)
return ERR_PTR(error);
- if (!S_ISCHR(stat.mode))
- return ERR_PTR(-EINVAL);
-
- ubi_num = ubi_major2num(MAJOR(stat.rdev));
- vol_id = MINOR(stat.rdev) - 1;
-
- if (vol_id >= 0 && ubi_num >= 0)
- return ubi_open_volume(ubi_num, vol_id, mode);
- return ERR_PTR(-ENODEV);
+ return ubi_open_volume(ubi_num, vol_id, mode);
}
EXPORT_SYMBOL_GPL(ubi_open_volume_path);
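As a hedged illustration (a hypothetical caller, not part of this patch), the new ubi_get_num_by_path() helper lets kernel code resolve a UBI volume character-device path into device/volume numbers before opening the volume, mirroring what ubi_open_volume_path() now does internally:

	/* Hypothetical example: resolve /dev/ubiX_Y and open the volume read-only.
	 * Assumes "ubi.h" and <linux/err.h> are available.
	 */
	static struct ubi_volume_desc *example_open_by_path(const char *pathname)
	{
		int ubi_num, vol_id, err;

		err = ubi_get_num_by_path(pathname, &ubi_num, &vol_id);
		if (err)
			return ERR_PTR(err);

		/* ubi_num/vol_id could also be matched against mount options here. */
		return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
	}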
diff --git a/drivers/mtd/ubi/nvmem.c b/drivers/mtd/ubi/nvmem.c
new file mode 100644
index 0000000000000..8aeb9c428e510
--- /dev/null
+++ b/drivers/mtd/ubi/nvmem.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 Daniel Golle <daniel@makrotopia.org>
+ */
+
+/* UBI NVMEM provider */
+#include "ubi.h"
+#include <linux/nvmem-provider.h>
+#include <asm/div64.h>
+
+/* List of all NVMEM devices */
+static LIST_HEAD(nvmem_devices);
+static DEFINE_MUTEX(devices_mutex);
+
+struct ubi_nvmem {
+ struct nvmem_device *nvmem;
+ int ubi_num;
+ int vol_id;
+ int usable_leb_size;
+ struct list_head list;
+};
+
+static int ubi_nvmem_reg_read(void *priv, unsigned int from,
+ void *val, size_t bytes)
+{
+ size_t to_read, bytes_left = bytes;
+ struct ubi_nvmem *unv = priv;
+ struct ubi_volume_desc *desc;
+ uint32_t offs;
+ uint64_t lnum = from;
+ int err = 0;
+
+ desc = ubi_open_volume(unv->ubi_num, unv->vol_id, UBI_READONLY);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
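+	/* Split the linear NVMEM offset into the starting LEB number (lnum)
+	 * and the byte offset within that LEB (offs).
+	 */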
+ offs = do_div(lnum, unv->usable_leb_size);
+ while (bytes_left) {
+ to_read = unv->usable_leb_size - offs;
+
+ if (to_read > bytes_left)
+ to_read = bytes_left;
+
+ err = ubi_read(desc, lnum, val, offs, to_read);
+ if (err)
+ break;
+
+ lnum += 1;
+ offs = 0;
+ bytes_left -= to_read;
+ val += to_read;
+ }
+ ubi_close_volume(desc);
+
+ if (err)
+ return err;
+
+ return bytes_left == 0 ? 0 : -EIO;
+}
+
+static int ubi_nvmem_add(struct ubi_volume_info *vi)
+{
+ struct device_node *np = dev_of_node(vi->dev);
+ struct nvmem_config config = {};
+ struct ubi_nvmem *unv;
+ int ret;
+
+ if (!np)
+ return 0;
+
+ if (!of_get_child_by_name(np, "nvmem-layout"))
+ return 0;
+
+ if (WARN_ON_ONCE(vi->usable_leb_size <= 0) ||
+ WARN_ON_ONCE(vi->size <= 0))
+ return -EINVAL;
+
+ unv = kzalloc(sizeof(struct ubi_nvmem), GFP_KERNEL);
+ if (!unv)
+ return -ENOMEM;
+
+ config.id = NVMEM_DEVID_NONE;
+ config.dev = vi->dev;
+ config.name = dev_name(vi->dev);
+ config.owner = THIS_MODULE;
+ config.priv = unv;
+ config.reg_read = ubi_nvmem_reg_read;
+ config.size = vi->usable_leb_size * vi->size;
+ config.word_size = 1;
+ config.stride = 1;
+ config.read_only = true;
+ config.root_only = true;
+ config.ignore_wp = true;
+ config.of_node = np;
+
+ unv->ubi_num = vi->ubi_num;
+ unv->vol_id = vi->vol_id;
+ unv->usable_leb_size = vi->usable_leb_size;
+ unv->nvmem = nvmem_register(&config);
+ if (IS_ERR(unv->nvmem)) {
+ ret = dev_err_probe(vi->dev, PTR_ERR(unv->nvmem),
+ "Failed to register NVMEM device\n");
+ kfree(unv);
+ return ret;
+ }
+
+ mutex_lock(&devices_mutex);
+ list_add_tail(&unv->list, &nvmem_devices);
+ mutex_unlock(&devices_mutex);
+
+ return 0;
+}
+
+static void ubi_nvmem_remove(struct ubi_volume_info *vi)
+{
+ struct ubi_nvmem *unv_c, *unv = NULL;
+
+ mutex_lock(&devices_mutex);
+ list_for_each_entry(unv_c, &nvmem_devices, list)
+ if (unv_c->ubi_num == vi->ubi_num && unv_c->vol_id == vi->vol_id) {
+ unv = unv_c;
+ break;
+ }
+
+ if (!unv) {
+ mutex_unlock(&devices_mutex);
+ return;
+ }
+
+ list_del(&unv->list);
+ mutex_unlock(&devices_mutex);
+ nvmem_unregister(unv->nvmem);
+ kfree(unv);
+}
+
+/**
+ * nvmem_notify - UBI notification handler.
+ * @nb: registered notifier block
+ * @l: notification type
+ * @ns_ptr: pointer to the &struct ubi_notification object
+ */
+static int nvmem_notify(struct notifier_block *nb, unsigned long l,
+ void *ns_ptr)
+{
+ struct ubi_notification *nt = ns_ptr;
+
+ switch (l) {
+ case UBI_VOLUME_RESIZED:
+ ubi_nvmem_remove(&nt->vi);
+ fallthrough;
+ case UBI_VOLUME_ADDED:
+ ubi_nvmem_add(&nt->vi);
+ break;
+ case UBI_VOLUME_SHUTDOWN:
+ ubi_nvmem_remove(&nt->vi);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block nvmem_notifier = {
+ .notifier_call = nvmem_notify,
+};
+
+static int __init ubi_nvmem_init(void)
+{
+ return ubi_register_volume_notifier(&nvmem_notifier, 0);
+}
+
+static void __exit ubi_nvmem_exit(void)
+{
+ struct ubi_nvmem *unv, *tmp;
+
+ mutex_lock(&devices_mutex);
+ list_for_each_entry_safe(unv, tmp, &nvmem_devices, list) {
+ nvmem_unregister(unv->nvmem);
+ list_del(&unv->list);
+ kfree(unv);
+ }
+ mutex_unlock(&devices_mutex);
+
+ ubi_unregister_volume_notifier(&nvmem_notifier);
+}
+
+module_init(ubi_nvmem_init);
+module_exit(ubi_nvmem_exit);
+MODULE_DESCRIPTION("NVMEM layer over UBI volumes");
+MODULE_AUTHOR("Daniel Golle");
+MODULE_LICENSE("GPL");
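A minimal, hypothetical consumer sketch (the cell name and caller are assumptions, not part of this patch) showing how another driver could read a cell exposed through the nvmem-layout node that this provider registers; it only uses the standard <linux/nvmem-consumer.h> API:

	#include <linux/nvmem-consumer.h>
	#include <linux/slab.h>

	/* Hypothetical consumer: read a "mac-address" cell backed by a UBI volume. */
	static int example_read_mac(struct device *dev, u8 *mac, size_t buflen)
	{
		struct nvmem_cell *cell;
		size_t len;
		void *buf;

		cell = nvmem_cell_get(dev, "mac-address");	/* cell name is an assumption */
		if (IS_ERR(cell))
			return PTR_ERR(cell);

		buf = nvmem_cell_read(cell, &len);
		nvmem_cell_put(cell);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		if (len > buflen) {
			kfree(buf);
			return -EINVAL;
		}

		memcpy(mac, buf, len);
		kfree(buf);
		return 0;
	}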
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 0b42bb45dd840..32009a24869e7 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -337,6 +337,7 @@ struct ubi_volume {
int writers;
int exclusive;
int metaonly;
+ bool is_dead;
int reserved_pebs;
int vol_type;
@@ -561,6 +562,7 @@ struct ubi_device {
spinlock_t volumes_lock;
int ref_count;
int image_seq;
+ bool is_dead;
int rsvd_pebs;
int avail_pebs;
@@ -955,6 +957,7 @@ void ubi_free_internal_volumes(struct ubi_device *ubi);
void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
struct ubi_volume_info *vi);
+int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id);
/* scan.c */
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr);
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 2c867d16f89f7..5a3558bbb9035 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -59,7 +59,7 @@ static ssize_t vol_attribute_show(struct device *dev,
struct ubi_device *ubi = vol->ubi;
spin_lock(&ubi->volumes_lock);
- if (!ubi->volumes[vol->vol_id]) {
+ if (!ubi->volumes[vol->vol_id] || ubi->volumes[vol->vol_id]->is_dead) {
spin_unlock(&ubi->volumes_lock);
return -ENODEV;
}
@@ -124,6 +124,31 @@ static void vol_release(struct device *dev)
kfree(vol);
}
+static struct fwnode_handle *find_volume_fwnode(struct ubi_volume *vol)
+{
+ struct fwnode_handle *fw_vols, *fw_vol;
+ const char *volname;
+ u32 volid;
+
+ fw_vols = device_get_named_child_node(vol->dev.parent->parent, "volumes");
+ if (!fw_vols)
+ return NULL;
+
+ fwnode_for_each_child_node(fw_vols, fw_vol) {
+ if (!fwnode_property_read_string(fw_vol, "volname", &volname) &&
+ strncmp(volname, vol->name, vol->name_len))
+ continue;
+
+ if (!fwnode_property_read_u32(fw_vol, "volid", &volid) &&
+ vol->vol_id != volid)
+ continue;
+
+ return fw_vol;
+ }
+
+ return NULL;
+}
+
/**
* ubi_create_volume - create volume.
* @ubi: UBI device description object
@@ -189,7 +214,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Ensure that the name is unique */
for (i = 0; i < ubi->vtbl_slots; i++)
- if (ubi->volumes[i] &&
+ if (ubi->volumes[i] && !ubi->volumes[i]->is_dead &&
ubi->volumes[i]->name_len == req->name_len &&
!strcmp(ubi->volumes[i]->name, req->name)) {
ubi_err(ubi, "volume \"%s\" exists (ID %d)",
@@ -223,6 +248,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->name_len = req->name_len;
memcpy(vol->name, req->name, vol->name_len);
vol->ubi = ubi;
+ device_set_node(&vol->dev, find_volume_fwnode(vol));
/*
* Finish all pending erases because there may be some LEBs belonging
@@ -352,6 +378,19 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
err = -EBUSY;
goto out_unlock;
}
+
+ /*
+ * Mark the volume as dead at this point to prevent anyone from
+ * taking a reference to it from now on.
+ * This is necessary as we have to release the spinlock before
+ * calling ubi_volume_notify.
+ */
+ vol->is_dead = true;
+ spin_unlock(&ubi->volumes_lock);
+
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_SHUTDOWN);
+
+ spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = NULL;
spin_unlock(&ubi->volumes_lock);
@@ -408,6 +447,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
struct ubi_device *ubi = vol->ubi;
struct ubi_vtbl_record vtbl_rec;
struct ubi_eba_table *new_eba_tbl = NULL;
+ struct ubi_eba_table *old_eba_tbl = NULL;
int vol_id = vol->vol_id;
if (ubi->ro_mode)
@@ -453,10 +493,13 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
err = -ENOSPC;
goto out_free;
}
+
ubi->avail_pebs -= pebs;
ubi->rsvd_pebs += pebs;
ubi_eba_copy_table(vol, new_eba_tbl, vol->reserved_pebs);
- ubi_eba_replace_table(vol, new_eba_tbl);
+ old_eba_tbl = vol->eba_tbl;
+ vol->eba_tbl = new_eba_tbl;
+ vol->reserved_pebs = reserved_pebs;
spin_unlock(&ubi->volumes_lock);
}
@@ -471,7 +514,9 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
ubi->avail_pebs -= pebs;
ubi_update_reserved(ubi);
ubi_eba_copy_table(vol, new_eba_tbl, reserved_pebs);
- ubi_eba_replace_table(vol, new_eba_tbl);
+ old_eba_tbl = vol->eba_tbl;
+ vol->eba_tbl = new_eba_tbl;
+ vol->reserved_pebs = reserved_pebs;
spin_unlock(&ubi->volumes_lock);
}
@@ -493,7 +538,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (err)
goto out_acc;
- vol->reserved_pebs = reserved_pebs;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = reserved_pebs;
vol->last_eb_bytes = vol->usable_leb_size;
@@ -501,19 +545,23 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
(long long)vol->used_ebs * vol->usable_leb_size;
}
+ /* destroy old table */
+ ubi_eba_destroy_table(old_eba_tbl);
ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
self_check_volumes(ubi);
return err;
out_acc:
- if (pebs > 0) {
- spin_lock(&ubi->volumes_lock);
- ubi->rsvd_pebs -= pebs;
- ubi->avail_pebs += pebs;
- spin_unlock(&ubi->volumes_lock);
- }
- return err;
-
+ spin_lock(&ubi->volumes_lock);
+ vol->reserved_pebs = reserved_pebs - pebs;
+ ubi->rsvd_pebs -= pebs;
+ ubi->avail_pebs += pebs;
+ if (pebs > 0)
+ ubi_eba_copy_table(vol, old_eba_tbl, vol->reserved_pebs);
+ else
+ ubi_eba_copy_table(vol, old_eba_tbl, reserved_pebs);
+ vol->eba_tbl = old_eba_tbl;
+ spin_unlock(&ubi->volumes_lock);
out_free:
ubi_eba_destroy_table(new_eba_tbl);
return err;
@@ -592,6 +640,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
vol->dev.class = &ubi_class;
vol->dev.groups = volume_dev_groups;
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
+ device_set_node(&vol->dev, find_volume_fwnode(vol));
err = device_register(&vol->dev);
if (err) {
cdev_del(&vol->cdev);
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index f700f0e4f2ec4..6e5489e233dd2 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -791,6 +791,12 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
* The number of supported volumes is limited by the eraseblock size
* and by the UBI_MAX_VOLUMES constant.
*/
+
+ if (ubi->leb_size < UBI_VTBL_RECORD_SIZE) {
+ ubi_err(ubi, "LEB size too small for a volume record");
+ return -EINVAL;
+ }
+
ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
ubi->vtbl_slots = UBI_MAX_VOLUMES;
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index f81b598147b30..7b5028b67cd5c 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -370,8 +370,8 @@ static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = {
static const struct kvaser_pciefd_irq_mask kvaser_pciefd_xilinx_irq_mask = {
.kcan_rx0 = BIT(4),
- .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) },
- .all = GENMASK(19, 16) | BIT(4),
+ .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19), BIT(20), BIT(21), BIT(22), BIT(23) },
+ .all = GENMASK(23, 16) | BIT(4),
};
static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = {
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 678b51f9cea61..8090390edaf9d 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -950,20 +950,217 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
mutex_unlock(&priv->reg_mutex);
}
+/* In Clause 5 of IEEE Std 802-2014, two sublayers of the data link layer (DLL)
+ * of the Open Systems Interconnection basic reference model (OSI/RM) are
+ * described: the medium access control (MAC) and logical link control (LLC)
+ * sublayers. The MAC sublayer is the one facing the physical layer.
+ *
+ * In 8.2 of IEEE Std 802.1Q-2022, the Bridge architecture is described. A
+ * Bridge component comprises a MAC Relay Entity for interconnecting the Ports
+ * of the Bridge, at least two Ports, and higher layer entities with at least a
+ * Spanning Tree Protocol Entity included.
+ *
+ * Each Bridge Port also functions as an end station and shall provide the MAC
+ * Service to an LLC Entity. Each instance of the MAC Service is provided to a
+ * distinct LLC Entity that supports protocol identification, multiplexing, and
+ * demultiplexing, for protocol data unit (PDU) transmission and reception by
+ * one or more higher layer entities.
+ *
+ * It is described in 8.13.9 of IEEE Std 802.1Q-2022 that in a Bridge, the LLC
+ * Entity associated with each Bridge Port is modeled as being directly
+ * connected to the attached Local Area Network (LAN).
+ *
+ * On the switch with CPU port architecture, CPU port functions as Management
+ * Port, and the Management Port functionality is provided by software which
+ * functions as an end station. Software is connected to an IEEE 802 LAN that is
+ * wholly contained within the system that incorporates the Bridge. Software
+ * provides access to the LLC Entity associated with each Bridge Port by the
+ * value of the source port field on the special tag on the frame received by
+ * software.
+ *
+ * We call frames that carry control information to determine the active
+ * topology and current extent of each Virtual Local Area Network (VLAN), i.e.,
+ * spanning tree or Shortest Path Bridging (SPB) and Multiple VLAN Registration
+ * Protocol Data Units (MVRPDUs), and frames from other link constrained
+ * protocols, such as Extensible Authentication Protocol over LAN (EAPOL) and
+ * Link Layer Discovery Protocol (LLDP), link-local frames. They are not
+ * forwarded by a Bridge. Permanently configured entries in the filtering
+ * database (FDB) ensure that such frames are discarded by the Forwarding
+ * Process. In 8.6.3 of IEEE Std 802.1Q-2022, this is described in detail:
+ *
+ * Each of the reserved MAC addresses specified in Table 8-1
+ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]) shall be
+ * permanently configured in the FDB in C-VLAN components and ERs.
+ *
+ * Each of the reserved MAC addresses specified in Table 8-2
+ * (01-80-C2-00-00-[01,02,03,04,05,06,07,08,09,0A,0E]) shall be permanently
+ * configured in the FDB in S-VLAN components.
+ *
+ * Each of the reserved MAC addresses specified in Table 8-3
+ * (01-80-C2-00-00-[01,02,04,0E]) shall be permanently configured in the FDB in
+ * TPMR components.
+ *
+ * The FDB entries for reserved MAC addresses shall specify filtering for all
+ * Bridge Ports and all VIDs. Management shall not provide the capability to
+ * modify or remove entries for reserved MAC addresses.
+ *
+ * The addresses in Table 8-1, Table 8-2, and Table 8-3 determine the scope of
+ * propagation of PDUs within a Bridged Network, as follows:
+ *
+ * The Nearest Bridge group address (01-80-C2-00-00-0E) is an address that no
+ * conformant Two-Port MAC Relay (TPMR) component, Service VLAN (S-VLAN)
+ * component, Customer VLAN (C-VLAN) component, or MAC Bridge can forward.
+ * PDUs transmitted using this destination address, or any other addresses
+ * that appear in Table 8-1, Table 8-2, and Table 8-3
+ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]), can
+ * therefore travel no further than those stations that can be reached via a
+ * single individual LAN from the originating station.
+ *
+ * The Nearest non-TPMR Bridge group address (01-80-C2-00-00-03) is an
+ * address that no conformant S-VLAN component, C-VLAN component, or MAC
+ * Bridge can forward; however, this address is relayed by a TPMR component.
+ * PDUs using this destination address, or any of the other addresses that
+ * appear in both Table 8-1 and Table 8-2 but not in Table 8-3
+ * (01-80-C2-00-00-[00,03,05,06,07,08,09,0A,0B,0C,0D,0F]), will be relayed by
+ * any TPMRs but will propagate no further than the nearest S-VLAN component,
+ * C-VLAN component, or MAC Bridge.
+ *
+ * The Nearest Customer Bridge group address (01-80-C2-00-00-00) is an address
+ * that no conformant C-VLAN component or MAC Bridge can forward; however, it is
+ * relayed by TPMR components and S-VLAN components. PDUs using this
+ * destination address, or any of the other addresses that appear in Table 8-1
+ * but not in either Table 8-2 or Table 8-3 (01-80-C2-00-00-[00,0B,0C,0D,0F]),
+ * will be relayed by TPMR components and S-VLAN components but will propagate
+ * no further than the nearest C-VLAN component or MAC Bridge.
+ *
+ * Because the LLC Entity associated with each Bridge Port is provided via CPU
+ * port, we must not filter these frames but forward them to CPU port.
+ *
+ * In a Bridge, the transmission Port is mainly determined by the ingress and egress
+ * rules, FDB, and spanning tree Port State functions of the Forwarding Process.
+ * For link-local frames, only CPU port should be designated as destination port
+ * in the FDB, and the other functions of the Forwarding Process must not
+ * interfere with the decision of the transmission Port. We call this process
+ * trapping frames to CPU port.
+ *
+ * Therefore, on the switch with CPU port architecture, link-local frames must
+ * be trapped to CPU port, and certain link-local frames received by a Port of a
+ * Bridge comprising a TPMR component or an S-VLAN component must be excluded
+ * from it.
+ *
+ * A Bridge of the switch with CPU port architecture cannot comprise a Two-Port
+ * MAC Relay (TPMR) component as a TPMR component supports only a subset of the
+ * functionality of a MAC Bridge. A Bridge comprising two Ports (Management Port
+ * doesn't count) of this architecture will either function as a standard MAC
+ * Bridge or a standard VLAN Bridge.
+ *
+ * Therefore, a Bridge of this architecture can only comprise S-VLAN components,
+ * C-VLAN components, or MAC Bridge components. Since there's no TPMR component,
+ * we don't need to relay PDUs using the destination addresses specified on the
+ * Nearest non-TPMR section, and the proportion of the Nearest Customer Bridge
+ * section where they must be relayed by TPMR components.
+ *
+ * One option to trap link-local frames to CPU port is to add static FDB entries
+ * with CPU port designated as destination port. However, because
+ * Independent VLAN Learning (IVL) is being used on every VID, each entry only
+ * applies to a single VLAN Identifier (VID). For a Bridge comprising a MAC
+ * Bridge component or a C-VLAN component, there would have to be 16 times 4096
+ * entries. This switch intellectual property can only hold a maximum of 2048
+ * entries. Using this option, there also isn't a mechanism to prevent
+ * link-local frames from being discarded when the spanning tree Port State of
+ * the reception Port is discarding.
+ *
+ * The remaining option is to utilise the BPC, RGAC1, RGAC2, RGAC3, and RGAC4
+ * registers. Whilst this applies to every VID, it cannot cover all of the
+ * reserved MAC addresses without also affecting the remaining Standard Group MAC
+ * Addresses. The REV_UN frame tag utilised using the RGAC4 register covers the
+ * remaining 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F] destination
+ * addresses. It also includes the 01-80-C2-00-00-22 to 01-80-C2-00-00-FF
+ * destination addresses which may be relayed by MAC Bridges or VLAN Bridges.
+ * The latter option provides better but not complete conformance.
+ *
+ * This switch intellectual property also does not provide a mechanism to trap
+ * link-local frames with specific destination addresses to CPU port by Bridge,
+ * to conform to the filtering rules for the distinct Bridge components.
+ *
+ * Therefore, regardless of the type of the Bridge component, link-local frames
+ * with these destination addresses will be trapped to CPU port:
+ *
+ * 01-80-C2-00-00-[00,01,02,03,0E]
+ *
+ * In a Bridge comprising a MAC Bridge component or a C-VLAN component:
+ *
+ * Link-local frames with these destination addresses won't be trapped to CPU
+ * port, which won't conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F]
+ *
+ * In a Bridge comprising an S-VLAN component:
+ *
+ * Link-local frames with these destination addresses will be trapped to CPU
+ * port, which won't conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-00
+ *
+ * Link-local frames with these destination addresses won't be trapped to CPU
+ * port, which won't conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-[04,05,06,07,08,09,0A]
+ *
+ * To trap link-local frames to CPU port as conformant as this switch
+ * intellectual property can allow, link-local frames are made to be regarded as
+ * Bridge Protocol Data Units (BPDUs). This is because this switch intellectual
+ * property only lets the frames regarded as BPDUs bypass the spanning tree Port
+ * State function of the Forwarding Process.
+ *
+ * The only remaining interference is the ingress rules. When the reception Port
+ * has no PVID assigned in software, VLAN-untagged frames won't be allowed in.
+ * There doesn't seem to be a mechanism on the switch intellectual property to
+ * have link-local frames bypass this function of the Forwarding Process.
+ */
static void
mt753x_trap_frames(struct mt7530_priv *priv)
{
- /* Trap BPDUs to the CPU port(s) */
- mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
- MT753X_BPDU_CPU_ONLY);
-
- /* Trap 802.1X PAE frames to the CPU port(s) */
- mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_PORT_FW_MASK,
- MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY));
-
- /* Trap LLDP frames with :0E MAC DA to the CPU port(s) */
- mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK,
- MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
+ /* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them
+ * VLAN-untagged.
+ */
+ mt7530_rmw(priv, MT753X_BPC,
+ MT753X_PAE_BPDU_FR | MT753X_PAE_EG_TAG_MASK |
+ MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
+ MT753X_BPDU_PORT_FW_MASK,
+ MT753X_PAE_BPDU_FR |
+ MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
+
+ /* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
+ * them VLAN-untagged.
+ */
+ mt7530_rmw(priv, MT753X_RGAC1,
+ MT753X_R02_BPDU_FR | MT753X_R02_EG_TAG_MASK |
+ MT753X_R02_PORT_FW_MASK | MT753X_R01_BPDU_FR |
+ MT753X_R01_EG_TAG_MASK | MT753X_R01_PORT_FW_MASK,
+ MT753X_R02_BPDU_FR |
+ MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_R01_BPDU_FR |
+ MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
+
+ /* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
+ * them VLAN-untagged.
+ */
+ mt7530_rmw(priv, MT753X_RGAC2,
+ MT753X_R0E_BPDU_FR | MT753X_R0E_EG_TAG_MASK |
+ MT753X_R0E_PORT_FW_MASK | MT753X_R03_BPDU_FR |
+ MT753X_R03_EG_TAG_MASK | MT753X_R03_PORT_FW_MASK,
+ MT753X_R0E_BPDU_FR |
+ MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_R03_BPDU_FR |
+ MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
}
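As a small worked example of how the values above are composed (using the definitions added to mt7530.h further below): MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) expands to FIELD_PREP(GENMASK(24, 22), 4), i.e. 4 << 22 = 0x01000000, so each mt7530_rmw() call above replaces just the listed bit fields in a single read-modify-write of the register.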
static void
@@ -1686,14 +1883,16 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
static int mt753x_mirror_port_get(unsigned int id, u32 val)
{
- return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
- MIRROR_PORT(val);
+ return (id == ID_MT7531 || id == ID_MT7988) ?
+ MT7531_MIRROR_PORT_GET(val) :
+ MIRROR_PORT(val);
}
static int mt753x_mirror_port_set(unsigned int id, u32 val)
{
- return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
- MIRROR_PORT(val);
+ return (id == ID_MT7531 || id == ID_MT7988) ?
+ MT7531_MIRROR_PORT_SET(val) :
+ MIRROR_PORT(val);
}
static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
@@ -2192,22 +2391,16 @@ mt7530_setup(struct dsa_switch *ds)
}
}
- /* Disable LEDs before reset to prevent the MT7530 sampling a
- * potentially incorrect HT_XTAL_FSEL value.
- */
- mt7530_write(priv, MT7530_LED_EN, 0);
- usleep_range(1000, 1100);
-
/* Reset whole chip through gpio pin or memory-mapped registers for
* different type of hardware
*/
if (priv->mcm) {
reset_control_assert(priv->rstc);
- usleep_range(1000, 1100);
+ usleep_range(5000, 5100);
reset_control_deassert(priv->rstc);
} else {
gpiod_set_value_cansleep(priv->reset, 0);
- usleep_range(1000, 1100);
+ usleep_range(5000, 5100);
gpiod_set_value_cansleep(priv->reset, 1);
}
@@ -2238,8 +2431,6 @@ mt7530_setup(struct dsa_switch *ds)
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
- mt7530_pll_setup(priv);
-
/* Lower Tx driving for TRGMII path */
for (i = 0; i < NUM_TRGMII_CTRL; i++)
mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
@@ -2255,6 +2446,9 @@ mt7530_setup(struct dsa_switch *ds)
val |= MHWTRAP_MANUAL;
mt7530_write(priv, MT7530_MHWTRAP, val);
+ if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_40MHZ)
+ mt7530_pll_setup(priv);
+
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
@@ -2288,6 +2482,9 @@ mt7530_setup(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
+ /* Allow mirroring frames received on the local port (monitor port). */
+ mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
/* Setup VLAN ID 0 for VLAN-unaware bridges */
ret = mt7530_setup_vlan0(priv);
if (ret)
@@ -2399,6 +2596,9 @@ mt7531_setup_common(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
+ /* Allow mirroring frames received on the local port (monitor port). */
+ mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
@@ -2420,11 +2620,11 @@ mt7531_setup(struct dsa_switch *ds)
*/
if (priv->mcm) {
reset_control_assert(priv->rstc);
- usleep_range(1000, 1100);
+ usleep_range(5000, 5100);
reset_control_deassert(priv->rstc);
} else {
gpiod_set_value_cansleep(priv->reset, 0);
- usleep_range(1000, 1100);
+ usleep_range(5000, 5100);
gpiod_set_value_cansleep(priv->reset, 1);
}
@@ -2474,18 +2674,25 @@ mt7531_setup(struct dsa_switch *ds)
mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
MT7531_GPIO0_INTERRUPT);
- /* Enable PHY core PLL, since phy_device has not yet been created
- * provided for phy_[read,write]_mmd_indirect is called, we provide
- * our own mt7531_ind_mmd_phy_[read,write] to complete this
- * function.
+ /* Enable Energy-Efficient Ethernet (EEE) and the PHY core PLL. Since no
+ * phy_device has been created yet for phy_[read,write]_mmd_indirect to
+ * use, we provide our own mt7531_ind_mmd_phy_[read,write] to complete
+ * this function.
*/
val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
MDIO_MMD_VEND2, CORE_PLL_GROUP4);
- val |= MT7531_PHY_PLL_BYPASS_MODE;
+ val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE;
val &= ~MT7531_PHY_PLL_OFF;
mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
CORE_PLL_GROUP4, val);
+ /* Disable EEE advertisement on the switch PHYs. */
+ for (i = MT753X_CTRL_PHY_ADDR;
+ i < MT753X_CTRL_PHY_ADDR + MT7530_NUM_PHYS; i++) {
+ mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
+ 0);
+ }
+
mt7531_setup_common(ds);
/* Setup VLAN ID 0 for VLAN-unaware bridges */
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index a71166e0a7fcf..a08053390b285 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -32,6 +32,10 @@ enum mt753x_id {
#define SYSC_REG_RSTCTRL 0x34
#define RESET_MCM BIT(2)
+/* Register for ARL global control */
+#define MT753X_AGC 0xc
+#define LOCAL_EN BIT(7)
+
/* Registers to mac forward control for unknown frames */
#define MT7530_MFC 0x10
#define BC_FFP(x) (((x) & 0xff) << 24)
@@ -65,14 +69,38 @@ enum mt753x_id {
/* Registers for BPDU and PAE frame control*/
#define MT753X_BPC 0x24
-#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
+#define MT753X_PAE_BPDU_FR BIT(25)
+#define MT753X_PAE_EG_TAG_MASK GENMASK(24, 22)
+#define MT753X_PAE_EG_TAG(x) FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x)
#define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16)
#define MT753X_PAE_PORT_FW(x) FIELD_PREP(MT753X_PAE_PORT_FW_MASK, x)
+#define MT753X_BPDU_EG_TAG_MASK GENMASK(8, 6)
+#define MT753X_BPDU_EG_TAG(x) FIELD_PREP(MT753X_BPDU_EG_TAG_MASK, x)
+#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
+
+/* Register for :01 and :02 MAC DA frame control */
+#define MT753X_RGAC1 0x28
+#define MT753X_R02_BPDU_FR BIT(25)
+#define MT753X_R02_EG_TAG_MASK GENMASK(24, 22)
+#define MT753X_R02_EG_TAG(x) FIELD_PREP(MT753X_R02_EG_TAG_MASK, x)
+#define MT753X_R02_PORT_FW_MASK GENMASK(18, 16)
+#define MT753X_R02_PORT_FW(x) FIELD_PREP(MT753X_R02_PORT_FW_MASK, x)
+#define MT753X_R01_BPDU_FR BIT(9)
+#define MT753X_R01_EG_TAG_MASK GENMASK(8, 6)
+#define MT753X_R01_EG_TAG(x) FIELD_PREP(MT753X_R01_EG_TAG_MASK, x)
+#define MT753X_R01_PORT_FW_MASK GENMASK(2, 0)
/* Register for :03 and :0E MAC DA frame control */
#define MT753X_RGAC2 0x2c
+#define MT753X_R0E_BPDU_FR BIT(25)
+#define MT753X_R0E_EG_TAG_MASK GENMASK(24, 22)
+#define MT753X_R0E_EG_TAG(x) FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x)
#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16)
#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
+#define MT753X_R03_BPDU_FR BIT(9)
+#define MT753X_R03_EG_TAG_MASK GENMASK(8, 6)
+#define MT753X_R03_EG_TAG(x) FIELD_PREP(MT753X_R03_EG_TAG_MASK, x)
+#define MT753X_R03_PORT_FW_MASK GENMASK(2, 0)
enum mt753x_bpdu_port_fw {
MT753X_BPDU_FOLLOW_MFC,
@@ -253,6 +281,7 @@ enum mt7530_port_mode {
enum mt7530_vlan_port_eg_tag {
MT7530_VLAN_EG_DISABLED = 0,
MT7530_VLAN_EG_CONSISTENT = 1,
+ MT7530_VLAN_EG_UNTAGGED = 4,
};
enum mt7530_vlan_port_attr {
@@ -596,6 +625,7 @@ enum mt7531_clk_skew {
#define RG_SYSPLL_DDSFBK_EN BIT(12)
#define RG_SYSPLL_BIAS_EN BIT(11)
#define RG_SYSPLL_BIAS_LPF_EN BIT(10)
+#define MT7531_RG_SYSPLL_DMY2 BIT(6)
#define MT7531_PHY_PLL_OFF BIT(5)
#define MT7531_PHY_PLL_BYPASS_MODE BIT(4)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 9ed1821184ece..c95787cb90867 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -5503,8 +5503,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6250,
.name = "Marvell 88E6020",
.num_databases = 64,
- .num_ports = 4,
+ /* Ports 2-4 are not routed to pins
+ * => usable ports 0, 1, 5, 6
+ */
+ .num_ports = 7,
.num_internal_phys = 2,
+ .invalid_port_mask = BIT(2) | BIT(3) | BIT(4),
.max_vid = 4095,
.port_base_addr = 0x8,
.phy_base_addr = 0x0,
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
index 833e55e4b9612..52ddb4ef259e9 100644
--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -94,7 +94,7 @@ int sja1110_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg)
return tmp & 0xffff;
}
-int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int reg, int mmd,
+int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg,
u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 9e9e4a03f1a8c..2d8a66ea82fab 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -351,7 +351,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
io_sq->bounce_buf_ctrl.next_to_use = 0;
- size = io_sq->bounce_buf_ctrl.buffer_size *
+ size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
io_sq->bounce_buf_ctrl.buffers_num;
dev_node = dev_to_node(ena_dev->dmadev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 09e7da1a69c9f..be5acfa41ee0c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -718,8 +718,11 @@ void ena_unmap_tx_buff(struct ena_ring *tx_ring,
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
bool print_once = true;
+ bool is_xdp_ring;
u32 i;
+ is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid);
+
for (i = 0; i < tx_ring->ring_size; i++) {
struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
@@ -739,10 +742,15 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
ena_unmap_tx_buff(tx_ring, tx_info);
- dev_kfree_skb_any(tx_info->skb);
+ if (is_xdp_ring)
+ xdp_return_frame(tx_info->xdpf);
+ else
+ dev_kfree_skb_any(tx_info->skb);
}
- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->qid));
+
+ if (!is_xdp_ring)
+ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->qid));
}
static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
@@ -3481,10 +3489,11 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
{
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
- int i, budget, rc;
+ int qid, budget, rc;
int io_queue_count;
io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
+
/* Make sure the driver doesn't turn the device in other process */
smp_rmb();
@@ -3497,27 +3506,29 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
return;
- budget = ENA_MONITORED_TX_QUEUES;
+ budget = min_t(u32, io_queue_count, ENA_MONITORED_TX_QUEUES);
- for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
- tx_ring = &adapter->tx_ring[i];
- rx_ring = &adapter->rx_ring[i];
+ qid = adapter->last_monitored_tx_qid;
+
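+	/* Scan the IO queues round-robin, starting after the last queue
+	 * checked on the previous run.
+	 */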
+ while (budget) {
+ qid = (qid + 1) % io_queue_count;
+
+ tx_ring = &adapter->tx_ring[qid];
+ rx_ring = &adapter->rx_ring[qid];
rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
if (unlikely(rc))
return;
- rc = !ENA_IS_XDP_INDEX(adapter, i) ?
+ rc = !ENA_IS_XDP_INDEX(adapter, qid) ?
check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
if (unlikely(rc))
return;
budget--;
- if (!budget)
- break;
}
- adapter->last_monitored_tx_qid = i % io_queue_count;
+ adapter->last_monitored_tx_qid = qid;
}
/* trigger napi schedule after 2 consecutive detections */
diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.c b/drivers/net/ethernet/amazon/ena/ena_xdp.c
index 337c435d3ce99..5b175e7e92a10 100644
--- a/drivers/net/ethernet/amazon/ena/ena_xdp.c
+++ b/drivers/net/ethernet/amazon/ena/ena_xdp.c
@@ -89,7 +89,7 @@ int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
rc = ena_xdp_tx_map_frame(tx_ring, tx_info, xdpf, &ena_tx_ctx);
if (unlikely(rc))
- return rc;
+ goto err;
ena_tx_ctx.req_id = req_id;
@@ -112,7 +112,9 @@ int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
error_unmap_dma:
ena_unmap_tx_buff(tx_ring, tx_info);
+err:
tx_info->xdpf = NULL;
+
return rc;
}
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 9662ee72814c0..536635e577279 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -593,6 +593,16 @@ err_out:
pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}
+void pdsc_pci_reset_thread(struct work_struct *work)
+{
+ struct pdsc *pdsc = container_of(work, struct pdsc, pci_reset_work);
+ struct pci_dev *pdev = pdsc->pdev;
+
+ pci_dev_get(pdev);
+ pci_reset_function(pdev);
+ pci_dev_put(pdev);
+}
+
static void pdsc_check_pci_health(struct pdsc *pdsc)
{
u8 fw_status;
@@ -607,7 +617,8 @@ static void pdsc_check_pci_health(struct pdsc *pdsc)
if (fw_status != PDS_RC_BAD_PCI)
return;
- pci_reset_function(pdsc->pdev);
+ /* prevent deadlock between pdsc_reset_prepare and pdsc_health_thread */
+ queue_work(pdsc->wq, &pdsc->pci_reset_work);
}
void pdsc_health_thread(struct work_struct *work)
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index 92d7657dd6147..a3e17a0c187a6 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -197,6 +197,7 @@ struct pdsc {
struct pdsc_qcq notifyqcq;
u64 last_eid;
struct pdsc_viftype *viftype_status;
+ struct work_struct pci_reset_work;
};
/** enum pds_core_dbell_bits - bitwise composition of dbell values.
@@ -313,5 +314,6 @@ int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
void pdsc_fw_down(struct pdsc *pdsc);
void pdsc_fw_up(struct pdsc *pdsc);
+void pdsc_pci_reset_thread(struct work_struct *work);
#endif /* _PDSC_H_ */
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
index e494e1298dc9a..495ef4ef8c103 100644
--- a/drivers/net/ethernet/amd/pds_core/dev.c
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -229,6 +229,9 @@ int pdsc_devcmd_reset(struct pdsc *pdsc)
.reset.opcode = PDS_CORE_CMD_RESET,
};
+ if (!pdsc_is_fw_running(pdsc))
+ return 0;
+
return pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
}
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index ab6133e7db422..660268ff95623 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -239,6 +239,7 @@ static int pdsc_init_pf(struct pdsc *pdsc)
snprintf(wq_name, sizeof(wq_name), "%s.%d", PDS_CORE_DRV_NAME, pdsc->uid);
pdsc->wq = create_singlethread_workqueue(wq_name);
INIT_WORK(&pdsc->health_work, pdsc_health_thread);
+ INIT_WORK(&pdsc->pci_reset_work, pdsc_pci_reset_thread);
timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
pdsc->wdtimer_period = PDSC_WATCHDOG_SECS * HZ;
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 9e653e2925f78..292b1f9cd9e78 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1591,7 +1591,7 @@ bmac_proc_info(char *buffer, char **start, off_t offset, int length)
}
#endif
-static int bmac_remove(struct macio_dev *mdev)
+static void bmac_remove(struct macio_dev *mdev)
{
struct net_device *dev = macio_get_drvdata(mdev);
struct bmac_data *bp = netdev_priv(dev);
@@ -1609,8 +1609,6 @@ static int bmac_remove(struct macio_dev *mdev)
macio_release_resources(mdev);
free_netdev(dev);
-
- return 0;
}
static const struct of_device_id bmac_match[] =
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index fd1b008b7208c..e6350971c7076 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -272,7 +272,7 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
return rc;
}
-static int mace_remove(struct macio_dev *mdev)
+static void mace_remove(struct macio_dev *mdev)
{
struct net_device *dev = macio_get_drvdata(mdev);
struct mace_data *mp;
@@ -296,8 +296,6 @@ static int mace_remove(struct macio_dev *mdev)
free_netdev(dev);
macio_release_resources(mdev);
-
- return 0;
}
static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index dd06b68b33ed6..72ea97c5d5d42 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -392,7 +392,9 @@ static void umac_reset(struct bcmasp_intf *intf)
umac_wl(intf, 0x0, UMC_CMD);
umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
usleep_range(10, 100);
- umac_wl(intf, 0x0, UMC_CMD);
+ /* We hold the umac in reset and bring it out of
+ * reset when phy link is up.
+ */
}
static void umac_set_hw_addr(struct bcmasp_intf *intf,
@@ -412,6 +414,8 @@ static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
u32 reg;
reg = umac_rl(intf, UMC_CMD);
+ if (reg & UMC_CMD_SW_RESET)
+ return;
if (enable)
reg |= mask;
else
@@ -430,7 +434,6 @@ static void umac_init(struct bcmasp_intf *intf)
umac_wl(intf, 0x800, UMC_FRM_LEN);
umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
- umac_enable_set(intf, UMC_CMD_PROMISC, 1);
}
static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
@@ -658,6 +661,12 @@ static void bcmasp_adj_link(struct net_device *dev)
UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
UMC_CMD_TX_PAUSE_IGNORE);
reg |= cmd_bits;
+ if (reg & UMC_CMD_SW_RESET) {
+ reg &= ~UMC_CMD_SW_RESET;
+ umac_wl(intf, reg, UMC_CMD);
+ udelay(2);
+ reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN | UMC_CMD_PROMISC;
+ }
umac_wl(intf, reg, UMC_CMD);
active = phy_init_eee(phydev, 0) >= 0;
@@ -1035,19 +1044,12 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
/* Indicate that the MAC is responsible for PHY PM */
phydev->mac_managed_pm = true;
- } else if (!intf->wolopts) {
- ret = phy_resume(dev->phydev);
- if (ret)
- goto err_phy_disable;
}
umac_reset(intf);
umac_init(intf);
- /* Disable the UniMAC RX/TX */
- umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 0);
-
umac_set_hw_addr(intf, dev->dev_addr);
intf->old_duplex = -1;
@@ -1062,9 +1064,6 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
bcmasp_enable_rx(intf, 1);
- /* Turn on UniMAC TX/RX */
- umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 1);
-
intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);
bcmasp_netif_start(dev);
@@ -1306,7 +1305,14 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
if (intf->wolopts & WAKE_FILTER)
bcmasp_netfilt_suspend(intf);
- /* UniMAC receive needs to be turned on */
+ /* Bring UniMAC out of reset if needed and enable RX */
+ reg = umac_rl(intf, UMC_CMD);
+ if (reg & UMC_CMD_SW_RESET)
+ reg &= ~UMC_CMD_SW_RESET;
+
+ reg |= UMC_CMD_RX_EN | UMC_CMD_PROMISC;
+ umac_wl(intf, reg, UMC_CMD);
+
umac_enable_set(intf, UMC_CMD_RX_EN, 1);
if (intf->parent->wol_irq > 0) {
@@ -1324,7 +1330,6 @@ int bcmasp_interface_suspend(struct bcmasp_intf *intf)
{
struct device *kdev = &intf->parent->pdev->dev;
struct net_device *dev = intf->ndev;
- int ret = 0;
if (!netif_running(dev))
return 0;
@@ -1334,10 +1339,6 @@ int bcmasp_interface_suspend(struct bcmasp_intf *intf)
bcmasp_netif_deinit(dev);
if (!intf->wolopts) {
- ret = phy_suspend(dev->phydev);
- if (ret)
- goto out;
-
if (intf->internal_phy)
bcmasp_ephy_enable_set(intf, false);
else
@@ -1354,11 +1355,7 @@ int bcmasp_interface_suspend(struct bcmasp_intf *intf)
clk_disable_unprepare(intf->parent->clk);
- return ret;
-
-out:
- bcmasp_netif_init(dev, false);
- return ret;
+ return 0;
}
static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 0d917a9699c58..b65b8592ad759 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -367,6 +367,7 @@ static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
cp->irq_arr[0].status_blk = (void *)
((unsigned long) bnapi->status_blk.msi +
(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
+ cp->irq_arr[0].status_blk_map = bp->status_blk_mapping;
cp->irq_arr[0].status_blk_num = sb_id;
cp->num_irq = 1;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index d8b1824c334d3..0bc1367fd6492 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1002,9 +1002,6 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
struct bnx2x_alloc_pool *pool)
{
- if (!pool->page)
- return;
-
put_page(pool->page);
pool->page = NULL;
@@ -1015,6 +1012,9 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
{
int i;
+ if (!fp->page_pool.page)
+ return;
+
if (fp->mode == TPA_MODE_DISABLED)
return;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 0d8e61c63c7c6..678829646cec3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -14912,9 +14912,11 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
else
cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
+ cp->irq_arr[0].status_blk_map = bp->cnic_sb_mapping;
cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
cp->irq_arr[1].status_blk = bp->def_status_blk;
+ cp->irq_arr[1].status_blk_map = bp->def_status_blk_mapping;
cp->irq_arr[1].status_blk_num = DEF_SB_ID;
cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 493b724848c8f..57e61f9631678 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -11758,6 +11758,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
+ if (bp->ptp_cfg)
+ atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
bnxt_cfg_usr_fltrs(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 93f9bd55020f2..195c02dc06830 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -210,6 +210,9 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
if (err)
return;
+ if (edev->ulp_tbl->msix_requested)
+ bnxt_fill_msix_vecs(bp, edev->msix_entries);
+
if (aux_priv) {
struct auxiliary_device *adev;
@@ -392,12 +395,13 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
if (!edev)
goto aux_dev_uninit;
+ aux_priv->edev = edev;
+
ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
if (!ulp)
goto aux_dev_uninit;
edev->ulp_tbl = ulp;
- aux_priv->edev = edev;
bp->edev = edev;
bnxt_set_edev_info(edev, bp);
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 7926aaef8f0c5..3d63177e7e52b 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1107,10 +1107,11 @@ static int cnic_init_uio(struct cnic_dev *dev)
TX_MAX_TSS_RINGS + 1);
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
CNIC_PAGE_MASK;
+ uinfo->mem[1].dma_addr = cp->status_blk_map;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
- uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
+ uinfo->mem[1].size = PAGE_ALIGN(BNX2_SBLK_MSIX_ALIGN_SIZE * 9);
else
- uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
+ uinfo->mem[1].size = PAGE_ALIGN(BNX2_SBLK_MSIX_ALIGN_SIZE);
uinfo->name = "bnx2_cnic";
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
@@ -1118,20 +1119,26 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
CNIC_PAGE_MASK;
- uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
+ uinfo->mem[1].dma_addr = cp->status_blk_map;
+ uinfo->mem[1].size = PAGE_ALIGN(sizeof(*cp->bnx2x_def_status_blk));
uinfo->name = "bnx2x_cnic";
}
- uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
+ uinfo->mem[1].dma_device = &dev->pcidev->dev;
+ uinfo->mem[1].memtype = UIO_MEM_DMA_COHERENT;
uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
- uinfo->mem[2].size = udev->l2_ring_size;
- uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
+ uinfo->mem[2].dma_addr = udev->l2_ring_map;
+ uinfo->mem[2].size = PAGE_ALIGN(udev->l2_ring_size);
+ uinfo->mem[2].dma_device = &dev->pcidev->dev;
+ uinfo->mem[2].memtype = UIO_MEM_DMA_COHERENT;
uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
- uinfo->mem[3].size = udev->l2_buf_size;
- uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
+ uinfo->mem[3].dma_addr = udev->l2_buf_map;
+ uinfo->mem[3].size = PAGE_ALIGN(udev->l2_buf_size);
+ uinfo->mem[3].dma_device = &dev->pcidev->dev;
+ uinfo->mem[3].memtype = UIO_MEM_DMA_COHERENT;
uinfo->version = CNIC_MODULE_VERSION;
uinfo->irq = UIO_IRQ_CUSTOM;
@@ -1313,6 +1320,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
return 0;
cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
+ cp->status_blk_map = cp->ethdev->irq_arr[1].status_blk_map;
cp->l2_rx_ring_size = 15;
@@ -5323,6 +5331,7 @@ static int cnic_start_hw(struct cnic_dev *dev)
pci_dev_get(dev->pcidev);
cp->func = PCI_FUNC(dev->pcidev->devfn);
cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
+ cp->status_blk_map = ethdev->irq_arr[0].status_blk_map;
cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
err = cp->alloc_resc(dev);
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 4baea81bae7a3..fedc84ada937d 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -260,6 +260,7 @@ struct cnic_local {
#define SM_RX_ID 0
#define SM_TX_ID 1
} status_blk;
+ dma_addr_t status_blk_map;
struct host_sp_status_block *bnx2x_def_status_blk;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 789e5c7e93116..49a11ec80b364 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -190,6 +190,7 @@ struct cnic_ops {
struct cnic_irq {
unsigned int vector;
void *status_blk;
+ dma_addr_t status_blk_map;
u32 status_blk_num;
u32 status_blk_num2;
u32 irq_flags;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 7396e2823e328..b1f84b37032a7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3280,7 +3280,7 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
}
/* Returns a reusable dma control register value */
-static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
+static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx)
{
unsigned int i;
u32 reg;
@@ -3305,6 +3305,14 @@ static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
udelay(10);
bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+ if (flush_rx) {
+ reg = bcmgenet_rbuf_ctrl_get(priv);
+ bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
+ udelay(10);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+ }
+
return dma_ctrl;
}
@@ -3368,8 +3376,8 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- /* Disable RX/TX DMA and flush TX queues */
- dma_ctrl = bcmgenet_dma_disable(priv);
+ /* Disable RX/TX DMA and flush TX and RX queues */
+ dma_ctrl = bcmgenet_dma_disable(priv, true);
/* Reinitialize TDMA and RDMA and SW housekeeping */
ret = bcmgenet_init_dma(priv);
@@ -4235,7 +4243,7 @@ static int bcmgenet_resume(struct device *d)
bcmgenet_hfb_create_rxnfc_filter(priv, rule);
/* Disable RX/TX DMA and flush TX queues */
- dma_ctrl = bcmgenet_dma_disable(priv);
+ dma_ctrl = bcmgenet_dma_disable(priv, false);
/* Reinitialize TDMA and RDMA and SW housekeeping */
ret = bcmgenet_init_dma(priv);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index eee759054aada..62ff4381ac83c 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -221,7 +221,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
-MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
+MODULE_AUTHOR("David S. Miller <davem@redhat.com> and Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d7693fdf640d5..8bd213da8fb6f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2454,8 +2454,6 @@ static int fec_enet_mii_probe(struct net_device *ndev)
fep->link = 0;
fep->full_duplex = 0;
- phy_dev->mac_managed_pm = true;
-
phy_attached_info(phy_dev);
return 0;
@@ -2467,10 +2465,12 @@ static int fec_enet_mii_init(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
bool suppress_preamble = false;
+ struct phy_device *phydev;
struct device_node *node;
int err = -ENXIO;
u32 mii_speed, holdtime;
u32 bus_freq;
+ int addr;
/*
* The i.MX28 dual fec interfaces are not equal.
@@ -2584,6 +2584,13 @@ static int fec_enet_mii_init(struct platform_device *pdev)
goto err_out_free_mdiobus;
of_node_put(node);
+ /* find all the PHY devices on the bus and set mac_managed_pm to true */
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ phydev = mdiobus_get_phy(fep->mii_bus, addr);
+ if (phydev)
+ phydev->mac_managed_pm = true;
+ }
+
mii_cnt++;
/* save fec0 mii_bus */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
index f3c9395d8351c..618f66d9586b3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
@@ -85,7 +85,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_TX_STATS,
true);
- desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+ desc.data[0] = cpu_to_le32(tqp->index);
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 999a0ee162a64..941cb529d671f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -78,6 +78,9 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_NIC_LB_TEST_NO_MEM_ERR 1
#define HNS3_NIC_LB_TEST_TX_CNT_ERR 2
#define HNS3_NIC_LB_TEST_RX_CNT_ERR 3
+#define HNS3_NIC_LB_TEST_UNEXECUTED 4
+
+static int hns3_get_sset_count(struct net_device *netdev, int stringset);
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
@@ -418,18 +421,26 @@ static void hns3_do_external_lb(struct net_device *ndev,
static void hns3_self_test(struct net_device *ndev,
struct ethtool_test *eth_test, u64 *data)
{
+ int cnt = hns3_get_sset_count(ndev, ETH_SS_TEST);
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
int st_param[HNAE3_LOOP_NONE][2];
bool if_running = netif_running(ndev);
+ int i;
+
+ /* initialize the loopback test result, avoid marking an unexecuted
+ * loopback test as PASS.
+ */
+ for (i = 0; i < cnt; i++)
+ data[i] = HNS3_NIC_LB_TEST_UNEXECUTED;
if (hns3_nic_resetting(ndev)) {
netdev_err(ndev, "dev resetting!");
- return;
+ goto failure;
}
if (!(eth_test->flags & ETH_TEST_FL_OFFLINE))
- return;
+ goto failure;
if (netif_msg_ifdown(h))
netdev_info(ndev, "self test start\n");
@@ -451,6 +462,10 @@ static void hns3_self_test(struct net_device *ndev,
if (netif_msg_ifdown(h))
netdev_info(ndev, "self test end\n");
+ return;
+
+failure:
+ eth_test->flags |= ETH_TEST_FL_FAILED;
}
static void hns3_update_limit_promisc_mode(struct net_device *netdev,
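A condensed sketch of the control flow introduced above: sentinel initialization plus a shared failure label. Identifiers follow the patch; the `cannot_run_tests` condition is a hypothetical stand-in for the reset/offline checks.

/* Hedged sketch: pre-fill every result slot with a non-zero sentinel so a
 * test that never runs cannot read back as 0 (PASS), and funnel all early
 * exits through one place that flags the whole run as failed.
 */
for (i = 0; i < cnt; i++)
	data[i] = HNS3_NIC_LB_TEST_UNEXECUTED;

if (cannot_run_tests)		/* hypothetical: device resetting, online-only run */
	goto failure;

/* ... run the loopback tests, overwriting data[] with real results ... */
return;

failure:
	eth_test->flags |= ETH_TEST_FL_FAILED;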
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index b4afb66efe5c5..ff6a2ed23ddb6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -11626,6 +11626,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_pci_uninit;
+ devl_lock(hdev->devlink);
+
/* Firmware command queue initialize */
ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
@@ -11805,6 +11807,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+ devl_unlock(hdev->devlink);
return 0;
err_mdiobus_unreg:
@@ -11817,6 +11820,7 @@ err_msi_uninit:
err_cmd_uninit:
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_devlink_uninit:
+ devl_unlock(hdev->devlink);
hclge_devlink_uninit(hdev);
err_pci_uninit:
pcim_iounmap(pdev, hdev->hw.hw.io_base);
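The locking added above follows the usual bracket pattern. A minimal sketch of the intended nesting, with `err` standing in for the driver's chained error labels, including the requirement that the devlink instance be unlocked before it is unregistered:

/* Hedged sketch of the init-path locking: hardware bring-up runs under
 * devl_lock() so concurrent devlink operations see a consistent device,
 * and every exit path -- success or error -- drops the lock before
 * hclge_devlink_uninit() tears the devlink instance down.
 */
devl_lock(hdev->devlink);

ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
	goto err;

/* ... remaining hardware initialization ... */

devl_unlock(hdev->devlink);
return 0;

err:
	devl_unlock(hdev->devlink);
	hclge_devlink_uninit(hdev);
	return ret;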
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
index 8510b88d49820..f3cd5a376eca9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
@@ -24,7 +24,7 @@ TRACE_EVENT(hclge_pf_mbx_get,
__field(u8, code)
__field(u8, subcode)
__string(pciname, pci_name(hdev->pdev))
- __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
+ __string(devname, hdev->vport[0].nic.kinfo.netdev->name)
__array(u32, mbx_data, PF_GET_MBX_LEN)
),
@@ -33,7 +33,7 @@ TRACE_EVENT(hclge_pf_mbx_get,
__entry->code = req->msg.code;
__entry->subcode = req->msg.subcode;
__assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
+ __assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_vf_to_pf_cmd));
),
@@ -56,7 +56,7 @@ TRACE_EVENT(hclge_pf_mbx_send,
__field(u8, vfid)
__field(u16, code)
__string(pciname, pci_name(hdev->pdev))
- __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
+ __string(devname, hdev->vport[0].nic.kinfo.netdev->name)
__array(u32, mbx_data, PF_SEND_MBX_LEN)
),
@@ -64,7 +64,7 @@ TRACE_EVENT(hclge_pf_mbx_send,
__entry->vfid = req->dest_vfid;
__entry->code = le16_to_cpu(req->msg.code);
__assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
+ __assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_pf_to_vf_cmd));
),
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
index 5d4895bb57a17..b259e95dd53c2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
@@ -23,7 +23,7 @@ TRACE_EVENT(hclge_vf_mbx_get,
__field(u8, vfid)
__field(u16, code)
__string(pciname, pci_name(hdev->pdev))
- __string(devname, &hdev->nic.kinfo.netdev->name)
+ __string(devname, hdev->nic.kinfo.netdev->name)
__array(u32, mbx_data, VF_GET_MBX_LEN)
),
@@ -31,7 +31,7 @@ TRACE_EVENT(hclge_vf_mbx_get,
__entry->vfid = req->dest_vfid;
__entry->code = le16_to_cpu(req->msg.code);
__assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, &hdev->nic.kinfo.netdev->name);
+ __assign_str(devname, hdev->nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_pf_to_vf_cmd));
),
@@ -55,7 +55,7 @@ TRACE_EVENT(hclge_vf_mbx_send,
__field(u8, code)
__field(u8, subcode)
__string(pciname, pci_name(hdev->pdev))
- __string(devname, &hdev->nic.kinfo.netdev->name)
+ __string(devname, hdev->nic.kinfo.netdev->name)
__array(u32, mbx_data, VF_SEND_MBX_LEN)
),
@@ -64,7 +64,7 @@ TRACE_EVENT(hclge_vf_mbx_send,
__entry->code = req->msg.code;
__entry->subcode = req->msg.subcode;
__assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, &hdev->nic.kinfo.netdev->name);
+ __assign_str(devname, hdev->nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_vf_to_pf_cmd));
),
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 1fef6bb5a5fbc..4b6e7536170ab 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -628,6 +628,7 @@ struct e1000_phy_info {
u32 id;
u32 reset_delay_us; /* in usec */
u32 revision;
+ u32 retry_count;
enum e1000_media_type media_type;
@@ -644,6 +645,7 @@ struct e1000_phy_info {
bool polarity_correction;
bool speed_downgraded;
bool autoneg_wait_to_complete;
+ bool retry_enabled;
};
struct e1000_nvm_info {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 19e450a5bd314..f9e94be36e97f 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -222,11 +222,18 @@ out:
if (hw->mac.type >= e1000_pch_lpt) {
/* Only unforce SMBus if ME is not active */
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ /* Switching the PHY interface always returns an MDI error,
+ * so disable the retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
/* Unforce SMBus mode in PHY */
e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
+ e1000e_enable_phy_retry(hw);
+
/* Unforce SMBus mode in MAC */
mac_reg = er32(CTRL_EXT);
mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
@@ -310,6 +317,11 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
goto out;
}
+ /* There is no guarantee that the PHY is accessible at this time,
+ * so disable the retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
/* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
* inaccessible and resetting the PHY is not blocked, toggle the
* LANPHYPC Value bit to force the interconnect to PCIe mode.
@@ -380,6 +392,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
break;
}
+ e1000e_enable_phy_retry(hw);
+
hw->phy.ops.release(hw);
if (!ret_val) {
@@ -449,6 +463,11 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->id = e1000_phy_unknown;
+ if (hw->mac.type == e1000_pch_mtp) {
+ phy->retry_count = 2;
+ e1000e_enable_phy_retry(hw);
+ }
+
ret_val = e1000_init_phy_workarounds_pchlan(hw);
if (ret_val)
return ret_val;
@@ -1146,18 +1165,6 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
if (ret_val)
goto out;
- /* Force SMBus mode in PHY */
- ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
- if (ret_val)
- goto release;
- phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
- e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
-
- /* Force SMBus mode in MAC */
- mac_reg = er32(CTRL_EXT);
- mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, mac_reg);
-
/* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
* LPLU and disable Gig speed when entering ULP
*/
@@ -1313,6 +1320,11 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
/* Toggle LANPHYPC Value bit */
e1000_toggle_lanphypc_pch_lpt(hw);
+ /* Switching the PHY interface always returns an MDI error,
+ * so disable the retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
/* Unforce SMBus mode in PHY */
ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
if (ret_val) {
@@ -1333,6 +1345,8 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+ e1000e_enable_phy_retry(hw);
+
/* Unforce SMBus mode in MAC */
mac_reg = er32(CTRL_EXT);
mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index cc8c531ec3dff..3692fce201959 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6623,6 +6623,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, ctrl_ext, rctl, status, wufc;
int retval = 0;
+ u16 smb_ctrl;
/* Runtime suspend should only enable wakeup for link changes */
if (runtime)
@@ -6696,6 +6697,23 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (retval)
return retval;
}
+
+ /* Force SMBus to allow WOL */
+ /* Switching the PHY interface always returns an MDI error,
+ * so disable the retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
+ e1e_rphy(hw, CV_SMB_CTRL, &smb_ctrl);
+ smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy(hw, CV_SMB_CTRL, smb_ctrl);
+
+ e1000e_enable_phy_retry(hw);
+
+ /* Force SMBus mode in MAC */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, ctrl_ext);
}
/* Ensure that the appropriate bits are set in LPI_CTRL
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 5e329156d1bae..93544f1cc2a51 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -107,6 +107,16 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0);
}
+void e1000e_disable_phy_retry(struct e1000_hw *hw)
+{
+ hw->phy.retry_enabled = false;
+}
+
+void e1000e_enable_phy_retry(struct e1000_hw *hw)
+{
+ hw->phy.retry_enabled = true;
+}
+
/**
* e1000e_read_phy_reg_mdic - Read MDI control register
* @hw: pointer to the HW structure
@@ -118,55 +128,73 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
**/
s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
{
+ u32 i, mdic = 0, retry_counter, retry_max;
struct e1000_phy_info *phy = &hw->phy;
- u32 i, mdic = 0;
+ bool success;
if (offset > MAX_PHY_REG_ADDRESS) {
e_dbg("PHY Address %d is out of range\n", offset);
return -E1000_ERR_PARAM;
}
+ retry_max = phy->retry_enabled ? phy->retry_count : 0;
+
/* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
- mdic = ((offset << E1000_MDIC_REG_SHIFT) |
- (phy->addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_READ));
+ for (retry_counter = 0; retry_counter <= retry_max; retry_counter++) {
+ success = true;
- ew32(MDIC, mdic);
+ mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
- /* Poll the ready bit to see if the MDI read completed
- * Increasing the time out as testing showed failures with
- * the lower time out
- */
- for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- udelay(50);
- mdic = er32(MDIC);
- if (mdic & E1000_MDIC_READY)
- break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Read PHY Reg Address %d did not complete\n", offset);
- return -E1000_ERR_PHY;
- }
- if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
- return -E1000_ERR_PHY;
- }
- if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
- e_dbg("MDI Read offset error - requested %d, returned %d\n",
- offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
- return -E1000_ERR_PHY;
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read completed
+ * Increasing the time out as testing showed failures with
+ * the lower time out
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ usleep_range(50, 60);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Read PHY Reg Address %d did not complete\n",
+ offset);
+ success = false;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
+ success = false;
+ }
+ if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
+ e_dbg("MDI Read offset error - requested %d, returned %d\n",
+ offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
+ success = false;
+ }
+
+ /* Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ usleep_range(100, 150);
+
+ if (success) {
+ *data = (u16)mdic;
+ return 0;
+ }
+
+ if (retry_counter != retry_max) {
+ e_dbg("Perform retry on PHY transaction...\n");
+ mdelay(10);
+ }
}
- *data = (u16)mdic;
- /* Allow some time after each MDIC transaction to avoid
- * reading duplicate data in the next MDIC transaction.
- */
- if (hw->mac.type == e1000_pch2lan)
- udelay(100);
- return 0;
+ return -E1000_ERR_PHY;
}
/**
@@ -179,56 +207,72 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
**/
s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
{
+ u32 i, mdic = 0, retry_counter, retry_max;
struct e1000_phy_info *phy = &hw->phy;
- u32 i, mdic = 0;
+ bool success;
if (offset > MAX_PHY_REG_ADDRESS) {
e_dbg("PHY Address %d is out of range\n", offset);
return -E1000_ERR_PARAM;
}
+ retry_max = phy->retry_enabled ? phy->retry_count : 0;
+
/* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
- mdic = (((u32)data) |
- (offset << E1000_MDIC_REG_SHIFT) |
- (phy->addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_WRITE));
+ for (retry_counter = 0; retry_counter <= retry_max; retry_counter++) {
+ success = true;
- ew32(MDIC, mdic);
+ mdic = (((u32)data) |
+ (offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
- /* Poll the ready bit to see if the MDI read completed
- * Increasing the time out as testing showed failures with
- * the lower time out
- */
- for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- udelay(50);
- mdic = er32(MDIC);
- if (mdic & E1000_MDIC_READY)
- break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Write PHY Reg Address %d did not complete\n", offset);
- return -E1000_ERR_PHY;
- }
- if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Write PHY Red Address %d Error\n", offset);
- return -E1000_ERR_PHY;
- }
- if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
- e_dbg("MDI Write offset error - requested %d, returned %d\n",
- offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
- return -E1000_ERR_PHY;
- }
+ ew32(MDIC, mdic);
- /* Allow some time after each MDIC transaction to avoid
- * reading duplicate data in the next MDIC transaction.
- */
- if (hw->mac.type == e1000_pch2lan)
- udelay(100);
+ /* Poll the ready bit to see if the MDI write completed
+ * Increasing the time out as testing showed failures with
+ * the lower time out
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ usleep_range(50, 60);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Write PHY Reg Address %d did not complete\n",
+ offset);
+ success = false;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Write PHY Reg Address %d Error\n", offset);
+ success = false;
+ }
+ if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
+ e_dbg("MDI Write offset error - requested %d, returned %d\n",
+ offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
+ success = false;
+ }
- return 0;
+ /* Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ usleep_range(100, 150);
+
+ if (success)
+ return 0;
+
+ if (retry_counter != retry_max) {
+ e_dbg("Perform retry on PHY transaction...\n");
+ mdelay(10);
+ }
+ }
+
+ return -E1000_ERR_PHY;
}
/**
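A short usage sketch of the new retry toggles introduced above; the bracketing is what keeps a deliberately failing access from paying the full retry cost:

/* Hedged sketch: MDIC accesses issued while the PHY interface is being
 * switched between SMBus and MDIO are expected to report MDI errors, so
 * the retry loop in e1000e_read/write_phy_reg_mdic() is switched off
 * around them and re-enabled afterwards.
 */
e1000e_disable_phy_retry(hw);

e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);

e1000e_enable_phy_retry(hw);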
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index c48777d095235..049bb325b4b14 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -51,6 +51,8 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
void e1000_power_up_phy_copper(struct e1000_hw *hw);
void e1000_power_down_phy_copper(struct e1000_hw *hw);
+void e1000e_disable_phy_retry(struct e1000_hw *hw);
+void e1000e_enable_phy_retry(struct e1000_hw *hw);
s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index ba24f3fa92c37..2fbabcdb5bb5f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -955,6 +955,7 @@ struct i40e_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
bool arm_wb_state;
+ bool in_busy_poll;
int irq_num; /* IRQ assigned to this q_vector */
} ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f86578857e8ae..48b9ddb2b1b38 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1253,8 +1253,11 @@ int i40e_count_filters(struct i40e_vsi *vsi)
int bkt;
int cnt = 0;
- hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
- ++cnt;
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->state == I40E_FILTER_NEW ||
+ f->state == I40E_FILTER_ACTIVE)
+ ++cnt;
+ }
return cnt;
}
@@ -3911,6 +3914,12 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;
+ /* Set ITR for software interrupts triggered after exiting
+ * busy-loop polling.
+ */
+ wr32(hw, I40E_PFINT_ITRN(I40E_SW_ITR, vector - 1),
+ I40E_ITR_20K);
+
wr32(hw, I40E_PFINT_RATEN(vector - 1),
i40e_intrl_usec_to_reg(vsi->int_rate_limit));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 14ab642cafdb2..432afbb642013 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -333,8 +333,11 @@
#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 0d7177083708f..1a12b732818ee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2630,7 +2630,22 @@ process_next:
return failure ? budget : (int)total_rx_packets;
}
-static inline u32 i40e_buildreg_itr(const int type, u16 itr)
+/**
+ * i40e_buildreg_itr - build a value for writing to I40E_PFINT_DYN_CTLN register
+ * @itr_idx: interrupt throttling index
+ * @interval: interrupt throttling interval value in usecs
+ * @force_swint: force software interrupt
+ *
+ * The function builds a value for the I40E_PFINT_DYN_CTLN register that
+ * updates the interrupt throttling interval for the specified ITR index
+ * and optionally enforces a software interrupt. If @itr_idx is
+ * I40E_ITR_NONE, no interval change is applied and only the @force_swint
+ * parameter is taken into account. If neither an interval change nor an
+ * enforced software interrupt is requested, the built value simply
+ * enables the appropriate vector interrupt.
+ **/
+static u32 i40e_buildreg_itr(enum i40e_dyn_idx itr_idx, u16 interval,
+ bool force_swint)
{
u32 val;
@@ -2644,23 +2659,33 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
* an event in the PBA anyway so we need to rely on the automask
* to hold pending events for us until the interrupt is re-enabled
*
- * The itr value is reported in microseconds, and the register
- * value is recorded in 2 microsecond units. For this reason we
- * only need to shift by the interval shift - 1 instead of the
- * full value.
+ * We have to shift the given value as it is reported in microseconds
+ * and the register value is recorded in 2 microsecond units.
*/
- itr &= I40E_ITR_MASK;
+ interval >>= 1;
+ /* 1. Enable vector interrupt
+ * 2. Update the interval for the specified ITR index
+ * (I40E_ITR_NONE in the register is used to indicate that
+ * no interval update is requested)
+ */
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
+ FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX_MASK, itr_idx) |
+ FIELD_PREP(I40E_PFINT_DYN_CTLN_INTERVAL_MASK, interval);
+
+ /* 3. Enforce software interrupt trigger if requested
+ * (The rate of these software interrupts is limited by ITR2, which is
+ * set to 20K interrupts per second)
+ */
+ if (force_swint)
+ val |= I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
+ FIELD_PREP(I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK,
+ I40E_SW_ITR);
return val;
}
-/* a small macro to shorten up some long lines */
-#define INTREG I40E_PFINT_DYN_CTLN
-
/* The act of updating the ITR will cause it to immediately trigger. In order
* to prevent this from throwing off adaptive update statistics we defer the
* update so that it can only happen so often. So after either Tx or Rx are
@@ -2679,8 +2704,10 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_q_vector *q_vector)
{
+ enum i40e_dyn_idx itr_idx = I40E_ITR_NONE;
struct i40e_hw *hw = &vsi->back->hw;
- u32 intval;
+ u16 interval = 0;
+ u32 itr_val;
/* If we don't have MSIX, then we only need to re-enable icr0 */
if (!test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
@@ -2702,8 +2729,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
/* Rx ITR needs to be reduced, this is highest priority */
- intval = i40e_buildreg_itr(I40E_RX_ITR,
- q_vector->rx.target_itr);
+ itr_idx = I40E_RX_ITR;
+ interval = q_vector->rx.target_itr;
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
@@ -2712,25 +2739,36 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
/* Tx ITR needs to be reduced, this is second priority
* Tx ITR needs to be increased more than Rx, fourth priority
*/
- intval = i40e_buildreg_itr(I40E_TX_ITR,
- q_vector->tx.target_itr);
+ itr_idx = I40E_TX_ITR;
+ interval = q_vector->tx.target_itr;
q_vector->tx.current_itr = q_vector->tx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
/* Rx ITR needs to be increased, third priority */
- intval = i40e_buildreg_itr(I40E_RX_ITR,
- q_vector->rx.target_itr);
+ itr_idx = I40E_RX_ITR;
+ interval = q_vector->rx.target_itr;
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else {
/* No ITR update, lowest priority */
- intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
if (q_vector->itr_countdown)
q_vector->itr_countdown--;
}
- if (!test_bit(__I40E_VSI_DOWN, vsi->state))
- wr32(hw, INTREG(q_vector->reg_idx), intval);
+ /* Do not update interrupt control register if VSI is down */
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return;
+
+ /* Update ITR interval if necessary and enforce software interrupt
+ * if we are exiting busy poll.
+ */
+ if (q_vector->in_busy_poll) {
+ itr_val = i40e_buildreg_itr(itr_idx, interval, true);
+ q_vector->in_busy_poll = false;
+ } else {
+ itr_val = i40e_buildreg_itr(itr_idx, interval, false);
+ }
+ wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->reg_idx), itr_val);
}
/**
@@ -2845,6 +2883,8 @@ tx_only:
*/
if (likely(napi_complete_done(napi, work_done)))
i40e_update_enable_itr(vsi, q_vector);
+ else
+ q_vector->in_busy_poll = true;
return min(work_done, budget - 1);
}
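A hedged worked example of the register value built above, using the field layout added to i40e_register.h (2 usec interval units, ITR index at bit 3, 12-bit interval at bit 5):

/* Hedged example: request a 40 usec RX interval with no software interrupt.
 * 40 usec is programmed as 40 >> 1 == 20 two-microsecond units.
 */
u32 val = i40e_buildreg_itr(I40E_RX_ITR, 40, false);
/*
 * val == I40E_PFINT_DYN_CTLN_INTENA_MASK
 *	| FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX_MASK, I40E_RX_ITR)
 *	| FIELD_PREP(I40E_PFINT_DYN_CTLN_INTERVAL_MASK, 20);
 *
 * When the vector was marked in_busy_poll (napi_complete_done() returned
 * false earlier), force_swint is passed as true on the next update, so the
 * SWINT_TRIG and SW_ITR_INDX fields are added and a software interrupt
 * fires, rate-limited by the ITR2 (I40E_SW_ITR) index that the MSI-X setup
 * above programs to 20K interrupts per second.
 */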
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index abf15067eb5de..2cdc7de6301c1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -68,6 +68,7 @@ enum i40e_dyn_idx {
/* these are indexes into ITRN registers */
#define I40E_RX_ITR I40E_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1
+#define I40E_SW_ITR I40E_IDX_ITR2
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 83a34e98bdc79..232b65b9c8eac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1624,8 +1624,8 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
struct i40e_hw *hw = &pf->hw;
struct i40e_vf *vf;
- int i, v;
u32 reg;
+ int i;
/* If we don't have any VFs, then there is nothing to reset */
if (!pf->num_alloc_vfs)
@@ -1636,11 +1636,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
return false;
/* Begin reset on all VFs at once */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
- vf = &pf->vf[v];
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* If VF is being reset no need to trigger reset again */
if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
- i40e_trigger_vf_reset(&pf->vf[v], flr);
+ i40e_trigger_vf_reset(vf, flr);
}
/* HW requires some time to make sure it can flush the FIFO for a VF
@@ -1649,14 +1648,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
* the VFs using a simple iterator that increments once that VF has
* finished resetting.
*/
- for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
+ for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) {
usleep_range(10000, 20000);
/* Check each VF in sequence, beginning with the VF to fail
* the previous check.
*/
- while (v < pf->num_alloc_vfs) {
- vf = &pf->vf[v];
+ while (vf < &pf->vf[pf->num_alloc_vfs]) {
if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
@@ -1666,7 +1664,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
/* If the current VF has finished resetting, move on
* to the next VF in sequence.
*/
- v++;
+ ++vf;
}
}
@@ -1676,39 +1674,39 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
/* Display a warning if at least one VF didn't manage to reset in
* time, but continue on with the operation.
*/
- if (v < pf->num_alloc_vfs)
+ if (vf < &pf->vf[pf->num_alloc_vfs])
dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
- pf->vf[v].vf_id);
+ vf->vf_id);
usleep_range(10000, 20000);
/* Begin disabling all the rings associated with VFs, but do not wait
* between each VF.
*/
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* On initial reset, we don't have any queues to disable */
- if (pf->vf[v].lan_vsi_idx == 0)
+ if (vf->lan_vsi_idx == 0)
continue;
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
- i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
+ i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]);
}
/* Now that we've notified HW to disable all of the VF rings, wait
* until they finish.
*/
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* On initial reset, we don't have any queues to disable */
- if (pf->vf[v].lan_vsi_idx == 0)
+ if (vf->lan_vsi_idx == 0)
continue;
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
- i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
+ i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]);
}
/* Hw may need up to 50ms to finish disabling the RX queues. We
@@ -1717,12 +1715,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
mdelay(50);
/* Finish the reset on each VF */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
- i40e_cleanup_reset_vf(&pf->vf[v]);
+ i40e_cleanup_reset_vf(vf);
}
i40e_flush(hw);
@@ -3139,11 +3137,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
/* Allow to delete VF primary MAC only if it was not set
* administratively by PF or if VF is trusted.
*/
- if (ether_addr_equal(addr, vf->default_lan_addr.addr) &&
- i40e_can_vf_change_mac(vf))
- was_unimac_deleted = true;
- else
- continue;
+ if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+ if (i40e_can_vf_change_mac(vf))
+ was_unimac_deleted = true;
+ else
+ continue;
+ }
if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
ret = -EINVAL;
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 8040317c95617..1f3e7a6903e56 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -593,8 +593,9 @@ struct ice_aqc_recipe_data_elem {
struct ice_aqc_recipe_to_profile {
__le16 profile_id;
u8 rsvd[6];
- DECLARE_BITMAP(recipe_assoc, ICE_MAX_NUM_RECIPES);
+ __le64 recipe_assoc;
};
+static_assert(sizeof(struct ice_aqc_recipe_to_profile) == 16);
/* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3)
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index d2fd315556a39..a545a7917e4fc 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -956,7 +956,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
u16 q_idx)
{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
return -EINVAL;
@@ -978,7 +978,7 @@ int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
int err = 0;
u16 q_idx;
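A brief usage sketch of the on-stack flexible-array helper swapped in above (behaviour as understood here; the exact macro definition lives in the kernel's overflow header):

/* Hedged sketch: DEFINE_RAW_FLEX() declares stack storage sized for the
 * struct plus the requested number of trailing flex-array elements and
 * exposes a typed pointer to it.  Unlike DEFINE_FLEX() it does not
 * initialize any counter member, which avoids surprises for structures
 * without a __counted_by annotation.
 */
DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
u16 buf_len = __struct_size(qg_buf);	/* header plus one txqs[] element */

qg_buf->num_txqs = 1;			/* header fields are filled as usual */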
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 4d8111aeb0ff0..d9f6cc71d900a 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1002,8 +1002,8 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
*/
int ice_init_hw(struct ice_hw *hw)
{
- struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
- void *mac_buf __free(kfree);
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
+ void *mac_buf __free(kfree) = NULL;
u16 mac_buf_len;
int status;
@@ -3272,7 +3272,7 @@ int ice_update_link_info(struct ice_port_info *pi)
return status;
if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
- struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
@@ -3420,7 +3420,7 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
- struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
struct ice_hw *hw;
int status;
@@ -3561,7 +3561,7 @@ int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec)
{
- struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
struct ice_hw *hw;
int status;
@@ -4695,7 +4695,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
- DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
u16 i, buf_size = __struct_size(qg_list);
struct ice_q_ctx *q_ctx;
int status = -ENOENT;
@@ -4917,7 +4917,7 @@ int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id)
{
- DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
u16 qg_size = __struct_size(qg_list);
struct ice_hw *hw;
int status = 0;
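The `= NULL` initializers added above matter because cleanup runs at scope exit. A minimal sketch of the pattern, under the assumption that `__free(kfree)` attaches kfree() as the scope-exit action:

/* Hedged sketch: a __free(kfree) pointer is passed to kfree() automatically
 * whenever it goes out of scope.  If an early return can happen before the
 * allocation, the pointer must start out as NULL so the automatic kfree()
 * never sees an uninitialized value.
 */
struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;

pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
	return -ENOMEM;		/* kfree(NULL) at scope exit is a no-op */

/* ... use pcaps; no explicit kfree() on any return path ... */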
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 7532d11ad7f33..fc91c4d411863 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1938,8 +1938,8 @@ static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
*/
static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
{
- DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
- ICE_PKG_CNT);
+ DEFINE_RAW_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
+ ICE_PKG_CNT);
u16 size = __struct_size(pkg_info);
u32 i;
@@ -1990,8 +1990,8 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
struct ice_pkg_hdr *ospkg,
struct ice_seg **seg)
{
- DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
- ICE_PKG_CNT);
+ DEFINE_RAW_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
+ ICE_PKG_CNT);
u16 size = __struct_size(pkg);
enum ice_ddp_state state;
u32 i;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 255a9c8151b45..78b833b3e1d7e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -941,11 +941,11 @@ static u64 ice_loopback_test(struct net_device *netdev)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *orig_vsi = np->vsi, *test_vsi;
struct ice_pf *pf = orig_vsi->back;
+ u8 *tx_frame __free(kfree) = NULL;
u8 broadcast[ETH_ALEN], ret = 0;
int num_frames, valid_frames;
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
- u8 *tx_frame __free(kfree);
int i;
netdev_info(netdev, "loopback test\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 467372d541d21..f0e76f0a6d603 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -491,7 +491,7 @@ static void
ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
u16 vsi_num, u8 tc)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
@@ -849,7 +849,7 @@ static void
ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
u8 tc)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
@@ -1873,7 +1873,7 @@ static void
ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
u16 vsi_num, u8 tc)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
@@ -2041,7 +2041,7 @@ int ice_init_lag(struct ice_pf *pf)
/* associate recipes to profiles */
for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) {
err = ice_aq_get_recipe_to_profile(&pf->hw, n,
- (u8 *)&recipe_bits, NULL);
+ &recipe_bits, NULL);
if (err)
continue;
@@ -2049,7 +2049,7 @@ int ice_init_lag(struct ice_pf *pf)
recipe_bits |= BIT(lag->pf_recipe) |
BIT(lag->lport_recipe);
ice_aq_map_recipe_to_profile(&pf->hw, n,
- (u8 *)&recipe_bits, NULL);
+ recipe_bits, NULL);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index ee3f0d3e3f6db..558422120312b 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -3091,7 +3091,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
{
struct ice_vsi_cfg_params params = {};
struct ice_coalesce_stored *coalesce;
- int prev_num_q_vectors = 0;
+ int prev_num_q_vectors;
struct ice_pf *pf;
int ret;
@@ -3105,13 +3105,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
- coalesce = kcalloc(vsi->num_q_vectors,
- sizeof(struct ice_coalesce_stored), GFP_KERNEL);
- if (!coalesce)
- return -ENOMEM;
-
- prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
-
ret = ice_vsi_realloc_stat_arrays(vsi);
if (ret)
goto err_vsi_cfg;
@@ -3121,6 +3114,13 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
if (ret)
goto err_vsi_cfg;
+ coalesce = kcalloc(vsi->num_q_vectors,
+ sizeof(struct ice_coalesce_stored), GFP_KERNEL);
+ if (!coalesce)
+ return -ENOMEM;
+
+ prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
+
ret = ice_vsi_cfg_tc_lan(pf, vsi);
if (ret) {
if (vsi_flags & ICE_VSI_FLAG_INIT) {
@@ -3139,8 +3139,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
err_vsi_cfg_tc_lan:
ice_vsi_decfg(vsi);
-err_vsi_cfg:
kfree(coalesce);
+err_vsi_cfg:
return ret;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index d174a4eeb899c..a1525992d14bc 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -237,7 +237,7 @@ static int
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
u32 node_teid)
{
- DEFINE_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
u16 buf_size = __struct_size(buf);
u16 num_groups_removed = 0;
int status;
@@ -2219,7 +2219,7 @@ int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
u16 buf_len = __struct_size(buf);
struct ice_sched_node *node;
u16 i, grps_movd = 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index f84bab80ca423..b4ea935e83005 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1812,7 +1812,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
enum ice_sw_lkup_type lkup_type,
enum ice_adminq_opc opc)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
u16 buf_len = __struct_size(sw_buf);
struct ice_aqc_res_elem *vsi_ele;
int status;
@@ -2025,12 +2025,12 @@ error_out:
* ice_aq_map_recipe_to_profile - Map recipe to packet profile
* @hw: pointer to the HW struct
* @profile_id: package profile ID to associate the recipe with
- * @r_bitmap: Recipe bitmap filled in and need to be returned as response
+ * @r_assoc: Recipe bitmap to associate with the profile
* @cd: pointer to command details structure or NULL
* Recipe to profile association (0x0291)
*/
int
-ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
@@ -2042,7 +2042,7 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
/* Set the recipe ID bit in the bitmask to let the device know which
* profile we are associating the recipe to
*/
- memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
+ cmd->recipe_assoc = cpu_to_le64(r_assoc);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -2051,12 +2051,12 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* ice_aq_get_recipe_to_profile - Map recipe to packet profile
* @hw: pointer to the HW struct
* @profile_id: package profile ID to associate the recipe with
- * @r_bitmap: Recipe bitmap filled in and need to be returned as response
+ * @r_assoc: Recipe bitmap filled in and returned as the response
* @cd: pointer to command details structure or NULL
* Associate profile ID with given recipe (0x0293)
*/
int
-ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
@@ -2069,7 +2069,7 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (!status)
- memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
+ *r_assoc = le64_to_cpu(cmd->recipe_assoc);
return status;
}
@@ -2081,7 +2081,7 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
*/
int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
u16 buf_len = __struct_size(sw_buf);
int status;
@@ -2108,6 +2108,7 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
static void ice_get_recp_to_prof_map(struct ice_hw *hw)
{
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u64 recp_assoc;
u16 i;
for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
@@ -2115,8 +2116,9 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw)
bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
- if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
+ if (ice_aq_get_recipe_to_profile(hw, i, &recp_assoc, NULL))
continue;
+ bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
bitmap_copy(profile_to_recipe[i], r_bitmap,
ICE_MAX_NUM_RECIPES);
for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
@@ -4418,7 +4420,7 @@ int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 *counter_id)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
int status;
@@ -4446,7 +4448,7 @@ int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
int status;
@@ -4476,7 +4478,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
*/
int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
u16 res_type;
int status;
@@ -5390,22 +5392,24 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
*/
list_for_each_entry(fvit, &rm->fv_list, list_entry) {
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u64 recp_assoc;
u16 j;
status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
- (u8 *)r_bitmap, NULL);
+ &recp_assoc, NULL);
if (status)
goto err_unroll;
+ bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
ICE_MAX_NUM_RECIPES);
status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
if (status)
goto err_unroll;
+ bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES);
status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
- (u8 *)r_bitmap,
- NULL);
+ recp_assoc, NULL);
ice_release_change_lock(hw);
if (status)
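Since the admin queue field is now a plain little-endian u64 while the driver keeps working in bitmaps, the conversion pattern used above is worth seeing in isolation. A hedged sketch; `hw`, `profile_id` and `status` are assumed from the surrounding context, and ICE_MAX_NUM_RECIPES is assumed to fit in 64 bits:

/* Hedged sketch of the round trip between the driver's recipe bitmap and
 * the firmware's 64-bit association word.
 */
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
u64 recp_assoc;

/* firmware -> driver: read the association and expand it into a bitmap */
status = ice_aq_get_recipe_to_profile(hw, profile_id, &recp_assoc, NULL);
if (!status)
	bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);

/* driver -> firmware: pack the (possibly updated) bitmap back into a u64 */
bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES);
status = ice_aq_map_recipe_to_profile(hw, profile_id, recp_assoc, NULL);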
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index db7e501b7e0a4..89ffa1b51b5ad 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -424,10 +424,10 @@ int ice_aq_add_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 num_recipes, struct ice_sq_cd *cd);
int
-ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
struct ice_sq_cd *cd);
int
-ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
struct ice_sq_cd *cd);
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index b890410a2bc0b..688ccb0615ab9 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -28,6 +28,8 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
* - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
* - Tunnel flag (present if tunnel)
*/
+ if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
+ lkups_cnt++;
if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
lkups_cnt++;
@@ -363,6 +365,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
/* Always add direction metadata */
ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
+ if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+ ice_rule_add_src_vsi_metadata(&list[i]);
+ i++;
+ }
+
rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
if (tc_fltr->tunnel_type != TNL_LAST) {
i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
@@ -772,7 +779,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
int ret;
int i;
- if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
+ if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
return -EOPNOTSUPP;
}
@@ -820,6 +827,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
/* specify the cookie as filter_rule_id */
rule_info.fltr_rule_id = fltr->cookie;
+ rule_info.src_vsi = vsi->idx;
ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
if (ret == -EEXIST) {
@@ -1481,7 +1489,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
- BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
return -EOPNOTSUPP;
} else {
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
index 80dc4bcdd3a41..b3e1bdcb80f84 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -26,24 +26,22 @@ static void ice_port_vlan_on(struct ice_vsi *vsi)
struct ice_vsi_vlan_ops *vlan_ops;
struct ice_pf *pf = vsi->back;
- if (ice_is_dvm_ena(&pf->hw)) {
- vlan_ops = &vsi->outer_vlan_ops;
-
- /* setup outer VLAN ops */
- vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
- vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
- /* setup inner VLAN ops */
- vlan_ops = &vsi->inner_vlan_ops;
+ if (ice_is_dvm_ena(&pf->hw)) {
vlan_ops->add_vlan = noop_vlan_arg;
vlan_ops->del_vlan = noop_vlan_arg;
vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
- } else {
- vlan_ops = &vsi->inner_vlan_ops;
+ /* setup outer VLAN ops */
+ vlan_ops = &vsi->outer_vlan_ops;
+ vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+ vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
+ } else {
vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 6dd7a66bb8979..f5bc4a2780745 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2941,6 +2941,8 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
+ skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
+
decoded = rxq->vport->rx_ptype_lkup[rx_ptype];
/* If we don't know the ptype we can't do anything else with it. Just
* pass it up the stack as-is.
@@ -2951,8 +2953,6 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
/* process RSS/hash */
idpf_rx_hash(rxq, skb, rx_desc, &decoded);
- skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
-
if (le16_get_bits(rx_desc->hdrlen_flags,
VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
return idpf_rx_rsc(rxq, skb, rx_desc, &decoded);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 2e1cfbd82f4fd..35ad40a803cb6 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1642,10 +1642,6 @@ done:
if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) &&
skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- /* FIXME: add support for retrieving timestamps from
- * the other timer registers before skipping the
- * timestamping request.
- */
unsigned long flags;
u32 tstamp_flags;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 13a6fca31004a..866024f2b9eeb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -914,7 +914,13 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
goto err_out;
}
- xs = kzalloc(sizeof(*xs), GFP_KERNEL);
+ algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
+ if (unlikely(!algo)) {
+ err = -ENOENT;
+ goto err_out;
+ }
+
+ xs = kzalloc(sizeof(*xs), GFP_ATOMIC);
if (unlikely(!xs)) {
err = -ENOMEM;
goto err_out;
@@ -930,14 +936,8 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4));
xs->xso.dev = adapter->netdev;
- algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
- if (unlikely(!algo)) {
- err = -ENOENT;
- goto err_xs;
- }
-
aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8;
- xs->aead = kzalloc(aead_len, GFP_KERNEL);
+ xs->aead = kzalloc(aead_len, GFP_ATOMIC);
if (unlikely(!xs->aead)) {
err = -ENOMEM;
goto err_xs;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 6c70c84986904..b86f3224f0b78 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -808,6 +808,11 @@ static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cfg |= rx_pause ? CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
@@ -1338,7 +1343,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
/* Release thread waiting for completion */
lmac->cmd_pend = false;
- wake_up_interruptible(&lmac->wq_cmd_cmplt);
+ wake_up(&lmac->wq_cmd_cmplt);
break;
case CGX_EVT_ASYNC:
if (cgx_event_is_linkevent(event))
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index b92264d0a77e7..1e5aa53975040 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -214,11 +214,12 @@ int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
}
EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
-void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
{
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
void *hw_mbase = mdev->hwbase;
+ u64 intr_val;
tx_hdr = hw_mbase + mbox->tx_start;
rx_hdr = hw_mbase + mbox->rx_start;
@@ -254,14 +255,52 @@ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
spin_unlock(&mdev->mbox_lock);
+ /* Check if interrupt pending */
+ intr_val = readq((void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+
+ intr_val |= data;
/* The interrupt should be fired after num_msgs is written
* to the shared memory
*/
- writeq(1, (void __iomem *)mbox->reg_base +
+ writeq(intr_val, (void __iomem *)mbox->reg_base +
(mbox->trigger | (devid << mbox->tr_shift)));
}
+
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+{
+ otx2_mbox_msg_send_data(mbox, devid, MBOX_DOWN_MSG);
+}
EXPORT_SYMBOL(otx2_mbox_msg_send);
+void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid)
+{
+ otx2_mbox_msg_send_data(mbox, devid, MBOX_UP_MSG);
+}
+EXPORT_SYMBOL(otx2_mbox_msg_send_up);
+
+bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid)
+{
+ u64 data;
+
+ data = readq((void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+
+ /* If data is non-zero, wait for ~1ms and report back to the caller
+ * whether it has dropped to zero after the wait.
+ */
+ if (!data)
+ return true;
+
+ usleep_range(950, 1000);
+
+ data = readq((void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+
+ return data == 0;
+}
+EXPORT_SYMBOL(otx2_mbox_wait_for_zero);
+
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
int size, int size_rsp)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 61ab7f66f053c..eb2a20b5a0d0c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -16,6 +16,9 @@
#define MBOX_SIZE SZ_64K
+#define MBOX_DOWN_MSG 1
+#define MBOX_UP_MSG 2
+
/* AF/PF: PF initiated, PF/VF VF initiated */
#define MBOX_DOWN_RX_START 0
#define MBOX_DOWN_RX_SIZE (46 * SZ_1K)
@@ -101,6 +104,7 @@ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
struct pci_dev *pdev, void __force *reg_base,
int direction, int ndevs, unsigned long *bmap);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
+void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
@@ -118,6 +122,8 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
}
+bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid);
+
/* Mailbox message types */
#define MBOX_MSG_MASK 0xFFFF
#define MBOX_MSG_INVALID 0xFFFE
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index dfd23580e3b8e..d39d86e694ccf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -121,13 +121,17 @@ int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
{
struct mcs_intr_info *req;
- int err, pf;
+ int pf;
pf = rvu_get_pf(event->pcifunc);
+ mutex_lock(&rvu->mbox_lock);
+
req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
- if (!req)
+ if (!req) {
+ mutex_unlock(&rvu->mbox_lock);
return -ENOMEM;
+ }
req->mcs_id = event->mcs_id;
req->intr_mask = event->intr_mask;
@@ -135,10 +139,11 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
req->hdr.pcifunc = event->pcifunc;
req->lmac_id = event->lmac_id;
- otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
- err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
- if (err)
- dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);
+ otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
+
+ otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+
+ mutex_unlock(&rvu->mbox_lock);
return 0;
}
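The MCS notification path above drops the otx2_mbox_wait_for_rsp() call: under rvu->mbox_lock it waits only for the previous doorbell to clear, sends the up-message, and returns, leaving the PF to acknowledge asynchronously. A sketch of that ordering with stubbed mailbox helpers; a pthread mutex stands in for the kernel mutex and every name here is illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct intr_msg { int mcs_id; int intr_mask; };

static pthread_mutex_t mbox_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stubs for the mailbox primitives the driver path relies on. */
static struct intr_msg *alloc_up_msg(void) { static struct intr_msg m; return &m; }
static bool wait_for_zero(void) { return true; }      /* previous doorbell drained */
static void send_up(void) { puts("up-message doorbell rung"); }

static int notify_pf(int mcs_id, int intr_mask)
{
    struct intr_msg *req;

    pthread_mutex_lock(&mbox_lock);

    req = alloc_up_msg();
    if (!req) {
        pthread_mutex_unlock(&mbox_lock);
        return -1;                       /* -ENOMEM in the driver */
    }
    req->mcs_id = mcs_id;
    req->intr_mask = intr_mask;

    wait_for_zero();                     /* let the PF drain the last doorbell */
    send_up();                           /* fire and forget; no wait_for_rsp() */

    pthread_mutex_unlock(&mbox_lock);
    return 0;
}

int main(void) { return notify_pf(0, 0x1); }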
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 07d4859de53ad..ff78251f92d44 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2119,7 +2119,7 @@ bad_message:
}
}
-static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
+static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
{
struct rvu *rvu = mwork->rvu;
int offset, err, id, devid;
@@ -2186,6 +2186,9 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
}
mw->mbox_wrk[devid].num_msgs = 0;
+ if (poll)
+ otx2_mbox_wait_for_zero(mbox, devid);
+
/* Send mbox responses to VF/PF */
otx2_mbox_msg_send(mbox, devid);
}
@@ -2193,15 +2196,18 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
static inline void rvu_afpf_mbox_handler(struct work_struct *work)
{
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+ struct rvu *rvu = mwork->rvu;
- __rvu_mbox_handler(mwork, TYPE_AFPF);
+ mutex_lock(&rvu->mbox_lock);
+ __rvu_mbox_handler(mwork, TYPE_AFPF, true);
+ mutex_unlock(&rvu->mbox_lock);
}
static inline void rvu_afvf_mbox_handler(struct work_struct *work)
{
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
- __rvu_mbox_handler(mwork, TYPE_AFVF);
+ __rvu_mbox_handler(mwork, TYPE_AFVF, false);
}
static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
@@ -2376,6 +2382,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
}
}
+ mutex_init(&rvu->mbox_lock);
+
mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
if (!mbox_regions) {
err = -ENOMEM;
@@ -2525,10 +2533,9 @@ static void rvu_queue_work(struct mbox_wq_info *mw, int first,
}
}
-static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq)
{
struct rvu *rvu = (struct rvu *)rvu_irq;
- int vfs = rvu->vfs;
u64 intr;
intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
@@ -2542,6 +2549,18 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int vfs = rvu->vfs;
+ u64 intr;
+
+ /* Sync with mbox memory region */
+ rmb();
+
/* Handle VF interrupts */
if (vfs > 64) {
intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
@@ -2886,7 +2905,7 @@ static int rvu_register_interrupts(struct rvu *rvu)
/* Register mailbox interrupt handler */
sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
- rvu_mbox_intr_handler, 0,
+ rvu_mbox_pf_intr_handler, 0,
&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
if (ret) {
dev_err(rvu->dev,
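The AF mailbox interrupt is split above: the newly named rvu_mbox_pf_intr_handler() services only PF-to-AF doorbells, while the remaining handler covers the VF ranges and now issues rmb() so the mailbox region written by the other side is observed before the queued work inspects it. A rough userspace analogue of that read-barrier-then-read pattern, using a C11 acquire fence in place of rmb(); this is purely illustrative:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct mbox_hdr { uint16_t num_msgs; };

static struct mbox_hdr shared_hdr;          /* memory filled by the producer */
static atomic_uint_fast64_t intr_pending;   /* doorbell / interrupt cause   */

/* Producer: publish the message count, then raise the doorbell. */
static void producer(void)
{
    shared_hdr.num_msgs = 3;
    atomic_store_explicit(&intr_pending, 1, memory_order_release);
}

/* Consumer (interrupt handler): read the doorbell, then fence before
 * touching the shared header - the fence plays the role of rmb(). */
static void irq_handler(void)
{
    if (!atomic_load_explicit(&intr_pending, memory_order_relaxed))
        return;
    atomic_thread_fence(memory_order_acquire);
    printf("num_msgs = %u\n", shared_hdr.num_msgs);
}

int main(void)
{
    producer();
    irq_handler();
    return 0;
}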
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index f390525a62177..35834687e40fe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -591,6 +591,8 @@ struct rvu {
spinlock_t mcs_intrq_lock;
/* CPT interrupt lock */
spinlock_t cpt_intr_lock;
+
+ struct mutex mbox_lock; /* Serialize mbox up and down msgs */
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 38acdc7a73bbe..e9bf9231b0185 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -160,6 +160,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
continue;
lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ if (iter >= MAX_LMAC_COUNT)
+ continue;
lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
iter);
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
@@ -232,7 +234,7 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
struct cgx_link_user_info *linfo;
struct cgx_link_info_msg *msg;
unsigned long pfmap;
- int err, pfid;
+ int pfid;
linfo = &event->link_uinfo;
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
@@ -255,16 +257,22 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
continue;
}
+ mutex_lock(&rvu->mbox_lock);
+
/* Send mbox message to PF */
msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
- if (!msg)
+ if (!msg) {
+ mutex_unlock(&rvu->mbox_lock);
continue;
+ }
+
msg->link_info = *linfo;
- otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
- err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
- if (err)
- dev_warn(rvu->dev, "notification to pf %d failed\n",
- pfid);
+
+ otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pfid);
+
+ otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);
+
+ mutex_unlock(&rvu->mbox_lock);
} while (pfmap);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index d39001cdc707e..00af8888e3291 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -4819,18 +4819,18 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
*/
rvu_write64(rvu, blkaddr, NIX_AF_CFG,
rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
+ }
- /* Set chan/link to backpressure TL3 instead of TL2 */
- rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+ /* Set chan/link to backpressure TL3 instead of TL2 */
+ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
- /* Disable SQ manager's sticky mode operation (set TM6 = 0)
- * This sticky mode is known to cause SQ stalls when multiple
- * SQs are mapped to same SMQ and transmitting pkts at a time.
- */
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
- cfg &= ~BIT_ULL(15);
- rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
- }
+ /* Disable SQ manager's sticky mode operation (set TM6 = 0)
+ * This sticky mode is known to cause SQ stalls when multiple
+ * SQs are mapped to same SMQ and transmitting pkts at a time.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+ cfg &= ~BIT_ULL(15);
+ rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
ltdefs = rvu->kpu.lt_def;
/* Calibrate X2P bus to check if CGX/LBK links are fine */
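The rvu_nix.c hunk only moves a closing brace: the NIX_AF_PSE_CHANNEL_LEVEL write and the SQ manager sticky-mode disable used to sit inside a silicon-specific conditional and therefore ran only on the affected revision; after the change they execute unconditionally during block init. The shape of that bug in miniature, with generic names rather than the driver's:

#include <stdbool.h>
#include <stdio.h>

static void init_block(bool has_errata)
{
    if (has_errata) {
        puts("apply errata workaround");
    }                       /* brace belongs here ... */

    /* ... so the common configuration runs for every revision,
     * not only when the errata workaround was applied. */
    puts("set channel backpressure level");
    puts("disable SQ manager sticky mode");
}

int main(void)
{
    init_block(false);      /* common config must still happen */
    return 0;
}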
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index e350242bbafba..be709f83f3318 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -1657,7 +1657,7 @@ static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
struct npc_coalesced_kpu_prfl *img_data = NULL;
int i = 0, rc = -EINVAL;
void __iomem *kpu_prfl_addr;
- u16 offset;
+ u32 offset;
img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
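The one-liner in rvu_npc.c widens the running offset from u16 to u32: once the coalesced KPU profile image crosses 64 KiB, a 16-bit accumulator silently wraps and the later profiles are read from the wrong place. A tiny demonstration of the wraparound, using illustrative sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t off16 = 0;
    uint32_t off32 = 0;
    uint32_t prfl_sz[] = { 40000, 30000 };   /* two profiles, > 64 KiB total */

    for (int i = 0; i < 2; i++) {
        off16 += (uint16_t)prfl_sz[i];
        off32 += prfl_sz[i];
    }
    printf("u16 offset: %u (wrapped)\n", off16);   /* 4464  */
    printf("u32 offset: %u\n", off32);             /* 70000 */
    return 0;
}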
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 02d0b707aea5b..a85ac039d779b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1592,7 +1592,7 @@ int otx2_detach_resources(struct mbox *mbox)
detach->partial = false;
/* Send detach request to AF */
- otx2_mbox_msg_send(&mbox->mbox, 0);
+ otx2_sync_mbox_msg(mbox);
mutex_unlock(&mbox->lock);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 06910307085ef..7e16a341ec588 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -815,7 +815,7 @@ static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
return 0;
- otx2_mbox_msg_send(&mbox->mbox_up, devid);
+ otx2_mbox_msg_send_up(&mbox->mbox_up, devid);
err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
if (err)
return err;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index e5fe67e738655..3f46d5e0fb2ec 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -292,8 +292,8 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
return 0;
}
-static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
- int first, int mdevs, u64 intr, int type)
+static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+ int first, int mdevs, u64 intr)
{
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
@@ -307,40 +307,26 @@ static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
mbox = &mw->mbox;
mdev = &mbox->dev[i];
- if (type == TYPE_PFAF)
- otx2_sync_mbox_bbuf(mbox, i);
hdr = mdev->mbase + mbox->rx_start;
/* The hdr->num_msgs is set to zero immediately in the interrupt
- * handler to ensure that it holds a correct value next time
- * when the interrupt handler is called.
- * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
- * pf>mbox.up_num_msgs holds the data for use in
- * pfaf_mbox_up_handler.
+ * handler to ensure that it holds a correct value next time
+ * when the interrupt handler is called. pf->mw[i].num_msgs
+ * holds the data for use in otx2_pfvf_mbox_handler and
+ * pf->mw[i].up_num_msgs holds the data for use in
+ * otx2_pfvf_mbox_up_handler.
*/
if (hdr->num_msgs) {
mw[i].num_msgs = hdr->num_msgs;
hdr->num_msgs = 0;
- if (type == TYPE_PFAF)
- memset(mbox->hwbase + mbox->rx_start, 0,
- ALIGN(sizeof(struct mbox_hdr),
- sizeof(u64)));
-
queue_work(mbox_wq, &mw[i].mbox_wrk);
}
mbox = &mw->mbox_up;
mdev = &mbox->dev[i];
- if (type == TYPE_PFAF)
- otx2_sync_mbox_bbuf(mbox, i);
hdr = mdev->mbase + mbox->rx_start;
if (hdr->num_msgs) {
mw[i].up_num_msgs = hdr->num_msgs;
hdr->num_msgs = 0;
- if (type == TYPE_PFAF)
- memset(mbox->hwbase + mbox->rx_start, 0,
- ALIGN(sizeof(struct mbox_hdr),
- sizeof(u64)));
-
queue_work(mbox_wq, &mw[i].mbox_up_wrk);
}
}
@@ -356,8 +342,10 @@ static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
/* Msgs are already copied, trigger VF's mbox irq */
smp_wmb();
+ otx2_mbox_wait_for_zero(pfvf_mbox, devid);
+
offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
- writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);
+ writeq(MBOX_DOWN_MSG, (void __iomem *)pfvf_mbox->reg_base + offset);
/* Restore VF's mbox bounce buffer region address */
src_mdev->mbase = bbuf_base;
@@ -547,7 +535,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
end:
offset = mbox->rx_start + msg->next_msgoff;
if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
- __otx2_mbox_reset(mbox, 0);
+ __otx2_mbox_reset(mbox, vf_idx);
mdev->msgs_acked++;
}
}
@@ -564,8 +552,7 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
if (vfs > 64) {
intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
- otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
- TYPE_PFVF);
+ otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr);
if (intr)
trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
vfs = 64;
@@ -574,7 +561,7 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
- otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
+ otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr);
if (intr)
trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
@@ -597,8 +584,9 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
if (!pf->mbox_pfvf)
return -ENOMEM;
- pf->mbox_pfvf_wq = alloc_ordered_workqueue("otx2_pfvf_mailbox",
- WQ_HIGHPRI | WQ_MEM_RECLAIM);
+ pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 0);
if (!pf->mbox_pfvf_wq)
return -ENOMEM;
@@ -821,20 +809,22 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work)
struct mbox *af_mbox;
struct otx2_nic *pf;
int offset, id;
+ u16 num_msgs;
af_mbox = container_of(work, struct mbox, mbox_wrk);
mbox = &af_mbox->mbox;
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ num_msgs = rsp_hdr->num_msgs;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
pf = af_mbox->pfvf;
- for (id = 0; id < af_mbox->num_msgs; id++) {
+ for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2_process_pfaf_mbox_msg(pf, msg);
offset = mbox->rx_start + msg->next_msgoff;
- if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
+ if (mdev->msgs_acked == (num_msgs - 1))
__otx2_mbox_reset(mbox, 0);
mdev->msgs_acked++;
}
@@ -945,12 +935,14 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
int offset, id, devid = 0;
struct mbox_hdr *rsp_hdr;
struct mbox_msghdr *msg;
+ u16 num_msgs;
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ num_msgs = rsp_hdr->num_msgs;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < af_mbox->up_num_msgs; id++) {
+ for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
@@ -959,10 +951,11 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
otx2_process_mbox_msg_up(pf, msg);
offset = mbox->rx_start + msg->next_msgoff;
}
- if (devid) {
+ /* Forward to VF iff VFs are really present */
+ if (devid && pci_num_vf(pf->pdev)) {
otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
MBOX_DIR_PFVF_UP, devid - 1,
- af_mbox->up_num_msgs);
+ num_msgs);
return;
}
@@ -972,16 +965,49 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
{
struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
- struct mbox *mbox;
+ struct mbox *mw = &pf->mbox;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ u64 mbox_data;
/* Clear the IRQ */
otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
- mbox = &pf->mbox;
- trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));
+ mbox_data = otx2_read64(pf, RVU_PF_PFAF_MBOX0);
+
+ if (mbox_data & MBOX_UP_MSG) {
+ mbox_data &= ~MBOX_UP_MSG;
+ otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
+
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(pf->mbox_wq, &mw->mbox_up_wrk);
+
+ trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
+ BIT_ULL(0));
+ }
+
+ if (mbox_data & MBOX_DOWN_MSG) {
+ mbox_data &= ~MBOX_DOWN_MSG;
+ otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
+
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(pf->mbox_wq, &mw->mbox_wrk);
- otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
+ trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
+ BIT_ULL(0));
+ }
return IRQ_HANDLED;
}
@@ -1907,7 +1933,7 @@ int otx2_open(struct net_device *netdev)
* mcam entries are enabled to receive the packets. Hence disable the
* packet I/O.
*/
- if (err == EIO)
+ if (err == -EIO)
goto err_disable_rxtx;
else if (err)
goto err_tx_stop_queues;
@@ -3087,6 +3113,7 @@ static void otx2_vf_link_event_task(struct work_struct *work)
struct otx2_vf_config *config;
struct cgx_link_info_msg *req;
struct mbox_msghdr *msghdr;
+ struct delayed_work *dwork;
struct otx2_nic *pf;
int vf_idx;
@@ -3095,10 +3122,24 @@ static void otx2_vf_link_event_task(struct work_struct *work)
vf_idx = config - config->pf->vf_configs;
pf = config->pf;
+ if (config->intf_down)
+ return;
+
+ mutex_lock(&pf->mbox.lock);
+
+ dwork = &config->link_event_work;
+
+ if (!otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx)) {
+ schedule_delayed_work(dwork, msecs_to_jiffies(100));
+ mutex_unlock(&pf->mbox.lock);
+ return;
+ }
+
msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
sizeof(*req), sizeof(struct msg_rsp));
if (!msghdr) {
dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
+ mutex_unlock(&pf->mbox.lock);
return;
}
@@ -3107,7 +3148,11 @@ static void otx2_vf_link_event_task(struct work_struct *work)
req->hdr.sig = OTX2_MBOX_REQ_SIG;
memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
+ otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx);
+
otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
+
+ mutex_unlock(&pf->mbox.lock);
}
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
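Among the otx2_pf.c changes above, the smallest one fixes a sign convention: kernel-style helpers return a negative errno, so the old "err == EIO" test could never be true and the branch meant to disable packet I/O on an AF EIO reply was dead code. A compact sketch of the negative-errno convention the fix restores:

#include <errno.h>
#include <stdio.h>

/* Kernel-style helpers return 0 on success or a negative errno. */
static int config_hw(int fail) { return fail ? -EIO : 0; }

int main(void)
{
    int err = config_hw(1);

    if (err == EIO)              /* never true: err is -EIO, not EIO */
        puts("buggy branch");
    if (err == -EIO)             /* the corrected comparison */
        puts("disable packet I/O and bail out");
    return 0;
}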
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 87bdb93cb066e..f4655a8c0705d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -689,6 +689,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control match;
+ u32 val;
flow_rule_match_control(rule, &match);
if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
@@ -697,12 +698,14 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
if (ntohs(flow_spec->etype) == ETH_P_IP) {
- flow_spec->ip_flag = IPV4_FLAG_MORE;
+ flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
flow_mask->ip_flag = IPV4_FLAG_MORE;
req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
- flow_spec->next_header = IPPROTO_FRAGMENT;
+ flow_spec->next_header = val ?
+ IPPROTO_FRAGMENT : 0;
flow_mask->next_header = 0xff;
req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
} else {
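In the tc hunk above, the driver used to program the "is fragment" match unconditionally once the mask bit was set, so a rule written to match non-fragmented traffic still matched fragments. The fix consults the key bit as well and programs either IPV4_FLAG_MORE / IPPROTO_FRAGMENT or zero. The key/mask semantics in a few lines, with illustrative values only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_IS_FRAGMENT 0x1u

/* A masked match: packet flags must equal the key on the bits named by the mask. */
static bool rule_hits(uint32_t key, uint32_t mask, uint32_t packet_flags)
{
    return (packet_flags & mask) == (key & mask);
}

int main(void)
{
    uint32_t mask = FLAG_IS_FRAGMENT;
    uint32_t key_not_frag = 0;                 /* "match non-fragments" */

    printf("fragment hits non-frag rule: %d\n",
           rule_hits(key_not_frag, mask, FLAG_IS_FRAGMENT));   /* 0 */
    printf("plain packet hits non-frag rule: %d\n",
           rule_hits(key_not_frag, mask, 0));                  /* 1 */
    return 0;
}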
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 35e06048356f4..cf0aa16d75407 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -89,16 +89,20 @@ static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
struct otx2_mbox *mbox;
struct mbox *af_mbox;
int offset, id;
+ u16 num_msgs;
af_mbox = container_of(work, struct mbox, mbox_wrk);
mbox = &af_mbox->mbox;
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (af_mbox->num_msgs == 0)
+ num_msgs = rsp_hdr->num_msgs;
+
+ if (num_msgs == 0)
return;
+
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < af_mbox->num_msgs; id++) {
+ for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
offset = mbox->rx_start + msg->next_msgoff;
@@ -151,6 +155,7 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
struct mbox *vf_mbox;
struct otx2_nic *vf;
int offset, id;
+ u16 num_msgs;
vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
vf = vf_mbox->pfvf;
@@ -158,12 +163,14 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (vf_mbox->up_num_msgs == 0)
+ num_msgs = rsp_hdr->num_msgs;
+
+ if (num_msgs == 0)
return;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < vf_mbox->up_num_msgs; id++) {
+ for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2vf_process_mbox_msg_up(vf, msg);
offset = mbox->rx_start + msg->next_msgoff;
@@ -178,40 +185,48 @@ static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
struct mbox_hdr *hdr;
+ u64 mbox_data;
/* Clear the IRQ */
otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+ mbox_data = otx2_read64(vf, RVU_VF_VFPF_MBOX0);
+
/* Read latest mbox data */
smp_rmb();
- /* Check for PF => VF response messages */
- mbox = &vf->mbox.mbox;
- mdev = &mbox->dev[0];
- otx2_sync_mbox_bbuf(mbox, 0);
+ if (mbox_data & MBOX_DOWN_MSG) {
+ mbox_data &= ~MBOX_DOWN_MSG;
+ otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
+
+ /* Check for PF => VF response messages */
+ mbox = &vf->mbox.mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
- trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0));
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
- hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (hdr->num_msgs) {
- vf->mbox.num_msgs = hdr->num_msgs;
- hdr->num_msgs = 0;
- memset(mbox->hwbase + mbox->rx_start, 0,
- ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
- queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
+ trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF to VF",
+ BIT_ULL(0));
}
- /* Check for PF => VF notification messages */
- mbox = &vf->mbox.mbox_up;
- mdev = &mbox->dev[0];
- otx2_sync_mbox_bbuf(mbox, 0);
- hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (hdr->num_msgs) {
- vf->mbox.up_num_msgs = hdr->num_msgs;
- hdr->num_msgs = 0;
- memset(mbox->hwbase + mbox->rx_start, 0,
- ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
- queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+ if (mbox_data & MBOX_UP_MSG) {
+ mbox_data &= ~MBOX_UP_MSG;
+ otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
+
+ /* Check for PF => VF notification messages */
+ mbox = &vf->mbox.mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+
+ trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF to VF",
+ BIT_ULL(0));
}
return IRQ_HANDLED;
@@ -760,8 +775,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
otx2_mcam_flow_del(vf);
otx2_shutdown_tc(vf);
otx2_shutdown_qos(vf);
- otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
+ otx2vf_disable_mbox_intr(vf);
free_percpu(vf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index 1e77bbf5d22a1..1723e9912ae07 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -382,6 +382,7 @@ static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent,
otx2_qos_read_txschq_cfg_tl(node, cfg);
cnt = cfg->static_node_pos[node->level];
cfg->schq_contig_list[node->level][cnt] = node->schq;
+ cfg->schq_index_used[node->level][cnt] = true;
cfg->schq_contig[node->level]++;
cfg->static_node_pos[node->level]++;
otx2_qos_read_txschq_cfg_schq(node, cfg);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index de123350bd46b..caa13b9cedff0 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -677,8 +677,7 @@ static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr_new = mcr_cur;
mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
- MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
- MAC_MCR_RX_FIFO_CLR_DIS;
+ MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_RX_FIFO_CLR_DIS;
/* Only update control register when needed! */
if (mcr_new != mcr_cur)
@@ -694,7 +693,7 @@ static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
phylink_config);
u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
- mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
+ mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
@@ -803,7 +802,7 @@ static void mtk_mac_link_up(struct phylink_config *config,
if (rx_pause)
mcr |= MAC_MCR_FORCE_RX_FC;
- mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
+ mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK;
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index b2a5d9c3733d4..6ce0db3a1a920 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -994,7 +994,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
MTK_PPE_KEEPALIVE_DISABLE) |
FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
- MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
+ MTK_PPE_SCAN_MODE_CHECK_AGE) |
FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
MTK_PPE_ENTRIES_SHIFT);
if (mtk_is_netsys_v2_or_greater(ppe->eth))
@@ -1090,17 +1090,21 @@ int mtk_ppe_stop(struct mtk_ppe *ppe)
mtk_ppe_cache_enable(ppe, false);
- /* disable offload engine */
- ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
- ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
-
/* disable aging */
val = MTK_PPE_TB_CFG_AGE_NON_L4 |
MTK_PPE_TB_CFG_AGE_UNBIND |
MTK_PPE_TB_CFG_AGE_TCP |
MTK_PPE_TB_CFG_AGE_UDP |
- MTK_PPE_TB_CFG_AGE_TCP_FIN;
+ MTK_PPE_TB_CFG_AGE_TCP_FIN |
+ MTK_PPE_TB_CFG_SCAN_MODE;
ppe_clear(ppe, MTK_PPE_TB_CFG, val);
- return mtk_ppe_wait_busy(ppe);
+ if (mtk_ppe_wait_busy(ppe))
+ return -ETIMEDOUT;
+
+ /* disable offload engine */
+ ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
+ ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
+
+ return 0;
}
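mtk_ppe_stop() is reordered above: aging and the scan mode are switched off first, the driver then waits for the PPE to go idle, and only afterwards is the offload engine itself disabled; a busy-wait timeout now surfaces as -ETIMEDOUT rather than leaving a half-disabled engine behind. A generic poll-until-idle-then-disable sketch, where the timeout and register accessors are stand-ins:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int busy_countdown = 2;

static bool hw_busy(void)           { return busy_countdown-- > 0; }
static void hw_disable_aging(void)  { puts("aging/scan disabled"); }
static void hw_disable_engine(void) { puts("offload engine disabled"); }

static int ppe_stop(void)
{
    hw_disable_aging();

    /* Wait for in-flight table work to drain before pulling the plug. */
    for (int tries = 0; hw_busy(); tries++) {
        if (tries > 100)
            return -ETIMEDOUT;    /* leave the engine alone on timeout */
    }

    hw_disable_engine();
    return 0;
}

int main(void)
{
    printf("ppe_stop() = %d\n", ppe_stop());
    return 0;
}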
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index c895e265ae0eb..61334a71058c7 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -1074,13 +1074,13 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
+ mtk_wed_dma_disable(dev);
mtk_wed_set_ext_int(dev, false);
wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
- wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
if (!mtk_wed_get_rx_capa(dev))
return;
@@ -1093,7 +1093,6 @@ static void
mtk_wed_deinit(struct mtk_wed_device *dev)
{
mtk_wed_stop(dev);
- mtk_wed_dma_disable(dev);
wed_clr(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WDMA_INT_AGENT_EN |
@@ -2605,9 +2604,6 @@ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
static void
mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
{
- if (!dev->running)
- return;
-
mtk_wed_set_ext_int(dev, !!mask);
wed_w32(dev, MTK_WED_INT_MASK, mask);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 86f1854698b4e..883c044852f1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -95,9 +95,15 @@ static inline void mlx5e_ptp_metadata_fifo_push(struct mlx5e_ptp_metadata_fifo *
}
static inline u8
+mlx5e_ptp_metadata_fifo_peek(struct mlx5e_ptp_metadata_fifo *fifo)
+{
+ return fifo->data[fifo->mask & fifo->cc];
+}
+
+static inline void
mlx5e_ptp_metadata_fifo_pop(struct mlx5e_ptp_metadata_fifo *fifo)
{
- return fifo->data[fifo->mask & fifo->cc++];
+ fifo->cc++;
}
static inline void
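The ptp.h change splits the metadata free-list access into a peek that only reads the head entry and a pop that advances the consumer counter. Paired with the en_tx.c hunks further down, the index is peeked while the WQE is built and popped only once the descriptor is actually posted, so an aborted transmit no longer leaks an entry or has to push it back. A minimal power-of-two ring with that peek/pop split; this is an illustrative structure, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define FIFO_SIZE 8              /* power of two */

struct meta_fifo {
    uint8_t  data[FIFO_SIZE];
    uint16_t mask;               /* FIFO_SIZE - 1 */
    uint16_t pc, cc;             /* producer / consumer counters */
};

static void fifo_push(struct meta_fifo *f, uint8_t v) { f->data[f->mask & f->pc++] = v; }
static uint8_t fifo_peek(const struct meta_fifo *f)   { return f->data[f->mask & f->cc]; }
static void fifo_pop(struct meta_fifo *f)             { f->cc++; }

int main(void)
{
    struct meta_fifo f = { .mask = FIFO_SIZE - 1 };

    fifo_push(&f, 42);
    uint8_t idx = fifo_peek(&f);     /* use the index while building the WQE */
    printf("peeked %u, still queued\n", idx);
    fifo_pop(&f);                    /* consume only after the post succeeded */
    printf("cc=%u pc=%u\n", f.cc, f.pc);
    return 0;
}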
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index e87e26f2c669c..6743806b84806 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -83,24 +83,25 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
txq_ix = mlx5e_qid_from_qos(chs, node_qid);
- WARN_ON(node_qid > priv->htb_max_qos_sqs);
- if (node_qid == priv->htb_max_qos_sqs) {
- struct mlx5e_sq_stats *stats, **stats_list = NULL;
-
- if (priv->htb_max_qos_sqs == 0) {
- stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
- sizeof(*stats_list),
- GFP_KERNEL);
- if (!stats_list)
- return -ENOMEM;
- }
+ WARN_ON(node_qid >= mlx5e_htb_cur_leaf_nodes(priv->htb));
+ if (!priv->htb_qos_sq_stats) {
+ struct mlx5e_sq_stats **stats_list;
+
+ stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
+ sizeof(*stats_list), GFP_KERNEL);
+ if (!stats_list)
+ return -ENOMEM;
+
+ WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+ }
+
+ if (!priv->htb_qos_sq_stats[node_qid]) {
+ struct mlx5e_sq_stats *stats;
+
stats = kzalloc(sizeof(*stats), GFP_KERNEL);
- if (!stats) {
- kvfree(stats_list);
+ if (!stats)
return -ENOMEM;
- }
- if (stats_list)
- WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+
WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
/* Order htb_max_qos_sqs increment after writing the array pointer.
* Pairs with smp_load_acquire in en_stats.c.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 0ab9db3195302..22918b2ef7f12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -108,7 +108,10 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
mlx5e_reset_txqsq_cc_pc(sq);
sq->stats->recover++;
clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
+ rtnl_lock();
mlx5e_activate_txqsq(sq);
+ rtnl_unlock();
+
if (sq->channel)
mlx5e_trigger_napi_icosq(sq->channel);
else
@@ -179,12 +182,16 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);
+ rtnl_lock();
mlx5e_deactivate_priv_channels(priv);
+ rtnl_unlock();
mlx5e_ptp_close(chs->ptp);
err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
+ rtnl_lock();
mlx5e_activate_priv_channels(priv);
+ rtnl_unlock();
/* return carrier back if needed */
if (carrier_ok)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
index bcafb4bf94154..8d9a3b5ec973b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
@@ -179,6 +179,13 @@ u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels)
return min_t(u32, rqt_size, max_cap_rqt_size);
}
+#define MLX5E_MAX_RQT_SIZE_ALLOWED_WITH_XOR8_HASH 256
+
+unsigned int mlx5e_rqt_max_num_channels_allowed_for_xor8(void)
+{
+ return MLX5E_MAX_RQT_SIZE_ALLOWED_WITH_XOR8_HASH / MLX5E_UNIFORM_SPREAD_RQT_FACTOR;
+}
+
void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt)
{
mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
index e0bc30308c770..2f9e04a8418f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
@@ -38,6 +38,7 @@ static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt)
}
u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels);
+unsigned int mlx5e_rqt_max_num_channels_allowed_for_xor8(void);
int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn, u32 *vhca_id);
int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
unsigned int num_rqns,
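The new rqt.c/rqt.h helper caps the channel count when the XOR8 RSS hash is in use: the indirection table for that hash cannot exceed 256 entries, so the maximum channel count is 256 divided by the uniform-spread factor used when sizing the RQT. Assuming, for illustration only, a spread factor of 2 entries per channel (the actual MLX5E_UNIFORM_SPREAD_RQT_FACTOR value is not shown in this diff), the arithmetic is simply:

#include <stdio.h>

#define MAX_RQT_SIZE_XOR8 256
#define SPREAD_FACTOR     2      /* assumption for this example; see the driver header */

int main(void)
{
    printf("max channels with XOR8 hash: %d\n",
           MAX_RQT_SIZE_XOR8 / SPREAD_FACTOR);
    return 0;
}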
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
index f675b1926340f..f66bbc8464645 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
@@ -57,6 +57,7 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
{
+ mutex_lock(selq->state_lock);
WARN_ON_ONCE(selq->is_prepared);
kvfree(selq->standby);
@@ -67,6 +68,7 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
kvfree(selq->standby);
selq->standby = NULL;
+ mutex_unlock(selq->state_lock);
}
void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index c7f542d0b8f08..93cf23278d93c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -46,6 +46,10 @@ struct arfs_table {
struct hlist_head rules_hash[ARFS_HASH_SIZE];
};
+enum {
+ MLX5E_ARFS_STATE_ENABLED,
+};
+
enum arfs_type {
ARFS_IPV4_TCP,
ARFS_IPV6_TCP,
@@ -60,6 +64,7 @@ struct mlx5e_arfs_tables {
spinlock_t arfs_lock;
int last_filter_id;
struct workqueue_struct *wq;
+ unsigned long state;
};
struct arfs_tuple {
@@ -170,6 +175,8 @@ int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
return err;
}
}
+ set_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
+
return 0;
}
@@ -455,6 +462,8 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs)
int i;
int j;
+ clear_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
+
spin_lock_bh(&arfs->arfs_lock);
mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
@@ -627,17 +636,8 @@ static void arfs_handle_work(struct work_struct *work)
struct mlx5_flow_handle *rule;
arfs = mlx5e_fs_get_arfs(priv->fs);
- mutex_lock(&priv->state_lock);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- spin_lock_bh(&arfs->arfs_lock);
- hlist_del(&arfs_rule->hlist);
- spin_unlock_bh(&arfs->arfs_lock);
-
- mutex_unlock(&priv->state_lock);
- kfree(arfs_rule);
- goto out;
- }
- mutex_unlock(&priv->state_lock);
+ if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state))
+ return;
if (!arfs_rule->rule) {
rule = arfs_add_rule(priv, arfs_rule);
@@ -753,6 +753,11 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -EPROTONOSUPPORT;
spin_lock_bh(&arfs->arfs_lock);
+ if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state)) {
+ spin_unlock_bh(&arfs->arfs_lock);
+ return -EPERM;
+ }
+
arfs_rule = arfs_find_rule(arfs_t, &fk);
if (arfs_rule) {
if (arfs_rule->rxq == rxq_index || work_busy(&arfs_rule->arfs_work)) {
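The aRFS hunks above replace the priv->state_lock / MLX5E_STATE_OPENED dance with a dedicated MLX5E_ARFS_STATE_ENABLED bit: it is set once the steering tables exist, cleared before the rules are torn down, and both the deferred work and the ndo_rx_flow_steer path check it, so neither can touch tables that are going away. The lifecycle-flag pattern in miniature, with C11 atomics standing in for set_bit/test_bit:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool arfs_enabled;

static int steer_flow(int rxq)
{
    if (!atomic_load(&arfs_enabled))
        return -1;                     /* -EPERM in the driver */
    printf("steer to rxq %d\n", rxq);
    return 0;
}

int main(void)
{
    printf("before enable: %d\n", steer_flow(3));   /* rejected */
    atomic_store(&arfs_enabled, true);              /* tables created */
    printf("after enable:  %d\n", steer_flow(3));   /* accepted */
    atomic_store(&arfs_enabled, false);             /* teardown begins */
    printf("after disable: %d\n", steer_flow(3));   /* rejected again */
    return 0;
}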
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index cc51ce16df14a..67a29826bb570 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -451,6 +451,34 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
+ if (mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc == ETH_RSS_HASH_XOR) {
+ unsigned int xor8_max_channels = mlx5e_rqt_max_num_channels_allowed_for_xor8();
+
+ if (count > xor8_max_channels) {
+ err = -EINVAL;
+ netdev_err(priv->netdev, "%s: Requested number of channels (%d) exceeds the maximum allowed by the XOR8 RSS hfunc (%d)\n",
+ __func__, count, xor8_max_channels);
+ goto out;
+ }
+ }
+
+ /* If RXFH is configured, changing the channels number is allowed only if
+ * it does not require resizing the RSS table. This is because the previous
+ * configuration may no longer be compatible with the new RSS table.
+ */
+ if (netif_is_rxfh_configured(priv->netdev)) {
+ int cur_rqt_size = mlx5e_rqt_size(priv->mdev, cur_params->num_channels);
+ int new_rqt_size = mlx5e_rqt_size(priv->mdev, count);
+
+ if (new_rqt_size != cur_rqt_size) {
+ err = -EINVAL;
+ netdev_err(priv->netdev,
+ "%s: RXFH is configured, block changing channels number that affects RSS table size (new: %d, current: %d)\n",
+ __func__, new_rqt_size, cur_rqt_size);
+ goto out;
+ }
+ }
+
/* Don't allow changing the number of channels if HTB offload is active,
* because the numeration of the QoS SQs will change, while per-queue
* qdiscs are attached.
@@ -561,12 +589,12 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
static void
mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
{
- struct mlx5_core_dev *mdev = priv->mdev;
int tc;
int i;
for (i = 0; i < priv->channels.num; ++i) {
struct mlx5e_channel *c = priv->channels.c[i];
+ struct mlx5_core_dev *mdev = c->mdev;
for (tc = 0; tc < c->num_tc; tc++) {
mlx5_core_modify_cq_moderation(mdev,
@@ -580,11 +608,11 @@ mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coal
static void
mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
{
- struct mlx5_core_dev *mdev = priv->mdev;
int i;
for (i = 0; i < priv->channels.num; ++i) {
struct mlx5e_channel *c = priv->channels.c[i];
+ struct mlx5_core_dev *mdev = c->mdev;
mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
coal->rx_coalesce_usecs,
@@ -1281,17 +1309,30 @@ int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
struct mlx5e_priv *priv = netdev_priv(dev);
u32 *rss_context = &rxfh->rss_context;
u8 hfunc = rxfh->hfunc;
+ unsigned int count;
int err;
mutex_lock(&priv->state_lock);
+
+ count = priv->channels.params.num_channels;
+
+ if (hfunc == ETH_RSS_HASH_XOR) {
+ unsigned int xor8_max_channels = mlx5e_rqt_max_num_channels_allowed_for_xor8();
+
+ if (count > xor8_max_channels) {
+ err = -EINVAL;
+ netdev_err(priv->netdev, "%s: Cannot set RSS hash function to XOR, current number of channels (%d) exceeds the maximum allowed for XOR8 RSS hfunc (%d)\n",
+ __func__, count, xor8_max_channels);
+ goto unlock;
+ }
+ }
+
if (*rss_context && rxfh->rss_delete) {
err = mlx5e_rx_res_rss_destroy(priv->rx_res, *rss_context);
goto unlock;
}
if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- unsigned int count = priv->channels.params.num_channels;
-
err = mlx5e_rx_res_rss_init(priv->rx_res, rss_context, count);
if (err)
goto unlock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 91848eae45655..319930c04093b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -209,8 +209,8 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
*data,
mlx5e_devcom_event_mpv,
priv);
- if (IS_ERR_OR_NULL(priv->devcom))
- return -EOPNOTSUPP;
+ if (IS_ERR(priv->devcom))
+ return PTR_ERR(priv->devcom);
if (mlx5_core_is_mp_master(priv->mdev)) {
mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP,
@@ -5726,9 +5726,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
kfree(priv->tx_rates);
kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
- mutex_lock(&priv->state_lock);
mlx5e_selq_cleanup(&priv->selq);
- mutex_unlock(&priv->state_lock);
free_cpumask_var(priv->scratchpad.cpumask);
for (i = 0; i < priv->htb_max_qos_sqs; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 2fa076b23fbea..e21a3b4128ce8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -398,6 +398,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
+ mlx5e_ptp_metadata_fifo_pop(&sq->ptpsq->metadata_freelist);
+
mlx5e_skb_cb_hwtstamp_init(skb);
mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
metadata_index);
@@ -496,9 +498,6 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
err_drop:
stats->dropped++;
- if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
- mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
- be32_to_cpu(eseg->flow_table_metadata));
dev_kfree_skb_any(skb);
mlx5e_tx_flush(sq);
}
@@ -657,7 +656,7 @@ static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
{
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
eseg->flow_table_metadata =
- cpu_to_be32(mlx5e_ptp_metadata_fifo_pop(&ptpsq->metadata_freelist));
+ cpu_to_be32(mlx5e_ptp_metadata_fifo_peek(&ptpsq->metadata_freelist));
}
static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 3047d7015c525..1789800faaeb6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1868,6 +1868,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
if (err)
goto abort;
+ dev->priv.eswitch = esw;
err = esw_offloads_init(esw);
if (err)
goto reps_err;
@@ -1892,11 +1893,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
- if (MLX5_ESWITCH_MANAGER(dev) &&
- mlx5_esw_vport_match_metadata_supported(esw))
- esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
-
- dev->priv.eswitch = esw;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
esw_info(dev,
@@ -1908,6 +1904,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
reps_err:
mlx5_esw_vports_cleanup(esw);
+ dev->priv.eswitch = NULL;
abort:
if (esw->work_queue)
destroy_workqueue(esw->work_queue);
@@ -1926,7 +1923,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_info(esw->dev, "cleanup\n");
- esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
WARN_ON(refcount_read(&esw->qos.refcnt));
mutex_destroy(&esw->state_lock);
@@ -1937,6 +1933,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
mutex_destroy(&esw->offloads.encap_tbl_lock);
mutex_destroy(&esw->offloads.decap_tbl_lock);
esw_offloads_cleanup(esw);
+ esw->dev->priv.eswitch = NULL;
mlx5_esw_vports_cleanup(esw);
debugfs_remove_recursive(esw->debugfs_root);
devl_params_unregister(priv_to_devlink(esw->dev), mlx5_eswitch_params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index baaae628b0a0f..844d3e3a65ddf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -43,6 +43,7 @@
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
+#include "lib/mlx5.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
@@ -2476,6 +2477,10 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
if (err)
return err;
+ if (MLX5_ESWITCH_MANAGER(esw->dev) &&
+ mlx5_esw_vport_match_metadata_supported(esw))
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+
err = devl_params_register(priv_to_devlink(esw->dev),
esw_devlink_params,
ARRAY_SIZE(esw_devlink_params));
@@ -3055,7 +3060,7 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
key,
mlx5_esw_offloads_devcom_event,
esw);
- if (IS_ERR_OR_NULL(esw->devcom))
+ if (IS_ERR(esw->devcom))
return;
mlx5_devcom_send_event(esw->devcom,
@@ -3707,6 +3712,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && mlx5_get_sd(esw->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't change E-Switch mode to switchdev when multi-PF netdev (Socket Direct) is configured.");
+ return -EPERM;
+ }
+
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index e6bfa7e4f146c..cf085a478e3e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1664,6 +1664,16 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
return err;
}
+static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1,
+ struct mlx5_pkt_reformat *p2)
+{
+ return p1->owner == p2->owner &&
+ (p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ?
+ p1->id == p2->id :
+ mlx5_fs_dr_action_get_pkt_reformat_id(p1) ==
+ mlx5_fs_dr_action_get_pkt_reformat_id(p2));
+}
+
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
struct mlx5_flow_destination *d2)
{
@@ -1675,8 +1685,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
(d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
- (d1->vport.pkt_reformat->id ==
- d2->vport.pkt_reformat->id) : true)) ||
+ mlx5_pkt_reformat_cmp(d1->vport.pkt_reformat,
+ d2->vport.pkt_reformat) : true)) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
d1->ft == d2->ft) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
@@ -1808,8 +1818,9 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
}
trace_mlx5_fs_set_fte(fte, false);
+ /* Link newly added rules into the tree. */
for (i = 0; i < handle->num_rules; i++) {
- if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
+ if (!handle->rule[i]->node.parent) {
tree_add_node(&handle->rule[i]->node, &fte->node);
trace_mlx5_fs_add_rule(handle->rule[i]);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index d14459e5c04fc..69d482f7c5a29 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -703,8 +703,10 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
return err;
}
- if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
mlx5_lag_port_sel_destroy(ldev);
+ ldev->buckets = 1;
+ }
if (mlx5_lag_has_drop_rule(ldev))
mlx5_lag_drop_rule_cleanup(ldev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index e7d59cfa8708e..7b0766c89f4cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -220,7 +220,7 @@ mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
struct mlx5_devcom_comp *comp;
if (IS_ERR_OR_NULL(devc))
- return NULL;
+ return ERR_PTR(-EINVAL);
mutex_lock(&comp_list_lock);
comp = devcom_component_get(devc, id, key, handler);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index 5b28084e8a03c..dd5d186dc6148 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -213,8 +213,8 @@ static int sd_register(struct mlx5_core_dev *dev)
sd = mlx5_get_sd(dev);
devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
sd->group_id, NULL, dev);
- if (!devcom)
- return -ENOMEM;
+ if (IS_ERR(devcom))
+ return PTR_ERR(devcom);
sd->devcom = devcom;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c2593625c09ad..331ce47f51a17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -956,7 +956,7 @@ static void mlx5_register_hca_devcom_comp(struct mlx5_core_dev *dev)
mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_HCA_PORTS,
mlx5_query_nic_system_image_guid(dev),
NULL, dev);
- if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp))
+ if (IS_ERR(dev->priv.hca_devcom_comp))
mlx5_core_err(dev, "Failed to register devcom HCA component\n");
}
@@ -1480,6 +1480,14 @@ int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev)
if (err)
goto err_register;
+ err = mlx5_crdump_enable(dev);
+ if (err)
+ mlx5_core_err(dev, "mlx5_crdump_enable failed with error code %d\n", err);
+
+ err = mlx5_hwmon_dev_register(dev);
+ if (err)
+ mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
+
mutex_unlock(&dev->intf_state_mutex);
return 0;
@@ -1505,7 +1513,10 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
int err;
devl_lock(devlink);
+ devl_register(devlink);
err = mlx5_init_one_devl_locked(dev);
+ if (err)
+ devl_unregister(devlink);
devl_unlock(devlink);
return err;
}
@@ -1517,6 +1528,8 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
devl_lock(devlink);
mutex_lock(&dev->intf_state_mutex);
+ mlx5_hwmon_dev_unregister(dev);
+ mlx5_crdump_disable(dev);
mlx5_unregister_device(dev);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@@ -1534,6 +1547,7 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
mlx5_function_teardown(dev, true);
out:
mutex_unlock(&dev->intf_state_mutex);
+ devl_unregister(devlink);
devl_unlock(devlink);
}
@@ -1680,16 +1694,23 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
}
devl_lock(devlink);
+ devl_register(devlink);
+
err = mlx5_devlink_params_register(priv_to_devlink(dev));
- devl_unlock(devlink);
if (err) {
mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
- goto query_hca_caps_err;
+ goto params_reg_err;
}
+ devl_unlock(devlink);
return 0;
+params_reg_err:
+ devl_unregister(devlink);
+ devl_unlock(devlink);
query_hca_caps_err:
+ devl_unregister(devlink);
+ devl_unlock(devlink);
mlx5_function_disable(dev, true);
out:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
@@ -1702,6 +1723,7 @@ void mlx5_uninit_one_light(struct mlx5_core_dev *dev)
devl_lock(devlink);
mlx5_devlink_params_unregister(priv_to_devlink(dev));
+ devl_unregister(devlink);
devl_unlock(devlink);
if (dev->state != MLX5_DEVICE_STATE_UP)
return;
@@ -1943,16 +1965,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_init_one;
}
- err = mlx5_crdump_enable(dev);
- if (err)
- dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
-
- err = mlx5_hwmon_dev_register(dev);
- if (err)
- mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
-
pci_save_state(pdev);
- devlink_register(devlink);
return 0;
err_init_one:
@@ -1973,16 +1986,9 @@ static void remove_one(struct pci_dev *pdev)
struct devlink *devlink = priv_to_devlink(dev);
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
- /* mlx5_drain_fw_reset() and mlx5_drain_health_wq() are using
- * devlink notify APIs.
- * Hence, we must drain them before unregistering the devlink.
- */
mlx5_drain_fw_reset(dev);
mlx5_drain_health_wq(dev);
- devlink_unregister(devlink);
mlx5_sriov_disable(pdev, false);
- mlx5_hwmon_dev_unregister(dev);
- mlx5_crdump_disable(dev);
mlx5_uninit_one(dev);
mlx5_pci_close(dev);
mlx5_mdev_uninit(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 4dcf995cb1a20..6bac8ad70ba60 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -19,6 +19,7 @@
#define MLX5_IRQ_CTRL_SF_MAX 8
/* min num of vectors for SFs to be enabled */
#define MLX5_IRQ_VEC_COMP_BASE_SF 2
+#define MLX5_IRQ_VEC_COMP_BASE 1
#define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
#define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
@@ -246,6 +247,7 @@ static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
return;
}
+ vecidx -= MLX5_IRQ_VEC_COMP_BASE;
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
}
@@ -585,7 +587,7 @@ struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = table->pcif_pool;
struct irq_affinity_desc af_desc;
- int offset = 1;
+ int offset = MLX5_IRQ_VEC_COMP_BASE;
if (!pool->xa_num_irqs.max)
offset = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index bc863e1f062e6..7ebe712808275 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -75,7 +75,6 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto peer_devlink_set_err;
}
- devlink_register(devlink);
return 0;
peer_devlink_set_err:
@@ -101,7 +100,6 @@ static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
devlink = priv_to_devlink(mdev);
set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
mlx5_drain_health_wq(mdev);
- devlink_unregister(devlink);
if (mlx5_dev_is_lightweight(mdev))
mlx5_uninit_one_light(mdev);
else
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index 64f4cc284aea4..030a5776c9374 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -205,12 +205,11 @@ dr_dump_hex_print(char hex[DR_HEX_SIZE], char *src, u32 size)
}
static int
-dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
+dr_dump_rule_action_mem(struct seq_file *file, char *buff, const u64 rule_id,
struct mlx5dr_rule_action_member *action_mem)
{
struct mlx5dr_action *action = action_mem->action;
const u64 action_id = DR_DBG_PTR_TO_ID(action);
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 hit_tbl_ptr, miss_tbl_ptr;
u32 hit_tbl_id, miss_tbl_id;
int ret;
@@ -488,10 +487,9 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
}
static int
-dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
+dr_dump_rule_mem(struct seq_file *file, char *buff, struct mlx5dr_ste *ste,
bool is_rx, const u64 rule_id, u8 format_ver)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char hw_ste_dump[DR_HEX_SIZE];
u32 mem_rec_type;
int ret;
@@ -522,7 +520,8 @@ dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
}
static int
-dr_dump_rule_rx_tx(struct seq_file *file, struct mlx5dr_rule_rx_tx *rule_rx_tx,
+dr_dump_rule_rx_tx(struct seq_file *file, char *buff,
+ struct mlx5dr_rule_rx_tx *rule_rx_tx,
bool is_rx, const u64 rule_id, u8 format_ver)
{
struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
@@ -533,7 +532,7 @@ dr_dump_rule_rx_tx(struct seq_file *file, struct mlx5dr_rule_rx_tx *rule_rx_tx,
return 0;
while (i--) {
- ret = dr_dump_rule_mem(file, ste_arr[i], is_rx, rule_id,
+ ret = dr_dump_rule_mem(file, buff, ste_arr[i], is_rx, rule_id,
format_ver);
if (ret < 0)
return ret;
@@ -542,7 +541,8 @@ dr_dump_rule_rx_tx(struct seq_file *file, struct mlx5dr_rule_rx_tx *rule_rx_tx,
return 0;
}
-static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
+static noinline_for_stack int
+dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
{
struct mlx5dr_rule_action_member *action_mem;
const u64 rule_id = DR_DBG_PTR_TO_ID(rule);
@@ -565,19 +565,19 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
return ret;
if (rx->nic_matcher) {
- ret = dr_dump_rule_rx_tx(file, rx, true, rule_id, format_ver);
+ ret = dr_dump_rule_rx_tx(file, buff, rx, true, rule_id, format_ver);
if (ret < 0)
return ret;
}
if (tx->nic_matcher) {
- ret = dr_dump_rule_rx_tx(file, tx, false, rule_id, format_ver);
+ ret = dr_dump_rule_rx_tx(file, buff, tx, false, rule_id, format_ver);
if (ret < 0)
return ret;
}
list_for_each_entry(action_mem, &rule->rule_actions_list, list) {
- ret = dr_dump_rule_action_mem(file, rule_id, action_mem);
+ ret = dr_dump_rule_action_mem(file, buff, rule_id, action_mem);
if (ret < 0)
return ret;
}
@@ -586,10 +586,10 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
}
static int
-dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask,
+dr_dump_matcher_mask(struct seq_file *file, char *buff,
+ struct mlx5dr_match_param *mask,
u8 criteria, const u64 matcher_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char dump[DR_HEX_SIZE];
int ret;
@@ -681,10 +681,10 @@ dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask,
}
static int
-dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder,
+dr_dump_matcher_builder(struct seq_file *file, char *buff,
+ struct mlx5dr_ste_build *builder,
u32 index, bool is_rx, const u64 matcher_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
int ret;
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
@@ -702,11 +702,10 @@ dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder,
}
static int
-dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
+dr_dump_matcher_rx_tx(struct seq_file *file, char *buff, bool is_rx,
struct mlx5dr_matcher_rx_tx *matcher_rx_tx,
const u64 matcher_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr, e_icm_addr;
int i, ret;
@@ -731,7 +730,7 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
return ret;
for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
- ret = dr_dump_matcher_builder(file,
+ ret = dr_dump_matcher_builder(file, buff,
&matcher_rx_tx->ste_builder[i],
i, is_rx, matcher_id);
if (ret < 0)
@@ -741,7 +740,7 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
{
struct mlx5dr_matcher_rx_tx *rx = &matcher->rx;
@@ -763,19 +762,19 @@ dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
if (ret)
return ret;
- ret = dr_dump_matcher_mask(file, &matcher->mask,
+ ret = dr_dump_matcher_mask(file, buff, &matcher->mask,
matcher->match_criteria, matcher_id);
if (ret < 0)
return ret;
if (rx->nic_tbl) {
- ret = dr_dump_matcher_rx_tx(file, true, rx, matcher_id);
+ ret = dr_dump_matcher_rx_tx(file, buff, true, rx, matcher_id);
if (ret < 0)
return ret;
}
if (tx->nic_tbl) {
- ret = dr_dump_matcher_rx_tx(file, false, tx, matcher_id);
+ ret = dr_dump_matcher_rx_tx(file, buff, false, tx, matcher_id);
if (ret < 0)
return ret;
}
@@ -803,11 +802,10 @@ dr_dump_matcher_all(struct seq_file *file, struct mlx5dr_matcher *matcher)
}
static int
-dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
+dr_dump_table_rx_tx(struct seq_file *file, char *buff, bool is_rx,
struct mlx5dr_table_rx_tx *table_rx_tx,
const u64 table_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr;
int ret;
@@ -829,7 +827,8 @@ dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
return 0;
}
-static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
+static noinline_for_stack int
+dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
{
struct mlx5dr_table_rx_tx *rx = &table->rx;
struct mlx5dr_table_rx_tx *tx = &table->tx;
@@ -848,14 +847,14 @@ static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
return ret;
if (rx->nic_dmn) {
- ret = dr_dump_table_rx_tx(file, true, rx,
+ ret = dr_dump_table_rx_tx(file, buff, true, rx,
DR_DBG_PTR_TO_ID(table));
if (ret < 0)
return ret;
}
if (tx->nic_dmn) {
- ret = dr_dump_table_rx_tx(file, false, tx,
+ ret = dr_dump_table_rx_tx(file, buff, false, tx,
DR_DBG_PTR_TO_ID(table));
if (ret < 0)
return ret;
@@ -881,10 +880,10 @@ static int dr_dump_table_all(struct seq_file *file, struct mlx5dr_table *tbl)
}
static int
-dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring,
+dr_dump_send_ring(struct seq_file *file, char *buff,
+ struct mlx5dr_send_ring *ring,
const u64 domain_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
int ret;
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
@@ -902,13 +901,13 @@ dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring,
return 0;
}
-static noinline_for_stack int
+static int
dr_dump_domain_info_flex_parser(struct seq_file *file,
+ char *buff,
const char *flex_parser_name,
const u8 flex_parser_value,
const u64 domain_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
int ret;
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
@@ -925,11 +924,11 @@ dr_dump_domain_info_flex_parser(struct seq_file *file,
return 0;
}
-static noinline_for_stack int
-dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps,
+static int
+dr_dump_domain_info_caps(struct seq_file *file, char *buff,
+ struct mlx5dr_cmd_caps *caps,
const u64 domain_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
struct mlx5dr_cmd_vport_cap *vport_caps;
unsigned long i, vports_num;
int ret;
@@ -969,34 +968,35 @@ dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps,
}
static int
-dr_dump_domain_info(struct seq_file *file, struct mlx5dr_domain_info *info,
+dr_dump_domain_info(struct seq_file *file, char *buff,
+ struct mlx5dr_domain_info *info,
const u64 domain_id)
{
int ret;
- ret = dr_dump_domain_info_caps(file, &info->caps, domain_id);
+ ret = dr_dump_domain_info_caps(file, buff, &info->caps, domain_id);
if (ret < 0)
return ret;
- ret = dr_dump_domain_info_flex_parser(file, "icmp_dw0",
+ ret = dr_dump_domain_info_flex_parser(file, buff, "icmp_dw0",
info->caps.flex_parser_id_icmp_dw0,
domain_id);
if (ret < 0)
return ret;
- ret = dr_dump_domain_info_flex_parser(file, "icmp_dw1",
+ ret = dr_dump_domain_info_flex_parser(file, buff, "icmp_dw1",
info->caps.flex_parser_id_icmp_dw1,
domain_id);
if (ret < 0)
return ret;
- ret = dr_dump_domain_info_flex_parser(file, "icmpv6_dw0",
+ ret = dr_dump_domain_info_flex_parser(file, buff, "icmpv6_dw0",
info->caps.flex_parser_id_icmpv6_dw0,
domain_id);
if (ret < 0)
return ret;
- ret = dr_dump_domain_info_flex_parser(file, "icmpv6_dw1",
+ ret = dr_dump_domain_info_flex_parser(file, buff, "icmpv6_dw1",
info->caps.flex_parser_id_icmpv6_dw1,
domain_id);
if (ret < 0)
@@ -1032,12 +1032,12 @@ dr_dump_domain(struct seq_file *file, struct mlx5dr_domain *dmn)
if (ret)
return ret;
- ret = dr_dump_domain_info(file, &dmn->info, domain_id);
+ ret = dr_dump_domain_info(file, buff, &dmn->info, domain_id);
if (ret < 0)
return ret;
if (dmn->info.supp_sw_steering) {
- ret = dr_dump_send_ring(file, dmn->send_ring, domain_id);
+ ret = dr_dump_send_ring(file, buff, dmn->send_ring, domain_id);
if (ret < 0)
return ret;
}
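The dr_dbg changes above all follow one pattern: helpers stop declaring their own MLX5DR_DEBUG_DUMP_BUFF_LENGTH array, a single caller-owned scratch buffer is threaded down the dump call chain, and the outer dumpers are marked noinline_for_stack so the buffer's stack cost stays in one frame. A minimal, self-contained sketch of that pattern (hypothetical names, not the mlx5 API):

#include <stdio.h>

#define DUMP_BUFF_LEN 512

/* Helper uses the caller's scratch buffer instead of declaring its
 * own DUMP_BUFF_LEN array, so nested helpers add no stack cost.
 */
static int dump_one(char *buff, int id)
{
	int n = snprintf(buff, DUMP_BUFF_LEN, "entry %d\n", id);

	if (n < 0 || n >= DUMP_BUFF_LEN)
		return -1;
	fputs(buff, stdout);
	return 0;
}

/* Only the top-level dumper owns the buffer; in the kernel it would
 * also be marked noinline_for_stack to keep the cost out of callers.
 */
static int dump_all(int count)
{
	char buff[DUMP_BUFF_LEN];
	int i;

	for (i = 0; i < count; i++)
		if (dump_one(buff, i) < 0)
			return -1;
	return 0;
}

int main(void)
{
	return dump_all(3) ? 1 : 0;
}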
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 3d09fa54598f1..ba303868686a7 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include "mlxbf_gige.h"
@@ -139,13 +140,10 @@ static int mlxbf_gige_open(struct net_device *netdev)
control |= MLXBF_GIGE_CONTROL_PORT_EN;
writeq(control, priv->base + MLXBF_GIGE_CONTROL);
- err = mlxbf_gige_request_irqs(priv);
- if (err)
- return err;
mlxbf_gige_cache_stats(priv);
err = mlxbf_gige_clean_port(priv);
if (err)
- goto free_irqs;
+ return err;
/* Clear driver's valid_polarity to match hardware,
* since the above call to clean_port() resets the
@@ -157,7 +155,7 @@ static int mlxbf_gige_open(struct net_device *netdev)
err = mlxbf_gige_tx_init(priv);
if (err)
- goto free_irqs;
+ goto phy_deinit;
err = mlxbf_gige_rx_init(priv);
if (err)
goto tx_deinit;
@@ -166,6 +164,10 @@ static int mlxbf_gige_open(struct net_device *netdev)
napi_enable(&priv->napi);
netif_start_queue(netdev);
+ err = mlxbf_gige_request_irqs(priv);
+ if (err)
+ goto napi_deinit;
+
/* Set bits in INT_EN that we care about */
int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
@@ -182,11 +184,17 @@ static int mlxbf_gige_open(struct net_device *netdev)
return 0;
+napi_deinit:
+ netif_stop_queue(netdev);
+ napi_disable(&priv->napi);
+ netif_napi_del(&priv->napi);
+ mlxbf_gige_rx_deinit(priv);
+
tx_deinit:
mlxbf_gige_tx_deinit(priv);
-free_irqs:
- mlxbf_gige_free_irqs(priv);
+phy_deinit:
+ phy_stop(phydev);
return err;
}
@@ -485,8 +493,13 @@ static void mlxbf_gige_shutdown(struct platform_device *pdev)
{
struct mlxbf_gige *priv = platform_get_drvdata(pdev);
- writeq(0, priv->base + MLXBF_GIGE_INT_EN);
- mlxbf_gige_clean_port(priv);
+ rtnl_lock();
+ netif_device_detach(priv->netdev);
+
+ if (netif_running(priv->netdev))
+ dev_close(priv->netdev);
+
+ rtnl_unlock();
}
static const struct acpi_device_id __maybe_unused mlxbf_gige_acpi_match[] = {
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index e5ec0a363aff8..31f75b4a67fd7 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -368,7 +368,6 @@ union ks8851_tx_hdr {
* @rdfifo: FIFO read callback
* @wrfifo: FIFO write callback
* @start_xmit: start_xmit() implementation callback
- * @rx_skb: rx_skb() implementation callback
* @flush_tx_work: flush_tx_work() implementation callback
*
* The @statelock is used to protect information in the structure which may
@@ -423,8 +422,6 @@ struct ks8851_net {
struct sk_buff *txp, bool irq);
netdev_tx_t (*start_xmit)(struct sk_buff *skb,
struct net_device *dev);
- void (*rx_skb)(struct ks8851_net *ks,
- struct sk_buff *skb);
void (*flush_tx_work)(struct ks8851_net *ks);
};
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 0bf13b38b8f5b..d4cdf3d4f5525 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -232,16 +232,6 @@ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
}
/**
- * ks8851_rx_skb - receive skbuff
- * @ks: The device state.
- * @skb: The skbuff
- */
-static void ks8851_rx_skb(struct ks8851_net *ks, struct sk_buff *skb)
-{
- ks->rx_skb(ks, skb);
-}
-
-/**
* ks8851_rx_pkts - receive packets from the host
* @ks: The device information.
*
@@ -309,7 +299,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
ks8851_dbg_dumpkkt(ks, rxpkt);
skb->protocol = eth_type_trans(skb, ks->netdev);
- ks8851_rx_skb(ks, skb);
+ __netif_rx(skb);
ks->netdev->stats.rx_packets++;
ks->netdev->stats.rx_bytes += rxlen;
@@ -340,6 +330,8 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
unsigned long flags;
unsigned int status;
+ local_bh_disable();
+
ks8851_lock(ks, &flags);
status = ks8851_rdreg16(ks, KS_ISR);
@@ -416,6 +408,8 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
if (status & IRQ_LCI)
mii_check_link(&ks->mii);
+ local_bh_enable();
+
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 2a7f298542670..381b9cd285ebd 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -210,16 +210,6 @@ static void ks8851_wrfifo_par(struct ks8851_net *ks, struct sk_buff *txp,
iowrite16_rep(ksp->hw_addr, txp->data, len / 2);
}
-/**
- * ks8851_rx_skb_par - receive skbuff
- * @ks: The device state.
- * @skb: The skbuff
- */
-static void ks8851_rx_skb_par(struct ks8851_net *ks, struct sk_buff *skb)
-{
- netif_rx(skb);
-}
-
static unsigned int ks8851_rdreg16_par_txqcr(struct ks8851_net *ks)
{
return ks8851_rdreg16_par(ks, KS_TXQCR);
@@ -298,7 +288,6 @@ static int ks8851_probe_par(struct platform_device *pdev)
ks->rdfifo = ks8851_rdfifo_par;
ks->wrfifo = ks8851_wrfifo_par;
ks->start_xmit = ks8851_start_xmit_par;
- ks->rx_skb = ks8851_rx_skb_par;
#define STD_IRQ (IRQ_LCI | /* Link Change */ \
IRQ_RXI | /* RX done */ \
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 2f803377c9f9d..670c1de966db8 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -299,16 +299,6 @@ static unsigned int calc_txlen(unsigned int len)
}
/**
- * ks8851_rx_skb_spi - receive skbuff
- * @ks: The device state
- * @skb: The skbuff
- */
-static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
-{
- netif_rx(skb);
-}
-
-/**
* ks8851_tx_work - process tx packet(s)
* @work: The work strucutre what was scheduled.
*
@@ -435,7 +425,6 @@ static int ks8851_probe_spi(struct spi_device *spi)
ks->rdfifo = ks8851_rdfifo_spi;
ks->wrfifo = ks8851_wrfifo_spi;
ks->start_xmit = ks8851_start_xmit_spi;
- ks->rx_skb = ks8851_rx_skb_spi;
ks->flush_tx_work = ks8851_flush_tx_work_spi;
#define STD_IRQ (IRQ_LCI | /* Link Change */ \
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index bd8aa83b47e5e..75a988c0bd794 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -25,6 +25,8 @@
#define PCS_POWER_STATE_DOWN 0x6
#define PCS_POWER_STATE_UP 0x4
+#define RFE_RD_FIFO_TH_3_DWORDS 0x3
+
static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
{
u32 chip_rev;
@@ -3272,6 +3274,21 @@ static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
lan743x_pci_cleanup(adapter);
}
+static void pci11x1x_set_rfe_rd_fifo_threshold(struct lan743x_adapter *adapter)
+{
+ u16 rev = adapter->csr.id_rev & ID_REV_CHIP_REV_MASK_;
+
+ if (rev == ID_REV_CHIP_REV_PCI11X1X_B0_) {
+ u32 misc_ctl;
+
+ misc_ctl = lan743x_csr_read(adapter, MISC_CTL_0);
+ misc_ctl &= ~MISC_CTL_0_RFE_READ_FIFO_MASK_;
+ misc_ctl |= FIELD_PREP(MISC_CTL_0_RFE_READ_FIFO_MASK_,
+ RFE_RD_FIFO_TH_3_DWORDS);
+ lan743x_csr_write(adapter, MISC_CTL_0, misc_ctl);
+ }
+}
+
static int lan743x_hardware_init(struct lan743x_adapter *adapter,
struct pci_dev *pdev)
{
@@ -3287,6 +3304,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
pci11x1x_strap_get_status(adapter);
spin_lock_init(&adapter->eth_syslock_spinlock);
mutex_init(&adapter->sgmii_rw_lock);
+ pci11x1x_set_rfe_rd_fifo_threshold(adapter);
} else {
adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index be79cb0ae5af3..645bc048e52ef 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -26,6 +26,7 @@
#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
#define ID_REV_CHIP_REV_A0_ (0x00000000)
#define ID_REV_CHIP_REV_B0_ (0x00000010)
+#define ID_REV_CHIP_REV_PCI11X1X_B0_ (0x000000B0)
#define FPGA_REV (0x04)
#define FPGA_REV_GET_MINOR_(fpga_rev) (((fpga_rev) >> 8) & 0x000000FF)
@@ -311,6 +312,9 @@
#define SGMII_CTL_LINK_STATUS_SOURCE_ BIT(8)
#define SGMII_CTL_SGMII_POWER_DN_ BIT(1)
+#define MISC_CTL_0 (0x920)
+#define MISC_CTL_0_RFE_READ_FIFO_MASK_ GENMASK(6, 4)
+
/* Vendor Specific SGMII MMD details */
#define SR_VSMMD_PCS_ID1 0x0004
#define SR_VSMMD_PCS_ID2 0x0005
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index 3a1b1a1f5a195..60dd2fd603a85 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -731,7 +731,7 @@ static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
bool sgmii = false, inband_aneg = false;
int err;
- if (port->conf.inband) {
+ if (conf->inband) {
if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
conf->portmode == PHY_INTERFACE_MODE_QSGMII)
inband_aneg = true; /* Cisco-SGMII in-band-aneg */
@@ -948,7 +948,7 @@ int sparx5_port_pcs_set(struct sparx5 *sparx5,
if (err)
return -EINVAL;
- if (port->conf.inband) {
+ if (conf->inband) {
/* Enable/disable 1G counters in ASM */
spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
ASM_PORT_CFG_CSC_STAT_DIS,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
index 523e0c470894f..55f255a3c9db6 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
@@ -36,6 +36,27 @@ struct sparx5_tc_flower_template {
u16 l3_proto; /* protocol specified in the template */
};
+/* SparX-5 VCAP fragment types:
+ * 0 = no fragment, 1 = initial fragment,
+ * 2 = suspicious fragment, 3 = valid follow-up fragment
+ */
+enum { /* key / mask */
+ FRAG_NOT = 0x03, /* 0 / 3 */
+ FRAG_SOME = 0x11, /* 1 / 1 */
+ FRAG_FIRST = 0x13, /* 1 / 3 */
+ FRAG_LATER = 0x33, /* 3 / 3 */
+ FRAG_INVAL = 0xff, /* invalid */
+};
+
+/* Flower fragment flag to VCAP fragment type mapping */
+static const u8 sparx5_vcap_frag_map[4][4] = { /* is_frag */
+ { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST }, /* 0/0 */
+ { FRAG_NOT, FRAG_NOT, FRAG_INVAL, FRAG_INVAL }, /* 0/1 */
+ { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL }, /* 1/0 */
+ { FRAG_SOME, FRAG_LATER, FRAG_INVAL, FRAG_FIRST } /* 1/1 */
+ /* 0/0 0/1 1/0 1/1 <-- first_frag */
+};
+
static int
sparx5_tc_flower_es0_tpid(struct vcap_tc_flower_parse_usage *st)
{
@@ -145,29 +166,27 @@ sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
flow_rule_match_control(st->frule, &mt);
if (mt.mask->flags) {
- if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
- if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
- value = 1; /* initial fragment */
- mask = 0x3;
- } else {
- if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
- value = 3; /* follow up fragment */
- mask = 0x3;
- } else {
- value = 0; /* no fragment */
- mask = 0x3;
- }
- }
- } else {
- if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
- value = 3; /* follow up fragment */
- mask = 0x3;
- } else {
- value = 0; /* no fragment */
- mask = 0x3;
- }
+ u8 is_frag_key = !!(mt.key->flags & FLOW_DIS_IS_FRAGMENT);
+ u8 is_frag_mask = !!(mt.mask->flags & FLOW_DIS_IS_FRAGMENT);
+ u8 is_frag_idx = (is_frag_key << 1) | is_frag_mask;
+
+ u8 first_frag_key = !!(mt.key->flags & FLOW_DIS_FIRST_FRAG);
+ u8 first_frag_mask = !!(mt.mask->flags & FLOW_DIS_FIRST_FRAG);
+ u8 first_frag_idx = (first_frag_key << 1) | first_frag_mask;
+
+ /* Lookup verdict based on the 2 + 2 input bits */
+ u8 vdt = sparx5_vcap_frag_map[is_frag_idx][first_frag_idx];
+
+ if (vdt == FRAG_INVAL) {
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ "Match on invalid fragment flag combination");
+ return -EINVAL;
}
+ /* Extract VCAP fragment key and mask from verdict */
+ value = (vdt >> 4) & 0x3;
+ mask = vdt & 0x3;
+
err = vcap_rule_add_key_u32(st->vrule,
VCAP_KF_L3_FRAGMENT_TYPE,
value, mask);
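Each sparx5_vcap_frag_map verdict above packs the VCAP fragment key into its upper nibble and the mask into its lower nibble, which is why the hunk extracts them with (vdt >> 4) & 0x3 and vdt & 0x3. A standalone decode sketch reusing the same constants (plain C, local to this example):

#include <stdint.h>
#include <stdio.h>

/* key in bits 7:4, mask in bits 3:0 -- e.g. FRAG_FIRST = 0x13 */
#define FRAG_NOT	0x03
#define FRAG_FIRST	0x13
#define FRAG_LATER	0x33

static void decode_frag(uint8_t vdt)
{
	uint8_t value = (vdt >> 4) & 0x3;	/* VCAP key */
	uint8_t mask = vdt & 0x3;		/* VCAP mask */

	printf("verdict 0x%02x -> key %u, mask 0x%x\n", vdt, value, mask);
}

int main(void)
{
	decode_frag(FRAG_NOT);	 /* key 0, mask 0x3: not fragmented */
	decode_frag(FRAG_FIRST); /* key 1, mask 0x3: initial fragment */
	decode_frag(FRAG_LATER); /* key 3, mask 0x3: follow-up fragment */
	return 0;
}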
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 59287c6e6cee6..d8af5e7e15b4d 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -601,7 +601,7 @@ static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
*alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
- *datasize = ALIGN(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN);
+ *datasize = mtu + ETH_HLEN;
}
static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 321fd8d007304..37efb1ea9fcd9 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -45,7 +45,7 @@ struct qcauart {
unsigned char *tx_buffer;
};
-static ssize_t
+static size_t
qca_tty_receive(struct serdev_device *serdev, const u8 *data, size_t count)
{
struct qcauart *qca = serdev_device_get_drvdata(serdev);
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 4c043052198d4..00882ffc7a029 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -73,6 +73,7 @@ enum mac_version {
};
struct rtl8169_private;
+struct r8169_led_classdev;
void r8169_apply_firmware(struct rtl8169_private *tp);
u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp);
@@ -84,7 +85,8 @@ void r8169_get_led_name(struct rtl8169_private *tp, int idx,
char *buf, int buf_len);
int rtl8168_get_led_mode(struct rtl8169_private *tp);
int rtl8168_led_mod_ctrl(struct rtl8169_private *tp, u16 mask, u16 val);
-void rtl8168_init_leds(struct net_device *ndev);
+struct r8169_led_classdev *rtl8168_init_leds(struct net_device *ndev);
int rtl8125_get_led_mode(struct rtl8169_private *tp, int index);
int rtl8125_set_led_mode(struct rtl8169_private *tp, int index, u16 mode);
-void rtl8125_init_leds(struct net_device *ndev);
+struct r8169_led_classdev *rtl8125_init_leds(struct net_device *ndev);
+void r8169_remove_leds(struct r8169_led_classdev *leds);
diff --git a/drivers/net/ethernet/realtek/r8169_leds.c b/drivers/net/ethernet/realtek/r8169_leds.c
index 7c5dc9d0df855..e10bee706bc69 100644
--- a/drivers/net/ethernet/realtek/r8169_leds.c
+++ b/drivers/net/ethernet/realtek/r8169_leds.c
@@ -146,22 +146,22 @@ static void rtl8168_setup_ldev(struct r8169_led_classdev *ldev,
led_cdev->hw_control_get_device = r8169_led_hw_control_get_device;
/* ignore errors */
- devm_led_classdev_register(&ndev->dev, led_cdev);
+ led_classdev_register(&ndev->dev, led_cdev);
}
-void rtl8168_init_leds(struct net_device *ndev)
+struct r8169_led_classdev *rtl8168_init_leds(struct net_device *ndev)
{
- /* bind resource mgmt to netdev */
- struct device *dev = &ndev->dev;
struct r8169_led_classdev *leds;
int i;
- leds = devm_kcalloc(dev, RTL8168_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ leds = kcalloc(RTL8168_NUM_LEDS + 1, sizeof(*leds), GFP_KERNEL);
if (!leds)
- return;
+ return NULL;
for (i = 0; i < RTL8168_NUM_LEDS; i++)
rtl8168_setup_ldev(leds + i, ndev, i);
+
+ return leds;
}
static int rtl8125_led_hw_control_is_supported(struct led_classdev *led_cdev,
@@ -245,20 +245,31 @@ static void rtl8125_setup_led_ldev(struct r8169_led_classdev *ldev,
led_cdev->hw_control_get_device = r8169_led_hw_control_get_device;
/* ignore errors */
- devm_led_classdev_register(&ndev->dev, led_cdev);
+ led_classdev_register(&ndev->dev, led_cdev);
}
-void rtl8125_init_leds(struct net_device *ndev)
+struct r8169_led_classdev *rtl8125_init_leds(struct net_device *ndev)
{
- /* bind resource mgmt to netdev */
- struct device *dev = &ndev->dev;
struct r8169_led_classdev *leds;
int i;
- leds = devm_kcalloc(dev, RTL8125_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ leds = kcalloc(RTL8125_NUM_LEDS + 1, sizeof(*leds), GFP_KERNEL);
if (!leds)
- return;
+ return NULL;
for (i = 0; i < RTL8125_NUM_LEDS; i++)
rtl8125_setup_led_ldev(leds + i, ndev, i);
+
+ return leds;
+}
+
+void r8169_remove_leds(struct r8169_led_classdev *leds)
+{
+ if (!leds)
+ return;
+
+ for (struct r8169_led_classdev *l = leds; l->ndev; l++)
+ led_classdev_unregister(&l->led);
+
+ kfree(leds);
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 5c879a5c86d70..0fc5fe564ae50 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -647,6 +647,8 @@ struct rtl8169_private {
const char *fw_name;
struct rtl_fw *rtl_fw;
+ struct r8169_led_classdev *leds;
+
u32 ocp_base;
};
@@ -1314,17 +1316,40 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
}
+static void rtl_dash_loop_wait(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long usecs, int n, bool high)
+{
+ if (!tp->dash_enabled)
+ return;
+ rtl_loop_wait(tp, c, usecs, n, high);
+}
+
+static void rtl_dash_loop_wait_high(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long d, int n)
+{
+ rtl_dash_loop_wait(tp, c, d, n, true);
+}
+
+static void rtl_dash_loop_wait_low(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long d, int n)
+{
+ rtl_dash_loop_wait(tp, c, d, n, false);
+}
+
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
- rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
+ rtl_dash_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_start(struct rtl8169_private *tp)
{
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
- rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
+ rtl_dash_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
}
static void rtl8168_driver_start(struct rtl8169_private *tp)
@@ -1338,7 +1363,7 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
- rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
+ rtl_dash_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
@@ -1346,7 +1371,7 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
rtl8168ep_stop_cmac(tp);
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
- rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
+ rtl_dash_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
}
static void rtl8168_driver_stop(struct rtl8169_private *tp)
@@ -5021,6 +5046,9 @@ static void rtl_remove_one(struct pci_dev *pdev)
cancel_work_sync(&tp->wk.work);
+ if (IS_ENABLED(CONFIG_R8169_LEDS))
+ r8169_remove_leds(tp->leds);
+
unregister_netdev(tp->dev);
if (tp->dash_type != RTL_DASH_NONE)
@@ -5141,6 +5169,15 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
struct mii_bus *new_bus;
int ret;
+ /* On some boards with this chip version the BIOS is buggy and fails
+ * to reset the PHY page selector. This results in the PHY ID read
+ * accessing registers on a different page, returning a more or
+ * less random value. Fix this by resetting the page selector first.
+ */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_25 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_26)
+ r8169_mdio_write(tp, 0x1f, 0);
+
new_bus = devm_mdiobus_alloc(&pdev->dev);
if (!new_bus)
return -ENOMEM;
@@ -5469,9 +5506,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ENABLED(CONFIG_R8169_LEDS)) {
if (rtl_is_8125(tp))
- rtl8125_init_leds(dev);
+ tp->leds = rtl8125_init_leds(dev);
else if (tp->mac_version > RTL_GIGA_MAC_VER_06)
- rtl8168_init_leds(dev);
+ tp->leds = rtl8168_init_leds(dev);
}
netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index d1be030c88483..fcb756d77681c 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -769,25 +769,28 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
dma_addr_t dma_addr;
int rx_packets = 0;
u8 desc_status;
- u16 pkt_len;
+ u16 desc_len;
u8 die_dt;
int entry;
int limit;
int i;
- entry = priv->cur_rx[q] % priv->num_rx_ring[q];
limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
stats = &priv->stats[q];
- desc = &priv->rx_ring[q].desc[entry];
- for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
+ for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q].desc[entry];
+ if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+ break;
+
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
- pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
+ desc_len = le16_to_cpu(desc->ds_cc) & RX_DS;
/* We use 0-byte descriptors to mark the DMA mapping errors */
- if (!pkt_len)
+ if (!desc_len)
continue;
if (desc_status & MSC_MC)
@@ -808,25 +811,25 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
switch (die_dt) {
case DT_FSINGLE:
skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_put(skb, pkt_len);
+ skb_put(skb, desc_len);
skb->protocol = eth_type_trans(skb, ndev);
if (ndev->features & NETIF_F_RXCSUM)
ravb_rx_csum_gbeth(skb);
napi_gro_receive(&priv->napi[q], skb);
rx_packets++;
- stats->rx_bytes += pkt_len;
+ stats->rx_bytes += desc_len;
break;
case DT_FSTART:
priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_put(priv->rx_1st_skb, pkt_len);
+ skb_put(priv->rx_1st_skb, desc_len);
break;
case DT_FMID:
skb = ravb_get_skb_gbeth(ndev, entry, desc);
skb_copy_to_linear_data_offset(priv->rx_1st_skb,
priv->rx_1st_skb->len,
skb->data,
- pkt_len);
- skb_put(priv->rx_1st_skb, pkt_len);
+ desc_len);
+ skb_put(priv->rx_1st_skb, desc_len);
dev_kfree_skb(skb);
break;
case DT_FEND:
@@ -834,23 +837,20 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
skb_copy_to_linear_data_offset(priv->rx_1st_skb,
priv->rx_1st_skb->len,
skb->data,
- pkt_len);
- skb_put(priv->rx_1st_skb, pkt_len);
+ desc_len);
+ skb_put(priv->rx_1st_skb, desc_len);
dev_kfree_skb(skb);
priv->rx_1st_skb->protocol =
eth_type_trans(priv->rx_1st_skb, ndev);
if (ndev->features & NETIF_F_RXCSUM)
- ravb_rx_csum_gbeth(skb);
+ ravb_rx_csum_gbeth(priv->rx_1st_skb);
+ stats->rx_bytes += priv->rx_1st_skb->len;
napi_gro_receive(&priv->napi[q],
priv->rx_1st_skb);
rx_packets++;
- stats->rx_bytes += pkt_len;
break;
}
}
-
- entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q].desc[entry];
}
/* Refill the RX ring buffers. */
@@ -891,30 +891,29 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
- int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
- priv->cur_rx[q];
struct net_device_stats *stats = &priv->stats[q];
struct ravb_ex_rx_desc *desc;
+ unsigned int limit, i;
struct sk_buff *skb;
dma_addr_t dma_addr;
struct timespec64 ts;
+ int rx_packets = 0;
u8 desc_status;
u16 pkt_len;
- int limit;
+ int entry;
+
+ limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+ for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q].ex_desc[entry];
+ if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+ break;
- boguscnt = min(boguscnt, *quota);
- limit = boguscnt;
- desc = &priv->rx_ring[q].ex_desc[entry];
- while (desc->die_dt != DT_FEMPTY) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
- if (--boguscnt < 0)
- break;
-
/* We use 0-byte descriptors to mark the DMA mapping errors */
if (!pkt_len)
continue;
@@ -960,12 +959,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
if (ndev->features & NETIF_F_RXCSUM)
ravb_rx_csum(skb);
napi_gro_receive(&priv->napi[q], skb);
- stats->rx_packets++;
+ rx_packets++;
stats->rx_bytes += pkt_len;
}
-
- entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q].ex_desc[entry];
}
/* Refill the RX ring buffers. */
@@ -995,9 +991,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
desc->die_dt = DT_FEMPTY;
}
- *quota -= limit - (++boguscnt);
-
- return boguscnt <= 0;
+ stats->rx_packets += rx_packets;
+ *quota -= rx_packets;
+ return *quota == 0;
}
/* Packet receive function for Ethernet AVB */
@@ -1324,12 +1320,12 @@ static int ravb_poll(struct napi_struct *napi, int budget)
int q = napi - priv->napi;
int mask = BIT(q);
int quota = budget;
+ bool unmask;
/* Processing RX Descriptor Ring */
/* Clear RX interrupt */
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
- if (ravb_rx(ndev, &quota, q))
- goto out;
+ unmask = !ravb_rx(ndev, &quota, q);
/* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
@@ -1339,6 +1335,18 @@ static int ravb_poll(struct napi_struct *napi, int budget)
netif_wake_subqueue(ndev, q);
spin_unlock_irqrestore(&priv->lock, flags);
+ /* Receive error message handling */
+ priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
+ if (info->nc_queues)
+ priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
+ if (priv->rx_over_errors != ndev->stats.rx_over_errors)
+ ndev->stats.rx_over_errors = priv->rx_over_errors;
+ if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
+ ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
+
+ if (!unmask)
+ goto out;
+
napi_complete(napi);
/* Re-enable RX/TX interrupts */
@@ -1352,14 +1360,6 @@ static int ravb_poll(struct napi_struct *napi, int budget)
}
spin_unlock_irqrestore(&priv->lock, flags);
- /* Receive error message handling */
- priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
- if (info->nc_queues)
- priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
- if (priv->rx_over_errors != ndev->stats.rx_over_errors)
- ndev->stats.rx_over_errors = priv->rx_over_errors;
- if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
- ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
out:
return budget - quota;
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 475e1e8c1d35f..0786eb0da3914 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -50,7 +50,7 @@
* the macros available to do this only define GCC 8.
*/
__diag_push();
-__diag_ignore(GCC, 8, "-Woverride-init",
+__diag_ignore_all("-Woverride-init",
"logic to initialize all and then override some is OK");
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
SH_ETH_OFFSET_DEFAULTS,
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index a6fefe675ef15..3b7d4ac1e7be0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -553,6 +553,7 @@ extern const struct stmmac_hwtimestamp stmmac_ptp;
extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
struct mac_link {
+ u32 caps;
u32 speed_mask;
u32 speed10;
u32 speed100;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index b21d99faa2d04..e1b761dcfa1dd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1096,6 +1096,8 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
/* The loopback bit seems to be re-set when link change
* Simply mask it each time
* Speed 10/100/1000 are set in BIT(2)/BIT(3)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 3927609abc441..8555299443f4e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -539,6 +539,8 @@ int dwmac1000_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
mac->link.duplex = GMAC_CONTROL_DM;
mac->link.speed10 = GMAC_CONTROL_PS;
mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index a6e8d7bd95886..7667d103cd0eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -175,6 +175,8 @@ int dwmac100_setup(struct stmmac_priv *priv)
dev_info(priv->device, "\tDWMAC100\n");
mac->pcsr = priv->ioaddr;
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
mac->link.duplex = MAC_CONTROL_F;
mac->link.speed10 = 0;
mac->link.speed100 = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 6b6d0de096197..a38226d7cc6a9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -70,7 +70,10 @@ static void dwmac4_core_init(struct mac_device_info *hw,
static void dwmac4_phylink_get_caps(struct stmmac_priv *priv)
{
- priv->phylink_config.mac_capabilities |= MAC_2500FD;
+ if (priv->plat->tx_queues_to_use > 1)
+ priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+ else
+ priv->hw->link.caps |= (MAC_10HD | MAC_100HD | MAC_1000HD);
}
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
@@ -92,19 +95,41 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
u32 prio, u32 queue)
{
void __iomem *ioaddr = hw->pcsr;
- u32 base_register;
- u32 value;
+ u32 clear_mask = 0;
+ u32 ctrl2, ctrl3;
+ int i;
- base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
- if (queue >= 4)
- queue -= 4;
+ ctrl2 = readl(ioaddr + GMAC_RXQ_CTRL2);
+ ctrl3 = readl(ioaddr + GMAC_RXQ_CTRL3);
- value = readl(ioaddr + base_register);
+ /* The software must ensure that the same priority
+ * is not mapped to multiple Rx queues
+ */
+ for (i = 0; i < 4; i++)
+ clear_mask |= ((prio << GMAC_RXQCTRL_PSRQX_SHIFT(i)) &
+ GMAC_RXQCTRL_PSRQX_MASK(i));
- value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
- value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
+ ctrl2 &= ~clear_mask;
+ ctrl3 &= ~clear_mask;
+
+ /* First assign new priorities to a queue, then
+ * clear them from other queues
+ */
+ if (queue < 4) {
+ ctrl2 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
GMAC_RXQCTRL_PSRQX_MASK(queue);
- writel(value, ioaddr + base_register);
+
+ writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
+ writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
+ } else {
+ queue -= 4;
+
+ ctrl3 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
+ GMAC_RXQCTRL_PSRQX_MASK(queue);
+
+ writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
+ writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
+ }
}
static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
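The rx_queue_priority rework above (and its dwxgmac2 counterpart below) writes both CTRL2 and CTRL3 so that a priority assigned to one queue is first cleared from every other queue. A self-contained sketch of the mask arithmetic, assuming the driver's 8-bit-per-queue PSRQ layout (the macro names here are local to the example, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define PSRQ_SHIFT(q)	((q) * 8)
#define PSRQ_MASK(q)	(0xffu << PSRQ_SHIFT(q))

/* Compute new CTRL2/CTRL3 values: assign @prio to @queue and
 * clear the same priority bits from all other queues first.
 */
static void rxq_set_prio(uint32_t *ctrl2, uint32_t *ctrl3,
			 uint32_t prio, unsigned int queue)
{
	uint32_t clear_mask = 0;
	unsigned int i;

	for (i = 0; i < 4; i++)
		clear_mask |= (prio << PSRQ_SHIFT(i)) & PSRQ_MASK(i);

	*ctrl2 &= ~clear_mask;
	*ctrl3 &= ~clear_mask;

	if (queue < 4)
		*ctrl2 |= (prio << PSRQ_SHIFT(queue)) & PSRQ_MASK(queue);
	else
		*ctrl3 |= (prio << PSRQ_SHIFT(queue - 4)) & PSRQ_MASK(queue - 4);
}

int main(void)
{
	uint32_t ctrl2 = 0x00000001, ctrl3 = 0x00000100; /* prio 0x1 on q0, q5 */

	rxq_set_prio(&ctrl2, &ctrl3, 0x1, 2);
	printf("ctrl2=%08x ctrl3=%08x\n", ctrl2, ctrl3); /* 00010000 00000000 */
	return 0;
}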
@@ -1356,6 +1381,8 @@ int dwmac4_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
mac->link.duplex = GMAC_CONFIG_DM;
mac->link.speed10 = GMAC_CONFIG_PS;
mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 1af2f89a0504a..f8e7775bb6336 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -47,14 +47,6 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}
-static void xgmac_phylink_get_caps(struct stmmac_priv *priv)
-{
- priv->phylink_config.mac_capabilities |= MAC_2500FD | MAC_5000FD |
- MAC_10000FD | MAC_25000FD |
- MAC_40000FD | MAC_50000FD |
- MAC_100000FD;
-}
-
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
@@ -105,17 +97,41 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
u32 queue)
{
void __iomem *ioaddr = hw->pcsr;
- u32 value, reg;
+ u32 clear_mask = 0;
+ u32 ctrl2, ctrl3;
+ int i;
- reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
- if (queue >= 4)
+ ctrl2 = readl(ioaddr + XGMAC_RXQ_CTRL2);
+ ctrl3 = readl(ioaddr + XGMAC_RXQ_CTRL3);
+
+ /* The software must ensure that the same priority
+ * is not mapped to multiple Rx queues
+ */
+ for (i = 0; i < 4; i++)
+ clear_mask |= ((prio << XGMAC_PSRQ_SHIFT(i)) &
+ XGMAC_PSRQ(i));
+
+ ctrl2 &= ~clear_mask;
+ ctrl3 &= ~clear_mask;
+
+ /* First assign new priorities to a queue, then
+ * clear them from other queues
+ */
+ if (queue < 4) {
+ ctrl2 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
+ XGMAC_PSRQ(queue);
+
+ writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
+ writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
+ } else {
queue -= 4;
- value = readl(ioaddr + reg);
- value &= ~XGMAC_PSRQ(queue);
- value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
+ ctrl3 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
+ XGMAC_PSRQ(queue);
- writel(value, ioaddr + reg);
+ writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
+ writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
+ }
}
static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
@@ -1516,7 +1532,6 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
- .phylink_get_caps = xgmac_phylink_get_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxgmac2_rx_queue_enable,
@@ -1577,7 +1592,6 @@ static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
const struct stmmac_ops dwxlgmac2_ops = {
.core_init = dwxgmac2_core_init,
- .phylink_get_caps = xgmac_phylink_get_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxlgmac2_rx_queue_enable,
@@ -1637,6 +1651,9 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_1000FD | MAC_2500FD | MAC_5000FD |
+ MAC_10000FD;
mac->link.duplex = 0;
mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
@@ -1674,6 +1691,11 @@ int dwxlgmac2_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_1000FD | MAC_2500FD | MAC_5000FD |
+ MAC_10000FD | MAC_25000FD |
+ MAC_40000FD | MAC_50000FD |
+ MAC_100000FD;
mac->link.duplex = 0;
mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index dff02d75d5197..5d1ea3e07459a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -52,6 +52,7 @@ struct stmmac_counters {
unsigned int mmc_tx_excessdef;
unsigned int mmc_tx_pause_frame;
unsigned int mmc_tx_vlan_frame_g;
+ unsigned int mmc_tx_oversize_g;
unsigned int mmc_tx_lpi_usec;
unsigned int mmc_tx_lpi_tran;
@@ -80,6 +81,7 @@ struct stmmac_counters {
unsigned int mmc_rx_fifo_overflow;
unsigned int mmc_rx_vlan_frames_gb;
unsigned int mmc_rx_watchdog_error;
+ unsigned int mmc_rx_error;
unsigned int mmc_rx_lpi_usec;
unsigned int mmc_rx_lpi_tran;
unsigned int mmc_rx_discard_frames_gb;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 7eb477faa75a3..0fab842902a85 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -53,6 +53,7 @@
#define MMC_TX_EXCESSDEF 0x6c
#define MMC_TX_PAUSE_FRAME 0x70
#define MMC_TX_VLAN_FRAME_G 0x74
+#define MMC_TX_OVERSIZE_G 0x78
/* MMC RX counter registers */
#define MMC_RX_FRAMECOUNT_GB 0x80
@@ -79,6 +80,13 @@
#define MMC_RX_FIFO_OVERFLOW 0xd4
#define MMC_RX_VLAN_FRAMES_GB 0xd8
#define MMC_RX_WATCHDOG_ERROR 0xdc
+#define MMC_RX_ERROR 0xe0
+
+#define MMC_TX_LPI_USEC 0xec
+#define MMC_TX_LPI_TRAN 0xf0
+#define MMC_RX_LPI_USEC 0xf4
+#define MMC_RX_LPI_TRAN 0xf8
+
/* IPC*/
#define MMC_RX_IPC_INTR_MASK 0x100
#define MMC_RX_IPC_INTR 0x108
@@ -283,6 +291,9 @@ static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
mmc->mmc_tx_excessdef += readl(mmcaddr + MMC_TX_EXCESSDEF);
mmc->mmc_tx_pause_frame += readl(mmcaddr + MMC_TX_PAUSE_FRAME);
mmc->mmc_tx_vlan_frame_g += readl(mmcaddr + MMC_TX_VLAN_FRAME_G);
+ mmc->mmc_tx_oversize_g += readl(mmcaddr + MMC_TX_OVERSIZE_G);
+ mmc->mmc_tx_lpi_usec += readl(mmcaddr + MMC_TX_LPI_USEC);
+ mmc->mmc_tx_lpi_tran += readl(mmcaddr + MMC_TX_LPI_TRAN);
/* MMC RX counter registers */
mmc->mmc_rx_framecount_gb += readl(mmcaddr + MMC_RX_FRAMECOUNT_GB);
@@ -316,6 +327,10 @@ static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW);
mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + MMC_RX_VLAN_FRAMES_GB);
mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR);
+ mmc->mmc_rx_error += readl(mmcaddr + MMC_RX_ERROR);
+ mmc->mmc_rx_lpi_usec += readl(mmcaddr + MMC_RX_LPI_USEC);
+ mmc->mmc_rx_lpi_tran += readl(mmcaddr + MMC_RX_LPI_TRAN);
+
/* IPv4 */
mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD);
mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index e1537a57815f3..542e2633a6f52 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -212,6 +212,7 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_tx_excessdef),
STMMAC_MMC_STAT(mmc_tx_pause_frame),
STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
+ STMMAC_MMC_STAT(mmc_tx_oversize_g),
STMMAC_MMC_STAT(mmc_tx_lpi_usec),
STMMAC_MMC_STAT(mmc_tx_lpi_tran),
STMMAC_MMC_STAT(mmc_rx_framecount_gb),
@@ -238,6 +239,7 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
STMMAC_MMC_STAT(mmc_rx_watchdog_error),
+ STMMAC_MMC_STAT(mmc_rx_error),
STMMAC_MMC_STAT(mmc_rx_lpi_usec),
STMMAC_MMC_STAT(mmc_rx_lpi_tran),
STMMAC_MMC_STAT(mmc_rx_discard_frames_gb),
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 24cd80490d19c..7c6fb14b55550 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1198,17 +1198,6 @@ static int stmmac_init_phy(struct net_device *dev)
return ret;
}
-static void stmmac_set_half_duplex(struct stmmac_priv *priv)
-{
- /* Half-Duplex can only work with single tx queue */
- if (priv->plat->tx_queues_to_use > 1)
- priv->phylink_config.mac_capabilities &=
- ~(MAC_10HD | MAC_100HD | MAC_1000HD);
- else
- priv->phylink_config.mac_capabilities |=
- (MAC_10HD | MAC_100HD | MAC_1000HD);
-}
-
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data;
@@ -1236,15 +1225,11 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
xpcs_get_interfaces(priv->hw->xpcs,
priv->phylink_config.supported_interfaces);
- priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10FD | MAC_100FD |
- MAC_1000FD;
-
- stmmac_set_half_duplex(priv);
-
/* Get the MAC specific capabilities */
stmmac_mac_phylink_get_caps(priv);
+ priv->phylink_config.mac_capabilities = priv->hw->link.caps;
+
max_speed = priv->plat->max_speed;
if (max_speed)
phylink_limit_mac_speed(&priv->phylink_config, max_speed);
@@ -7342,6 +7327,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret = 0, i;
+ int max_speed;
if (netif_running(dev))
stmmac_release(dev);
@@ -7355,7 +7341,14 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
priv->rss.table[i] = ethtool_rxfh_indir_default(i,
rx_cnt);
- stmmac_set_half_duplex(priv);
+ stmmac_mac_phylink_get_caps(priv);
+
+ priv->phylink_config.mac_capabilities = priv->hw->link.caps;
+
+ max_speed = priv->plat->max_speed;
+ if (max_speed)
+ phylink_limit_mac_speed(&priv->phylink_config, max_speed);
+
stmmac_napi_add(dev);
if (netif_running(dev))
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index b317b94864554..bfb9035063671 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -176,7 +176,7 @@ static char version[] =
static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
static int link_mode;
-MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
+MODULE_AUTHOR("Adrian Sun <asun@darksunrising.com>");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("sun/cassini.bin");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 21431f43e4c22..f68aa813d4fb1 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -61,7 +61,7 @@ union niu_page {
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index b983b9c23be68..50ace461a1af4 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -59,7 +59,7 @@
#define DRV_NAME "sunhme"
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index e220620d0ffc9..2f30715e9b67f 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -44,7 +44,7 @@
static char version[] =
DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_DESCRIPTION("Sun LDOM virtual network driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 351609f4f011d..1cacb2a0ee034 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -39,7 +39,7 @@
*/
#define VNET_MAX_RETRIES 10
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_DESCRIPTION("Sun LDOM virtual network support library");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.1");
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 2939a21ca74f3..1d00e21808c1c 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2793,6 +2793,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
+ struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns;
+ struct am65_cpsw_tx_chn *tx_chan = common->tx_chns;
struct device *dev = common->dev;
struct am65_cpsw_port *port;
int ret = 0, i;
@@ -2805,6 +2807,22 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
if (ret)
return ret;
+ /* The DMA Channels are not guaranteed to be in a clean state.
+ * Reset and disable them to ensure that they are back in a
+ * clean state and ready to be used.
+ */
+ for (i = 0; i < common->tx_ch_num; i++) {
+ k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i],
+ am65_cpsw_nuss_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
+ }
+
+ for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
+ k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
+ am65_cpsw_nuss_rx_cleanup, !!i);
+
+ k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
+
ret = am65_cpsw_nuss_register_devlink(common);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 93295916b1d2b..2fa511227eac8 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -20,6 +20,8 @@
#include "txgbe_phy.h"
#include "txgbe_hw.h"
+#define TXGBE_I2C_CLK_DEV_NAME "i2c_dw"
+
static int txgbe_swnodes_register(struct txgbe *txgbe)
{
struct txgbe_nodes *nodes = &txgbe->nodes;
@@ -571,8 +573,8 @@ static int txgbe_clock_register(struct txgbe *txgbe)
char clk_name[32];
struct clk *clk;
- snprintf(clk_name, sizeof(clk_name), "i2c_designware.%d",
- pci_dev_id(pdev));
+ snprintf(clk_name, sizeof(clk_name), "%s.%d",
+ TXGBE_I2C_CLK_DEV_NAME, pci_dev_id(pdev));
clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000);
if (IS_ERR(clk))
@@ -634,7 +636,7 @@ static int txgbe_i2c_register(struct txgbe *txgbe)
info.parent = &pdev->dev;
info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]);
- info.name = "i2c_designware";
+ info.name = TXGBE_I2C_CLK_DEV_NAME;
info.id = pci_dev_id(pdev);
info.res = &DEFINE_RES_IRQ(pdev->irq);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9df39cf8b0975..1072e2210aed3 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1443,7 +1443,7 @@ static int temac_probe(struct platform_device *pdev)
}
/* map device registers */
- lp->regs = devm_platform_ioremap_resource_byname(pdev, 0);
+ lp->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lp->regs)) {
dev_err(&pdev->dev, "could not map TEMAC registers\n");
return -ENOMEM;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 2f6739fe78af2..6c2835086b57e 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -822,7 +822,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!pskb_inet_may_pull(skb))
+ if (!skb_vlan_inet_prepare(skb))
return -EINVAL;
if (!gs4)
@@ -929,7 +929,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!pskb_inet_may_pull(skb))
+ if (!skb_vlan_inet_prepare(skb))
return -EINVAL;
if (!gs6)
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index a6fcbda64ecc6..2b6ec979a62f2 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -154,8 +154,11 @@ static void free_netvsc_device(struct rcu_head *head)
int i;
kfree(nvdev->extension);
- vfree(nvdev->recv_buf);
- vfree(nvdev->send_buf);
+
+ if (!nvdev->recv_buf_gpadl_handle.decrypted)
+ vfree(nvdev->recv_buf);
+ if (!nvdev->send_buf_gpadl_handle.decrypted)
+ vfree(nvdev->send_buf);
bitmap_free(nvdev->send_section_map);
for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 8b8634600c519..ddb50a0e2bc82 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -2431,6 +2431,7 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
struct lan8814_ptp_rx_ts *rx_ts, *tmp;
int txcfg = 0, rxcfg = 0;
int pkt_ts_enable;
+ int tx_mod;
ptp_priv->hwts_tx_type = config->tx_type;
ptp_priv->rx_filter = config->rx_filter;
@@ -2477,9 +2478,14 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
- if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ tx_mod = lanphy_read_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD);
+ if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC) {
lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
- PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+ tx_mod | PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+ } else if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ON) {
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
+ tx_mod & ~PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+ }
if (config->rx_filter != HWTSTAMP_FILTER_NONE)
lan8814_config_ts_intr(ptp_priv->phydev, true);
@@ -2537,7 +2543,7 @@ static void lan8814_txtstamp(struct mii_timestamper *mii_ts,
}
}
-static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
+static bool lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
{
struct ptp_header *ptp_header;
u32 type;
@@ -2547,7 +2553,11 @@ static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
ptp_header = ptp_parse_header(skb, type);
skb_pull_inline(skb, ETH_HLEN);
+ if (!ptp_header)
+ return false;
+
*sig = (__force u16)(ntohs(ptp_header->sequence_id));
+ return true;
}
static bool lan8814_match_rx_skb(struct kszphy_ptp_priv *ptp_priv,
@@ -2559,7 +2569,8 @@ static bool lan8814_match_rx_skb(struct kszphy_ptp_priv *ptp_priv,
bool ret = false;
u16 skb_sig;
- lan8814_get_sig_rx(skb, &skb_sig);
+ if (!lan8814_get_sig_rx(skb, &skb_sig))
+ return ret;
/* Iterate over all RX timestamps and match it with the received skbs */
spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags);
@@ -2834,7 +2845,7 @@ static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
return 0;
}
-static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
+static bool lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
{
struct ptp_header *ptp_header;
u32 type;
@@ -2842,7 +2853,11 @@ static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
type = ptp_classify_raw(skb);
ptp_header = ptp_parse_header(skb, type);
+ if (!ptp_header)
+ return false;
+
*sig = (__force u16)(ntohs(ptp_header->sequence_id));
+ return true;
}
static void lan8814_match_tx_skb(struct kszphy_ptp_priv *ptp_priv,
@@ -2856,7 +2871,8 @@ static void lan8814_match_tx_skb(struct kszphy_ptp_priv *ptp_priv,
spin_lock_irqsave(&ptp_priv->tx_queue.lock, flags);
skb_queue_walk_safe(&ptp_priv->tx_queue, skb, skb_tmp) {
- lan8814_get_sig_tx(skb, &skb_sig);
+ if (!lan8814_get_sig_tx(skb, &skb_sig))
+ continue;
if (memcmp(&skb_sig, &seq_id, sizeof(seq_id)))
continue;
@@ -2910,7 +2926,8 @@ static bool lan8814_match_skb(struct kszphy_ptp_priv *ptp_priv,
spin_lock_irqsave(&ptp_priv->rx_queue.lock, flags);
skb_queue_walk_safe(&ptp_priv->rx_queue, skb, skb_tmp) {
- lan8814_get_sig_rx(skb, &skb_sig);
+ if (!lan8814_get_sig_rx(skb, &skb_sig))
+ continue;
if (memcmp(&skb_sig, &rx_ts->seq_id, sizeof(rx_ts->seq_id)))
continue;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 8297ef681bf5d..6c6ec94757092 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -2831,8 +2831,8 @@ EXPORT_SYMBOL(genphy_resume);
int genphy_loopback(struct phy_device *phydev, bool enable)
{
if (enable) {
- u16 val, ctl = BMCR_LOOPBACK;
- int ret;
+ u16 ctl = BMCR_LOOPBACK;
+ int ret, val;
ctl |= mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
index 4717c59d51d04..e79657f76bea2 100644
--- a/drivers/net/phy/qcom/at803x.c
+++ b/drivers/net/phy/qcom/at803x.c
@@ -797,7 +797,7 @@ static int at8031_parse_dt(struct phy_device *phydev)
static int at8031_probe(struct phy_device *phydev)
{
- struct at803x_priv *priv = phydev->priv;
+ struct at803x_priv *priv;
int mode_cfg;
int ccr;
int ret;
@@ -806,6 +806,8 @@ static int at8031_probe(struct phy_device *phydev)
if (ret)
return ret;
+ priv = phydev->priv;
+
/* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
* options.
*/
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 6833ef0c79305..689687bd2574b 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -694,6 +694,6 @@ module_init(pptp_init_module);
module_exit(pptp_exit_module);
MODULE_DESCRIPTION("Point-to-Point Tunneling Protocol");
-MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
+MODULE_AUTHOR("D. Kozlov <xeb@mail.ru>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_PPTP);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 0b3f21cba552f..92da8c03d960c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2125,14 +2125,16 @@ static ssize_t tun_put_user(struct tun_struct *tun,
tun_is_little_endian(tun), true,
vlan_hlen)) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
- pr_err("unexpected GSO type: "
- "0x%x, gso_size %d, hdr_len %d\n",
- sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
- tun16_to_cpu(tun, gso.hdr_len));
- print_hex_dump(KERN_ERR, "tun: ",
- DUMP_PREFIX_NONE,
- 16, 1, skb->head,
- min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+
+ if (net_ratelimit()) {
+ netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
+ sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+ tun16_to_cpu(tun, gso.hdr_len));
+ print_hex_dump(KERN_ERR, "tun: ",
+ DUMP_PREFIX_NONE,
+ 16, 1, skb->head,
+ min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+ }
WARN_ON_ONCE(1);
return -EINVAL;
}
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 88e084534853d..752f821a19901 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1273,6 +1273,8 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
if (is_valid_ether_addr(mac)) {
eth_hw_addr_set(dev->net, mac);
+ if (!is_local_ether_addr(mac))
+ dev->net->addr_assign_type = NET_ADDR_PERM;
} else {
netdev_info(dev->net, "invalid MAC address, using random\n");
eth_hw_addr_random(dev->net);
@@ -1315,6 +1317,8 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
netif_set_tso_max_size(dev->net, 16384);
+ ax88179_reset(dev);
+
return 0;
}
@@ -1693,7 +1697,6 @@ static const struct driver_info ax88179_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1706,7 +1709,6 @@ static const struct driver_info ax88178a_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index e2e181378f412..edc34402e787f 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1431,6 +1431,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
{QMI_QUIRK_SET_DTR(0x1546, 0x1312, 4)}, /* u-blox LARA-R6 01B */
{QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */
+ {QMI_QUIRK_SET_DTR(0x33f8, 0x0104, 4)}, /* Rolling RW101 RMNET */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 13d902462d8ec..bcdfbf61eb66b 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1464,8 +1464,6 @@ static netdev_features_t veth_fix_features(struct net_device *dev,
if (peer_priv->_xdp_prog)
features &= ~NETIF_F_GSO_SOFTWARE;
}
- if (priv->_xdp_prog)
- features |= NETIF_F_GRO;
return features;
}
@@ -1569,14 +1567,6 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
}
if (!old_prog) {
- if (!veth_gro_requested(dev)) {
- /* user-space did not require GRO, but adding
- * XDP is supposed to get GRO working
- */
- dev->features |= NETIF_F_GRO;
- netdev_features_change(dev);
- }
-
peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
peer->max_mtu = max_mtu;
}
@@ -1592,14 +1582,6 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (dev->flags & IFF_UP)
veth_disable_xdp(dev);
- /* if user-space did not require GRO, since adding XDP
- * enabled it, clear it now
- */
- if (!veth_gro_requested(dev)) {
- dev->features &= ~NETIF_F_GRO;
- netdev_features_change(dev);
- }
-
if (peer) {
peer->hw_features |= NETIF_F_GSO_SOFTWARE;
peer->max_mtu = ETH_MAX_MTU;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d7ce4a1011ea2..115c3c5414f2a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -80,6 +80,11 @@ struct virtnet_stat_desc {
size_t offset;
};
+struct virtnet_sq_free_stats {
+ u64 packets;
+ u64 bytes;
+};
+
struct virtnet_sq_stats {
struct u64_stats_sync syncp;
u64_stats_t packets;
@@ -304,6 +309,12 @@ struct virtnet_info {
/* Work struct for config space updates */
struct work_struct config_work;
+ /* Work struct for setting rx mode */
+ struct work_struct rx_mode_work;
+
+ /* OK to queue work setting RX mode? */
+ bool rx_mode_work_enabled;
+
/* Is the affinity hint set for virtqueues? */
bool affinity_hint_set;
@@ -366,6 +377,31 @@ static struct xdp_frame *ptr_to_xdp(void *ptr)
return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
+static void __free_old_xmit(struct send_queue *sq, bool in_napi,
+ struct virtnet_sq_free_stats *stats)
+{
+ unsigned int len;
+ void *ptr;
+
+ while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+ ++stats->packets;
+
+ if (!is_xdp_frame(ptr)) {
+ struct sk_buff *skb = ptr;
+
+ pr_debug("Sent skb %p\n", skb);
+
+ stats->bytes += skb->len;
+ napi_consume_skb(skb, in_napi);
+ } else {
+ struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+ stats->bytes += xdp_get_frame_len(frame);
+ xdp_return_frame(frame);
+ }
+ }
+}
+
/* Converting between virtqueue no. and kernel tx/rx queue no.
* 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
*/
@@ -447,6 +483,20 @@ static void disable_delayed_refill(struct virtnet_info *vi)
spin_unlock_bh(&vi->refill_lock);
}
+static void enable_rx_mode_work(struct virtnet_info *vi)
+{
+ rtnl_lock();
+ vi->rx_mode_work_enabled = true;
+ rtnl_unlock();
+}
+
+static void disable_rx_mode_work(struct virtnet_info *vi)
+{
+ rtnl_lock();
+ vi->rx_mode_work_enabled = false;
+ rtnl_unlock();
+}
+
static void virtqueue_napi_schedule(struct napi_struct *napi,
struct virtqueue *vq)
{
@@ -776,39 +826,21 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
virtnet_rq_free_buf(vi, rq, buf);
}
-static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
+static void free_old_xmit(struct send_queue *sq, bool in_napi)
{
- unsigned int len;
- unsigned int packets = 0;
- unsigned int bytes = 0;
- void *ptr;
-
- while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
- if (likely(!is_xdp_frame(ptr))) {
- struct sk_buff *skb = ptr;
-
- pr_debug("Sent skb %p\n", skb);
+ struct virtnet_sq_free_stats stats = {0};
- bytes += skb->len;
- napi_consume_skb(skb, in_napi);
- } else {
- struct xdp_frame *frame = ptr_to_xdp(ptr);
-
- bytes += xdp_get_frame_len(frame);
- xdp_return_frame(frame);
- }
- packets++;
- }
+ __free_old_xmit(sq, in_napi, &stats);
/* Avoid overhead when no packets have been processed
* happens when called speculatively from start_xmit.
*/
- if (!packets)
+ if (!stats.packets)
return;
u64_stats_update_begin(&sq->stats.syncp);
- u64_stats_add(&sq->stats.bytes, bytes);
- u64_stats_add(&sq->stats.packets, packets);
+ u64_stats_add(&sq->stats.bytes, stats.bytes);
+ u64_stats_add(&sq->stats.packets, stats.packets);
u64_stats_update_end(&sq->stats.syncp);
}
@@ -848,7 +880,7 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
virtqueue_napi_schedule(&sq->napi, sq->vq);
} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
- free_old_xmit_skbs(sq, false);
+ free_old_xmit(sq, false);
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
netif_start_subqueue(dev, qnum);
virtqueue_disable_cb(sq->vq);
@@ -947,15 +979,12 @@ static int virtnet_xdp_xmit(struct net_device *dev,
int n, struct xdp_frame **frames, u32 flags)
{
struct virtnet_info *vi = netdev_priv(dev);
+ struct virtnet_sq_free_stats stats = {0};
struct receive_queue *rq = vi->rq;
struct bpf_prog *xdp_prog;
struct send_queue *sq;
- unsigned int len;
- int packets = 0;
- int bytes = 0;
int nxmit = 0;
int kicks = 0;
- void *ptr;
int ret;
int i;
@@ -974,20 +1003,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
}
/* Free up any pending old buffers before queueing new ones. */
- while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
- if (likely(is_xdp_frame(ptr))) {
- struct xdp_frame *frame = ptr_to_xdp(ptr);
-
- bytes += xdp_get_frame_len(frame);
- xdp_return_frame(frame);
- } else {
- struct sk_buff *skb = ptr;
-
- bytes += skb->len;
- napi_consume_skb(skb, false);
- }
- packets++;
- }
+ __free_old_xmit(sq, false, &stats);
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
@@ -1007,8 +1023,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
}
out:
u64_stats_update_begin(&sq->stats.syncp);
- u64_stats_add(&sq->stats.bytes, bytes);
- u64_stats_add(&sq->stats.packets, packets);
+ u64_stats_add(&sq->stats.bytes, stats.bytes);
+ u64_stats_add(&sq->stats.packets, stats.packets);
u64_stats_add(&sq->stats.xdp_tx, n);
u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
u64_stats_add(&sq->stats.kicks, kicks);
@@ -2160,7 +2176,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
do {
virtqueue_disable_cb(sq->vq);
- free_old_xmit_skbs(sq, true);
+ free_old_xmit(sq, true);
} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
@@ -2308,7 +2324,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
txq = netdev_get_tx_queue(vi->dev, index);
__netif_tx_lock(txq, raw_smp_processor_id());
virtqueue_disable_cb(sq->vq);
- free_old_xmit_skbs(sq, true);
+ free_old_xmit(sq, true);
if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
netif_tx_wake_queue(txq);
@@ -2398,7 +2414,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
if (use_napi)
virtqueue_disable_cb(sq->vq);
- free_old_xmit_skbs(sq, false);
+ free_old_xmit(sq, false);
} while (use_napi && kick &&
unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
@@ -2550,8 +2566,10 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
* into the hypervisor, so the request should be handled immediately.
*/
while (!virtqueue_get_buf(vi->cvq, &tmp) &&
- !virtqueue_is_broken(vi->cvq))
+ !virtqueue_is_broken(vi->cvq)) {
+ cond_resched();
cpu_relax();
+ }
return vi->ctrl->status == VIRTIO_NET_OK;
}
@@ -2706,9 +2724,11 @@ static int virtnet_close(struct net_device *dev)
return 0;
}
-static void virtnet_set_rx_mode(struct net_device *dev)
+static void virtnet_rx_mode_work(struct work_struct *work)
{
- struct virtnet_info *vi = netdev_priv(dev);
+ struct virtnet_info *vi =
+ container_of(work, struct virtnet_info, rx_mode_work);
+ struct net_device *dev = vi->dev;
struct scatterlist sg[2];
struct virtio_net_ctrl_mac *mac_data;
struct netdev_hw_addr *ha;
@@ -2721,6 +2741,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
return;
+ rtnl_lock();
+
vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
@@ -2738,14 +2760,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
vi->ctrl->allmulti ? "en" : "dis");
+ netif_addr_lock_bh(dev);
+
uc_count = netdev_uc_count(dev);
mc_count = netdev_mc_count(dev);
/* MAC filter - use one buffer for both lists */
buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
(2 * sizeof(mac_data->entries)), GFP_ATOMIC);
mac_data = buf;
- if (!buf)
+ if (!buf) {
+ netif_addr_unlock_bh(dev);
+ rtnl_unlock();
return;
+ }
sg_init_table(sg, 2);
@@ -2766,6 +2793,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev)
memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
+ netif_addr_unlock_bh(dev);
+
sg_set_buf(&sg[1], mac_data,
sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
@@ -2773,9 +2802,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
+ rtnl_unlock();
+
kfree(buf);
}
+static void virtnet_set_rx_mode(struct net_device *dev)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ if (vi->rx_mode_work_enabled)
+ schedule_work(&vi->rx_mode_work);
+}
+
static int virtnet_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
@@ -3768,6 +3807,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct virtnet_info *vi = netdev_priv(dev);
+ bool update = false;
int i;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
@@ -3775,13 +3815,28 @@ static int virtnet_set_rxfh(struct net_device *dev,
return -EOPNOTSUPP;
if (rxfh->indir) {
+ if (!vi->has_rss)
+ return -EOPNOTSUPP;
+
for (i = 0; i < vi->rss_indir_table_size; ++i)
vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
+ update = true;
}
- if (rxfh->key)
+
+ if (rxfh->key) {
+ /* If either _F_HASH_REPORT or _F_RSS is negotiated, the
+ * device provides hash calculation capabilities, that is,
+ * hash_key is configured.
+ */
+ if (!vi->has_rss && !vi->has_rss_hash_report)
+ return -EOPNOTSUPP;
+
memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
+ update = true;
+ }
- virtnet_commit_rss_command(vi);
+ if (update)
+ virtnet_commit_rss_command(vi);
return 0;
}
@@ -3856,6 +3911,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
/* Make sure no work handler is accessing the device */
flush_work(&vi->config_work);
+ disable_rx_mode_work(vi);
+ flush_work(&vi->rx_mode_work);
netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
@@ -3878,6 +3935,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
virtio_device_ready(vdev);
enable_delayed_refill(vi);
+ enable_rx_mode_work(vi);
if (netif_running(vi->dev)) {
err = virtnet_open(vi->dev);
@@ -4676,6 +4734,7 @@ static int virtnet_probe(struct virtio_device *vdev)
vdev->priv = vi;
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
+ INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
spin_lock_init(&vi->refill_lock);
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
@@ -4686,13 +4745,15 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
vi->has_rss_hash_report = true;
- if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
vi->has_rss = true;
- if (vi->has_rss || vi->has_rss_hash_report) {
vi->rss_indir_table_size =
virtio_cread16(vdev, offsetof(struct virtio_net_config,
rss_max_indirection_table_length));
+ }
+
+ if (vi->has_rss || vi->has_rss_hash_report) {
vi->rss_key_size =
virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
@@ -4798,6 +4859,8 @@ static int virtnet_probe(struct virtio_device *vdev)
if (vi->has_rss || vi->has_rss_hash_report)
virtnet_init_default_rss(vi);
+ enable_rx_mode_work(vi);
+
/* serialize netdev register + virtio_device_ready() with ndo_open() */
rtnl_lock();
@@ -4895,6 +4958,8 @@ static void virtnet_remove(struct virtio_device *vdev)
/* Make sure no work handler is accessing the device. */
flush_work(&vi->config_work);
+ disable_rx_mode_work(vi);
+ flush_work(&vi->rx_mode_work);
unregister_netdev(vi->dev);
diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
index 80ddaff759d47..a6c787454a1ae 100644
--- a/drivers/net/vmxnet3/vmxnet3_xdp.c
+++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
@@ -382,12 +382,12 @@ vmxnet3_process_xdp(struct vmxnet3_adapter *adapter,
page = rbi->page;
dma_sync_single_for_cpu(&adapter->pdev->dev,
page_pool_get_dma_addr(page) +
- rq->page_pool->p.offset, rcd->len,
+ rq->page_pool->p.offset, rbi->len,
page_pool_get_dma_dir(rq->page_pool));
- xdp_init_buff(&xdp, rbi->len, &rq->xdp_rxq);
+ xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
- rcd->len, false);
+ rbi->len, false);
xdp_buff_clear_frags_flag(&xdp);
xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
diff --git a/drivers/net/wan/fsl_qmc_hdlc.c b/drivers/net/wan/fsl_qmc_hdlc.c
index 960371df470a0..f69b1f579a0ca 100644
--- a/drivers/net/wan/fsl_qmc_hdlc.c
+++ b/drivers/net/wan/fsl_qmc_hdlc.c
@@ -780,7 +780,7 @@ static const struct of_device_id qmc_hdlc_id_table[] = {
{ .compatible = "fsl,qmc-hdlc" },
{} /* sentinel */
};
-MODULE_DEVICE_TABLE(of, qmc_hdlc_driver);
+MODULE_DEVICE_TABLE(of, qmc_hdlc_id_table);
static struct platform_driver qmc_hdlc_driver = {
.driver = {
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index deb9636b0ecf8..3feb36ee5bfb4 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -237,7 +237,6 @@ static const struct net_device_ops netdev_ops = {
.ndo_open = wg_open,
.ndo_stop = wg_stop,
.ndo_start_xmit = wg_xmit,
- .ndo_get_stats64 = dev_get_tstats64
};
static void wg_destruct(struct net_device *dev)
@@ -262,7 +261,6 @@ static void wg_destruct(struct net_device *dev)
rcu_barrier(); /* Wait for all the peers to be actually freed. */
wg_ratelimiter_uninit();
memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
- free_percpu(dev->tstats);
kvfree(wg->index_hashtable);
kvfree(wg->peer_hashtable);
mutex_unlock(&wg->device_update_lock);
@@ -297,6 +295,7 @@ static void wg_setup(struct net_device *dev)
dev->hw_enc_features |= WG_NETDEV_FEATURES;
dev->mtu = ETH_DATA_LEN - overhead;
dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
SET_NETDEV_DEVTYPE(dev, &device_type);
@@ -331,14 +330,10 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
if (!wg->index_hashtable)
goto err_free_peer_hashtable;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- goto err_free_index_hashtable;
-
wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
if (!wg->handshake_receive_wq)
- goto err_free_tstats;
+ goto err_free_index_hashtable;
wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
@@ -397,8 +392,6 @@ err_destroy_handshake_send:
destroy_workqueue(wg->handshake_send_wq);
err_destroy_handshake_receive:
destroy_workqueue(wg->handshake_receive_wq);
-err_free_tstats:
- free_percpu(dev->tstats);
err_free_index_hashtable:
kvfree(wg->index_hashtable);
err_free_peer_hashtable:
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index e220d761b1f27..f7055180ba4aa 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -164,8 +164,8 @@ get_peer(struct wg_peer *peer, struct sk_buff *skb, struct dump_ctx *ctx)
if (!allowedips_node)
goto no_allowedips;
if (!ctx->allowedips_seq)
- ctx->allowedips_seq = peer->device->peer_allowedips.seq;
- else if (ctx->allowedips_seq != peer->device->peer_allowedips.seq)
+ ctx->allowedips_seq = ctx->wg->peer_allowedips.seq;
+ else if (ctx->allowedips_seq != ctx->wg->peer_allowedips.seq)
goto no_allowedips;
allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS);
@@ -255,17 +255,17 @@ static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (!peers_nest)
goto out;
ret = 0;
- /* If the last cursor was removed via list_del_init in peer_remove, then
+ lockdep_assert_held(&wg->device_update_lock);
+ /* If the last cursor was removed in peer_remove or peer_remove_all, then
* we just treat this the same as there being no more peers left. The
* reason is that seq_nr should indicate to userspace that this isn't a
* coherent dump anyway, so they'll try again.
*/
if (list_empty(&wg->peer_list) ||
- (ctx->next_peer && list_empty(&ctx->next_peer->peer_list))) {
+ (ctx->next_peer && ctx->next_peer->is_dead)) {
nla_nest_cancel(skb, peers_nest);
goto out;
}
- lockdep_assert_held(&wg->device_update_lock);
peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list);
list_for_each_entry_continue(peer, &wg->peer_list, peer_list) {
if (get_peer(peer, skb, ctx)) {
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index df275b4fccb6d..eb8851113654f 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -251,7 +251,7 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
if (unlikely(!READ_ONCE(keypair->receiving.is_valid) ||
wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) ||
- keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) {
+ READ_ONCE(keypair->receiving_counter.counter) >= REJECT_AFTER_MESSAGES)) {
WRITE_ONCE(keypair->receiving.is_valid, false);
return false;
}
@@ -318,7 +318,7 @@ static bool counter_validate(struct noise_replay_counter *counter, u64 their_cou
for (i = 1; i <= top; ++i)
counter->backtrack[(i + index_current) &
((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0;
- counter->counter = their_counter;
+ WRITE_ONCE(counter->counter, their_counter);
}
index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1;
@@ -463,7 +463,7 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget)
net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n",
peer->device->dev->name,
PACKET_CB(skb)->nonce,
- keypair->receiving_counter.counter);
+ READ_ONCE(keypair->receiving_counter.counter));
goto next;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index db6d7013df665..c3bdf433d8f7b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -3081,8 +3081,6 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
struct iwl_fw_dbg_params params = {0};
struct iwl_fwrt_dump_data *dump_data =
&fwrt->dump.wks[wk_idx].dump_data;
- u32 policy;
- u32 time_point;
if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
@@ -3113,13 +3111,16 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
- policy = le32_to_cpu(dump_data->trig->apply_policy);
- time_point = le32_to_cpu(dump_data->trig->time_point);
+ if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
+ u32 policy = le32_to_cpu(dump_data->trig->apply_policy);
+ u32 time_point = le32_to_cpu(dump_data->trig->time_point);
- if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
- IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
- iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
+ if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
+ IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
+ iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
+ }
}
+
if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
iwl_force_nmi(fwrt->trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 553c6fffc7c66..52518a47554e7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1260,15 +1260,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (IS_ERR_OR_NULL(vif))
return 1;
- if (ieee80211_vif_is_mld(vif) && vif->cfg.assoc) {
+ if (hweight16(vif->active_links) > 1) {
/*
- * Select the 'best' link. May need to revisit, it seems
- * better to not optimize for throughput but rather range,
- * reliability and power here - and select 2.4 GHz ...
+ * Select the 'best' link.
+ * May need to revisit, it seems better to not optimize
+ * for throughput but rather range, reliability and
+ * power here - and select 2.4 GHz ...
*/
- primary_link =
- iwl_mvm_mld_get_primary_link(mvm, vif,
- vif->active_links);
+ primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
+ vif->active_links);
if (WARN_ONCE(primary_link < 0, "no primary link in 0x%x\n",
vif->active_links))
@@ -1277,6 +1277,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
ret = ieee80211_set_active_links(vif, BIT(primary_link));
if (ret)
return ret;
+ } else if (vif->active_links) {
+ primary_link = __ffs(vif->active_links);
} else {
primary_link = 0;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 51b01f7528bee..7fe57ecd0682b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -748,7 +748,9 @@ void iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct dentry *dbgfs_dir = vif->debugfs_dir;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- char buf[100];
+ char buf[3 * 3 + 11 + (NL80211_WIPHY_NAME_MAXLEN + 1) +
+ (7 + IFNAMSIZ + 1) + 6 + 1];
+ char name[7 + IFNAMSIZ + 1];
/* this will happen in monitor mode */
if (!dbgfs_dir)
@@ -761,10 +763,11 @@ void iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* find
* netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
*/
- snprintf(buf, 100, "../../../%pd3/iwlmvm", dbgfs_dir);
+ snprintf(name, sizeof(name), "%pd", dbgfs_dir);
+ snprintf(buf, sizeof(buf), "../../../%pd3/iwlmvm", dbgfs_dir);
- mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
- mvm->debugfs_dir, buf);
+ mvmvif->dbgfs_slink =
+ debugfs_create_symlink(name, mvm->debugfs_dir, buf);
}
void iwl_mvm_vif_dbgfs_rm_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index f13f13e6b71af..9f69e04594e49 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -46,6 +46,27 @@ static int iwl_mvm_link_cmd_send(struct iwl_mvm *mvm,
return ret;
}
+int iwl_mvm_set_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[link_conf->link_id];
+
+ if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) {
+ link_info->fw_link_id = iwl_mvm_get_free_fw_link_id(mvm,
+ mvmvif);
+ if (link_info->fw_link_id >=
+ ARRAY_SIZE(mvm->link_id_to_link_conf))
+ return -EINVAL;
+
+ rcu_assign_pointer(mvm->link_id_to_link_conf[link_info->fw_link_id],
+ link_conf);
+ }
+
+ return 0;
+}
+
int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
{
@@ -55,19 +76,14 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_link_config_cmd cmd = {};
unsigned int cmd_id = WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD);
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1);
+ int ret;
if (WARN_ON_ONCE(!link_info))
return -EINVAL;
- if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) {
- link_info->fw_link_id = iwl_mvm_get_free_fw_link_id(mvm,
- mvmvif);
- if (link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf))
- return -EINVAL;
-
- rcu_assign_pointer(mvm->link_id_to_link_conf[link_info->fw_link_id],
- link_conf);
- }
+ ret = iwl_mvm_set_link_mapping(mvm, vif, link_conf);
+ if (ret)
+ return ret;
/* Update SF - Disable if needed. if this fails, SF might still be on
* while many macs are bound, which is forbidden - so fail the binding.
@@ -248,6 +264,24 @@ send_cmd:
return ret;
}
+int iwl_mvm_unset_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[link_conf->link_id];
+
+ /* mac80211 thought we had the link, but it was never configured */
+ if (WARN_ON(!link_info ||
+ link_info->fw_link_id >=
+ ARRAY_SIZE(mvm->link_id_to_link_conf)))
+ return -EINVAL;
+
+ RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
+ NULL);
+ return 0;
+}
+
int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
{
@@ -257,13 +291,10 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_link_config_cmd cmd = {};
int ret;
- /* mac80211 thought we have the link, but it was never configured */
- if (WARN_ON(!link_info ||
- link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf)))
+ ret = iwl_mvm_unset_link_mapping(mvm, vif, link_conf);
+ if (ret)
return 0;
- RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
- NULL);
cmd.link_id = cpu_to_le32(link_info->fw_link_id);
iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id);
link_info->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 1935630d3def0..8f4b063d6243e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -360,7 +360,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (mvm->mld_api_is_used && mvm->nvm_data->sku_cap_11be_enable &&
!iwlwifi_mod_params.disable_11ax &&
!iwlwifi_mod_params.disable_11be)
- hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+ hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
/* With MLD FW API, it tracks timing by itself,
* no need for any timing from the host
@@ -1577,8 +1577,14 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mvmvif->mvm = mvm;
/* the first link always points to the default one */
+ mvmvif->deflink.fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
+ mvmvif->deflink.active = 0;
mvmvif->link[0] = &mvmvif->deflink;
+ ret = iwl_mvm_set_link_mapping(mvm, vif, &vif->bss_conf);
+ if (ret)
+ goto out;
+
/*
* Not much to do here. The stack will not allow interface
* types or combinations that we didn't advertise, so we
@@ -1783,6 +1789,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mvm->p2p_device_vif = NULL;
}
+ iwl_mvm_unset_link_mapping(mvm, vif, &vif->bss_conf);
iwl_mvm_mac_ctxt_remove(mvm, vif);
RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index 1628bf55458fc..23e64a757cfe8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -855,10 +855,15 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id)
{
- int ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id);
+ int ret;
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON(sta_id == IWL_MVM_INVALID_STA))
+ return 0;
+
+ ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id);
+
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL);
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 44571114fb154..f0b24f00938bd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1916,11 +1916,15 @@ int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
u32 iwl_mvm_get_lmac_id(struct iwl_mvm *mvm, enum nl80211_band band);
/* Links */
+int iwl_mvm_set_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
u32 changes, bool active);
+int iwl_mvm_unset_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
index 2ecd32bed752f..045c862a8fc4f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
@@ -132,14 +132,18 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
if (ret)
return ERR_PTR(ret);
- if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size))
+ if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) !=
+ resp_size)) {
+ iwl_free_resp(&cmd);
return ERR_PTR(-EIO);
+ }
resp = kmemdup(cmd.resp_pkt->data, resp_size, GFP_KERNEL);
+ iwl_free_resp(&cmd);
+
if (!resp)
return ERR_PTR(-ENOMEM);
- iwl_free_resp(&cmd);
return resp;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 1484eaedf4529..ce8d83c771a70 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -236,21 +236,13 @@ static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
struct napi_struct *napi,
struct sk_buff *skb, int queue,
- struct ieee80211_sta *sta,
- struct ieee80211_link_sta *link_sta)
+ struct ieee80211_sta *sta)
{
if (unlikely(iwl_mvm_check_pn(mvm, skb, queue, sta))) {
kfree_skb(skb);
return;
}
- if (sta && sta->valid_links && link_sta) {
- struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
-
- rx_status->link_valid = 1;
- rx_status->link_id = link_sta->link_id;
- }
-
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}
@@ -588,7 +580,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
while ((skb = __skb_dequeue(skb_list))) {
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
reorder_buf->queue,
- sta, NULL /* FIXME */);
+ sta);
reorder_buf->num_stored--;
}
}
@@ -2213,6 +2205,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (IS_ERR(sta))
sta = NULL;
link_sta = rcu_dereference(mvm->fw_id_to_link_sta[id]);
+
+ if (sta && sta->valid_links && link_sta) {
+ rx_status->link_valid = 1;
+ rx_status->link_id = link_sta->link_id;
+ }
}
} else if (!is_multicast_ether_addr(hdr->addr2)) {
/*
@@ -2356,8 +2353,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
!(desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
rx_status->flag |= RX_FLAG_AMSDU_MORE;
- iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta,
- link_sta);
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
}
out:
rcu_read_unlock();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index a59d264a11c52..ad960faceb0d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -879,9 +879,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
unsigned int ver =
- iwl_fw_lookup_cmd_ver(mvm->fw,
- WIDE_ID(MAC_CONF_GROUP,
- SESSION_PROTECTION_CMD), 2);
+ iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ SESSION_PROTECTION_NOTIF, 2);
int id = le32_to_cpu(notif->mac_link_id);
struct ieee80211_vif *vif;
struct iwl_mvm_vif *mvmvif;
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
index 33973a60d0bf4..6229c785c8457 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
@@ -1589,9 +1589,9 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
return;
tfd_num = iwl_txq_get_cmd_index(txq, ssn);
- read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
spin_lock_bh(&txq->lock);
+ read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
if (!test_bit(txq_id, trans->txqs.queue_used)) {
IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 367459bd13457..708132d5be2a6 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -2233,7 +2233,7 @@ static void rtw8922a_btc_init_cfg(struct rtw89_dev *rtwdev)
* Shared-Ant && BTG-path:WL mask(0x55f), others:WL THRU(0x5ff)
*/
if (btc->ant_type == BTC_ANT_SHARED && btc->btg_pos == path)
- rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x5ff);
+ rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x55f);
else
rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x5ff);
diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.c b/drivers/net/wwan/t7xx/t7xx_cldma.c
index 9f43f256db1d0..f0a4783baf1f3 100644
--- a/drivers/net/wwan/t7xx/t7xx_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_cldma.c
@@ -106,7 +106,7 @@ bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno)
{
u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE;
- return ioread64(hw_info->ap_pdn_base + offset);
+ return ioread64_lo_hi(hw_info->ap_pdn_base + offset);
}
void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address,
@@ -117,7 +117,7 @@ void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qn
reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 :
hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0;
- iowrite64(address, reg + offset);
+ iowrite64_lo_hi(address, reg + offset);
}
void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index abc41a7089fa4..97163e1e5783e 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -137,8 +137,9 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
return -ENODEV;
}
- gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 +
- queue->index * sizeof(u64));
+ gpd_addr = ioread64_lo_hi(hw_info->ap_pdn_base +
+ REG_CLDMA_DL_CURRENT_ADDRL_0 +
+ queue->index * sizeof(u64));
if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
return 0;
@@ -316,8 +317,8 @@ static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
/* Check current processing TGPD, 64-bit address is in a table by Q index */
- ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
- queue->index * sizeof(u64));
+ ul_curr_addr = ioread64_lo_hi(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
+ queue->index * sizeof(u64));
if (req->gpd_addr != ul_curr_addr) {
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
diff --git a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
index 76da4c15e3de1..f071ec7ff23d5 100644
--- a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
+++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
@@ -75,7 +75,7 @@ static void t7xx_pcie_mac_atr_tables_dis(void __iomem *pbase, enum t7xx_atr_src_
for (i = 0; i < ATR_TABLE_NUM_PER_ATR; i++) {
offset = ATR_PORT_OFFSET * port + ATR_TABLE_OFFSET * i;
reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
- iowrite64(0, reg);
+ iowrite64_lo_hi(0, reg);
}
}
@@ -112,17 +112,17 @@ static int t7xx_pcie_mac_atr_cfg(struct t7xx_pci_dev *t7xx_dev, struct t7xx_atr_
reg = pbase + ATR_PCIE_WIN0_T0_TRSL_ADDR + offset;
value = cfg->trsl_addr & ATR_PCIE_WIN0_ADDR_ALGMT;
- iowrite64(value, reg);
+ iowrite64_lo_hi(value, reg);
reg = pbase + ATR_PCIE_WIN0_T0_TRSL_PARAM + offset;
iowrite32(cfg->trsl_id, reg);
reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
value = (cfg->src_addr & ATR_PCIE_WIN0_ADDR_ALGMT) | (atr_size << 1) | BIT(0);
- iowrite64(value, reg);
+ iowrite64_lo_hi(value, reg);
/* Ensure ATR is set */
- ioread64(reg);
+ ioread64_lo_hi(reg);
return 0;
}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index ad29f370034e4..8d2aee88526c6 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -285,6 +285,7 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
return NULL;
}
skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
+ skb_mark_for_recycle(skb);
/* Align ip header to a 16 bytes boundary */
skb_reserve(skb, NET_IP_ALIGN);
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
index 2eb5978bd79e1..cfbbe0713317f 100644
--- a/drivers/nfc/pn533/uart.c
+++ b/drivers/nfc/pn533/uart.c
@@ -203,8 +203,8 @@ static int pn532_uart_rx_is_frame(struct sk_buff *skb)
return 0;
}
-static ssize_t pn532_receive_buf(struct serdev_device *serdev,
- const u8 *data, size_t count)
+static size_t pn532_receive_buf(struct serdev_device *serdev,
+ const u8 *data, size_t count)
{
struct pn532_uart_phy *dev = serdev_device_get_drvdata(serdev);
size_t i;
diff --git a/drivers/nfc/s3fwrn5/uart.c b/drivers/nfc/s3fwrn5/uart.c
index 456d3947116c1..9c09c10c2a464 100644
--- a/drivers/nfc/s3fwrn5/uart.c
+++ b/drivers/nfc/s3fwrn5/uart.c
@@ -51,8 +51,8 @@ static const struct s3fwrn5_phy_ops uart_phy_ops = {
.write = s3fwrn82_uart_write,
};
-static ssize_t s3fwrn82_uart_read(struct serdev_device *serdev,
- const u8 *data, size_t count)
+static size_t s3fwrn82_uart_read(struct serdev_device *serdev,
+ const u8 *data, size_t count)
{
struct s3fwrn82_uart_phy *phy = serdev_device_get_drvdata(serdev);
size_t i;
diff --git a/drivers/ntb/core.c b/drivers/ntb/core.c
index 27dd93deff6e5..d702bee780826 100644
--- a/drivers/ntb/core.c
+++ b/drivers/ntb/core.c
@@ -100,6 +100,8 @@ EXPORT_SYMBOL(ntb_unregister_client);
int ntb_register_device(struct ntb_dev *ntb)
{
+ int ret;
+
if (!ntb)
return -EINVAL;
if (!ntb->pdev)
@@ -120,7 +122,11 @@ int ntb_register_device(struct ntb_dev *ntb)
ntb->ctx_ops = NULL;
spin_lock_init(&ntb->ctx_lock);
- return device_register(&ntb->dev);
+ ret = device_register(&ntb->dev);
+ if (ret)
+ put_device(&ntb->dev);
+
+ return ret;
}
EXPORT_SYMBOL(ntb_register_device);
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 77b06d54cc62e..fde3e17c836c8 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -24,7 +24,7 @@ config BLK_DEV_PMEM
select ND_PFN if NVDIMM_PFN
help
Memory ranges for PMEM are described by either an NFIT
- (NVDIMM Firmware Interface Table, see CONFIG_NFIT_ACPI), a
+ (NVDIMM Firmware Interface Table, see CONFIG_ACPI_NFIT), a
non-standard OEM-specific E820 memory type (type-12, see
CONFIG_X86_PMEM_LEGACY), or it is manually specified by the
'memmap=nn[KMG]!ss[KMG]' kernel command line (see
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index ef3d0f83318b9..508aed017ddc0 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -271,7 +271,7 @@ EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
static int nvdimm_bus_match(struct device *dev, struct device_driver *drv);
-static struct bus_type nvdimm_bus_type = {
+static const struct bus_type nvdimm_bus_type = {
.name = "nd",
.uevent = nvdimm_bus_uevent,
.match = nvdimm_bus_match,
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 8dcc10b6db5b1..598fe2e89bda4 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -562,18 +562,19 @@ static int pmem_attach_disk(struct device *dev,
dax_dev = alloc_dax(pmem, &pmem_dax_ops);
if (IS_ERR(dax_dev)) {
rc = PTR_ERR(dax_dev);
- goto out;
+ if (rc != -EOPNOTSUPP)
+ goto out;
+ } else {
+ set_dax_nocache(dax_dev);
+ set_dax_nomc(dax_dev);
+ if (is_nvdimm_sync(nd_region))
+ set_dax_synchronous(dax_dev);
+ pmem->dax_dev = dax_dev;
+ rc = dax_add_host(dax_dev, disk);
+ if (rc)
+ goto out_cleanup_dax;
+ dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
}
- set_dax_nocache(dax_dev);
- set_dax_nomc(dax_dev);
- if (is_nvdimm_sync(nd_region))
- set_dax_synchronous(dax_dev);
- rc = dax_add_host(dax_dev, disk);
- if (rc)
- goto out_cleanup_dax;
- dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
- pmem->dax_dev = dax_dev;
-
rc = device_add_disk(dev, disk, pmem_attribute_groups);
if (rc)
goto out_remove_host;
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index a480cdeac2883..dd6ec0865141a 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1532,7 +1532,7 @@ put_dev:
return ret;
}
-static int apple_nvme_remove(struct platform_device *pdev)
+static void apple_nvme_remove(struct platform_device *pdev)
{
struct apple_nvme *anv = platform_get_drvdata(pdev);
@@ -1547,8 +1547,6 @@ static int apple_nvme_remove(struct platform_device *pdev)
apple_rtkit_shutdown(anv->rtk);
apple_nvme_detach_genpd(anv);
-
- return 0;
}
static void apple_nvme_shutdown(struct platform_device *pdev)
@@ -1598,7 +1596,7 @@ static struct platform_driver apple_nvme_driver = {
.pm = pm_sleep_ptr(&apple_nvme_pm_ops),
},
.probe = apple_nvme_probe,
- .remove = apple_nvme_remove,
+ .remove_new = apple_nvme_remove,
.shutdown = apple_nvme_shutdown,
};
module_platform_driver(apple_nvme_driver);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 00864a6344709..27281a9a8951d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1807,9 +1807,6 @@ static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)
{
struct nvme_ctrl *ctrl = ns->ctrl;
- BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
- NVME_DSM_MAX_RANGES);
-
if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
lim->max_hw_discard_sectors =
nvme_lba_to_sect(ns->head, ctrl->dmrsl);
@@ -2079,6 +2076,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
bool vwc = ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT;
struct queue_limits lim;
struct nvme_id_ns_nvm *nvm = NULL;
+ struct nvme_zone_info zi = {};
struct nvme_id_ns *id;
sector_t capacity;
unsigned lbaf;
@@ -2091,9 +2089,10 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
if (id->ncap == 0) {
/* namespace not allocated or attached */
info->is_removed = true;
- ret = -ENODEV;
+ ret = -ENXIO;
goto out;
}
+ lbaf = nvme_lbaf_index(id->flbas);
if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) {
ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm);
@@ -2101,8 +2100,14 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
goto out;
}
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ ns->head->ids.csi == NVME_CSI_ZNS) {
+ ret = nvme_query_zone_info(ns, lbaf, &zi);
+ if (ret < 0)
+ goto out;
+ }
+
blk_mq_freeze_queue(ns->disk->queue);
- lbaf = nvme_lbaf_index(id->flbas);
ns->head->lba_shift = id->lbaf[lbaf].ds;
ns->head->nuse = le64_to_cpu(id->nuse);
capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
@@ -2115,13 +2120,8 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
capacity = 0;
nvme_config_discard(ns, &lim);
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
- ns->head->ids.csi == NVME_CSI_ZNS) {
- ret = nvme_update_zone_info(ns, lbaf, &lim);
- if (ret) {
- blk_mq_unfreeze_queue(ns->disk->queue);
- goto out;
- }
- }
+ ns->head->ids.csi == NVME_CSI_ZNS)
+ nvme_update_zone_info(ns, &lim, &zi);
ret = queue_limits_commit_update(ns->disk->queue, &lim);
if (ret) {
blk_mq_unfreeze_queue(ns->disk->queue);
@@ -2204,6 +2204,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
}
if (!ret && nvme_ns_head_multipath(ns->head)) {
+ struct queue_limits *ns_lim = &ns->disk->queue->limits;
struct queue_limits lim;
blk_mq_freeze_queue(ns->head->disk->queue);
@@ -2215,7 +2216,26 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
nvme_mpath_revalidate_paths(ns);
+ /*
+ * queue_limits mixes values that are the hardware limitations
+ * for bio splitting with what is the device configuration.
+ *
+ * For NVMe the device configuration can change after e.g. a
+ * Format command, and we really want to pick up the new format
+ * value here. But we must still stack the queue limits to the
+ * least common denominator for multipathing to split the bios
+ * properly.
+ *
+ * To work around this, we explicitly set the device
+ * configuration to those that we just queried, but only stack
+ * the splitting limits in to make sure we still obey possibly
+ * lower limitations of other controllers.
+ */
lim = queue_limits_start_update(ns->head->disk->queue);
+ lim.logical_block_size = ns_lim->logical_block_size;
+ lim.physical_block_size = ns_lim->physical_block_size;
+ lim.io_min = ns_lim->io_min;
+ lim.io_opt = ns_lim->io_opt;
queue_limits_stack_bdev(&lim, ns->disk->part0, 0,
ns->head->disk->disk_name);
ret = queue_limits_commit_update(ns->head->disk->queue, &lim);
@@ -3237,7 +3257,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
if (ctrl->shutdown_timeout != shutdown_timeout)
dev_info(ctrl->device,
- "Shutdown timeout set to %u seconds\n",
+ "D3 entry latency set to %u seconds\n",
ctrl->shutdown_timeout);
} else
ctrl->shutdown_timeout = shutdown_timeout;
@@ -4391,7 +4411,8 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
set->ops = ops;
set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
if (ctrl->ops->flags & NVME_F_FABRICS)
- set->reserved_tags = NVMF_RESERVED_TAGS;
+ /* Reserved for fabric connect and keep alive */
+ set->reserved_tags = 2;
set->numa_node = ctrl->numa_node;
set->flags = BLK_MQ_F_NO_SCHED;
if (ctrl->ops->flags & NVME_F_BLOCKING)
@@ -4460,7 +4481,8 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS)
set->reserved_tags = NVME_AQ_DEPTH;
else if (ctrl->ops->flags & NVME_F_FABRICS)
- set->reserved_tags = NVMF_RESERVED_TAGS;
+ /* Reserved for fabric connect */
+ set->reserved_tags = 1;
set->numa_node = ctrl->numa_node;
set->flags = BLK_MQ_F_SHOULD_MERGE;
if (ctrl->ops->flags & NVME_F_BLOCKING)
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 06cc54851b1be..37c974c38dcb0 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -19,13 +19,6 @@
#define NVMF_DEF_FAIL_FAST_TMO -1
/*
- * Reserved one command for internal usage. This command is used for sending
- * the connect command, as well as for the keep alive command on the admin
- * queue once live.
- */
-#define NVMF_RESERVED_TAGS 1
-
-/*
* Define a host as seen by the target. We allocate one at boot, but also
* allow the override it when creating controllers. This is both to provide
* persistence of the Host NQN over multiple boots, and to allow using
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 68a5d971657bb..a5b29e9ad342d 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2428,7 +2428,7 @@ nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
* controller. Called after last nvme_put_ctrl() call
*/
static void
-nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
+nvme_fc_free_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
@@ -3384,7 +3384,7 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
- .free_ctrl = nvme_fc_nvme_ctrl_freed,
+ .free_ctrl = nvme_fc_free_ctrl,
.submit_async_event = nvme_fc_submit_async_event,
.delete_ctrl = nvme_fc_delete_ctrl,
.get_address = nvmf_get_address,
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 24193fcb8bd58..d0ed64dc7380e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -1036,10 +1036,18 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
}
#endif /* CONFIG_NVME_MULTIPATH */
+struct nvme_zone_info {
+ u64 zone_size;
+ unsigned int max_open_zones;
+ unsigned int max_active_zones;
+};
+
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
- struct queue_limits *lim);
+int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
+ struct nvme_zone_info *zi);
+void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
+ struct nvme_zone_info *zi);
#ifdef CONFIG_BLK_DEV_ZONED
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmnd,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e6267a6aa3801..8e0bb9692685d 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3363,6 +3363,9 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_BOGUS_NID, },
{ PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
NVME_QUIRK_BOGUS_NID, },
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
index fc3eed00f9ff1..e05571b2a1b0c 100644
--- a/drivers/nvme/host/pr.c
+++ b/drivers/nvme/host/pr.c
@@ -97,8 +97,7 @@ static int nvme_sc_to_pr_err(int nvme_sc)
static int nvme_send_pr_command(struct block_device *bdev,
struct nvme_command *c, void *data, unsigned int data_len)
{
- if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
- nvme_disk_is_ns_head(bdev->bd_disk))
+ if (nvme_disk_is_ns_head(bdev->bd_disk))
return nvme_send_ns_head_pr_command(bdev, c, data, data_len);
return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data,
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index 09fcaa519e5bc..3c55f7edd1819 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -236,8 +236,7 @@ static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
struct block_device *bdev = disk->part0;
int ret;
- if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
- bdev->bd_disk->fops == &nvme_ns_head_ops)
+ if (nvme_disk_is_ns_head(bdev->bd_disk))
ret = ns_head_update_nuse(head);
else
ret = ns_update_nuse(bdev->bd_disk->private_data);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 3692b56cb58db..fdbcdcedcee99 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -37,6 +37,14 @@ module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
/*
+ * Use the unbound workqueue for nvme_tcp_wq, then we can set the cpu affinity
+ * from sysfs.
+ */
+static bool wq_unbound;
+module_param(wq_unbound, bool, 0644);
+MODULE_PARM_DESC(wq_unbound, "Use unbound workqueue for nvme-tcp IO context (default false)");
+
+/*
* TLS handshake timeout
*/
static int tls_handshake_timeout = 10;
@@ -1546,7 +1554,10 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
else if (nvme_tcp_poll_queue(queue))
n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
ctrl->io_queues[HCTX_TYPE_READ] - 1;
- queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+ if (wq_unbound)
+ queue->io_cpu = WORK_CPU_UNBOUND;
+ else
+ queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
}
static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
@@ -2785,6 +2796,8 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
static int __init nvme_tcp_init_module(void)
{
+ unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS;
+
BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
@@ -2794,8 +2807,10 @@ static int __init nvme_tcp_init_module(void)
BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);
- nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
- WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (wq_unbound)
+ wq_flags |= WQ_UNBOUND;
+
+ nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq", wq_flags, 0);
if (!nvme_tcp_wq)
return -ENOMEM;
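
The wq_unbound module parameter changes where nvme-tcp queue work runs: when set, the io_work is handed to the unbound workqueue (whose CPU mask can then be tuned through the workqueue sysfs interface enabled by WQ_SYSFS), otherwise each queue keeps a fixed CPU derived from its index as before. A minimal sketch of that selection, with pick_io_cpu() as an illustrative stand-in for the logic in nvme_tcp_set_queue_io_cpu():

#include <linux/cpumask.h>
#include <linux/workqueue.h>

static bool wq_unbound;	/* mirrors the new module parameter */

/* Illustrative helper: pick the CPU a queue's io_work should run on. */
static int pick_io_cpu(int queue_index)
{
	if (wq_unbound)
		return WORK_CPU_UNBOUND;	/* let the unbound workqueue place the work */

	/* bound case: spread queues across online CPUs, wrapping around */
	return cpumask_next_wrap(queue_index - 1, cpu_online_mask, -1, false);
}
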
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 1c36fcedea200..0288315f00502 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -119,7 +119,10 @@ static const char *nvme_trace_get_lba_status(struct trace_seq *p,
static const char *nvme_trace_admin_format_nvm(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
- u8 lbaf = cdw10[0] & 0xF;
+ /*
+ * lbafu(bit 13:12) is already in the upper 4 bits, lbafl: bit 03:00.
+ */
+ u8 lbaf = (cdw10[1] & 0x30) | (cdw10[0] & 0xF);
u8 mset = (cdw10[0] >> 4) & 0x1;
u8 pi = (cdw10[0] >> 5) & 0x7;
u8 pil = cdw10[1] & 0x1;
@@ -164,12 +167,27 @@ static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
{
+ static const char * const zsa_strs[] = {
+ [0x01] = "close zone",
+ [0x02] = "finish zone",
+ [0x03] = "open zone",
+ [0x04] = "reset zone",
+ [0x05] = "offline zone",
+ [0x10] = "set zone descriptor extension"
+ };
const char *ret = trace_seq_buffer_ptr(p);
u64 slba = get_unaligned_le64(cdw10);
+ const char *zsa_str;
u8 zsa = cdw10[12];
u8 all = cdw10[13];
- trace_seq_printf(p, "slba=%llu, zsa=%u, all=%u", slba, zsa, all);
+ if (zsa < ARRAY_SIZE(zsa_strs) && zsa_strs[zsa])
+ zsa_str = zsa_strs[zsa];
+ else
+ zsa_str = "reserved";
+
+ trace_seq_printf(p, "slba=%llu, zsa=%u:%s, all=%u",
+ slba, zsa, zsa_str, all);
trace_seq_putc(p, 0);
return ret;
@@ -177,15 +195,86 @@ static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
{
+ static const char * const zrasf_strs[] = {
+ [0x00] = "list all zones",
+ [0x01] = "list the zones in the ZSE: Empty state",
+ [0x02] = "list the zones in the ZSIO: Implicitly Opened state",
+ [0x03] = "list the zones in the ZSEO: Explicitly Opened state",
+ [0x04] = "list the zones in the ZSC: Closed state",
+ [0x05] = "list the zones in the ZSF: Full state",
+ [0x06] = "list the zones in the ZSRO: Read Only state",
+ [0x07] = "list the zones in the ZSO: Offline state",
+ [0x09] = "list the zones that have the zone attribute"
+ };
const char *ret = trace_seq_buffer_ptr(p);
u64 slba = get_unaligned_le64(cdw10);
u32 numd = get_unaligned_le32(cdw10 + 8);
u8 zra = cdw10[12];
u8 zrasf = cdw10[13];
+ const char *zrasf_str;
u8 pr = cdw10[14];
- trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u, pr=%u",
- slba, numd, zra, zrasf, pr);
+ if (zrasf < ARRAY_SIZE(zrasf_strs) && zrasf_strs[zrasf])
+ zrasf_str = zrasf_strs[zrasf];
+ else
+ zrasf_str = "reserved";
+
+ trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u:%s, pr=%u",
+ slba, numd, zra, zrasf, zrasf_str, pr);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_resv_reg(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 rrega = cdw10[0] & 0x7;
+ u8 iekey = (cdw10[0] >> 3) & 0x1;
+ u8 ptpl = (cdw10[3] >> 6) & 0x3;
+
+ trace_seq_printf(p, "rrega=%u, iekey=%u, ptpl=%u",
+ rrega, iekey, ptpl);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_resv_acq(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 racqa = cdw10[0] & 0x7;
+ u8 iekey = (cdw10[0] >> 3) & 0x1;
+ u8 rtype = cdw10[1];
+
+ trace_seq_printf(p, "racqa=%u, iekey=%u, rtype=%u",
+ racqa, iekey, rtype);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_resv_rel(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 rrela = cdw10[0] & 0x7;
+ u8 iekey = (cdw10[0] >> 3) & 0x1;
+ u8 rtype = cdw10[1];
+
+ trace_seq_printf(p, "rrela=%u, iekey=%u, rtype=%u",
+ rrela, iekey, rtype);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_resv_report(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u32 numd = get_unaligned_le32(cdw10);
+ u8 eds = cdw10[4] & 0x1;
+
+ trace_seq_printf(p, "numd=%u, eds=%u", numd, eds);
trace_seq_putc(p, 0);
return ret;
@@ -243,6 +332,14 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
return nvme_trace_zone_mgmt_send(p, cdw10);
case nvme_cmd_zone_mgmt_recv:
return nvme_trace_zone_mgmt_recv(p, cdw10);
+ case nvme_cmd_resv_register:
+ return nvme_trace_resv_reg(p, cdw10);
+ case nvme_cmd_resv_acquire:
+ return nvme_trace_resv_acq(p, cdw10);
+ case nvme_cmd_resv_release:
+ return nvme_trace_resv_rel(p, cdw10);
+ case nvme_cmd_resv_report:
+ return nvme_trace_resv_report(p, cdw10);
default:
return nvme_trace_common(p, cdw10);
}
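
The zone-management decoders above (and the matching nvmet ones later in this series) use the same lookup idiom: a sparse string array indexed by the protocol value, with holes and out-of-range codes falling back to "reserved". A self-contained sketch of that idiom (decode_zsa() is an illustrative name, not part of the driver):

#include <linux/types.h>
#include <linux/kernel.h>	/* ARRAY_SIZE() */

static const char * const zsa_strs[] = {
	[0x01] = "close zone",
	[0x02] = "finish zone",
	[0x03] = "open zone",
	[0x04] = "reset zone",
	[0x05] = "offline zone",
	[0x10] = "set zone descriptor extension",
};

/* Map a Zone Send Action code to a printable name, defaulting to "reserved". */
static const char *decode_zsa(u8 zsa)
{
	if (zsa < ARRAY_SIZE(zsa_strs) && zsa_strs[zsa])
		return zsa_strs[zsa];
	return "reserved";	/* covers the 0x06-0x0f hole and unknown codes */
}
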
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 722384bcc765c..77aa0f440a6d2 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -35,8 +35,8 @@ static int nvme_set_max_append(struct nvme_ctrl *ctrl)
return 0;
}
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
- struct queue_limits *lim)
+int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
+ struct nvme_zone_info *zi)
{
struct nvme_effects_log *log = ns->head->effects;
struct nvme_command c = { };
@@ -89,27 +89,34 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
goto free_data;
}
- ns->head->zsze =
- nvme_lba_to_sect(ns->head, le64_to_cpu(id->lbafe[lbaf].zsze));
- if (!is_power_of_2(ns->head->zsze)) {
+ zi->zone_size = le64_to_cpu(id->lbafe[lbaf].zsze);
+ if (!is_power_of_2(zi->zone_size)) {
dev_warn(ns->ctrl->device,
- "invalid zone size:%llu for namespace:%u\n",
- ns->head->zsze, ns->head->ns_id);
+ "invalid zone size: %llu for namespace: %u\n",
+ zi->zone_size, ns->head->ns_id);
status = -ENODEV;
goto free_data;
}
+ zi->max_open_zones = le32_to_cpu(id->mor) + 1;
+ zi->max_active_zones = le32_to_cpu(id->mar) + 1;
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
- lim->zoned = 1;
- lim->max_open_zones = le32_to_cpu(id->mor) + 1;
- lim->max_active_zones = le32_to_cpu(id->mar) + 1;
- lim->chunk_sectors = ns->head->zsze;
- lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
free_data:
kfree(id);
return status;
}
+void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
+ struct nvme_zone_info *zi)
+{
+ lim->zoned = 1;
+ lim->max_open_zones = zi->max_open_zones;
+ lim->max_active_zones = zi->max_active_zones;
+ lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
+ lim->chunk_sectors = ns->head->zsze =
+ nvme_lba_to_sect(ns->head, zi->zone_size);
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
+}
+
static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
unsigned int nr_zones, size_t *buflen)
{
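
Splitting nvme_update_zone_info() into a query and an apply step separates the Identify Namespace (ZNS) command, which can sleep and fail, from filling in the request-queue limits. Callers are expected to query into a struct nvme_zone_info first and only apply it once the limits are being committed. A hedged sketch of that calling pattern (example_configure_zns() is hypothetical, error handling trimmed):

/* Sketch of the intended caller flow after the query/apply split. */
static int example_configure_zns(struct nvme_ns *ns, unsigned int lbaf,
				 struct queue_limits *lim)
{
	struct nvme_zone_info zi;
	int ret;

	ret = nvme_query_zone_info(ns, lbaf, &zi);	/* issues Identify, may fail */
	if (ret)
		return ret;

	nvme_update_zone_info(ns, lim, &zi);	/* applies zsze/mor/mar to the limits */
	return 0;
}
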
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 77a6e817b3159..a2325330bf221 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1613,6 +1613,11 @@ static struct config_group *nvmet_subsys_make(struct config_group *group,
return ERR_PTR(-EINVAL);
}
+ if (sysfs_streq(name, nvmet_disc_subsys->subsysnqn)) {
+ pr_err("can't create subsystem using unique discovery NQN\n");
+ return ERR_PTR(-EINVAL);
+ }
+
subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
if (IS_ERR(subsys))
return ERR_CAST(subsys);
@@ -2159,7 +2164,49 @@ static const struct config_item_type nvmet_hosts_type = {
static struct config_group nvmet_hosts_group;
+static ssize_t nvmet_root_discovery_nqn_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n", nvmet_disc_subsys->subsysnqn);
+}
+
+static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct list_head *entry;
+ size_t len;
+
+ len = strcspn(page, "\n");
+ if (!len || len > NVMF_NQN_FIELD_LEN - 1)
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ list_for_each(entry, &nvmet_subsystems_group.cg_children) {
+ struct config_item *item =
+ container_of(entry, struct config_item, ci_entry);
+
+ if (!strncmp(config_item_name(item), page, len)) {
+ pr_err("duplicate NQN %s\n", config_item_name(item));
+ up_write(&nvmet_config_sem);
+ return -EINVAL;
+ }
+ }
+ memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN);
+ memcpy(nvmet_disc_subsys->subsysnqn, page, len);
+ up_write(&nvmet_config_sem);
+
+ return len;
+}
+
+CONFIGFS_ATTR(nvmet_root_, discovery_nqn);
+
+static struct configfs_attribute *nvmet_root_attrs[] = {
+ &nvmet_root_attr_discovery_nqn,
+ NULL,
+};
+
static const struct config_item_type nvmet_root_type = {
+ .ct_attrs = nvmet_root_attrs,
.ct_owner = THIS_MODULE,
};
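
The store handler trims the value at the first newline with strcspn(), so an NQN written from the shell with a trailing "\n" still compares and copies correctly. A tiny illustration of that idiom on its own (trimmed_len() is illustrative, maxlen stands in for NVMF_NQN_FIELD_LEN):

#include <linux/string.h>
#include <linux/types.h>

/* Illustrative: length of the value up to, but not including, a trailing '\n'. */
static ssize_t trimmed_len(const char *page, size_t maxlen)
{
	size_t len = strcspn(page, "\n");

	if (!len || len > maxlen - 1)	/* empty, or too long to fit with a NUL */
		return -EINVAL;
	return len;
}
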
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 6bbe4df0166ca..8860a3eb71ec8 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1541,6 +1541,13 @@ static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
}
down_read(&nvmet_config_sem);
+ if (!strncmp(nvmet_disc_subsys->subsysnqn, subsysnqn,
+ NVMF_NQN_SIZE)) {
+ if (kref_get_unless_zero(&nvmet_disc_subsys->ref)) {
+ up_read(&nvmet_config_sem);
+ return nvmet_disc_subsys;
+ }
+ }
list_for_each_entry(p, &port->subsystems, entry) {
if (!strncmp(p->subsys->subsysnqn, subsysnqn,
NVMF_NQN_SIZE)) {
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index fd229f310c931..337ee1cb09ae6 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1115,16 +1115,21 @@ nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
}
static bool
-nvmet_fc_assoc_exits(struct nvmet_fc_tgtport *tgtport, u64 association_id)
+nvmet_fc_assoc_exists(struct nvmet_fc_tgtport *tgtport, u64 association_id)
{
struct nvmet_fc_tgt_assoc *a;
+ bool found = false;
+ rcu_read_lock();
list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) {
- if (association_id == a->association_id)
- return true;
+ if (association_id == a->association_id) {
+ found = true;
+ break;
+ }
}
+ rcu_read_unlock();
- return false;
+ return found;
}
static struct nvmet_fc_tgt_assoc *
@@ -1164,13 +1169,11 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
ran = ran << BYTES_FOR_QID_SHIFT;
spin_lock_irqsave(&tgtport->lock, flags);
- rcu_read_lock();
- if (!nvmet_fc_assoc_exits(tgtport, ran)) {
+ if (!nvmet_fc_assoc_exists(tgtport, ran)) {
assoc->association_id = ran;
list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
done = true;
}
- rcu_read_unlock();
spin_unlock_irqrestore(&tgtport->lock, flags);
} while (!done);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index f2bb9d95ecf4b..5b8c63e74639d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -53,7 +53,6 @@ struct nvmet_rdma_cmd {
enum {
NVMET_RDMA_REQ_INLINE_DATA = (1 << 0),
- NVMET_RDMA_REQ_INVALIDATE_RKEY = (1 << 1),
};
struct nvmet_rdma_rsp {
@@ -722,7 +721,7 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
struct rdma_cm_id *cm_id = rsp->queue->cm_id;
struct ib_send_wr *first_wr;
- if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
+ if (rsp->invalidate_rkey) {
rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
} else {
@@ -905,10 +904,8 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
goto error_out;
rsp->n_rdma += ret;
- if (invalidate) {
+ if (invalidate)
rsp->invalidate_rkey = key;
- rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
- }
return 0;
@@ -1047,6 +1044,7 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
rsp->req.cmd = cmd->nvme_cmd;
rsp->req.port = queue->port;
rsp->n_rdma = 0;
+ rsp->invalidate_rkey = 0;
if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
unsigned long flags;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 2aa5762e9f50d..a5422e2c979ad 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -898,6 +898,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
pr_err("bad nvme-tcp pdu length (%d)\n",
le32_to_cpu(icreq->hdr.plen));
nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
}
if (icreq->pfv != NVME_TCP_PFV_1_0) {
diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
index 6ee1f3db81d04..8d1806a828879 100644
--- a/drivers/nvme/target/trace.c
+++ b/drivers/nvme/target/trace.c
@@ -119,6 +119,67 @@ const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p,
}
}
+static const char *nvmet_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
+{
+ static const char * const zsa_strs[] = {
+ [0x01] = "close zone",
+ [0x02] = "finish zone",
+ [0x03] = "open zone",
+ [0x04] = "reset zone",
+ [0x05] = "offline zone",
+ [0x10] = "set zone descriptor extension"
+ };
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ const char *zsa_str;
+ u8 zsa = cdw10[12];
+ u8 all = cdw10[13];
+
+ if (zsa < ARRAY_SIZE(zsa_strs) && zsa_strs[zsa])
+ zsa_str = zsa_strs[zsa];
+ else
+ zsa_str = "reserved";
+
+ trace_seq_printf(p, "slba=%llu, zsa=%u:%s, all=%u",
+ slba, zsa, zsa_str, all);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvmet_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
+{
+ static const char * const zrasf_strs[] = {
+ [0x00] = "list all zones",
+ [0x01] = "list the zones in the ZSE: Empty state",
+ [0x02] = "list the zones in the ZSIO: Implicitly Opened state",
+ [0x03] = "list the zones in the ZSEO: Explicitly Opened state",
+ [0x04] = "list the zones in the ZSC: Closed state",
+ [0x05] = "list the zones in the ZSF: Full state",
+ [0x06] = "list the zones in the ZSRO: Read Only state",
+ [0x07] = "list the zones in the ZSO: Offline state",
+ [0x09] = "list the zones that have the zone attribute"
+ };
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u32 numd = get_unaligned_le32(&cdw10[8]);
+ u8 zra = cdw10[12];
+ u8 zrasf = cdw10[13];
+ const char *zrasf_str;
+ u8 pr = cdw10[14];
+
+ if (zrasf < ARRAY_SIZE(zrasf_strs) && zrasf_strs[zrasf])
+ zrasf_str = zrasf_strs[zrasf];
+ else
+ zrasf_str = "reserved";
+
+ trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u:%s, pr=%u",
+ slba, numd, zra, zrasf, zrasf_str, pr);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
u8 opcode, u8 *cdw10)
{
@@ -126,9 +187,14 @@ const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
case nvme_cmd_read:
case nvme_cmd_write:
case nvme_cmd_write_zeroes:
+ case nvme_cmd_zone_append:
return nvmet_trace_read_write(p, cdw10);
case nvme_cmd_dsm:
return nvmet_trace_dsm(p, cdw10);
+ case nvme_cmd_zone_mgmt_send:
+ return nvmet_trace_zone_mgmt_send(p, cdw10);
+ case nvme_cmd_zone_mgmt_recv:
+ return nvmet_trace_zone_mgmt_recv(p, cdw10);
default:
return nvmet_trace_common(p, cdw10);
}
@@ -176,6 +242,34 @@ static const char *nvmet_trace_fabrics_property_get(struct trace_seq *p,
return ret;
}
+static const char *nvmet_trace_fabrics_auth_send(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 tl = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u",
+ spsp0, spsp1, secp, tl);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvmet_trace_fabrics_auth_receive(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 al = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, al=%u",
+ spsp0, spsp1, secp, al);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
static const char *nvmet_trace_fabrics_common(struct trace_seq *p, u8 *spc)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -195,6 +289,10 @@ const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p,
return nvmet_trace_fabrics_connect(p, spc);
case nvme_fabrics_type_property_get:
return nvmet_trace_fabrics_property_get(p, spc);
+ case nvme_fabrics_type_auth_send:
+ return nvmet_trace_fabrics_auth_send(p, spc);
+ case nvme_fabrics_type_auth_receive:
+ return nvmet_trace_fabrics_auth_receive(p, spc);
default:
return nvmet_trace_fabrics_common(p, spc);
}
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index eb357ac2e54a2..2c6b99402df8a 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -807,6 +807,11 @@ static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_nod
if (addr && len == (2 * sizeof(u32))) {
info.bit_offset = be32_to_cpup(addr++);
info.nbits = be32_to_cpup(addr);
+ if (info.bit_offset >= BITS_PER_BYTE || info.nbits < 1) {
+ dev_err(dev, "nvmem: invalid bits on %pOF\n", child);
+ of_node_put(child);
+ return -EINVAL;
+ }
}
info.np = of_node_get(child);
diff --git a/drivers/nvmem/layouts.c b/drivers/nvmem/layouts.c
index 6a6aa58369ff2..8b5e2de138eb5 100644
--- a/drivers/nvmem/layouts.c
+++ b/drivers/nvmem/layouts.c
@@ -45,7 +45,7 @@ static void nvmem_layout_bus_remove(struct device *dev)
return drv->remove(layout);
}
-static struct bus_type nvmem_layout_bus_type = {
+static const struct bus_type nvmem_layout_bus_type = {
.name = "nvmem-layout",
.match = nvmem_layout_bus_match,
.probe = nvmem_layout_bus_probe,
diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
index b922df99f9bc3..33678d0af2c24 100644
--- a/drivers/nvmem/meson-efuse.c
+++ b/drivers/nvmem/meson-efuse.c
@@ -47,7 +47,6 @@ static int meson_efuse_probe(struct platform_device *pdev)
struct nvmem_config *econfig;
struct clk *clk;
unsigned int size;
- int ret;
sm_np = of_parse_phandle(pdev->dev.of_node, "secure-monitor", 0);
if (!sm_np) {
@@ -60,27 +59,9 @@ static int meson_efuse_probe(struct platform_device *pdev)
if (!fw)
return -EPROBE_DEFER;
- clk = devm_clk_get(dev, NULL);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get efuse gate");
- return ret;
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "failed to enable gate");
- return ret;
- }
-
- ret = devm_add_action_or_reset(dev,
- (void(*)(void *))clk_disable_unprepare,
- clk);
- if (ret) {
- dev_err(dev, "failed to add disable callback");
- return ret;
- }
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to get efuse gate");
if (meson_sm_call(fw, SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) {
dev_err(dev, "failed to get max user");
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
index 84f05b40a4112..9caf046673410 100644
--- a/drivers/nvmem/mtk-efuse.c
+++ b/drivers/nvmem/mtk-efuse.c
@@ -68,6 +68,7 @@ static int mtk_efuse_probe(struct platform_device *pdev)
struct nvmem_config econfig = {};
struct mtk_efuse_priv *priv;
const struct mtk_efuse_pdata *pdata;
+ struct platform_device *socinfo;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -88,8 +89,16 @@ static int mtk_efuse_probe(struct platform_device *pdev)
if (pdata->uses_post_processing)
econfig.fixup_dt_cell_info = &mtk_efuse_fixup_dt_cell_info;
nvmem = devm_nvmem_register(dev, &econfig);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
- return PTR_ERR_OR_ZERO(nvmem);
+ socinfo = platform_device_register_data(&pdev->dev, "mtk-socinfo",
+ PLATFORM_DEVID_AUTO, NULL, 0);
+ if (IS_ERR(socinfo))
+ dev_info(dev, "MediaTek SoC Information will be unavailable\n");
+
+ platform_set_drvdata(pdev, socinfo);
+ return 0;
}
static const struct mtk_efuse_pdata mtk_mt8186_efuse_pdata = {
@@ -108,8 +117,17 @@ static const struct of_device_id mtk_efuse_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mtk_efuse_of_match);
+static void mtk_efuse_remove(struct platform_device *pdev)
+{
+ struct platform_device *socinfo = platform_get_drvdata(pdev);
+
+ if (!IS_ERR_OR_NULL(socinfo))
+ platform_device_unregister(socinfo);
+}
+
static struct platform_driver mtk_efuse_driver = {
.probe = mtk_efuse_probe,
+ .remove_new = mtk_efuse_remove,
.driver = {
.name = "mediatek,efuse",
.of_match_table = mtk_efuse_of_match,
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
index 7f15aa89a9d09..8682adaacd692 100644
--- a/drivers/nvmem/zynqmp_nvmem.c
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019 Xilinx, Inc.
+ * Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc.
*/
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
@@ -10,36 +12,190 @@
#include <linux/firmware/xlnx-zynqmp.h>
#define SILICON_REVISION_MASK 0xF
+#define P_USER_0_64_UPPER_MASK GENMASK(31, 16)
+#define P_USER_127_LOWER_4_BIT_MASK GENMASK(3, 0)
+#define WORD_INBYTES 4
+#define SOC_VER_SIZE 0x4
+#define EFUSE_MEMORY_SIZE 0x177
+#define UNUSED_SPACE 0x8
+#define ZYNQMP_NVMEM_SIZE (SOC_VER_SIZE + UNUSED_SPACE + \
+ EFUSE_MEMORY_SIZE)
+#define SOC_VERSION_OFFSET 0x0
+#define EFUSE_START_OFFSET 0xC
+#define EFUSE_END_OFFSET 0xFC
+#define EFUSE_PUF_START_OFFSET 0x100
+#define EFUSE_PUF_MID_OFFSET 0x140
+#define EFUSE_PUF_END_OFFSET 0x17F
+#define EFUSE_NOT_ENABLED 29
-struct zynqmp_nvmem_data {
- struct device *dev;
- struct nvmem_device *nvmem;
+/*
+ * efuse access type
+ */
+enum efuse_access {
+ EFUSE_READ = 0,
+ EFUSE_WRITE
+};
+
+/**
+ * struct xilinx_efuse - the basic structure
+ * @src: address of the buffer holding the data to be written/read
+ * @size: read/write word count
+ * @offset: read/write offset
+ * @flag: 0 - represents an efuse read, 1 - represents an efuse write
+ * @pufuserfuse: 0 - represents non-puf efuses, offset is used for read/write
+ *              1 - represents the puf user fuse row number
+ *
+ * This structure stores all the details required to
+ * read/write efuse memory.
+ */
+struct xilinx_efuse {
+ u64 src;
+ u32 size;
+ u32 offset;
+ enum efuse_access flag;
+ u32 pufuserfuse;
};
-static int zynqmp_nvmem_read(void *context, unsigned int offset,
- void *val, size_t bytes)
+static int zynqmp_efuse_access(void *context, unsigned int offset,
+ void *val, size_t bytes, enum efuse_access flag,
+ unsigned int pufflag)
{
+ struct device *dev = context;
+ struct xilinx_efuse *efuse;
+ dma_addr_t dma_addr;
+ dma_addr_t dma_buf;
+ size_t words = bytes / WORD_INBYTES;
int ret;
- int idcode, version;
- struct zynqmp_nvmem_data *priv = context;
-
- ret = zynqmp_pm_get_chipid(&idcode, &version);
- if (ret < 0)
- return ret;
+ int value;
+ char *data;
+
+ if (bytes % WORD_INBYTES != 0) {
+ dev_err(dev, "Bytes requested should be word aligned\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (pufflag == 0 && offset % WORD_INBYTES) {
+ dev_err(dev, "Offset requested should be word aligned\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (pufflag == 1 && flag == EFUSE_WRITE) {
+ memcpy(&value, val, bytes);
+ if ((offset == EFUSE_PUF_START_OFFSET ||
+ offset == EFUSE_PUF_MID_OFFSET) &&
+ value & P_USER_0_64_UPPER_MASK) {
+ dev_err(dev, "Only lower 4 bytes are allowed to be programmed in P_USER_0 & P_USER_64\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (offset == EFUSE_PUF_END_OFFSET &&
+ (value & P_USER_127_LOWER_4_BIT_MASK)) {
+ dev_err(dev, "Only MSB 28 bits are allowed to be programmed for P_USER_127\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ efuse = dma_alloc_coherent(dev, sizeof(struct xilinx_efuse),
+ &dma_addr, GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
- dev_dbg(priv->dev, "Read chipid val %x %x\n", idcode, version);
- *(int *)val = version & SILICON_REVISION_MASK;
+ data = dma_alloc_coherent(dev, sizeof(bytes),
+ &dma_buf, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto efuse_data_fail;
+ }
+
+ if (flag == EFUSE_WRITE) {
+ memcpy(data, val, bytes);
+ efuse->flag = EFUSE_WRITE;
+ } else {
+ efuse->flag = EFUSE_READ;
+ }
+
+ efuse->src = dma_buf;
+ efuse->size = words;
+ efuse->offset = offset;
+ efuse->pufuserfuse = pufflag;
+
+ zynqmp_pm_efuse_access(dma_addr, (u32 *)&ret);
+ if (ret != 0) {
+ if (ret == EFUSE_NOT_ENABLED) {
+ dev_err(dev, "efuse access is not enabled\n");
+ ret = -EOPNOTSUPP;
+ } else {
+ dev_err(dev, "Error in efuse read %x\n", ret);
+ ret = -EPERM;
+ }
+ goto efuse_access_err;
+ }
+
+ if (flag == EFUSE_READ)
+ memcpy(val, data, bytes);
+efuse_access_err:
+ dma_free_coherent(dev, sizeof(bytes),
+ data, dma_buf);
+efuse_data_fail:
+ dma_free_coherent(dev, sizeof(struct xilinx_efuse),
+ efuse, dma_addr);
+
+ return ret;
+}
- return 0;
+static int zynqmp_nvmem_read(void *context, unsigned int offset, void *val, size_t bytes)
+{
+ struct device *dev = context;
+ int ret;
+ int pufflag = 0;
+ int idcode;
+ int version;
+
+ if (offset >= EFUSE_PUF_START_OFFSET && offset <= EFUSE_PUF_END_OFFSET)
+ pufflag = 1;
+
+ switch (offset) {
+ /* SoC version offset is zero */
+ case SOC_VERSION_OFFSET:
+ if (bytes != SOC_VER_SIZE)
+ return -EOPNOTSUPP;
+
+ ret = zynqmp_pm_get_chipid((u32 *)&idcode, (u32 *)&version);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "Read chipid val %x %x\n", idcode, version);
+ *(int *)val = version & SILICON_REVISION_MASK;
+ break;
+ /* Efuse offset starts from 0xc */
+ case EFUSE_START_OFFSET ... EFUSE_END_OFFSET:
+ case EFUSE_PUF_START_OFFSET ... EFUSE_PUF_END_OFFSET:
+ ret = zynqmp_efuse_access(context, offset, val,
+ bytes, EFUSE_READ, pufflag);
+ break;
+ default:
+ *(u32 *)val = 0xDEADBEEF;
+ ret = 0;
+ break;
+ }
+
+ return ret;
}
-static struct nvmem_config econfig = {
- .name = "zynqmp-nvmem",
- .owner = THIS_MODULE,
- .word_size = 1,
- .size = 1,
- .read_only = true,
-};
+static int zynqmp_nvmem_write(void *context,
+ unsigned int offset, void *val, size_t bytes)
+{
+ int pufflag = 0;
+
+ if (offset < EFUSE_START_OFFSET || offset > EFUSE_PUF_END_OFFSET)
+ return -EOPNOTSUPP;
+
+ if (offset >= EFUSE_PUF_START_OFFSET && offset <= EFUSE_PUF_END_OFFSET)
+ pufflag = 1;
+
+ return zynqmp_efuse_access(context, offset,
+ val, bytes, EFUSE_WRITE, pufflag);
+}
static const struct of_device_id zynqmp_nvmem_match[] = {
{ .compatible = "xlnx,zynqmp-nvmem-fw", },
@@ -50,21 +206,18 @@ MODULE_DEVICE_TABLE(of, zynqmp_nvmem_match);
static int zynqmp_nvmem_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct zynqmp_nvmem_data *priv;
-
- priv = devm_kzalloc(dev, sizeof(struct zynqmp_nvmem_data), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ struct nvmem_config econfig = {};
- priv->dev = dev;
+ econfig.name = "zynqmp-nvmem";
+ econfig.owner = THIS_MODULE;
+ econfig.word_size = 1;
+ econfig.size = ZYNQMP_NVMEM_SIZE;
econfig.dev = dev;
econfig.add_legacy_fixed_of_cells = true;
econfig.reg_read = zynqmp_nvmem_read;
- econfig.priv = priv;
-
- priv->nvmem = devm_nvmem_register(dev, &econfig);
+ econfig.reg_write = zynqmp_nvmem_write;
- return PTR_ERR_OR_ZERO(priv->nvmem);
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &econfig));
}
static struct platform_driver zynqmp_nvmem_driver = {
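
With this change the zynqmp nvmem device exposes one flat address space and dispatches on the offset: offset 0 holds the SoC version word, 0xC-0xFC the regular efuse array, 0x100-0x17F the PUF user fuses, and everything else reads back the 0xDEADBEEF filler. A small sketch of that dispatch (classify_offset() and the enum are illustrative only; the constants mirror the ones defined above):

enum zynqmp_region { REG_SOC_VERSION, REG_EFUSE, REG_PUF, REG_UNUSED };

/* Illustrative: classify an nvmem offset the way zynqmp_nvmem_read() does. */
static enum zynqmp_region classify_offset(unsigned int offset)
{
	if (offset == 0x0)			/* SOC_VERSION_OFFSET */
		return REG_SOC_VERSION;
	if (offset >= 0xC && offset <= 0xFC)	/* EFUSE_START_OFFSET..EFUSE_END_OFFSET */
		return REG_EFUSE;
	if (offset >= 0x100 && offset <= 0x17F)	/* EFUSE_PUF_START..END */
		return REG_PUF;
	return REG_UNUSED;			/* zynqmp_nvmem_read() returns 0xDEADBEEF here */
}
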
diff --git a/drivers/of/.kunitconfig b/drivers/of/.kunitconfig
new file mode 100644
index 0000000000000..5a8fee11978c4
--- /dev/null
+++ b/drivers/of/.kunitconfig
@@ -0,0 +1,3 @@
+CONFIG_KUNIT=y
+CONFIG_OF=y
+CONFIG_OF_KUNIT_TEST=y
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index da9826accb1b5..dd726c7056bf1 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -14,9 +14,8 @@ if OF
config OF_UNITTEST
bool "Device Tree runtime unit tests"
- depends on !SPARC
+ depends on OF_EARLY_FLATTREE
select IRQ_DOMAIN
- select OF_EARLY_FLATTREE
select OF_RESOLVE
help
This option builds in test cases for the device tree infrastructure
@@ -37,6 +36,15 @@ config OF_UNITTEST
If unsure, say N here. This option is not safe to enable.
+config OF_KUNIT_TEST
+ tristate "Devicetree KUnit Test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This option builds KUnit unit tests for device tree infrastructure.
+
+ If unsure, say N here, but this option is safe to enable.
+
config OF_ALL_DTBS
bool "Build all Device Tree Blobs"
depends on COMPILE_TEST
@@ -54,7 +62,7 @@ config OF_FLATTREE
select CRC32
config OF_EARLY_FLATTREE
- bool
+ def_bool OF && !(SPARC || ALPHA || HEXAGON || M68K || PARISC || S390)
select DMA_DECLARE_COHERENT if HAS_DMA && HAS_IOMEM
select OF_FLATTREE
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index eff624854575c..251d335321482 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -2,7 +2,7 @@
obj-y = base.o cpu.o device.o module.o platform.o property.o
obj-$(CONFIG_OF_KOBJ) += kobj.o
obj-$(CONFIG_OF_DYNAMIC) += dynamic.o
-obj-$(CONFIG_OF_FLATTREE) += fdt.o
+obj-$(CONFIG_OF_FLATTREE) += fdt.o empty_root.dtb.o
obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o
obj-$(CONFIG_OF_PROMTREE) += pdt.o
obj-$(CONFIG_OF_ADDRESS) += address.o
@@ -19,4 +19,6 @@ obj-y += kexec.o
endif
endif
+obj-$(CONFIG_OF_KUNIT_TEST) += of_test.o
+
obj-$(CONFIG_OF_UNITTEST) += unittest-data/
diff --git a/drivers/of/base.c b/drivers/of/base.c
index b0ad8fc06e80e..8856c67c466ac 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -395,25 +395,57 @@ int of_device_compatible_match(const struct device_node *device,
EXPORT_SYMBOL_GPL(of_device_compatible_match);
/**
- * of_machine_is_compatible - Test root of device tree for a given compatible value
- * @compat: compatible string to look for in root node's compatible property.
+ * of_machine_compatible_match - Test root of device tree against a compatible array
+ * @compats: NULL terminated array of compatible strings to look for in root node's compatible property.
*
- * Return: A positive integer if the root node has the given value in its
+ * Returns true if the root node has any of the given compatible values in its
* compatible property.
*/
-int of_machine_is_compatible(const char *compat)
+bool of_machine_compatible_match(const char *const *compats)
{
struct device_node *root;
int rc = 0;
root = of_find_node_by_path("/");
if (root) {
- rc = of_device_is_compatible(root, compat);
+ rc = of_device_compatible_match(root, compats);
of_node_put(root);
}
- return rc;
+
+ return rc != 0;
+}
+EXPORT_SYMBOL(of_machine_compatible_match);
+
+static bool __of_device_is_status(const struct device_node *device,
+ const char * const*strings)
+{
+ const char *status;
+ int statlen;
+
+ if (!device)
+ return false;
+
+ status = __of_get_property(device, "status", &statlen);
+ if (status == NULL)
+ return false;
+
+ if (statlen > 0) {
+ while (*strings) {
+ unsigned int len = strlen(*strings);
+
+ if ((*strings)[len - 1] == '-') {
+ if (!strncmp(status, *strings, len))
+ return true;
+ } else {
+ if (!strcmp(status, *strings))
+ return true;
+ }
+ strings++;
+ }
+ }
+
+ return false;
}
-EXPORT_SYMBOL(of_machine_is_compatible);
/**
* __of_device_is_available - check if a device is available for use
@@ -425,22 +457,27 @@ EXPORT_SYMBOL(of_machine_is_compatible);
*/
static bool __of_device_is_available(const struct device_node *device)
{
- const char *status;
- int statlen;
+ static const char * const ok[] = {"okay", "ok", NULL};
if (!device)
return false;
- status = __of_get_property(device, "status", &statlen);
- if (status == NULL)
- return true;
+ return !__of_get_property(device, "status", NULL) ||
+ __of_device_is_status(device, ok);
+}
- if (statlen > 0) {
- if (!strcmp(status, "okay") || !strcmp(status, "ok"))
- return true;
- }
+/**
+ * __of_device_is_reserved - check if a device is reserved
+ *
+ * @device: Node to check for availability, with locks already held
+ *
+ * Return: True if the status property is set to "reserved", false otherwise
+ */
+static bool __of_device_is_reserved(const struct device_node *device)
+{
+ static const char * const reserved[] = {"reserved", NULL};
- return false;
+ return __of_device_is_status(device, reserved);
}
/**
@@ -474,16 +511,9 @@ EXPORT_SYMBOL(of_device_is_available);
*/
static bool __of_device_is_fail(const struct device_node *device)
{
- const char *status;
+ static const char * const fail[] = {"fail", "fail-", NULL};
- if (!device)
- return false;
-
- status = __of_get_property(device, "status", NULL);
- if (status == NULL)
- return false;
-
- return !strcmp(status, "fail") || !strncmp(status, "fail-", 5);
+ return __of_device_is_status(device, fail);
}
/**
@@ -597,16 +627,9 @@ struct device_node *of_get_next_child(const struct device_node *node,
}
EXPORT_SYMBOL(of_get_next_child);
-/**
- * of_get_next_available_child - Find the next available child node
- * @node: parent node
- * @prev: previous child of the parent node, or NULL to get first
- *
- * This function is like of_get_next_child(), except that it
- * automatically skips any disabled nodes (i.e. status = "disabled").
- */
-struct device_node *of_get_next_available_child(const struct device_node *node,
- struct device_node *prev)
+static struct device_node *of_get_next_status_child(const struct device_node *node,
+ struct device_node *prev,
+ bool (*checker)(const struct device_node *))
{
struct device_node *next;
unsigned long flags;
@@ -617,7 +640,7 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
raw_spin_lock_irqsave(&devtree_lock, flags);
next = prev ? prev->sibling : node->child;
for (; next; next = next->sibling) {
- if (!__of_device_is_available(next))
+ if (!checker(next))
continue;
if (of_node_get(next))
break;
@@ -626,9 +649,38 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return next;
}
+
+/**
+ * of_get_next_available_child - Find the next available child node
+ * @node: parent node
+ * @prev: previous child of the parent node, or NULL to get first
+ *
+ * This function is like of_get_next_child(), except that it
+ * automatically skips any disabled nodes (i.e. status = "disabled").
+ */
+struct device_node *of_get_next_available_child(const struct device_node *node,
+ struct device_node *prev)
+{
+ return of_get_next_status_child(node, prev, __of_device_is_available);
+}
EXPORT_SYMBOL(of_get_next_available_child);
/**
+ * of_get_next_reserved_child - Find the next reserved child node
+ * @node: parent node
+ * @prev: previous child of the parent node, or NULL to get first
+ *
+ * This function is like of_get_next_child(), except that it
+ * returns only those child nodes whose status property is set to "reserved".
+ */
+struct device_node *of_get_next_reserved_child(const struct device_node *node,
+ struct device_node *prev)
+{
+ return of_get_next_status_child(node, prev, __of_device_is_reserved);
+}
+EXPORT_SYMBOL(of_get_next_reserved_child);
+
+/**
* of_get_next_cpu_node - Iterate on cpu nodes
* @prev: previous child of the /cpus node, or NULL to get first
*
@@ -1345,8 +1397,8 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
char *pass_name = NULL;
struct device_node *cur, *new = NULL;
const __be32 *map, *mask, *pass;
- static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
- static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = 0 };
+ static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
+ static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(0) };
__be32 initial_match_array[MAX_PHANDLE_ARGS];
const __be32 *match_array = initial_match_array;
int i, ret, map_len, match;
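
of_machine_compatible_match() takes a NULL-terminated array and replaces a chain of of_machine_is_compatible() calls when any one of several root compatibles should match. A hedged usage sketch (the compatible strings are hypothetical, and the declaration is assumed to be exported via <linux/of.h> by this series):

#include <linux/of.h>

static bool board_needs_quirk(void)
{
	/* NULL-terminated list, as of_machine_compatible_match() expects */
	static const char * const quirk_boards[] = {
		"vendor,board-a",
		"vendor,board-b",
		NULL
	};

	return of_machine_compatible_match(quirk_boards);
}
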
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 3bf27052832f3..4d57a4e341054 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) "OF: " fmt
+#include <linux/device.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -667,6 +668,17 @@ void of_changeset_destroy(struct of_changeset *ocs)
{
struct of_changeset_entry *ce, *cen;
+ /*
+ * When a device is deleted, the device links to/from it are also queued
+ * for deletion. Until these device links are freed, the devices
+ * themselves aren't freed. If the device being deleted is due to an
+ * overlay change, this device might be holding a reference to a device
+ * node that will be freed. So, wait until all already pending device
+ * links are deleted before freeing a device node. This ensures we don't
+ * free any device node that has a non-zero reference count.
+ */
+ device_link_wait_removal();
+
list_for_each_entry_safe_reverse(ce, cen, &ocs->entries, node)
__of_changeset_entry_destroy(ce);
}
diff --git a/drivers/of/empty_root.dts b/drivers/of/empty_root.dts
new file mode 100644
index 0000000000000..cf9e97a60f482
--- /dev/null
+++ b/drivers/of/empty_root.dts
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/dts-v1/;
+
+/ {
+
+};
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index bf502ba8da958..a8a04f27915b9 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) "OF: fdt: " fmt
+#include <linux/acpi.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/kernel.h>
@@ -16,7 +17,6 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -33,6 +33,13 @@
#include "of_private.h"
/*
+ * __dtb_empty_root_begin[] and __dtb_empty_root_end[] magically created by
+ * cmd_dt_S_dtb in scripts/Makefile.lib
+ */
+extern uint8_t __dtb_empty_root_begin[];
+extern uint8_t __dtb_empty_root_end[];
+
+/*
* of_fdt_limit_memory - limit the number of regions in the /memory node
* @limit: maximum entries
*
@@ -80,7 +87,7 @@ void __init of_fdt_limit_memory(int limit)
}
}
-static bool of_fdt_device_is_available(const void *blob, unsigned long node)
+bool of_fdt_device_is_available(const void *blob, unsigned long node)
{
const char *status = fdt_getprop(blob, node, "status", NULL);
@@ -476,126 +483,6 @@ void *initial_boot_params __ro_after_init;
static u32 of_fdt_crc32;
-static int __init early_init_dt_reserve_memory(phys_addr_t base,
- phys_addr_t size, bool nomap)
-{
- if (nomap) {
- /*
- * If the memory is already reserved (by another region), we
- * should not allow it to be marked nomap, but don't worry
- * if the region isn't memory as it won't be mapped.
- */
- if (memblock_overlaps_region(&memblock.memory, base, size) &&
- memblock_is_region_reserved(base, size))
- return -EBUSY;
-
- return memblock_mark_nomap(base, size);
- }
- return memblock_reserve(base, size);
-}
-
-/*
- * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
- */
-static int __init __reserved_mem_reserve_reg(unsigned long node,
- const char *uname)
-{
- int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
- phys_addr_t base, size;
- int len;
- const __be32 *prop;
- int first = 1;
- bool nomap;
-
- prop = of_get_flat_dt_prop(node, "reg", &len);
- if (!prop)
- return -ENOENT;
-
- if (len && len % t_len != 0) {
- pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
- uname);
- return -EINVAL;
- }
-
- nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
-
- while (len >= t_len) {
- base = dt_mem_next_cell(dt_root_addr_cells, &prop);
- size = dt_mem_next_cell(dt_root_size_cells, &prop);
-
- if (size &&
- early_init_dt_reserve_memory(base, size, nomap) == 0)
- pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
- uname, &base, (unsigned long)(size / SZ_1M));
- else
- pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
- uname, &base, (unsigned long)(size / SZ_1M));
-
- len -= t_len;
- if (first) {
- fdt_reserved_mem_save_node(node, uname, base, size);
- first = 0;
- }
- }
- return 0;
-}
-
-/*
- * __reserved_mem_check_root() - check if #size-cells, #address-cells provided
- * in /reserved-memory matches the values supported by the current implementation,
- * also check if ranges property has been provided
- */
-static int __init __reserved_mem_check_root(unsigned long node)
-{
- const __be32 *prop;
-
- prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
- if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
- return -EINVAL;
-
- prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
- if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
- return -EINVAL;
-
- prop = of_get_flat_dt_prop(node, "ranges", NULL);
- if (!prop)
- return -EINVAL;
- return 0;
-}
-
-/*
- * fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
- */
-static int __init fdt_scan_reserved_mem(void)
-{
- int node, child;
- const void *fdt = initial_boot_params;
-
- node = fdt_path_offset(fdt, "/reserved-memory");
- if (node < 0)
- return -ENODEV;
-
- if (__reserved_mem_check_root(node) != 0) {
- pr_err("Reserved memory: unsupported node format, ignoring\n");
- return -EINVAL;
- }
-
- fdt_for_each_subnode(child, fdt, node) {
- const char *uname;
- int err;
-
- if (!of_fdt_device_is_available(fdt, child))
- continue;
-
- uname = fdt_get_name(fdt, child, NULL);
-
- err = __reserved_mem_reserve_reg(child, uname);
- if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL))
- fdt_reserved_mem_save_node(child, uname, 0, 0);
- }
- return 0;
-}
-
/*
* fdt_reserve_elfcorehdr() - reserves memory for elf core header
*
@@ -1318,6 +1205,21 @@ bool __init early_init_dt_scan(void *params)
return true;
}
+static void *__init copy_device_tree(void *fdt)
+{
+ int size;
+ void *dt;
+
+ size = fdt_totalsize(fdt);
+ dt = early_init_dt_alloc_memory_arch(size,
+ roundup_pow_of_two(FDT_V17_SIZE));
+
+ if (dt)
+ memcpy(dt, fdt, size);
+
+ return dt;
+}
+
/**
* unflatten_device_tree - create tree of device_nodes from flat blob
*
@@ -1328,7 +1230,29 @@ bool __init early_init_dt_scan(void *params)
*/
void __init unflatten_device_tree(void)
{
- __unflatten_device_tree(initial_boot_params, NULL, &of_root,
+ void *fdt = initial_boot_params;
+
+ /* Don't use the bootloader provided DTB if ACPI is enabled */
+ if (!acpi_disabled)
+ fdt = NULL;
+
+ /*
+ * Populate an empty root node when ACPI is enabled or bootloader
+ * doesn't provide one.
+ */
+ if (!fdt) {
+ fdt = (void *) __dtb_empty_root_begin;
+ /* fdt_totalsize() will be used for copy size */
+ if (fdt_totalsize(fdt) >
+ __dtb_empty_root_end - __dtb_empty_root_begin) {
+ pr_err("invalid size in dtb_empty_root\n");
+ return;
+ }
+ of_fdt_crc32 = crc32_be(~0, fdt, fdt_totalsize(fdt));
+ fdt = copy_device_tree(fdt);
+ }
+
+ __unflatten_device_tree(fdt, NULL, &of_root,
early_init_dt_alloc_memory_arch, false);
/* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
@@ -1350,22 +1274,9 @@ void __init unflatten_device_tree(void)
*/
void __init unflatten_and_copy_device_tree(void)
{
- int size;
- void *dt;
+ if (initial_boot_params)
+ initial_boot_params = copy_device_tree(initial_boot_params);
- if (!initial_boot_params) {
- pr_warn("No valid device tree found, continuing without\n");
- return;
- }
-
- size = fdt_totalsize(initial_boot_params);
- dt = early_init_dt_alloc_memory_arch(size,
- roundup_pow_of_two(FDT_V17_SIZE));
-
- if (dt) {
- memcpy(dt, initial_boot_params, size);
- initial_boot_params = dt;
- }
unflatten_device_tree();
}
diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c
index 68278340cecfe..9ccde2fd77cbf 100644
--- a/drivers/of/kexec.c
+++ b/drivers/of/kexec.c
@@ -395,6 +395,7 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image,
if (ret)
goto out;
+#ifdef CONFIG_CRASH_DUMP
/* add linux,usable-memory-range */
ret = fdt_appendprop_addrrange(fdt, 0, chosen_node,
"linux,usable-memory-range", crashk_res.start,
@@ -410,6 +411,7 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image,
if (ret)
goto out;
}
+#endif
}
/* add bootargs */
diff --git a/drivers/of/module.c b/drivers/of/module.c
index 0e8aa974f0f2b..f58e624953a20 100644
--- a/drivers/of/module.c
+++ b/drivers/of/module.c
@@ -16,6 +16,14 @@ ssize_t of_modalias(const struct device_node *np, char *str, ssize_t len)
ssize_t csize;
ssize_t tsize;
+ /*
+ * Prevent a kernel oops in vsnprintf() -- it only allows passing a
+ * NULL ptr when the length is also 0. Also filter out the negative
+ * lengths...
+ */
+ if ((len > 0 && !str) || len < 0)
+ return -EINVAL;
+
/* Name & Type */
/* %p eats all alphanum characters, so %c must be used here */
csize = snprintf(str, len, "of:N%pOFn%c%s", np, 'T',
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index f38397c7b5824..485483524b7f1 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -175,8 +175,9 @@ static inline struct device_node *__of_get_dma_parent(const struct device_node *
}
#endif
+int fdt_scan_reserved_mem(void);
void fdt_init_reserved_mem(void);
-void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
- phys_addr_t base, phys_addr_t size);
+
+bool of_fdt_device_is_available(const void *blob, unsigned long node);
#endif /* _LINUX_OF_PRIVATE_H */
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 7ec94cfcbddb1..8236ecae29533 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -12,6 +12,7 @@
#define pr_fmt(fmt) "OF: reserved mem: " fmt
#include <linux/err.h>
+#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
@@ -58,8 +59,8 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
/*
* fdt_reserved_mem_save_node() - save fdt node for second pass initialization
*/
-void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
- phys_addr_t base, phys_addr_t size)
+static void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
+ phys_addr_t base, phys_addr_t size)
{
struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
@@ -77,6 +78,126 @@ void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
return;
}
+static int __init early_init_dt_reserve_memory(phys_addr_t base,
+ phys_addr_t size, bool nomap)
+{
+ if (nomap) {
+ /*
+ * If the memory is already reserved (by another region), we
+ * should not allow it to be marked nomap, but don't worry
+ * if the region isn't memory as it won't be mapped.
+ */
+ if (memblock_overlaps_region(&memblock.memory, base, size) &&
+ memblock_is_region_reserved(base, size))
+ return -EBUSY;
+
+ return memblock_mark_nomap(base, size);
+ }
+ return memblock_reserve(base, size);
+}
+
+/*
+ * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
+ */
+static int __init __reserved_mem_reserve_reg(unsigned long node,
+ const char *uname)
+{
+ int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
+ phys_addr_t base, size;
+ int len;
+ const __be32 *prop;
+ int first = 1;
+ bool nomap;
+
+ prop = of_get_flat_dt_prop(node, "reg", &len);
+ if (!prop)
+ return -ENOENT;
+
+ if (len && len % t_len != 0) {
+ pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
+ uname);
+ return -EINVAL;
+ }
+
+ nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+
+ while (len >= t_len) {
+ base = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ size = dt_mem_next_cell(dt_root_size_cells, &prop);
+
+ if (size &&
+ early_init_dt_reserve_memory(base, size, nomap) == 0)
+ pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
+ uname, &base, (unsigned long)(size / SZ_1M));
+ else
+ pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
+ uname, &base, (unsigned long)(size / SZ_1M));
+
+ len -= t_len;
+ if (first) {
+ fdt_reserved_mem_save_node(node, uname, base, size);
+ first = 0;
+ }
+ }
+ return 0;
+}
+
+/*
+ * __reserved_mem_check_root() - check if #size-cells, #address-cells provided
+ * in /reserved-memory matches the values supported by the current implementation,
+ * also check if ranges property has been provided
+ */
+static int __init __reserved_mem_check_root(unsigned long node)
+{
+ const __be32 *prop;
+
+ prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
+ if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
+ return -EINVAL;
+
+ prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
+ if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
+ return -EINVAL;
+
+ prop = of_get_flat_dt_prop(node, "ranges", NULL);
+ if (!prop)
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
+ */
+int __init fdt_scan_reserved_mem(void)
+{
+ int node, child;
+ const void *fdt = initial_boot_params;
+
+ node = fdt_path_offset(fdt, "/reserved-memory");
+ if (node < 0)
+ return -ENODEV;
+
+ if (__reserved_mem_check_root(node) != 0) {
+ pr_err("Reserved memory: unsupported node format, ignoring\n");
+ return -EINVAL;
+ }
+
+ fdt_for_each_subnode(child, fdt, node) {
+ const char *uname;
+ int err;
+
+ if (!of_fdt_device_is_available(fdt, child))
+ continue;
+
+ uname = fdt_get_name(fdt, child, NULL);
+
+ err = __reserved_mem_reserve_reg(child, uname);
+ if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL))
+ fdt_reserved_mem_save_node(child, uname, 0, 0);
+ }
+ return 0;
+}
+
/*
* __reserved_mem_alloc_in_range() - allocate reserved memory described with
* 'alloc-ranges'. Choose bottom-up/top-down depending on nearby existing
diff --git a/drivers/of/of_test.c b/drivers/of/of_test.c
new file mode 100644
index 0000000000000..a9301d293f014
--- /dev/null
+++ b/drivers/of/of_test.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit tests for OF APIs
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <kunit/test.h>
+
+/*
+ * Test that the root node "/" can be found by path.
+ */
+static void of_dtb_root_node_found_by_path(struct kunit *test)
+{
+ struct device_node *np;
+
+ np = of_find_node_by_path("/");
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, np);
+ of_node_put(np);
+}
+
+/*
+ * Test that the 'of_root' global variable is always populated when DT code is
+ * enabled. Remove this test once of_root is removed from global access.
+ */
+static void of_dtb_root_node_populates_of_root(struct kunit *test)
+{
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, of_root);
+}
+
+static struct kunit_case of_dtb_test_cases[] = {
+ KUNIT_CASE(of_dtb_root_node_found_by_path),
+ KUNIT_CASE(of_dtb_root_node_populates_of_root),
+ {}
+};
+
+static int of_dtb_test_init(struct kunit *test)
+{
+ if (!IS_ENABLED(CONFIG_OF_EARLY_FLATTREE))
+ kunit_skip(test, "requires CONFIG_OF_EARLY_FLATTREE");
+
+ return 0;
+}
+
+/*
+ * Test suite to confirm a DTB is loaded.
+ */
+static struct kunit_suite of_dtb_suite = {
+ .name = "of_dtb",
+ .test_cases = of_dtb_test_cases,
+ .init = of_dtb_test_init,
+};
+
+kunit_test_suites(
+ &of_dtb_suite,
+);
+MODULE_LICENSE("GPL");
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index b7708a06dc784..389d4ea6bfc15 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -166,6 +166,8 @@ static struct platform_device *of_platform_device_create_pdata(
{
struct platform_device *dev;
+ pr_debug("create platform device: %pOF\n", np);
+
if (!of_device_is_available(np) ||
of_node_test_and_set_flag(np, OF_POPULATED))
return NULL;
@@ -510,9 +512,6 @@ static int __init of_platform_default_populate_init(void)
device_links_supplier_sync_state_pause();
- if (!of_have_populated_dt())
- return -ENODEV;
-
if (IS_ENABLED(CONFIG_PPC)) {
struct device_node *boot_display = NULL;
struct platform_device *dev;
diff --git a/drivers/of/property.c b/drivers/of/property.c
index fa8cd33be1312..a6358ee99b74b 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -665,7 +665,7 @@ struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
of_node_put(node);
if (!port) {
- pr_err("graph: no port node found in %pOF\n", parent);
+ pr_debug("graph: no port node found in %pOF\n", parent);
return NULL;
}
} else {
@@ -814,10 +814,16 @@ struct device_node *of_graph_get_remote_port(const struct device_node *node)
}
EXPORT_SYMBOL(of_graph_get_remote_port);
-int of_graph_get_endpoint_count(const struct device_node *np)
+/**
+ * of_graph_get_endpoint_count() - get the number of endpoints in a device node
+ * @np: parent device node containing ports and endpoints
+ *
+ * Return: count of endpoints in this device node
+ */
+unsigned int of_graph_get_endpoint_count(const struct device_node *np)
{
struct device_node *endpoint;
- int num = 0;
+ unsigned int num = 0;
for_each_endpoint_of_node(np, endpoint)
num++;
@@ -1066,7 +1072,8 @@ of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
}
static void of_link_to_phandle(struct device_node *con_np,
- struct device_node *sup_np)
+ struct device_node *sup_np,
+ u8 flags)
{
struct device_node *tmp_np = of_node_get(sup_np);
@@ -1085,7 +1092,7 @@ static void of_link_to_phandle(struct device_node *con_np,
tmp_np = of_get_next_parent(tmp_np);
}
- fwnode_link_add(of_fwnode_handle(con_np), of_fwnode_handle(sup_np));
+ fwnode_link_add(of_fwnode_handle(con_np), of_fwnode_handle(sup_np), flags);
}
/**
@@ -1198,6 +1205,8 @@ static struct device_node *parse_##fname(struct device_node *np, \
* to a struct device, implement this ops so fw_devlink can use it
* to find the true consumer.
* @optional: Describes whether a supplier is mandatory or not
+ * @fwlink_flags: Optional fwnode link flags to use when creating a fwnode link
+ * for this property.
*
* Returns:
* parse_prop() return values are
@@ -1210,6 +1219,7 @@ struct supplier_bindings {
const char *prop_name, int index);
struct device_node *(*get_con_dev)(struct device_node *np);
bool optional;
+ u8 fwlink_flags;
};
DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells")
@@ -1217,6 +1227,7 @@ DEFINE_SIMPLE_PROP(interconnects, "interconnects", "#interconnect-cells")
DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells")
DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells")
DEFINE_SIMPLE_PROP(io_channels, "io-channels", "#io-channel-cells")
+DEFINE_SIMPLE_PROP(io_backends, "io-backends", "#io-backend-cells")
DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL)
DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
@@ -1240,6 +1251,7 @@ DEFINE_SIMPLE_PROP(leds, "leds", NULL)
DEFINE_SIMPLE_PROP(backlight, "backlight", NULL)
DEFINE_SIMPLE_PROP(panel, "panel", NULL)
DEFINE_SIMPLE_PROP(msi_parent, "msi-parent", "#msi-cells")
+DEFINE_SIMPLE_PROP(post_init_providers, "post-init-providers", NULL)
DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
@@ -1317,6 +1329,7 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_iommu_maps, .optional = true, },
{ .parse_prop = parse_mboxes, },
{ .parse_prop = parse_io_channels, },
+ { .parse_prop = parse_io_backends, },
{ .parse_prop = parse_interrupt_parent, },
{ .parse_prop = parse_dmas, .optional = true, },
{ .parse_prop = parse_power_domains, },
@@ -1349,6 +1362,10 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_regulators, },
{ .parse_prop = parse_gpio, },
{ .parse_prop = parse_gpios, },
+ {
+ .parse_prop = parse_post_init_providers,
+ .fwlink_flags = FWLINK_FLAG_IGNORE,
+ },
{}
};
@@ -1393,7 +1410,7 @@ static int of_link_property(struct device_node *con_np, const char *prop_name)
: of_node_get(con_np);
matched = true;
i++;
- of_link_to_phandle(con_dev_np, phandle);
+ of_link_to_phandle(con_dev_np, phandle, s->fwlink_flags);
of_node_put(phandle);
of_node_put(con_dev_np);
}
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index d7593bde2d02f..6b5c36b6a7586 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -239,27 +239,22 @@ static void __init of_unittest_dynamic(void)
static int __init of_unittest_check_node_linkage(struct device_node *np)
{
- struct device_node *child;
int count = 0, rc;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
if (child->parent != np) {
pr_err("Child node %pOFn links to wrong parent %pOFn\n",
child, np);
- rc = -EINVAL;
- goto put_child;
+ return -EINVAL;
}
rc = of_unittest_check_node_linkage(child);
if (rc < 0)
- goto put_child;
+ return rc;
count += rc;
}
return count + 1;
-put_child:
- of_node_put(child);
- return rc;
}
static void __init of_unittest_check_tree_linkage(void)
@@ -1750,20 +1745,16 @@ static int __init unittest_data_add(void)
return -EINVAL;
}
+ /* attach the sub-tree to live tree */
if (!of_root) {
- of_root = unittest_data_node;
- for_each_of_allnodes(np)
- __of_attach_node_sysfs(np);
- of_aliases = of_find_node_by_path("/aliases");
- of_chosen = of_find_node_by_path("/chosen");
- of_overlay_mutex_unlock();
- return 0;
+ pr_warn("%s: no live tree to attach sub-tree\n", __func__);
+ kfree(unittest_data);
+ return -ENODEV;
}
EXPECT_BEGIN(KERN_INFO,
"Duplicate name in testcase-data, renamed to \"duplicate-name#1\"");
- /* attach the sub-tree to live tree */
np = unittest_data_node->child;
while (np) {
struct device_node *next = np->sibling;
@@ -4093,10 +4084,6 @@ static int __init of_unittest(void)
add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
/* adding data for unittest */
-
- if (IS_ENABLED(CONFIG_UML))
- unittest_unflatten_overlay_base();
-
res = unittest_data_add();
if (res)
return res;
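The of_unittest_check_node_linkage() change above leans on the scoped child iterator, which drops the child's reference automatically when the loop variable goes out of scope, so early returns no longer need an explicit of_node_put(). A minimal sketch of the pattern, assuming a hypothetical count_available_children() helper that is not part of this patch:

#include <linux/of.h>

static int count_available_children(struct device_node *np)
{
	int n = 0;

	for_each_child_of_node_scoped(np, child) {
		/* returning here is safe: the child reference is put automatically */
		if (!of_device_is_available(child))
			return -ENODEV;
		n++;
	}

	return n;
}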
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 1f75d2416001e..b49cb010a4d80 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -308,15 +308,13 @@ static int hppa_led_generic_probe(struct platform_device *pdev,
return 0;
}
-static int platform_led_remove(struct platform_device *pdev)
+static void platform_led_remove(struct platform_device *pdev)
{
struct hppa_drvdata *p = platform_get_drvdata(pdev);
int i;
for (i = 0; i < NUM_LEDS_PER_BOARD; i++)
led_classdev_unregister(&p->leds[i].led_cdev);
-
- return 0;
}
static struct led_type mainboard_led_types[NUM_LEDS_PER_BOARD] = {
@@ -371,7 +369,7 @@ MODULE_ALIAS("platform:platform-leds");
static struct platform_driver hppa_mainboard_led_driver = {
.probe = platform_led_probe,
- .remove = platform_led_remove,
+ .remove_new = platform_led_remove,
.driver = {
.name = "platform-leds",
},
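This and the two parport conversions that follow share one shape: .remove_new takes a callback that returns void, since the driver core ignored the old int return value anyway. A hedged sketch with made-up foo_* names (none of them are from this series):

#include <linux/module.h>
#include <linux/platform_device.h>

struct foo_priv { int enabled; };	/* placeholder private data */

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	platform_set_drvdata(pdev, p);
	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	struct foo_priv *p = platform_get_drvdata(pdev);

	p->enabled = 0;	/* quiesce the (imaginary) hardware; nothing to return */
}

static struct platform_driver foo_driver = {
	.probe		= foo_probe,
	.remove_new	= foo_remove,
	.driver		= { .name = "foo" },
};
module_platform_driver(foo_driver);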
diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c
index 84d5701d606ce..e6dc857aac3fe 100644
--- a/drivers/parport/parport_amiga.c
+++ b/drivers/parport/parport_amiga.c
@@ -219,7 +219,7 @@ out_irq:
return err;
}
-static int __exit amiga_parallel_remove(struct platform_device *pdev)
+static void __exit amiga_parallel_remove(struct platform_device *pdev)
{
struct parport *port = platform_get_drvdata(pdev);
@@ -227,11 +227,10 @@ static int __exit amiga_parallel_remove(struct platform_device *pdev)
if (port->irq != PARPORT_IRQ_NONE)
free_irq(IRQ_AMIGA_CIAA_FLG, port);
parport_put_port(port);
- return 0;
}
static struct platform_driver amiga_parallel_driver = {
- .remove = __exit_p(amiga_parallel_remove),
+ .remove_new = __exit_p(amiga_parallel_remove),
.driver = {
.name = "amiga-parallel",
},
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index c81d4d86994b1..949236a7a27c9 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -334,7 +334,7 @@ out_unmap:
return err;
}
-static int bpp_remove(struct platform_device *op)
+static void bpp_remove(struct platform_device *op)
{
struct parport *p = dev_get_drvdata(&op->dev);
struct parport_operations *ops = p->ops;
@@ -351,8 +351,6 @@ static int bpp_remove(struct platform_device *op)
kfree(ops);
dev_set_drvdata(&op->dev, NULL);
-
- return 0;
}
static const struct of_device_id bpp_match[] = {
@@ -370,7 +368,7 @@ static struct platform_driver bpp_sbus_driver = {
.of_match_table = bpp_match,
},
.probe = bpp_probe,
- .remove = bpp_remove,
+ .remove_new = bpp_remove,
};
module_platform_driver(bpp_sbus_driver);
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 74147262625bc..d35001589d88f 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -13,6 +13,11 @@ config FORCE_PCI
select HAVE_PCI
select PCI
+# select this to provide a generic PCI iomap,
+# without PCI itself having to be defined
+config GENERIC_PCI_IOMAP
+ bool
+
menuconfig PCI
bool "PCI support"
depends on HAVE_PCI
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index cc8b4e01e29de..1753020368909 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -4,16 +4,17 @@
obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \
remove.o pci.o pci-driver.o search.o \
- pci-sysfs.o rom.o setup-res.o irq.o vpd.o \
- setup-bus.o vc.o mmap.o setup-irq.o
+ rom.o setup-res.o irq.o vpd.o \
+ setup-bus.o vc.o mmap.o devres.o
obj-$(CONFIG_PCI) += msi/
obj-$(CONFIG_PCI) += pcie/
ifdef CONFIG_PCI
obj-$(CONFIG_PROC_FS) += proc.o
-obj-$(CONFIG_SYSFS) += slot.o
+obj-$(CONFIG_SYSFS) += pci-sysfs.o slot.o
obj-$(CONFIG_ACPI) += pci-acpi.o
+obj-$(CONFIG_GENERIC_PCI_IOMAP) += iomap.o
endif
obj-$(CONFIG_OF) += of.o
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 2d0a8d78bffb5..81c50dc64da96 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -565,7 +565,8 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
struct cdns_pcie *pcie = &ep->pcie;
struct device *dev = pcie->dev;
int max_epfs = sizeof(epc->function_num_map) * 8;
- int ret, value, epf;
+ int ret, epf, last_fn;
+ u32 reg, value;
/*
* BIT(0) is hardwired to 1, hence function 0 is always enabled
@@ -573,6 +574,17 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
*/
cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);
+ /*
+ * The Next Function Number field in the ARI_CAP_AND_CTRL register of the
+ * last function must be 0, so clear it for the last function in use.
+ */
+ last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG);
+ reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn);
+ value = cdns_pcie_readl(pcie, reg);
+ value &= ~CDNS_PCIE_ARI_CAP_NFN_MASK;
+ cdns_pcie_writel(pcie, reg, value);
+
if (ep->quirk_disable_flr) {
for (epf = 0; epf < max_epfs; epf++) {
if (!(epc->function_num_map & BIT(epf)))
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 03b96798f858c..7a66a2f815dce 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -131,6 +131,12 @@
#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
/*
+ * Endpoint PF Registers
+ */
+#define CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(fn) (0x144 + (fn) * 0x1000)
+#define CDNS_PCIE_ARI_CAP_NFN_MASK GENMASK(15, 8)
+
+/*
* Root Port Registers (PCI configuration space for the root port function)
*/
#define CDNS_PCIE_RP_BASE 0x00200000
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index dc2c036ab28cb..99a60270b26cd 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -42,6 +42,19 @@
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000
+#define IMX95_PCIE_PHY_GEN_CTRL 0x0
+#define IMX95_PCIE_REF_USE_PAD BIT(17)
+
+#define IMX95_PCIE_SS_RW_REG_0 0xf0
+#define IMX95_PCIE_REF_CLKEN BIT(23)
+#define IMX95_PCIE_PHY_CR_PARA_SEL BIT(9)
+
+#define IMX95_PE0_GEN_CTRL_1 0x1050
+#define IMX95_PCIE_DEVICE_TYPE GENMASK(3, 0)
+
+#define IMX95_PE0_GEN_CTRL_3 0x1058
+#define IMX95_PCIE_LTSSM_EN BIT(0)
+
#define to_imx6_pcie(x) dev_get_drvdata((x)->dev)
enum imx6_pcie_variants {
@@ -52,14 +65,29 @@ enum imx6_pcie_variants {
IMX8MQ,
IMX8MM,
IMX8MP,
+ IMX95,
IMX8MQ_EP,
IMX8MM_EP,
IMX8MP_EP,
+ IMX95_EP,
};
#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
+#define IMX6_PCIE_FLAG_HAS_PHYDRV BIT(3)
+#define IMX6_PCIE_FLAG_HAS_APP_RESET BIT(4)
+#define IMX6_PCIE_FLAG_HAS_PHY_RESET BIT(5)
+#define IMX6_PCIE_FLAG_HAS_SERDES BIT(6)
+#define IMX6_PCIE_FLAG_SUPPORT_64BIT BIT(7)
+
+#define imx6_check_flag(pci, val) (pci->drvdata->flags & val)
+
+#define IMX6_PCIE_MAX_CLKS 6
+
+#define IMX6_PCIE_MAX_INSTANCES 2
+
+struct imx6_pcie;
struct imx6_pcie_drvdata {
enum imx6_pcie_variants variant;
@@ -67,6 +95,14 @@ struct imx6_pcie_drvdata {
u32 flags;
int dbi_length;
const char *gpr;
+ const char * const *clk_names;
+ const u32 clks_cnt;
+ const u32 ltssm_off;
+ const u32 ltssm_mask;
+ const u32 mode_off[IMX6_PCIE_MAX_INSTANCES];
+ const u32 mode_mask[IMX6_PCIE_MAX_INSTANCES];
+ const struct pci_epc_features *epc_features;
+ int (*init_phy)(struct imx6_pcie *pcie);
};
struct imx6_pcie {
@@ -74,11 +110,7 @@ struct imx6_pcie {
int reset_gpio;
bool gpio_active_high;
bool link_is_up;
- struct clk *pcie_bus;
- struct clk *pcie_phy;
- struct clk *pcie_inbound_axi;
- struct clk *pcie;
- struct clk *pcie_aux;
+ struct clk_bulk_data clks[IMX6_PCIE_MAX_CLKS];
struct regmap *iomuxc_gpr;
u16 msi_ctrl;
u32 controller_id;
@@ -165,34 +197,44 @@ static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}
+static int imx95_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX95_PCIE_SS_RW_REG_0,
+ IMX95_PCIE_PHY_CR_PARA_SEL,
+ IMX95_PCIE_PHY_CR_PARA_SEL);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX95_PCIE_PHY_GEN_CTRL,
+ IMX95_PCIE_REF_USE_PAD, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX95_PCIE_SS_RW_REG_0,
+ IMX95_PCIE_REF_CLKEN,
+ IMX95_PCIE_REF_CLKEN);
+
+ return 0;
+}
+
static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
{
- unsigned int mask, val, mode;
+ const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
+ unsigned int mask, val, mode, id;
- if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE)
+ if (drvdata->mode == DW_PCIE_EP_TYPE)
mode = PCI_EXP_TYPE_ENDPOINT;
else
mode = PCI_EXP_TYPE_ROOT_PORT;
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MQ:
- case IMX8MQ_EP:
- if (imx6_pcie->controller_id == 1) {
- mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
- val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
- mode);
- } else {
- mask = IMX6Q_GPR12_DEVICE_TYPE;
- val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
- }
- break;
- default:
- mask = IMX6Q_GPR12_DEVICE_TYPE;
- val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
- break;
- }
+ id = imx6_pcie->controller_id;
+
+ /* If mode_mask[id] is zero, each controller has its own GPR space, so fall back to index 0 */
+ if (!drvdata->mode_mask[id])
+ id = 0;
+
+ mask = drvdata->mode_mask[id];
+ val = mode << (ffs(mask) - 1);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
}
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
@@ -320,76 +362,66 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
return 0;
}
-static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx8mq_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- /*
- * The PHY initialization had been done in the PHY
- * driver, break here directly.
- */
- break;
- case IMX8MQ:
- case IMX8MQ_EP:
- /*
- * TODO: Currently this code assumes external
- * oscillator is being used
- */
+ /* TODO: Currently this code assumes external oscillator is being used */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ imx6_pcie_grp_offset(imx6_pcie),
+ IMX8MQ_GPR_PCIE_REF_USE_PAD,
+ IMX8MQ_GPR_PCIE_REF_USE_PAD);
+ /*
+ * According to the datasheet, PCIE_VPH is suggested to be 1.8V. If PCIE_VPH is
+ * supplied with 3.3V, VREG_BYPASS should be cleared to zero.
+ */
+ if (imx6_pcie->vph && regulator_get_voltage(imx6_pcie->vph) > 3000000)
regmap_update_bits(imx6_pcie->iomuxc_gpr,
imx6_pcie_grp_offset(imx6_pcie),
- IMX8MQ_GPR_PCIE_REF_USE_PAD,
- IMX8MQ_GPR_PCIE_REF_USE_PAD);
- /*
- * Regarding the datasheet, the PCIE_VPH is suggested
- * to be 1.8V. If the PCIE_VPH is supplied by 3.3V, the
- * VREG_BYPASS should be cleared to zero.
- */
- if (imx6_pcie->vph &&
- regulator_get_voltage(imx6_pcie->vph) > 3000000)
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
- IMX8MQ_GPR_PCIE_VREG_BYPASS,
- 0);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_RX_EQ_MASK,
- IMX6SX_GPR12_PCIE_RX_EQ_2);
- fallthrough;
- default:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX8MQ_GPR_PCIE_VREG_BYPASS,
+ 0);
+
+ return 0;
+}
+
+static int imx7d_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
+
+ return 0;
+}
+
+static int imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
- /* configure constant input signal to the pcie ctrl and phy */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
-
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN1,
- imx6_pcie->tx_deemph_gen1 << 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
- imx6_pcie->tx_deemph_gen2_3p5db << 6);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
- imx6_pcie->tx_deemph_gen2_6db << 12);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_FULL,
- imx6_pcie->tx_swing_full << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_LOW,
- imx6_pcie->tx_swing_low << 25);
- break;
- }
+ /* configure constant input signal to the pcie ctrl and phy */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN1,
+ imx6_pcie->tx_deemph_gen1 << 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+ imx6_pcie->tx_deemph_gen2_3p5db << 6);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+ imx6_pcie->tx_deemph_gen2_6db << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_FULL,
+ imx6_pcie->tx_swing_full << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_LOW,
+ imx6_pcie->tx_swing_low << 25);
+ return 0;
+}
- imx6_pcie_configure_type(imx6_pcie);
+static int imx6sx_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2);
+
+ return imx6_pcie_init_phy(imx6_pcie);
}
static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
@@ -407,13 +439,18 @@ static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
{
- unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
+ unsigned long phy_rate = 0;
int mult, div;
u16 val;
+ int i;
if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
return 0;
+ for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++)
+ if (strncmp(imx6_pcie->clks[i].id, "pcie_phy", 8) == 0)
+ phy_rate = clk_get_rate(imx6_pcie->clks[i].clk);
+
switch (phy_rate) {
case 125000000:
/*
@@ -550,19 +587,11 @@ static int imx6_pcie_attach_pd(struct device *dev)
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
- struct dw_pcie *pci = imx6_pcie->pci;
- struct device *dev = pci->dev;
unsigned int offset;
int ret = 0;
switch (imx6_pcie->drvdata->variant) {
case IMX6SX:
- ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
- if (ret) {
- dev_err(dev, "unable to enable pcie_axi clock\n");
- break;
- }
-
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
break;
@@ -582,6 +611,8 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
break;
case IMX7D:
+ case IMX95:
+ case IMX95_EP:
break;
case IMX8MM:
case IMX8MM_EP:
@@ -589,12 +620,6 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
case IMX8MQ_EP:
case IMX8MP:
case IMX8MP_EP:
- ret = clk_prepare_enable(imx6_pcie->pcie_aux);
- if (ret) {
- dev_err(dev, "unable to enable pcie_aux clock\n");
- break;
- }
-
offset = imx6_pcie_grp_offset(imx6_pcie);
/*
* Set the override low and enabled
@@ -615,9 +640,6 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
{
switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
- break;
case IMX6QP:
case IMX6Q:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
@@ -631,14 +653,6 @@ static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
break;
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MQ:
- case IMX8MQ_EP:
- case IMX8MP:
- case IMX8MP_EP:
- clk_disable_unprepare(imx6_pcie->pcie_aux);
- break;
default:
break;
}
@@ -650,23 +664,9 @@ static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
struct device *dev = pci->dev;
int ret;
- ret = clk_prepare_enable(imx6_pcie->pcie_phy);
- if (ret) {
- dev_err(dev, "unable to enable pcie_phy clock\n");
+ ret = clk_bulk_prepare_enable(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ if (ret)
return ret;
- }
-
- ret = clk_prepare_enable(imx6_pcie->pcie_bus);
- if (ret) {
- dev_err(dev, "unable to enable pcie_bus clock\n");
- goto err_pcie_bus;
- }
-
- ret = clk_prepare_enable(imx6_pcie->pcie);
- if (ret) {
- dev_err(dev, "unable to enable pcie clock\n");
- goto err_pcie;
- }
ret = imx6_pcie_enable_ref_clk(imx6_pcie);
if (ret) {
@@ -679,11 +679,7 @@ static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
return 0;
err_ref_clk:
- clk_disable_unprepare(imx6_pcie->pcie);
-err_pcie:
- clk_disable_unprepare(imx6_pcie->pcie_bus);
-err_pcie_bus:
- clk_disable_unprepare(imx6_pcie->pcie_phy);
+ clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
return ret;
}
@@ -691,25 +687,15 @@ err_pcie_bus:
static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
{
imx6_pcie_disable_ref_clk(imx6_pcie);
- clk_disable_unprepare(imx6_pcie->pcie);
- clk_disable_unprepare(imx6_pcie->pcie_bus);
- clk_disable_unprepare(imx6_pcie->pcie_phy);
+ clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
}
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
+ reset_control_assert(imx6_pcie->pciephy_reset);
+ reset_control_assert(imx6_pcie->apps_reset);
+
switch (imx6_pcie->drvdata->variant) {
- case IMX7D:
- case IMX8MQ:
- case IMX8MQ_EP:
- reset_control_assert(imx6_pcie->pciephy_reset);
- fallthrough;
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- reset_control_assert(imx6_pcie->apps_reset);
- break;
case IMX6SX:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
@@ -730,6 +716,8 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
break;
+ default:
+ break;
}
/* Some boards don't have PCIe reset GPIO. */
@@ -743,14 +731,10 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
struct dw_pcie *pci = imx6_pcie->pci;
struct device *dev = pci->dev;
+ reset_control_deassert(imx6_pcie->pciephy_reset);
+
switch (imx6_pcie->drvdata->variant) {
- case IMX8MQ:
- case IMX8MQ_EP:
- reset_control_deassert(imx6_pcie->pciephy_reset);
- break;
case IMX7D:
- reset_control_deassert(imx6_pcie->pciephy_reset);
-
/* Workaround for ERR010728, failure of PCI-e PLL VCO to
* oscillate, especially when cold. This turns off "Duty-cycle
* Corrector" and other mysterious undocumented things.
@@ -782,11 +766,7 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
usleep_range(200, 500);
break;
- case IMX6Q: /* Nothing to do */
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
+ default:
break;
}
@@ -824,48 +804,25 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
static void imx6_pcie_ltssm_enable(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
- switch (imx6_pcie->drvdata->variant) {
- case IMX6Q:
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2,
- IMX6Q_GPR12_PCIE_CTL_2);
- break;
- case IMX7D:
- case IMX8MQ:
- case IMX8MQ_EP:
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- reset_control_deassert(imx6_pcie->apps_reset);
- break;
- }
+ if (drvdata->ltssm_mask)
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
+ drvdata->ltssm_mask);
+
+ reset_control_deassert(imx6_pcie->apps_reset);
}
static void imx6_pcie_ltssm_disable(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
- switch (imx6_pcie->drvdata->variant) {
- case IMX6Q:
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 0);
- break;
- case IMX7D:
- case IMX8MQ:
- case IMX8MQ_EP:
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- reset_control_assert(imx6_pcie->apps_reset);
- break;
- }
+ if (drvdata->ltssm_mask)
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off,
+ drvdata->ltssm_mask, 0);
+
+ reset_control_assert(imx6_pcie->apps_reset);
}
static int imx6_pcie_start_link(struct dw_pcie *pci)
@@ -977,7 +934,11 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
}
imx6_pcie_assert_core_reset(imx6_pcie);
- imx6_pcie_init_phy(imx6_pcie);
+
+ if (imx6_pcie->drvdata->init_phy)
+ imx6_pcie->drvdata->init_phy(imx6_pcie);
+
+ imx6_pcie_configure_type(imx6_pcie);
ret = imx6_pcie_clk_enable(imx6_pcie);
if (ret) {
@@ -1081,14 +1042,35 @@ static const struct pci_epc_features imx8m_pcie_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
.msix_capable = false,
- .reserved_bar = 1 << BAR_1 | 1 << BAR_3,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
.align = SZ_64K,
};
+/*
+ * BAR#  | Default BAR enable | Default BAR Type | Default BAR Size | BAR Sizing Scheme
+ * ====================================================================================
+ * BAR0  | Enable             | 64-bit           | 1 MB             | Programmable Size
+ * BAR1  | Disable            | 32-bit           | 64 KB            | Fixed Size
+ *         (BAR1 should be disabled if BAR0 is 64-bit)
+ * BAR2  | Enable             | 32-bit           | 1 MB             | Programmable Size
+ * BAR3  | Enable             | 32-bit           | 64 KB            | Programmable Size
+ * BAR4  | Enable             | 32-bit           | 1 MB             | Programmable Size
+ * BAR5  | Enable             | 32-bit           | 64 KB            | Programmable Size
+ */
+static const struct pci_epc_features imx95_pcie_epc_features = {
+ .msi_capable = true,
+ .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
+ .align = SZ_4K,
+};
+
static const struct pci_epc_features*
imx6_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
- return &imx8m_pcie_epc_features;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+ return imx6_pcie->drvdata->epc_features;
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
@@ -1103,7 +1085,6 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
int ret;
unsigned int pcie_dbi2_offset;
struct dw_pcie_ep *ep;
- struct resource *res;
struct dw_pcie *pci = imx6_pcie->pci;
struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
@@ -1122,14 +1103,20 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
pcie_dbi2_offset = SZ_4K;
break;
}
+
pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
- ep->page_size = SZ_64K;
+ /*
+ * FIXME: Ideally, the dbi2 base address should come from DT. But since only IMX95 defines
+ * "dbi2" in DT, "dbi_base2" is set to NULL here for that platform alone so that the DWC
+ * core code can fetch it from DT. Once all platform DTs are fixed, this and the
+ * above "dbi_base2" assignment should be removed.
+ */
+ if (device_property_match_string(dev, "reg-names", "dbi2") >= 0)
+ pci->dbi_base2 = NULL;
+
+ if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_SUPPORT_64BIT))
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
ret = dw_pcie_ep_init(ep);
if (ret) {
@@ -1251,6 +1238,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
int ret;
u16 val;
+ int i;
imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
if (!imx6_pcie)
@@ -1304,81 +1292,48 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return imx6_pcie->reset_gpio;
}
- /* Fetch clocks */
- imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(imx6_pcie->pcie_bus))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus),
- "pcie_bus clock source missing or invalid\n");
+ if (imx6_pcie->drvdata->clks_cnt >= IMX6_PCIE_MAX_CLKS)
+ return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n");
- imx6_pcie->pcie = devm_clk_get(dev, "pcie");
- if (IS_ERR(imx6_pcie->pcie))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie),
- "pcie clock source missing or invalid\n");
+ for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++)
+ imx6_pcie->clks[i].id = imx6_pcie->drvdata->clk_names[i];
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
- "pcie_inbound_axi");
- if (IS_ERR(imx6_pcie->pcie_inbound_axi))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_inbound_axi),
- "pcie_inbound_axi clock missing or invalid\n");
- break;
- case IMX8MQ:
- case IMX8MQ_EP:
- imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
- if (IS_ERR(imx6_pcie->pcie_aux))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
- "pcie_aux clock source missing or invalid\n");
- fallthrough;
- case IMX7D:
- if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
- imx6_pcie->controller_id = 1;
+ /* Fetch clocks */
+ ret = devm_clk_bulk_get(dev, imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ if (ret)
+ return ret;
- imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
- "pciephy");
- if (IS_ERR(imx6_pcie->pciephy_reset)) {
- dev_err(dev, "Failed to get PCIEPHY reset control\n");
- return PTR_ERR(imx6_pcie->pciephy_reset);
- }
+ if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHYDRV)) {
+ imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(imx6_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
+ "failed to get pcie phy\n");
+ }
- imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
- "apps");
- if (IS_ERR(imx6_pcie->apps_reset)) {
- dev_err(dev, "Failed to get PCIE APPS reset control\n");
- return PTR_ERR(imx6_pcie->apps_reset);
- }
- break;
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
- if (IS_ERR(imx6_pcie->pcie_aux))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
- "pcie_aux clock source missing or invalid\n");
- imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
- "apps");
+ if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_APP_RESET)) {
+ imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps");
if (IS_ERR(imx6_pcie->apps_reset))
return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset),
"failed to get pcie apps reset control\n");
+ }
- imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
- if (IS_ERR(imx6_pcie->phy))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
- "failed to get pcie phy\n");
+ if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHY_RESET)) {
+ imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
+ if (IS_ERR(imx6_pcie->pciephy_reset))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pciephy_reset),
+ "Failed to get PCIEPHY reset control\n");
+ }
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ case IMX7D:
+ if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
+ imx6_pcie->controller_id = 1;
break;
default:
break;
}
- /* Don't fetch the pcie_phy clock, if it has abstract PHY driver */
- if (imx6_pcie->phy == NULL) {
- imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pcie_phy))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy),
- "pcie_phy clock source missing or invalid\n");
- }
-
/* Grab turnoff reset */
imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
@@ -1387,12 +1342,32 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx6_pcie->turnoff_reset);
}
+ if (imx6_pcie->drvdata->gpr) {
/* Grab GPR config register range */
- imx6_pcie->iomuxc_gpr =
- syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
- if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
- dev_err(dev, "unable to find iomuxc registers\n");
- return PTR_ERR(imx6_pcie->iomuxc_gpr);
+ imx6_pcie->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
+ if (IS_ERR(imx6_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr),
+ "unable to find iomuxc registers\n");
+ }
+
+ if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_SERDES)) {
+ void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app");
+
+ if (IS_ERR(off))
+ return dev_err_probe(dev, PTR_ERR(off),
+ "unable to find serdes registers\n");
+
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
+
+ imx6_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config);
+ if (IS_ERR(imx6_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr),
+ "unable to find iomuxc registers\n");
}
/* Grab PCIe PHY Tx Settings */
@@ -1469,6 +1444,11 @@ static void imx6_pcie_shutdown(struct platform_device *pdev)
imx6_pcie_assert_core_reset(imx6_pcie);
}
+static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"};
+static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"};
+static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"};
+static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"};
+
static const struct imx6_pcie_drvdata drvdata[] = {
[IMX6Q] = {
.variant = IMX6Q,
@@ -1476,6 +1456,13 @@ static const struct imx6_pcie_drvdata drvdata[] = {
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
+ .clk_names = imx6q_clks,
+ .clks_cnt = ARRAY_SIZE(imx6q_clks),
+ .ltssm_off = IOMUXC_GPR12,
+ .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx6_pcie_init_phy,
},
[IMX6SX] = {
.variant = IMX6SX,
@@ -1483,6 +1470,13 @@ static const struct imx6_pcie_drvdata drvdata[] = {
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx6q-iomuxc-gpr",
+ .clk_names = imx6sx_clks,
+ .clks_cnt = ARRAY_SIZE(imx6sx_clks),
+ .ltssm_off = IOMUXC_GPR12,
+ .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx6sx_pcie_init_phy,
},
[IMX6QP] = {
.variant = IMX6QP,
@@ -1491,40 +1485,122 @@ static const struct imx6_pcie_drvdata drvdata[] = {
IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
+ .clk_names = imx6q_clks,
+ .clks_cnt = ARRAY_SIZE(imx6q_clks),
+ .ltssm_off = IOMUXC_GPR12,
+ .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx6_pcie_init_phy,
},
[IMX7D] = {
.variant = IMX7D,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX6_PCIE_FLAG_HAS_APP_RESET |
+ IMX6_PCIE_FLAG_HAS_PHY_RESET,
.gpr = "fsl,imx7d-iomuxc-gpr",
+ .clk_names = imx6q_clks,
+ .clks_cnt = ARRAY_SIZE(imx6q_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx7d_pcie_init_phy,
},
[IMX8MQ] = {
.variant = IMX8MQ,
+ .flags = IMX6_PCIE_FLAG_HAS_APP_RESET |
+ IMX6_PCIE_FLAG_HAS_PHY_RESET,
.gpr = "fsl,imx8mq-iomuxc-gpr",
+ .clk_names = imx8mq_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .mode_off[1] = IOMUXC_GPR12,
+ .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ .init_phy = imx8mq_pcie_init_phy,
},
[IMX8MM] = {
.variant = IMX8MM,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX6_PCIE_FLAG_HAS_PHYDRV |
+ IMX6_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mm-iomuxc-gpr",
+ .clk_names = imx8mm_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mm_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
},
[IMX8MP] = {
.variant = IMX8MP,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX6_PCIE_FLAG_HAS_PHYDRV |
+ IMX6_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mp-iomuxc-gpr",
+ .clk_names = imx8mm_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mm_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ },
+ [IMX95] = {
+ .variant = IMX95,
+ .flags = IMX6_PCIE_FLAG_HAS_SERDES,
+ .clk_names = imx8mq_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .ltssm_off = IMX95_PE0_GEN_CTRL_3,
+ .ltssm_mask = IMX95_PCIE_LTSSM_EN,
+ .mode_off[0] = IMX95_PE0_GEN_CTRL_1,
+ .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
+ .init_phy = imx95_pcie_init_phy,
},
[IMX8MQ_EP] = {
.variant = IMX8MQ_EP,
+ .flags = IMX6_PCIE_FLAG_HAS_APP_RESET |
+ IMX6_PCIE_FLAG_HAS_PHY_RESET,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mq-iomuxc-gpr",
+ .clk_names = imx8mq_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .mode_off[1] = IOMUXC_GPR12,
+ .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ .epc_features = &imx8m_pcie_epc_features,
+ .init_phy = imx8mq_pcie_init_phy,
},
[IMX8MM_EP] = {
.variant = IMX8MM_EP,
+ .flags = IMX6_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mm-iomuxc-gpr",
+ .clk_names = imx8mm_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mm_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .epc_features = &imx8m_pcie_epc_features,
},
[IMX8MP_EP] = {
.variant = IMX8MP_EP,
+ .flags = IMX6_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mp-iomuxc-gpr",
+ .clk_names = imx8mm_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mm_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .epc_features = &imx8m_pcie_epc_features,
+ },
+ [IMX95_EP] = {
+ .variant = IMX95_EP,
+ .flags = IMX6_PCIE_FLAG_HAS_SERDES |
+ IMX6_PCIE_FLAG_SUPPORT_64BIT,
+ .clk_names = imx8mq_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .ltssm_off = IMX95_PE0_GEN_CTRL_3,
+ .ltssm_mask = IMX95_PCIE_LTSSM_EN,
+ .mode_off[0] = IMX95_PE0_GEN_CTRL_1,
+ .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
+ .init_phy = imx95_pcie_init_phy,
+ .epc_features = &imx95_pcie_epc_features,
+ .mode = DW_PCIE_EP_TYPE,
},
};
@@ -1536,9 +1612,11 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
+ { .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], },
{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
{ .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
+ { .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], },
{},
};
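Much of the i.MX6 churn above is the switch from open-coded clk_prepare_enable() calls to the clk_bulk API driven by per-variant clock-name tables. The pattern in isolation, with invented bar_* names and only the standard clk_bulk helpers assumed:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>

static const char * const bar_clk_names[] = { "bus", "core", "aux" };

struct bar_priv {
	struct clk_bulk_data clks[ARRAY_SIZE(bar_clk_names)];
};

static int bar_clks_get_and_enable(struct device *dev, struct bar_priv *p)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(bar_clk_names); i++)
		p->clks[i].id = bar_clk_names[i];

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(bar_clk_names), p->clks);
	if (ret)
		return ret;

	/* enables every clock in order and unwinds automatically on failure */
	return clk_bulk_prepare_enable(ARRAY_SIZE(bar_clk_names), p->clks);
}

The matching teardown is the single clk_bulk_disable_unprepare() call seen in imx6_pcie_clk_disable() above.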
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index c0c62533a3f17..844de44187242 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -924,12 +924,12 @@ static const struct pci_epc_features ks_pcie_am654_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
- .reserved_bar = 1 << BAR_0 | 1 << BAR_1,
- .bar_fixed_64bit = 1 << BAR_0,
- .bar_fixed_size[2] = SZ_1M,
- .bar_fixed_size[3] = SZ_64K,
- .bar_fixed_size[4] = 256,
- .bar_fixed_size[5] = SZ_1M,
+ .bar[BAR_0] = { .type = BAR_RESERVED, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256, },
+ .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
.align = SZ_1M,
};
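All of the pci_epc_features conversions in this series (Keystone above, Layerscape, Keembay, Tegra194, R-Car and UniPhier below) replace the reserved_bar/bar_fixed_64bit/bar_fixed_size trio with one descriptor per BAR. A small sketch of how a function driver could consult the new layout; the helper itself is invented, only the bar[].type and .fixed_size fields come from the series:

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static size_t bar_alloc_size(const struct pci_epc_features *feat,
			     enum pci_barno bar, size_t wanted)
{
	if (feat->bar[bar].type == BAR_RESERVED)
		return 0;			/* this BAR must not be used */

	if (feat->bar[bar].type == BAR_FIXED)
		return feat->bar[bar].fixed_size;	/* size dictated by hardware */

	return wanted;				/* BAR_PROGRAMMABLE: caller's choice */
}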
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 2e398494e7c0c..1f6ee1460ec2a 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -250,7 +250,10 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = pcie->drvdata->dw_pcie_ops;
- ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4);
+ ls_epc->bar[BAR_2].only_64bit = true;
+ ls_epc->bar[BAR_3].type = BAR_RESERVED;
+ ls_epc->bar[BAR_4].only_64bit = true;
+ ls_epc->bar[BAR_5].type = BAR_RESERVED;
ls_epc->linkup_notifier = true;
pcie->pci = pci;
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 9a437cfce073c..746a11dcb67f1 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -629,8 +629,13 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
PCI_REBAR_CTRL_NBAR_SHIFT;
+ /*
+ * PCIe r6.0, sec 7.8.6.2 requires us to support at least one
+ * size in the range from 1 MB to 512 GB. Advertise support
+ * for the 1 MB BAR size only.
+ */
for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4));
}
/*
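For context on the BIT(4) value above: in the Resizable BAR capability the size bits start at bit 4 of PCI_REBAR_CAP, where bit 4 advertises 1 MB and each higher bit doubles the size (bit 23 is 512 GB). A tiny illustrative helper, assuming the size is a power of two and at least 1 MB:

#include <linux/bits.h>
#include <linux/log2.h>
#include <linux/types.h>

/* 1 MB -> BIT(4), 2 MB -> BIT(5), ..., 512 GB -> BIT(23) */
static u32 rebar_size_to_cap_bit(u64 size)
{
	return BIT(ilog2(size) - 20 + 4);
}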
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index d5fc31f8345f7..d15a5c2d5b480 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -328,7 +328,7 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
struct platform_device *pdev = to_platform_device(dev);
- u64 *msi_vaddr;
+ u64 *msi_vaddr = NULL;
int ret;
u32 ctrl, num_ctrls;
@@ -379,15 +379,20 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
* memory.
*/
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
- if (ret)
- dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
+ if (!ret)
+ msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
+ GFP_KERNEL);
- msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
- GFP_KERNEL);
if (!msi_vaddr) {
- dev_err(dev, "Failed to alloc and map MSI data\n");
- dw_pcie_free_msi(pp);
- return -ENOMEM;
+ dev_warn(dev, "Failed to allocate 32-bit MSI address\n");
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
+ msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
+ GFP_KERNEL);
+ if (!msi_vaddr) {
+ dev_err(dev, "Failed to allocate MSI address\n");
+ dw_pcie_free_msi(pp);
+ return -ENOMEM;
+ }
}
return 0;
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 208d3b0ba1960..5e8e54f597dd4 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -312,8 +312,12 @@ static const struct pci_epc_features keembay_pcie_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
- .reserved_bar = BIT(BAR_1) | BIT(BAR_3) | BIT(BAR_5),
- .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .only_64bit = true, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
.align = SZ_16K,
};
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 2ce2a3bd932bd..14772edcf0d34 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -53,6 +53,7 @@
#define PARF_SLV_ADDR_SPACE_SIZE 0x358
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_TABLE_N 0x2000
+#define PARF_BDF_TO_SID_CFG 0x2c00
/* ELBI registers */
#define ELBI_SYS_CTRL 0x04
@@ -120,6 +121,9 @@
/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC 0x4
+/* PARF_BDF_TO_SID_CFG fields */
+#define BDF_TO_SID_BYPASS BIT(0)
+
/* ELBI_SYS_CTRL register fields */
#define ELBI_SYS_CTRL_LT_ENABLE BIT(0)
@@ -229,6 +233,7 @@ struct qcom_pcie_ops {
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
+ bool no_l0s;
};
struct qcom_pcie {
@@ -272,6 +277,26 @@ static int qcom_pcie_start_link(struct dw_pcie *pci)
return 0;
}
+static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci)
+{
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ u16 offset;
+ u32 val;
+
+ if (!pcie->cfg->no_l0s)
+ return;
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_ASPM_L0S;
+ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
{
u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
@@ -961,6 +986,7 @@ err_disable_regulators:
static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
+ qcom_pcie_clear_aspm_l0s(pcie->pci);
qcom_pcie_clear_hpc(pcie->pci);
return 0;
@@ -1008,11 +1034,17 @@ static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
int i, nr_map, size = 0;
u32 smmu_sid_base;
+ u32 val;
of_get_property(dev->of_node, "iommu-map", &size);
if (!size)
return 0;
+ /* Enable BDF to SID translation by disabling bypass mode (default) */
+ val = readl(pcie->parf + PARF_BDF_TO_SID_CFG);
+ val &= ~BDF_TO_SID_BYPASS;
+ writel(val, pcie->parf + PARF_BDF_TO_SID_CFG);
+
map = kzalloc(size, GFP_KERNEL);
if (!map)
return -ENOMEM;
@@ -1358,6 +1390,11 @@ static const struct qcom_pcie_cfg cfg_2_9_0 = {
.ops = &ops_2_9_0,
};
+static const struct qcom_pcie_cfg cfg_sc8280xp = {
+ .ops = &ops_1_9_0,
+ .no_l0s = true,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
.start_link = qcom_pcie_start_link,
@@ -1629,11 +1666,11 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
- { .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_9_0},
{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
- { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
{ .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
@@ -1642,6 +1679,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-x1e80100", .data = &cfg_1_9_0 },
{ }
};
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
index e9166619b1f9f..0be760ed420bd 100644
--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -383,7 +383,9 @@ static const struct pci_epc_features rcar_gen4_pcie_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
.msix_capable = false,
- .reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
.align = SZ_1M,
};
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 7afa9e9aabe21..1f7b662cb8e15 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -2007,9 +2007,13 @@ static const struct pci_epc_features tegra_pcie_epc_features = {
.core_init_notifier = true,
.msi_capable = false,
.msix_capable = false,
- .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
- .bar_fixed_64bit = 1 << BAR_0,
- .bar_fixed_size[0] = SZ_1M,
+ .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M,
+ .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
};
static const struct pci_epc_features*
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
index 3fced0d3e8512..639bc2e12476f 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -411,8 +411,12 @@ static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = {
.msi_capable = true,
.msix_capable = false,
.align = 1 << 16,
- .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
- .reserved_bar = BIT(BAR_4),
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
},
};
@@ -425,7 +429,12 @@ static const struct uniphier_pcie_ep_soc_data uniphier_nx1_data = {
.msi_capable = true,
.msix_capable = false,
.align = 1 << 12,
- .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .only_64bit = true, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
},
};
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 1eaffff40b8d4..5992280e8110b 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -49,6 +49,7 @@
#include <linux/refcount.h>
#include <linux/irqdomain.h>
#include <linux/acpi.h>
+#include <linux/sizes.h>
#include <asm/mshyperv.h>
/*
@@ -465,7 +466,7 @@ struct pci_eject_response {
u32 status;
} __packed;
-static int pci_ring_size = (4 * PAGE_SIZE);
+static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K);
/*
* Driver specific state.
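The switch to VMBUS_RING_SIZE(SZ_16K) is assumed here to mean "16 KiB of usable payload": the macro in include/linux/hyperv.h pads the requested payload with the ring-buffer header and rounds the total up to whole pages, whereas the old 4 * PAGE_SIZE figure was a raw byte count that varied with the page size and left less than expected for the payload. A sketch of the idea only, not the exact upstream definition:

#include <linux/hyperv.h>
#include <linux/mm.h>

/* illustrative approximation of VMBUS_RING_SIZE() */
#define EXAMPLE_VMBUS_RING_SIZE(payload_sz) \
	PAGE_ALIGN(sizeof(struct hv_ring_buffer) + (payload_sz))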
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 5b0730c3891b8..c08683febdd40 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -336,7 +336,7 @@ static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
readl(base + PCIE_RC_DL_MDIO_ADDR);
writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);
- err = readw_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data,
+ err = readl_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data,
MDIO_WT_DONE(data), 10, 100);
return err;
}
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index e6909271def79..05967c6c0b426 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -440,11 +440,15 @@ static const struct pci_epc_features rcar_pcie_epc_features = {
.msi_capable = true,
.msix_capable = false,
/* use 64-bit BARs so mark BAR[1,3,5] as reserved */
- .reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5,
- .bar_fixed_64bit = 1 << BAR_0 | 1 << BAR_2 | 1 << BAR_4,
- .bar_fixed_size[0] = 128,
- .bar_fixed_size[2] = 256,
- .bar_fixed_size[4] = 256,
+ .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = 128,
+ .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = 256,
+ .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256,
+ .only_64bit = true, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
};
static const struct pci_epc_features*
diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
new file mode 100644
index 0000000000000..2c562b9eaf801
--- /dev/null
+++ b/drivers/pci/devres.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/device.h>
+#include <linux/pci.h>
+#include "pci.h"
+
+/*
+ * PCI iomap devres
+ */
+#define PCIM_IOMAP_MAX PCI_STD_NUM_BARS
+
+struct pcim_iomap_devres {
+ void __iomem *table[PCIM_IOMAP_MAX];
+};
+
+
+static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
+{
+ struct resource **res = ptr;
+
+ pci_unmap_iospace(*res);
+}
+
+/**
+ * devm_pci_remap_iospace - Managed pci_remap_iospace()
+ * @dev: Generic device to remap IO address for
+ * @res: Resource describing the I/O space
+ * @phys_addr: physical address of range to be mapped
+ *
+ * Managed pci_remap_iospace(). Map is automatically unmapped on driver
+ * detach.
+ */
+int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+ phys_addr_t phys_addr)
+{
+ const struct resource **ptr;
+ int error;
+
+ ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ error = pci_remap_iospace(res, phys_addr);
+ if (error) {
+ devres_free(ptr);
+ } else {
+ *ptr = res;
+ devres_add(dev, ptr);
+ }
+
+ return error;
+}
+EXPORT_SYMBOL(devm_pci_remap_iospace);
+
+/**
+ * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *devm_pci_remap_cfgspace(struct device *dev,
+ resource_size_t offset,
+ resource_size_t size)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = pci_remap_cfgspace(offset, size);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_pci_remap_cfgspace);
+
+/**
+ * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
+ * @dev: generic device to handle the resource for
+ * @res: configuration space resource to be handled
+ *
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps with pci_remap_cfgspace() API that ensures the
+ * proper PCI configuration space memory attributes are guaranteed.
+ *
+ * All operations are managed and will be undone on driver detach.
+ *
+ * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure. Usage example::
+ *
+ * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
+ * if (IS_ERR(base))
+ * return PTR_ERR(base);
+ */
+void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
+ struct resource *res)
+{
+ resource_size_t size;
+ const char *name;
+ void __iomem *dest_ptr;
+
+ BUG_ON(!dev);
+
+ if (!res || resource_type(res) != IORESOURCE_MEM) {
+ dev_err(dev, "invalid resource\n");
+ return IOMEM_ERR_PTR(-EINVAL);
+ }
+
+ size = resource_size(res);
+
+ if (res->name)
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
+ res->name);
+ else
+ name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!name)
+ return IOMEM_ERR_PTR(-ENOMEM);
+
+ if (!devm_request_mem_region(dev, res->start, size, name)) {
+ dev_err(dev, "can't request region for resource %pR\n", res);
+ return IOMEM_ERR_PTR(-EBUSY);
+ }
+
+ dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
+ if (!dest_ptr) {
+ dev_err(dev, "ioremap failed for resource %pR\n", res);
+ devm_release_mem_region(dev, res->start, size);
+ dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
+ }
+
+ return dest_ptr;
+}
+EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
+
+/**
+ * pcim_set_mwi - a device-managed pci_set_mwi()
+ * @dev: the PCI device for which MWI is enabled
+ *
+ * Managed pci_set_mwi().
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int pcim_set_mwi(struct pci_dev *dev)
+{
+ struct pci_devres *dr;
+
+ dr = find_pci_dr(dev);
+ if (!dr)
+ return -ENOMEM;
+
+ dr->mwi = 1;
+ return pci_set_mwi(dev);
+}
+EXPORT_SYMBOL(pcim_set_mwi);
+
+
+static void pcim_release(struct device *gendev, void *res)
+{
+ struct pci_dev *dev = to_pci_dev(gendev);
+ struct pci_devres *this = res;
+ int i;
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ if (this->region_mask & (1 << i))
+ pci_release_region(dev, i);
+
+ if (this->mwi)
+ pci_clear_mwi(dev);
+
+ if (this->restore_intx)
+ pci_intx(dev, this->orig_intx);
+
+ if (this->enabled && !this->pinned)
+ pci_disable_device(dev);
+}
+
+/*
+ * TODO: After the last four callers in pci.c are ported, find_pci_dr()
+ * needs to be made static again.
+ */
+struct pci_devres *find_pci_dr(struct pci_dev *pdev)
+{
+ if (pci_is_managed(pdev))
+ return devres_find(&pdev->dev, pcim_release, NULL, NULL);
+ return NULL;
+}
+
+static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
+{
+ struct pci_devres *dr, *new_dr;
+
+ dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
+ if (dr)
+ return dr;
+
+ new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
+ if (!new_dr)
+ return NULL;
+ return devres_get(&pdev->dev, new_dr, NULL, NULL);
+}
+
+/**
+ * pcim_enable_device - Managed pci_enable_device()
+ * @pdev: PCI device to be initialized
+ *
+ * Managed pci_enable_device().
+ */
+int pcim_enable_device(struct pci_dev *pdev)
+{
+ struct pci_devres *dr;
+ int rc;
+
+ dr = get_pci_dr(pdev);
+ if (unlikely(!dr))
+ return -ENOMEM;
+ if (dr->enabled)
+ return 0;
+
+ rc = pci_enable_device(pdev);
+ if (!rc) {
+ pdev->is_managed = 1;
+ dr->enabled = 1;
+ }
+ return rc;
+}
+EXPORT_SYMBOL(pcim_enable_device);
+
+/**
+ * pcim_pin_device - Pin managed PCI device
+ * @pdev: PCI device to pin
+ *
+ * Pin managed PCI device @pdev. Pinned device won't be disabled on
+ * driver detach. @pdev must have been enabled with
+ * pcim_enable_device().
+ */
+void pcim_pin_device(struct pci_dev *pdev)
+{
+ struct pci_devres *dr;
+
+ dr = find_pci_dr(pdev);
+ WARN_ON(!dr || !dr->enabled);
+ if (dr)
+ dr->pinned = 1;
+}
+EXPORT_SYMBOL(pcim_pin_device);
+
+static void pcim_iomap_release(struct device *gendev, void *res)
+{
+ struct pci_dev *dev = to_pci_dev(gendev);
+ struct pcim_iomap_devres *this = res;
+ int i;
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++)
+ if (this->table[i])
+ pci_iounmap(dev, this->table[i]);
+}
+
+/**
+ * pcim_iomap_table - access iomap allocation table
+ * @pdev: PCI device to access iomap table for
+ *
+ * Access the iomap allocation table for @pdev. If the iomap table doesn't
+ * exist and @pdev is managed, it will be allocated. All iomaps
+ * recorded in the iomap table are automatically unmapped on driver
+ * detach.
+ *
+ * This function might sleep when the table is first allocated, but once
+ * the table exists it can be called from any context and is guaranteed
+ * to succeed.
+ */
+void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
+{
+ struct pcim_iomap_devres *dr, *new_dr;
+
+ dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
+ if (dr)
+ return dr->table;
+
+ new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!new_dr)
+ return NULL;
+ dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
+ return dr->table;
+}
+EXPORT_SYMBOL(pcim_iomap_table);
+
+/**
+ * pcim_iomap - Managed pci_iomap()
+ * @pdev: PCI device to iomap for
+ * @bar: BAR to iomap
+ * @maxlen: Maximum length of iomap
+ *
+ * Managed pci_iomap(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+{
+ void __iomem **tbl;
+
+ BUG_ON(bar >= PCIM_IOMAP_MAX);
+
+ tbl = (void __iomem **)pcim_iomap_table(pdev);
+ if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
+ return NULL;
+
+ tbl[bar] = pci_iomap(pdev, bar, maxlen);
+ return tbl[bar];
+}
+EXPORT_SYMBOL(pcim_iomap);
+
+/**
+ * pcim_iounmap - Managed pci_iounmap()
+ * @pdev: PCI device to iounmap for
+ * @addr: Address to unmap
+ *
+ * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
+ */
+void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+ void __iomem **tbl;
+ int i;
+
+ pci_iounmap(pdev, addr);
+
+ tbl = (void __iomem **)pcim_iomap_table(pdev);
+ BUG_ON(!tbl);
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++)
+ if (tbl[i] == addr) {
+ tbl[i] = NULL;
+ return;
+ }
+ WARN_ON(1);
+}
+EXPORT_SYMBOL(pcim_iounmap);
+
+/**
+ * pcim_iomap_regions - Request and iomap PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to request and iomap
+ * @name: Name used when requesting regions
+ *
+ * Request and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
+{
+ void __iomem * const *iomap;
+ int i, rc;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return -ENOMEM;
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ unsigned long len;
+
+ if (!(mask & (1 << i)))
+ continue;
+
+ rc = -EINVAL;
+ len = pci_resource_len(pdev, i);
+ if (!len)
+ goto err_inval;
+
+ rc = pci_request_region(pdev, i, name);
+ if (rc)
+ goto err_inval;
+
+ rc = -ENOMEM;
+ if (!pcim_iomap(pdev, i, 0))
+ goto err_region;
+ }
+
+ return 0;
+
+ err_region:
+ pci_release_region(pdev, i);
+ err_inval:
+ while (--i >= 0) {
+ if (!(mask & (1 << i)))
+ continue;
+ pcim_iounmap(pdev, iomap[i]);
+ pci_release_region(pdev, i);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions);
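
/*
 * Editor's illustrative sketch, not part of this patch: requesting and
 * mapping BARs 0 and 2 in one call, then fetching the BAR 0 cookie from
 * the iomap table. "foo" and the register offset are assumptions.
 */
#if 0	/* example only */
static int foo_map_bars(struct pci_dev *pdev)
{
	void __iomem *regs;
	int rc;

	rc = pcim_iomap_regions(pdev, BIT(0) | BIT(2), "foo");
	if (rc)
		return rc;

	regs = pcim_iomap_table(pdev)[0];	/* unmapped automatically on detach */
	writel(0x1, regs + 0x10);		/* hypothetical control register */
	return 0;
}
#endif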
+
+/**
+ * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to iomap
+ * @name: Name used when requesting regions
+ *
+ * Request all PCI BARs and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
+ const char *name)
+{
+ int request_mask = ((1 << 6) - 1) & ~mask;
+ int rc;
+
+ rc = pci_request_selected_regions(pdev, request_mask, name);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, mask, name);
+ if (rc)
+ pci_release_selected_regions(pdev, request_mask);
+ return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions_request_all);
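
/*
 * Editor's illustrative sketch, not part of this patch: claim every BAR so
 * no other driver can grab them, but only iomap BAR 0. The "foo" names and
 * the sanity check are assumptions.
 */
#if 0	/* example only */
static int foo_claim_all_bars(struct pci_dev *pdev)
{
	void __iomem *regs;
	int rc;

	/* Request all BARs; iomap only BAR 0. */
	rc = pcim_iomap_regions_request_all(pdev, BIT(0), "foo");
	if (rc)
		return rc;

	regs = pcim_iomap_table(pdev)[0];
	return readl(regs) == 0xffffffff ? -ENODEV : 0;	/* hypothetical sanity check */
}
#endif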
+
+/**
+ * pcim_iounmap_regions - Unmap and release PCI BARs
+ * @pdev: PCI device to unmap IO resources for
+ * @mask: Mask of BARs to unmap and release
+ *
+ * Unmap and release regions specified by @mask.
+ */
+void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
+{
+ void __iomem * const *iomap;
+ int i;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return;
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++) {
+ if (!(mask & (1 << i)))
+ continue;
+
+ pcim_iounmap(pdev, iomap[i]);
+ pci_release_region(pdev, i);
+ }
+}
+EXPORT_SYMBOL(pcim_iounmap_regions);
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index 1c3e4ea76bd25..2c54d80107cf3 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -123,6 +123,22 @@ static const struct pci_epf_mhi_ep_info sm8450_info = {
.flags = MHI_EPF_USE_DMA,
};
+static struct pci_epf_header sa8775p_header = {
+ .vendorid = PCI_VENDOR_ID_QCOM,
+ .deviceid = 0x0306, /* FIXME: Update deviceid for sa8775p EP */
+ .baseclass_code = PCI_CLASS_OTHERS,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+static const struct pci_epf_mhi_ep_info sa8775p_info = {
+ .config = &mhi_v1_config,
+ .epf_header = &sa8775p_header,
+ .bar_num = BAR_0,
+ .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
+ .msi_count = 32,
+ .mru = 0x8000,
+};
+
struct pci_epf_mhi {
const struct pci_epc_features *epc_features;
const struct pci_epf_mhi_ep_info *info;
@@ -913,8 +929,9 @@ static int pci_epf_mhi_probe(struct pci_epf *epf,
}
static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
- { .name = "sdx55", .driver_data = (kernel_ulong_t)&sdx55_info },
- { .name = "sm8450", .driver_data = (kernel_ulong_t)&sm8450_info },
+ { .name = "pci_epf_mhi_sa8775p", .driver_data = (kernel_ulong_t)&sa8775p_info },
+ { .name = "pci_epf_mhi_sdx55", .driver_data = (kernel_ulong_t)&sdx55_info },
+ { .name = "pci_epf_mhi_sm8450", .driver_data = (kernel_ulong_t)&sm8450_info },
{},
};
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
index 0553946005c4d..e01a98e74d211 100644
--- a/drivers/pci/endpoint/functions/pci-epf-ntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -1012,13 +1012,13 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb,
epc_features = ntb_epc->epc_features;
barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
- size = epc_features->bar_fixed_size[barno];
+ size = epc_features->bar[barno].fixed_size;
align = epc_features->align;
peer_ntb_epc = ntb->epc[!type];
peer_epc_features = peer_ntb_epc->epc_features;
peer_barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
- peer_size = peer_epc_features->bar_fixed_size[peer_barno];
+ peer_size = peer_epc_features->bar[peer_barno].fixed_size;
/* Check if epc_features is populated incorrectly */
if ((!IS_ALIGNED(size, align)))
@@ -1067,7 +1067,7 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb,
else if (size < ctrl_size + spad_size)
return -EINVAL;
- base = pci_epf_alloc_space(epf, size, barno, align, type);
+ base = pci_epf_alloc_space(epf, size, barno, epc_features, type);
if (!base) {
dev_err(dev, "%s intf: Config/Status/SPAD alloc region fail\n",
pci_epc_interface_string(type));
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 18c80002d3bd5..cd4ffb39dcdc6 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -729,7 +729,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
*/
add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
- if (!!(epc_features->reserved_bar & (1 << bar)))
+ if (epc_features->bar[bar].type == BAR_RESERVED)
continue;
ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
@@ -841,14 +841,8 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
}
test_reg_size = test_reg_bar_size + msix_table_size + pba_size;
- if (epc_features->bar_fixed_size[test_reg_bar]) {
- if (test_reg_size > bar_size[test_reg_bar])
- return -ENOMEM;
- test_reg_size = bar_size[test_reg_bar];
- }
-
base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
- epc_features->align, PRIMARY_INTERFACE);
+ epc_features, PRIMARY_INTERFACE);
if (!base) {
dev_err(dev, "Failed to allocated register space\n");
return -ENOMEM;
@@ -862,12 +856,11 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
if (bar == test_reg_bar)
continue;
- if (!!(epc_features->reserved_bar & (1 << bar)))
+ if (epc_features->bar[bar].type == BAR_RESERVED)
continue;
base = pci_epf_alloc_space(epf, bar_size[bar], bar,
- epc_features->align,
- PRIMARY_INTERFACE);
+ epc_features, PRIMARY_INTERFACE);
if (!base)
dev_err(dev, "Failed to allocate space for BAR%d\n",
bar);
@@ -881,16 +874,12 @@ static void pci_epf_configure_bar(struct pci_epf *epf,
const struct pci_epc_features *epc_features)
{
struct pci_epf_bar *epf_bar;
- bool bar_fixed_64bit;
int i;
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
epf_bar = &epf->bar[i];
- bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
- if (bar_fixed_64bit)
+ if (epc_features->bar[i].only_64bit)
epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
- if (epc_features->bar_fixed_size[i])
- bar_size[i] = epc_features->bar_fixed_size[i];
}
}
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index e75a2af77328e..8e779eecd62d4 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -422,7 +422,7 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
epf->func_no,
epf->vfunc_no);
barno = ntb->epf_ntb_bar[BAR_CONFIG];
- size = epc_features->bar_fixed_size[barno];
+ size = epc_features->bar[barno].fixed_size;
align = epc_features->align;
if ((!IS_ALIGNED(size, align)))
@@ -446,7 +446,7 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
else if (size < ctrl_size + spad_size)
return -EINVAL;
- base = pci_epf_alloc_space(epf, size, barno, align, 0);
+ base = pci_epf_alloc_space(epf, size, barno, epc_features, 0);
if (!base) {
dev_err(dev, "Config/Status/SPAD alloc region fail\n");
return -ENOMEM;
@@ -527,7 +527,6 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb)
static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
{
const struct pci_epc_features *epc_features;
- u32 align;
struct device *dev = &ntb->epf->dev;
int ret;
struct pci_epf_bar *epf_bar;
@@ -538,19 +537,9 @@ static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
epc_features = pci_epc_get_features(ntb->epf->epc,
ntb->epf->func_no,
ntb->epf->vfunc_no);
- align = epc_features->align;
-
- if (size < 128)
- size = 128;
-
- if (align)
- size = ALIGN(size, align);
- else
- size = roundup_pow_of_two(size);
-
barno = ntb->epf_ntb_bar[BAR_DB];
- mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, align, 0);
+ mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, epc_features, 0);
if (!mw_addr) {
dev_err(dev, "Failed to allocate OB address\n");
return -ENOMEM;
@@ -1269,21 +1258,17 @@ static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "Cannot set DMA mask\n");
- return -EINVAL;
+ return ret;
}
ret = ntb_register_device(&ndev->ntb);
if (ret) {
dev_err(dev, "Failed to register NTB device\n");
- goto err_register_dev;
+ return ret;
}
dev_dbg(dev, "PCI Virtual NTB driver loaded\n");
return 0;
-
-err_register_dev:
- put_device(&ndev->ntb.dev);
- return -EINVAL;
}
static struct pci_device_id pci_vntb_table[] = {
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index dcd4e66430c10..da3fc0795b0b6 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -87,7 +87,7 @@ EXPORT_SYMBOL_GPL(pci_epc_get);
* @epc_features: pci_epc_features structure that holds the reserved bar bitmap
*
* Invoke to get the first unreserved BAR that can be used by the endpoint
- * function. For any incorrect value in reserved_bar return '0'.
+ * function.
*/
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
@@ -102,32 +102,27 @@ EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
* @bar: the starting BAR number from where unreserved BAR should be searched
*
* Invoke to get the next unreserved BAR starting from @bar that can be used
- * for endpoint function. For any incorrect value in reserved_bar return '0'.
+ * for endpoint function.
*/
enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
*epc_features, enum pci_barno bar)
{
- unsigned long free_bar;
+ int i;
if (!epc_features)
return BAR_0;
/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
- if ((epc_features->bar_fixed_64bit << 1) & 1 << bar)
+ if (bar > 0 && epc_features->bar[bar - 1].only_64bit)
bar++;
- /* Find if the reserved BAR is also a 64-bit BAR */
- free_bar = epc_features->reserved_bar & epc_features->bar_fixed_64bit;
-
- /* Set the adjacent bit if the reserved BAR is also a 64-bit BAR */
- free_bar <<= 1;
- free_bar |= epc_features->reserved_bar;
-
- free_bar = find_next_zero_bit(&free_bar, 6, bar);
- if (free_bar > 5)
- return NO_BAR;
+ for (i = bar; i < PCI_STD_NUM_BARS; i++) {
+ /* If the BAR is not reserved, return it. */
+ if (epc_features->bar[i].type != BAR_RESERVED)
+ return i;
+ }
- return free_bar;
+ return NO_BAR;
}
EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 2c32de6679377..0a28a0b0911b1 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -17,7 +17,7 @@
static DEFINE_MUTEX(pci_epf_mutex);
-static struct bus_type pci_epf_bus_type;
+static const struct bus_type pci_epf_bus_type;
static const struct device_type pci_epf_type;
/**
@@ -251,14 +251,17 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space);
* @epf: the EPF device to whom allocate the memory
* @size: the size of the memory that has to be allocated
* @bar: the BAR number corresponding to the allocated register space
- * @align: alignment size for the allocation region
+ * @epc_features: the features provided by the EPC specific to this EPF
* @type: Identifies if the allocation is for primary EPC or secondary EPC
*
* Invoke to allocate memory for the PCI EPF register space.
*/
void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
- size_t align, enum pci_epc_interface_type type)
+ const struct pci_epc_features *epc_features,
+ enum pci_epc_interface_type type)
{
+ u64 bar_fixed_size = epc_features->bar[bar].fixed_size;
+ size_t align = epc_features->align;
struct pci_epf_bar *epf_bar;
dma_addr_t phys_addr;
struct pci_epc *epc;
@@ -268,6 +271,15 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
if (size < 128)
size = 128;
+ if (epc_features->bar[bar].type == BAR_FIXED && bar_fixed_size) {
+ if (size > bar_fixed_size) {
+ dev_err(&epf->dev,
+ "requested BAR size is larger than fixed size\n");
+ return NULL;
+ }
+ size = bar_fixed_size;
+ }
+
if (align)
size = ALIGN(size, align);
else
@@ -507,7 +519,7 @@ static void pci_epf_device_remove(struct device *dev)
epf->driver = NULL;
}
-static struct bus_type pci_epf_bus_type = {
+static const struct bus_type pci_epf_bus_type = {
.name = "pci-epf",
.match = pci_epf_device_match,
.probe = pci_epf_device_probe,
diff --git a/drivers/pci/iomap.c b/drivers/pci/iomap.c
new file mode 100644
index 0000000000000..c9725428e3874
--- /dev/null
+++ b/drivers/pci/iomap.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implement the default iomap interfaces
+ *
+ * (C) Copyright 2004 Linus Torvalds
+ */
+#include <linux/pci.h>
+#include <linux/io.h>
+
+#include <linux/export.h>
+
+/**
+ * pci_iomap_range - create a virtual mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @offset: map memory at the given offset in BAR
+ * @maxlen: max length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These accessors hide
+ * the details of whether this is an MMIO or PIO address space and just do
+ * what you expect from them.
+ *
+ * @maxlen specifies the maximum length to map. If you want access to
+ * the complete BAR from @offset to the end, pass %0 here.
+ */
+void __iomem *pci_iomap_range(struct pci_dev *dev,
+ int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ resource_size_t start = pci_resource_start(dev, bar);
+ resource_size_t len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (len <= offset || !start)
+ return NULL;
+ len -= offset;
+ start += offset;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+ if (flags & IORESOURCE_IO)
+ return __pci_ioport_map(dev, start, len);
+ if (flags & IORESOURCE_MEM)
+ return ioremap(start, len);
+ /* Neither IORESOURCE_IO nor IORESOURCE_MEM -- nothing to map */
+ return NULL;
+}
+EXPORT_SYMBOL(pci_iomap_range);
+
+/**
+ * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @offset: map memory at the given offset in BAR
+ * @maxlen: max length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These accessors hide
+ * the details of whether this is an MMIO or PIO address space and just do
+ * what you expect from them. Write combining is used when possible.
+ *
+ * @maxlen specifies the maximum length to map. If you want access to
+ * the complete BAR from @offset to the end, pass %0 here.
+ */
+void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
+ int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ resource_size_t start = pci_resource_start(dev, bar);
+ resource_size_t len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (flags & IORESOURCE_IO)
+ return NULL;
+
+ if (len <= offset || !start)
+ return NULL;
+
+ len -= offset;
+ start += offset;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+
+ if (flags & IORESOURCE_MEM)
+ return ioremap_wc(start, len);
+
+ /* Neither IORESOURCE_IO nor IORESOURCE_MEM -- nothing to map */
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
+
+/**
+ * pci_iomap - create a virtual mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @maxlen: length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These accessors hide
+ * the details of whether this is an MMIO or PIO address space and just do
+ * what you expect from them.
+ *
+ * @maxlen specifies the maximum length to map. If you want access to
+ * the complete BAR without checking its length first, pass %0 here.
+ */
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return pci_iomap_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL(pci_iomap);
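
/*
 * Editor's illustrative sketch, not part of this patch: map the whole of
 * BAR 0 by passing maxlen == 0 and poke it through the ioread*()/iowrite*()
 * accessors. The register offset and reset bit are assumptions.
 */
#if 0	/* example only */
static int foo_reset_device(struct pci_dev *pdev)
{
	void __iomem *base = pci_iomap(pdev, 0, 0);	/* map all of BAR 0 */
	u32 ctrl;

	if (!base)
		return -ENOMEM;

	ctrl = ioread32(base + 0x04);		/* works for MMIO and PIO BARs */
	iowrite32(ctrl | 0x1, base + 0x04);
	pci_iounmap(pdev, base);
	return 0;
}
#endif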
+
+/**
+ * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @maxlen: length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These accessors hide
+ * the details of whether this is an MMIO or PIO address space and just do
+ * what you expect from them. Write combining is used when possible.
+ *
+ * @maxlen specifies the maximum length to map. If you want access to
+ * the complete BAR without checking its length first, pass %0 here.
+ */
+void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return pci_iomap_wc_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL_GPL(pci_iomap_wc);
+
+/*
+ * pci_iounmap() somewhat illogically comes from lib/iomap.c for the
+ * CONFIG_GENERIC_IOMAP case, because that's the code that knows about
+ * the different IOMAP ranges.
+ *
+ * But if the architecture does not use the generic iomap code, and if
+ * it has _not_ defined its own private pci_iounmap function, we define
+ * it here.
+ *
+ * NOTE! This default implementation assumes that if the architecture
+ * supports ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
+ * be fixed to the range [PCI_IOBASE, PCI_IOBASE + IO_SPACE_LIMIT),
+ * and does not need unmapping with 'ioport_unmap()'.
+ *
+ * If you have different rules for your architecture, you need to
+ * implement your own pci_iounmap() that knows the rules for where
+ * and how IO vs MEM get mapped.
+ *
+ * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
+ * from legacy <asm-generic/io.h> header file behavior. In particular,
+ * it would seem to make sense to do the iounmap(p) for the non-IO-space
+ * case here regardless, but that's not what the old header file code
+ * did. Probably incorrectly, but this is meant to be bug-for-bug
+ * compatible.
+ */
+#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
+ uintptr_t start = (uintptr_t) PCI_IOBASE;
+ uintptr_t addr = (uintptr_t) p;
+
+ if (addr >= start && addr < start + IO_SPACE_LIMIT)
+ return;
+#endif
+ iounmap(p);
+}
+EXPORT_SYMBOL(pci_iounmap);
+
+#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
index 0050e8f6814ed..4555630be9ecd 100644
--- a/drivers/pci/irq.c
+++ b/drivers/pci/irq.c
@@ -8,9 +8,13 @@
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/errno.h>
#include <linux/export.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
+#include "pci.h"
+
/**
* pci_request_irq - allocate an interrupt line for a PCI device
* @dev: PCI device to operate on
@@ -74,3 +78,203 @@ void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id)
kfree(free_irq(pci_irq_vector(dev, nr), dev_id));
}
EXPORT_SYMBOL(pci_free_irq);
+
+/**
+ * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
+ * @dev: the PCI device
+ * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
+ *
+ * Perform INTx swizzling for a device behind one level of bridge. This is
+ * required by section 9.1 of the PCI-to-PCI bridge specification for devices
+ * behind bridges on add-in cards. For devices with ARI enabled, the slot
+ * number is always 0 (see the Implementation Note in section 2.2.8.1 of
+ * the PCI Express Base Specification, Revision 2.1)
+ */
+u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
+{
+ int slot;
+
+ if (pci_ari_enabled(dev->bus))
+ slot = 0;
+ else
+ slot = PCI_SLOT(dev->devfn);
+
+ return (((pin - 1) + slot) % 4) + 1;
+}
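
/*
 * Editor's worked example, not part of this patch: a device in slot 2
 * behind a single bridge asserting INTB (pin = 2) is seen upstream as
 * pin = (((2 - 1) + 2) % 4) + 1 = 4, i.e. INTD at the bridge.
 */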
+
+int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
+{
+ u8 pin;
+
+ pin = dev->pin;
+ if (!pin)
+ return -1;
+
+ while (!pci_is_root_bus(dev->bus)) {
+ pin = pci_swizzle_interrupt_pin(dev, pin);
+ dev = dev->bus->self;
+ }
+ *bridge = dev;
+ return pin;
+}
+
+/**
+ * pci_common_swizzle - swizzle INTx all the way to root bridge
+ * @dev: the PCI device
+ * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
+ *
+ * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
+ * bridges all the way up to a PCI root bus.
+ */
+u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+ u8 pin = *pinp;
+
+ while (!pci_is_root_bus(dev->bus)) {
+ pin = pci_swizzle_interrupt_pin(dev, pin);
+ dev = dev->bus->self;
+ }
+ *pinp = pin;
+ return PCI_SLOT(dev->devfn);
+}
+EXPORT_SYMBOL_GPL(pci_common_swizzle);
+
+void pci_assign_irq(struct pci_dev *dev)
+{
+ u8 pin;
+ u8 slot = -1;
+ int irq = 0;
+ struct pci_host_bridge *hbrg = pci_find_host_bridge(dev->bus);
+
+ if (!(hbrg->map_irq)) {
+ pci_dbg(dev, "runtime IRQ mapping not provided by arch\n");
+ return;
+ }
+
+ /*
+ * If this device is not on the primary bus, we need to figure out
+ * which interrupt pin it will come in on. We know which slot it
+ * will come in on because that slot is where the bridge is. Each
+ * time the interrupt line passes through a PCI-PCI bridge we must
+ * apply the swizzle function.
+ */
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ /* Cope with an illegal pin value by falling back to INTA. */
+ if (pin > 4)
+ pin = 1;
+
+ if (pin) {
+ /* Follow the chain of bridges, swizzling as we go. */
+ if (hbrg->swizzle_irq)
+ slot = (*(hbrg->swizzle_irq))(dev, &pin);
+
+ /*
+ * If a swizzling function is not used, map_irq() must
+ * ignore slot.
+ */
+ irq = (*(hbrg->map_irq))(dev, slot, pin);
+ if (irq == -1)
+ irq = 0;
+ }
+ dev->irq = irq;
+
+ pci_dbg(dev, "assign IRQ: got %d\n", dev->irq);
+
+ /*
+ * Always write the IRQ back to the device so the driver knows the
+ * real IRQ to use; the device itself does not use this register.
+ */
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
+{
+ struct pci_bus *bus = dev->bus;
+ bool mask_updated = true;
+ u32 cmd_status_dword;
+ u16 origcmd, newcmd;
+ unsigned long flags;
+ bool irq_pending;
+
+ /*
+ * We do a single dword read to retrieve both command and status.
+ * Document assumptions that make this possible.
+ */
+ BUILD_BUG_ON(PCI_COMMAND % 4);
+ BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
+
+ raw_spin_lock_irqsave(&pci_lock, flags);
+
+ bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
+
+ irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
+
+ /*
+ * Check interrupt status register to see whether our device
+ * triggered the interrupt (when masking) or the next IRQ is
+ * already pending (when unmasking).
+ */
+ if (mask != irq_pending) {
+ mask_updated = false;
+ goto done;
+ }
+
+ origcmd = cmd_status_dword;
+ newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
+ if (mask)
+ newcmd |= PCI_COMMAND_INTX_DISABLE;
+ if (newcmd != origcmd)
+ bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
+
+done:
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
+
+ return mask_updated;
+}
+
+/**
+ * pci_check_and_mask_intx - mask INTx on pending interrupt
+ * @dev: the PCI device to operate on
+ *
+ * Check whether @dev has its INTx line asserted; if so, mask it and
+ * return true. Return false if no interrupt was pending.
+ */
+bool pci_check_and_mask_intx(struct pci_dev *dev)
+{
+ return pci_check_and_set_intx_mask(dev, true);
+}
+EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
+
+/**
+ * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
+ * @dev: the PCI device to operate on
+ *
+ * Check whether @dev has its INTx line asserted; if not, unmask it and
+ * return true. Return false and leave the mask active if an interrupt
+ * was still pending.
+ */
+bool pci_check_and_unmask_intx(struct pci_dev *dev)
+{
+ return pci_check_and_set_intx_mask(dev, false);
+}
+EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
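
/*
 * Editor's illustrative sketch, not part of this patch: the usual pairing of
 * these helpers in a shared INTx handler for hardware without a dedicated
 * interrupt-disable bit. The "foo" structure, work item and event handler
 * are assumptions.
 */
#if 0	/* example only */
static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct foo_dev *foo = data;

	if (!pci_check_and_mask_intx(foo->pdev))
		return IRQ_NONE;			/* not our interrupt */

	schedule_work(&foo->work);			/* bottom half unmasks later */
	return IRQ_HANDLED;
}

static void foo_work_fn(struct work_struct *work)
{
	struct foo_dev *foo = container_of(work, struct foo_dev, work);

	foo_handle_events(foo);				/* hypothetical */
	pci_check_and_unmask_intx(foo->pdev);		/* re-enable INTx delivery */
}
#endif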
+
+/**
+ * pcibios_penalize_isa_irq - penalize an ISA IRQ
+ * @irq: ISA IRQ to penalize
+ * @active: IRQ active or not
+ *
+ * Permits the platform to provide architecture-specific functionality when
+ * penalizing ISA IRQs. This is the default implementation. Architecture
+ * implementations can override this.
+ */
+void __weak pcibios_penalize_isa_irq(int irq, int active) {}
+
+int __weak pcibios_alloc_irq(struct pci_dev *dev)
+{
+ return 0;
+}
+
+void __weak pcibios_free_irq(struct pci_dev *dev)
+{
+}
diff --git a/drivers/pci/mmap.c b/drivers/pci/mmap.c
index 4504039056d1b..8da3347a95c47 100644
--- a/drivers/pci/mmap.c
+++ b/drivers/pci/mmap.c
@@ -11,6 +11,8 @@
#include <linux/mm.h>
#include <linux/pci.h>
+#include "pci.h"
+
#ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE
static const struct vm_operations_struct pci_phys_vm_ops = {
@@ -50,3 +52,30 @@ int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
}
#endif
+
+#if (defined(CONFIG_SYSFS) || defined(CONFIG_PROC_FS)) && \
+ (defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE))
+
+int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
+ enum pci_mmap_api mmap_api)
+{
+ resource_size_t pci_start = 0, pci_end;
+ unsigned long nr, start, size;
+
+ if (pci_resource_len(pdev, resno) == 0)
+ return 0;
+ nr = vma_pages(vma);
+ start = vma->vm_pgoff;
+ size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
+ if (mmap_api == PCI_MMAP_PROCFS) {
+ pci_resource_to_user(pdev, resno, &pdev->resource[resno],
+ &pci_start, &pci_end);
+ pci_start >>= PAGE_SHIFT;
+ }
+ if (start >= pci_start && start < pci_start + size &&
+ start + nr <= pci_start + size)
+ return 1;
+ return 0;
+}
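
/*
 * Editor's worked example, not part of this patch: for a 64 KiB BAR,
 * size = ((65536 - 1) >> PAGE_SHIFT) + 1 = 16 pages (with 4 KiB pages), so
 * an mmap() of 4 pages starting at page offset 12 fits, while the same
 * mapping starting at offset 13 is rejected.
 */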
+
+#endif
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 0c361561b855c..4f47a13cb500f 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -661,7 +661,7 @@ done:
p2pdma = rcu_dereference(provider->p2pdma);
if (p2pdma)
xa_store(&p2pdma->map_types, map_types_idx(client),
- xa_mk_value(map_type), GFP_KERNEL);
+ xa_mk_value(map_type), GFP_ATOMIC);
rcu_read_unlock();
return map_type;
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 51ec9e7e784f0..af2996d0d17ff 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -419,15 +419,6 @@ static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
return error;
}
-int __weak pcibios_alloc_irq(struct pci_dev *dev)
-{
- return 0;
-}
-
-void __weak pcibios_free_irq(struct pci_dev *dev)
-{
-}
-
#ifdef CONFIG_PCI_IOV
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
@@ -473,6 +464,13 @@ static void pci_device_remove(struct device *dev)
if (drv->remove) {
pm_runtime_get_sync(dev);
+ /*
+ * If the driver provides a .runtime_idle() callback and it has
+ * started to run already, it may continue to run in parallel
+ * with the code below, so wait until all of the runtime PM
+ * activity has completed.
+ */
+ pm_runtime_barrier(dev);
drv->remove(pci_dev);
pm_runtime_put_noidle(dev);
}
@@ -1382,10 +1380,7 @@ static int pci_pm_runtime_idle(struct device *dev)
if (!pci_dev->driver)
return 0;
- if (!pm)
- return -ENOSYS;
-
- if (pm->runtime_idle)
+ if (pm && pm->runtime_idle)
return pm->runtime_idle(dev);
return 0;
@@ -1714,7 +1709,7 @@ static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
return 1;
}
-struct bus_type pcie_port_bus_type = {
+const struct bus_type pcie_port_bus_type = {
.name = "pci_express",
.match = pcie_port_bus_match,
};
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 2321fdfefd7db..40cfa716392fb 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1022,29 +1022,6 @@ void pci_remove_legacy_files(struct pci_bus *b)
#endif /* HAVE_PCI_LEGACY */
#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
-
-int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
- enum pci_mmap_api mmap_api)
-{
- unsigned long nr, start, size;
- resource_size_t pci_start = 0, pci_end;
-
- if (pci_resource_len(pdev, resno) == 0)
- return 0;
- nr = vma_pages(vma);
- start = vma->vm_pgoff;
- size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
- if (mmap_api == PCI_MMAP_PROCFS) {
- pci_resource_to_user(pdev, resno, &pdev->resource[resno],
- &pci_start, &pci_end);
- pci_start >>= PAGE_SHIFT;
- }
- if (start >= pci_start && start < pci_start + size &&
- start + nr <= pci_start + size)
- return 1;
- return 0;
-}
-
/**
* pci_mmap_resource - map a PCI resource into user memory space
* @kobj: kobject for mapping
@@ -1410,79 +1387,89 @@ static const struct attribute_group pci_dev_reset_attr_group = {
.is_visible = pci_dev_reset_attr_is_visible,
};
+static ssize_t __resource_resize_show(struct device *dev, int n, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t ret;
+
+ pci_config_pm_runtime_get(pdev);
+
+ ret = sysfs_emit(buf, "%016llx\n",
+ (u64)pci_rebar_get_possible_sizes(pdev, n));
+
+ pci_config_pm_runtime_put(pdev);
+
+ return ret;
+}
+
+static ssize_t __resource_resize_store(struct device *dev, int n,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ unsigned long size, flags;
+ int ret, i;
+ u16 cmd;
+
+ if (kstrtoul(buf, 0, &size) < 0)
+ return -EINVAL;
+
+ device_lock(dev);
+ if (dev->driver) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ pci_config_pm_runtime_get(pdev);
+
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
+ ret = aperture_remove_conflicting_pci_devices(pdev,
+ "resourceN_resize");
+ if (ret)
+ goto pm_put;
+ }
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ pci_write_config_word(pdev, PCI_COMMAND,
+ cmd & ~PCI_COMMAND_MEMORY);
+
+ flags = pci_resource_flags(pdev, n);
+
+ pci_remove_resource_files(pdev);
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (pci_resource_len(pdev, i) &&
+ pci_resource_flags(pdev, i) == flags)
+ pci_release_resource(pdev, i);
+ }
+
+ ret = pci_resize_resource(pdev, n, size);
+
+ pci_assign_unassigned_bus_resources(pdev->bus);
+
+ if (pci_create_resource_files(pdev))
+ pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");
+
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+pm_put:
+ pci_config_pm_runtime_put(pdev);
+unlock:
+ device_unlock(dev);
+
+ return ret ? ret : count;
+}
+
#define pci_dev_resource_resize_attr(n) \
static ssize_t resource##n##_resize_show(struct device *dev, \
struct device_attribute *attr, \
- char * buf) \
+ char *buf) \
{ \
- struct pci_dev *pdev = to_pci_dev(dev); \
- ssize_t ret; \
- \
- pci_config_pm_runtime_get(pdev); \
- \
- ret = sysfs_emit(buf, "%016llx\n", \
- (u64)pci_rebar_get_possible_sizes(pdev, n)); \
- \
- pci_config_pm_runtime_put(pdev); \
- \
- return ret; \
+ return __resource_resize_show(dev, n, buf); \
} \
- \
static ssize_t resource##n##_resize_store(struct device *dev, \
struct device_attribute *attr,\
const char *buf, size_t count)\
{ \
- struct pci_dev *pdev = to_pci_dev(dev); \
- unsigned long size, flags; \
- int ret, i; \
- u16 cmd; \
- \
- if (kstrtoul(buf, 0, &size) < 0) \
- return -EINVAL; \
- \
- device_lock(dev); \
- if (dev->driver) { \
- ret = -EBUSY; \
- goto unlock; \
- } \
- \
- pci_config_pm_runtime_get(pdev); \
- \
- if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) { \
- ret = aperture_remove_conflicting_pci_devices(pdev, \
- "resourceN_resize"); \
- if (ret) \
- goto pm_put; \
- } \
- \
- pci_read_config_word(pdev, PCI_COMMAND, &cmd); \
- pci_write_config_word(pdev, PCI_COMMAND, \
- cmd & ~PCI_COMMAND_MEMORY); \
- \
- flags = pci_resource_flags(pdev, n); \
- \
- pci_remove_resource_files(pdev); \
- \
- for (i = 0; i < PCI_STD_NUM_BARS; i++) { \
- if (pci_resource_len(pdev, i) && \
- pci_resource_flags(pdev, i) == flags) \
- pci_release_resource(pdev, i); \
- } \
- \
- ret = pci_resize_resource(pdev, n, size); \
- \
- pci_assign_unassigned_bus_resources(pdev->bus); \
- \
- if (pci_create_resource_files(pdev)) \
- pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");\
- \
- pci_write_config_word(pdev, PCI_COMMAND, cmd); \
-pm_put: \
- pci_config_pm_runtime_put(pdev); \
-unlock: \
- device_unlock(dev); \
- \
- return ret ? ret : count; \
+ return __resource_resize_store(dev, n, buf, count); \
} \
static DEVICE_ATTR_RW(resource##n##_resize)
@@ -1660,7 +1647,7 @@ static const struct attribute_group pcie_dev_attr_group = {
.is_visible = pcie_dev_attrs_are_visible,
};
-static const struct attribute_group *pci_dev_attr_groups[] = {
+const struct attribute_group *pci_dev_attr_groups[] = {
&pci_dev_attr_group,
&pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
@@ -1677,7 +1664,3 @@ static const struct attribute_group *pci_dev_attr_groups[] = {
#endif
NULL,
};
-
-const struct device_type pci_dev_type = {
- .groups = pci_dev_attr_groups,
-};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 1e33f0e2d945d..e5f243dd42884 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -24,7 +24,6 @@
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
-#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
@@ -1068,6 +1067,34 @@ disable_acs_redir:
}
/**
+ * pcie_read_tlp_log - read TLP Header Log
+ * @dev: PCIe device
+ * @where: PCI Config offset of TLP Header Log
+ * @tlp_log: TLP Log structure to fill
+ *
+ * Fill @tlp_log from the TLP Header Log registers of a capability such as
+ * AER or DPC.
+ *
+ * Return: 0 on success, with @tlp_log filled in; a negative errno on error.
+ */
+int pcie_read_tlp_log(struct pci_dev *dev, int where,
+ struct pcie_tlp_log *tlp_log)
+{
+ int i, ret;
+
+ memset(tlp_log, 0, sizeof(*tlp_log));
+
+ for (i = 0; i < 4; i++) {
+ ret = pci_read_config_dword(dev, where + i * 4,
+ &tlp_log->dw[i]);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pcie_read_tlp_log);
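
/*
 * Editor's illustrative sketch, not part of this patch: how a caller such as
 * the AER code changed below can fill and print the new pcie_tlp_log
 * structure. The foo_* name is an assumption.
 */
#if 0	/* example only */
static void foo_dump_tlp_header(struct pci_dev *dev)
{
	struct pcie_tlp_log log;

	if (!dev->aer_cap)
		return;

	if (pcie_read_tlp_log(dev, dev->aer_cap + PCI_ERR_HEADER_LOG, &log))
		return;

	pci_info(dev, "TLP Header: %08x %08x %08x %08x\n",
		 log.dw[0], log.dw[1], log.dw[2], log.dw[3]);
}
#endif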
+
+/**
* pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
* @dev: PCI device to have its BARs restored
*
@@ -1649,25 +1676,10 @@ static int pci_save_pcie_state(struct pci_dev *dev)
pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
- return 0;
-}
-
-void pci_bridge_reconfigure_ltr(struct pci_dev *dev)
-{
-#ifdef CONFIG_PCIEASPM
- struct pci_dev *bridge;
- u32 ctl;
+ pci_save_aspm_l1ss_state(dev);
+ pci_save_ltr_state(dev);
- bridge = pci_upstream_bridge(dev);
- if (bridge && bridge->ltr_path) {
- pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
- if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
- pci_dbg(bridge, "re-enabling LTR\n");
- pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
- PCI_EXP_DEVCTL2_LTR_EN);
- }
- }
-#endif
+ return 0;
}
static void pci_restore_pcie_state(struct pci_dev *dev)
@@ -1676,6 +1688,13 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
struct pci_cap_saved_state *save_state;
u16 *cap;
+ /*
+ * Restore max latencies (in the LTR capability) before enabling
+ * LTR itself in PCI_EXP_DEVCTL2.
+ */
+ pci_restore_ltr_state(dev);
+ pci_restore_aspm_l1ss_state(dev);
+
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
if (!save_state)
return;
@@ -1733,46 +1752,6 @@ static void pci_restore_pcix_state(struct pci_dev *dev)
pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}
-static void pci_save_ltr_state(struct pci_dev *dev)
-{
- int ltr;
- struct pci_cap_saved_state *save_state;
- u32 *cap;
-
- if (!pci_is_pcie(dev))
- return;
-
- ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
- if (!ltr)
- return;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
- if (!save_state) {
- pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
- return;
- }
-
- /* Some broken devices only support dword access to LTR */
- cap = &save_state->cap.data[0];
- pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
-}
-
-static void pci_restore_ltr_state(struct pci_dev *dev)
-{
- struct pci_cap_saved_state *save_state;
- int ltr;
- u32 *cap;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
- ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
- if (!save_state || !ltr)
- return;
-
- /* Some broken devices only support dword access to LTR */
- cap = &save_state->cap.data[0];
- pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
-}
-
/**
* pci_save_state - save the PCI configuration space of a device before
* suspending
@@ -1797,7 +1776,6 @@ int pci_save_state(struct pci_dev *dev)
if (i != 0)
return i;
- pci_save_ltr_state(dev);
pci_save_dpc_state(dev);
pci_save_aer_state(dev);
pci_save_ptm_state(dev);
@@ -1898,12 +1876,6 @@ void pci_restore_state(struct pci_dev *dev)
if (!dev->state_saved)
return;
- /*
- * Restore max latencies (in the LTR capability) before enabling
- * LTR itself (in the PCIe capability).
- */
- pci_restore_ltr_state(dev);
-
pci_restore_pcie_state(dev);
pci_restore_pasid_state(dev);
pci_restore_pri_state(dev);
@@ -2184,107 +2156,6 @@ int pci_enable_device(struct pci_dev *dev)
EXPORT_SYMBOL(pci_enable_device);
/*
- * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
- * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
- * there's no need to track it separately. pci_devres is initialized
- * when a device is enabled using managed PCI device enable interface.
- */
-struct pci_devres {
- unsigned int enabled:1;
- unsigned int pinned:1;
- unsigned int orig_intx:1;
- unsigned int restore_intx:1;
- unsigned int mwi:1;
- u32 region_mask;
-};
-
-static void pcim_release(struct device *gendev, void *res)
-{
- struct pci_dev *dev = to_pci_dev(gendev);
- struct pci_devres *this = res;
- int i;
-
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
- if (this->region_mask & (1 << i))
- pci_release_region(dev, i);
-
- if (this->mwi)
- pci_clear_mwi(dev);
-
- if (this->restore_intx)
- pci_intx(dev, this->orig_intx);
-
- if (this->enabled && !this->pinned)
- pci_disable_device(dev);
-}
-
-static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
-{
- struct pci_devres *dr, *new_dr;
-
- dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
- if (dr)
- return dr;
-
- new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
- if (!new_dr)
- return NULL;
- return devres_get(&pdev->dev, new_dr, NULL, NULL);
-}
-
-static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
-{
- if (pci_is_managed(pdev))
- return devres_find(&pdev->dev, pcim_release, NULL, NULL);
- return NULL;
-}
-
-/**
- * pcim_enable_device - Managed pci_enable_device()
- * @pdev: PCI device to be initialized
- *
- * Managed pci_enable_device().
- */
-int pcim_enable_device(struct pci_dev *pdev)
-{
- struct pci_devres *dr;
- int rc;
-
- dr = get_pci_dr(pdev);
- if (unlikely(!dr))
- return -ENOMEM;
- if (dr->enabled)
- return 0;
-
- rc = pci_enable_device(pdev);
- if (!rc) {
- pdev->is_managed = 1;
- dr->enabled = 1;
- }
- return rc;
-}
-EXPORT_SYMBOL(pcim_enable_device);
-
-/**
- * pcim_pin_device - Pin managed PCI device
- * @pdev: PCI device to pin
- *
- * Pin managed PCI device @pdev. Pinned device won't be disabled on
- * driver detach. @pdev must have been enabled with
- * pcim_enable_device().
- */
-void pcim_pin_device(struct pci_dev *pdev)
-{
- struct pci_devres *dr;
-
- dr = find_pci_dr(pdev);
- WARN_ON(!dr || !dr->enabled);
- if (dr)
- dr->pinned = 1;
-}
-EXPORT_SYMBOL(pcim_pin_device);
-
-/*
* pcibios_device_add - provide arch specific hooks when adding device dev
* @dev: the PCI device being added
*
@@ -2318,17 +2189,6 @@ void __weak pcibios_release_device(struct pci_dev *dev) {}
*/
void __weak pcibios_disable_device(struct pci_dev *dev) {}
-/**
- * pcibios_penalize_isa_irq - penalize an ISA IRQ
- * @irq: ISA IRQ to penalize
- * @active: IRQ active or not
- *
- * Permits the platform to provide architecture-specific functionality when
- * penalizing ISA IRQs. This is the default implementation. Architecture
- * implementations can override this.
- */
-void __weak pcibios_penalize_isa_irq(int irq, int active) {}
-
static void do_pci_disable_device(struct pci_dev *dev)
{
u16 pci_command;
@@ -3998,66 +3858,6 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
/**
- * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
- * @dev: the PCI device
- * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
- *
- * Perform INTx swizzling for a device behind one level of bridge. This is
- * required by section 9.1 of the PCI-to-PCI bridge specification for devices
- * behind bridges on add-in cards. For devices with ARI enabled, the slot
- * number is always 0 (see the Implementation Note in section 2.2.8.1 of
- * the PCI Express Base Specification, Revision 2.1)
- */
-u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
-{
- int slot;
-
- if (pci_ari_enabled(dev->bus))
- slot = 0;
- else
- slot = PCI_SLOT(dev->devfn);
-
- return (((pin - 1) + slot) % 4) + 1;
-}
-
-int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
-{
- u8 pin;
-
- pin = dev->pin;
- if (!pin)
- return -1;
-
- while (!pci_is_root_bus(dev->bus)) {
- pin = pci_swizzle_interrupt_pin(dev, pin);
- dev = dev->bus->self;
- }
- *bridge = dev;
- return pin;
-}
-
-/**
- * pci_common_swizzle - swizzle INTx all the way to root bridge
- * @dev: the PCI device
- * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTD, 4=INTD)
- *
- * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
- * bridges all the way up to a PCI root bus.
- */
-u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
-{
- u8 pin = *pinp;
-
- while (!pci_is_root_bus(dev->bus)) {
- pin = pci_swizzle_interrupt_pin(dev, pin);
- dev = dev->bus->self;
- }
- *pinp = pin;
- return PCI_SLOT(dev->devfn);
-}
-EXPORT_SYMBOL_GPL(pci_common_swizzle);
-
-/**
* pci_release_region - Release a PCI bar
* @pdev: PCI device whose resources were previously reserved by
* pci_request_region()
@@ -4385,133 +4185,6 @@ void pci_unmap_iospace(struct resource *res)
}
EXPORT_SYMBOL(pci_unmap_iospace);
-static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
-{
- struct resource **res = ptr;
-
- pci_unmap_iospace(*res);
-}
-
-/**
- * devm_pci_remap_iospace - Managed pci_remap_iospace()
- * @dev: Generic device to remap IO address for
- * @res: Resource describing the I/O space
- * @phys_addr: physical address of range to be mapped
- *
- * Managed pci_remap_iospace(). Map is automatically unmapped on driver
- * detach.
- */
-int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
- phys_addr_t phys_addr)
-{
- const struct resource **ptr;
- int error;
-
- ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- error = pci_remap_iospace(res, phys_addr);
- if (error) {
- devres_free(ptr);
- } else {
- *ptr = res;
- devres_add(dev, ptr);
- }
-
- return error;
-}
-EXPORT_SYMBOL(devm_pci_remap_iospace);
-
-/**
- * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
- * @dev: Generic device to remap IO address for
- * @offset: Resource address to map
- * @size: Size of map
- *
- * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
- * detach.
- */
-void __iomem *devm_pci_remap_cfgspace(struct device *dev,
- resource_size_t offset,
- resource_size_t size)
-{
- void __iomem **ptr, *addr;
-
- ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- addr = pci_remap_cfgspace(offset, size);
- if (addr) {
- *ptr = addr;
- devres_add(dev, ptr);
- } else
- devres_free(ptr);
-
- return addr;
-}
-EXPORT_SYMBOL(devm_pci_remap_cfgspace);
-
-/**
- * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
- * @dev: generic device to handle the resource for
- * @res: configuration space resource to be handled
- *
- * Checks that a resource is a valid memory region, requests the memory
- * region and ioremaps with pci_remap_cfgspace() API that ensures the
- * proper PCI configuration space memory attributes are guaranteed.
- *
- * All operations are managed and will be undone on driver detach.
- *
- * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
- * on failure. Usage example::
- *
- * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
- * if (IS_ERR(base))
- * return PTR_ERR(base);
- */
-void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
- struct resource *res)
-{
- resource_size_t size;
- const char *name;
- void __iomem *dest_ptr;
-
- BUG_ON(!dev);
-
- if (!res || resource_type(res) != IORESOURCE_MEM) {
- dev_err(dev, "invalid resource\n");
- return IOMEM_ERR_PTR(-EINVAL);
- }
-
- size = resource_size(res);
-
- if (res->name)
- name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
- res->name);
- else
- name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
- if (!name)
- return IOMEM_ERR_PTR(-ENOMEM);
-
- if (!devm_request_mem_region(dev, res->start, size, name)) {
- dev_err(dev, "can't request region for resource %pR\n", res);
- return IOMEM_ERR_PTR(-EBUSY);
- }
-
- dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
- if (!dest_ptr) {
- dev_err(dev, "ioremap failed for resource %pR\n", res);
- devm_release_mem_region(dev, res->start, size);
- dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
- }
-
- return dest_ptr;
-}
-EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
-
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
u16 old_cmd, cmd;
@@ -4662,27 +4335,6 @@ int pci_set_mwi(struct pci_dev *dev)
EXPORT_SYMBOL(pci_set_mwi);
/**
- * pcim_set_mwi - a device-managed pci_set_mwi()
- * @dev: the PCI device for which MWI is enabled
- *
- * Managed pci_set_mwi().
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-int pcim_set_mwi(struct pci_dev *dev)
-{
- struct pci_devres *dr;
-
- dr = find_pci_dr(dev);
- if (!dr)
- return -ENOMEM;
-
- dr->mwi = 1;
- return pci_set_mwi(dev);
-}
-EXPORT_SYMBOL(pcim_set_mwi);
-
-/**
* pci_try_set_mwi - enables memory-write-invalidate PCI transaction
* @dev: the PCI device for which MWI is enabled
*
@@ -4770,78 +4422,6 @@ void pci_intx(struct pci_dev *pdev, int enable)
}
EXPORT_SYMBOL_GPL(pci_intx);
-static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
-{
- struct pci_bus *bus = dev->bus;
- bool mask_updated = true;
- u32 cmd_status_dword;
- u16 origcmd, newcmd;
- unsigned long flags;
- bool irq_pending;
-
- /*
- * We do a single dword read to retrieve both command and status.
- * Document assumptions that make this possible.
- */
- BUILD_BUG_ON(PCI_COMMAND % 4);
- BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
-
- raw_spin_lock_irqsave(&pci_lock, flags);
-
- bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
-
- irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
-
- /*
- * Check interrupt status register to see whether our device
- * triggered the interrupt (when masking) or the next IRQ is
- * already pending (when unmasking).
- */
- if (mask != irq_pending) {
- mask_updated = false;
- goto done;
- }
-
- origcmd = cmd_status_dword;
- newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
- if (mask)
- newcmd |= PCI_COMMAND_INTX_DISABLE;
- if (newcmd != origcmd)
- bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
-
-done:
- raw_spin_unlock_irqrestore(&pci_lock, flags);
-
- return mask_updated;
-}
-
-/**
- * pci_check_and_mask_intx - mask INTx on pending interrupt
- * @dev: the PCI device to operate on
- *
- * Check if the device dev has its INTx line asserted, mask it and return
- * true in that case. False is returned if no interrupt was pending.
- */
-bool pci_check_and_mask_intx(struct pci_dev *dev)
-{
- return pci_check_and_set_intx_mask(dev, true);
-}
-EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
-
-/**
- * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
- * @dev: the PCI device to operate on
- *
- * Check if the device dev has its INTx line asserted, unmask it if not and
- * return true. False is returned and the mask remains active if there was
- * still an interrupt pending.
- */
-bool pci_check_and_unmask_intx(struct pci_dev *dev)
-{
- return pci_check_and_set_intx_mask(dev, false);
-}
-EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
-
/**
* pci_wait_for_pending_transaction - wait for pending transaction
* @dev: the PCI device to operate on
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index bfc56f7bee1c9..17fed18468474 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -31,9 +31,6 @@ bool pcie_cap_has_rtctl(const struct pci_dev *dev);
/* Functions internal to the PCI core code */
-int pci_create_sysfs_dev_files(struct pci_dev *pdev);
-void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
-void pci_cleanup_rom(struct pci_dev *dev);
#ifdef CONFIG_DMI
extern const struct attribute_group pci_dev_smbios_attr_group;
#endif
@@ -97,7 +94,6 @@ void pci_msi_init(struct pci_dev *dev);
void pci_msix_init(struct pci_dev *dev);
bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
-void pci_bridge_reconfigure_ltr(struct pci_dev *dev);
int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type);
static inline void pci_wakeup_event(struct pci_dev *dev)
@@ -152,7 +148,7 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; }
/* Functions for PCI Hotplug drivers to use */
int pci_hp_add_bridge(struct pci_dev *dev);
-#ifdef HAVE_PCI_LEGACY
+#if defined(CONFIG_SYSFS) && defined(HAVE_PCI_LEGACY)
void pci_create_legacy_files(struct pci_bus *bus);
void pci_remove_legacy_files(struct pci_bus *bus);
#else
@@ -185,10 +181,22 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
return (dev->no_d1d2 || parent_dstates);
}
+
+#ifdef CONFIG_SYSFS
+int pci_create_sysfs_dev_files(struct pci_dev *pdev);
+void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
extern const struct attribute_group *pci_dev_groups[];
+extern const struct attribute_group *pci_dev_attr_groups[];
extern const struct attribute_group *pcibus_groups[];
-extern const struct device_type pci_dev_type;
extern const struct attribute_group *pci_bus_groups[];
+#else
+static inline int pci_create_sysfs_dev_files(struct pci_dev *pdev) { return 0; }
+static inline void pci_remove_sysfs_dev_files(struct pci_dev *pdev) { }
+#define pci_dev_groups NULL
+#define pci_dev_attr_groups NULL
+#define pcibus_groups NULL
+#define pci_bus_groups NULL
+#endif
extern unsigned long pci_hotplug_io_size;
extern unsigned long pci_hotplug_mmio_size;
@@ -404,7 +412,7 @@ struct aer_err_info {
unsigned int status; /* COR/UNCOR Error Status */
unsigned int mask; /* COR/UNCOR Error Mask */
- struct aer_header_log_regs tlp; /* TLP Header */
+ struct pcie_tlp_log tlp; /* TLP Header */
};
int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
@@ -563,16 +571,28 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
int pcie_retrain_link(struct pci_dev *pdev, bool use_lt);
+
+/* ASPM-related functionality we need even without CONFIG_PCIEASPM */
+void pci_save_ltr_state(struct pci_dev *dev);
+void pci_restore_ltr_state(struct pci_dev *dev);
+void pci_configure_aspm_l1ss(struct pci_dev *dev);
+void pci_save_aspm_l1ss_state(struct pci_dev *dev);
+void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
+
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
+void pci_configure_ltr(struct pci_dev *pdev);
+void pci_bridge_reconfigure_ltr(struct pci_dev *pdev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
+static inline void pci_configure_ltr(struct pci_dev *pdev) { }
+static inline void pci_bridge_reconfigure_ltr(struct pci_dev *pdev) { }
#endif
#ifdef CONFIG_PCIE_ECRC
@@ -793,6 +813,27 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
#endif
/*
+ * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
+ * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
+ * there's no need to track it separately. pci_devres is initialized
+ * when a device is enabled using managed PCI device enable interface.
+ *
+ * TODO: Struct pci_devres and find_pci_dr() only need to be here because
+ * they're used in pci.c. Port or move these functions to devres.c and
+ * then remove them from here.
+ */
+struct pci_devres {
+ unsigned int enabled:1;
+ unsigned int pinned:1;
+ unsigned int orig_intx:1;
+ unsigned int restore_intx:1;
+ unsigned int mwi:1;
+ u32 region_mask;
+};
+
+struct pci_devres *find_pci_dr(struct pci_dev *pdev);
+
+/*
* Config Address for PCI Configuration Mechanism #1
*
* See PCI Local Bus Specification, Revision 3.0,
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 8de4ed5f98f14..6461aa93fe76e 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -6,7 +6,7 @@ pcieportdrv-y := portdrv.o rcec.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
-obj-$(CONFIG_PCIEASPM) += aspm.o
+obj-y += aspm.o
obj-$(CONFIG_PCIEAER) += aer.o err.o
obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
obj-$(CONFIG_PCIE_PME) += pme.o
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 05fc30bb5134d..ac6293c249766 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -664,11 +664,10 @@ static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
}
}
-static void __print_tlp_header(struct pci_dev *dev,
- struct aer_header_log_regs *t)
+static void __print_tlp_header(struct pci_dev *dev, struct pcie_tlp_log *t)
{
pci_err(dev, " TLP Header: %08x %08x %08x %08x\n",
- t->dw0, t->dw1, t->dw2, t->dw3);
+ t->dw[0], t->dw[1], t->dw[2], t->dw[3]);
}
static void __aer_print_error(struct pci_dev *dev,
@@ -1210,7 +1209,7 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
int type = pci_pcie_type(dev);
int aer = dev->aer_cap;
- int temp;
+ u32 aercc;
/* Must reset in this function */
info->status = 0;
@@ -1241,19 +1240,12 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
return 0;
/* Get First Error Pointer */
- pci_read_config_dword(dev, aer + PCI_ERR_CAP, &temp);
- info->first_error = PCI_ERR_CAP_FEP(temp);
+ pci_read_config_dword(dev, aer + PCI_ERR_CAP, &aercc);
+ info->first_error = PCI_ERR_CAP_FEP(aercc);
if (info->status & AER_LOG_TLP_MASKS) {
info->tlp_header_valid = 1;
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
+ pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG, &info->tlp);
}
}
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index bc0bd86695ec6..2428d278e015a 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -24,6 +24,166 @@
#include "../pci.h"
+void pci_save_ltr_state(struct pci_dev *dev)
+{
+ int ltr;
+ struct pci_cap_saved_state *save_state;
+ u32 *cap;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
+ if (!ltr)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
+ if (!save_state) {
+ pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
+ return;
+ }
+
+ /* Some broken devices only support dword access to LTR */
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
+}
+
+void pci_restore_ltr_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ int ltr;
+ u32 *cap;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
+ ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
+ if (!save_state || !ltr)
+ return;
+
+ /* Some broken devices only support dword access to LTR */
+ cap = &save_state->cap.data[0];
+ pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
+}
+
+void pci_configure_aspm_l1ss(struct pci_dev *pdev)
+{
+ int rc;
+
+ pdev->l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+
+ rc = pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_L1SS,
+ 2 * sizeof(u32));
+ if (rc)
+ pci_err(pdev, "unable to allocate ASPM L1SS save buffer (%pe)\n",
+ ERR_PTR(rc));
+}
+
+void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
+{
+ struct pci_cap_saved_state *save_state;
+ u16 l1ss = pdev->l1ss;
+ u32 *cap;
+
+ /*
+ * Save L1 substate configuration. The ASPM L0s/L1 configuration
+ * in PCI_EXP_LNKCTL_ASPMC is saved by pci_save_pcie_state().
+ */
+ if (!l1ss)
+ return;
+
+ save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!save_state)
+ return;
+
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL2, cap++);
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, cap++);
+}
+
+void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
+{
+ struct pci_cap_saved_state *pl_save_state, *cl_save_state;
+ struct pci_dev *parent = pdev->bus->self;
+ u32 *cap, pl_ctl1, pl_ctl2, pl_l1_2_enable;
+ u32 cl_ctl1, cl_ctl2, cl_l1_2_enable;
+ u16 clnkctl, plnkctl;
+
+ /*
+ * If the BIOS left L1.2 enabled when resuming, it must be disabled on
+ * the downstream component before the upstream one, so don't attempt to
+ * restore either side until we reach the downstream component.
+ */
+ if (pcie_downstream_port(pdev) || !parent)
+ return;
+
+ if (!pdev->l1ss || !parent->l1ss)
+ return;
+
+ cl_save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
+ pl_save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
+ if (!cl_save_state || !pl_save_state)
+ return;
+
+ cap = &cl_save_state->cap.data[0];
+ cl_ctl2 = *cap++;
+ cl_ctl1 = *cap;
+ cap = &pl_save_state->cap.data[0];
+ pl_ctl2 = *cap++;
+ pl_ctl1 = *cap;
+
+ /* Make sure L0s/L1 are disabled before updating L1SS config */
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &clnkctl);
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &plnkctl);
+ if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
+ FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
+ pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
+ clnkctl & ~PCI_EXP_LNKCTL_ASPMC);
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
+ plnkctl & ~PCI_EXP_LNKCTL_ASPMC);
+ }
+
+ /*
+ * Disable L1.2 on this downstream endpoint device first, followed
+ * by the upstream
+ */
+ pci_clear_and_set_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+
+ /*
+ * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD
+ * in PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
+ * enable bits, even though they're all in PCI_L1SS_CTL1.
+ */
+ pl_l1_2_enable = pl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ pl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
+ cl_l1_2_enable = cl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ cl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
+
+ /* Write back without enables first (above we cleared them in ctl1) */
+ pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, pl_ctl2);
+ pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cl_ctl2);
+ pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, pl_ctl1);
+ pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cl_ctl1);
+
+ /* Then write back the enables */
+ if (pl_l1_2_enable || cl_l1_2_enable) {
+ pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ pl_ctl1 | pl_l1_2_enable);
+ pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
+ cl_ctl1 | cl_l1_2_enable);
+ }
+
+ /* Restore L0s/L1 if they were enabled */
+ if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
+ FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, clnkctl);
+ pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, plnkctl);
+ }
+}
+
+#ifdef CONFIG_PCIEASPM
+
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
@@ -141,16 +301,42 @@ static int policy_to_clkpm_state(struct pcie_link_state *link)
return 0;
}
+static void pci_update_aspm_saved_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ u16 *cap, lnkctl, aspm_ctl;
+
+ save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
+ if (!save_state)
+ return;
+
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnkctl);
+
+ /*
+ * Update ASPM and CLKREQ bits of LNKCTL in save_state. We only
+ * write PCI_EXP_LNKCTL_CCC during enumeration, so it shouldn't
+ * change after being captured in save_state.
+ */
+ aspm_ctl = lnkctl & (PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);
+ lnkctl &= ~(PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);
+
+ /* Depends on pci_save_pcie_state(): cap[1] is LNKCTL */
+ cap = (u16 *)&save_state->cap.data[0];
+ cap[1] = lnkctl | aspm_ctl;
+}
+
static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
{
struct pci_dev *child;
struct pci_bus *linkbus = link->pdev->subordinate;
u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;
- list_for_each_entry(child, &linkbus->devices, bus_list)
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_CLKREQ_EN,
val);
+ pci_update_aspm_saved_state(child);
+ }
link->clkpm_enabled = !!enable;
}
@@ -769,6 +955,12 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
pcie_config_aspm_dev(parent, upstream);
link->aspm_enabled = state;
+
+ /* Update latest ASPM configuration in saved context */
+ pci_save_aspm_l1ss_state(link->downstream);
+ pci_update_aspm_saved_state(link->downstream);
+ pci_save_aspm_l1ss_state(parent);
+ pci_update_aspm_saved_state(parent);
}
static void pcie_config_aspm_path(struct pcie_link_state *link)
@@ -938,6 +1130,78 @@ out:
up_read(&pci_bus_sem);
}
+void pci_bridge_reconfigure_ltr(struct pci_dev *pdev)
+{
+ struct pci_dev *bridge;
+ u32 ctl;
+
+ bridge = pci_upstream_bridge(pdev);
+ if (bridge && bridge->ltr_path) {
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
+ if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
+ pci_dbg(bridge, "re-enabling LTR\n");
+ pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
+ }
+ }
+}
+
+void pci_configure_ltr(struct pci_dev *pdev)
+{
+ struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
+ struct pci_dev *bridge;
+ u32 cap, ctl;
+
+ if (!pci_is_pcie(pdev))
+ return;
+
+ pcie_capability_read_dword(pdev, PCI_EXP_DEVCAP2, &cap);
+ if (!(cap & PCI_EXP_DEVCAP2_LTR))
+ return;
+
+ pcie_capability_read_dword(pdev, PCI_EXP_DEVCTL2, &ctl);
+ if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
+ pdev->ltr_path = 1;
+ return;
+ }
+
+ bridge = pci_upstream_bridge(pdev);
+ if (bridge && bridge->ltr_path)
+ pdev->ltr_path = 1;
+
+ return;
+ }
+
+ if (!host->native_ltr)
+ return;
+
+ /*
+ * Software must not enable LTR in an Endpoint unless the Root
+ * Complex and all intermediate Switches indicate support for LTR.
+ * PCIe r4.0, sec 6.18.
+ */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
+ pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
+ pdev->ltr_path = 1;
+ return;
+ }
+
+ /*
+ * If we're configuring a hot-added device, LTR was likely
+ * disabled in the upstream bridge, so re-enable it before enabling
+ * it in the new device.
+ */
+ bridge = pci_upstream_bridge(pdev);
+ if (bridge && bridge->ltr_path) {
+ pci_bridge_reconfigure_ltr(pdev);
+ pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
+ pdev->ltr_path = 1;
+ }
+}
+
/* Recheck latencies and update aspm_capable for links under the root */
static void pcie_update_aspm_capable(struct pcie_link_state *root)
{
@@ -1447,3 +1711,5 @@ bool pcie_aspm_support_enabled(void)
{
return aspm_support_enabled;
}
+
+#endif /* CONFIG_PCIEASPM */
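To tie the helpers above together: they are intended to be driven from the generic PCI state save/restore paths, and the restore side only works if LTR is back before L1.2 is re-enabled through L1SS CTL1. A rough ordering sketch follows; the example_* callers are hypothetical, the real call sites are the pci_save_state()/pci_restore_state() machinery.

/* Hypothetical ordering sketch; example_* names are not part of the patch. */
static void example_save_pm_state(struct pci_dev *dev)
{
	pci_save_ltr_state(dev);	/* snapshot LTR max snoop/no-snoop latencies */
	pci_save_aspm_l1ss_state(dev);	/* snapshot L1SS CTL2, then CTL1 */
}

static void example_restore_pm_state(struct pci_dev *dev)
{
	/* LTR must be restored before L1.2 is re-enabled via L1SS CTL1 */
	pci_restore_ltr_state(dev);
	pci_restore_aspm_l1ss_state(dev);
}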
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 94111e4382413..a668820696dc0 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -190,7 +190,8 @@ out:
static void dpc_process_rp_pio_error(struct pci_dev *pdev)
{
u16 cap = pdev->dpc_cap, dpc_status, first_error;
- u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix;
+ u32 status, mask, sev, syserr, exc, log, prefix;
+ struct pcie_tlp_log tlp_log;
int i;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, &status);
@@ -216,16 +217,9 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
if (pdev->dpc_rp_log_size < 4)
goto clear_status;
- pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
- &dw0);
- pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 4,
- &dw1);
- pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 8,
- &dw2);
- pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12,
- &dw3);
+ pcie_read_tlp_log(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG, &tlp_log);
pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n",
- dw0, dw1, dw2, dw3);
+ tlp_log.dw[0], tlp_log.dw[1], tlp_log.dw[2], tlp_log.dw[3]);
if (pdev->dpc_rp_log_size < 5)
goto clear_status;
@@ -234,7 +228,7 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
pci_read_config_dword(pdev,
- cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix);
+ cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4, &prefix);
pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
}
clear_status:
@@ -303,10 +297,70 @@ void dpc_process_error(struct pci_dev *pdev)
}
}
+static void pci_clear_surpdn_errors(struct pci_dev *pdev)
+{
+ if (pdev->dpc_rp_extensions)
+ pci_write_config_dword(pdev, pdev->dpc_cap +
+ PCI_EXP_DPC_RP_PIO_STATUS, ~0);
+
+ /*
+ * In practice, Surprise Down errors have been observed to set error
+ * bits in the Status Register as well as the Fatal Error Detected
+ * bit in the Device Status Register.
+ */
+ pci_write_config_word(pdev, PCI_STATUS, 0xffff);
+
+ pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_FED);
+}
+
+static void dpc_handle_surprise_removal(struct pci_dev *pdev)
+{
+ if (!pcie_wait_for_link(pdev, false)) {
+ pci_info(pdev, "Data Link Layer Link Active not cleared in 1000 msec\n");
+ goto out;
+ }
+
+ if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev))
+ goto out;
+
+ pci_aer_raw_clear_status(pdev);
+ pci_clear_surpdn_errors(pdev);
+
+ pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_STATUS,
+ PCI_EXP_DPC_STATUS_TRIGGER);
+
+out:
+ clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
+ wake_up_all(&dpc_completed_waitqueue);
+}
+
+static bool dpc_is_surprise_removal(struct pci_dev *pdev)
+{
+ u16 status;
+
+ if (!pdev->is_hotplug_bridge)
+ return false;
+
+ if (pci_read_config_word(pdev, pdev->aer_cap + PCI_ERR_UNCOR_STATUS,
+ &status))
+ return false;
+
+ return status & PCI_ERR_UNC_SURPDN;
+}
+
static irqreturn_t dpc_handler(int irq, void *context)
{
struct pci_dev *pdev = context;
+ /*
+ * According to PCIe r6.0 sec 6.7.6, errors are an expected side effect
+ * of async removal and should be ignored by software.
+ */
+ if (dpc_is_surprise_removal(pdev)) {
+ dpc_handle_surprise_removal(pdev);
+ return IRQ_HANDLED;
+ }
+
dpc_process_error(pdev);
/* We configure DPC so it only triggers on ERR_FATAL */
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 59c90d04a609a..705893b5f7b09 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -13,6 +13,7 @@
#define dev_fmt(fmt) "AER: " fmt
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -85,6 +86,18 @@ static int report_error_detected(struct pci_dev *dev,
return 0;
}
+static int pci_pm_runtime_get_sync(struct pci_dev *pdev, void *data)
+{
+ pm_runtime_get_sync(&pdev->dev);
+ return 0;
+}
+
+static int pci_pm_runtime_put(struct pci_dev *pdev, void *data)
+{
+ pm_runtime_put(&pdev->dev);
+ return 0;
+}
+
static int report_frozen_detected(struct pci_dev *dev, void *data)
{
return report_error_detected(dev, pci_channel_io_frozen, data);
@@ -207,6 +220,8 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
else
bridge = pci_upstream_bridge(dev);
+ pci_walk_bridge(bridge, pci_pm_runtime_get_sync, NULL);
+
pci_dbg(bridge, "broadcast error_detected message\n");
if (state == pci_channel_io_frozen) {
pci_walk_bridge(bridge, report_frozen_detected, &status);
@@ -251,10 +266,15 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
pcie_clear_device_status(dev);
pci_aer_clear_nonfatal_status(dev);
}
+
+ pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
+
pci_info(bridge, "device recovery successful\n");
return status;
failed:
+ pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
+
pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT);
/* TODO: Should kernel panic here? */
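The err.c change above brackets the whole recovery broadcast with runtime-PM references so nothing in the affected hierarchy runtime-suspends while the callbacks run; the two tiny wrappers exist because the walker expects an int (*cb)(struct pci_dev *, void *) callback. A sketch of that callback shape, shown with the exported pci_walk_bus() (pci_walk_bridge() here is a local wrapper around it); the example_* names are illustrative only.

/* Sketch: counting devices on a bus with the same walker callback shape. */
static int example_count_dev(struct pci_dev *pdev, void *data)
{
	(*(unsigned int *)data)++;
	return 0;		/* returning non-zero stops the walk */
}

static unsigned int example_count_bus(struct pci_bus *bus)
{
	unsigned int n = 0;

	pci_walk_bus(bus, example_count_dev, &n);
	return n;
}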
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 1f3803bde7ee1..12c89ea0313b9 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -96,7 +96,7 @@ struct pcie_port_service_driver {
int pcie_port_service_register(struct pcie_port_service_driver *new);
void pcie_port_service_unregister(struct pcie_port_service_driver *new);
-extern struct bus_type pcie_port_bus_type;
+extern const struct bus_type pcie_port_bus_type;
struct pci_dev;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index b7335be56008f..1325fbae2f28f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2209,67 +2209,6 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev)
}
}
-static void pci_configure_ltr(struct pci_dev *dev)
-{
-#ifdef CONFIG_PCIEASPM
- struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
- struct pci_dev *bridge;
- u32 cap, ctl;
-
- if (!pci_is_pcie(dev))
- return;
-
- /* Read L1 PM substate capabilities */
- dev->l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS);
-
- pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
- if (!(cap & PCI_EXP_DEVCAP2_LTR))
- return;
-
- pcie_capability_read_dword(dev, PCI_EXP_DEVCTL2, &ctl);
- if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
- dev->ltr_path = 1;
- return;
- }
-
- bridge = pci_upstream_bridge(dev);
- if (bridge && bridge->ltr_path)
- dev->ltr_path = 1;
-
- return;
- }
-
- if (!host->native_ltr)
- return;
-
- /*
- * Software must not enable LTR in an Endpoint unless the Root
- * Complex and all intermediate Switches indicate support for LTR.
- * PCIe r4.0, sec 6.18.
- */
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
- pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
- PCI_EXP_DEVCTL2_LTR_EN);
- dev->ltr_path = 1;
- return;
- }
-
- /*
- * If we're configuring a hot-added device, LTR was likely
- * disabled in the upstream bridge, so re-enable it before enabling
- * it in the new device.
- */
- bridge = pci_upstream_bridge(dev);
- if (bridge && bridge->ltr_path) {
- pci_bridge_reconfigure_ltr(dev);
- pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
- PCI_EXP_DEVCTL2_LTR_EN);
- dev->ltr_path = 1;
- }
-#endif
-}
-
static void pci_configure_eetlp_prefix(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_PASID
@@ -2320,6 +2259,7 @@ static void pci_configure_device(struct pci_dev *dev)
pci_configure_extended_tags(dev, NULL);
pci_configure_relaxed_ordering(dev);
pci_configure_ltr(dev);
+ pci_configure_aspm_l1ss(dev);
pci_configure_eetlp_prefix(dev);
pci_configure_serr(dev);
@@ -2357,6 +2297,10 @@ static void pci_release_dev(struct device *dev)
kfree(pci_dev);
}
+static const struct device_type pci_dev_type = {
+ .groups = pci_dev_attr_groups,
+};
+
struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
{
struct pci_dev *dev;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index d797df6e5f3e9..eff7f5df08e27 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5527,6 +5527,7 @@ static void quirk_no_ext_tags(struct pci_dev *pdev)
pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_3WARE, 0x1004, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
@@ -6225,6 +6226,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size);
#endif
/*
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
deleted file mode 100644
index cc7d26b015f32..0000000000000
--- a/drivers/pci/setup-irq.c
+++ /dev/null
@@ -1,64 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support routines for initializing a PCI subsystem
- *
- * Extruded from code written by
- * Dave Rusling (david.rusling@reo.mts.dec.com)
- * David Mosberger (davidm@cs.arizona.edu)
- * David Miller (davem@redhat.com)
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/cache.h>
-#include "pci.h"
-
-void pci_assign_irq(struct pci_dev *dev)
-{
- u8 pin;
- u8 slot = -1;
- int irq = 0;
- struct pci_host_bridge *hbrg = pci_find_host_bridge(dev->bus);
-
- if (!(hbrg->map_irq)) {
- pci_dbg(dev, "runtime IRQ mapping not provided by arch\n");
- return;
- }
-
- /*
- * If this device is not on the primary bus, we need to figure out
- * which interrupt pin it will come in on. We know which slot it
- * will come in on because that slot is where the bridge is. Each
- * time the interrupt line passes through a PCI-PCI bridge we must
- * apply the swizzle function.
- */
- pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
- /* Cope with illegal. */
- if (pin > 4)
- pin = 1;
-
- if (pin) {
- /* Follow the chain of bridges, swizzling as we go. */
- if (hbrg->swizzle_irq)
- slot = (*(hbrg->swizzle_irq))(dev, &pin);
-
- /*
- * If a swizzling function is not used, map_irq() must
- * ignore slot.
- */
- irq = (*(hbrg->map_irq))(dev, slot, pin);
- if (irq == -1)
- irq = 0;
- }
- dev->irq = irq;
-
- pci_dbg(dev, "assign IRQ: got %d\n", dev->irq);
-
- /*
- * Always tell the device, so the driver knows what is the real IRQ
- * to use; the device does not use it.
- */
- pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
-}
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 1804794d0e686..5a4adf6c04cf8 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -1672,7 +1672,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
rc = switchtec_init_isr(stdev);
if (rc) {
dev_err(&stdev->dev, "failed to init isr.\n");
- goto err_put;
+ goto err_exit_pci;
}
iowrite32(SWITCHTEC_EVENT_CLEAR |
@@ -1693,6 +1693,8 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
err_devadd:
stdev_kill(stdev);
+err_exit_pci:
+ switchtec_exit_pci(stdev);
err_put:
ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
put_device(&stdev->dev);
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index b33be1e63c98f..c75f55e1250a3 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -892,7 +892,7 @@ static const struct dev_pm_ops pcmcia_socket_pm_ops = {
#endif /* CONFIG_PM */
-struct class pcmcia_socket_class = {
+const struct class pcmcia_socket_class = {
.name = "pcmcia_socket",
.dev_uevent = pcmcia_socket_uevent,
.dev_release = pcmcia_release_socket,
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index 580369f3c0b06..02a83ca44e775 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -113,7 +113,7 @@ struct pcmcia_callback{
/* cs.c */
extern struct rw_semaphore pcmcia_socket_list_rwsem;
extern struct list_head pcmcia_socket_list;
-extern struct class pcmcia_socket_class;
+extern const struct class pcmcia_socket_class;
int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c);
struct pcmcia_socket *pcmcia_get_socket_by_nr(unsigned int nr);
@@ -132,7 +132,7 @@ void pcmcia_put_socket(struct pcmcia_socket *skt);
* Stuff internal to module "pcmcia".
*/
/* ds.c */
-extern struct bus_type pcmcia_bus_type;
+extern const struct bus_type pcmcia_bus_type;
struct pcmcia_device;
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index b4b8363d1de21..d3cfd353fb935 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -1406,7 +1406,7 @@ static const struct dev_pm_ops pcmcia_bus_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pcmcia_dev_suspend, pcmcia_dev_resume)
};
-struct bus_type pcmcia_bus_type = {
+const struct bus_type pcmcia_bus_type = {
.name = "pcmcia",
.uevent = pcmcia_bus_uevent,
.match = pcmcia_bus_match,
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index ec6e0d9194a1c..7526a9e714fa9 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -86,6 +86,30 @@ config RISCV_PMU_SBI
full perf feature support i.e. counter overflow, privilege mode
filtering, counter configuration.
+config STARFIVE_STARLINK_PMU
+ depends on ARCH_STARFIVE || COMPILE_TEST
+ depends on 64BIT
+ bool "StarFive StarLink PMU"
+ help
+ Provide support for the StarFive StarLink Performance Monitor Unit.
+ The StarLink PMU integrates one or more cores with an L3 memory
+ system. Its L3 cache events are exposed through the perf event
+ subsystem, allowing various L3 cache events to be monitored.
+
+config ANDES_CUSTOM_PMU
+ bool "Andes custom PMU support"
+ depends on ARCH_RENESAS && RISCV_ALTERNATIVE && RISCV_PMU_SBI
+ default y
+ help
+ The Andes cores implement a PMU overflow extension very similar
+ to the standard Sscofpmf and Smcntrpmf extensions.
+
+ This will patch the overflow and pending CSRs and handle the
+ non-standard behaviour via the regular SBI PMU driver and
+ interface.
+
+ If you don't know what to do here, say "Y".
+
config ARM_PMU_ACPI
depends on ARM_PMU && ACPI
def_bool y
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index a06338e3401c9..29b1c28203ef3 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
obj-$(CONFIG_RISCV_PMU) += riscv_pmu.o
obj-$(CONFIG_RISCV_PMU_LEGACY) += riscv_pmu_legacy.o
obj-$(CONFIG_RISCV_PMU_SBI) += riscv_pmu_sbi.o
+obj-$(CONFIG_STARFIVE_STARLINK_PMU) += starfive_starlink_pmu.o
obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c
index 19d459a36be55..a9277dcf90ce0 100644
--- a/drivers/perf/alibaba_uncore_drw_pmu.c
+++ b/drivers/perf/alibaba_uncore_drw_pmu.c
@@ -729,7 +729,7 @@ static int ali_drw_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int ali_drw_pmu_remove(struct platform_device *pdev)
+static void ali_drw_pmu_remove(struct platform_device *pdev)
{
struct ali_drw_pmu *drw_pmu = platform_get_drvdata(pdev);
@@ -739,8 +739,6 @@ static int ali_drw_pmu_remove(struct platform_device *pdev)
ali_drw_pmu_uninit_irq(drw_pmu);
perf_pmu_unregister(&drw_pmu->pmu);
-
- return 0;
}
static int ali_drw_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
@@ -795,7 +793,7 @@ static struct platform_driver ali_drw_pmu_driver = {
.acpi_match_table = ali_drw_acpi_match,
},
.probe = ali_drw_pmu_probe,
- .remove = ali_drw_pmu_remove,
+ .remove_new = ali_drw_pmu_remove,
};
static int __init ali_drw_pmu_init(void)
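The remaining perf driver hunks below repeat the same mechanical conversion: the platform remove callback drops its always-zero int return and is registered through .remove_new. A minimal sketch of the pattern is shown here with hypothetical foo_* names (not a real driver).

/* Hypothetical driver illustrating the .remove_new conversion pattern. */
static void foo_pmu_remove(struct platform_device *pdev)
{
	struct foo_pmu *pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&pmu->pmu);	/* nothing useful to return on failure */
}

static struct platform_driver foo_pmu_driver = {
	.driver = {
		.name = "foo-pmu",
		.suppress_bind_attrs = true,
	},
	.probe = foo_pmu_probe,
	.remove_new = foo_pmu_remove,	/* void return; replaces .remove */
};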
diff --git a/drivers/perf/amlogic/meson_g12_ddr_pmu.c b/drivers/perf/amlogic/meson_g12_ddr_pmu.c
index 15d52ab3276a5..99cc791892bce 100644
--- a/drivers/perf/amlogic/meson_g12_ddr_pmu.c
+++ b/drivers/perf/amlogic/meson_g12_ddr_pmu.c
@@ -355,11 +355,9 @@ static int g12_ddr_pmu_probe(struct platform_device *pdev)
return meson_ddr_pmu_create(pdev);
}
-static int g12_ddr_pmu_remove(struct platform_device *pdev)
+static void g12_ddr_pmu_remove(struct platform_device *pdev)
{
meson_ddr_pmu_remove(pdev);
-
- return 0;
}
static const struct of_device_id meson_ddr_pmu_dt_match[] = {
@@ -381,7 +379,7 @@ MODULE_DEVICE_TABLE(of, meson_ddr_pmu_dt_match);
static struct platform_driver g12_ddr_pmu_driver = {
.probe = g12_ddr_pmu_probe,
- .remove = g12_ddr_pmu_remove,
+ .remove_new = g12_ddr_pmu_remove,
.driver = {
.name = "meson-g12-ddr-pmu",
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 61de861eaf91e..6be03f81ae5db 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -1697,16 +1697,14 @@ error_pmu_init:
return ret;
}
-static int cci_pmu_remove(struct platform_device *pdev)
+static void cci_pmu_remove(struct platform_device *pdev)
{
if (!g_cci_pmu)
- return 0;
+ return;
cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
perf_pmu_unregister(&g_cci_pmu->pmu);
g_cci_pmu = NULL;
-
- return 0;
}
static struct platform_driver cci_pmu_driver = {
@@ -1716,7 +1714,7 @@ static struct platform_driver cci_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = cci_pmu_probe,
- .remove = cci_pmu_remove,
+ .remove_new = cci_pmu_remove,
};
module_platform_driver(cci_pmu_driver);
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 728d13d8e98ac..641471bd5eff4 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1515,13 +1515,11 @@ static int arm_ccn_probe(struct platform_device *pdev)
return arm_ccn_pmu_init(ccn);
}
-static int arm_ccn_remove(struct platform_device *pdev)
+static void arm_ccn_remove(struct platform_device *pdev)
{
struct arm_ccn *ccn = platform_get_drvdata(pdev);
arm_ccn_pmu_cleanup(ccn);
-
- return 0;
}
static const struct of_device_id arm_ccn_match[] = {
@@ -1539,7 +1537,7 @@ static struct platform_driver arm_ccn_driver = {
.suppress_bind_attrs = true,
},
.probe = arm_ccn_probe,
- .remove = arm_ccn_remove,
+ .remove_new = arm_ccn_remove,
};
static int __init arm_ccn_init(void)
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 7e3aa7e2345fa..7ef9c7e4836b7 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -493,6 +493,7 @@ static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d)
for (dn = cmn->dns; dn->type; dn++) {
struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
+ int pad = dn->logid < 10;
if (dn->type == CMN_TYPE_XP)
continue;
@@ -503,7 +504,7 @@ static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d)
if (nid.x != x || nid.y != y || nid.port != p || nid.dev != d)
continue;
- seq_printf(s, " #%-2d |", dn->logid);
+ seq_printf(s, " %*c#%-*d |", pad + 1, ' ', 3 - pad, dn->logid);
return;
}
seq_puts(s, " |");
@@ -516,7 +517,7 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
seq_puts(s, " X");
for (x = 0; x < cmn->mesh_x; x++)
- seq_printf(s, " %d ", x);
+ seq_printf(s, " %-2d ", x);
seq_puts(s, "\nY P D+");
y = cmn->mesh_y;
while (y--) {
@@ -526,13 +527,13 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
for (x = 0; x < cmn->mesh_x; x++)
seq_puts(s, "--------+");
- seq_printf(s, "\n%d |", y);
+ seq_printf(s, "\n%-2d |", y);
for (x = 0; x < cmn->mesh_x; x++) {
struct arm_cmn_node *xp = cmn->xps + xp_base + x;
for (p = 0; p < CMN_MAX_PORTS; p++)
port[p][x] = arm_cmn_device_connect_info(cmn, xp, p);
- seq_printf(s, " XP #%-2d |", xp_base + x);
+ seq_printf(s, " XP #%-3d|", xp_base + x);
}
seq_puts(s, "\n |");
@@ -2515,7 +2516,7 @@ static int arm_cmn_probe(struct platform_device *pdev)
return err;
}
-static int arm_cmn_remove(struct platform_device *pdev)
+static void arm_cmn_remove(struct platform_device *pdev)
{
struct arm_cmn *cmn = platform_get_drvdata(pdev);
@@ -2524,7 +2525,6 @@ static int arm_cmn_remove(struct platform_device *pdev)
perf_pmu_unregister(&cmn->pmu);
cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node);
debugfs_remove(cmn->debug);
- return 0;
}
#ifdef CONFIG_OF
@@ -2555,7 +2555,7 @@ static struct platform_driver arm_cmn_driver = {
.acpi_match_table = ACPI_PTR(arm_cmn_acpi_match),
},
.probe = arm_cmn_probe,
- .remove = arm_cmn_remove,
+ .remove_new = arm_cmn_remove,
};
static int __init arm_cmn_init(void)
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
index 50b89b989ce75..b9a252272f1e9 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.c
+++ b/drivers/perf/arm_cspmu/arm_cspmu.c
@@ -27,6 +27,7 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
@@ -100,13 +101,6 @@
#define ARM_CSPMU_ACTIVE_CPU_MASK 0x0
#define ARM_CSPMU_ASSOCIATED_CPU_MASK 0x1
-/* Check and use default if implementer doesn't provide attribute callback */
-#define CHECK_DEFAULT_IMPL_OPS(ops, callback) \
- do { \
- if (!ops->callback) \
- ops->callback = arm_cspmu_ ## callback; \
- } while (0)
-
/*
* Maximum poll count for reading counter value using high-low-high sequence.
*/
@@ -121,7 +115,9 @@ static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
static struct acpi_apmt_node *arm_cspmu_apmt_node(struct device *dev)
{
- return *(struct acpi_apmt_node **)dev_get_platdata(dev);
+ struct acpi_apmt_node **ptr = dev_get_platdata(dev);
+
+ return ptr ? *ptr : NULL;
}
/*
@@ -317,6 +313,10 @@ static const char *arm_cspmu_get_name(const struct arm_cspmu *cspmu)
dev = cspmu->dev;
apmt_node = arm_cspmu_apmt_node(dev);
+ if (!apmt_node)
+ return devm_kasprintf(dev, GFP_KERNEL, PMUNAME "_%u",
+ atomic_fetch_inc(&pmu_idx[0]));
+
pmu_type = apmt_node->type;
if (pmu_type >= ACPI_APMT_NODE_TYPE_COUNT) {
@@ -408,21 +408,32 @@ static struct arm_cspmu_impl_match *arm_cspmu_impl_match_get(u32 pmiidr)
return NULL;
}
+#define DEFAULT_IMPL_OP(name) .name = arm_cspmu_##name
+
static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu)
{
int ret = 0;
- struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
struct acpi_apmt_node *apmt_node = arm_cspmu_apmt_node(cspmu->dev);
struct arm_cspmu_impl_match *match;
- /*
- * Get PMU implementer and product id from APMT node.
- * If APMT node doesn't have implementer/product id, try get it
- * from PMIIDR.
- */
- cspmu->impl.pmiidr =
- (apmt_node->impl_id) ? apmt_node->impl_id :
- readl(cspmu->base0 + PMIIDR);
+ /* Start with a default PMU implementation */
+ cspmu->impl.module = THIS_MODULE;
+ cspmu->impl.pmiidr = readl(cspmu->base0 + PMIIDR);
+ cspmu->impl.ops = (struct arm_cspmu_impl_ops) {
+ DEFAULT_IMPL_OP(get_event_attrs),
+ DEFAULT_IMPL_OP(get_format_attrs),
+ DEFAULT_IMPL_OP(get_identifier),
+ DEFAULT_IMPL_OP(get_name),
+ DEFAULT_IMPL_OP(is_cycle_counter_event),
+ DEFAULT_IMPL_OP(event_type),
+ DEFAULT_IMPL_OP(event_filter),
+ DEFAULT_IMPL_OP(set_ev_filter),
+ DEFAULT_IMPL_OP(event_attr_is_visible),
+ };
+
+ /* Firmware may override implementer/product ID from PMIIDR */
+ if (apmt_node && apmt_node->impl_id)
+ cspmu->impl.pmiidr = apmt_node->impl_id;
/* Find implementer specific attribute ops. */
match = arm_cspmu_impl_match_get(cspmu->impl.pmiidr);
@@ -450,24 +461,9 @@ static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu)
}
mutex_unlock(&arm_cspmu_lock);
+ }
- if (ret)
- return ret;
- } else
- cspmu->impl.module = THIS_MODULE;
-
- /* Use default callbacks if implementer doesn't provide one. */
- CHECK_DEFAULT_IMPL_OPS(impl_ops, get_event_attrs);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, get_format_attrs);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, get_identifier);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, get_name);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, is_cycle_counter_event);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, event_type);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, event_filter);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, event_attr_is_visible);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, set_ev_filter);
-
- return 0;
+ return ret;
}
static struct attribute_group *
@@ -512,23 +508,16 @@ arm_cspmu_alloc_format_attr_group(struct arm_cspmu *cspmu)
return format_group;
}
-static struct attribute_group **
-arm_cspmu_alloc_attr_group(struct arm_cspmu *cspmu)
+static int arm_cspmu_alloc_attr_groups(struct arm_cspmu *cspmu)
{
- struct attribute_group **attr_groups = NULL;
- struct device *dev = cspmu->dev;
+ const struct attribute_group **attr_groups = cspmu->attr_groups;
const struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
cspmu->identifier = impl_ops->get_identifier(cspmu);
cspmu->name = impl_ops->get_name(cspmu);
if (!cspmu->identifier || !cspmu->name)
- return NULL;
-
- attr_groups = devm_kcalloc(dev, 5, sizeof(struct attribute_group *),
- GFP_KERNEL);
- if (!attr_groups)
- return NULL;
+ return -ENOMEM;
attr_groups[0] = arm_cspmu_alloc_event_attr_group(cspmu);
attr_groups[1] = arm_cspmu_alloc_format_attr_group(cspmu);
@@ -536,18 +525,14 @@ arm_cspmu_alloc_attr_group(struct arm_cspmu *cspmu)
attr_groups[3] = &arm_cspmu_cpumask_attr_group;
if (!attr_groups[0] || !attr_groups[1])
- return NULL;
+ return -ENOMEM;
- return attr_groups;
+ return 0;
}
static inline void arm_cspmu_reset_counters(struct arm_cspmu *cspmu)
{
- u32 pmcr = 0;
-
- pmcr |= PMCR_P;
- pmcr |= PMCR_C;
- writel(pmcr, cspmu->base0 + PMCR);
+ writel(PMCR_C | PMCR_P, cspmu->base0 + PMCR);
}
static inline void arm_cspmu_start_counters(struct arm_cspmu *cspmu)
@@ -962,7 +947,14 @@ static struct arm_cspmu *arm_cspmu_alloc(struct platform_device *pdev)
platform_set_drvdata(pdev, cspmu);
apmt_node = arm_cspmu_apmt_node(dev);
- cspmu->has_atomic_dword = apmt_node->flags & ACPI_APMT_FLAGS_ATOMIC;
+ if (apmt_node) {
+ cspmu->has_atomic_dword = apmt_node->flags & ACPI_APMT_FLAGS_ATOMIC;
+ } else {
+ u32 width = 0;
+
+ device_property_read_u32(dev, "reg-io-width", &width);
+ cspmu->has_atomic_dword = (width == 8);
+ }
return cspmu;
}
@@ -1153,11 +1145,6 @@ static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
}
}
- if (cpumask_empty(&cspmu->associated_cpus)) {
- dev_dbg(cspmu->dev, "No cpu associated with the PMU\n");
- return -ENODEV;
- }
-
return 0;
}
#else
@@ -1167,19 +1154,45 @@ static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
}
#endif
+static int arm_cspmu_of_get_cpus(struct arm_cspmu *cspmu)
+{
+ struct of_phandle_iterator it;
+ int ret, cpu;
+
+ of_for_each_phandle(&it, ret, dev_of_node(cspmu->dev), "cpus", NULL, 0) {
+ cpu = of_cpu_node_to_id(it.node);
+ if (cpu < 0)
+ continue;
+ cpumask_set_cpu(cpu, &cspmu->associated_cpus);
+ }
+ return ret == -ENOENT ? 0 : ret;
+}
+
static int arm_cspmu_get_cpus(struct arm_cspmu *cspmu)
{
- return arm_cspmu_acpi_get_cpus(cspmu);
+ int ret = 0;
+
+ if (arm_cspmu_apmt_node(cspmu->dev))
+ ret = arm_cspmu_acpi_get_cpus(cspmu);
+ else if (device_property_present(cspmu->dev, "cpus"))
+ ret = arm_cspmu_of_get_cpus(cspmu);
+ else
+ cpumask_copy(&cspmu->associated_cpus, cpu_possible_mask);
+
+ if (!ret && cpumask_empty(&cspmu->associated_cpus)) {
+ dev_dbg(cspmu->dev, "No cpu associated with the PMU\n");
+ ret = -ENODEV;
+ }
+ return ret;
}
static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu)
{
int ret, capabilities;
- struct attribute_group **attr_groups;
- attr_groups = arm_cspmu_alloc_attr_group(cspmu);
- if (!attr_groups)
- return -ENOMEM;
+ ret = arm_cspmu_alloc_attr_groups(cspmu);
+ if (ret)
+ return ret;
ret = cpuhp_state_add_instance(arm_cspmu_cpuhp_state,
&cspmu->cpuhp_node);
@@ -1201,12 +1214,11 @@ static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu)
.start = arm_cspmu_start,
.stop = arm_cspmu_stop,
.read = arm_cspmu_read,
- .attr_groups = (const struct attribute_group **)attr_groups,
+ .attr_groups = cspmu->attr_groups,
.capabilities = capabilities,
};
/* Hardware counter init */
- arm_cspmu_stop_counters(cspmu);
arm_cspmu_reset_counters(cspmu);
ret = perf_pmu_register(&cspmu->pmu, cspmu->name, -1);
@@ -1252,14 +1264,12 @@ static int arm_cspmu_device_probe(struct platform_device *pdev)
return ret;
}
-static int arm_cspmu_device_remove(struct platform_device *pdev)
+static void arm_cspmu_device_remove(struct platform_device *pdev)
{
struct arm_cspmu *cspmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&cspmu->pmu);
cpuhp_state_remove_instance(arm_cspmu_cpuhp_state, &cspmu->cpuhp_node);
-
- return 0;
}
static const struct platform_device_id arm_cspmu_id[] = {
@@ -1268,13 +1278,20 @@ static const struct platform_device_id arm_cspmu_id[] = {
};
MODULE_DEVICE_TABLE(platform, arm_cspmu_id);
+static const struct of_device_id arm_cspmu_of_match[] = {
+ { .compatible = "arm,coresight-pmu" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, arm_cspmu_of_match);
+
static struct platform_driver arm_cspmu_driver = {
.driver = {
- .name = DRVNAME,
- .suppress_bind_attrs = true,
- },
+ .name = DRVNAME,
+ .of_match_table = arm_cspmu_of_match,
+ .suppress_bind_attrs = true,
+ },
.probe = arm_cspmu_device_probe,
- .remove = arm_cspmu_device_remove,
+ .remove_new = arm_cspmu_device_remove,
.id_table = arm_cspmu_id,
};
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.h b/drivers/perf/arm_cspmu/arm_cspmu.h
index 2fe723555a6b3..c9163acfe8105 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.h
+++ b/drivers/perf/arm_cspmu/arm_cspmu.h
@@ -157,6 +157,7 @@ struct arm_cspmu {
int cycle_counter_logical_idx;
struct arm_cspmu_hw_events hw_events;
+ const struct attribute_group *attr_groups[5];
struct arm_cspmu_impl impl;
};
diff --git a/drivers/perf/arm_cspmu/nvidia_cspmu.c b/drivers/perf/arm_cspmu/nvidia_cspmu.c
index 0382b702f0920..5b84b701ad622 100644
--- a/drivers/perf/arm_cspmu/nvidia_cspmu.c
+++ b/drivers/perf/arm_cspmu/nvidia_cspmu.c
@@ -388,12 +388,6 @@ static int nv_cspmu_init_ops(struct arm_cspmu *cspmu)
impl_ops->get_format_attrs = nv_cspmu_get_format_attrs;
impl_ops->get_name = nv_cspmu_get_name;
- /* Set others to NULL to use default callback. */
- impl_ops->event_type = NULL;
- impl_ops->event_attr_is_visible = NULL;
- impl_ops->get_identifier = NULL;
- impl_ops->is_cycle_counter_event = NULL;
-
return 0;
}
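Following the arm_cspmu and nvidia_cspmu changes above, a vendor backend's init hook now only overrides the callbacks it actually needs, since arm_cspmu_init_impl_ops() prefills every slot with the arm_cspmu_* defaults. A short illustrative sketch; the vendor_cspmu_* names are hypothetical.

/* Illustrative only: override just what differs from the defaults. */
static int vendor_cspmu_init_ops(struct arm_cspmu *cspmu)
{
	struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;

	impl_ops->get_name = vendor_cspmu_get_name;
	impl_ops->get_event_attrs = vendor_cspmu_get_event_attrs;

	/* everything left untouched keeps the arm_cspmu_* default */
	return 0;
}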
diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
index 30cea68595747..8a81be2dd5ecf 100644
--- a/drivers/perf/arm_dmc620_pmu.c
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -724,7 +724,7 @@ out_teardown_dev:
return ret;
}
-static int dmc620_pmu_device_remove(struct platform_device *pdev)
+static void dmc620_pmu_device_remove(struct platform_device *pdev)
{
struct dmc620_pmu *dmc620_pmu = platform_get_drvdata(pdev);
@@ -732,8 +732,6 @@ static int dmc620_pmu_device_remove(struct platform_device *pdev)
/* perf will synchronise RCU before devres can free dmc620_pmu */
perf_pmu_unregister(&dmc620_pmu->pmu);
-
- return 0;
}
static const struct acpi_device_id dmc620_acpi_match[] = {
@@ -748,7 +746,7 @@ static struct platform_driver dmc620_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = dmc620_pmu_device_probe,
- .remove = dmc620_pmu_device_remove,
+ .remove_new = dmc620_pmu_device_remove,
};
static int __init dmc620_pmu_init(void)
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index 7ec4498e312f7..bae3ca37f846e 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -774,14 +774,12 @@ static int dsu_pmu_device_probe(struct platform_device *pdev)
return rc;
}
-static int dsu_pmu_device_remove(struct platform_device *pdev)
+static void dsu_pmu_device_remove(struct platform_device *pdev)
{
struct dsu_pmu *dsu_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&dsu_pmu->pmu);
cpuhp_state_remove_instance(dsu_pmu_cpuhp_state, &dsu_pmu->cpuhp_node);
-
- return 0;
}
static const struct of_device_id dsu_pmu_of_match[] = {
@@ -806,7 +804,7 @@ static struct platform_driver dsu_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = dsu_pmu_device_probe,
- .remove = dsu_pmu_device_remove,
+ .remove_new = dsu_pmu_device_remove,
};
static int dsu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 9e5d7fa647b6d..719aa953a1c4d 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -965,14 +965,12 @@ out_unregister:
return err;
}
-static int smmu_pmu_remove(struct platform_device *pdev)
+static void smmu_pmu_remove(struct platform_device *pdev)
{
struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&smmu_pmu->pmu);
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
-
- return 0;
}
static void smmu_pmu_shutdown(struct platform_device *pdev)
@@ -997,7 +995,7 @@ static struct platform_driver smmu_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = smmu_pmu_probe,
- .remove = smmu_pmu_remove,
+ .remove_new = smmu_pmu_remove,
.shutdown = smmu_pmu_shutdown,
};
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index b622d75d8c9e4..35f0de03416fc 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -1263,14 +1263,13 @@ out_free_handle:
return ret;
}
-static int arm_spe_pmu_device_remove(struct platform_device *pdev)
+static void arm_spe_pmu_device_remove(struct platform_device *pdev)
{
struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
arm_spe_pmu_perf_destroy(spe_pmu);
arm_spe_pmu_dev_teardown(spe_pmu);
free_percpu(spe_pmu->handle);
- return 0;
}
static struct platform_driver arm_spe_pmu_driver = {
@@ -1281,7 +1280,7 @@ static struct platform_driver arm_spe_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = arm_spe_pmu_device_probe,
- .remove = arm_spe_pmu_device_remove,
+ .remove_new = arm_spe_pmu_device_remove,
};
static int __init arm_spe_pmu_init(void)
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 7dbfaee372c76..4e8fa5a48fcfe 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -826,7 +826,7 @@ cpuhp_state_err:
return ret;
}
-static int ddr_perf_remove(struct platform_device *pdev)
+static void ddr_perf_remove(struct platform_device *pdev)
{
struct ddr_pmu *pmu = platform_get_drvdata(pdev);
@@ -836,7 +836,6 @@ static int ddr_perf_remove(struct platform_device *pdev)
perf_pmu_unregister(&pmu->pmu);
ida_free(&ddr_ida, pmu->id);
- return 0;
}
static struct platform_driver imx_ddr_pmu_driver = {
@@ -846,7 +845,7 @@ static struct platform_driver imx_ddr_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = ddr_perf_probe,
- .remove = ddr_perf_remove,
+ .remove_new = ddr_perf_remove,
};
module_platform_driver(imx_ddr_pmu_driver);
diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
index 9685645bfe04d..72c2d3074cded 100644
--- a/drivers/perf/fsl_imx9_ddr_perf.c
+++ b/drivers/perf/fsl_imx9_ddr_perf.c
@@ -679,7 +679,7 @@ format_string_err:
return ret;
}
-static int ddr_perf_remove(struct platform_device *pdev)
+static void ddr_perf_remove(struct platform_device *pdev)
{
struct ddr_pmu *pmu = platform_get_drvdata(pdev);
@@ -689,8 +689,6 @@ static int ddr_perf_remove(struct platform_device *pdev)
perf_pmu_unregister(&pmu->pmu);
ida_free(&ddr_ida, pmu->id);
-
- return 0;
}
static struct platform_driver imx_ddr_pmu_driver = {
@@ -700,7 +698,7 @@ static struct platform_driver imx_ddr_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = ddr_perf_probe,
- .remove = ddr_perf_remove,
+ .remove_new = ddr_perf_remove,
};
module_platform_driver(imx_ddr_pmu_driver);
diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
index b90ba8aca3fa5..5d1f0e9fdb08d 100644
--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
+++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
@@ -216,10 +216,8 @@ static void hisi_pcie_pmu_writeq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset,
writeq_relaxed(val, pcie_pmu->base + offset);
}
-static void hisi_pcie_pmu_config_filter(struct perf_event *event)
+static u64 hisi_pcie_pmu_get_event_ctrl_val(struct perf_event *event)
{
- struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
u64 port, trig_len, thr_len, len_mode;
u64 reg = HISI_PCIE_INIT_SET;
@@ -256,10 +254,19 @@ static void hisi_pcie_pmu_config_filter(struct perf_event *event)
else
reg |= FIELD_PREP(HISI_PCIE_LEN_M, HISI_PCIE_LEN_M_DEFAULT);
+ return reg;
+}
+
+static void hisi_pcie_pmu_config_event_ctrl(struct perf_event *event)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 reg = hisi_pcie_pmu_get_event_ctrl_val(event);
+
hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, reg);
}
-static void hisi_pcie_pmu_clear_filter(struct perf_event *event)
+static void hisi_pcie_pmu_clear_event_ctrl(struct perf_event *event)
{
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -299,18 +306,24 @@ static bool hisi_pcie_pmu_valid_filter(struct perf_event *event,
if (hisi_pcie_get_trig_len(event) > HISI_PCIE_TRIG_MAX_VAL)
return false;
- if (requester_id) {
- if (!hisi_pcie_pmu_valid_requester_id(pcie_pmu, requester_id))
- return false;
- }
+ /* The "port" or "bdf" filter must be set explicitly */
+ if (!hisi_pcie_get_port(event) &&
+ !hisi_pcie_pmu_valid_requester_id(pcie_pmu, requester_id))
+ return false;
return true;
}
+/*
+ * Check whether two events share the same config. Sharing a config means
+ * not only the event code but also the filter settings of the two events
+ * are the same.
+ */
static bool hisi_pcie_pmu_cmp_event(struct perf_event *target,
struct perf_event *event)
{
- return hisi_pcie_get_real_event(target) == hisi_pcie_get_real_event(event);
+ return hisi_pcie_pmu_get_event_ctrl_val(target) ==
+ hisi_pcie_pmu_get_event_ctrl_val(event);
}
static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event)
@@ -385,40 +398,32 @@ static u64 hisi_pcie_pmu_read_counter(struct perf_event *event)
return hisi_pcie_pmu_readq(pcie_pmu, event->hw.event_base, idx);
}
-static int hisi_pcie_pmu_find_related_event(struct hisi_pcie_pmu *pcie_pmu,
- struct perf_event *event)
+/*
+ * Check all working events; if a related event is found, return its
+ * counter index first, otherwise return the first idle counter (which
+ * needs to be reset).
+ */
+static int hisi_pcie_pmu_get_event_idx(struct hisi_pcie_pmu *pcie_pmu,
+ struct perf_event *event)
{
+ int first_idle = -EAGAIN;
struct perf_event *sibling;
int idx;
for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
sibling = pcie_pmu->hw_events[idx];
- if (!sibling)
- continue;
-
- if (!hisi_pcie_pmu_cmp_event(sibling, event))
+ if (!sibling) {
+ if (first_idle == -EAGAIN)
+ first_idle = idx;
continue;
+ }
/* Related events must be used in group */
- if (sibling->group_leader == event->group_leader)
+ if (hisi_pcie_pmu_cmp_event(sibling, event) &&
+ sibling->group_leader == event->group_leader)
return idx;
- else
- return -EINVAL;
}
- return idx;
-}
-
-static int hisi_pcie_pmu_get_event_idx(struct hisi_pcie_pmu *pcie_pmu)
-{
- int idx;
-
- for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
- if (!pcie_pmu->hw_events[idx])
- return idx;
- }
-
- return -EINVAL;
+ return first_idle;
}
static void hisi_pcie_pmu_event_update(struct perf_event *event)
@@ -505,7 +510,7 @@ static void hisi_pcie_pmu_start(struct perf_event *event, int flags)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
- hisi_pcie_pmu_config_filter(event);
+ hisi_pcie_pmu_config_event_ctrl(event);
hisi_pcie_pmu_enable_counter(pcie_pmu, hwc);
hisi_pcie_pmu_enable_int(pcie_pmu, hwc);
hisi_pcie_pmu_set_period(event);
@@ -526,7 +531,7 @@ static void hisi_pcie_pmu_stop(struct perf_event *event, int flags)
hisi_pcie_pmu_event_update(event);
hisi_pcie_pmu_disable_int(pcie_pmu, hwc);
hisi_pcie_pmu_disable_counter(pcie_pmu, hwc);
- hisi_pcie_pmu_clear_filter(event);
+ hisi_pcie_pmu_clear_event_ctrl(event);
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
@@ -544,27 +549,18 @@ static int hisi_pcie_pmu_add(struct perf_event *event, int flags)
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
- /* Check all working events to find a related event. */
- idx = hisi_pcie_pmu_find_related_event(pcie_pmu, event);
- if (idx < 0)
- return idx;
-
- /* Current event shares an enabled counter with the related event */
- if (idx < HISI_PCIE_MAX_COUNTERS) {
- hwc->idx = idx;
- goto start_count;
- }
-
- idx = hisi_pcie_pmu_get_event_idx(pcie_pmu);
+ idx = hisi_pcie_pmu_get_event_idx(pcie_pmu, event);
if (idx < 0)
return idx;
hwc->idx = idx;
- pcie_pmu->hw_events[idx] = event;
- /* Reset Counter to avoid previous statistic interference. */
- hisi_pcie_pmu_reset_counter(pcie_pmu, idx);
-start_count:
+ /* No enabled counter shares a related event; take this idle one and reset it */
+ if (!pcie_pmu->hw_events[idx]) {
+ hisi_pcie_pmu_reset_counter(pcie_pmu, idx);
+ pcie_pmu->hw_events[idx] = event;
+ }
+
if (flags & PERF_EF_START)
hisi_pcie_pmu_start(event, PERF_EF_RELOAD);
@@ -714,10 +710,18 @@ static struct attribute *hisi_pcie_pmu_events_attr[] = {
HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_cnt, 0x10210),
HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_latency, 0x0011),
HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_cnt, 0x10011),
+ HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_flux, 0x0104),
+ HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_time, 0x10104),
HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_flux, 0x0804),
HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_time, 0x10804),
+ HISI_PCIE_PMU_EVENT_ATTR(rx_cpl_flux, 0x2004),
+ HISI_PCIE_PMU_EVENT_ATTR(rx_cpl_time, 0x12004),
+ HISI_PCIE_PMU_EVENT_ATTR(tx_mwr_flux, 0x0105),
+ HISI_PCIE_PMU_EVENT_ATTR(tx_mwr_time, 0x10105),
HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_flux, 0x0405),
HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_time, 0x10405),
+ HISI_PCIE_PMU_EVENT_ATTR(tx_cpl_flux, 0x1005),
+ HISI_PCIE_PMU_EVENT_ATTR(tx_cpl_time, 0x11005),
NULL
};
diff --git a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
index 40f1bc9f9b913..0e923f94fa5b0 100644
--- a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
@@ -341,7 +341,7 @@ static int hisi_cpa_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_cpa_pmu_remove(struct platform_device *pdev)
+static void hisi_cpa_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *cpa_pmu = platform_get_drvdata(pdev);
@@ -349,7 +349,6 @@ static int hisi_cpa_pmu_remove(struct platform_device *pdev)
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
&cpa_pmu->node);
hisi_cpa_pmu_enable_pm(cpa_pmu);
- return 0;
}
static struct platform_driver hisi_cpa_pmu_driver = {
@@ -359,7 +358,7 @@ static struct platform_driver hisi_cpa_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = hisi_cpa_pmu_probe,
- .remove = hisi_cpa_pmu_remove,
+ .remove_new = hisi_cpa_pmu_remove,
};
static int __init hisi_cpa_pmu_module_init(void)
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index ffb039d05d07b..b804e37381134 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -531,14 +531,13 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_ddrc_pmu_remove(struct platform_device *pdev)
+static void hisi_ddrc_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&ddrc_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
&ddrc_pmu->node);
- return 0;
}
static struct platform_driver hisi_ddrc_pmu_driver = {
@@ -548,7 +547,7 @@ static struct platform_driver hisi_ddrc_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = hisi_ddrc_pmu_probe,
- .remove = hisi_ddrc_pmu_remove,
+ .remove_new = hisi_ddrc_pmu_remove,
};
static int __init hisi_ddrc_pmu_module_init(void)
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 15caf99e1eefe..21e69b1cdd4d2 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -534,14 +534,13 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_hha_pmu_remove(struct platform_device *pdev)
+static void hisi_hha_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&hha_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
&hha_pmu->node);
- return 0;
}
static struct platform_driver hisi_hha_pmu_driver = {
@@ -551,7 +550,7 @@ static struct platform_driver hisi_hha_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = hisi_hha_pmu_probe,
- .remove = hisi_hha_pmu_remove,
+ .remove_new = hisi_hha_pmu_remove,
};
static int __init hisi_hha_pmu_module_init(void)
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 794dbcd19b7a7..51ba76871097a 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -568,14 +568,13 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_l3c_pmu_remove(struct platform_device *pdev)
+static void hisi_l3c_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *l3c_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&l3c_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
&l3c_pmu->node);
- return 0;
}
static struct platform_driver hisi_l3c_pmu_driver = {
@@ -585,7 +584,7 @@ static struct platform_driver hisi_l3c_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = hisi_l3c_pmu_probe,
- .remove = hisi_l3c_pmu_remove,
+ .remove_new = hisi_l3c_pmu_remove,
};
static int __init hisi_l3c_pmu_module_init(void)
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
index 797cf201996a9..3cdb35c741f95 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
@@ -514,14 +514,13 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_pa_pmu_remove(struct platform_device *pdev)
+static void hisi_pa_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *pa_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&pa_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
&pa_pmu->node);
- return 0;
}
static const struct acpi_device_id hisi_pa_pmu_acpi_match[] = {
@@ -539,7 +538,7 @@ static struct platform_driver hisi_pa_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = hisi_pa_pmu_probe,
- .remove = hisi_pa_pmu_remove,
+ .remove_new = hisi_pa_pmu_remove,
};
static int __init hisi_pa_pmu_module_init(void)
diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
index e706ca5676764..765bbd61db26e 100644
--- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
@@ -460,14 +460,13 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_sllc_pmu_remove(struct platform_device *pdev)
+static void hisi_sllc_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *sllc_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&sllc_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
&sllc_pmu->node);
- return 0;
}
static struct platform_driver hisi_sllc_pmu_driver = {
@@ -477,7 +476,7 @@ static struct platform_driver hisi_sllc_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = hisi_sllc_pmu_probe,
- .remove = hisi_sllc_pmu_remove,
+ .remove_new = hisi_sllc_pmu_remove,
};
static int __init hisi_sllc_pmu_module_init(void)
diff --git a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
index 636fb79647c8c..481dcc9e8fbf8 100644
--- a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
@@ -287,12 +287,52 @@ static u64 hisi_uc_pmu_read_counter(struct hisi_pmu *uc_pmu,
return readq(uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
}
-static void hisi_uc_pmu_write_counter(struct hisi_pmu *uc_pmu,
+static bool hisi_uc_pmu_get_glb_en_state(struct hisi_pmu *uc_pmu)
+{
+ u32 val;
+
+ val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
+ return !!FIELD_GET(HISI_UC_EVENT_GLB_EN, val);
+}
+
+static void hisi_uc_pmu_write_counter_normal(struct hisi_pmu *uc_pmu,
struct hw_perf_event *hwc, u64 val)
{
writeq(val, uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
}
+static void hisi_uc_pmu_write_counter_quirk_v2(struct hisi_pmu *uc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ hisi_uc_pmu_start_counters(uc_pmu);
+ hisi_uc_pmu_write_counter_normal(uc_pmu, hwc, val);
+ hisi_uc_pmu_stop_counters(uc_pmu);
+}
+
+static void hisi_uc_pmu_write_counter(struct hisi_pmu *uc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ bool enable = hisi_uc_pmu_get_glb_en_state(uc_pmu);
+ bool erratum = uc_pmu->identifier == HISI_PMU_V2;
+
+ /*
+ * HiSilicon UC PMU v2 suffers from erratum 162700402: the PMU
+ * counter cannot be written while its clock is gated in power
+ * saving mode, which leads to wrong or inaccurate counts.
+ * The clock can be turned on through the PMU global enable control.
+ * Both the irq handler and pmu_start() call this function to set
+ * the period. In irq context the PMU is already enabled, so the
+ * counter is written directly. Otherwise the PMU is disabled, so
+ * enable it to clock the counter, write the period, and then
+ * restore the previous enable state; the counter holds its value
+ * without a clock.
+ */
+ if (enable || !erratum)
+ hisi_uc_pmu_write_counter_normal(uc_pmu, hwc, val);
+ else
+ hisi_uc_pmu_write_counter_quirk_v2(uc_pmu, hwc, val);
+}
+
static void hisi_uc_pmu_enable_counter_int(struct hisi_pmu *uc_pmu,
struct hw_perf_event *hwc)
{
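A compact restatement of the erratum 162700402 write path above, with placeholder helper names standing in for the calls in the hunk (illustrative only, not additional driver code):

	if (pmu_is_globally_enabled(uc_pmu) || !has_v2_erratum(uc_pmu)) {
		write_counter(uc_pmu, hwc, val);	/* counter clock already running */
	} else {
		start_counters(uc_pmu);			/* ungate the counter clock */
		write_counter(uc_pmu, hwc, val);
		stop_counters(uc_pmu);			/* counter holds val without a clock */
	}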
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
index 524ba82bfce2c..e2abca188dbec 100644
--- a/drivers/perf/marvell_cn10k_ddr_pmu.c
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -697,7 +697,7 @@ error:
return ret;
}
-static int cn10k_ddr_perf_remove(struct platform_device *pdev)
+static void cn10k_ddr_perf_remove(struct platform_device *pdev)
{
struct cn10k_ddr_pmu *ddr_pmu = platform_get_drvdata(pdev);
@@ -706,7 +706,6 @@ static int cn10k_ddr_perf_remove(struct platform_device *pdev)
&ddr_pmu->node);
perf_pmu_unregister(&ddr_pmu->pmu);
- return 0;
}
#ifdef CONFIG_OF
@@ -733,7 +732,7 @@ static struct platform_driver cn10k_ddr_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = cn10k_ddr_perf_probe,
- .remove = cn10k_ddr_perf_remove,
+ .remove_new = cn10k_ddr_perf_remove,
};
static int __init cn10k_ddr_pmu_init(void)
diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
index fec8e82edb955..9e635f3554709 100644
--- a/drivers/perf/marvell_cn10k_tad_pmu.c
+++ b/drivers/perf/marvell_cn10k_tad_pmu.c
@@ -351,15 +351,13 @@ static int tad_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int tad_pmu_remove(struct platform_device *pdev)
+static void tad_pmu_remove(struct platform_device *pdev)
{
struct tad_pmu *pmu = platform_get_drvdata(pdev);
cpuhp_state_remove_instance_nocalls(tad_pmu_cpuhp_state,
&pmu->node);
perf_pmu_unregister(&pmu->pmu);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -385,7 +383,7 @@ static struct platform_driver tad_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = tad_pmu_probe,
- .remove = tad_pmu_remove,
+ .remove_new = tad_pmu_remove,
};
static int tad_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 3f9a98c17a897..148df5ae8ef83 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -965,7 +965,7 @@ out_unregister:
return err;
}
-static int l2_cache_pmu_remove(struct platform_device *pdev)
+static void l2_cache_pmu_remove(struct platform_device *pdev)
{
struct l2cache_pmu *l2cache_pmu =
to_l2cache_pmu(platform_get_drvdata(pdev));
@@ -973,7 +973,6 @@ static int l2_cache_pmu_remove(struct platform_device *pdev)
perf_pmu_unregister(&l2cache_pmu->pmu);
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
&l2cache_pmu->node);
- return 0;
}
static struct platform_driver l2_cache_pmu_driver = {
@@ -983,7 +982,7 @@ static struct platform_driver l2_cache_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = l2_cache_pmu_probe,
- .remove = l2_cache_pmu_remove,
+ .remove_new = l2_cache_pmu_remove,
};
static int __init register_l2_cache_pmu_driver(void)
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
index c78a6fd6c57f6..b4efdddb2ad91 100644
--- a/drivers/perf/riscv_pmu.c
+++ b/drivers/perf/riscv_pmu.c
@@ -313,6 +313,10 @@ static int riscv_pmu_event_init(struct perf_event *event)
u64 event_config = 0;
uint64_t cmask;
+ /* driver does not support branch stack sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
hwc->flags = 0;
mapped_event = rvpmu->event_map(event, &event_config);
if (mapped_event < 0) {
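The new check relies on has_branch_stack(), which boils down to testing the branch-stack sample bit in the event attributes; a sketch of the equivalent test, not the exact kernel helper:

	static bool wants_branch_stack(const struct perf_event_attr *attr)
	{
		return attr->sample_type & PERF_SAMPLE_BRANCH_STACK;
	}

With this in place, a request such as "perf record -b" on the RISC-V PMU fails cleanly with EOPNOTSUPP instead of being silently ignored.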
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 452aab49db1e8..8cbe6e5f9c39a 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -19,11 +19,33 @@
#include <linux/of.h>
#include <linux/cpu_pm.h>
#include <linux/sched/clock.h>
+#include <linux/soc/andes/irq.h>
#include <asm/errata_list.h>
#include <asm/sbi.h>
#include <asm/cpufeature.h>
+#define ALT_SBI_PMU_OVERFLOW(__ovl) \
+asm volatile(ALTERNATIVE_2( \
+ "csrr %0, " __stringify(CSR_SSCOUNTOVF), \
+ "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \
+ THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \
+ CONFIG_ERRATA_THEAD_PMU, \
+ "csrr %0, " __stringify(ANDES_CSR_SCOUNTEROF), \
+ 0, RISCV_ISA_EXT_XANDESPMU, \
+ CONFIG_ANDES_CUSTOM_PMU) \
+ : "=r" (__ovl) : \
+ : "memory")
+
+#define ALT_SBI_PMU_OVF_CLEAR_PENDING(__irq_mask) \
+asm volatile(ALTERNATIVE( \
+ "csrc " __stringify(CSR_IP) ", %0\n\t", \
+ "csrc " __stringify(ANDES_CSR_SLIP) ", %0\n\t", \
+ 0, RISCV_ISA_EXT_XANDESPMU, \
+ CONFIG_ANDES_CUSTOM_PMU) \
+ : : "r"(__irq_mask) \
+ : "memory")
+
#define SYSCTL_NO_USER_ACCESS 0
#define SYSCTL_USER_ACCESS 1
#define SYSCTL_LEGACY 2
@@ -61,6 +83,7 @@ static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS;
static union sbi_pmu_ctr_info *pmu_ctr_list;
static bool riscv_pmu_use_irq;
static unsigned int riscv_pmu_irq_num;
+static unsigned int riscv_pmu_irq_mask;
static unsigned int riscv_pmu_irq;
/* Cache the available counters in a bitmask */
@@ -694,7 +717,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
event = cpu_hw_evt->events[fidx];
if (!event) {
- csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
return IRQ_NONE;
}
@@ -708,7 +731,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
* Overflow interrupt pending bit should only be cleared after stopping
* all the counters to avoid any race condition.
*/
- csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
/* No overflow bit is set */
if (!overflow)
@@ -780,8 +803,7 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
if (riscv_pmu_use_irq) {
cpu_hw_evt->irq = riscv_pmu_irq;
- csr_clear(CSR_IP, BIT(riscv_pmu_irq_num));
- csr_set(CSR_IE, BIT(riscv_pmu_irq_num));
+ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
}
@@ -792,7 +814,6 @@ static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
{
if (riscv_pmu_use_irq) {
disable_percpu_irq(riscv_pmu_irq);
- csr_clear(CSR_IE, BIT(riscv_pmu_irq_num));
}
/* Disable all counters access for user mode now */
@@ -816,8 +837,14 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
riscv_cached_mimpid(0) == 0) {
riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU;
riscv_pmu_use_irq = true;
+ } else if (riscv_isa_extension_available(NULL, XANDESPMU) &&
+ IS_ENABLED(CONFIG_ANDES_CUSTOM_PMU)) {
+ riscv_pmu_irq_num = ANDES_SLI_CAUSE_BASE + ANDES_RV_IRQ_PMOVI;
+ riscv_pmu_use_irq = true;
}
+ riscv_pmu_irq_mask = BIT(riscv_pmu_irq_num % BITS_PER_LONG);
+
if (!riscv_pmu_use_irq)
return -EOPNOTSUPP;
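riscv_pmu_irq_mask is needed because the Andes PMU overflow cause number is offset by ANDES_SLI_CAUSE_BASE and may exceed the width of the local interrupt-pending CSR, so the bit actually cleared has to be taken modulo BITS_PER_LONG. A sketch with assumed numeric values (the real constants live in linux/soc/andes/irq.h):

	unsigned int irq_num = 256 + 18;			/* assumed BASE + PMOVI */
	unsigned long mask = BIT(irq_num % BITS_PER_LONG);	/* bit inside the SLIP CSR */

For the standard Sscofpmf case the modulo is a no-op, since the overflow cause number already fits within one register.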
diff --git a/drivers/perf/starfive_starlink_pmu.c b/drivers/perf/starfive_starlink_pmu.c
new file mode 100644
index 0000000000000..5e5a672b42294
--- /dev/null
+++ b/drivers/perf/starfive_starlink_pmu.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * StarFive's StarLink PMU driver
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ *
+ * Author: Ji Sheng Teoh <jisheng.teoh@starfivetech.com>
+ *
+ */
+
+#define STARLINK_PMU_PDEV_NAME "starfive_starlink_pmu"
+#define pr_fmt(fmt) STARLINK_PMU_PDEV_NAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/cpu_pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+
+#define STARLINK_PMU_MAX_COUNTERS 64
+#define STARLINK_PMU_NUM_COUNTERS 16
+#define STARLINK_PMU_IDX_CYCLE_COUNTER 63
+
+#define STARLINK_PMU_EVENT_SELECT 0x060
+#define STARLINK_PMU_EVENT_COUNTER 0x160
+#define STARLINK_PMU_COUNTER_MASK GENMASK_ULL(63, 0)
+#define STARLINK_PMU_CYCLE_COUNTER 0x058
+
+#define STARLINK_PMU_CONTROL 0x040
+#define STARLINK_PMU_GLOBAL_ENABLE BIT_ULL(0)
+
+#define STARLINK_PMU_INTERRUPT_ENABLE 0x050
+#define STARLINK_PMU_COUNTER_OVERFLOW_STATUS 0x048
+#define STARLINK_PMU_CYCLE_OVERFLOW_MASK BIT_ULL(63)
+
+#define STARLINK_CYCLES 0x058
+#define CACHE_READ_REQUEST 0x04000701
+#define CACHE_WRITE_REQUEST 0x03000001
+#define CACHE_RELEASE_REQUEST 0x0003e001
+#define CACHE_READ_HIT 0x00901202
+#define CACHE_READ_MISS 0x04008002
+#define CACHE_WRITE_HIT 0x006c0002
+#define CACHE_WRITE_MISS 0x03000002
+#define CACHE_WRITEBACK 0x00000403
+
+#define to_starlink_pmu(p) (container_of(p, struct starlink_pmu, pmu))
+
+#define STARLINK_FORMAT_ATTR(_name, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { .attr = __ATTR(_name, 0444, starlink_pmu_sysfs_format_show, NULL), \
+ .var = (void *)_config, } \
+ })[0].attr.attr)
+
+#define STARLINK_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR_ID(_name, starlink_pmu_sysfs_event_show, _id)
+
+static int starlink_pmu_cpuhp_state;
+
+struct starlink_hw_events {
+ struct perf_event *events[STARLINK_PMU_MAX_COUNTERS];
+ DECLARE_BITMAP(used_mask, STARLINK_PMU_MAX_COUNTERS);
+};
+
+struct starlink_pmu {
+ struct pmu pmu;
+ struct starlink_hw_events __percpu *hw_events;
+ struct hlist_node node;
+ struct notifier_block starlink_pmu_pm_nb;
+ void __iomem *pmu_base;
+ cpumask_t cpumask;
+ int irq;
+};
+
+static ssize_t
+starlink_pmu_sysfs_format_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr);
+
+ return sysfs_emit(buf, "%s\n", (char *)eattr->var);
+}
+
+static struct attribute *starlink_pmu_format_attrs[] = {
+ STARLINK_FORMAT_ATTR(event, "config:0-31"),
+ NULL
+};
+
+static const struct attribute_group starlink_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = starlink_pmu_format_attrs,
+};
+
+static ssize_t
+starlink_pmu_sysfs_event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct perf_pmu_events_attr *eattr = container_of(attr,
+ struct perf_pmu_events_attr, attr);
+
+ return sysfs_emit(buf, "event=0x%02llx\n", eattr->id);
+}
+
+static struct attribute *starlink_pmu_event_attrs[] = {
+ STARLINK_EVENT_ATTR(cycles, STARLINK_CYCLES),
+ STARLINK_EVENT_ATTR(read_request, CACHE_READ_REQUEST),
+ STARLINK_EVENT_ATTR(write_request, CACHE_WRITE_REQUEST),
+ STARLINK_EVENT_ATTR(release_request, CACHE_RELEASE_REQUEST),
+ STARLINK_EVENT_ATTR(read_hit, CACHE_READ_HIT),
+ STARLINK_EVENT_ATTR(read_miss, CACHE_READ_MISS),
+ STARLINK_EVENT_ATTR(write_hit, CACHE_WRITE_HIT),
+ STARLINK_EVENT_ATTR(write_miss, CACHE_WRITE_MISS),
+ STARLINK_EVENT_ATTR(writeback, CACHE_WRITEBACK),
+ NULL
+};
+
+static const struct attribute_group starlink_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = starlink_pmu_event_attrs,
+};
+
+static ssize_t
+cpumask_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, &starlink_pmu->cpumask);
+}
+
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *starlink_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static const struct attribute_group starlink_pmu_cpumask_attr_group = {
+ .attrs = starlink_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *starlink_pmu_attr_groups[] = {
+ &starlink_pmu_format_attr_group,
+ &starlink_pmu_events_attr_group,
+ &starlink_pmu_cpumask_attr_group,
+ NULL
+};
+
+static void starlink_pmu_set_event_period(struct perf_event *event)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = event->hw.idx;
+
+ /*
+ * Program the counter to half of its max count to handle
+ * cases of extreme interrupt latency.
+ */
+ u64 val = STARLINK_PMU_COUNTER_MASK >> 1;
+
+ local64_set(&hwc->prev_count, val);
+ if (hwc->config == STARLINK_CYCLES)
+ writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_CYCLE_COUNTER);
+ else
+ writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_EVENT_COUNTER +
+ idx * sizeof(u64));
+}
+
+static void starlink_pmu_counter_start(struct perf_event *event,
+ struct starlink_pmu *starlink_pmu)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = event->hw.idx;
+ u64 val;
+
+ /*
+ * Enable counter overflow interrupt[63:0],
+ * which is mapped as follows:
+ *
+ * event counter 0 - Bit [0]
+ * event counter 1 - Bit [1]
+ * ...
+ * cycle counter - Bit [63]
+ */
+ val = readq(starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE);
+
+ if (hwc->config == STARLINK_CYCLES) {
+ /*
+ * Cycle count has its dedicated register, and it starts
+ * counting as soon as STARLINK_PMU_GLOBAL_ENABLE is set.
+ */
+ val |= STARLINK_PMU_CYCLE_OVERFLOW_MASK;
+ } else {
+ writeq(event->hw.config, starlink_pmu->pmu_base +
+ STARLINK_PMU_EVENT_SELECT + idx * sizeof(u64));
+
+ val |= BIT_ULL(idx);
+ }
+
+ writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE);
+
+ writeq(STARLINK_PMU_GLOBAL_ENABLE, starlink_pmu->pmu_base +
+ STARLINK_PMU_CONTROL);
+}
+
+static void starlink_pmu_counter_stop(struct perf_event *event,
+ struct starlink_pmu *starlink_pmu)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = event->hw.idx;
+ u64 val;
+
+ val = readq(starlink_pmu->pmu_base + STARLINK_PMU_CONTROL);
+ val &= ~STARLINK_PMU_GLOBAL_ENABLE;
+ writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_CONTROL);
+
+ val = readq(starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE);
+ if (hwc->config == STARLINK_CYCLES)
+ val &= ~STARLINK_PMU_CYCLE_OVERFLOW_MASK;
+ else
+ val &= ~BIT_ULL(idx);
+
+ writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE);
+}
+
+static void starlink_pmu_update(struct perf_event *event)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ u64 prev_raw_count, new_raw_count;
+ u64 oldval;
+ u64 delta;
+
+ do {
+ prev_raw_count = local64_read(&hwc->prev_count);
+ if (hwc->config == STARLINK_CYCLES)
+ new_raw_count = readq(starlink_pmu->pmu_base +
+ STARLINK_PMU_CYCLE_COUNTER);
+ else
+ new_raw_count = readq(starlink_pmu->pmu_base +
+ STARLINK_PMU_EVENT_COUNTER +
+ idx * sizeof(u64));
+ oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count);
+ } while (oldval != prev_raw_count);
+
+ delta = (new_raw_count - prev_raw_count) & STARLINK_PMU_COUNTER_MASK;
+ local64_add(delta, &event->count);
+}
+
+static void starlink_pmu_start(struct perf_event *event, int flags)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+ return;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+
+ starlink_pmu_set_event_period(event);
+ starlink_pmu_counter_start(event, starlink_pmu);
+
+ perf_event_update_userpage(event);
+}
+
+static void starlink_pmu_stop(struct perf_event *event, int flags)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (hwc->state & PERF_HES_STOPPED)
+ return;
+
+ starlink_pmu_counter_stop(event, starlink_pmu);
+ starlink_pmu_update(event);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int starlink_pmu_add(struct perf_event *event, int flags)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct starlink_hw_events *hw_events =
+ this_cpu_ptr(starlink_pmu->hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long *used_mask = hw_events->used_mask;
+ u32 n_events = STARLINK_PMU_NUM_COUNTERS;
+ int idx;
+
+ /*
+ * The cycle counter has a dedicated register to hold its value.
+ * Every other event has to be enabled through the event select
+ * register and is assigned an independent counter as it is added.
+ */
+
+ if (hwc->config == STARLINK_CYCLES) {
+ idx = STARLINK_PMU_IDX_CYCLE_COUNTER;
+ } else {
+ idx = find_first_zero_bit(used_mask, n_events);
+ /* All counters are in use */
+ if (idx < 0)
+ return idx;
+
+ set_bit(idx, used_mask);
+ }
+
+ hwc->idx = idx;
+ hw_events->events[idx] = event;
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ starlink_pmu_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static void starlink_pmu_del(struct perf_event *event, int flags)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct starlink_hw_events *hw_events =
+ this_cpu_ptr(starlink_pmu->hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+
+ starlink_pmu_stop(event, PERF_EF_UPDATE);
+ hw_events->events[hwc->idx] = NULL;
+ clear_bit(hwc->idx, hw_events->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static bool starlink_pmu_validate_event_group(struct perf_event *event)
+{
+ struct perf_event *leader = event->group_leader;
+ struct perf_event *sibling;
+ int counter = 1;
+
+ /*
+ * Ensure hardware events in the group are on the same PMU;
+ * software events are acceptable.
+ */
+ if (event->group_leader->pmu != event->pmu &&
+ !is_software_event(event->group_leader))
+ return false;
+
+ for_each_sibling_event(sibling, leader) {
+ if (sibling->pmu != event->pmu && !is_software_event(sibling))
+ return false;
+
+ counter++;
+ }
+
+ return counter <= STARLINK_PMU_NUM_COUNTERS;
+}
+
+static int starlink_pmu_event_init(struct perf_event *event)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /*
+ * Sampling is not supported, as counters are shared
+ * by all CPUs.
+ */
+ if (hwc->sample_period)
+ return -EOPNOTSUPP;
+
+ /*
+ * Per-task counting and attaching to a task are not supported,
+ * as uncore events are not specific to any CPU.
+ */
+ if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK)
+ return -EOPNOTSUPP;
+
+ if (!starlink_pmu_validate_event_group(event))
+ return -EINVAL;
+
+ hwc->idx = -1;
+ hwc->config = event->attr.config;
+ event->cpu = cpumask_first(&starlink_pmu->cpumask);
+
+ return 0;
+}
+
+static irqreturn_t starlink_pmu_handle_irq(int irq_num, void *data)
+{
+ struct starlink_pmu *starlink_pmu = data;
+ struct starlink_hw_events *hw_events =
+ this_cpu_ptr(starlink_pmu->hw_events);
+ bool handled = false;
+ int idx;
+ u64 overflow_status;
+
+ for (idx = 0; idx < STARLINK_PMU_MAX_COUNTERS; idx++) {
+ struct perf_event *event = hw_events->events[idx];
+
+ if (!event)
+ continue;
+
+ overflow_status = readq(starlink_pmu->pmu_base +
+ STARLINK_PMU_COUNTER_OVERFLOW_STATUS);
+ if (!(overflow_status & BIT_ULL(idx)))
+ continue;
+
+ writeq(BIT_ULL(idx), starlink_pmu->pmu_base +
+ STARLINK_PMU_COUNTER_OVERFLOW_STATUS);
+
+ starlink_pmu_update(event);
+ starlink_pmu_set_event_period(event);
+ handled = true;
+ }
+ return IRQ_RETVAL(handled);
+}
+
+static int starlink_setup_irqs(struct starlink_pmu *starlink_pmu,
+ struct platform_device *pdev)
+{
+ int ret, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ ret = devm_request_irq(&pdev->dev, irq, starlink_pmu_handle_irq,
+ 0, STARLINK_PMU_PDEV_NAME, starlink_pmu);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to request IRQ\n");
+
+ starlink_pmu->irq = irq;
+
+ return 0;
+}
+
+static int starlink_pmu_pm_notify(struct notifier_block *b,
+ unsigned long cmd, void *v)
+{
+ struct starlink_pmu *starlink_pmu = container_of(b, struct starlink_pmu,
+ starlink_pmu_pm_nb);
+ struct starlink_hw_events *hw_events =
+ this_cpu_ptr(starlink_pmu->hw_events);
+ int enabled = bitmap_weight(hw_events->used_mask,
+ STARLINK_PMU_MAX_COUNTERS);
+ struct perf_event *event;
+ int idx;
+
+ if (!enabled)
+ return NOTIFY_OK;
+
+ for (idx = 0; idx < STARLINK_PMU_MAX_COUNTERS; idx++) {
+ event = hw_events->events[idx];
+ if (!event)
+ continue;
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ /* Stop and update the counter */
+ starlink_pmu_stop(event, PERF_EF_UPDATE);
+ break;
+ case CPU_PM_EXIT:
+ case CPU_PM_ENTER_FAILED:
+ /* Restore and enable the counter */
+ starlink_pmu_start(event, PERF_EF_RELOAD);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static int starlink_pmu_pm_register(struct starlink_pmu *starlink_pmu)
+{
+ if (!IS_ENABLED(CONFIG_CPU_PM))
+ return 0;
+
+ starlink_pmu->starlink_pmu_pm_nb.notifier_call = starlink_pmu_pm_notify;
+ return cpu_pm_register_notifier(&starlink_pmu->starlink_pmu_pm_nb);
+}
+
+static void starlink_pmu_pm_unregister(struct starlink_pmu *starlink_pmu)
+{
+ if (!IS_ENABLED(CONFIG_CPU_PM))
+ return;
+
+ cpu_pm_unregister_notifier(&starlink_pmu->starlink_pmu_pm_nb);
+}
+
+static void starlink_pmu_destroy(struct starlink_pmu *starlink_pmu)
+{
+ starlink_pmu_pm_unregister(starlink_pmu);
+ cpuhp_state_remove_instance(starlink_pmu_cpuhp_state,
+ &starlink_pmu->node);
+}
+
+static int starlink_pmu_probe(struct platform_device *pdev)
+{
+ struct starlink_pmu *starlink_pmu;
+ struct starlink_hw_events *hw_events;
+ struct resource *res;
+ int cpuid, i, ret;
+
+ starlink_pmu = devm_kzalloc(&pdev->dev, sizeof(*starlink_pmu), GFP_KERNEL);
+ if (!starlink_pmu)
+ return -ENOMEM;
+
+ starlink_pmu->pmu_base =
+ devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(starlink_pmu->pmu_base))
+ return PTR_ERR(starlink_pmu->pmu_base);
+
+ starlink_pmu->hw_events = alloc_percpu_gfp(struct starlink_hw_events,
+ GFP_KERNEL);
+ if (!starlink_pmu->hw_events) {
+ dev_err(&pdev->dev, "Failed to allocate per-cpu PMU data\n");
+ return -ENOMEM;
+ }
+
+ for_each_possible_cpu(cpuid) {
+ hw_events = per_cpu_ptr(starlink_pmu->hw_events, cpuid);
+ for (i = 0; i < STARLINK_PMU_MAX_COUNTERS; i++)
+ hw_events->events[i] = NULL;
+ }
+
+ ret = starlink_setup_irqs(starlink_pmu, pdev);
+ if (ret)
+ return ret;
+
+ ret = cpuhp_state_add_instance(starlink_pmu_cpuhp_state,
+ &starlink_pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register hotplug\n");
+ return ret;
+ }
+
+ ret = starlink_pmu_pm_register(starlink_pmu);
+ if (ret) {
+ cpuhp_state_remove_instance(starlink_pmu_cpuhp_state,
+ &starlink_pmu->node);
+ return ret;
+ }
+
+ starlink_pmu->pmu = (struct pmu) {
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = starlink_pmu_event_init,
+ .add = starlink_pmu_add,
+ .del = starlink_pmu_del,
+ .start = starlink_pmu_start,
+ .stop = starlink_pmu_stop,
+ .read = starlink_pmu_update,
+ .attr_groups = starlink_pmu_attr_groups,
+ };
+
+ ret = perf_pmu_register(&starlink_pmu->pmu, STARLINK_PMU_PDEV_NAME, -1);
+ if (ret)
+ starlink_pmu_destroy(starlink_pmu);
+
+ return ret;
+}
+
+static const struct of_device_id starlink_pmu_of_match[] = {
+ { .compatible = "starfive,jh8100-starlink-pmu" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, starlink_pmu_of_match);
+
+static struct platform_driver starlink_pmu_driver = {
+ .driver = {
+ .name = STARLINK_PMU_PDEV_NAME,
+ .of_match_table = starlink_pmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = starlink_pmu_probe,
+};
+
+static int
+starlink_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct starlink_pmu *starlink_pmu = hlist_entry_safe(node,
+ struct starlink_pmu,
+ node);
+
+ if (cpumask_empty(&starlink_pmu->cpumask))
+ cpumask_set_cpu(cpu, &starlink_pmu->cpumask);
+
+ WARN_ON(irq_set_affinity(starlink_pmu->irq, cpumask_of(cpu)));
+
+ return 0;
+}
+
+static int
+starlink_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct starlink_pmu *starlink_pmu = hlist_entry_safe(node,
+ struct starlink_pmu,
+ node);
+ unsigned int target;
+
+ if (!cpumask_test_and_clear_cpu(cpu, &starlink_pmu->cpumask))
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&starlink_pmu->pmu, cpu, target);
+
+ cpumask_set_cpu(target, &starlink_pmu->cpumask);
+ WARN_ON(irq_set_affinity(starlink_pmu->irq, cpumask_of(target)));
+
+ return 0;
+}
+
+static int __init starlink_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "soc/starfive/starlink_pmu:online",
+ starlink_pmu_online_cpu,
+ starlink_pmu_offline_cpu);
+ if (ret < 0)
+ return ret;
+
+ starlink_pmu_cpuhp_state = ret;
+
+ return platform_driver_register(&starlink_pmu_driver);
+}
+
+device_initcall(starlink_pmu_init);
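Two details of the new driver are worth spelling out. starlink_pmu_set_event_period() programs the counter to half of its range (GENMASK_ULL(63, 0) >> 1, i.e. 0x7fffffffffffffff), leaving about 2^63 increments of headroom before the overflow interrupt fires, and starlink_pmu_update() computes the delta modulo 2^64, so a counter that wraps between reads is still accounted correctly:

	/* sketch of the wraparound-safe delta used in starlink_pmu_update() */
	u64 prev  = 0xfffffffffffffff0ULL;
	u64 now   = 0x0000000000000010ULL;		/* counter wrapped */
	u64 delta = (now - prev) & GENMASK_ULL(63, 0);	/* = 0x20 */

Once bound, the exposed events can be counted with perf; an illustrative invocation (event name taken from the attribute group above) would be: perf stat -a -e starfive_starlink_pmu/read_miss/ sleep 1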
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index 1edb9c03704fd..e16d10c763de1 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -993,7 +993,7 @@ static int tx2_uncore_probe(struct platform_device *pdev)
return 0;
}
-static int tx2_uncore_remove(struct platform_device *pdev)
+static void tx2_uncore_remove(struct platform_device *pdev)
{
struct tx2_uncore_pmu *tx2_pmu, *temp;
struct device *dev = &pdev->dev;
@@ -1009,7 +1009,6 @@ static int tx2_uncore_remove(struct platform_device *pdev)
}
}
}
- return 0;
}
static struct platform_driver tx2_uncore_driver = {
@@ -1019,7 +1018,7 @@ static struct platform_driver tx2_uncore_driver = {
.suppress_bind_attrs = true,
},
.probe = tx2_uncore_probe,
- .remove = tx2_uncore_remove,
+ .remove_new = tx2_uncore_remove,
};
static int __init tx2_uncore_driver_init(void)
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 7ce344248dda6..0d49343d704b7 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1937,7 +1937,7 @@ xgene_pmu_dev_cleanup(struct xgene_pmu *xgene_pmu, struct list_head *pmus)
}
}
-static int xgene_pmu_remove(struct platform_device *pdev)
+static void xgene_pmu_remove(struct platform_device *pdev)
{
struct xgene_pmu *xgene_pmu = dev_get_drvdata(&pdev->dev);
@@ -1947,13 +1947,11 @@ static int xgene_pmu_remove(struct platform_device *pdev)
xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus);
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
&xgene_pmu->node);
-
- return 0;
}
static struct platform_driver xgene_pmu_driver = {
.probe = xgene_pmu_probe,
- .remove = xgene_pmu_remove,
+ .remove_new = xgene_pmu_remove,
.driver = {
.name = "xgene-pmu",
.of_match_table = xgene_pmu_of_match,
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 4cef568231bf0..787354b849c75 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -87,6 +87,7 @@ source "drivers/phy/motorola/Kconfig"
source "drivers/phy/mscc/Kconfig"
source "drivers/phy/qualcomm/Kconfig"
source "drivers/phy/ralink/Kconfig"
+source "drivers/phy/realtek/Kconfig"
source "drivers/phy/renesas/Kconfig"
source "drivers/phy/rockchip/Kconfig"
source "drivers/phy/samsung/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index fb3dc9de61115..868a220ed0f6d 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -26,6 +26,7 @@ obj-y += allwinner/ \
mscc/ \
qualcomm/ \
ralink/ \
+ realtek/ \
renesas/ \
rockchip/ \
samsung/ \
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index e53a9a9317bc2..b0f19e9506010 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -683,7 +683,7 @@ static int sun4i_usb_phy0_vbus_notify(struct notifier_block *nb,
}
static struct phy *sun4i_usb_phy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct sun4i_usb_phy_data *data = dev_get_drvdata(dev);
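This xlate change, and the identical ones in the PHY drivers that follow, track the constification of the generic PHY of_xlate callback: the translate function now receives a read-only of_phandle_args. A minimal sketch of the updated signature, with placeholder names (my_phy_*, MY_PHY_COUNT) that are not from this patch:

	static struct phy *my_phy_xlate(struct device *dev,
					const struct of_phandle_args *args)
	{
		struct my_phy_data *data = dev_get_drvdata(dev);

		/* args is read-only; args->args[0] typically selects the instance */
		if (args->args[0] >= MY_PHY_COUNT)
			return ERR_PTR(-EINVAL);

		return data->phys[args->args[0]];
	}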
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
index 2712c4bd549d8..5468831d6ab9b 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
@@ -350,7 +350,7 @@ static int phy_g12a_usb3_pcie_exit(struct phy *phy)
}
static struct phy *phy_g12a_usb3_pcie_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct phy_g12a_usb3_pcie_priv *priv = dev_get_drvdata(dev);
unsigned int mode;
diff --git a/drivers/phy/broadcom/phy-bcm-sr-pcie.c b/drivers/phy/broadcom/phy-bcm-sr-pcie.c
index 8a4aadf166cf9..ff9b3862bf7af 100644
--- a/drivers/phy/broadcom/phy-bcm-sr-pcie.c
+++ b/drivers/phy/broadcom/phy-bcm-sr-pcie.c
@@ -195,7 +195,7 @@ static const struct phy_ops sr_paxc_phy_ops = {
};
static struct phy *sr_pcie_phy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct sr_pcie_phy_core *core;
int phy_idx;
diff --git a/drivers/phy/broadcom/phy-bcm-sr-usb.c b/drivers/phy/broadcom/phy-bcm-sr-usb.c
index b0bd18a5df879..6bcfe83609c86 100644
--- a/drivers/phy/broadcom/phy-bcm-sr-usb.c
+++ b/drivers/phy/broadcom/phy-bcm-sr-usb.c
@@ -209,7 +209,7 @@ static const struct phy_ops sr_phy_ops = {
};
static struct phy *bcm_usb_phy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct bcm_usb_phy_cfg *phy_cfg;
int phy_idx;
diff --git a/drivers/phy/broadcom/phy-bcm63xx-usbh.c b/drivers/phy/broadcom/phy-bcm63xx-usbh.c
index f8183dea774b6..647644de041bb 100644
--- a/drivers/phy/broadcom/phy-bcm63xx-usbh.c
+++ b/drivers/phy/broadcom/phy-bcm63xx-usbh.c
@@ -366,7 +366,7 @@ static const struct phy_ops bcm63xx_usbh_phy_ops = {
};
static struct phy *bcm63xx_usbh_phy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct bcm63xx_usbh_phy *usbh = dev_get_drvdata(dev);
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index a16f0b58eb745..ad2eec0956016 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -175,7 +175,7 @@ static const struct phy_ops brcm_usb_phy_ops = {
};
static struct phy *brcm_usb_phy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct brcm_usb_phy_data *data = dev_get_drvdata(dev);
diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
index a75c96385c57a..95924a09960cc 100644
--- a/drivers/phy/cadence/phy-cadence-torrent.c
+++ b/drivers/phy/cadence/phy-cadence-torrent.c
@@ -355,7 +355,9 @@ struct cdns_torrent_phy {
struct reset_control *apb_rst;
struct device *dev;
struct clk *clk;
+ struct clk *clk1;
enum cdns_torrent_ref_clk ref_clk_rate;
+ enum cdns_torrent_ref_clk ref_clk1_rate;
struct cdns_torrent_inst phys[MAX_NUM_LANES];
int nsubnodes;
const struct cdns_torrent_data *init_data;
@@ -2460,9 +2462,11 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
{
const struct cdns_torrent_data *init_data = cdns_phy->init_data;
struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
+ enum cdns_torrent_ref_clk ref_clk1 = cdns_phy->ref_clk1_rate;
enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate;
struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
enum cdns_torrent_phy_type phy_t1, phy_t2;
+ struct cdns_torrent_vals *phy_pma_cmn_vals;
struct cdns_torrent_vals *pcs_cmn_vals;
int i, j, node, mlane, num_lanes, ret;
struct cdns_reg_pairs *reg_pairs;
@@ -2489,6 +2493,7 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
* Get the array values as [phy_t2][phy_t1][ssc].
*/
swap(phy_t1, phy_t2);
+ swap(ref_clk, ref_clk1);
}
mlane = cdns_phy->phys[node].mlane;
@@ -2552,9 +2557,22 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
reg_pairs[i].val);
}
+ /* PHY PMA common registers configurations */
+ phy_pma_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->phy_pma_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_t1, phy_t2, ANY_SSC);
+ if (phy_pma_cmn_vals) {
+ reg_pairs = phy_pma_cmn_vals->reg_pairs;
+ num_regs = phy_pma_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_phy_pma_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
+
/* PMA common registers configurations */
cmn_vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl,
- ref_clk, ref_clk,
+ ref_clk, ref_clk1,
phy_t1, phy_t2, ssc);
if (cmn_vals) {
reg_pairs = cmn_vals->reg_pairs;
@@ -2567,7 +2585,7 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
/* PMA TX lane registers configurations */
tx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->tx_ln_vals_tbl,
- ref_clk, ref_clk,
+ ref_clk, ref_clk1,
phy_t1, phy_t2, ssc);
if (tx_ln_vals) {
reg_pairs = tx_ln_vals->reg_pairs;
@@ -2582,7 +2600,7 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
/* PMA RX lane registers configurations */
rx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->rx_ln_vals_tbl,
- ref_clk, ref_clk,
+ ref_clk, ref_clk1,
phy_t1, phy_t2, ssc);
if (rx_ln_vals) {
reg_pairs = rx_ln_vals->reg_pairs;
@@ -2684,9 +2702,11 @@ static int cdns_torrent_reset(struct cdns_torrent_phy *cdns_phy)
static int cdns_torrent_clk(struct cdns_torrent_phy *cdns_phy)
{
struct device *dev = cdns_phy->dev;
+ unsigned long ref_clk1_rate;
unsigned long ref_clk_rate;
int ret;
+ /* refclk: Input reference clock for PLL0 */
cdns_phy->clk = devm_clk_get(dev, "refclk");
if (IS_ERR(cdns_phy->clk)) {
dev_err(dev, "phy ref clock not found\n");
@@ -2695,15 +2715,15 @@ static int cdns_torrent_clk(struct cdns_torrent_phy *cdns_phy)
ret = clk_prepare_enable(cdns_phy->clk);
if (ret) {
- dev_err(cdns_phy->dev, "Failed to prepare ref clock\n");
+ dev_err(cdns_phy->dev, "Failed to prepare ref clock: %d\n", ret);
return ret;
}
ref_clk_rate = clk_get_rate(cdns_phy->clk);
if (!ref_clk_rate) {
dev_err(cdns_phy->dev, "Failed to get ref clock rate\n");
- clk_disable_unprepare(cdns_phy->clk);
- return -EINVAL;
+ ret = -EINVAL;
+ goto disable_clk;
}
switch (ref_clk_rate) {
@@ -2720,12 +2740,62 @@ static int cdns_torrent_clk(struct cdns_torrent_phy *cdns_phy)
cdns_phy->ref_clk_rate = CLK_156_25_MHZ;
break;
default:
- dev_err(cdns_phy->dev, "Invalid Ref Clock Rate\n");
- clk_disable_unprepare(cdns_phy->clk);
- return -EINVAL;
+ dev_err(cdns_phy->dev, "Invalid ref clock rate\n");
+ ret = -EINVAL;
+ goto disable_clk;
+ }
+
+ /* refclk1: Input reference clock for PLL1 */
+ cdns_phy->clk1 = devm_clk_get_optional(dev, "pll1_refclk");
+ if (IS_ERR(cdns_phy->clk1)) {
+ dev_err(dev, "phy PLL1 ref clock not found\n");
+ ret = PTR_ERR(cdns_phy->clk1);
+ goto disable_clk;
+ }
+
+ if (cdns_phy->clk1) {
+ ret = clk_prepare_enable(cdns_phy->clk1);
+ if (ret) {
+ dev_err(cdns_phy->dev, "Failed to prepare PLL1 ref clock: %d\n", ret);
+ goto disable_clk;
+ }
+
+ ref_clk1_rate = clk_get_rate(cdns_phy->clk1);
+ if (!ref_clk1_rate) {
+ dev_err(cdns_phy->dev, "Failed to get PLL1 ref clock rate\n");
+ ret = -EINVAL;
+ goto disable_clk1;
+ }
+
+ switch (ref_clk1_rate) {
+ case REF_CLK_19_2MHZ:
+ cdns_phy->ref_clk1_rate = CLK_19_2_MHZ;
+ break;
+ case REF_CLK_25MHZ:
+ cdns_phy->ref_clk1_rate = CLK_25_MHZ;
+ break;
+ case REF_CLK_100MHZ:
+ cdns_phy->ref_clk1_rate = CLK_100_MHZ;
+ break;
+ case REF_CLK_156_25MHZ:
+ cdns_phy->ref_clk1_rate = CLK_156_25_MHZ;
+ break;
+ default:
+ dev_err(cdns_phy->dev, "Invalid PLL1 ref clock rate\n");
+ ret = -EINVAL;
+ goto disable_clk1;
+ }
+ } else {
+ cdns_phy->ref_clk1_rate = cdns_phy->ref_clk_rate;
}
return 0;
+
+disable_clk1:
+ clk_disable_unprepare(cdns_phy->clk1);
+disable_clk:
+ clk_disable_unprepare(cdns_phy->clk);
+ return ret;
}
static int cdns_torrent_phy_probe(struct platform_device *pdev)
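The pll1_refclk handling added above leans on devm_clk_get_optional(), which returns NULL rather than an error when the clock is simply not described in the device tree, letting the driver fall back to the PLL0 reference rate. Reduced to its essentials:

	clk1 = devm_clk_get_optional(dev, "pll1_refclk");
	if (IS_ERR(clk1))
		return PTR_ERR(clk1);	/* a real error, e.g. -EPROBE_DEFER */

	if (clk1) {
		/* second reference clock present: enable it and classify its rate */
	} else {
		/* not wired up: PLL1 reuses the PLL0 reference clock rate */
	}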
@@ -2980,6 +3050,7 @@ put_lnk_rst:
reset_control_put(cdns_phy->phys[i].lnk_rst);
of_node_put(child);
reset_control_assert(cdns_phy->apb_rst);
+ clk_disable_unprepare(cdns_phy->clk1);
clk_disable_unprepare(cdns_phy->clk);
clk_cleanup:
cdns_torrent_clk_cleanup(cdns_phy);
@@ -2998,6 +3069,7 @@ static void cdns_torrent_phy_remove(struct platform_device *pdev)
reset_control_put(cdns_phy->phys[i].lnk_rst);
}
+ clk_disable_unprepare(cdns_phy->clk1);
clk_disable_unprepare(cdns_phy->clk);
cdns_torrent_clk_cleanup(cdns_phy);
}
@@ -3034,6 +3106,216 @@ static struct cdns_torrent_vals dp_usb_xcvr_diag_ln_vals = {
.num_regs = ARRAY_SIZE(dp_usb_xcvr_diag_ln_regs),
};
+/* USXGMII and SGMII/QSGMII link configuration */
+static struct cdns_reg_pairs usxgmii_sgmii_link_cmn_regs[] = {
+ {0x0002, PHY_PLL_CFG},
+ {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M0},
+ {0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs usxgmii_sgmii_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0001, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_reg_pairs sgmii_usxgmii_xcvr_diag_ln_regs[] = {
+ {0x0111, XCVR_DIAG_HSCLK_SEL},
+ {0x0103, XCVR_DIAG_HSCLK_DIV},
+ {0x0A9B, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals usxgmii_sgmii_link_cmn_vals = {
+ .reg_pairs = usxgmii_sgmii_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(usxgmii_sgmii_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals usxgmii_sgmii_xcvr_diag_ln_vals = {
+ .reg_pairs = usxgmii_sgmii_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(usxgmii_sgmii_xcvr_diag_ln_regs),
+};
+
+static struct cdns_torrent_vals sgmii_usxgmii_xcvr_diag_ln_vals = {
+ .reg_pairs = sgmii_usxgmii_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(sgmii_usxgmii_xcvr_diag_ln_regs),
+};
+
+/* Multilink USXGMII, using PLL0, 156.25 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = {
+ {0x0014, CMN_PLL0_DSM_FBH_OVRD_M0},
+ {0x0005, CMN_PLL0_DSM_FBL_OVRD_M0},
+ {0x061B, CMN_PLL0_VCOCAL_INIT_TMR},
+ {0x0019, CMN_PLL0_VCOCAL_ITER_TMR},
+ {0x1354, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x1354, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0138, CMN_PLL0_LOCK_REFCNT_START},
+ {0x0138, CMN_PLL0_LOCK_PLLCNT_START}
+};
+
+static struct cdns_torrent_vals ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = {
+ .reg_pairs = ml_usxgmii_pll0_156_25_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(ml_usxgmii_pll0_156_25_no_ssc_cmn_regs),
+};
+
+/* Multilink SGMII/QSGMII, using PLL1, 100 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs ml_sgmii_pll1_100_no_ssc_cmn_regs[] = {
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
+static struct cdns_torrent_vals ml_sgmii_pll1_100_no_ssc_cmn_vals = {
+ .reg_pairs = ml_sgmii_pll1_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(ml_sgmii_pll1_100_no_ssc_cmn_regs),
+};
+
+/* TI J7200, Multilink USXGMII, using PLL0, 156.25 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = {
+ {0x0014, CMN_SSM_BIAS_TMR},
+ {0x0028, CMN_PLLSM0_PLLPRE_TMR},
+ {0x00A4, CMN_PLLSM0_PLLLOCK_TMR},
+ {0x0062, CMN_BGCAL_INIT_TMR},
+ {0x0062, CMN_BGCAL_ITER_TMR},
+ {0x0014, CMN_IBCAL_INIT_TMR},
+ {0x0018, CMN_TXPUCAL_INIT_TMR},
+ {0x0005, CMN_TXPUCAL_ITER_TMR},
+ {0x0018, CMN_TXPDCAL_INIT_TMR},
+ {0x0005, CMN_TXPDCAL_ITER_TMR},
+ {0x024A, CMN_RXCAL_INIT_TMR},
+ {0x0005, CMN_RXCAL_ITER_TMR},
+ {0x000B, CMN_SD_CAL_REFTIM_START},
+ {0x0132, CMN_SD_CAL_PLLCNT_START},
+ {0x0014, CMN_PLL0_DSM_FBH_OVRD_M0},
+ {0x0005, CMN_PLL0_DSM_FBL_OVRD_M0},
+ {0x061B, CMN_PLL0_VCOCAL_INIT_TMR},
+ {0x0019, CMN_PLL0_VCOCAL_ITER_TMR},
+ {0x1354, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x1354, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0138, CMN_PLL0_LOCK_REFCNT_START},
+ {0x0138, CMN_PLL0_LOCK_PLLCNT_START}
+};
+
+static struct cdns_torrent_vals j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = {
+ .reg_pairs = j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs),
+};
+
+/* TI J7200, Multilink SGMII/QSGMII, using PLL1, 100 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs[] = {
+ {0x0028, CMN_PLLSM1_PLLPRE_TMR},
+ {0x00A4, CMN_PLLSM1_PLLLOCK_TMR},
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
+static struct cdns_torrent_vals j7200_ml_sgmii_pll1_100_no_ssc_cmn_vals = {
+ .reg_pairs = j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs),
+};
+
+/* PCIe and USXGMII link configuration */
+static struct cdns_reg_pairs pcie_usxgmii_link_cmn_regs[] = {
+ {0x0003, PHY_PLL_CFG},
+ {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
+ {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1},
+ {0x0400, CMN_PDIAG_PLL1_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs pcie_usxgmii_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0012, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_reg_pairs usxgmii_pcie_xcvr_diag_ln_regs[] = {
+ {0x0011, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0089, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals pcie_usxgmii_link_cmn_vals = {
+ .reg_pairs = pcie_usxgmii_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_usxgmii_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals pcie_usxgmii_xcvr_diag_ln_vals = {
+ .reg_pairs = pcie_usxgmii_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(pcie_usxgmii_xcvr_diag_ln_regs),
+};
+
+static struct cdns_torrent_vals usxgmii_pcie_xcvr_diag_ln_vals = {
+ .reg_pairs = usxgmii_pcie_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(usxgmii_pcie_xcvr_diag_ln_regs),
+};
+
+/*
+ * Multilink USXGMII, using PLL1, 156.25 MHz Ref clk, no SSC
+ */
+static struct cdns_reg_pairs ml_usxgmii_pll1_156_25_no_ssc_cmn_regs[] = {
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0014, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x0005, CMN_PLL1_DSM_FBL_OVRD_M0},
+ {0x061B, CMN_PLL1_VCOCAL_INIT_TMR},
+ {0x0019, CMN_PLL1_VCOCAL_ITER_TMR},
+ {0x1354, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x1354, CMN_PLL1_VCOCAL_PLLCNT_START},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x0138, CMN_PLL1_LOCK_REFCNT_START},
+ {0x0138, CMN_PLL1_LOCK_PLLCNT_START},
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
+static struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_tx_ln_regs[] = {
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3 },
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x0000, XCVR_DIAG_PSC_OVRD}
+};
+
+static struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_rx_ln_regs[] = {
+ {0x091D, RX_PSC_A0},
+ {0x0900, RX_PSC_A2},
+ {0x0100, RX_PSC_A3},
+ {0x0030, RX_REE_SMGM_CTRL1},
+ {0x03C7, RX_REE_GCSM1_EQENM_PH1},
+ {0x01C7, RX_REE_GCSM1_EQENM_PH2},
+ {0x0000, RX_DIAG_DFE_CTRL},
+ {0x0019, RX_REE_TAP1_CLIP},
+ {0x0019, RX_REE_TAP2TON_CLIP},
+ {0x00B9, RX_DIAG_NQST_CTRL},
+ {0x0C21, RX_DIAG_DFE_AMP_TUNE_2},
+ {0x0002, RX_DIAG_DFE_AMP_TUNE_3},
+ {0x0033, RX_DIAG_PI_RATE},
+ {0x0001, RX_DIAG_ACYA},
+ {0x018C, RX_CDRLF_CNFG}
+};
+
+static struct cdns_torrent_vals ml_usxgmii_pll1_156_25_no_ssc_cmn_vals = {
+ .reg_pairs = ml_usxgmii_pll1_156_25_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(ml_usxgmii_pll1_156_25_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_tx_ln_vals = {
+ .reg_pairs = ml_usxgmii_156_25_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_usxgmii_156_25_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_rx_ln_vals = {
+ .reg_pairs = ml_usxgmii_156_25_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_usxgmii_156_25_no_ssc_rx_ln_regs),
+};
+
/* TI USXGMII configuration: Enable cmn_refclk_rcv_out_en */
static struct cdns_reg_pairs ti_usxgmii_phy_pma_cmn_regs[] = {
{0x0040, PHY_PMA_CMN_CTRL1},
@@ -3811,6 +4093,50 @@ static struct cdns_torrent_vals sgmii_100_no_ssc_rx_ln_vals = {
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_rx_ln_regs),
};
+/* TI J7200, multilink SGMII */
+static struct cdns_reg_pairs j7200_sgmii_100_no_ssc_tx_ln_regs[] = {
+ {0x07A2, TX_RCVDET_ST_TMR},
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3 },
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x00B3, DRV_DIAG_TX_DRV},
+ {0x0002, XCVR_DIAG_PSC_OVRD},
+ {0x4000, XCVR_DIAG_RXCLK_CTRL}
+};
+
+static struct cdns_torrent_vals j7200_sgmii_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = j7200_sgmii_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(j7200_sgmii_100_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_reg_pairs j7200_sgmii_100_no_ssc_rx_ln_regs[] = {
+ {0x0014, RX_SDCAL0_INIT_TMR},
+ {0x0062, RX_SDCAL0_ITER_TMR},
+ {0x0014, RX_SDCAL1_INIT_TMR},
+ {0x0062, RX_SDCAL1_ITER_TMR},
+ {0x091D, RX_PSC_A0},
+ {0x0900, RX_PSC_A2},
+ {0x0100, RX_PSC_A3},
+ {0x03C7, RX_REE_GCSM1_EQENM_PH1},
+ {0x01C7, RX_REE_GCSM1_EQENM_PH2},
+ {0x0000, RX_DIAG_DFE_CTRL},
+ {0x0019, RX_REE_TAP1_CLIP},
+ {0x0019, RX_REE_TAP2TON_CLIP},
+ {0x0098, RX_DIAG_NQST_CTRL},
+ {0x0C01, RX_DIAG_DFE_AMP_TUNE_2},
+ {0x0000, RX_DIAG_DFE_AMP_TUNE_3},
+ {0x0000, RX_DIAG_PI_CAP},
+ {0x0010, RX_DIAG_PI_RATE},
+ {0x0001, RX_DIAG_ACYA},
+ {0x018C, RX_CDRLF_CNFG}
+};
+
+static struct cdns_torrent_vals j7200_sgmii_100_no_ssc_rx_ln_vals = {
+ .reg_pairs = j7200_sgmii_100_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(j7200_sgmii_100_no_ssc_rx_ln_regs),
+};
+
/* SGMII 100 MHz Ref clk, internal SSC */
static struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
@@ -3944,6 +4270,51 @@ static struct cdns_torrent_vals qsgmii_100_no_ssc_rx_ln_vals = {
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_rx_ln_regs),
};
+/* TI J7200, multilink QSGMII */
+static struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_tx_ln_regs[] = {
+ {0x07A2, TX_RCVDET_ST_TMR},
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3 },
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x0011, TX_TXCC_MGNFS_MULT_100},
+ {0x0003, DRV_DIAG_TX_DRV},
+ {0x0002, XCVR_DIAG_PSC_OVRD},
+ {0x4000, XCVR_DIAG_RXCLK_CTRL}
+};
+
+static struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = j7200_qsgmii_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(j7200_qsgmii_100_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_rx_ln_regs[] = {
+ {0x0014, RX_SDCAL0_INIT_TMR},
+ {0x0062, RX_SDCAL0_ITER_TMR},
+ {0x0014, RX_SDCAL1_INIT_TMR},
+ {0x0062, RX_SDCAL1_ITER_TMR},
+ {0x091D, RX_PSC_A0},
+ {0x0900, RX_PSC_A2},
+ {0x0100, RX_PSC_A3},
+ {0x03C7, RX_REE_GCSM1_EQENM_PH1},
+ {0x01C7, RX_REE_GCSM1_EQENM_PH2},
+ {0x0000, RX_DIAG_DFE_CTRL},
+ {0x0019, RX_REE_TAP1_CLIP},
+ {0x0019, RX_REE_TAP2TON_CLIP},
+ {0x0098, RX_DIAG_NQST_CTRL},
+ {0x0C01, RX_DIAG_DFE_AMP_TUNE_2},
+ {0x0000, RX_DIAG_DFE_AMP_TUNE_3},
+ {0x0000, RX_DIAG_PI_CAP},
+ {0x0010, RX_DIAG_PI_RATE},
+ {0x0001, RX_DIAG_ACYA},
+ {0x018C, RX_CDRLF_CNFG}
+};
+
+static struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_rx_ln_vals = {
+ .reg_pairs = j7200_qsgmii_100_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(j7200_qsgmii_100_no_ssc_rx_ln_regs),
+};
+
/* QSGMII 100 MHz Ref clk, internal SSC */
static struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
@@ -4166,14 +4537,17 @@ static struct cdns_torrent_vals_entry link_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_QSGMII), &pcie_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USB), &pcie_usb_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_DP), &pcie_dp_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USXGMII), &pcie_usxgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &pcie_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USB), &usb_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USXGMII), &usxgmii_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_NONE), &sl_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_PCIE), &pcie_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USB), &usb_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USXGMII), &usxgmii_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &sl_usb_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &pcie_usb_link_cmn_vals},
@@ -4182,6 +4556,9 @@ static struct cdns_torrent_vals_entry link_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_dp_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &sl_usxgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_PCIE), &pcie_usxgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_SGMII), &usxgmii_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_QSGMII), &usxgmii_sgmii_link_cmn_vals},
};
static struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = {
@@ -4194,14 +4571,17 @@ static struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_QSGMII), &pcie_sgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USB), &pcie_usb_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_DP), &pcie_dp_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USXGMII), &pcie_usxgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &sgmii_pcie_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USB), &sgmii_usb_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USXGMII), &sgmii_usxgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_NONE), &sl_sgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_PCIE), &sgmii_pcie_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USB), &sgmii_usb_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USXGMII), &sgmii_usxgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &sl_usb_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &usb_pcie_xcvr_diag_ln_vals},
@@ -4210,6 +4590,9 @@ static struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_dp_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &sl_usxgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_PCIE), &usxgmii_pcie_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_SGMII), &usxgmii_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_QSGMII), &usxgmii_sgmii_xcvr_diag_ln_vals},
};
static struct cdns_torrent_vals_entry pcs_cmn_vals_entries[] = {
@@ -4285,6 +4668,17 @@ static struct cdns_torrent_vals_entry cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &sl_usxgmii_156_25_no_ssc_cmn_vals},
+
+ /* Dual refclk */
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_PCIE, TYPE_USXGMII, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_SGMII, TYPE_USXGMII, NO_SSC), &ml_sgmii_pll1_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_QSGMII, TYPE_USXGMII, NO_SSC), &ml_sgmii_pll1_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_PCIE, NO_SSC), &ml_usxgmii_pll1_156_25_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_SGMII, NO_SSC), &ml_usxgmii_pll0_156_25_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &ml_usxgmii_pll0_156_25_no_ssc_cmn_vals},
};
static struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = {
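In the dual-refclk entries added above, the table key now carries both reference-clock rates, and an entry whose value pointer is NULL simply means the lookup succeeds but there are no extra register overrides to program for that link combination; the if (vals) guards in cdns_torrent_phy_configure_multilink() then skip the writes. Roughly:

	vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl,
					 ref_clk, ref_clk1, phy_t1, phy_t2, ssc);
	if (vals) {		/* NULL table entry: nothing to program */
		for (i = 0; i < vals->num_regs; i++)
			regmap_write(regmap, vals->reg_pairs[i].off,
				     vals->reg_pairs[i].val);
	}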
@@ -4352,6 +4746,17 @@ static struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals},
+
+ /* Dual refclk */
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_PCIE, TYPE_USXGMII, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_SGMII, TYPE_USXGMII, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_QSGMII, TYPE_USXGMII, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_PCIE, NO_SSC), &ml_usxgmii_156_25_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_SGMII, NO_SSC), &ml_usxgmii_156_25_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &ml_usxgmii_156_25_no_ssc_tx_ln_vals},
};
static struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = {
@@ -4419,6 +4824,17 @@ static struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_rx_ln_vals},
+
+ /* Dual refclk */
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_PCIE, TYPE_USXGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_SGMII, TYPE_USXGMII, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_QSGMII, TYPE_USXGMII, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_PCIE, NO_SSC), &ml_usxgmii_156_25_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_SGMII, NO_SSC), &ml_usxgmii_156_25_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &ml_usxgmii_156_25_no_ssc_rx_ln_vals},
};
static const struct cdns_torrent_data cdns_map_torrent = {
@@ -4452,6 +4868,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
static struct cdns_torrent_vals_entry j721e_phy_pma_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &ti_usxgmii_phy_pma_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_PCIE), &ti_usxgmii_phy_pma_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_SGMII), &ti_usxgmii_phy_pma_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_QSGMII), &ti_usxgmii_phy_pma_cmn_vals},
};
static struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = {
@@ -4519,6 +4938,17 @@ static struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals},
+
+ /* Dual refclk */
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_PCIE, TYPE_USXGMII, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_SGMII, TYPE_USXGMII, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_QSGMII, TYPE_USXGMII, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_PCIE, NO_SSC), &ml_usxgmii_156_25_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_SGMII, NO_SSC), &ml_usxgmii_156_25_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &ml_usxgmii_156_25_no_ssc_tx_ln_vals},
};
static const struct cdns_torrent_data ti_j721e_map_torrent = {
@@ -4554,6 +4984,274 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
},
};
+/* TI J7200 (Torrent SD0805) */
+static struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = {
+ {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &sl_dp_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &sl_pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sl_sgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &sl_qsgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &sl_usxgmii_156_25_no_ssc_cmn_vals},
+
+ /* Dual refclk */
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_PCIE, TYPE_USXGMII, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_SGMII, TYPE_USXGMII, NO_SSC), &j7200_ml_sgmii_pll1_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_QSGMII, TYPE_USXGMII, NO_SSC), &j7200_ml_sgmii_pll1_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_PCIE, NO_SSC), &ml_usxgmii_pll1_156_25_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_SGMII, NO_SSC), &j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_vals},
+};
+
+static struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = {
+ {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals},
+
+ /* Dual refclk */
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_PCIE, TYPE_USXGMII, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_SGMII, TYPE_USXGMII, NO_SSC), &j7200_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_QSGMII, TYPE_USXGMII, NO_SSC), &j7200_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_PCIE, NO_SSC), &ml_usxgmii_156_25_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_SGMII, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals},
+};
+
+static struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = {
+ {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &dp_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_rx_ln_vals},
+
+ /* Dual refclk */
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_PCIE, TYPE_USXGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_SGMII, TYPE_USXGMII, NO_SSC), &j7200_sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_QSGMII, TYPE_USXGMII, NO_SSC), &j7200_qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_PCIE, NO_SSC), &ml_usxgmii_156_25_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_SGMII, NO_SSC), &usxgmii_156_25_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &usxgmii_156_25_no_ssc_rx_ln_vals},
+};
+
+static const struct cdns_torrent_data ti_j7200_map_torrent = {
+ .block_offset_shift = 0x0,
+ .reg_offset_shift = 0x1,
+ .link_cmn_vals_tbl = {
+ .entries = link_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(link_cmn_vals_entries),
+ },
+ .xcvr_diag_vals_tbl = {
+ .entries = xcvr_diag_vals_entries,
+ .num_entries = ARRAY_SIZE(xcvr_diag_vals_entries),
+ },
+ .pcs_cmn_vals_tbl = {
+ .entries = pcs_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(pcs_cmn_vals_entries),
+ },
+ .phy_pma_cmn_vals_tbl = {
+ .entries = j721e_phy_pma_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(j721e_phy_pma_cmn_vals_entries),
+ },
+ .cmn_vals_tbl = {
+ .entries = ti_j7200_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(ti_j7200_cmn_vals_entries),
+ },
+ .tx_ln_vals_tbl = {
+ .entries = ti_j7200_tx_ln_vals_entries,
+ .num_entries = ARRAY_SIZE(ti_j7200_tx_ln_vals_entries),
+ },
+ .rx_ln_vals_tbl = {
+ .entries = ti_j7200_rx_ln_vals_entries,
+ .num_entries = ARRAY_SIZE(ti_j7200_rx_ln_vals_entries),
+ },
+};
+
static const struct of_device_id cdns_torrent_phy_of_match[] = {
{
.compatible = "cdns,torrent-phy",
@@ -4563,6 +5261,10 @@ static const struct of_device_id cdns_torrent_phy_of_match[] = {
.compatible = "ti,j721e-serdes-10g",
.data = &ti_j721e_map_torrent,
},
+ {
+ .compatible = "ti,j7200-serdes-10g",
+ .data = &ti_j7200_map_torrent,
+ },
{}
};
MODULE_DEVICE_TABLE(of, cdns_torrent_phy_of_match);
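For context, each entry in the tables above is keyed by CDNS_TORRENT_KEY(refclk0, refclk1, type0, type1, ssc) and resolved at init time with a simple linear search. A minimal sketch of that lookup follows; the helper name is assumed for illustration, while the entry fields `key` and `vals` match the initializers used in the tables:

/* Illustrative only: resolve the vals entry matching a CDNS_TORRENT_KEY(). */
static const struct cdns_torrent_vals *
cdns_torrent_lookup_vals(const struct cdns_torrent_vals_entry *entries,
			 int num_entries, u32 key)
{
	int i;

	for (i = 0; i < num_entries; i++) {
		if (entries[i].key == key)
			return entries[i].vals;
	}

	/* A NULL vals pointer (or a lookup miss) means no extra register writes. */
	return NULL;
}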
diff --git a/drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c b/drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c
index 0ae052df37655..38388dd04bdc1 100644
--- a/drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c
+++ b/drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c
@@ -294,7 +294,7 @@ static int mixel_lvds_phy_reset(struct device *dev)
}
static struct phy *mixel_lvds_phy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct mixel_lvds_phy_priv *priv = dev_get_drvdata(dev);
unsigned int phy_id;
diff --git a/drivers/phy/freescale/phy-fsl-lynx-28g.c b/drivers/phy/freescale/phy-fsl-lynx-28g.c
index e2187767ce00c..b86da8e9daa46 100644
--- a/drivers/phy/freescale/phy-fsl-lynx-28g.c
+++ b/drivers/phy/freescale/phy-fsl-lynx-28g.c
@@ -556,7 +556,7 @@ static void lynx_28g_lane_read_configuration(struct lynx_28g_lane *lane)
}
static struct phy *lynx_28g_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct lynx_28g_priv *priv = dev_get_drvdata(dev);
int idx = args->args[0];
diff --git a/drivers/phy/hisilicon/phy-histb-combphy.c b/drivers/phy/hisilicon/phy-histb-combphy.c
index c44588fd5a53e..7436dcae39818 100644
--- a/drivers/phy/hisilicon/phy-histb-combphy.c
+++ b/drivers/phy/hisilicon/phy-histb-combphy.c
@@ -163,7 +163,7 @@ static const struct phy_ops histb_combphy_ops = {
};
static struct phy *histb_combphy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct histb_combphy_priv *priv = dev_get_drvdata(dev);
struct histb_combphy_mode *mode = &priv->mode;
diff --git a/drivers/phy/intel/phy-intel-lgm-combo.c b/drivers/phy/intel/phy-intel-lgm-combo.c
index d32e267c0001d..f8e3054a9e597 100644
--- a/drivers/phy/intel/phy-intel-lgm-combo.c
+++ b/drivers/phy/intel/phy-intel-lgm-combo.c
@@ -508,7 +508,7 @@ static const struct phy_ops intel_cbphy_ops = {
};
static struct phy *intel_cbphy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct intel_combo_phy *cbphy = dev_get_drvdata(dev);
u32 iphy_id;
diff --git a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
index ef93bf2cba105..406a87c8b7599 100644
--- a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
+++ b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
@@ -358,7 +358,7 @@ static const struct phy_ops ltq_vrx200_pcie_phy_ops = {
};
static struct phy *ltq_vrx200_pcie_phy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct ltq_vrx200_pcie_phy_priv *priv = dev_get_drvdata(dev);
unsigned int mode;
diff --git a/drivers/phy/marvell/phy-armada375-usb2.c b/drivers/phy/marvell/phy-armada375-usb2.c
index b141e3cd8a941..3731f9b25655e 100644
--- a/drivers/phy/marvell/phy-armada375-usb2.c
+++ b/drivers/phy/marvell/phy-armada375-usb2.c
@@ -61,7 +61,7 @@ static const struct phy_ops armada375_usb_phy_ops = {
* USB3 case it still optional and we use ENODEV.
*/
static struct phy *armada375_usb_phy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct armada375_cluster_phy *cluster_phy = dev_get_drvdata(dev);
diff --git a/drivers/phy/marvell/phy-armada38x-comphy.c b/drivers/phy/marvell/phy-armada38x-comphy.c
index b7d99861526a5..5063361b01208 100644
--- a/drivers/phy/marvell/phy-armada38x-comphy.c
+++ b/drivers/phy/marvell/phy-armada38x-comphy.c
@@ -47,8 +47,13 @@ struct a38x_comphy {
struct a38x_comphy_lane lane[MAX_A38X_COMPHY];
};
+/*
+ * Map serdes lanes and gbe ports to serdes mux configuration values:
+ * row index = serdes lane,
+ * column index = gbe port number.
+ */
static const u8 gbe_mux[MAX_A38X_COMPHY][MAX_A38X_PORTS] = {
- { 0, 0, 0 },
+ { 3, 0, 0 },
{ 4, 5, 0 },
{ 0, 4, 0 },
{ 0, 0, 4 },
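For reference, a minimal sketch of how this mux table could be consulted; the helper below is illustrative only and not part of the patch, and it assumes a zero entry means the lane cannot serve that port, which is consistent with the fix above:

/* Hypothetical helper: return the serdes mux value for a lane/port pair. */
static int a38x_comphy_gbe_mux_lookup(unsigned int lane, unsigned int port)
{
	if (lane >= MAX_A38X_COMPHY || port >= MAX_A38X_PORTS)
		return -EINVAL;

	/* Zero means this lane cannot be muxed to this gbe port. */
	return gbe_mux[lane][port] ?: -EINVAL;
}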
@@ -155,7 +160,7 @@ static const struct phy_ops a38x_comphy_ops = {
};
static struct phy *a38x_comphy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct a38x_comphy_lane *lane;
struct phy *phy;
diff --git a/drivers/phy/marvell/phy-berlin-sata.c b/drivers/phy/marvell/phy-berlin-sata.c
index f972d78372eaf..c90e2867900c3 100644
--- a/drivers/phy/marvell/phy-berlin-sata.c
+++ b/drivers/phy/marvell/phy-berlin-sata.c
@@ -155,7 +155,7 @@ static int phy_berlin_sata_power_off(struct phy *phy)
}
static struct phy *phy_berlin_sata_phy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct phy_berlin_priv *priv = dev_get_drvdata(dev);
int i;
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
index 24c3371e2bb29..41162d7228c91 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
@@ -1213,7 +1213,7 @@ static const struct phy_ops mvebu_a3700_comphy_ops = {
};
static struct phy *mvebu_a3700_comphy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct mvebu_a3700_comphy_lane *lane;
unsigned int port;
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index b0dd133665986..da5e8f4057490 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -917,7 +917,7 @@ static const struct phy_ops mvebu_comphy_ops = {
};
static struct phy *mvebu_comphy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct mvebu_comphy_lane *lane;
struct phy *phy;
diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig
index 3125ecb5d119f..3849b7c87d287 100644
--- a/drivers/phy/mediatek/Kconfig
+++ b/drivers/phy/mediatek/Kconfig
@@ -58,6 +58,18 @@ config PHY_MTK_HDMI
help
Support HDMI PHY for Mediatek SoCs.
+config PHY_MTK_MIPI_CSI_0_5
+ tristate "MediaTek MIPI CSI CD-PHY v0.5 Driver"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on OF
+ select GENERIC_PHY
+ help
+ Enable this to support the MIPI CSI CD-PHY receiver version 0.5.
+	  The driver supports multiple CSI CD-PHY ports simultaneously.
+
+ To compile this driver as a module, choose M here: the
+ module will be called phy-mtk-mipi-csi-0-5.
+
config PHY_MTK_MIPI_DSI
tristate "MediaTek MIPI-DSI Driver"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/phy/mediatek/Makefile b/drivers/phy/mediatek/Makefile
index c9a50395533eb..f6e24a47e0815 100644
--- a/drivers/phy/mediatek/Makefile
+++ b/drivers/phy/mediatek/Makefile
@@ -15,6 +15,8 @@ phy-mtk-hdmi-drv-y += phy-mtk-hdmi-mt8173.o
phy-mtk-hdmi-drv-y += phy-mtk-hdmi-mt8195.o
obj-$(CONFIG_PHY_MTK_HDMI) += phy-mtk-hdmi-drv.o
+obj-$(CONFIG_PHY_MTK_MIPI_CSI_0_5) += phy-mtk-mipi-csi-0-5.o
+
phy-mtk-mipi-dsi-drv-y := phy-mtk-mipi-dsi.o
phy-mtk-mipi-dsi-drv-y += phy-mtk-mipi-dsi-mt8173.o
phy-mtk-mipi-dsi-drv-y += phy-mtk-mipi-dsi-mt8183.o
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-csi-0-5-rx-reg.h b/drivers/phy/mediatek/phy-mtk-mipi-csi-0-5-rx-reg.h
new file mode 100644
index 0000000000000..97b4c27a16999
--- /dev/null
+++ b/drivers/phy/mediatek/phy-mtk-mipi-csi-0-5-rx-reg.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, MediaTek Inc.
+ * Copyright (c) 2023, BayLibre Inc.
+ */
+
+#ifndef __PHY_MTK_MIPI_CSI_V_0_5_RX_REG_H__
+#define __PHY_MTK_MIPI_CSI_V_0_5_RX_REG_H__
+
+/*
+ * CSI1 and CSI2 are identical, and similar to CSI0. All CSIX macros are
+ * applicable to the three PHYs. Where differences exist, they are denoted by
+ * macro names using CSI0 and CSI1, the latter being applicable to CSI1 and
+ * CSI2 alike.
+ */
+
+#define MIPI_RX_ANA00_CSIXA 0x0000
+#define RG_CSI0A_CPHY_EN BIT(0)
+#define RG_CSIXA_EQ_PROTECT_EN BIT(1)
+#define RG_CSIXA_BG_LPF_EN BIT(2)
+#define RG_CSIXA_BG_CORE_EN BIT(3)
+#define RG_CSIXA_DPHY_L0_CKMODE_EN BIT(5)
+#define RG_CSIXA_DPHY_L0_CKSEL BIT(6)
+#define RG_CSIXA_DPHY_L1_CKMODE_EN BIT(8)
+#define RG_CSIXA_DPHY_L1_CKSEL BIT(9)
+#define RG_CSIXA_DPHY_L2_CKMODE_EN BIT(11)
+#define RG_CSIXA_DPHY_L2_CKSEL BIT(12)
+
+#define MIPI_RX_ANA18_CSIXA 0x0018
+#define RG_CSI0A_L0_T0AB_EQ_IS GENMASK(5, 4)
+#define RG_CSI0A_L0_T0AB_EQ_BW GENMASK(7, 6)
+#define RG_CSI0A_L1_T1AB_EQ_IS GENMASK(21, 20)
+#define RG_CSI0A_L1_T1AB_EQ_BW GENMASK(23, 22)
+#define RG_CSI0A_L2_T1BC_EQ_IS GENMASK(21, 20)
+#define RG_CSI0A_L2_T1BC_EQ_BW GENMASK(23, 22)
+#define RG_CSI1A_L0_EQ_IS GENMASK(5, 4)
+#define RG_CSI1A_L0_EQ_BW GENMASK(7, 6)
+#define RG_CSI1A_L1_EQ_IS GENMASK(21, 20)
+#define RG_CSI1A_L1_EQ_BW GENMASK(23, 22)
+#define RG_CSI1A_L2_EQ_IS GENMASK(5, 4)
+#define RG_CSI1A_L2_EQ_BW GENMASK(7, 6)
+
+#define MIPI_RX_ANA1C_CSIXA 0x001c
+#define MIPI_RX_ANA20_CSI0A 0x0020
+
+#define MIPI_RX_ANA24_CSIXA 0x0024
+#define RG_CSIXA_RESERVE GENMASK(31, 24)
+
+#define MIPI_RX_ANA40_CSIXA 0x0040
+#define RG_CSIXA_CPHY_FMCK_SEL GENMASK(1, 0)
+#define RG_CSIXA_ASYNC_OPTION GENMASK(7, 4)
+#define RG_CSIXA_CPHY_SPARE GENMASK(31, 16)
+
+#define MIPI_RX_WRAPPER80_CSIXA 0x0080
+#define CSR_CSI_RST_MODE GENMASK(17, 16)
+
+#define MIPI_RX_ANAA8_CSIXA 0x00a8
+#define RG_CSIXA_CDPHY_L0_T0_BYTECK_INVERT BIT(0)
+#define RG_CSIXA_DPHY_L1_BYTECK_INVERT BIT(1)
+#define RG_CSIXA_CDPHY_L2_T1_BYTECK_INVERT BIT(2)
+
+#endif
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-csi-0-5.c b/drivers/phy/mediatek/phy-mtk-mipi-csi-0-5.c
new file mode 100644
index 0000000000000..058e1d9266309
--- /dev/null
+++ b/drivers/phy/mediatek/phy-mtk-mipi-csi-0-5.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MediaTek MIPI CSI v0.5 driver
+ *
+ * Copyright (c) 2023, MediaTek Inc.
+ * Copyright (c) 2023, BayLibre Inc.
+ */
+
+#include <dt-bindings/phy/phy.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "phy-mtk-io.h"
+#include "phy-mtk-mipi-csi-0-5-rx-reg.h"
+
+#define CSIXB_OFFSET 0x1000
+
+struct mtk_mipi_cdphy_port {
+ struct device *dev;
+ void __iomem *base;
+ struct phy *phy;
+ u32 type;
+ u32 mode;
+ u32 num_lanes;
+};
+
+enum PHY_TYPE {
+ DPHY = 0,
+ CPHY,
+ CDPHY,
+};
+
+static void mtk_phy_csi_cdphy_ana_eq_tune(void __iomem *base)
+{
+ mtk_phy_update_field(base + MIPI_RX_ANA18_CSIXA, RG_CSI0A_L0_T0AB_EQ_IS, 1);
+ mtk_phy_update_field(base + MIPI_RX_ANA18_CSIXA, RG_CSI0A_L0_T0AB_EQ_BW, 1);
+ mtk_phy_update_field(base + MIPI_RX_ANA1C_CSIXA, RG_CSI0A_L1_T1AB_EQ_IS, 1);
+ mtk_phy_update_field(base + MIPI_RX_ANA1C_CSIXA, RG_CSI0A_L1_T1AB_EQ_BW, 1);
+ mtk_phy_update_field(base + MIPI_RX_ANA20_CSI0A, RG_CSI0A_L2_T1BC_EQ_IS, 1);
+ mtk_phy_update_field(base + MIPI_RX_ANA20_CSI0A, RG_CSI0A_L2_T1BC_EQ_BW, 1);
+
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA18_CSIXA, RG_CSI0A_L0_T0AB_EQ_IS, 1);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA18_CSIXA, RG_CSI0A_L0_T0AB_EQ_BW, 1);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA1C_CSIXA, RG_CSI0A_L1_T1AB_EQ_IS, 1);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA1C_CSIXA, RG_CSI0A_L1_T1AB_EQ_BW, 1);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA20_CSI0A, RG_CSI0A_L2_T1BC_EQ_IS, 1);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA20_CSI0A, RG_CSI0A_L2_T1BC_EQ_BW, 1);
+}
+
+static void mtk_phy_csi_dphy_ana_eq_tune(void __iomem *base)
+{
+ mtk_phy_update_field(base + MIPI_RX_ANA18_CSIXA, RG_CSI1A_L0_EQ_IS, 1);
+ mtk_phy_update_field(base + MIPI_RX_ANA18_CSIXA, RG_CSI1A_L0_EQ_BW, 1);
+ mtk_phy_update_field(base + MIPI_RX_ANA18_CSIXA, RG_CSI1A_L1_EQ_IS, 1);
+ mtk_phy_update_field(base + MIPI_RX_ANA18_CSIXA, RG_CSI1A_L1_EQ_BW, 1);
+ mtk_phy_update_field(base + MIPI_RX_ANA1C_CSIXA, RG_CSI1A_L2_EQ_IS, 1);
+ mtk_phy_update_field(base + MIPI_RX_ANA1C_CSIXA, RG_CSI1A_L2_EQ_BW, 1);
+
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA18_CSIXA, RG_CSI1A_L0_EQ_IS, 1);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA18_CSIXA, RG_CSI1A_L0_EQ_BW, 1);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA18_CSIXA, RG_CSI1A_L1_EQ_IS, 1);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA18_CSIXA, RG_CSI1A_L1_EQ_BW, 1);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA1C_CSIXA, RG_CSI1A_L2_EQ_IS, 1);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA1C_CSIXA, RG_CSI1A_L2_EQ_BW, 1);
+}
+
+static int mtk_mipi_phy_power_on(struct phy *phy)
+{
+ struct mtk_mipi_cdphy_port *port = phy_get_drvdata(phy);
+ void __iomem *base = port->base;
+
+	/*
+	 * The driver currently supports both D-PHY and CD-PHY PHYs, but the
+	 * only mode implemented is D-PHY, so CD-PHY capable PHYs must be
+	 * configured in D-PHY mode.
+	 */
+ if (port->type == CDPHY) {
+ mtk_phy_update_field(base + MIPI_RX_ANA00_CSIXA, RG_CSI0A_CPHY_EN, 0);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA00_CSIXA,
+ RG_CSI0A_CPHY_EN, 0);
+ }
+
+ /*
+ * Lane configuration:
+ *
+	 * Only 4 data lanes + 1 clock lane are supported for now, with the
+	 * following mapping:
+ *
+ * CSIXA_LNR0 --> D2
+ * CSIXA_LNR1 --> D0
+ * CSIXA_LNR2 --> C
+ * CSIXB_LNR0 --> D1
+ * CSIXB_LNR1 --> D3
+ */
+ mtk_phy_update_field(base + MIPI_RX_ANA00_CSIXA, RG_CSIXA_DPHY_L0_CKMODE_EN, 0);
+ mtk_phy_update_field(base + MIPI_RX_ANA00_CSIXA, RG_CSIXA_DPHY_L0_CKSEL, 1);
+ mtk_phy_update_field(base + MIPI_RX_ANA00_CSIXA, RG_CSIXA_DPHY_L1_CKMODE_EN, 0);
+ mtk_phy_update_field(base + MIPI_RX_ANA00_CSIXA, RG_CSIXA_DPHY_L1_CKSEL, 1);
+ mtk_phy_update_field(base + MIPI_RX_ANA00_CSIXA, RG_CSIXA_DPHY_L2_CKMODE_EN, 1);
+ mtk_phy_update_field(base + MIPI_RX_ANA00_CSIXA, RG_CSIXA_DPHY_L2_CKSEL, 1);
+
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA00_CSIXA,
+ RG_CSIXA_DPHY_L0_CKMODE_EN, 0);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA00_CSIXA, RG_CSIXA_DPHY_L0_CKSEL, 1);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA00_CSIXA,
+ RG_CSIXA_DPHY_L1_CKMODE_EN, 0);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA00_CSIXA, RG_CSIXA_DPHY_L1_CKSEL, 1);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA00_CSIXA,
+ RG_CSIXA_DPHY_L2_CKMODE_EN, 0);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA00_CSIXA, RG_CSIXA_DPHY_L2_CKSEL, 1);
+
+ /* Byte clock invert */
+ mtk_phy_update_field(base + MIPI_RX_ANAA8_CSIXA, RG_CSIXA_CDPHY_L0_T0_BYTECK_INVERT, 1);
+ mtk_phy_update_field(base + MIPI_RX_ANAA8_CSIXA, RG_CSIXA_DPHY_L1_BYTECK_INVERT, 1);
+ mtk_phy_update_field(base + MIPI_RX_ANAA8_CSIXA, RG_CSIXA_CDPHY_L2_T1_BYTECK_INVERT, 1);
+
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANAA8_CSIXA,
+ RG_CSIXA_CDPHY_L0_T0_BYTECK_INVERT, 1);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANAA8_CSIXA,
+ RG_CSIXA_DPHY_L1_BYTECK_INVERT, 1);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANAA8_CSIXA,
+ RG_CSIXA_CDPHY_L2_T1_BYTECK_INVERT, 1);
+
+ /* Start ANA EQ tuning */
+ if (port->type == CDPHY)
+ mtk_phy_csi_cdphy_ana_eq_tune(base);
+ else
+ mtk_phy_csi_dphy_ana_eq_tune(base);
+
+ /* End ANA EQ tuning */
+ mtk_phy_set_bits(base + MIPI_RX_ANA40_CSIXA, 0x90);
+
+ mtk_phy_update_field(base + MIPI_RX_ANA24_CSIXA, RG_CSIXA_RESERVE, 0x40);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA24_CSIXA, RG_CSIXA_RESERVE, 0x40);
+ mtk_phy_update_field(base + MIPI_RX_WRAPPER80_CSIXA, CSR_CSI_RST_MODE, 0);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_WRAPPER80_CSIXA, CSR_CSI_RST_MODE, 0);
+ /* ANA power on */
+ mtk_phy_update_field(base + MIPI_RX_ANA00_CSIXA, RG_CSIXA_BG_CORE_EN, 1);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA00_CSIXA, RG_CSIXA_BG_CORE_EN, 1);
+ usleep_range(20, 40);
+ mtk_phy_update_field(base + MIPI_RX_ANA00_CSIXA, RG_CSIXA_BG_LPF_EN, 1);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA00_CSIXA, RG_CSIXA_BG_LPF_EN, 1);
+
+ return 0;
+}
+
+static int mtk_mipi_phy_power_off(struct phy *phy)
+{
+ struct mtk_mipi_cdphy_port *port = phy_get_drvdata(phy);
+ void __iomem *base = port->base;
+
+ /* Disable MIPI BG. */
+ mtk_phy_update_field(base + MIPI_RX_ANA00_CSIXA, RG_CSIXA_BG_CORE_EN, 0);
+ mtk_phy_update_field(base + MIPI_RX_ANA00_CSIXA, RG_CSIXA_BG_LPF_EN, 0);
+
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA00_CSIXA, RG_CSIXA_BG_CORE_EN, 0);
+ mtk_phy_update_field(base + CSIXB_OFFSET + MIPI_RX_ANA00_CSIXA, RG_CSIXA_BG_LPF_EN, 0);
+
+ return 0;
+}
+
+static struct phy *mtk_mipi_cdphy_xlate(struct device *dev,
+ const struct of_phandle_args *args)
+{
+ struct mtk_mipi_cdphy_port *priv = dev_get_drvdata(dev);
+
+	/*
+	 * If the PHY is a CD-PHY, the operating mode must be read from the
+	 * phandle arguments. For now only D-PHY mode is supported.
+	 */
+ if (priv->type == CDPHY) {
+ if (args->args_count != 1) {
+ dev_err(dev, "invalid number of arguments\n");
+ return ERR_PTR(-EINVAL);
+ }
+ switch (args->args[0]) {
+ case PHY_TYPE_DPHY:
+ priv->mode = DPHY;
+ if (priv->num_lanes != 4) {
+ dev_err(dev, "Only 4D1C mode is supported for now!\n");
+ return ERR_PTR(-EINVAL);
+ }
+ break;
+ default:
+ dev_err(dev, "Unsupported PHY type: %i\n", args->args[0]);
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ if (args->args_count) {
+ dev_err(dev, "invalid number of arguments\n");
+ return ERR_PTR(-EINVAL);
+ }
+ priv->mode = DPHY;
+ }
+
+ return priv->phy;
+}
+
+static const struct phy_ops mtk_cdphy_ops = {
+ .power_on = mtk_mipi_phy_power_on,
+ .power_off = mtk_mipi_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int mtk_mipi_cdphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct mtk_mipi_cdphy_port *port;
+ struct phy *phy;
+ int ret;
+ u32 phy_type;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, port);
+
+ port->dev = dev;
+
+ port->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(port->base))
+ return PTR_ERR(port->base);
+
+ ret = of_property_read_u32(dev->of_node, "num-lanes", &port->num_lanes);
+ if (ret) {
+ dev_err(dev, "Failed to read num-lanes property: %i\n", ret);
+ return ret;
+ }
+
+	/*
+	 * The phy-type property is optional; if it is not present, the PHY is
+	 * considered to be a CD-PHY.
+	 */
+ if (device_property_present(dev, "phy-type")) {
+ ret = of_property_read_u32(dev->of_node, "phy-type", &phy_type);
+ if (ret) {
+ dev_err(dev, "Failed to read phy-type property: %i\n", ret);
+ return ret;
+ }
+ switch (phy_type) {
+ case PHY_TYPE_DPHY:
+ port->type = DPHY;
+ break;
+ default:
+ dev_err(dev, "Unsupported PHY type: %i\n", phy_type);
+ return -EINVAL;
+ }
+ } else {
+ port->type = CDPHY;
+ }
+
+ phy = devm_phy_create(dev, NULL, &mtk_cdphy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "Failed to create PHY: %ld\n", PTR_ERR(phy));
+ return PTR_ERR(phy);
+ }
+
+ port->phy = phy;
+ phy_set_drvdata(phy, port);
+
+ phy_provider = devm_of_phy_provider_register(dev, mtk_mipi_cdphy_xlate);
+ if (IS_ERR(phy_provider)) {
+ dev_err(dev, "Failed to register PHY provider: %ld\n",
+ PTR_ERR(phy_provider));
+ return PTR_ERR(phy_provider);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id mtk_mipi_cdphy_of_match[] = {
+ { .compatible = "mediatek,mt8365-csi-rx" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mtk_mipi_cdphy_of_match);
+
+static struct platform_driver mipi_cdphy_pdrv = {
+ .probe = mtk_mipi_cdphy_probe,
+ .driver = {
+ .name = "mtk-mipi-csi-0-5",
+ .of_match_table = mtk_mipi_cdphy_of_match,
+ },
+};
+module_platform_driver(mipi_cdphy_pdrv);
+
+MODULE_DESCRIPTION("MediaTek MIPI CSI CD-PHY v0.5 Driver");
+MODULE_AUTHOR("Louis Kuo <louis.kuo@mediatek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index a4746f6cb8a18..25b86bbb9cec0 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -1467,7 +1467,7 @@ static int mtk_phy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
}
static struct phy *mtk_phy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct mtk_tphy *tphy = dev_get_drvdata(dev);
struct mtk_phy_instance *instance = NULL;
diff --git a/drivers/phy/mediatek/phy-mtk-xsphy.c b/drivers/phy/mediatek/phy-mtk-xsphy.c
index b222fbbd71d18..064fd09417275 100644
--- a/drivers/phy/mediatek/phy-mtk-xsphy.c
+++ b/drivers/phy/mediatek/phy-mtk-xsphy.c
@@ -378,7 +378,7 @@ static int mtk_phy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
}
static struct phy *mtk_phy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct mtk_xsphy *xsphy = dev_get_drvdata(dev);
struct xsphy_instance *inst = NULL;
diff --git a/drivers/phy/microchip/lan966x_serdes.c b/drivers/phy/microchip/lan966x_serdes.c
index b5ac2b7995e71..835e369cdfc5f 100644
--- a/drivers/phy/microchip/lan966x_serdes.c
+++ b/drivers/phy/microchip/lan966x_serdes.c
@@ -518,7 +518,7 @@ static const struct phy_ops serdes_ops = {
};
static struct phy *serdes_simple_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct serdes_ctrl *ctrl = dev_get_drvdata(dev);
unsigned int port, idx, i;
diff --git a/drivers/phy/microchip/sparx5_serdes.c b/drivers/phy/microchip/sparx5_serdes.c
index 01bd5ea620c5b..7cb85029fab39 100644
--- a/drivers/phy/microchip/sparx5_serdes.c
+++ b/drivers/phy/microchip/sparx5_serdes.c
@@ -2509,7 +2509,7 @@ static struct sparx5_serdes_io_resource sparx5_serdes_iomap[] = {
/* Client lookup function, uses serdes index */
static struct phy *sparx5_serdes_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct sparx5_serdes_private *priv = dev_get_drvdata(dev);
int idx;
diff --git a/drivers/phy/mscc/phy-ocelot-serdes.c b/drivers/phy/mscc/phy-ocelot-serdes.c
index d9443e865a780..1cd1b5db2ad7c 100644
--- a/drivers/phy/mscc/phy-ocelot-serdes.c
+++ b/drivers/phy/mscc/phy-ocelot-serdes.c
@@ -441,7 +441,7 @@ static const struct phy_ops serdes_ops = {
};
static struct phy *serdes_simple_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct serdes_ctrl *ctrl = dev_get_drvdata(dev);
unsigned int port, idx, i;
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index d9be6a4d53838..c5c8d70bc8533 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -490,6 +490,53 @@ int phy_calibrate(struct phy *phy)
EXPORT_SYMBOL_GPL(phy_calibrate);
/**
+ * phy_notify_connect() - phy connect notification
+ * @phy: the phy returned by phy_get()
+ * @port: the port index for connect
+ *
+ * If the PHY needs to be notified of a connect event on @port, this
+ * callback can be used.
+ *
+ * Returns: %0 if successful, a negative error code otherwise
+ */
+int phy_notify_connect(struct phy *phy, int port)
+{
+ int ret;
+
+ if (!phy || !phy->ops->connect)
+ return 0;
+
+ mutex_lock(&phy->mutex);
+ ret = phy->ops->connect(phy, port);
+ mutex_unlock(&phy->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_notify_connect);
+
+/**
+ * phy_notify_disconnect() - phy disconnect notification
+ * @phy: the phy returned by phy_get()
+ * @port: the port index for disconnect
+ *
+ * If the PHY needs to be notified of a disconnect event on @port, this
+ * callback can be used.
+ *
+ * Returns: %0 if successful, a negative error code otherwise
+ */
+int phy_notify_disconnect(struct phy *phy, int port)
+{
+ int ret;
+
+ if (!phy || !phy->ops->disconnect)
+ return 0;
+
+ mutex_lock(&phy->mutex);
+ ret = phy->ops->disconnect(phy, port);
+ mutex_unlock(&phy->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_notify_disconnect);
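As a usage sketch, a host-controller driver would typically forward port events to the PHY through the new notifiers; the surrounding functions here are illustrative and not taken from this patch:

/* Illustrative consumers of the new notifiers. */
static int example_port_connect(struct phy *phy, int port)
{
	/* Tell the PHY that a device appeared on @port before link bring-up. */
	return phy_notify_connect(phy, port);
}

static void example_port_disconnect(struct phy *phy, int port)
{
	/* Both helpers are no-ops when the PHY has no connect/disconnect op. */
	phy_notify_disconnect(phy, port);
}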
+
+/**
* phy_configure() - Changes the phy parameters
* @phy: the phy returned by phy_get()
* @opts: New configuration to apply
@@ -700,8 +747,8 @@ EXPORT_SYMBOL_GPL(devm_phy_put);
* should provide a custom of_xlate function that reads the *args* and returns
* the appropriate phy.
*/
-struct phy *of_phy_simple_xlate(struct device *dev, struct of_phandle_args
- *args)
+struct phy *of_phy_simple_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
struct phy *phy;
struct class_dev_iter iter;
@@ -1095,7 +1142,7 @@ EXPORT_SYMBOL_GPL(devm_phy_destroy);
struct phy_provider *__of_phy_provider_register(struct device *dev,
struct device_node *children, struct module *owner,
struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ const struct of_phandle_args *args))
{
struct phy_provider *phy_provider;
@@ -1158,7 +1205,7 @@ EXPORT_SYMBOL_GPL(__of_phy_provider_register);
struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
struct device_node *children, struct module *owner,
struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ const struct of_phandle_args *args))
{
struct phy_provider **ptr, *phy_provider;
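With the constified of_xlate callback that this series rolls out across providers (and that __devm_of_phy_provider_register now takes), a custom xlate for a multi-instance provider looks roughly like the sketch below; the private structure and its fields are assumed for illustration:

static struct phy *example_phy_xlate(struct device *dev,
				     const struct of_phandle_args *args)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	if (args->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (args->args[0] >= priv->num_phys)
		return ERR_PTR(-ENODEV);

	return priv->phys[args->args[0]];
}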
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
index 1f0f908323f0e..5007dc7a357cb 100644
--- a/drivers/phy/phy-xgene.c
+++ b/drivers/phy/phy-xgene.c
@@ -1611,7 +1611,7 @@ static const struct phy_ops xgene_phy_ops = {
};
static struct phy *xgene_phy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct xgene_phy_ctx *ctx = dev_get_drvdata(dev);
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index ffd609ac62336..eb60e950ad533 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
obj-$(CONFIG_PHY_QCOM_M31_USB) += phy-qcom-m31.o
obj-$(CONFIG_PHY_QCOM_PCIE2) += phy-qcom-pcie2.o
-obj-$(CONFIG_PHY_QCOM_QMP_COMBO) += phy-qcom-qmp-combo.o
+obj-$(CONFIG_PHY_QCOM_QMP_COMBO) += phy-qcom-qmp-combo.o phy-qcom-qmp-usbc.o
obj-$(CONFIG_PHY_QCOM_QMP_PCIE) += phy-qcom-qmp-pcie.o
obj-$(CONFIG_PHY_QCOM_QMP_PCIE_8996) += phy-qcom-qmp-pcie-msm8996.o
obj-$(CONFIG_PHY_QCOM_QMP_UFS) += phy-qcom-qmp-ufs.o
diff --git a/drivers/phy/qualcomm/phy-qcom-edp.c b/drivers/phy/qualcomm/phy-qcom-edp.c
index 8e5078304646e..9818d994c68b2 100644
--- a/drivers/phy/qualcomm/phy-qcom-edp.c
+++ b/drivers/phy/qualcomm/phy-qcom-edp.c
@@ -21,7 +21,8 @@
#include <dt-bindings/phy/phy.h>
-#include "phy-qcom-qmp.h"
+#include "phy-qcom-qmp-dp-phy.h"
+#include "phy-qcom-qmp-qserdes-com-v4.h"
/* EDP_PHY registers */
#define DP_PHY_CFG 0x0010
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 17c4ad7553a5e..7d585a4bbbba9 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -25,21 +25,21 @@
#include <dt-bindings/phy/phy-qcom-qmp.h>
+#include "phy-qcom-qmp-common.h"
+
#include "phy-qcom-qmp.h"
#include "phy-qcom-qmp-pcs-misc-v3.h"
#include "phy-qcom-qmp-pcs-usb-v4.h"
#include "phy-qcom-qmp-pcs-usb-v5.h"
#include "phy-qcom-qmp-pcs-usb-v6.h"
-/* QPHY_SW_RESET bit */
-#define SW_RESET BIT(0)
-/* QPHY_POWER_DOWN_CONTROL */
-#define SW_PWRDN BIT(0)
-/* QPHY_START_CONTROL bits */
-#define SERDES_START BIT(0)
-#define PCS_START BIT(1)
-/* QPHY_PCS_STATUS bit */
-#define PHYSTATUS BIT(6)
+#include "phy-qcom-qmp-dp-com-v3.h"
+
+#include "phy-qcom-qmp-dp-phy.h"
+#include "phy-qcom-qmp-dp-phy-v3.h"
+#include "phy-qcom-qmp-dp-phy-v4.h"
+#include "phy-qcom-qmp-dp-phy-v5.h"
+#include "phy-qcom-qmp-dp-phy-v6.h"
/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
/* DP PHY soft reset */
@@ -55,47 +55,12 @@
#define USB3_MODE BIT(0) /* enables USB3 mode */
#define DP_MODE BIT(1) /* enables DP mode */
-/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
-#define ARCVR_DTCT_EN BIT(0)
-#define ALFPS_DTCT_EN BIT(1)
-#define ARCVR_DTCT_EVENT_SEL BIT(4)
-
-/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
-#define IRQ_CLEAR BIT(0)
-
-/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
-#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
-
/* QPHY_V3_DP_COM_TYPEC_CTRL register bits */
#define SW_PORTSELECT_VAL BIT(0)
#define SW_PORTSELECT_MUX BIT(1)
#define PHY_INIT_COMPLETE_TIMEOUT 10000
-struct qmp_phy_init_tbl {
- unsigned int offset;
- unsigned int val;
- /*
- * mask of lanes for which this register is written
- * for cases when second lane needs different values
- */
- u8 lane_mask;
-};
-
-#define QMP_PHY_INIT_CFG(o, v) \
- { \
- .offset = o, \
- .val = v, \
- .lane_mask = 0xff, \
- }
-
-#define QMP_PHY_INIT_CFG_LANE(o, v, l) \
- { \
- .offset = o, \
- .val = v, \
- .lane_mask = l, \
- }
-
/* set of registers with offsets different per-PHY */
enum qphy_reg_layout {
/* PCS registers */
@@ -2031,55 +1996,29 @@ static const struct qmp_phy_cfg sm8550_usb3dpphy_cfg = {
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
};
-static void qmp_combo_configure_lane(void __iomem *base,
- const struct qmp_phy_init_tbl tbl[],
- int num,
- u8 lane_mask)
-{
- int i;
- const struct qmp_phy_init_tbl *t = tbl;
-
- if (!t)
- return;
-
- for (i = 0; i < num; i++, t++) {
- if (!(t->lane_mask & lane_mask))
- continue;
-
- writel(t->val, base + t->offset);
- }
-}
-
-static void qmp_combo_configure(void __iomem *base,
- const struct qmp_phy_init_tbl tbl[],
- int num)
-{
- qmp_combo_configure_lane(base, tbl, num, 0xff);
-}
-
static int qmp_combo_dp_serdes_init(struct qmp_combo *qmp)
{
const struct qmp_phy_cfg *cfg = qmp->cfg;
void __iomem *serdes = qmp->dp_serdes;
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
- qmp_combo_configure(serdes, cfg->dp_serdes_tbl, cfg->dp_serdes_tbl_num);
+ qmp_configure(serdes, cfg->dp_serdes_tbl, cfg->dp_serdes_tbl_num);
switch (dp_opts->link_rate) {
case 1620:
- qmp_combo_configure(serdes, cfg->serdes_tbl_rbr,
+ qmp_configure(serdes, cfg->serdes_tbl_rbr,
cfg->serdes_tbl_rbr_num);
break;
case 2700:
- qmp_combo_configure(serdes, cfg->serdes_tbl_hbr,
+ qmp_configure(serdes, cfg->serdes_tbl_hbr,
cfg->serdes_tbl_hbr_num);
break;
case 5400:
- qmp_combo_configure(serdes, cfg->serdes_tbl_hbr2,
+ qmp_configure(serdes, cfg->serdes_tbl_hbr2,
cfg->serdes_tbl_hbr2_num);
break;
case 8100:
- qmp_combo_configure(serdes, cfg->serdes_tbl_hbr3,
+ qmp_configure(serdes, cfg->serdes_tbl_hbr3,
cfg->serdes_tbl_hbr3_num);
break;
default:
@@ -2370,7 +2309,7 @@ static int qmp_v456_configure_dp_phy(struct qmp_combo *qmp)
u32 status;
int ret;
- writel(0x0f, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_CFG_1);
+ writel(0x0f, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG_1);
qmp_combo_configure_dp_mode(qmp);
@@ -2681,8 +2620,8 @@ static int qmp_combo_dp_power_on(struct phy *phy)
qmp_combo_dp_serdes_init(qmp);
- qmp_combo_configure_lane(tx, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 1);
- qmp_combo_configure_lane(tx2, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 2);
+ qmp_configure_lane(tx, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 1);
+ qmp_configure_lane(tx2, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 2);
/* Configure special DP tx tunings */
cfg->configure_dp_tx(qmp);
@@ -2724,7 +2663,7 @@ static int qmp_combo_usb_power_on(struct phy *phy)
unsigned int val;
int ret;
- qmp_combo_configure(serdes, cfg->serdes_tbl, cfg->serdes_tbl_num);
+ qmp_configure(serdes, cfg->serdes_tbl, cfg->serdes_tbl_num);
ret = clk_prepare_enable(qmp->pipe_clk);
if (ret) {
@@ -2733,16 +2672,16 @@ static int qmp_combo_usb_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qmp_combo_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
- qmp_combo_configure_lane(tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ qmp_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_configure_lane(tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2);
- qmp_combo_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
- qmp_combo_configure_lane(rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2);
+ qmp_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ qmp_configure_lane(rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2);
- qmp_combo_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ qmp_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
if (pcs_usb)
- qmp_combo_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num);
+ qmp_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num);
if (cfg->has_pwrdn_delay)
usleep_range(10, 20);
@@ -3515,7 +3454,7 @@ static int qmp_combo_parse_dt(struct qmp_combo *qmp)
return 0;
}
-static struct phy *qmp_combo_phy_xlate(struct device *dev, struct of_phandle_args *args)
+static struct phy *qmp_combo_phy_xlate(struct device *dev, const struct of_phandle_args *args)
{
struct qmp_combo *qmp = dev_get_drvdata(dev);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-common.h b/drivers/phy/qualcomm/phy-qcom-qmp-common.h
new file mode 100644
index 0000000000000..7993842105093
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-common.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_COMMON_H_
+#define QCOM_PHY_QMP_COMMON_H_
+
+struct qmp_phy_init_tbl {
+ unsigned int offset;
+ unsigned int val;
+ /*
+ * mask of lanes for which this register is written
+ * for cases when second lane needs different values
+ */
+ u8 lane_mask;
+};
+
+#define QMP_PHY_INIT_CFG(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = 0xff, \
+ }
+
+#define QMP_PHY_INIT_CFG_LANE(o, v, l) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = l, \
+ }
+
+static inline void qmp_configure_lane(void __iomem *base,
+ const struct qmp_phy_init_tbl tbl[],
+ int num,
+ u8 lane_mask)
+{
+ int i;
+ const struct qmp_phy_init_tbl *t = tbl;
+
+ if (!t)
+ return;
+
+ for (i = 0; i < num; i++, t++) {
+ if (!(t->lane_mask & lane_mask))
+ continue;
+
+ writel(t->val, base + t->offset);
+ }
+}
+
+static inline void qmp_configure(void __iomem *base,
+ const struct qmp_phy_init_tbl tbl[],
+ int num)
+{
+ qmp_configure_lane(base, tbl, num, 0xff);
+}
+
+#endif
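A minimal sketch of how the shared helpers are consumed by the per-PHY drivers; the table offsets and values below are placeholders rather than a real init sequence:

/* Placeholder init table: per-lane overrides use QMP_PHY_INIT_CFG_LANE(). */
static const struct qmp_phy_init_tbl example_tx_tbl[] = {
	QMP_PHY_INIT_CFG(0x0000, 0x1c),			/* both lanes */
	QMP_PHY_INIT_CFG_LANE(0x0004, 0x0f, 1),		/* lane 1 only */
	QMP_PHY_INIT_CFG_LANE(0x0004, 0x03, 2),		/* lane 2 only */
};

static void example_power_on(void __iomem *tx, void __iomem *tx2)
{
	qmp_configure_lane(tx, example_tx_tbl, ARRAY_SIZE(example_tx_tbl), 1);
	qmp_configure_lane(tx2, example_tx_tbl, ARRAY_SIZE(example_tx_tbl), 2);
}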
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-dp-com-v3.h b/drivers/phy/qualcomm/phy-qcom-qmp-dp-com-v3.h
new file mode 100644
index 0000000000000..396179ef38b0d
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-dp-com-v3.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_DP_COM_V3_H_
+#define QCOM_PHY_QMP_DP_COM_V3_H_
+
+/* Only for QMP V3 & V4 PHY - DP COM registers */
+#define QPHY_V3_DP_COM_PHY_MODE_CTRL 0x00
+#define QPHY_V3_DP_COM_SW_RESET 0x04
+#define QPHY_V3_DP_COM_POWER_DOWN_CTRL 0x08
+#define QPHY_V3_DP_COM_SWI_CTRL 0x0c
+#define QPHY_V3_DP_COM_TYPEC_CTRL 0x10
+#define QPHY_V3_DP_COM_TYPEC_PWRDN_CTRL 0x14
+#define QPHY_V3_DP_COM_RESET_OVRD_CTRL 0x1c
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v3.h b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v3.h
new file mode 100644
index 0000000000000..00a9702abccd9
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v3.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_DP_PHY_V3_H_
+#define QCOM_PHY_QMP_DP_PHY_V3_H_
+
+/* Only for QMP V3 PHY - DP PHY registers */
+#define QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK 0x048
+#define QSERDES_V3_DP_PHY_AUX_INTERRUPT_CLEAR 0x04c
+#define QSERDES_V3_DP_PHY_AUX_BIST_CFG 0x050
+
+#define QSERDES_V3_DP_PHY_VCO_DIV 0x064
+#define QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL 0x06c
+#define QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL 0x088
+
+#define QSERDES_V3_DP_PHY_SPARE0 0x0ac
+#define QSERDES_V3_DP_PHY_STATUS 0x0c0
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v4.h b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v4.h
new file mode 100644
index 0000000000000..ed6795e1257cb
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v4.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_DP_PHY_V4_H_
+#define QCOM_PHY_QMP_DP_PHY_V4_H_
+
+/* Only for QMP V4 PHY - DP PHY registers */
+#define QSERDES_V4_DP_PHY_AUX_INTERRUPT_MASK 0x054
+#define QSERDES_V4_DP_PHY_AUX_INTERRUPT_CLEAR 0x058
+#define QSERDES_V4_DP_PHY_VCO_DIV 0x070
+#define QSERDES_V4_DP_PHY_TX0_TX1_LANE_CTL 0x078
+#define QSERDES_V4_DP_PHY_TX2_TX3_LANE_CTL 0x09c
+#define QSERDES_V4_DP_PHY_SPARE0 0x0c8
+#define QSERDES_V4_DP_PHY_AUX_INTERRUPT_STATUS 0x0d8
+#define QSERDES_V4_DP_PHY_STATUS 0x0dc
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h
new file mode 100644
index 0000000000000..f5cfacf9be964
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_DP_PHY_V5_H_
+#define QCOM_PHY_QMP_DP_PHY_V5_H_
+
+/* Only for QMP V5 PHY - DP PHY registers */
+#define QSERDES_V5_DP_PHY_AUX_INTERRUPT_STATUS 0x0d8
+#define QSERDES_V5_DP_PHY_STATUS 0x0dc
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h
new file mode 100644
index 0000000000000..01a20d3be4b81
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_DP_PHY_V6_H_
+#define QCOM_PHY_QMP_DP_PHY_V6_H_
+
+/* Only for QMP V6 PHY - DP PHY registers */
+#define QSERDES_V6_DP_PHY_AUX_INTERRUPT_STATUS 0x0e0
+#define QSERDES_V6_DP_PHY_STATUS 0x0e4
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy.h b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy.h
new file mode 100644
index 0000000000000..0ebd405bcaf0c
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_DP_PHY_H_
+#define QCOM_PHY_QMP_DP_PHY_H_
+
+/* QMP PHY - DP PHY registers */
+#define QSERDES_DP_PHY_REVISION_ID0 0x000
+#define QSERDES_DP_PHY_REVISION_ID1 0x004
+#define QSERDES_DP_PHY_REVISION_ID2 0x008
+#define QSERDES_DP_PHY_REVISION_ID3 0x00c
+#define QSERDES_DP_PHY_CFG 0x010
+#define QSERDES_DP_PHY_CFG_1 0x014
+#define QSERDES_DP_PHY_PD_CTL 0x018
+#define QSERDES_DP_PHY_MODE 0x01c
+#define QSERDES_DP_PHY_AUX_CFG0 0x020
+#define QSERDES_DP_PHY_AUX_CFG1 0x024
+#define QSERDES_DP_PHY_AUX_CFG2 0x028
+#define QSERDES_DP_PHY_AUX_CFG3 0x02c
+#define QSERDES_DP_PHY_AUX_CFG4 0x030
+#define QSERDES_DP_PHY_AUX_CFG5 0x034
+#define QSERDES_DP_PHY_AUX_CFG6 0x038
+#define QSERDES_DP_PHY_AUX_CFG7 0x03c
+#define QSERDES_DP_PHY_AUX_CFG8 0x040
+#define QSERDES_DP_PHY_AUX_CFG9 0x044
+
+/* QSERDES COM_BIAS_EN_CLKBUFLR_EN bits */
+# define QSERDES_V3_COM_BIAS_EN 0x0001
+# define QSERDES_V3_COM_BIAS_EN_MUX 0x0002
+# define QSERDES_V3_COM_CLKBUF_R_EN 0x0004
+# define QSERDES_V3_COM_CLKBUF_L_EN 0x0008
+# define QSERDES_V3_COM_EN_SYSCLK_TX_SEL 0x0010
+# define QSERDES_V3_COM_CLKBUF_RX_DRIVE_L 0x0020
+# define QSERDES_V3_COM_CLKBUF_RX_DRIVE_R 0x0040
+
+/* QPHY_TX_TX_EMP_POST1_LVL bits */
+# define DP_PHY_TXn_TX_EMP_POST1_LVL_MASK 0x001f
+# define DP_PHY_TXn_TX_EMP_POST1_LVL_MUX_EN 0x0020
+
+/* QPHY_TX_TX_DRV_LVL bits */
+# define DP_PHY_TXn_TX_DRV_LVL_MASK 0x001f
+# define DP_PHY_TXn_TX_DRV_LVL_MUX_EN 0x0020
+
+/* QSERDES_DP_PHY_PD_CTL bits */
+# define DP_PHY_PD_CTL_PWRDN 0x001
+# define DP_PHY_PD_CTL_PSR_PWRDN 0x002
+# define DP_PHY_PD_CTL_AUX_PWRDN 0x004
+# define DP_PHY_PD_CTL_LANE_0_1_PWRDN 0x008
+# define DP_PHY_PD_CTL_LANE_2_3_PWRDN 0x010
+# define DP_PHY_PD_CTL_PLL_PWRDN 0x020
+# define DP_PHY_PD_CTL_DP_CLAMP_EN 0x040
+
+/* QPHY_DP_PHY_AUX_INTERRUPT_STATUS bits */
+# define PHY_AUX_STOP_ERR_MASK 0x01
+# define PHY_AUX_DEC_ERR_MASK 0x02
+# define PHY_AUX_SYNC_ERR_MASK 0x04
+# define PHY_AUX_ALIGN_ERR_MASK 0x08
+# define PHY_AUX_REQ_ERR_MASK 0x10
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
index ab61a9c73b189..0442b31205638 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
@@ -19,19 +19,13 @@
#include <linux/reset.h>
#include <linux/slab.h>
+#include "phy-qcom-qmp-common.h"
+
#include "phy-qcom-qmp.h"
-/* QPHY_SW_RESET bit */
-#define SW_RESET BIT(0)
-/* QPHY_POWER_DOWN_CONTROL */
-#define SW_PWRDN BIT(0)
-#define REFCLK_DRV_DSBL BIT(1)
/* QPHY_START_CONTROL bits */
-#define SERDES_START BIT(0)
-#define PCS_START BIT(1)
#define PLL_READY_GATE_EN BIT(3)
-/* QPHY_PCS_STATUS bit */
-#define PHYSTATUS BIT(6)
+
/* QPHY_COM_PCS_READY_STATUS bit */
#define PCS_READY BIT(0)
@@ -39,30 +33,6 @@
#define POWER_DOWN_DELAY_US_MIN 10
#define POWER_DOWN_DELAY_US_MAX 20
-struct qmp_phy_init_tbl {
- unsigned int offset;
- unsigned int val;
- /*
- * mask of lanes for which this register is written
- * for cases when second lane needs different values
- */
- u8 lane_mask;
-};
-
-#define QMP_PHY_INIT_CFG(o, v) \
- { \
- .offset = o, \
- .val = v, \
- .lane_mask = 0xff, \
- }
-
-#define QMP_PHY_INIT_CFG_LANE(o, v, l) \
- { \
- .offset = o, \
- .val = v, \
- .lane_mask = l, \
- }
-
/* set of registers with offsets different per-PHY */
enum qphy_reg_layout {
/* Common block control registers */
@@ -307,32 +277,6 @@ static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
.regs = pciephy_regs_layout,
};
-static void qmp_pcie_msm8996_configure_lane(void __iomem *base,
- const struct qmp_phy_init_tbl tbl[],
- int num,
- u8 lane_mask)
-{
- int i;
- const struct qmp_phy_init_tbl *t = tbl;
-
- if (!t)
- return;
-
- for (i = 0; i < num; i++, t++) {
- if (!(t->lane_mask & lane_mask))
- continue;
-
- writel(t->val, base + t->offset);
- }
-}
-
-static void qmp_pcie_msm8996_configure(void __iomem *base,
- const struct qmp_phy_init_tbl tbl[],
- int num)
-{
- qmp_pcie_msm8996_configure_lane(base, tbl, num, 0xff);
-}
-
static int qmp_pcie_msm8996_serdes_init(struct qmp_phy *qphy)
{
struct qcom_qmp *qmp = qphy->qmp;
@@ -344,7 +288,7 @@ static int qmp_pcie_msm8996_serdes_init(struct qmp_phy *qphy)
unsigned int val;
int ret;
- qmp_pcie_msm8996_configure(serdes, serdes_tbl, serdes_tbl_num);
+ qmp_configure(serdes, serdes_tbl, serdes_tbl_num);
qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], SW_RESET);
qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
@@ -487,9 +431,9 @@ static int qmp_pcie_msm8996_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qmp_pcie_msm8996_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
- qmp_pcie_msm8996_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
- qmp_pcie_msm8996_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ qmp_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ qmp_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
/*
* Pull out PHY from POWER DOWN state.
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index 2af7115ef9689..8836bb1ff0cc1 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -22,6 +22,8 @@
#include <linux/reset.h>
#include <linux/slab.h>
+#include "phy-qcom-qmp-common.h"
+
#include "phy-qcom-qmp.h"
#include "phy-qcom-qmp-pcs-misc-v3.h"
#include "phy-qcom-qmp-pcs-pcie-v4.h"
@@ -32,44 +34,8 @@
#include "phy-qcom-qmp-pcs-pcie-v6_20.h"
#include "phy-qcom-qmp-pcie-qhp.h"
-/* QPHY_SW_RESET bit */
-#define SW_RESET BIT(0)
-/* QPHY_POWER_DOWN_CONTROL */
-#define SW_PWRDN BIT(0)
-#define REFCLK_DRV_DSBL BIT(1)
-/* QPHY_START_CONTROL bits */
-#define SERDES_START BIT(0)
-#define PCS_START BIT(1)
-/* QPHY_PCS_STATUS bit */
-#define PHYSTATUS BIT(6)
-#define PHYSTATUS_4_20 BIT(7)
-
#define PHY_INIT_COMPLETE_TIMEOUT 10000
-struct qmp_phy_init_tbl {
- unsigned int offset;
- unsigned int val;
- /*
- * mask of lanes for which this register is written
- * for cases when second lane needs different values
- */
- u8 lane_mask;
-};
-
-#define QMP_PHY_INIT_CFG(o, v) \
- { \
- .offset = o, \
- .val = v, \
- .lane_mask = 0xff, \
- }
-
-#define QMP_PHY_INIT_CFG_LANE(o, v, l) \
- { \
- .offset = o, \
- .val = v, \
- .lane_mask = l, \
- }
-
/* set of registers with offsets different per-PHY */
enum qphy_reg_layout {
/* PCS registers */
@@ -116,6 +82,13 @@ static const unsigned int pciephy_v5_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V5_PCS_POWER_DOWN_CONTROL,
};
+static const unsigned int pciephy_v6_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V6_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V6_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V6_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V6_PCS_POWER_DOWN_CONTROL,
+};
+
static const struct qmp_phy_init_tbl msm8998_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
@@ -982,6 +955,143 @@ static const struct qmp_phy_init_tbl sc8280xp_qmp_gen3x2_pcie_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
};
+static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE1, 0x26),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x68),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE1, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0xf8),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CORE_CLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x41),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_HS_SWITCH_SEL_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x62),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_POST_DIV_MUX, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYS_CLK_CTRL, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x46),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_CFG, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_SELECT, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_MISC_1, 0x88),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_MODE, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_VCO_DC_LEVEL_CTRL, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RXCLK_DIV2_CTRL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_DFE_DAC_ENABLE1, 0x88),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_TX_ADAPT_POST_THRESH1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_TX_ADAPT_POST_THRESH2, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B0, 0xd4),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B1, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B2, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B3, 0x9a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B4, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B5, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B6, 0x64),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH1_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH1_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH2_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH2_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH3_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH3_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH4_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH5_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH6_RATE3, 0x1f),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_RES_CODE_LANE_OFFSET_TX, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_RES_CODE_LANE_OFFSET_RX, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_LANE_MODE_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_LANE_MODE_2, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_LANE_MODE_3, 0x51),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_TRAN_DRVR_EMP_EN, 0x34),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_SO_GAIN_RATE_2, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_3, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_PI_CONTROLS, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_SO_ACC_DEFAULT_VAL_RATE3, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_IVCM_CAL_CTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_IVCM_POSTCAL_OFFSET, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_BKUP_CTRL1, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_3, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_VGA_CAL_MAN_VAL, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_GM_CAL, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_EQU_ADAPTOR_CNTRL4, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_PHPRE_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_Q_PI_INTRINSIC_BIAS_RATE32, 0x39),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B1, 0xb3),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B2, 0x58),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B3, 0x9a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B4, 0x26),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B5, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B6, 0xee),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B0, 0xe4),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B1, 0xa4),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B2, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B3, 0xdf),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B4, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B5, 0x76),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B6, 0xff),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_G3S2_PRE_GAIN, 0x2e),
+ QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_RX_SIGDET_LVL, 0xcc),
+ QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_EQ_CONFIG4, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_EQ_CONFIG5, 0x22),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_ENDPOINT_REFCLK_DRIVE, 0xc1),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_OSC_DTCT_ATCIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_EQ_CONFIG1, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_EQ_CONFIG5, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G4_PRE_GAIN, 0x2e),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_RX_MARGINING_CONFIG1, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_RX_MARGINING_CONFIG3, 0x28),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_TX_RX_CONFIG, 0xc0),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_POWER_STATE_CONFIG2, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_RX_MARGINING_CONFIG5, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G3_FOM_EQ_CONFIG5, 0xf2),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G4_FOM_EQ_CONFIG5, 0xf2),
+};
+
static const struct qmp_phy_init_tbl sm8250_qmp_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x08),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x34),
@@ -1747,7 +1857,7 @@ static const struct qmp_phy_init_tbl sm8550_qmp_gen3x2_pcie_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_10_HIGH2, 0x5b),
QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_10_HIGH3, 0x1a),
QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_10_HIGH4, 0x89),
- QMP_PHY_INIT_CFG(QSERDES_V6_RX_TX_ADAPT_POST_THRESH, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_TX_ADAPT_POST_THRESH, 0x00),
QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_FO_GAIN, 0x09),
QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SO_GAIN, 0x05),
QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SB2_THRESH1, 0x08),
@@ -1767,6 +1877,8 @@ static const struct qmp_phy_init_tbl sm8550_qmp_gen3x2_pcie_pcs_tbl[] = {
};
static const struct qmp_phy_init_tbl sm8550_qmp_gen3x2_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_EQ_CONFIG1, 0x1e),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_RXEQEVAL_TIME, 0x27),
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_POWER_STATE_CONFIG2, 0x1d),
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
@@ -1823,10 +1935,9 @@ static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_serdes_tbl[] = {
static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_ln_shrd_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RXCLK_DIV2_CTRL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_Q_EN_RATES, 0xe),
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_DFE_DAC_ENABLE1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_TX_ADAPT_POST_THRESH1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_TX_ADAPT_POST_THRESH2, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_TX_ADAPT_POST_THRESH1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_TX_ADAPT_POST_THRESH2, 0x0d),
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B0, 0x12),
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B1, 0x12),
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B2, 0xdb),
@@ -1843,6 +1954,7 @@ static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_ln_shrd_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH4_RATE3, 0x1f),
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH5_RATE3, 0x1f),
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH6_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_SUMMER_CAL_SPD_MODE, 0x5b),
};
static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_tx_tbl[] = {
@@ -1855,13 +1967,15 @@ static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_tx_tbl[] = {
};
static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_2, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_2, 0x0c),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_3, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_SO_GAIN_RATE_2, 0x04),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_PI_CONTROLS, 0x16),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_SO_ACC_DEFAULT_VAL_RATE3, 0x00),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_IVCM_CAL_CTRL2, 0x80),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_IVCM_POSTCAL_OFFSET, 0x7c),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_3, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_TX_ADPT_CTRL, 0x10),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_VGA_CAL_MAN_VAL, 0x0a),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_GM_CAL, 0x0d),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_EQU_ADAPTOR_CNTRL4, 0x0b),
@@ -1883,11 +1997,13 @@ static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B4, 0x78),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B5, 0x76),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B6, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_VGA_CAL_CNTRL1, 0x00),
};
static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_G12S1_TXDEEMPH_M6DB, 0x17),
QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_G3S2_PRE_GAIN, 0x2e),
- QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_COM_ELECIDLE_DLY_SEL, 0x25),
+ QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_RX_SIGDET_LVL, 0xcc),
QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_EQ_CONFIG4, 0x00),
QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_EQ_CONFIG5, 0x22),
QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_TX_RX_CONFIG1, 0x04),
@@ -1898,6 +2014,8 @@ static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_ENDPOINT_REFCLK_DRIVE, 0xc1),
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_OSC_DTCT_ATCIONS, 0x00),
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_EQ_CONFIG1, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G3_RXEQEVAL_TIME, 0x27),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G4_RXEQEVAL_TIME, 0x27),
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_EQ_CONFIG5, 0x02),
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G4_PRE_GAIN, 0x2e),
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_RX_MARGINING_CONFIG1, 0x03),
@@ -2936,7 +3054,7 @@ static const struct qmp_phy_cfg sdx65_qmp_pciephy_cfg = {
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = pciephy_v5_regs_layout,
+ .regs = pciephy_v6_regs_layout,
.pwrdn_ctrl = SW_PWRDN,
.phy_status = PHYSTATUS_4_20,
@@ -3069,7 +3187,7 @@ static const struct qmp_phy_cfg sm8550_qmp_gen4x2_pciephy_cfg = {
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = sm8550_qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(sm8550_qmp_phy_vreg_l),
- .regs = pciephy_v5_regs_layout,
+ .regs = pciephy_v6_regs_layout,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS_4_20,
@@ -3099,7 +3217,7 @@ static const struct qmp_phy_cfg sm8650_qmp_gen4x2_pciephy_cfg = {
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = sm8550_qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(sm8550_qmp_phy_vreg_l),
- .regs = pciephy_v5_regs_layout,
+ .regs = pciephy_v6_regs_layout,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS_4_20,
@@ -3183,31 +3301,35 @@ static const struct qmp_phy_cfg sa8775p_qmp_gen4x4_pciephy_cfg = {
.phy_status = PHYSTATUS_4_20,
};
-static void qmp_pcie_configure_lane(void __iomem *base,
- const struct qmp_phy_init_tbl tbl[],
- int num,
- u8 lane_mask)
-{
- int i;
- const struct qmp_phy_init_tbl *t = tbl;
-
- if (!t)
- return;
+static const struct qmp_phy_cfg x1e80100_qmp_gen4x2_pciephy_cfg = {
+ .lanes = 2,
- for (i = 0; i < num; i++, t++) {
- if (!(t->lane_mask & lane_mask))
- continue;
+ .offsets = &qmp_pcie_offsets_v6_20,
- writel(t->val, base + t->offset);
- }
-}
+ .tbls = {
+ .serdes = x1e80100_qmp_gen4x2_pcie_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_serdes_tbl),
+ .tx = x1e80100_qmp_gen4x2_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_tx_tbl),
+ .rx = x1e80100_qmp_gen4x2_pcie_rx_tbl,
+ .rx_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_rx_tbl),
+ .pcs = x1e80100_qmp_gen4x2_pcie_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_pcs_tbl),
+ .pcs_misc = x1e80100_qmp_gen4x2_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_pcs_misc_tbl),
+ .ln_shrd = x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl,
+ .ln_shrd_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl),
+ },
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = sm8550_qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8550_qmp_phy_vreg_l),
+ .regs = pciephy_v6_regs_layout,
-static void qmp_pcie_configure(void __iomem *base,
- const struct qmp_phy_init_tbl tbl[],
- int num)
-{
- qmp_pcie_configure_lane(base, tbl, num, 0xff);
-}
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS_4_20,
+ .has_nocsr_reset = true,
+};
static void qmp_pcie_init_port_b(struct qmp_pcie *qmp, const struct qmp_phy_cfg_tbls *tbls)
{
@@ -3220,11 +3342,11 @@ static void qmp_pcie_init_port_b(struct qmp_pcie *qmp, const struct qmp_phy_cfg_
tx4 = qmp->port_b + offs->tx2;
rx4 = qmp->port_b + offs->rx2;
- qmp_pcie_configure_lane(tx3, tbls->tx, tbls->tx_num, 1);
- qmp_pcie_configure_lane(rx3, tbls->rx, tbls->rx_num, 1);
+ qmp_configure_lane(tx3, tbls->tx, tbls->tx_num, 1);
+ qmp_configure_lane(rx3, tbls->rx, tbls->rx_num, 1);
- qmp_pcie_configure_lane(tx4, tbls->tx, tbls->tx_num, 2);
- qmp_pcie_configure_lane(rx4, tbls->rx, tbls->rx_num, 2);
+ qmp_configure_lane(tx4, tbls->tx, tbls->tx_num, 2);
+ qmp_configure_lane(rx4, tbls->rx, tbls->rx_num, 2);
}
static void qmp_pcie_init_registers(struct qmp_pcie *qmp, const struct qmp_phy_cfg_tbls *tbls)
@@ -3242,25 +3364,25 @@ static void qmp_pcie_init_registers(struct qmp_pcie *qmp, const struct qmp_phy_c
if (!tbls)
return;
- qmp_pcie_configure(serdes, tbls->serdes, tbls->serdes_num);
+ qmp_configure(serdes, tbls->serdes, tbls->serdes_num);
- qmp_pcie_configure_lane(tx, tbls->tx, tbls->tx_num, 1);
- qmp_pcie_configure_lane(rx, tbls->rx, tbls->rx_num, 1);
+ qmp_configure_lane(tx, tbls->tx, tbls->tx_num, 1);
+ qmp_configure_lane(rx, tbls->rx, tbls->rx_num, 1);
if (cfg->lanes >= 2) {
- qmp_pcie_configure_lane(tx2, tbls->tx, tbls->tx_num, 2);
- qmp_pcie_configure_lane(rx2, tbls->rx, tbls->rx_num, 2);
+ qmp_configure_lane(tx2, tbls->tx, tbls->tx_num, 2);
+ qmp_configure_lane(rx2, tbls->rx, tbls->rx_num, 2);
}
- qmp_pcie_configure(pcs, tbls->pcs, tbls->pcs_num);
- qmp_pcie_configure(pcs_misc, tbls->pcs_misc, tbls->pcs_misc_num);
+ qmp_configure(pcs, tbls->pcs, tbls->pcs_num);
+ qmp_configure(pcs_misc, tbls->pcs_misc, tbls->pcs_misc_num);
if (cfg->lanes >= 4 && qmp->tcsr_4ln_config) {
- qmp_pcie_configure(serdes, cfg->serdes_4ln_tbl, cfg->serdes_4ln_num);
+ qmp_configure(serdes, cfg->serdes_4ln_tbl, cfg->serdes_4ln_num);
qmp_pcie_init_port_b(qmp, tbls);
}
- qmp_pcie_configure(ln_shrd, tbls->ln_shrd, tbls->ln_shrd_num);
+ qmp_configure(ln_shrd, tbls->ln_shrd, tbls->ln_shrd_num);
}
static int qmp_pcie_init(struct phy *phy)
@@ -3885,6 +4007,12 @@ static const struct of_device_id qmp_pcie_of_match_table[] = {
}, {
.compatible = "qcom,sm8650-qmp-gen4x2-pcie-phy",
.data = &sm8650_qmp_gen4x2_pciephy_cfg,
+ }, {
+ .compatible = "qcom,x1e80100-qmp-gen3x2-pcie-phy",
+ .data = &sm8550_qmp_gen3x2_pciephy_cfg,
+ }, {
+ .compatible = "qcom,x1e80100-qmp-gen4x2-pcie-phy",
+ .data = &x1e80100_qmp_gen4x2_pciephy_cfg,
},
{ },
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h
index 91e70002eb477..0ca79333d9426 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h
@@ -7,6 +7,8 @@
#define QCOM_PHY_QMP_PCS_PCIE_V6_H_
/* Only for QMP V6 PHY - PCIE have different offsets than V5 */
+#define QPHY_PCIE_V6_PCS_PCIE_EQ_CONFIG1 0xa4
+#define QPHY_PCIE_V6_PCS_PCIE_RXEQEVAL_TIME 0xf4
#define QPHY_PCIE_V6_PCS_PCIE_POWER_STATE_CONFIG2 0x0c
#define QPHY_PCIE_V6_PCS_PCIE_POWER_STATE_CONFIG4 0x14
#define QPHY_PCIE_V6_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x20
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6_20.h
index e3eb08776339d..dfcecf31a6060 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6_20.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6_20.h
@@ -12,6 +12,8 @@
#define QPHY_PCIE_V6_20_PCS_ENDPOINT_REFCLK_DRIVE 0x01c
#define QPHY_PCIE_V6_20_PCS_OSC_DTCT_ATCIONS 0x090
#define QPHY_PCIE_V6_20_PCS_EQ_CONFIG1 0x0a0
+#define QPHY_PCIE_V6_20_PCS_G3_RXEQEVAL_TIME 0x0f0
+#define QPHY_PCIE_V6_20_PCS_G4_RXEQEVAL_TIME 0x0f4
#define QPHY_PCIE_V6_20_PCS_EQ_CONFIG5 0x108
#define QPHY_PCIE_V6_20_PCS_G4_PRE_GAIN 0x15c
#define QPHY_PCIE_V6_20_PCS_RX_MARGINING_CONFIG1 0x17c
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-sgmii.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-sgmii.h
new file mode 100644
index 0000000000000..4d8c962f5e0f5
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-sgmii.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_SGMII_H_
+#define QCOM_PHY_QMP_PCS_SGMII_H_
+
+#define QPHY_PCS_PHY_START 0x000
+#define QPHY_PCS_POWER_DOWN_CONTROL 0x004
+#define QPHY_PCS_SW_RESET 0x008
+#define QPHY_PCS_LINE_RESET_TIME 0x00c
+#define QPHY_PCS_TX_LARGE_AMP_DRV_LVL 0x020
+#define QPHY_PCS_TX_SMALL_AMP_DRV_LVL 0x028
+#define QPHY_PCS_PCS_READY_STATUS 0x094
+#define QPHY_PCS_TX_MID_TERM_CTRL1 0x0d8
+#define QPHY_PCS_TX_MID_TERM_CTRL2 0x0dc
+#define QPHY_PCS_SGMII_MISC_CTRL8 0x118
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h
index fe6c450f61238..970cc06678094 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h
@@ -19,6 +19,7 @@
#define QPHY_V6_PCS_UFS_BIST_FIXED_PAT_CTRL 0x060
#define QPHY_V6_PCS_UFS_TX_HSGEAR_CAPABILITY 0x074
#define QPHY_V6_PCS_UFS_RX_HSGEAR_CAPABILITY 0x0bc
+#define QPHY_V6_PCS_UFS_RX_HS_G5_SYNC_LENGTH_CAPABILITY 0x12c
#define QPHY_V6_PCS_UFS_DEBUG_BUS_CLKSEL 0x158
#define QPHY_V6_PCS_UFS_LINECFG_DISABLE 0x17c
#define QPHY_V6_PCS_UFS_RX_MIN_HIBERN8_TIME 0x184
@@ -28,5 +29,6 @@
#define QPHY_V6_PCS_UFS_READY_STATUS 0x1a8
#define QPHY_V6_PCS_UFS_TX_MID_TERM_CTRL1 0x1f4
#define QPHY_V6_PCS_UFS_MULTI_LANE_CTRL1 0x1fc
+#define QPHY_V6_PCS_UFS_RX_HSG5_SYNC_WAIT_TIME 0x220
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6_20.h
index 9c3f1e4950e6b..4d9615cc0383c 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6_20.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6_20.h
@@ -7,6 +7,7 @@
#define QCOM_PHY_QMP_PCS_V6_20_H_
/* Only for QMP V6_20 PHY - USB/PCIe PCS registers */
+#define QPHY_V6_20_PCS_G12S1_TXDEEMPH_M6DB 0x170
#define QPHY_V6_20_PCS_G3S2_PRE_GAIN 0x178
#define QPHY_V6_20_PCS_RX_SIGDET_LVL 0x190
#define QPHY_V6_20_PCS_COM_ELECIDLE_DLY_SEL 0x1b8
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h
index ec7291424dd1f..328c6c0b0b09a 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h
@@ -60,6 +60,8 @@
#define QSERDES_V6_COM_SYSCLK_BUF_ENABLE 0xe8
#define QSERDES_V6_COM_PLL_IVCO 0xf4
#define QSERDES_V6_COM_PLL_IVCO_MODE1 0xf8
+#define QSERDES_V6_COM_CMN_IETRIM 0xfc
+#define QSERDES_V6_COM_CMN_IPTRIM 0x100
#define QSERDES_V6_COM_SYSCLK_EN_SEL 0x110
#define QSERDES_V6_COM_RESETSM_CNTRL 0x118
#define QSERDES_V6_COM_LOCK_CMP_EN 0x120
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h
index 35d497fd9f9a4..d9a87bd955908 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h
@@ -15,13 +15,19 @@
#define QSERDES_UFS_V6_RX_UCDR_FASTLOCK_FO_GAIN_RATE2 0x08
#define QSERDES_UFS_V6_RX_UCDR_FASTLOCK_FO_GAIN_RATE4 0x10
+#define QSERDES_UFS_V6_RX_UCDR_FASTLOCK_SO_GAIN_RATE4 0x24
#define QSERDES_UFS_V6_RX_UCDR_SO_SATURATION 0x28
+#define QSERDES_UFS_V6_RX_UCDR_FASTLOCK_COUNT_HIGH_RATE4 0x54
#define QSERDES_UFS_V6_RX_UCDR_PI_CTRL1 0x58
#define QSERDES_UFS_V6_RX_RX_TERM_BW_CTRL0 0xc4
#define QSERDES_UFS_V6_RX_UCDR_FO_GAIN_RATE2 0xd4
#define QSERDES_UFS_V6_RX_UCDR_FO_GAIN_RATE4 0xdc
+#define QSERDES_UFS_V6_RX_UCDR_SO_GAIN_RATE4 0xf0
+#define QSERDES_UFS_V6_RX_UCDR_PI_CONTROLS 0xf4
#define QSERDES_UFS_V6_RX_VGA_CAL_MAN_VAL 0x178
+#define QSERDES_UFS_V6_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x1bc
#define QSERDES_UFS_V6_RX_INTERFACE_MODE 0x1e0
+#define QSERDES_UFS_V6_RX_OFFSET_ADAPTOR_CNTRL3 0x1c4
#define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B0 0x208
#define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B1 0x20c
#define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B3 0x214
@@ -33,6 +39,8 @@
#define QSERDES_UFS_V6_RX_MODE_RATE3_B5 0x264
#define QSERDES_UFS_V6_RX_MODE_RATE3_B8 0x270
#define QSERDES_UFS_V6_RX_MODE_RATE4_B3 0x280
+#define QSERDES_UFS_V6_RX_MODE_RATE4_B4 0x284
#define QSERDES_UFS_V6_RX_MODE_RATE4_B6 0x28c
+#define QSERDES_UFS_V6_RX_DLL0_FTUNE_CTRL 0x2f8
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_20.h
index 6ed5339fd2ea8..7bac5d5c6c344 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_20.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_20.h
@@ -23,6 +23,8 @@
#define QSERDES_V6_20_RX_DFE_1 0xac
#define QSERDES_V6_20_RX_DFE_2 0xb0
#define QSERDES_V6_20_RX_DFE_3 0xb4
+#define QSERDES_V6_20_RX_TX_ADPT_CTRL 0xd4
+#define QSERDES_V6_20_VGA_CAL_CNTRL1 0xe0
#define QSERDES_V6_20_RX_VGA_CAL_MAN_VAL 0xe8
#define QSERDES_V6_20_RX_GM_CAL 0x10c
#define QSERDES_V6_20_RX_EQU_ADAPTOR_CNTRL4 0x120
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
index 3c2e6255e26f6..590432d581f97 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -20,6 +20,9 @@
#include <linux/slab.h>
#include <ufs/unipro.h>
+
+#include "phy-qcom-qmp-common.h"
+
#include "phy-qcom-qmp.h"
#include "phy-qcom-qmp-pcs-ufs-v2.h"
#include "phy-qcom-qmp-pcs-ufs-v3.h"
@@ -29,41 +32,12 @@
#include "phy-qcom-qmp-qserdes-txrx-ufs-v6.h"
-/* QPHY_SW_RESET bit */
-#define SW_RESET BIT(0)
-/* QPHY_POWER_DOWN_CONTROL */
-#define SW_PWRDN BIT(0)
-/* QPHY_START_CONTROL bits */
-#define SERDES_START BIT(0)
-#define PCS_START BIT(1)
/* QPHY_PCS_READY_STATUS bit */
#define PCS_READY BIT(0)
#define PHY_INIT_COMPLETE_TIMEOUT 10000
-struct qmp_phy_init_tbl {
- unsigned int offset;
- unsigned int val;
- /*
- * mask of lanes for which this register is written
- * for cases when second lane needs different values
- */
- u8 lane_mask;
-};
-
-#define QMP_PHY_INIT_CFG(o, v) \
- { \
- .offset = o, \
- .val = v, \
- .lane_mask = 0xff, \
- }
-
-#define QMP_PHY_INIT_CFG_LANE(o, v, l) \
- { \
- .offset = o, \
- .val = v, \
- .lane_mask = l, \
- }
+#define NUM_OVERLAY 2
/* set of registers with offsets different per-PHY */
enum qphy_reg_layout {
@@ -754,15 +728,22 @@ static const struct qmp_phy_init_tbl sm8550_ufsphy_serdes[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x11),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_HS_SWITCH_SEL_1, 0x00),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x0f),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_INITVAL2, 0x00),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x41),
- QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x0a),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x18),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x14),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x7f),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x06),
+};
+
+static const struct qmp_phy_init_tbl sm8550_ufsphy_hs_b_serdes[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x44),
+};
+
+static const struct qmp_phy_init_tbl sm8550_ufsphy_g4_serdes[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x0a),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x4c),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x0a),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x18),
@@ -771,19 +752,24 @@ static const struct qmp_phy_init_tbl sm8550_ufsphy_serdes[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x07),
};
-static const struct qmp_phy_init_tbl sm8550_ufsphy_hs_b_serdes[] = {
- QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x44),
+static const struct qmp_phy_init_tbl sm8550_ufsphy_g5_serdes[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_IETRIM, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_IPTRIM, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x06),
};
static const struct qmp_phy_init_tbl sm8550_ufsphy_tx[] = {
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_TX_LANE_MODE_1, 0x05),
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_TX_RES_CODE_LANE_OFFSET_TX, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8550_ufsphy_g4_tx[] = {
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_TX_FR_DCC_CTRL, 0x4c),
};
static const struct qmp_phy_init_tbl sm8550_ufsphy_rx[] = {
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FO_GAIN_RATE2, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_VGA_CAL_MAN_VAL, 0x0e),
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B0, 0xc2),
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B1, 0xc2),
@@ -799,16 +785,45 @@ static const struct qmp_phy_init_tbl sm8550_ufsphy_rx[] = {
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE3_B8, 0x02),
};
+static const struct qmp_phy_init_tbl sm8550_ufsphy_g4_rx[] = {
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_VGA_CAL_MAN_VAL, 0x0e),
+};
+
+static const struct qmp_phy_init_tbl sm8550_ufsphy_g5_rx[] = {
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FO_GAIN_RATE4, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_SO_GAIN_RATE4, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_PI_CONTROLS, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_OFFSET_ADAPTOR_CNTRL3, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FASTLOCK_COUNT_HIGH_RATE4, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FASTLOCK_FO_GAIN_RATE4, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FASTLOCK_SO_GAIN_RATE4, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_VGA_CAL_MAN_VAL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE4_B3, 0xb9),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE4_B4, 0x4f),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE4_B6, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_DLL0_FTUNE_CTRL, 0x30),
+};
+
static const struct qmp_phy_init_tbl sm8550_ufsphy_pcs[] = {
QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_SIGDET_CTRL2, 0x69),
QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0f),
QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_MID_TERM_CTRL1, 0x43),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_PLL_CNTL, 0x2b),
QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sm8550_ufsphy_g4_pcs[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_PLL_CNTL, 0x2b),
QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_HSGEAR_CAPABILITY, 0x04),
QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_HSGEAR_CAPABILITY, 0x04),
};
+static const struct qmp_phy_init_tbl sm8550_ufsphy_g5_pcs[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_PLL_CNTL, 0x33),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_HS_G5_SYNC_LENGTH_CAPABILITY, 0x4f),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_HSG5_SYNC_WAIT_TIME, 0x9e),
+};
+
static const struct qmp_phy_init_tbl sm8650_ufsphy_serdes[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0xd9),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x16),
@@ -889,6 +904,8 @@ struct qmp_phy_cfg_tbls {
int rx_num;
const struct qmp_phy_init_tbl *pcs;
int pcs_num;
+	/* Maximum supported gear for this set of tables */
+ u32 max_gear;
};
/* struct qmp_phy_cfg - per-PHY initialization config */
@@ -896,17 +913,16 @@ struct qmp_phy_cfg {
int lanes;
const struct qmp_ufs_offsets *offsets;
+	/* Maximum gear supported by this config */
+ u32 max_supported_gear;
/* Main init sequence for PHY blocks - serdes, tx, rx, pcs */
const struct qmp_phy_cfg_tbls tbls;
/* Additional sequence for HS Series B */
const struct qmp_phy_cfg_tbls tbls_hs_b;
- /* Additional sequence for HS G4 */
- const struct qmp_phy_cfg_tbls tbls_hs_g4;
+ /* Additional sequence for different HS Gears */
+ const struct qmp_phy_cfg_tbls tbls_hs_overlay[NUM_OVERLAY];
- /* clock ids to be requested */
- const char * const *clk_list;
- int num_clks;
/* regulators to be requested */
const char * const *vreg_list;
int num_vregs;
@@ -932,6 +948,7 @@ struct qmp_ufs {
void __iomem *rx2;
struct clk_bulk_data *clks;
+ int num_clks;
struct regulator_bulk_data *vregs;
struct reset_control *ufs_reset;
@@ -964,20 +981,6 @@ static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
readl(base + offset);
}
-/* list of clocks required by phy */
-static const char * const msm8996_ufs_phy_clk_l[] = {
- "ref",
-};
-
-/* the primary usb3 phy on sm8250 doesn't have a ref clock */
-static const char * const sm8450_ufs_phy_clk_l[] = {
- "qref", "ref", "ref_aux",
-};
-
-static const char * const sdm845_ufs_phy_clk_l[] = {
- "ref", "ref_aux",
-};
-
/* list of regulators */
static const char * const qmp_phy_vreg_l[] = {
"vdda-phy", "vdda-pll",
@@ -1005,6 +1008,7 @@ static const struct qmp_phy_cfg msm8996_ufsphy_cfg = {
.lanes = 1,
.offsets = &qmp_ufs_offsets,
+ .max_supported_gear = UFS_HS_G3,
.tbls = {
.serdes = msm8996_ufsphy_serdes,
@@ -1015,9 +1019,6 @@ static const struct qmp_phy_cfg msm8996_ufsphy_cfg = {
.rx_num = ARRAY_SIZE(msm8996_ufsphy_rx),
},
- .clk_list = msm8996_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(msm8996_ufs_phy_clk_l),
-
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
@@ -1030,6 +1031,7 @@ static const struct qmp_phy_cfg sa8775p_ufsphy_cfg = {
.lanes = 2,
.offsets = &qmp_ufs_offsets,
+ .max_supported_gear = UFS_HS_G4,
.tbls = {
.serdes = sm8350_ufsphy_serdes,
@@ -1045,16 +1047,15 @@ static const struct qmp_phy_cfg sa8775p_ufsphy_cfg = {
.serdes = sm8350_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sm8350_ufsphy_hs_b_serdes),
},
- .tbls_hs_g4 = {
+ .tbls_hs_overlay[0] = {
.tx = sm8350_ufsphy_g4_tx,
.tx_num = ARRAY_SIZE(sm8350_ufsphy_g4_tx),
.rx = sm8350_ufsphy_g4_rx,
.rx_num = ARRAY_SIZE(sm8350_ufsphy_g4_rx),
.pcs = sm8350_ufsphy_g4_pcs,
.pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
+ .max_gear = UFS_HS_G4,
},
- .clk_list = sm8450_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(sm8450_ufs_phy_clk_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = ufsphy_v5_regs_layout,
@@ -1064,6 +1065,7 @@ static const struct qmp_phy_cfg sc7280_ufsphy_cfg = {
.lanes = 2,
.offsets = &qmp_ufs_offsets,
+ .max_supported_gear = UFS_HS_G4,
.tbls = {
.serdes = sm8150_ufsphy_serdes,
@@ -1079,16 +1081,15 @@ static const struct qmp_phy_cfg sc7280_ufsphy_cfg = {
.serdes = sm8150_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sm8150_ufsphy_hs_b_serdes),
},
- .tbls_hs_g4 = {
+ .tbls_hs_overlay[0] = {
.tx = sm8250_ufsphy_hs_g4_tx,
.tx_num = ARRAY_SIZE(sm8250_ufsphy_hs_g4_tx),
.rx = sc7280_ufsphy_hs_g4_rx,
.rx_num = ARRAY_SIZE(sc7280_ufsphy_hs_g4_rx),
.pcs = sm8150_ufsphy_hs_g4_pcs,
.pcs_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_pcs),
+ .max_gear = UFS_HS_G4,
},
- .clk_list = sm8450_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(sm8450_ufs_phy_clk_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = ufsphy_v4_regs_layout,
@@ -1098,6 +1099,7 @@ static const struct qmp_phy_cfg sc8280xp_ufsphy_cfg = {
.lanes = 2,
.offsets = &qmp_ufs_offsets,
+ .max_supported_gear = UFS_HS_G4,
.tbls = {
.serdes = sm8350_ufsphy_serdes,
@@ -1113,16 +1115,15 @@ static const struct qmp_phy_cfg sc8280xp_ufsphy_cfg = {
.serdes = sm8350_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sm8350_ufsphy_hs_b_serdes),
},
- .tbls_hs_g4 = {
+ .tbls_hs_overlay[0] = {
.tx = sm8350_ufsphy_g4_tx,
.tx_num = ARRAY_SIZE(sm8350_ufsphy_g4_tx),
.rx = sm8350_ufsphy_g4_rx,
.rx_num = ARRAY_SIZE(sm8350_ufsphy_g4_rx),
.pcs = sm8350_ufsphy_g4_pcs,
.pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
+ .max_gear = UFS_HS_G4,
},
- .clk_list = sdm845_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = ufsphy_v5_regs_layout,
@@ -1132,6 +1133,7 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
.lanes = 2,
.offsets = &qmp_ufs_offsets,
+ .max_supported_gear = UFS_HS_G3,
.tbls = {
.serdes = sdm845_ufsphy_serdes,
@@ -1147,8 +1149,6 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
.serdes = sdm845_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sdm845_ufsphy_hs_b_serdes),
},
- .clk_list = sdm845_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = ufsphy_v3_regs_layout,
@@ -1160,6 +1160,7 @@ static const struct qmp_phy_cfg sm6115_ufsphy_cfg = {
.lanes = 1,
.offsets = &qmp_ufs_offsets,
+ .max_supported_gear = UFS_HS_G3,
.tbls = {
.serdes = sm6115_ufsphy_serdes,
@@ -1175,8 +1176,6 @@ static const struct qmp_phy_cfg sm6115_ufsphy_cfg = {
.serdes = sm6115_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sm6115_ufsphy_hs_b_serdes),
},
- .clk_list = sdm845_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = ufsphy_v2_regs_layout,
@@ -1188,6 +1187,7 @@ static const struct qmp_phy_cfg sm7150_ufsphy_cfg = {
.lanes = 1,
.offsets = &qmp_ufs_offsets,
+ .max_supported_gear = UFS_HS_G3,
.tbls = {
.serdes = sdm845_ufsphy_serdes,
@@ -1203,8 +1203,6 @@ static const struct qmp_phy_cfg sm7150_ufsphy_cfg = {
.serdes = sdm845_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sdm845_ufsphy_hs_b_serdes),
},
- .clk_list = sdm845_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = ufsphy_v3_regs_layout,
@@ -1216,6 +1214,7 @@ static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
.lanes = 2,
.offsets = &qmp_ufs_offsets,
+ .max_supported_gear = UFS_HS_G4,
.tbls = {
.serdes = sm8150_ufsphy_serdes,
@@ -1231,16 +1230,15 @@ static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
.serdes = sm8150_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sm8150_ufsphy_hs_b_serdes),
},
- .tbls_hs_g4 = {
+ .tbls_hs_overlay[0] = {
.tx = sm8150_ufsphy_hs_g4_tx,
.tx_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_tx),
.rx = sm8150_ufsphy_hs_g4_rx,
.rx_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_rx),
.pcs = sm8150_ufsphy_hs_g4_pcs,
.pcs_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_pcs),
+ .max_gear = UFS_HS_G4,
},
- .clk_list = sdm845_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = ufsphy_v4_regs_layout,
@@ -1250,6 +1248,7 @@ static const struct qmp_phy_cfg sm8250_ufsphy_cfg = {
.lanes = 2,
.offsets = &qmp_ufs_offsets,
+ .max_supported_gear = UFS_HS_G4,
.tbls = {
.serdes = sm8150_ufsphy_serdes,
@@ -1265,16 +1264,15 @@ static const struct qmp_phy_cfg sm8250_ufsphy_cfg = {
.serdes = sm8150_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sm8150_ufsphy_hs_b_serdes),
},
- .tbls_hs_g4 = {
+ .tbls_hs_overlay[0] = {
.tx = sm8250_ufsphy_hs_g4_tx,
.tx_num = ARRAY_SIZE(sm8250_ufsphy_hs_g4_tx),
.rx = sm8250_ufsphy_hs_g4_rx,
.rx_num = ARRAY_SIZE(sm8250_ufsphy_hs_g4_rx),
.pcs = sm8150_ufsphy_hs_g4_pcs,
.pcs_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_pcs),
+ .max_gear = UFS_HS_G4,
},
- .clk_list = sdm845_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = ufsphy_v4_regs_layout,
@@ -1284,6 +1282,7 @@ static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
.lanes = 2,
.offsets = &qmp_ufs_offsets,
+ .max_supported_gear = UFS_HS_G4,
.tbls = {
.serdes = sm8350_ufsphy_serdes,
@@ -1299,16 +1298,15 @@ static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
.serdes = sm8350_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sm8350_ufsphy_hs_b_serdes),
},
- .tbls_hs_g4 = {
+ .tbls_hs_overlay[0] = {
.tx = sm8350_ufsphy_g4_tx,
.tx_num = ARRAY_SIZE(sm8350_ufsphy_g4_tx),
.rx = sm8350_ufsphy_g4_rx,
.rx_num = ARRAY_SIZE(sm8350_ufsphy_g4_rx),
.pcs = sm8350_ufsphy_g4_pcs,
.pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
+ .max_gear = UFS_HS_G4,
},
- .clk_list = sdm845_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = ufsphy_v5_regs_layout,
@@ -1318,6 +1316,7 @@ static const struct qmp_phy_cfg sm8450_ufsphy_cfg = {
.lanes = 2,
.offsets = &qmp_ufs_offsets,
+ .max_supported_gear = UFS_HS_G4,
.tbls = {
.serdes = sm8350_ufsphy_serdes,
@@ -1333,16 +1332,15 @@ static const struct qmp_phy_cfg sm8450_ufsphy_cfg = {
.serdes = sm8350_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sm8350_ufsphy_hs_b_serdes),
},
- .tbls_hs_g4 = {
+ .tbls_hs_overlay[0] = {
.tx = sm8350_ufsphy_g4_tx,
.tx_num = ARRAY_SIZE(sm8350_ufsphy_g4_tx),
.rx = sm8350_ufsphy_g4_rx,
.rx_num = ARRAY_SIZE(sm8350_ufsphy_g4_rx),
.pcs = sm8350_ufsphy_g4_pcs,
.pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
+ .max_gear = UFS_HS_G4,
},
- .clk_list = sm8450_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(sm8450_ufs_phy_clk_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = ufsphy_v5_regs_layout,
@@ -1352,6 +1350,7 @@ static const struct qmp_phy_cfg sm8550_ufsphy_cfg = {
.lanes = 2,
.offsets = &qmp_ufs_offsets_v6,
+ .max_supported_gear = UFS_HS_G5,
.tbls = {
.serdes = sm8550_ufsphy_serdes,
@@ -1367,8 +1366,26 @@ static const struct qmp_phy_cfg sm8550_ufsphy_cfg = {
.serdes = sm8550_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sm8550_ufsphy_hs_b_serdes),
},
- .clk_list = sdm845_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .tbls_hs_overlay[0] = {
+ .serdes = sm8550_ufsphy_g4_serdes,
+ .serdes_num = ARRAY_SIZE(sm8550_ufsphy_g4_serdes),
+ .tx = sm8550_ufsphy_g4_tx,
+ .tx_num = ARRAY_SIZE(sm8550_ufsphy_g4_tx),
+ .rx = sm8550_ufsphy_g4_rx,
+ .rx_num = ARRAY_SIZE(sm8550_ufsphy_g4_rx),
+ .pcs = sm8550_ufsphy_g4_pcs,
+ .pcs_num = ARRAY_SIZE(sm8550_ufsphy_g4_pcs),
+ .max_gear = UFS_HS_G4,
+ },
+ .tbls_hs_overlay[1] = {
+ .serdes = sm8550_ufsphy_g5_serdes,
+ .serdes_num = ARRAY_SIZE(sm8550_ufsphy_g5_serdes),
+ .rx = sm8550_ufsphy_g5_rx,
+ .rx_num = ARRAY_SIZE(sm8550_ufsphy_g5_rx),
+ .pcs = sm8550_ufsphy_g5_pcs,
+ .pcs_num = ARRAY_SIZE(sm8550_ufsphy_g5_pcs),
+ .max_gear = UFS_HS_G5,
+ },
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = ufsphy_v6_regs_layout,
@@ -1378,6 +1395,7 @@ static const struct qmp_phy_cfg sm8650_ufsphy_cfg = {
.lanes = 2,
.offsets = &qmp_ufs_offsets_v6,
+ .max_supported_gear = UFS_HS_G5,
.tbls = {
.serdes = sm8650_ufsphy_serdes,
@@ -1389,44 +1407,16 @@ static const struct qmp_phy_cfg sm8650_ufsphy_cfg = {
.pcs = sm8650_ufsphy_pcs,
.pcs_num = ARRAY_SIZE(sm8650_ufsphy_pcs),
},
- .clk_list = sdm845_ufs_phy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = ufsphy_v6_regs_layout,
};
-static void qmp_ufs_configure_lane(void __iomem *base,
- const struct qmp_phy_init_tbl tbl[],
- int num,
- u8 lane_mask)
-{
- int i;
- const struct qmp_phy_init_tbl *t = tbl;
-
- if (!t)
- return;
-
- for (i = 0; i < num; i++, t++) {
- if (!(t->lane_mask & lane_mask))
- continue;
-
- writel(t->val, base + t->offset);
- }
-}
-
-static void qmp_ufs_configure(void __iomem *base,
- const struct qmp_phy_init_tbl tbl[],
- int num)
-{
- qmp_ufs_configure_lane(base, tbl, num, 0xff);
-}
-
static void qmp_ufs_serdes_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbls *tbls)
{
void __iomem *serdes = qmp->serdes;
- qmp_ufs_configure(serdes, tbls->serdes, tbls->serdes_num);
+ qmp_configure(serdes, tbls->serdes, tbls->serdes_num);
}
static void qmp_ufs_lanes_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbls *tbls)
@@ -1435,12 +1425,12 @@ static void qmp_ufs_lanes_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbl
void __iomem *tx = qmp->tx;
void __iomem *rx = qmp->rx;
- qmp_ufs_configure_lane(tx, tbls->tx, tbls->tx_num, 1);
- qmp_ufs_configure_lane(rx, tbls->rx, tbls->rx_num, 1);
+ qmp_configure_lane(tx, tbls->tx, tbls->tx_num, 1);
+ qmp_configure_lane(rx, tbls->rx, tbls->rx_num, 1);
if (cfg->lanes >= 2) {
- qmp_ufs_configure_lane(qmp->tx2, tbls->tx, tbls->tx_num, 2);
- qmp_ufs_configure_lane(qmp->rx2, tbls->rx, tbls->rx_num, 2);
+ qmp_configure_lane(qmp->tx2, tbls->tx, tbls->tx_num, 2);
+ qmp_configure_lane(qmp->rx2, tbls->rx, tbls->rx_num, 2);
}
}
@@ -1448,20 +1438,52 @@ static void qmp_ufs_pcs_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbls
{
void __iomem *pcs = qmp->pcs;
- qmp_ufs_configure(pcs, tbls->pcs, tbls->pcs_num);
+ qmp_configure(pcs, tbls->pcs, tbls->pcs_num);
+}
+
+static int qmp_ufs_get_gear_overlay(struct qmp_ufs *qmp, const struct qmp_phy_cfg *cfg)
+{
+ u32 max_gear, floor_max_gear = cfg->max_supported_gear;
+ int idx, ret = -EINVAL;
+
+ for (idx = NUM_OVERLAY - 1; idx >= 0; idx--) {
+ max_gear = cfg->tbls_hs_overlay[idx].max_gear;
+
+ /* Skip if the table is not available */
+ if (max_gear == 0)
+ continue;
+
+		/* Direct match, bail out */
+ if (qmp->submode == max_gear)
+ return idx;
+
+		/* If there is no direct match, the overlay with the lowest max_gear is the best match */
+ if (max_gear < floor_max_gear) {
+ ret = idx;
+ floor_max_gear = max_gear;
+ }
+ }
+
+ return ret;
}
static void qmp_ufs_init_registers(struct qmp_ufs *qmp, const struct qmp_phy_cfg *cfg)
{
+ int i;
+
qmp_ufs_serdes_init(qmp, &cfg->tbls);
- if (qmp->mode == PHY_MODE_UFS_HS_B)
- qmp_ufs_serdes_init(qmp, &cfg->tbls_hs_b);
qmp_ufs_lanes_init(qmp, &cfg->tbls);
- if (qmp->submode == UFS_HS_G4)
- qmp_ufs_lanes_init(qmp, &cfg->tbls_hs_g4);
qmp_ufs_pcs_init(qmp, &cfg->tbls);
- if (qmp->submode == UFS_HS_G4)
- qmp_ufs_pcs_init(qmp, &cfg->tbls_hs_g4);
+
+ i = qmp_ufs_get_gear_overlay(qmp, cfg);
+ if (i >= 0) {
+ qmp_ufs_serdes_init(qmp, &cfg->tbls_hs_overlay[i]);
+ qmp_ufs_lanes_init(qmp, &cfg->tbls_hs_overlay[i]);
+ qmp_ufs_pcs_init(qmp, &cfg->tbls_hs_overlay[i]);
+ }
+
+ if (qmp->mode == PHY_MODE_UFS_HS_B)
+ qmp_ufs_serdes_init(qmp, &cfg->tbls_hs_b);
}
static int qmp_ufs_com_init(struct qmp_ufs *qmp)
@@ -1476,7 +1498,7 @@ static int qmp_ufs_com_init(struct qmp_ufs *qmp)
return ret;
}
- ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ ret = clk_bulk_prepare_enable(qmp->num_clks, qmp->clks);
if (ret)
goto err_disable_regulators;
@@ -1496,7 +1518,7 @@ static int qmp_ufs_com_exit(struct qmp_ufs *qmp)
reset_control_assert(qmp->ufs_reset);
- clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks);
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
@@ -1633,6 +1655,12 @@ static int qmp_ufs_disable(struct phy *phy)
static int qmp_ufs_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
struct qmp_ufs *qmp = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ if (submode > cfg->max_supported_gear || submode == 0) {
+ dev_err(qmp->dev, "Invalid PHY submode %d\n", submode);
+ return -EINVAL;
+ }
qmp->mode = mode;
qmp->submode = submode;
@@ -1666,19 +1694,13 @@ static int qmp_ufs_vreg_init(struct qmp_ufs *qmp)
static int qmp_ufs_clk_init(struct qmp_ufs *qmp)
{
- const struct qmp_phy_cfg *cfg = qmp->cfg;
struct device *dev = qmp->dev;
- int num = cfg->num_clks;
- int i;
- qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
- if (!qmp->clks)
- return -ENOMEM;
-
- for (i = 0; i < num; i++)
- qmp->clks[i].id = cfg->clk_list[i];
+ qmp->num_clks = devm_clk_bulk_get_all(dev, &qmp->clks);
+ if (qmp->num_clks < 0)
+ return qmp->num_clks;
- return devm_clk_bulk_get(dev, num, qmp->clks);
+ return 0;
}
static void qmp_ufs_clk_release_provider(void *res)
@@ -1881,6 +1903,9 @@ static const struct of_device_id qmp_ufs_of_match_table[] = {
.compatible = "qcom,sa8775p-qmp-ufs-phy",
.data = &sa8775p_ufsphy_cfg,
}, {
+ .compatible = "qcom,sc7180-qmp-ufs-phy",
+ .data = &sm7150_ufsphy_cfg,
+ }, {
.compatible = "qcom,sc7280-qmp-ufs-phy",
.data = &sc7280_ufsphy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c
index cf466f6df94d7..6d0ba39c19431 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c
@@ -25,15 +25,7 @@
#include "phy-qcom-qmp-pcs-usb-v4.h"
#include "phy-qcom-qmp-pcs-usb-v5.h"
-/* QPHY_SW_RESET bit */
-#define SW_RESET BIT(0)
-/* QPHY_POWER_DOWN_CONTROL */
-#define SW_PWRDN BIT(0)
-/* QPHY_START_CONTROL bits */
-#define SERDES_START BIT(0)
-#define PCS_START BIT(1)
-/* QPHY_PCS_STATUS bit */
-#define PHYSTATUS BIT(6)
+#include "phy-qcom-qmp-dp-com-v3.h"
/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
/* DP PHY soft reset */
@@ -49,17 +41,6 @@
#define USB3_MODE BIT(0) /* enables USB3 mode */
#define DP_MODE BIT(1) /* enables DP mode */
-/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
-#define ARCVR_DTCT_EN BIT(0)
-#define ALFPS_DTCT_EN BIT(1)
-#define ARCVR_DTCT_EVENT_SEL BIT(4)
-
-/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
-#define IRQ_CLEAR BIT(0)
-
-/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
-#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
-
#define PHY_INIT_COMPLETE_TIMEOUT 10000
struct qmp_phy_init_tbl {
@@ -507,8 +488,6 @@ struct qmp_usb_legacy_offsets {
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
- int lanes;
-
const struct qmp_usb_legacy_offsets *offsets;
/* Init sequence for PHY blocks - serdes, tx, rx, pcs */
@@ -621,8 +600,6 @@ static const char * const qmp_phy_vreg_l[] = {
};
static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
- .lanes = 2,
-
.serdes_tbl = qmp_v3_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
.tx_tbl = qmp_v3_usb3_tx_tbl,
@@ -641,8 +618,6 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
};
static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
- .lanes = 2,
-
.serdes_tbl = qmp_v3_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
.tx_tbl = qmp_v3_usb3_tx_tbl,
@@ -661,8 +636,6 @@ static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
};
static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
- .lanes = 2,
-
.serdes_tbl = sm8150_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
.tx_tbl = sm8150_usb3_tx_tbl,
@@ -684,8 +657,6 @@ static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
};
static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
- .lanes = 2,
-
.serdes_tbl = sm8150_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
.tx_tbl = sm8250_usb3_tx_tbl,
@@ -707,8 +678,6 @@ static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
};
static const struct qmp_phy_cfg sm8350_usb3phy_cfg = {
- .lanes = 2,
-
.serdes_tbl = sm8150_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
.tx_tbl = sm8350_usb3_tx_tbl,
@@ -874,10 +843,8 @@ static int qmp_usb_legacy_power_on(struct phy *phy)
qmp_usb_legacy_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
qmp_usb_legacy_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
- if (cfg->lanes >= 2) {
- qmp_usb_legacy_configure_lane(qmp->tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2);
- qmp_usb_legacy_configure_lane(qmp->rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2);
- }
+ qmp_usb_legacy_configure_lane(qmp->tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ qmp_usb_legacy_configure_lane(qmp->rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2);
qmp_usb_legacy_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
@@ -1180,27 +1147,11 @@ static int phy_pipe_clk_register(struct qmp_usb *qmp, struct device_node *np)
return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
}
-static void __iomem *qmp_usb_legacy_iomap(struct device *dev, struct device_node *np,
- int index, bool exclusive)
-{
- struct resource res;
-
- if (!exclusive) {
- if (of_address_to_resource(np, index, &res))
- return IOMEM_ERR_PTR(-EINVAL);
-
- return devm_ioremap(dev, res.start, resource_size(&res));
- }
-
- return devm_of_iomap(dev, np, index, NULL);
-}
-
static int qmp_usb_legacy_parse_dt_legacy(struct qmp_usb *qmp, struct device_node *np)
{
struct platform_device *pdev = to_platform_device(qmp->dev);
const struct qmp_phy_cfg *cfg = qmp->cfg;
struct device *dev = qmp->dev;
- bool exclusive = true;
qmp->serdes = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(qmp->serdes))
@@ -1224,27 +1175,22 @@ static int qmp_usb_legacy_parse_dt_legacy(struct qmp_usb *qmp, struct device_nod
if (IS_ERR(qmp->rx))
return PTR_ERR(qmp->rx);
- qmp->pcs = qmp_usb_legacy_iomap(dev, np, 2, exclusive);
+ qmp->pcs = devm_of_iomap(dev, np, 2, NULL);
if (IS_ERR(qmp->pcs))
return PTR_ERR(qmp->pcs);
if (cfg->pcs_usb_offset)
qmp->pcs_usb = qmp->pcs + cfg->pcs_usb_offset;
- if (cfg->lanes >= 2) {
- qmp->tx2 = devm_of_iomap(dev, np, 3, NULL);
- if (IS_ERR(qmp->tx2))
- return PTR_ERR(qmp->tx2);
-
- qmp->rx2 = devm_of_iomap(dev, np, 4, NULL);
- if (IS_ERR(qmp->rx2))
- return PTR_ERR(qmp->rx2);
+ qmp->tx2 = devm_of_iomap(dev, np, 3, NULL);
+ if (IS_ERR(qmp->tx2))
+ return PTR_ERR(qmp->tx2);
- qmp->pcs_misc = devm_of_iomap(dev, np, 5, NULL);
- } else {
- qmp->pcs_misc = devm_of_iomap(dev, np, 3, NULL);
- }
+ qmp->rx2 = devm_of_iomap(dev, np, 4, NULL);
+ if (IS_ERR(qmp->rx2))
+ return PTR_ERR(qmp->rx2);
+ qmp->pcs_misc = devm_of_iomap(dev, np, 5, NULL);
if (IS_ERR(qmp->pcs_misc)) {
dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
qmp->pcs_misc = NULL;
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 5c003988c35d3..85253936fac35 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -19,6 +19,8 @@
#include <linux/reset.h>
#include <linux/slab.h>
+#include "phy-qcom-qmp-common.h"
+
#include "phy-qcom-qmp.h"
#include "phy-qcom-qmp-pcs-misc-v3.h"
#include "phy-qcom-qmp-pcs-misc-v4.h"
@@ -27,67 +29,8 @@
#include "phy-qcom-qmp-pcs-usb-v6.h"
#include "phy-qcom-qmp-pcs-usb-v7.h"
-/* QPHY_SW_RESET bit */
-#define SW_RESET BIT(0)
-/* QPHY_POWER_DOWN_CONTROL */
-#define SW_PWRDN BIT(0)
-/* QPHY_START_CONTROL bits */
-#define SERDES_START BIT(0)
-#define PCS_START BIT(1)
-/* QPHY_PCS_STATUS bit */
-#define PHYSTATUS BIT(6)
-
-/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
-/* DP PHY soft reset */
-#define SW_DPPHY_RESET BIT(0)
-/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
-#define SW_DPPHY_RESET_MUX BIT(1)
-/* USB3 PHY soft reset */
-#define SW_USB3PHY_RESET BIT(2)
-/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
-#define SW_USB3PHY_RESET_MUX BIT(3)
-
-/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
-#define USB3_MODE BIT(0) /* enables USB3 mode */
-#define DP_MODE BIT(1) /* enables DP mode */
-
-/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
-#define ARCVR_DTCT_EN BIT(0)
-#define ALFPS_DTCT_EN BIT(1)
-#define ARCVR_DTCT_EVENT_SEL BIT(4)
-
-/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
-#define IRQ_CLEAR BIT(0)
-
-/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
-#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
-
#define PHY_INIT_COMPLETE_TIMEOUT 10000
-struct qmp_phy_init_tbl {
- unsigned int offset;
- unsigned int val;
- /*
- * mask of lanes for which this register is written
- * for cases when second lane needs different values
- */
- u8 lane_mask;
-};
-
-#define QMP_PHY_INIT_CFG(o, v) \
- { \
- .offset = o, \
- .val = v, \
- .lane_mask = 0xff, \
- }
-
-#define QMP_PHY_INIT_CFG_LANE(o, v, l) \
- { \
- .offset = o, \
- .val = v, \
- .lane_mask = l, \
- }
-
/* set of registers with offsets different per-PHY */
enum qphy_reg_layout {
/* PCS registers */
@@ -121,15 +64,6 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_PCS_MISC_CLAMP_ENABLE] = QPHY_V3_PCS_MISC_CLAMP_ENABLE,
};
-static const unsigned int qmp_v3_usb3phy_regs_layout_qcm2290[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = QPHY_V3_PCS_SW_RESET,
- [QPHY_START_CTRL] = QPHY_V3_PCS_START_CONTROL,
- [QPHY_PCS_STATUS] = QPHY_V3_PCS_PCS_STATUS,
- [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V3_PCS_AUTONOMOUS_MODE_CTRL,
- [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V3_PCS_LFPS_RXTERM_IRQ_CLEAR,
- [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V3_PCS_POWER_DOWN_CONTROL,
-};
-
static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_SW_RESET] = QPHY_V4_PCS_SW_RESET,
[QPHY_START_CTRL] = QPHY_V4_PCS_START_CONTROL,
@@ -514,115 +448,6 @@ static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG2, 0x60),
};
-static const struct qmp_phy_init_tbl msm8998_usb3_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_INITVAL, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_MODE, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
-};
-
-static const struct qmp_phy_init_tbl msm8998_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x00),
-};
-
-static const struct qmp_phy_init_tbl msm8998_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x43),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1c),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FO_GAIN, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x05),
-};
-
-static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x8a),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
-};
-
static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
@@ -1089,99 +914,6 @@ static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_pcs_usb_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
};
-static const struct qmp_phy_init_tbl qcm2290_usb3_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_INITVAL, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x01),
-};
-
-static const struct qmp_phy_init_tbl qcm2290_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0xc6),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x00),
-};
-
-static const struct qmp_phy_init_tbl qcm2290_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FO_GAIN, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x00),
-};
-
-static const struct qmp_phy_init_tbl qcm2290_usb3_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x17),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x85),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88),
-};
-
static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x1a),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
@@ -1448,15 +1180,10 @@ struct qmp_usb_offsets {
u16 pcs_usb;
u16 tx;
u16 rx;
- /* for PHYs with >= 2 lanes */
- u16 tx2;
- u16 rx2;
};
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
- int lanes;
-
const struct qmp_usb_offsets *offsets;
/* Init sequence for PHY blocks - serdes, tx, rx, pcs */
@@ -1496,8 +1223,6 @@ struct qmp_usb {
void __iomem *pcs_usb;
void __iomem *tx;
void __iomem *rx;
- void __iomem *tx2;
- void __iomem *rx2;
struct clk *pipe_clk;
struct clk_bulk_data *clks;
@@ -1579,16 +1304,6 @@ static const struct qmp_usb_offsets qmp_usb_offsets_v3_msm8996 = {
.rx = 0x400,
};
-static const struct qmp_usb_offsets qmp_usb_offsets_v3_qcm2290 = {
- .serdes = 0x0,
- .pcs = 0xc00,
- .pcs_misc = 0xa00,
- .tx = 0x200,
- .rx = 0x400,
- .tx2 = 0x600,
- .rx2 = 0x800,
-};
-
static const struct qmp_usb_offsets qmp_usb_offsets_v4 = {
.serdes = 0,
.pcs = 0x0800,
@@ -1622,8 +1337,6 @@ static const struct qmp_usb_offsets qmp_usb_offsets_v7 = {
};
static const struct qmp_phy_cfg ipq6018_usb3phy_cfg = {
- .lanes = 1,
-
.offsets = &qmp_usb_offsets_v3,
.serdes_tbl = ipq9574_usb3_serdes_tbl,
@@ -1640,8 +1353,6 @@ static const struct qmp_phy_cfg ipq6018_usb3phy_cfg = {
};
static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
- .lanes = 1,
-
.offsets = &qmp_usb_offsets_v3,
.serdes_tbl = ipq8074_usb3_serdes_tbl,
@@ -1658,8 +1369,6 @@ static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
};
static const struct qmp_phy_cfg ipq9574_usb3phy_cfg = {
- .lanes = 1,
-
.offsets = &qmp_usb_offsets_ipq9574,
.serdes_tbl = ipq9574_usb3_serdes_tbl,
@@ -1676,8 +1385,6 @@ static const struct qmp_phy_cfg ipq9574_usb3phy_cfg = {
};
static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
- .lanes = 1,
-
.offsets = &qmp_usb_offsets_v3_msm8996,
.serdes_tbl = msm8996_usb3_serdes_tbl,
@@ -1694,8 +1401,6 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
};
static const struct qmp_phy_cfg sa8775p_usb3_uniphy_cfg = {
- .lanes = 1,
-
.offsets = &qmp_usb_offsets_v5,
.serdes_tbl = sc8280xp_usb3_uniphy_serdes_tbl,
@@ -1714,8 +1419,6 @@ static const struct qmp_phy_cfg sa8775p_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = {
- .lanes = 1,
-
.offsets = &qmp_usb_offsets_v5,
.serdes_tbl = sc8280xp_usb3_uniphy_serdes_tbl,
@@ -1734,8 +1437,6 @@ static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
- .lanes = 1,
-
.offsets = &qmp_usb_offsets_v3,
.serdes_tbl = qmp_v3_usb3_uniphy_serdes_tbl,
@@ -1753,27 +1454,7 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
.has_pwrdn_delay = true,
};
-static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
- .lanes = 2,
-
- .offsets = &qmp_usb_offsets_v3_qcm2290,
-
- .serdes_tbl = msm8998_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(msm8998_usb3_serdes_tbl),
- .tx_tbl = msm8998_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(msm8998_usb3_tx_tbl),
- .rx_tbl = msm8998_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(msm8998_usb3_rx_tbl),
- .pcs_tbl = msm8998_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(msm8998_usb3_pcs_tbl),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v3_usb3phy_regs_layout,
-};
-
static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
- .lanes = 1,
-
.offsets = &qmp_usb_offsets_v4,
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
@@ -1795,8 +1476,6 @@ static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
- .lanes = 1,
-
.offsets = &qmp_usb_offsets_v4,
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
@@ -1818,8 +1497,6 @@ static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
- .lanes = 1,
-
.offsets = &qmp_usb_offsets_v4,
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
@@ -1841,8 +1518,6 @@ static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
- .lanes = 1,
-
.offsets = &qmp_usb_offsets_v5,
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
@@ -1864,7 +1539,6 @@ static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg sdx75_usb3_uniphy_cfg = {
- .lanes = 1,
.offsets = &qmp_usb_offsets_v6,
.serdes_tbl = sdx75_usb3_uniphy_serdes_tbl,
@@ -1886,8 +1560,6 @@ static const struct qmp_phy_cfg sdx75_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
- .lanes = 1,
-
.offsets = &qmp_usb_offsets_v5,
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
@@ -1908,27 +1580,7 @@ static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
.has_pwrdn_delay = true,
};
-static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
- .lanes = 2,
-
- .offsets = &qmp_usb_offsets_v3_qcm2290,
-
- .serdes_tbl = qcm2290_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(qcm2290_usb3_serdes_tbl),
- .tx_tbl = qcm2290_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(qcm2290_usb3_tx_tbl),
- .rx_tbl = qcm2290_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(qcm2290_usb3_rx_tbl),
- .pcs_tbl = qcm2290_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(qcm2290_usb3_pcs_tbl),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v3_usb3phy_regs_layout_qcm2290,
-};
-
static const struct qmp_phy_cfg x1e80100_usb3_uniphy_cfg = {
- .lanes = 1,
-
.offsets = &qmp_usb_offsets_v7,
.serdes_tbl = x1e80100_usb3_uniphy_serdes_tbl,
@@ -1946,32 +1598,6 @@ static const struct qmp_phy_cfg x1e80100_usb3_uniphy_cfg = {
.regs = qmp_v7_usb3phy_regs_layout,
};
-static void qmp_usb_configure_lane(void __iomem *base,
- const struct qmp_phy_init_tbl tbl[],
- int num,
- u8 lane_mask)
-{
- int i;
- const struct qmp_phy_init_tbl *t = tbl;
-
- if (!t)
- return;
-
- for (i = 0; i < num; i++, t++) {
- if (!(t->lane_mask & lane_mask))
- continue;
-
- writel(t->val, base + t->offset);
- }
-}
-
-static void qmp_usb_configure(void __iomem *base,
- const struct qmp_phy_init_tbl tbl[],
- int num)
-{
- qmp_usb_configure_lane(base, tbl, num, 0xff);
-}
-
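The open-coded helpers removed above now come from the new phy-qcom-qmp-common.h include as qmp_configure_lane() and qmp_configure(). The shared header is not part of this hunk; a minimal sketch of what those helpers are assumed to look like, mirroring the removed bodies, is:

static inline void qmp_configure_lane(void __iomem *base,
				      const struct qmp_phy_init_tbl tbl[],
				      int num, u8 lane_mask)
{
	int i;

	if (!tbl)
		return;

	/* write only the entries whose lane_mask matches the requested lane */
	for (i = 0; i < num; i++) {
		if (!(tbl[i].lane_mask & lane_mask))
			continue;

		writel(tbl[i].val, base + tbl[i].offset);
	}
}

static inline void qmp_configure(void __iomem *base,
				 const struct qmp_phy_init_tbl tbl[],
				 int num)
{
	/* lane_mask 0xff matches every entry, i.e. programs the whole table */
	qmp_configure_lane(base, tbl, num, 0xff);
}

This is why the callers below can drop the qmp_usb_ prefix without any change in behaviour.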
static int qmp_usb_serdes_init(struct qmp_usb *qmp)
{
const struct qmp_phy_cfg *cfg = qmp->cfg;
@@ -1979,7 +1605,7 @@ static int qmp_usb_serdes_init(struct qmp_usb *qmp)
const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
int serdes_tbl_num = cfg->serdes_tbl_num;
- qmp_usb_configure(serdes, serdes_tbl, serdes_tbl_num);
+ qmp_configure(serdes, serdes_tbl, serdes_tbl_num);
return 0;
}
@@ -2060,18 +1686,13 @@ static int qmp_usb_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qmp_usb_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
- qmp_usb_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
-
- if (cfg->lanes >= 2) {
- qmp_usb_configure_lane(qmp->tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2);
- qmp_usb_configure_lane(qmp->rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2);
- }
+ qmp_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
- qmp_usb_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ qmp_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
if (pcs_usb)
- qmp_usb_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num);
+ qmp_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num);
if (cfg->has_pwrdn_delay)
usleep_range(10, 20);
@@ -2414,7 +2035,6 @@ static int qmp_usb_parse_dt_legacy(struct qmp_usb *qmp, struct device_node *np)
/*
* Get memory resources for the PHY:
* Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
- * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
* For single lane PHYs: pcs_misc (optional) -> 3.
*/
qmp->tx = devm_of_iomap(dev, np, 0, NULL);
@@ -2432,19 +2052,7 @@ static int qmp_usb_parse_dt_legacy(struct qmp_usb *qmp, struct device_node *np)
if (cfg->pcs_usb_offset)
qmp->pcs_usb = qmp->pcs + cfg->pcs_usb_offset;
- if (cfg->lanes >= 2) {
- qmp->tx2 = devm_of_iomap(dev, np, 3, NULL);
- if (IS_ERR(qmp->tx2))
- return PTR_ERR(qmp->tx2);
-
- qmp->rx2 = devm_of_iomap(dev, np, 4, NULL);
- if (IS_ERR(qmp->rx2))
- return PTR_ERR(qmp->rx2);
-
- qmp->pcs_misc = devm_of_iomap(dev, np, 5, NULL);
- } else {
- qmp->pcs_misc = devm_of_iomap(dev, np, 3, NULL);
- }
+ qmp->pcs_misc = devm_of_iomap(dev, np, 3, NULL);
if (IS_ERR(qmp->pcs_misc)) {
dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
@@ -2496,11 +2104,6 @@ static int qmp_usb_parse_dt(struct qmp_usb *qmp)
qmp->tx = base + offs->tx;
qmp->rx = base + offs->rx;
- if (cfg->lanes >= 2) {
- qmp->tx2 = base + offs->tx2;
- qmp->rx2 = base + offs->rx2;
- }
-
ret = qmp_usb_clk_init(qmp);
if (ret)
return ret;
@@ -2600,12 +2203,6 @@ static const struct of_device_id qmp_usb_of_match_table[] = {
.compatible = "qcom,msm8996-qmp-usb3-phy",
.data = &msm8996_usb3phy_cfg,
}, {
- .compatible = "qcom,msm8998-qmp-usb3-phy",
- .data = &msm8998_usb3phy_cfg,
- }, {
- .compatible = "qcom,qcm2290-qmp-usb3-phy",
- .data = &qcm2290_usb3phy_cfg,
- }, {
.compatible = "qcom,sa8775p-qmp-usb3-uni-phy",
.data = &sa8775p_usb3_uniphy_cfg,
}, {
@@ -2624,9 +2221,6 @@ static const struct of_device_id qmp_usb_of_match_table[] = {
.compatible = "qcom,sdx75-qmp-usb3-uni-phy",
.data = &sdx75_usb3_uniphy_cfg,
}, {
- .compatible = "qcom,sm6115-qmp-usb3-phy",
- .data = &qcm2290_usb3phy_cfg,
- }, {
.compatible = "qcom,sm8150-qmp-usb3-uni-phy",
.data = &sm8150_usb3_uniphy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c b/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c
new file mode 100644
index 0000000000000..5cbc5fd529ebe
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c
@@ -0,0 +1,1149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/usb/typec.h>
+#include <linux/usb/typec_mux.h>
+
+#include "phy-qcom-qmp-common.h"
+
+#include "phy-qcom-qmp.h"
+#include "phy-qcom-qmp-pcs-misc-v3.h"
+
+#define PHY_INIT_COMPLETE_TIMEOUT 10000
+
+/* set of registers with offsets different per-PHY */
+enum qphy_reg_layout {
+ /* PCS registers */
+ QPHY_SW_RESET,
+ QPHY_START_CTRL,
+ QPHY_PCS_STATUS,
+ QPHY_PCS_AUTONOMOUS_MODE_CTRL,
+ QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ QPHY_PCS_POWER_DOWN_CONTROL,
+ /* Keep last to ensure regs_layout arrays are properly initialized */
+ QPHY_LAYOUT_SIZE
+};
+
+static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V3_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V3_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V3_PCS_PCS_STATUS,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V3_PCS_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V3_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V3_PCS_POWER_DOWN_CONTROL,
+};
+
+static const unsigned int qmp_v3_usb3phy_regs_layout_qcm2290[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V3_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V3_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V3_PCS_PCS_STATUS,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V3_PCS_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V3_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V3_PCS_POWER_DOWN_CONTROL,
+};
+
+static const struct qmp_phy_init_tbl msm8998_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_INITVAL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_MODE, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl msm8998_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x00),
+};
+
+static const struct qmp_phy_init_tbl msm8998_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x43),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x05),
+};
+
+static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x8a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+};
+
+static const struct qmp_phy_init_tbl qcm2290_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_INITVAL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x01),
+};
+
+static const struct qmp_phy_init_tbl qcm2290_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0xc6),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x00),
+};
+
+static const struct qmp_phy_init_tbl qcm2290_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x00),
+};
+
+/* the only difference from qcm2290_usb3_rx_tbl is QSERDES_V3_RX_UCDR_PI_CONTROLS */
+static const struct qmp_phy_init_tbl sdm660_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x00),
+};
+
+static const struct qmp_phy_init_tbl qcm2290_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x17),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x85),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88),
+};
+
+struct qmp_usbc_offsets {
+ u16 serdes;
+ u16 pcs;
+ u16 pcs_misc;
+ u16 tx;
+ u16 rx;
+ /* for PHYs with >= 2 lanes */
+ u16 tx2;
+ u16 rx2;
+};
+
+/* struct qmp_phy_cfg - per-PHY initialization config */
+struct qmp_phy_cfg {
+ const struct qmp_usbc_offsets *offsets;
+
+ /* Init sequence for PHY blocks - serdes, tx, rx, pcs */
+ const struct qmp_phy_init_tbl *serdes_tbl;
+ int serdes_tbl_num;
+ const struct qmp_phy_init_tbl *tx_tbl;
+ int tx_tbl_num;
+ const struct qmp_phy_init_tbl *rx_tbl;
+ int rx_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_tbl;
+ int pcs_tbl_num;
+
+ /* regulators to be requested */
+ const char * const *vreg_list;
+ int num_vregs;
+
+ /* array of registers with different offsets */
+ const unsigned int *regs;
+};
+
+struct qmp_usbc {
+ struct device *dev;
+
+ const struct qmp_phy_cfg *cfg;
+
+ void __iomem *serdes;
+ void __iomem *pcs;
+ void __iomem *pcs_misc;
+ void __iomem *tx;
+ void __iomem *rx;
+ void __iomem *tx2;
+ void __iomem *rx2;
+
+ struct regmap *tcsr_map;
+ u32 vls_clamp_reg;
+
+ struct clk *pipe_clk;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ int num_resets;
+ struct reset_control_bulk_data *resets;
+ struct regulator_bulk_data *vregs;
+
+ struct mutex phy_mutex;
+
+ enum phy_mode mode;
+ unsigned int usb_init_count;
+
+ struct phy *phy;
+
+ struct clk_fixed_rate pipe_clk_fixed;
+
+ struct typec_switch_dev *sw;
+ enum typec_orientation orientation;
+};
+
+static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg |= val;
+ writel(reg, base + offset);
+
+ /* ensure that the above write goes through */
+ readl(base + offset);
+}
+
+static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg &= ~val;
+ writel(reg, base + offset);
+
+ /* ensure that the above write goes through */
+ readl(base + offset);
+}
+
+/* list of clocks required by phy */
+static const char * const qmp_usbc_phy_clk_l[] = {
+ "aux", "cfg_ahb", "ref", "com_aux",
+};
+
+/* list of resets */
+static const char * const usb3phy_legacy_reset_l[] = {
+ "phy", "common",
+};
+
+static const char * const usb3phy_reset_l[] = {
+ "phy_phy", "phy",
+};
+
+/* list of regulators */
+static const char * const qmp_phy_vreg_l[] = {
+ "vdda-phy", "vdda-pll",
+};
+
+static const struct qmp_usbc_offsets qmp_usbc_offsets_v3_qcm2290 = {
+ .serdes = 0x0,
+ .pcs = 0xc00,
+ .pcs_misc = 0xa00,
+ .tx = 0x200,
+ .rx = 0x400,
+ .tx2 = 0x600,
+ .rx2 = 0x800,
+};
+
+static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
+ .offsets = &qmp_usbc_offsets_v3_qcm2290,
+
+ .serdes_tbl = msm8998_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8998_usb3_serdes_tbl),
+ .tx_tbl = msm8998_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8998_usb3_tx_tbl),
+ .rx_tbl = msm8998_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8998_usb3_rx_tbl),
+ .pcs_tbl = msm8998_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(msm8998_usb3_pcs_tbl),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+};
+
+static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
+ .offsets = &qmp_usbc_offsets_v3_qcm2290,
+
+ .serdes_tbl = qcm2290_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qcm2290_usb3_serdes_tbl),
+ .tx_tbl = qcm2290_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qcm2290_usb3_tx_tbl),
+ .rx_tbl = qcm2290_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qcm2290_usb3_rx_tbl),
+ .pcs_tbl = qcm2290_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qcm2290_usb3_pcs_tbl),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout_qcm2290,
+};
+
+static const struct qmp_phy_cfg sdm660_usb3phy_cfg = {
+ .offsets = &qmp_usbc_offsets_v3_qcm2290,
+
+ .serdes_tbl = qcm2290_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qcm2290_usb3_serdes_tbl),
+ .tx_tbl = qcm2290_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qcm2290_usb3_tx_tbl),
+ .rx_tbl = sdm660_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sdm660_usb3_rx_tbl),
+ .pcs_tbl = qcm2290_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qcm2290_usb3_pcs_tbl),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout_qcm2290,
+};
+
+static int qmp_usbc_init(struct phy *phy)
+{
+ struct qmp_usbc *qmp = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qmp->pcs;
+ u32 val = 0;
+ int ret;
+
+ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_bulk_assert(qmp->num_resets, qmp->resets);
+ if (ret) {
+ dev_err(qmp->dev, "reset assert failed\n");
+ goto err_disable_regulators;
+ }
+
+ ret = reset_control_bulk_deassert(qmp->num_resets, qmp->resets);
+ if (ret) {
+ dev_err(qmp->dev, "reset deassert failed\n");
+ goto err_disable_regulators;
+ }
+
+ ret = clk_bulk_prepare_enable(qmp->num_clks, qmp->clks);
+ if (ret)
+ goto err_assert_reset;
+
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], SW_PWRDN);
+
+#define SW_PORTSELECT_VAL BIT(0)
+#define SW_PORTSELECT_MUX BIT(1)
+ /* Use software-based port select and switch based on the Type-C orientation */
+ val = SW_PORTSELECT_MUX;
+ if (qmp->orientation == TYPEC_ORIENTATION_REVERSE)
+ val |= SW_PORTSELECT_VAL;
+ writel(val, qmp->pcs_misc);
+
+ return 0;
+
+err_assert_reset:
+ reset_control_bulk_assert(qmp->num_resets, qmp->resets);
+err_disable_regulators:
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ return ret;
+}
+
+static int qmp_usbc_exit(struct phy *phy)
+{
+ struct qmp_usbc *qmp = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ reset_control_bulk_assert(qmp->num_resets, qmp->resets);
+
+ clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks);
+
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ return 0;
+}
+
+static int qmp_usbc_power_on(struct phy *phy)
+{
+ struct qmp_usbc *qmp = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *status;
+ unsigned int val;
+ int ret;
+
+ qmp_configure(qmp->serdes, cfg->serdes_tbl, cfg->serdes_tbl_num);
+
+ ret = clk_prepare_enable(qmp->pipe_clk);
+ if (ret) {
+ dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
+ return ret;
+ }
+
+ /* Tx, Rx, and PCS configurations */
+ qmp_configure_lane(qmp->tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_configure_lane(qmp->rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
+
+ qmp_configure_lane(qmp->tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ qmp_configure_lane(qmp->rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2);
+
+ qmp_configure(qmp->pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+
+ /* Pull PHY out of reset state */
+ qphy_clrbits(qmp->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(qmp->pcs, cfg->regs[QPHY_START_CTRL], SERDES_START | PCS_START);
+
+ status = qmp->pcs + cfg->regs[QPHY_PCS_STATUS];
+ ret = readl_poll_timeout(status, val, !(val & PHYSTATUS), 200,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+ goto err_disable_pipe_clk;
+ }
+
+ return 0;
+
+err_disable_pipe_clk:
+ clk_disable_unprepare(qmp->pipe_clk);
+
+ return ret;
+}
+
+static int qmp_usbc_power_off(struct phy *phy)
+{
+ struct qmp_usbc *qmp = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ clk_disable_unprepare(qmp->pipe_clk);
+
+ /* PHY reset */
+ qphy_setbits(qmp->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* stop SerDes and Phy-Coding-Sublayer */
+ qphy_clrbits(qmp->pcs, cfg->regs[QPHY_START_CTRL],
+ SERDES_START | PCS_START);
+
+ /* Put PHY into POWER DOWN state: active low */
+ qphy_clrbits(qmp->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+
+ return 0;
+}
+
+static int qmp_usbc_enable(struct phy *phy)
+{
+ struct qmp_usbc *qmp = phy_get_drvdata(phy);
+ int ret;
+
+ mutex_lock(&qmp->phy_mutex);
+
+ ret = qmp_usbc_init(phy);
+ if (ret)
+ goto out_unlock;
+
+ ret = qmp_usbc_power_on(phy);
+ if (ret) {
+ qmp_usbc_exit(phy);
+ goto out_unlock;
+ }
+
+ qmp->usb_init_count++;
+out_unlock:
+ mutex_unlock(&qmp->phy_mutex);
+
+ return ret;
+}
+
+static int qmp_usbc_disable(struct phy *phy)
+{
+ struct qmp_usbc *qmp = phy_get_drvdata(phy);
+ int ret;
+
+ qmp->usb_init_count--;
+ ret = qmp_usbc_power_off(phy);
+ if (ret)
+ return ret;
+ return qmp_usbc_exit(phy);
+}
+
+static int qmp_usbc_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+ struct qmp_usbc *qmp = phy_get_drvdata(phy);
+
+ qmp->mode = mode;
+
+ return 0;
+}
+
+static const struct phy_ops qmp_usbc_phy_ops = {
+ .init = qmp_usbc_enable,
+ .exit = qmp_usbc_disable,
+ .set_mode = qmp_usbc_set_mode,
+ .owner = THIS_MODULE,
+};
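For context, these phy_ops hook the driver into the generic PHY framework, so a USB controller glue driver would bring the PHY up along these lines. This is only a hedged consumer-side sketch; the device, the "usb3-phy" con_id and the chosen mode are illustrative assumptions, not taken from this patch.

#include <linux/phy/phy.h>

static int example_glue_enable_ss_phy(struct device *dev)
{
	struct phy *ss_phy;
	int ret;

	/* "usb3-phy" is a hypothetical phy-names entry in the consumer's DT node */
	ss_phy = devm_phy_get(dev, "usb3-phy");
	if (IS_ERR(ss_phy))
		return PTR_ERR(ss_phy);

	/* lands in qmp_usbc_enable(): regulators, resets, clocks, table programming */
	ret = phy_init(ss_phy);
	if (ret)
		return ret;

	/* stores the mode later used to pick wakeup interrupts on runtime suspend */
	ret = phy_set_mode(ss_phy, PHY_MODE_USB_HOST_SS);
	if (ret)
		phy_exit(ss_phy);

	return ret;
}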
+
+static void qmp_usbc_enable_autonomous_mode(struct qmp_usbc *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qmp->pcs;
+ u32 intr_mask;
+
+ if (qmp->mode == PHY_MODE_USB_HOST_SS ||
+ qmp->mode == PHY_MODE_USB_DEVICE_SS)
+ intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN;
+ else
+ intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL;
+
+ /* Clear any pending interrupts status */
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ALFPS_DTCT_EN | ARCVR_DTCT_EVENT_SEL);
+
+ /* Enable required PHY autonomous mode interrupts */
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL], intr_mask);
+
+ /* Enable i/o clamp_n for autonomous mode */
+ if (qmp->tcsr_map && qmp->vls_clamp_reg)
+ regmap_write(qmp->tcsr_map, qmp->vls_clamp_reg, 1);
+}
+
+static void qmp_usbc_disable_autonomous_mode(struct qmp_usbc *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qmp->pcs;
+
+ /* Disable i/o clamp_n on resume for normal mode */
+ if (qmp->tcsr_map && qmp->vls_clamp_reg)
+ regmap_write(qmp->tcsr_map, qmp->vls_clamp_reg, 0);
+
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL | ALFPS_DTCT_EN);
+
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+}
+
+static int __maybe_unused qmp_usbc_runtime_suspend(struct device *dev)
+{
+ struct qmp_usbc *qmp = dev_get_drvdata(dev);
+
+ dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->mode);
+
+ if (!qmp->phy->init_count) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ qmp_usbc_enable_autonomous_mode(qmp);
+
+ clk_disable_unprepare(qmp->pipe_clk);
+ clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks);
+
+ return 0;
+}
+
+static int __maybe_unused qmp_usbc_runtime_resume(struct device *dev)
+{
+ struct qmp_usbc *qmp = dev_get_drvdata(dev);
+ int ret = 0;
+
+ dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->mode);
+
+ if (!qmp->phy->init_count) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ ret = clk_bulk_prepare_enable(qmp->num_clks, qmp->clks);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(qmp->pipe_clk);
+ if (ret) {
+ dev_err(dev, "pipe_clk enable failed, err=%d\n", ret);
+ clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks);
+ return ret;
+ }
+
+ qmp_usbc_disable_autonomous_mode(qmp);
+
+ return 0;
+}
+
+static const struct dev_pm_ops qmp_usbc_pm_ops = {
+ SET_RUNTIME_PM_OPS(qmp_usbc_runtime_suspend,
+ qmp_usbc_runtime_resume, NULL)
+};
+
+static int qmp_usbc_vreg_init(struct qmp_usbc *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ struct device *dev = qmp->dev;
+ int num = cfg->num_vregs;
+ int i;
+
+ qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
+ if (!qmp->vregs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->vregs[i].supply = cfg->vreg_list[i];
+
+ return devm_regulator_bulk_get(dev, num, qmp->vregs);
+}
+
+static int qmp_usbc_reset_init(struct qmp_usbc *qmp,
+ const char *const *reset_list,
+ int num_resets)
+{
+ struct device *dev = qmp->dev;
+ int i;
+ int ret;
+
+ qmp->resets = devm_kcalloc(dev, num_resets,
+ sizeof(*qmp->resets), GFP_KERNEL);
+ if (!qmp->resets)
+ return -ENOMEM;
+
+ for (i = 0; i < num_resets; i++)
+ qmp->resets[i].id = reset_list[i];
+
+ qmp->num_resets = num_resets;
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, num_resets, qmp->resets);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get resets\n");
+
+ return 0;
+}
+
+static int qmp_usbc_clk_init(struct qmp_usbc *qmp)
+{
+ struct device *dev = qmp->dev;
+ int num = ARRAY_SIZE(qmp_usbc_phy_clk_l);
+ int i;
+
+ qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
+ if (!qmp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->clks[i].id = qmp_usbc_phy_clk_l[i];
+
+ qmp->num_clks = num;
+
+ return devm_clk_bulk_get_optional(dev, num, qmp->clks);
+}
+
+static void phy_clk_release_provider(void *res)
+{
+ of_clk_del_provider(res);
+}
+
+/*
+ * Register a fixed rate pipe clock.
+ *
+ * The <s>_pipe_clksrc generated by the PHY goes to the GCC, which gates
+ * it. The <s>_pipe_clk coming out of the GCC is then requested by the
+ * PHY driver for its own operations.
+ * We register the <s>_pipe_clksrc here; the GCC driver takes care of
+ * assigning this <s>_pipe_clksrc as the parent of <s>_pipe_clk.
+ * The picture below shows this relationship.
+ *
+ * +---------------+
+ * | PHY block |<<---------------------------------------+
+ * | | |
+ * | +-------+ | +-----+ |
+ * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
+ * clk | +-------+ | +-----+
+ * +---------------+
+ */
+static int phy_pipe_clk_register(struct qmp_usbc *qmp, struct device_node *np)
+{
+ struct clk_fixed_rate *fixed = &qmp->pipe_clk_fixed;
+ struct clk_init_data init = { };
+ int ret;
+
+ ret = of_property_read_string(np, "clock-output-names", &init.name);
+ if (ret) {
+ dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np);
+ return ret;
+ }
+
+ init.ops = &clk_fixed_rate_ops;
+
+ /* controllers using QMP PHYs use a 125 MHz pipe clock interface */
+ fixed->fixed_rate = 125000000;
+ fixed->hw.init = &init;
+
+ ret = devm_clk_hw_register(qmp->dev, &fixed->hw);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &fixed->hw);
+ if (ret)
+ return ret;
+
+ /*
+ * Roll a devm action because the clock provider is the child node, but
+ * the child node is not actually a device.
+ */
+ return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
+}
+
+#if IS_ENABLED(CONFIG_TYPEC)
+static int qmp_usbc_typec_switch_set(struct typec_switch_dev *sw,
+ enum typec_orientation orientation)
+{
+ struct qmp_usbc *qmp = typec_switch_get_drvdata(sw);
+
+ if (orientation == qmp->orientation || orientation == TYPEC_ORIENTATION_NONE)
+ return 0;
+
+ mutex_lock(&qmp->phy_mutex);
+ qmp->orientation = orientation;
+
+ if (qmp->usb_init_count) {
+ qmp_usbc_power_off(qmp->phy);
+ qmp_usbc_exit(qmp->phy);
+
+ qmp_usbc_init(qmp->phy);
+ qmp_usbc_power_on(qmp->phy);
+ }
+
+ mutex_unlock(&qmp->phy_mutex);
+
+ return 0;
+}
+
+static void qmp_usbc_typec_unregister(void *data)
+{
+ struct qmp_usbc *qmp = data;
+
+ typec_switch_unregister(qmp->sw);
+}
+
+static int qmp_usbc_typec_switch_register(struct qmp_usbc *qmp)
+{
+ struct typec_switch_desc sw_desc = {};
+ struct device *dev = qmp->dev;
+
+ sw_desc.drvdata = qmp;
+ sw_desc.fwnode = dev->fwnode;
+ sw_desc.set = qmp_usbc_typec_switch_set;
+ qmp->sw = typec_switch_register(dev, &sw_desc);
+ if (IS_ERR(qmp->sw)) {
+ dev_err(dev, "Unable to register typec switch: %pe\n", qmp->sw);
+ return PTR_ERR(qmp->sw);
+ }
+
+ return devm_add_action_or_reset(dev, qmp_usbc_typec_unregister, qmp);
+}
+#else
+static int qmp_usbc_typec_switch_register(struct qmp_usbc *qmp)
+{
+ return 0;
+}
+#endif
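The switch registered above is meant to be driven by whatever Type-C port controller sits upstream of the PHY; TCPM does this internally once the orientation-switch graph points here. A hedged sketch of that consumer side, with simplified lookup and error handling, could look like:

#include <linux/usb/typec.h>
#include <linux/usb/typec_mux.h>

static int example_notify_orientation(struct device *tcpc_dev, bool reversed)
{
	struct typec_switch *sw;
	int ret;

	/* would resolve to qmp_usbc_typec_switch_set() if the graph targets this PHY */
	sw = fwnode_typec_switch_get(dev_fwnode(tcpc_dev));
	if (IS_ERR(sw))
		return PTR_ERR(sw);

	ret = typec_switch_set(sw, reversed ? TYPEC_ORIENTATION_REVERSE :
					      TYPEC_ORIENTATION_NORMAL);

	typec_switch_put(sw);
	return ret;
}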
+
+static int qmp_usbc_parse_dt_legacy(struct qmp_usbc *qmp, struct device_node *np)
+{
+ struct platform_device *pdev = to_platform_device(qmp->dev);
+ struct device *dev = qmp->dev;
+ int ret;
+
+ qmp->serdes = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(qmp->serdes))
+ return PTR_ERR(qmp->serdes);
+
+ /*
+ * Get memory resources for the PHY:
+ * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
+ * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
+ * For single lane PHYs: pcs_misc (optional) -> 3.
+ */
+ qmp->tx = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(qmp->tx))
+ return PTR_ERR(qmp->tx);
+
+ qmp->rx = devm_of_iomap(dev, np, 1, NULL);
+ if (IS_ERR(qmp->rx))
+ return PTR_ERR(qmp->rx);
+
+ qmp->pcs = devm_of_iomap(dev, np, 2, NULL);
+ if (IS_ERR(qmp->pcs))
+ return PTR_ERR(qmp->pcs);
+
+ qmp->tx2 = devm_of_iomap(dev, np, 3, NULL);
+ if (IS_ERR(qmp->tx2))
+ return PTR_ERR(qmp->tx2);
+
+ qmp->rx2 = devm_of_iomap(dev, np, 4, NULL);
+ if (IS_ERR(qmp->rx2))
+ return PTR_ERR(qmp->rx2);
+
+ qmp->pcs_misc = devm_of_iomap(dev, np, 5, NULL);
+ if (IS_ERR(qmp->pcs_misc)) {
+ dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+ qmp->pcs_misc = NULL;
+ }
+
+ qmp->pipe_clk = devm_get_clk_from_child(dev, np, NULL);
+ if (IS_ERR(qmp->pipe_clk)) {
+ return dev_err_probe(dev, PTR_ERR(qmp->pipe_clk),
+ "failed to get pipe clock\n");
+ }
+
+ ret = devm_clk_bulk_get_all(qmp->dev, &qmp->clks);
+ if (ret < 0)
+ return ret;
+
+ qmp->num_clks = ret;
+
+ ret = qmp_usbc_reset_init(qmp, usb3phy_legacy_reset_l,
+ ARRAY_SIZE(usb3phy_legacy_reset_l));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int qmp_usbc_parse_dt(struct qmp_usbc *qmp)
+{
+ struct platform_device *pdev = to_platform_device(qmp->dev);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ const struct qmp_usbc_offsets *offs = cfg->offsets;
+ struct device *dev = qmp->dev;
+ void __iomem *base;
+ int ret;
+
+ if (!offs)
+ return -EINVAL;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ qmp->serdes = base + offs->serdes;
+ qmp->pcs = base + offs->pcs;
+ if (offs->pcs_misc)
+ qmp->pcs_misc = base + offs->pcs_misc;
+ qmp->tx = base + offs->tx;
+ qmp->rx = base + offs->rx;
+
+ qmp->tx2 = base + offs->tx2;
+ qmp->rx2 = base + offs->rx2;
+
+ ret = qmp_usbc_clk_init(qmp);
+ if (ret)
+ return ret;
+
+ qmp->pipe_clk = devm_clk_get(dev, "pipe");
+ if (IS_ERR(qmp->pipe_clk)) {
+ return dev_err_probe(dev, PTR_ERR(qmp->pipe_clk),
+ "failed to get pipe clock\n");
+ }
+
+ ret = qmp_usbc_reset_init(qmp, usb3phy_reset_l,
+ ARRAY_SIZE(usb3phy_reset_l));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int qmp_usbc_parse_vls_clamp(struct qmp_usbc *qmp)
+{
+ struct of_phandle_args tcsr_args;
+ struct device *dev = qmp->dev;
+ int ret;
+
+ /* For backwards compatibility, ignore a missing property. */
+ ret = of_parse_phandle_with_fixed_args(dev->of_node, "qcom,tcsr-reg", 1, 0,
+ &tcsr_args);
+ if (ret == -ENOENT)
+ return 0;
+ else if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to parse qcom,tcsr-reg\n");
+
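+ /* The single phandle argument is the offset of the VLS clamp register within the TCSR syscon. */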
+ qmp->tcsr_map = syscon_node_to_regmap(tcsr_args.np);
+ of_node_put(tcsr_args.np);
+ if (IS_ERR(qmp->tcsr_map))
+ return PTR_ERR(qmp->tcsr_map);
+
+ qmp->vls_clamp_reg = tcsr_args.args[0];
+
+ return 0;
+}
+
+static int qmp_usbc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct device_node *np;
+ struct qmp_usbc *qmp;
+ int ret;
+
+ qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL);
+ if (!qmp)
+ return -ENOMEM;
+
+ qmp->dev = dev;
+
+ qmp->orientation = TYPEC_ORIENTATION_NORMAL;
+
+ qmp->cfg = of_device_get_match_data(dev);
+ if (!qmp->cfg)
+ return -EINVAL;
+
+ mutex_init(&qmp->phy_mutex);
+
+ ret = qmp_usbc_vreg_init(qmp);
+ if (ret)
+ return ret;
+
+ ret = qmp_usbc_typec_switch_register(qmp);
+ if (ret)
+ return ret;
+
+ ret = qmp_usbc_parse_vls_clamp(qmp);
+ if (ret)
+ return ret;
+
+ /* Check for legacy binding with child node. */
+ np = of_get_child_by_name(dev->of_node, "phy");
+ if (np) {
+ ret = qmp_usbc_parse_dt_legacy(qmp, np);
+ } else {
+ np = of_node_get(dev->of_node);
+ ret = qmp_usbc_parse_dt(qmp);
+ }
+ if (ret)
+ goto err_node_put;
+
+ pm_runtime_set_active(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ goto err_node_put;
+ /*
+ * Prevent runtime pm from being ON by default. Users can enable
+ * it using power/control in sysfs.
+ */
+ pm_runtime_forbid(dev);
+
+ ret = phy_pipe_clk_register(qmp, np);
+ if (ret)
+ goto err_node_put;
+
+ qmp->phy = devm_phy_create(dev, np, &qmp_usbc_phy_ops);
+ if (IS_ERR(qmp->phy)) {
+ ret = PTR_ERR(qmp->phy);
+ dev_err(dev, "failed to create PHY: %d\n", ret);
+ goto err_node_put;
+ }
+
+ phy_set_drvdata(qmp->phy, qmp);
+
+ of_node_put(np);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+
+err_node_put:
+ of_node_put(np);
+ return ret;
+}
+
+static const struct of_device_id qmp_usbc_of_match_table[] = {
+ {
+ .compatible = "qcom,msm8998-qmp-usb3-phy",
+ .data = &msm8998_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,qcm2290-qmp-usb3-phy",
+ .data = &qcm2290_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sdm660-qmp-usb3-phy",
+ .data = &sdm660_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm6115-qmp-usb3-phy",
+ .data = &qcm2290_usb3phy_cfg,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qmp_usbc_of_match_table);
+
+static struct platform_driver qmp_usbc_driver = {
+ .probe = qmp_usbc_probe,
+ .driver = {
+ .name = "qcom-qmp-usbc-phy",
+ .pm = &qmp_usbc_pm_ops,
+ .of_match_table = qmp_usbc_of_match_table,
+ },
+};
+
+module_platform_driver(qmp_usbc_driver);
+
+MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm QMP USB-C PHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 6923496cbfee2..d10b8f653c4b2 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -50,92 +50,29 @@
#include "phy-qcom-qmp-pcs-v7.h"
-/* Only for QMP V3 & V4 PHY - DP COM registers */
-#define QPHY_V3_DP_COM_PHY_MODE_CTRL 0x00
-#define QPHY_V3_DP_COM_SW_RESET 0x04
-#define QPHY_V3_DP_COM_POWER_DOWN_CTRL 0x08
-#define QPHY_V3_DP_COM_SWI_CTRL 0x0c
-#define QPHY_V3_DP_COM_TYPEC_CTRL 0x10
-#define QPHY_V3_DP_COM_TYPEC_PWRDN_CTRL 0x14
-#define QPHY_V3_DP_COM_RESET_OVRD_CTRL 0x1c
-
-/* QSERDES V3 COM bits */
-# define QSERDES_V3_COM_BIAS_EN 0x0001
-# define QSERDES_V3_COM_BIAS_EN_MUX 0x0002
-# define QSERDES_V3_COM_CLKBUF_R_EN 0x0004
-# define QSERDES_V3_COM_CLKBUF_L_EN 0x0008
-# define QSERDES_V3_COM_EN_SYSCLK_TX_SEL 0x0010
-# define QSERDES_V3_COM_CLKBUF_RX_DRIVE_L 0x0020
-# define QSERDES_V3_COM_CLKBUF_RX_DRIVE_R 0x0040
-
-/* QSERDES V3 TX bits */
-# define DP_PHY_TXn_TX_EMP_POST1_LVL_MASK 0x001f
-# define DP_PHY_TXn_TX_EMP_POST1_LVL_MUX_EN 0x0020
-# define DP_PHY_TXn_TX_DRV_LVL_MASK 0x001f
-# define DP_PHY_TXn_TX_DRV_LVL_MUX_EN 0x0020
-
-/* QMP PHY - DP PHY registers */
-#define QSERDES_DP_PHY_REVISION_ID0 0x000
-#define QSERDES_DP_PHY_REVISION_ID1 0x004
-#define QSERDES_DP_PHY_REVISION_ID2 0x008
-#define QSERDES_DP_PHY_REVISION_ID3 0x00c
-#define QSERDES_DP_PHY_CFG 0x010
-#define QSERDES_DP_PHY_PD_CTL 0x018
-# define DP_PHY_PD_CTL_PWRDN 0x001
-# define DP_PHY_PD_CTL_PSR_PWRDN 0x002
-# define DP_PHY_PD_CTL_AUX_PWRDN 0x004
-# define DP_PHY_PD_CTL_LANE_0_1_PWRDN 0x008
-# define DP_PHY_PD_CTL_LANE_2_3_PWRDN 0x010
-# define DP_PHY_PD_CTL_PLL_PWRDN 0x020
-# define DP_PHY_PD_CTL_DP_CLAMP_EN 0x040
-#define QSERDES_DP_PHY_MODE 0x01c
-#define QSERDES_DP_PHY_AUX_CFG0 0x020
-#define QSERDES_DP_PHY_AUX_CFG1 0x024
-#define QSERDES_DP_PHY_AUX_CFG2 0x028
-#define QSERDES_DP_PHY_AUX_CFG3 0x02c
-#define QSERDES_DP_PHY_AUX_CFG4 0x030
-#define QSERDES_DP_PHY_AUX_CFG5 0x034
-#define QSERDES_DP_PHY_AUX_CFG6 0x038
-#define QSERDES_DP_PHY_AUX_CFG7 0x03c
-#define QSERDES_DP_PHY_AUX_CFG8 0x040
-#define QSERDES_DP_PHY_AUX_CFG9 0x044
-
-/* Only for QMP V3 PHY - DP PHY registers */
-#define QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK 0x048
-# define PHY_AUX_STOP_ERR_MASK 0x01
-# define PHY_AUX_DEC_ERR_MASK 0x02
-# define PHY_AUX_SYNC_ERR_MASK 0x04
-# define PHY_AUX_ALIGN_ERR_MASK 0x08
-# define PHY_AUX_REQ_ERR_MASK 0x10
-
-#define QSERDES_V3_DP_PHY_AUX_INTERRUPT_CLEAR 0x04c
-#define QSERDES_V3_DP_PHY_AUX_BIST_CFG 0x050
-
-#define QSERDES_V3_DP_PHY_VCO_DIV 0x064
-#define QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL 0x06c
-#define QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL 0x088
-
-#define QSERDES_V3_DP_PHY_SPARE0 0x0ac
-#define DP_PHY_SPARE0_MASK 0x0f
-#define DP_PHY_SPARE0_ORIENTATION_INFO_SHIFT 0x04(0x0004)
-
-#define QSERDES_V3_DP_PHY_STATUS 0x0c0
-
-/* Only for QMP V4 PHY - DP PHY registers */
-#define QSERDES_V4_DP_PHY_CFG_1 0x014
-#define QSERDES_V4_DP_PHY_AUX_INTERRUPT_MASK 0x054
-#define QSERDES_V4_DP_PHY_AUX_INTERRUPT_CLEAR 0x058
-#define QSERDES_V4_DP_PHY_VCO_DIV 0x070
-#define QSERDES_V4_DP_PHY_TX0_TX1_LANE_CTL 0x078
-#define QSERDES_V4_DP_PHY_TX2_TX3_LANE_CTL 0x09c
-#define QSERDES_V4_DP_PHY_SPARE0 0x0c8
-#define QSERDES_V4_DP_PHY_AUX_INTERRUPT_STATUS 0x0d8
-#define QSERDES_V4_DP_PHY_STATUS 0x0dc
-
-#define QSERDES_V5_DP_PHY_STATUS 0x0dc
-
-/* Only for QMP V6 PHY - DP PHY registers */
-#define QSERDES_V6_DP_PHY_AUX_INTERRUPT_STATUS 0x0e0
-#define QSERDES_V6_DP_PHY_STATUS 0x0e4
+/* QPHY_SW_RESET bit */
+#define SW_RESET BIT(0)
+/* QPHY_POWER_DOWN_CONTROL */
+#define SW_PWRDN BIT(0)
+#define REFCLK_DRV_DSBL BIT(1) /* PCIe */
+
+/* QPHY_START_CONTROL bits */
+#define SERDES_START BIT(0)
+#define PCS_START BIT(1)
+
+/* QPHY_PCS_STATUS bit */
+#define PHYSTATUS BIT(6)
+#define PHYSTATUS_4_20 BIT(7)
+
+/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
+#define ARCVR_DTCT_EN BIT(0)
+#define ALFPS_DTCT_EN BIT(1)
+#define ARCVR_DTCT_EVENT_SEL BIT(4)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
+#define IRQ_CLEAR BIT(0)
+
+/* QPHY_PCS_MISC_CLAMP_ENABLE register bits */
+#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-sgmii-eth.c b/drivers/phy/qualcomm/phy-qcom-sgmii-eth.c
index 03dc753f0de1f..5b1c82459c126 100644
--- a/drivers/phy/qualcomm/phy-qcom-sgmii-eth.c
+++ b/drivers/phy/qualcomm/phy-qcom-sgmii-eth.c
@@ -11,93 +11,14 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#define QSERDES_QMP_PLL 0x0
-#define QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0 (QSERDES_QMP_PLL + 0x1ac)
-#define QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0 (QSERDES_QMP_PLL + 0x1b0)
-#define QSERDES_COM_BIN_VCOCAL_HSCLK_SEL (QSERDES_QMP_PLL + 0x1bc)
-#define QSERDES_COM_CORE_CLK_EN (QSERDES_QMP_PLL + 0x174)
-#define QSERDES_COM_CORECLK_DIV_MODE0 (QSERDES_QMP_PLL + 0x168)
-#define QSERDES_COM_CP_CTRL_MODE0 (QSERDES_QMP_PLL + 0x74)
-#define QSERDES_COM_DEC_START_MODE0 (QSERDES_QMP_PLL + 0xbc)
-#define QSERDES_COM_DIV_FRAC_START1_MODE0 (QSERDES_QMP_PLL + 0xcc)
-#define QSERDES_COM_DIV_FRAC_START2_MODE0 (QSERDES_QMP_PLL + 0xd0)
-#define QSERDES_COM_DIV_FRAC_START3_MODE0 (QSERDES_QMP_PLL + 0xd4)
-#define QSERDES_COM_HSCLK_HS_SWITCH_SEL (QSERDES_QMP_PLL + 0x15c)
-#define QSERDES_COM_HSCLK_SEL (QSERDES_QMP_PLL + 0x158)
-#define QSERDES_COM_LOCK_CMP1_MODE0 (QSERDES_QMP_PLL + 0xac)
-#define QSERDES_COM_LOCK_CMP2_MODE0 (QSERDES_QMP_PLL + 0xb0)
-#define QSERDES_COM_PLL_CCTRL_MODE0 (QSERDES_QMP_PLL + 0x84)
-#define QSERDES_COM_PLL_IVCO (QSERDES_QMP_PLL + 0x58)
-#define QSERDES_COM_PLL_RCTRL_MODE0 (QSERDES_QMP_PLL + 0x7c)
-#define QSERDES_COM_SYSCLK_EN_SEL (QSERDES_QMP_PLL + 0x94)
-#define QSERDES_COM_VCO_TUNE1_MODE0 (QSERDES_QMP_PLL + 0x110)
-#define QSERDES_COM_VCO_TUNE2_MODE0 (QSERDES_QMP_PLL + 0x114)
-#define QSERDES_COM_VCO_TUNE_INITVAL2 (QSERDES_QMP_PLL + 0x124)
-#define QSERDES_COM_C_READY_STATUS (QSERDES_QMP_PLL + 0x178)
-#define QSERDES_COM_CMN_STATUS (QSERDES_QMP_PLL + 0x140)
+#include "phy-qcom-qmp-pcs-sgmii.h"
+#include "phy-qcom-qmp-qserdes-com-v5.h"
+#include "phy-qcom-qmp-qserdes-txrx-v5.h"
+#define QSERDES_QMP_PLL 0x0
#define QSERDES_RX 0x600
-#define QSERDES_RX_UCDR_FO_GAIN (QSERDES_RX + 0x8)
-#define QSERDES_RX_UCDR_SO_GAIN (QSERDES_RX + 0x14)
-#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN (QSERDES_RX + 0x30)
-#define QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE (QSERDES_RX + 0x34)
-#define QSERDES_RX_UCDR_FASTLOCK_COUNT_LOW (QSERDES_RX + 0x3c)
-#define QSERDES_RX_UCDR_FASTLOCK_COUNT_HIGH (QSERDES_RX + 0x40)
-#define QSERDES_RX_UCDR_PI_CONTROLS (QSERDES_RX + 0x44)
-#define QSERDES_RX_UCDR_PI_CTRL2 (QSERDES_RX + 0x48)
-#define QSERDES_RX_RX_TERM_BW (QSERDES_RX + 0x80)
-#define QSERDES_RX_VGA_CAL_CNTRL2 (QSERDES_RX + 0xd8)
-#define QSERDES_RX_GM_CAL (QSERDES_RX + 0xdc)
-#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL1 (QSERDES_RX + 0xe8)
-#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 (QSERDES_RX + 0xec)
-#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3 (QSERDES_RX + 0xf0)
-#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4 (QSERDES_RX + 0xf4)
-#define QSERDES_RX_RX_IDAC_TSETTLE_LOW (QSERDES_RX + 0xf8)
-#define QSERDES_RX_RX_IDAC_TSETTLE_HIGH (QSERDES_RX + 0xfc)
-#define QSERDES_RX_RX_IDAC_MEASURE_TIME (QSERDES_RX + 0x100)
-#define QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 (QSERDES_RX + 0x110)
-#define QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2 (QSERDES_RX + 0x114)
-#define QSERDES_RX_SIGDET_CNTRL (QSERDES_RX + 0x11c)
-#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL (QSERDES_RX + 0x124)
-#define QSERDES_RX_RX_BAND (QSERDES_RX + 0x128)
-#define QSERDES_RX_RX_MODE_00_LOW (QSERDES_RX + 0x15c)
-#define QSERDES_RX_RX_MODE_00_HIGH (QSERDES_RX + 0x160)
-#define QSERDES_RX_RX_MODE_00_HIGH2 (QSERDES_RX + 0x164)
-#define QSERDES_RX_RX_MODE_00_HIGH3 (QSERDES_RX + 0x168)
-#define QSERDES_RX_RX_MODE_00_HIGH4 (QSERDES_RX + 0x16c)
-#define QSERDES_RX_RX_MODE_01_LOW (QSERDES_RX + 0x170)
-#define QSERDES_RX_RX_MODE_01_HIGH (QSERDES_RX + 0x174)
-#define QSERDES_RX_RX_MODE_01_HIGH2 (QSERDES_RX + 0x178)
-#define QSERDES_RX_RX_MODE_01_HIGH3 (QSERDES_RX + 0x17c)
-#define QSERDES_RX_RX_MODE_01_HIGH4 (QSERDES_RX + 0x180)
-#define QSERDES_RX_RX_MODE_10_LOW (QSERDES_RX + 0x184)
-#define QSERDES_RX_RX_MODE_10_HIGH (QSERDES_RX + 0x188)
-#define QSERDES_RX_RX_MODE_10_HIGH2 (QSERDES_RX + 0x18c)
-#define QSERDES_RX_RX_MODE_10_HIGH3 (QSERDES_RX + 0x190)
-#define QSERDES_RX_RX_MODE_10_HIGH4 (QSERDES_RX + 0x194)
-#define QSERDES_RX_DCC_CTRL1 (QSERDES_RX + 0x1a8)
-
#define QSERDES_TX 0x400
-#define QSERDES_TX_TX_BAND (QSERDES_TX + 0x24)
-#define QSERDES_TX_SLEW_CNTL (QSERDES_TX + 0x28)
-#define QSERDES_TX_RES_CODE_LANE_OFFSET_TX (QSERDES_TX + 0x3c)
-#define QSERDES_TX_RES_CODE_LANE_OFFSET_RX (QSERDES_TX + 0x40)
-#define QSERDES_TX_LANE_MODE_1 (QSERDES_TX + 0x84)
-#define QSERDES_TX_LANE_MODE_3 (QSERDES_TX + 0x8c)
-#define QSERDES_TX_RCV_DETECT_LVL_2 (QSERDES_TX + 0xa4)
-#define QSERDES_TX_TRAN_DRVR_EMP_EN (QSERDES_TX + 0xc0)
-
-#define QSERDES_PCS 0xC00
-#define QSERDES_PCS_PHY_START (QSERDES_PCS + 0x0)
-#define QSERDES_PCS_POWER_DOWN_CONTROL (QSERDES_PCS + 0x4)
-#define QSERDES_PCS_SW_RESET (QSERDES_PCS + 0x8)
-#define QSERDES_PCS_LINE_RESET_TIME (QSERDES_PCS + 0xc)
-#define QSERDES_PCS_TX_LARGE_AMP_DRV_LVL (QSERDES_PCS + 0x20)
-#define QSERDES_PCS_TX_SMALL_AMP_DRV_LVL (QSERDES_PCS + 0x28)
-#define QSERDES_PCS_TX_MID_TERM_CTRL1 (QSERDES_PCS + 0xd8)
-#define QSERDES_PCS_TX_MID_TERM_CTRL2 (QSERDES_PCS + 0xdc)
-#define QSERDES_PCS_SGMII_MISC_CTRL8 (QSERDES_PCS + 0x118)
-#define QSERDES_PCS_PCS_READY_STATUS (QSERDES_PCS + 0x94)
+#define QSERDES_PCS 0xc00
#define QSERDES_COM_C_READY BIT(0)
#define QSERDES_PCS_READY BIT(0)
@@ -112,178 +33,178 @@ struct qcom_dwmac_sgmii_phy_data {
static void qcom_dwmac_sgmii_phy_init_1g(struct regmap *regmap)
{
- regmap_write(regmap, QSERDES_PCS_SW_RESET, 0x01);
- regmap_write(regmap, QSERDES_PCS_POWER_DOWN_CONTROL, 0x01);
-
- regmap_write(regmap, QSERDES_COM_PLL_IVCO, 0x0F);
- regmap_write(regmap, QSERDES_COM_CP_CTRL_MODE0, 0x06);
- regmap_write(regmap, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
- regmap_write(regmap, QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
- regmap_write(regmap, QSERDES_COM_SYSCLK_EN_SEL, 0x1A);
- regmap_write(regmap, QSERDES_COM_LOCK_CMP1_MODE0, 0x0A);
- regmap_write(regmap, QSERDES_COM_LOCK_CMP2_MODE0, 0x1A);
- regmap_write(regmap, QSERDES_COM_DEC_START_MODE0, 0x82);
- regmap_write(regmap, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
- regmap_write(regmap, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
- regmap_write(regmap, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
- regmap_write(regmap, QSERDES_COM_VCO_TUNE1_MODE0, 0x24);
-
- regmap_write(regmap, QSERDES_COM_VCO_TUNE2_MODE0, 0x02);
- regmap_write(regmap, QSERDES_COM_VCO_TUNE_INITVAL2, 0x00);
- regmap_write(regmap, QSERDES_COM_HSCLK_SEL, 0x04);
- regmap_write(regmap, QSERDES_COM_HSCLK_HS_SWITCH_SEL, 0x00);
- regmap_write(regmap, QSERDES_COM_CORECLK_DIV_MODE0, 0x0A);
- regmap_write(regmap, QSERDES_COM_CORE_CLK_EN, 0x00);
- regmap_write(regmap, QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xB9);
- regmap_write(regmap, QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1E);
- regmap_write(regmap, QSERDES_COM_BIN_VCOCAL_HSCLK_SEL, 0x11);
-
- regmap_write(regmap, QSERDES_TX_TX_BAND, 0x05);
- regmap_write(regmap, QSERDES_TX_SLEW_CNTL, 0x0A);
- regmap_write(regmap, QSERDES_TX_RES_CODE_LANE_OFFSET_TX, 0x09);
- regmap_write(regmap, QSERDES_TX_RES_CODE_LANE_OFFSET_RX, 0x09);
- regmap_write(regmap, QSERDES_TX_LANE_MODE_1, 0x05);
- regmap_write(regmap, QSERDES_TX_LANE_MODE_3, 0x00);
- regmap_write(regmap, QSERDES_TX_RCV_DETECT_LVL_2, 0x12);
- regmap_write(regmap, QSERDES_TX_TRAN_DRVR_EMP_EN, 0x0C);
-
- regmap_write(regmap, QSERDES_RX_UCDR_FO_GAIN, 0x0A);
- regmap_write(regmap, QSERDES_RX_UCDR_SO_GAIN, 0x06);
- regmap_write(regmap, QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0A);
- regmap_write(regmap, QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7F);
- regmap_write(regmap, QSERDES_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00);
- regmap_write(regmap, QSERDES_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x01);
- regmap_write(regmap, QSERDES_RX_UCDR_PI_CONTROLS, 0x81);
- regmap_write(regmap, QSERDES_RX_UCDR_PI_CTRL2, 0x80);
- regmap_write(regmap, QSERDES_RX_RX_TERM_BW, 0x04);
- regmap_write(regmap, QSERDES_RX_VGA_CAL_CNTRL2, 0x08);
- regmap_write(regmap, QSERDES_RX_GM_CAL, 0x0F);
- regmap_write(regmap, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL1, 0x04);
- regmap_write(regmap, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x00);
- regmap_write(regmap, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4A);
- regmap_write(regmap, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0A);
- regmap_write(regmap, QSERDES_RX_RX_IDAC_TSETTLE_LOW, 0x80);
- regmap_write(regmap, QSERDES_RX_RX_IDAC_TSETTLE_HIGH, 0x01);
- regmap_write(regmap, QSERDES_RX_RX_IDAC_MEASURE_TIME, 0x20);
- regmap_write(regmap, QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17);
- regmap_write(regmap, QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00);
- regmap_write(regmap, QSERDES_RX_SIGDET_CNTRL, 0x0F);
- regmap_write(regmap, QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E);
- regmap_write(regmap, QSERDES_RX_RX_BAND, 0x05);
- regmap_write(regmap, QSERDES_RX_RX_MODE_00_LOW, 0xE0);
- regmap_write(regmap, QSERDES_RX_RX_MODE_00_HIGH, 0xC8);
- regmap_write(regmap, QSERDES_RX_RX_MODE_00_HIGH2, 0xC8);
- regmap_write(regmap, QSERDES_RX_RX_MODE_00_HIGH3, 0x09);
- regmap_write(regmap, QSERDES_RX_RX_MODE_00_HIGH4, 0xB1);
- regmap_write(regmap, QSERDES_RX_RX_MODE_01_LOW, 0xE0);
- regmap_write(regmap, QSERDES_RX_RX_MODE_01_HIGH, 0xC8);
- regmap_write(regmap, QSERDES_RX_RX_MODE_01_HIGH2, 0xC8);
- regmap_write(regmap, QSERDES_RX_RX_MODE_01_HIGH3, 0x09);
- regmap_write(regmap, QSERDES_RX_RX_MODE_01_HIGH4, 0xB1);
- regmap_write(regmap, QSERDES_RX_RX_MODE_10_LOW, 0xE0);
- regmap_write(regmap, QSERDES_RX_RX_MODE_10_HIGH, 0xC8);
- regmap_write(regmap, QSERDES_RX_RX_MODE_10_HIGH2, 0xC8);
- regmap_write(regmap, QSERDES_RX_RX_MODE_10_HIGH3, 0x3B);
- regmap_write(regmap, QSERDES_RX_RX_MODE_10_HIGH4, 0xB7);
- regmap_write(regmap, QSERDES_RX_DCC_CTRL1, 0x0C);
-
- regmap_write(regmap, QSERDES_PCS_LINE_RESET_TIME, 0x0C);
- regmap_write(regmap, QSERDES_PCS_TX_LARGE_AMP_DRV_LVL, 0x1F);
- regmap_write(regmap, QSERDES_PCS_TX_SMALL_AMP_DRV_LVL, 0x03);
- regmap_write(regmap, QSERDES_PCS_TX_MID_TERM_CTRL1, 0x83);
- regmap_write(regmap, QSERDES_PCS_TX_MID_TERM_CTRL2, 0x08);
- regmap_write(regmap, QSERDES_PCS_SGMII_MISC_CTRL8, 0x0C);
- regmap_write(regmap, QSERDES_PCS_SW_RESET, 0x00);
-
- regmap_write(regmap, QSERDES_PCS_PHY_START, 0x01);
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_SW_RESET, 0x01);
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_POWER_DOWN_CONTROL, 0x01);
+
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_PLL_IVCO, 0x0F);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_CP_CTRL_MODE0, 0x06);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_SYSCLK_EN_SEL, 0x1A);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0A);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1A);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_DEC_START_MODE0, 0x82);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_VCO_TUNE1_MODE0, 0x24);
+
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_VCO_TUNE2_MODE0, 0x02);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_VCO_TUNE_INITVAL2, 0x00);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_HSCLK_SEL, 0x04);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0A);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_CORE_CLK_EN, 0x00);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xB9);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1E);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL, 0x11);
+
+ regmap_write(regmap, QSERDES_TX + QSERDES_V5_TX_TX_BAND, 0x05);
+ regmap_write(regmap, QSERDES_TX + QSERDES_V5_TX_SLEW_CNTL, 0x0A);
+ regmap_write(regmap, QSERDES_TX + QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x09);
+ regmap_write(regmap, QSERDES_TX + QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x09);
+ regmap_write(regmap, QSERDES_TX + QSERDES_V5_TX_LANE_MODE_1, 0x05);
+ regmap_write(regmap, QSERDES_TX + QSERDES_V5_TX_LANE_MODE_3, 0x00);
+ regmap_write(regmap, QSERDES_TX + QSERDES_V5_TX_RCV_DETECT_LVL_2, 0x12);
+ regmap_write(regmap, QSERDES_TX + QSERDES_V5_TX_TRAN_DRVR_EMP_EN, 0x0C);
+
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_UCDR_FO_GAIN, 0x0A);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_UCDR_SO_GAIN, 0x06);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x0A);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7F);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x01);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x81);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_UCDR_PI_CTRL2, 0x80);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_TERM_BW, 0x04);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x08);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_GM_CAL, 0x0F);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL1, 0x04);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x00);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4A);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0A);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW, 0x80);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH, 0x01);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_IDAC_MEASURE_TIME, 0x20);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_SIGDET_CNTRL, 0x0F);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x1E);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_BAND, 0x05);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_00_LOW, 0xE0);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_00_HIGH, 0xC8);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xC8);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x09);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xB1);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_01_LOW, 0xE0);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_01_HIGH, 0xC8);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xC8);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x09);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xB1);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_10_LOW, 0xE0);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_10_HIGH, 0xC8);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_10_HIGH2, 0xC8);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_10_HIGH3, 0x3B);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_10_HIGH4, 0xB7);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_DCC_CTRL1, 0x0C);
+
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_LINE_RESET_TIME, 0x0C);
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_TX_LARGE_AMP_DRV_LVL, 0x1F);
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_TX_SMALL_AMP_DRV_LVL, 0x03);
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_TX_MID_TERM_CTRL1, 0x83);
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_TX_MID_TERM_CTRL2, 0x08);
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_SGMII_MISC_CTRL8, 0x0C);
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_SW_RESET, 0x00);
+
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_PHY_START, 0x01);
}
static void qcom_dwmac_sgmii_phy_init_2p5g(struct regmap *regmap)
{
- regmap_write(regmap, QSERDES_PCS_SW_RESET, 0x01);
- regmap_write(regmap, QSERDES_PCS_POWER_DOWN_CONTROL, 0x01);
-
- regmap_write(regmap, QSERDES_COM_PLL_IVCO, 0x0F);
- regmap_write(regmap, QSERDES_COM_CP_CTRL_MODE0, 0x06);
- regmap_write(regmap, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
- regmap_write(regmap, QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
- regmap_write(regmap, QSERDES_COM_SYSCLK_EN_SEL, 0x1A);
- regmap_write(regmap, QSERDES_COM_LOCK_CMP1_MODE0, 0x1A);
- regmap_write(regmap, QSERDES_COM_LOCK_CMP2_MODE0, 0x41);
- regmap_write(regmap, QSERDES_COM_DEC_START_MODE0, 0x7A);
- regmap_write(regmap, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
- regmap_write(regmap, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x20);
- regmap_write(regmap, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x01);
- regmap_write(regmap, QSERDES_COM_VCO_TUNE1_MODE0, 0xA1);
-
- regmap_write(regmap, QSERDES_COM_VCO_TUNE2_MODE0, 0x02);
- regmap_write(regmap, QSERDES_COM_VCO_TUNE_INITVAL2, 0x00);
- regmap_write(regmap, QSERDES_COM_HSCLK_SEL, 0x03);
- regmap_write(regmap, QSERDES_COM_HSCLK_HS_SWITCH_SEL, 0x00);
- regmap_write(regmap, QSERDES_COM_CORECLK_DIV_MODE0, 0x05);
- regmap_write(regmap, QSERDES_COM_CORE_CLK_EN, 0x00);
- regmap_write(regmap, QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xCD);
- regmap_write(regmap, QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1C);
- regmap_write(regmap, QSERDES_COM_BIN_VCOCAL_HSCLK_SEL, 0x11);
-
- regmap_write(regmap, QSERDES_TX_TX_BAND, 0x04);
- regmap_write(regmap, QSERDES_TX_SLEW_CNTL, 0x0A);
- regmap_write(regmap, QSERDES_TX_RES_CODE_LANE_OFFSET_TX, 0x09);
- regmap_write(regmap, QSERDES_TX_RES_CODE_LANE_OFFSET_RX, 0x02);
- regmap_write(regmap, QSERDES_TX_LANE_MODE_1, 0x05);
- regmap_write(regmap, QSERDES_TX_LANE_MODE_3, 0x00);
- regmap_write(regmap, QSERDES_TX_RCV_DETECT_LVL_2, 0x12);
- regmap_write(regmap, QSERDES_TX_TRAN_DRVR_EMP_EN, 0x0C);
-
- regmap_write(regmap, QSERDES_RX_UCDR_FO_GAIN, 0x0A);
- regmap_write(regmap, QSERDES_RX_UCDR_SO_GAIN, 0x06);
- regmap_write(regmap, QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0A);
- regmap_write(regmap, QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7F);
- regmap_write(regmap, QSERDES_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00);
- regmap_write(regmap, QSERDES_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x01);
- regmap_write(regmap, QSERDES_RX_UCDR_PI_CONTROLS, 0x81);
- regmap_write(regmap, QSERDES_RX_UCDR_PI_CTRL2, 0x80);
- regmap_write(regmap, QSERDES_RX_RX_TERM_BW, 0x00);
- regmap_write(regmap, QSERDES_RX_VGA_CAL_CNTRL2, 0x08);
- regmap_write(regmap, QSERDES_RX_GM_CAL, 0x0F);
- regmap_write(regmap, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL1, 0x04);
- regmap_write(regmap, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x00);
- regmap_write(regmap, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4A);
- regmap_write(regmap, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0A);
- regmap_write(regmap, QSERDES_RX_RX_IDAC_TSETTLE_LOW, 0x80);
- regmap_write(regmap, QSERDES_RX_RX_IDAC_TSETTLE_HIGH, 0x01);
- regmap_write(regmap, QSERDES_RX_RX_IDAC_MEASURE_TIME, 0x20);
- regmap_write(regmap, QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17);
- regmap_write(regmap, QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00);
- regmap_write(regmap, QSERDES_RX_SIGDET_CNTRL, 0x0F);
- regmap_write(regmap, QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E);
- regmap_write(regmap, QSERDES_RX_RX_BAND, 0x18);
- regmap_write(regmap, QSERDES_RX_RX_MODE_00_LOW, 0x18);
- regmap_write(regmap, QSERDES_RX_RX_MODE_00_HIGH, 0xC8);
- regmap_write(regmap, QSERDES_RX_RX_MODE_00_HIGH2, 0xC8);
- regmap_write(regmap, QSERDES_RX_RX_MODE_00_HIGH3, 0x0C);
- regmap_write(regmap, QSERDES_RX_RX_MODE_00_HIGH4, 0xB8);
- regmap_write(regmap, QSERDES_RX_RX_MODE_01_LOW, 0xE0);
- regmap_write(regmap, QSERDES_RX_RX_MODE_01_HIGH, 0xC8);
- regmap_write(regmap, QSERDES_RX_RX_MODE_01_HIGH2, 0xC8);
- regmap_write(regmap, QSERDES_RX_RX_MODE_01_HIGH3, 0x09);
- regmap_write(regmap, QSERDES_RX_RX_MODE_01_HIGH4, 0xB1);
- regmap_write(regmap, QSERDES_RX_RX_MODE_10_LOW, 0xE0);
- regmap_write(regmap, QSERDES_RX_RX_MODE_10_HIGH, 0xC8);
- regmap_write(regmap, QSERDES_RX_RX_MODE_10_HIGH2, 0xC8);
- regmap_write(regmap, QSERDES_RX_RX_MODE_10_HIGH3, 0x3B);
- regmap_write(regmap, QSERDES_RX_RX_MODE_10_HIGH4, 0xB7);
- regmap_write(regmap, QSERDES_RX_DCC_CTRL1, 0x0C);
-
- regmap_write(regmap, QSERDES_PCS_LINE_RESET_TIME, 0x0C);
- regmap_write(regmap, QSERDES_PCS_TX_LARGE_AMP_DRV_LVL, 0x1F);
- regmap_write(regmap, QSERDES_PCS_TX_SMALL_AMP_DRV_LVL, 0x03);
- regmap_write(regmap, QSERDES_PCS_TX_MID_TERM_CTRL1, 0x83);
- regmap_write(regmap, QSERDES_PCS_TX_MID_TERM_CTRL2, 0x08);
- regmap_write(regmap, QSERDES_PCS_SGMII_MISC_CTRL8, 0x8C);
- regmap_write(regmap, QSERDES_PCS_SW_RESET, 0x00);
-
- regmap_write(regmap, QSERDES_PCS_PHY_START, 0x01);
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_SW_RESET, 0x01);
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_POWER_DOWN_CONTROL, 0x01);
+
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_PLL_IVCO, 0x0F);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_CP_CTRL_MODE0, 0x06);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_SYSCLK_EN_SEL, 0x1A);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x1A);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x41);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_DEC_START_MODE0, 0x7A);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x00);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x20);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x01);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_VCO_TUNE1_MODE0, 0xA1);
+
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_VCO_TUNE2_MODE0, 0x02);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_VCO_TUNE_INITVAL2, 0x00);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_HSCLK_SEL, 0x03);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x05);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_CORE_CLK_EN, 0x00);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xCD);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1C);
+ regmap_write(regmap, QSERDES_QMP_PLL + QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL, 0x11);
+
+ regmap_write(regmap, QSERDES_TX + QSERDES_V5_TX_TX_BAND, 0x04);
+ regmap_write(regmap, QSERDES_TX + QSERDES_V5_TX_SLEW_CNTL, 0x0A);
+ regmap_write(regmap, QSERDES_TX + QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x09);
+ regmap_write(regmap, QSERDES_TX + QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x02);
+ regmap_write(regmap, QSERDES_TX + QSERDES_V5_TX_LANE_MODE_1, 0x05);
+ regmap_write(regmap, QSERDES_TX + QSERDES_V5_TX_LANE_MODE_3, 0x00);
+ regmap_write(regmap, QSERDES_TX + QSERDES_V5_TX_RCV_DETECT_LVL_2, 0x12);
+ regmap_write(regmap, QSERDES_TX + QSERDES_V5_TX_TRAN_DRVR_EMP_EN, 0x0C);
+
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_UCDR_FO_GAIN, 0x0A);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_UCDR_SO_GAIN, 0x06);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x0A);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7F);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x01);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x81);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_UCDR_PI_CTRL2, 0x80);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_TERM_BW, 0x00);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x08);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_GM_CAL, 0x0F);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL1, 0x04);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x00);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4A);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0A);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW, 0x80);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH, 0x01);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_IDAC_MEASURE_TIME, 0x20);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_SIGDET_CNTRL, 0x0F);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x1E);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_BAND, 0x18);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_00_LOW, 0x18);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_00_HIGH, 0xC8);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xC8);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x0C);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xB8);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_01_LOW, 0xE0);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_01_HIGH, 0xC8);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xC8);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x09);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xB1);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_10_LOW, 0xE0);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_10_HIGH, 0xC8);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_10_HIGH2, 0xC8);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_10_HIGH3, 0x3B);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_RX_MODE_10_HIGH4, 0xB7);
+ regmap_write(regmap, QSERDES_RX + QSERDES_V5_RX_DCC_CTRL1, 0x0C);
+
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_LINE_RESET_TIME, 0x0C);
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_TX_LARGE_AMP_DRV_LVL, 0x1F);
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_TX_SMALL_AMP_DRV_LVL, 0x03);
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_TX_MID_TERM_CTRL1, 0x83);
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_TX_MID_TERM_CTRL2, 0x08);
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_SGMII_MISC_CTRL8, 0x8C);
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_SW_RESET, 0x00);
+
+ regmap_write(regmap, QSERDES_PCS + QPHY_PCS_PHY_START, 0x01);
}
static inline int
@@ -313,28 +234,28 @@ static int qcom_dwmac_sgmii_phy_calibrate(struct phy *phy)
}
if (qcom_dwmac_sgmii_phy_poll_status(data->regmap,
- QSERDES_COM_C_READY_STATUS,
+ QSERDES_QMP_PLL + QSERDES_V5_COM_C_READY_STATUS,
QSERDES_COM_C_READY)) {
dev_err(dev, "QSERDES_COM_C_READY_STATUS timed-out");
return -ETIMEDOUT;
}
if (qcom_dwmac_sgmii_phy_poll_status(data->regmap,
- QSERDES_PCS_PCS_READY_STATUS,
+ QSERDES_PCS + QPHY_PCS_PCS_READY_STATUS,
QSERDES_PCS_READY)) {
dev_err(dev, "PCS_READY timed-out");
return -ETIMEDOUT;
}
if (qcom_dwmac_sgmii_phy_poll_status(data->regmap,
- QSERDES_PCS_PCS_READY_STATUS,
+ QSERDES_PCS + QPHY_PCS_PCS_READY_STATUS,
QSERDES_PCS_SGMIIPHY_READY)) {
dev_err(dev, "SGMIIPHY_READY timed-out");
return -ETIMEDOUT;
}
if (qcom_dwmac_sgmii_phy_poll_status(data->regmap,
- QSERDES_COM_CMN_STATUS,
+ QSERDES_QMP_PLL + QSERDES_V5_COM_CMN_STATUS,
QSERDES_COM_C_PLL_LOCKED)) {
dev_err(dev, "PLL Lock Status timed-out");
return -ETIMEDOUT;
@@ -354,11 +275,11 @@ static int qcom_dwmac_sgmii_phy_power_off(struct phy *phy)
{
struct qcom_dwmac_sgmii_phy_data *data = phy_get_drvdata(phy);
- regmap_write(data->regmap, QSERDES_PCS_TX_MID_TERM_CTRL2, 0x08);
- regmap_write(data->regmap, QSERDES_PCS_SW_RESET, 0x01);
+ regmap_write(data->regmap, QSERDES_PCS + QPHY_PCS_TX_MID_TERM_CTRL2, 0x08);
+ regmap_write(data->regmap, QSERDES_PCS + QPHY_PCS_SW_RESET, 0x01);
udelay(100);
- regmap_write(data->regmap, QSERDES_PCS_SW_RESET, 0x00);
- regmap_write(data->regmap, QSERDES_PCS_PHY_START, 0x01);
+ regmap_write(data->regmap, QSERDES_PCS + QPHY_PCS_SW_RESET, 0x00);
+ regmap_write(data->regmap, QSERDES_PCS + QPHY_PCS_PHY_START, 0x01);
clk_disable_unprepare(data->refclk);
diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c
index 2f876f158e1df..a591ad95347cc 100644
--- a/drivers/phy/ralink/phy-mt7621-pci.c
+++ b/drivers/phy/ralink/phy-mt7621-pci.c
@@ -263,7 +263,7 @@ static const struct phy_ops mt7621_pci_phy_ops = {
};
static struct phy *mt7621_pcie_phy_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct mt7621_pci_phy *mt7621_phy = dev_get_drvdata(dev);
diff --git a/drivers/phy/realtek/Kconfig b/drivers/phy/realtek/Kconfig
new file mode 100644
index 0000000000000..75ac7e7c31aec
--- /dev/null
+++ b/drivers/phy/realtek/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Phy drivers for Realtek platforms
+#
+
+if ARCH_REALTEK || COMPILE_TEST
+
+config PHY_RTK_RTD_USB2PHY
+ tristate "Realtek RTD USB2 PHY Transceiver Driver"
+ depends on USB_SUPPORT
+ select GENERIC_PHY
+ select USB_PHY
+ select USB_COMMON
+ help
+ Enable this to support the Realtek SoC USB2 PHY transceiver.
+ The DHC (digital home center) RTD series SoCs use the Synopsys
+ DWC3 USB IP. This driver initializes the PHY parameters.
+
+config PHY_RTK_RTD_USB3PHY
+ tristate "Realtek RTD USB3 PHY Transceiver Driver"
+ depends on USB_SUPPORT
+ select GENERIC_PHY
+ select USB_PHY
+ select USB_COMMON
+ help
+ Enable this to support the Realtek SoC USB3 PHY transceiver.
+ The DHC (digital home center) RTD series SoCs use the Synopsys
+ DWC3 USB IP. This driver initializes the PHY parameters.
+
+endif # ARCH_REALTEK || COMPILE_TEST
diff --git a/drivers/phy/realtek/Makefile b/drivers/phy/realtek/Makefile
new file mode 100644
index 0000000000000..ed7b47ff8a268
--- /dev/null
+++ b/drivers/phy/realtek/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHY_RTK_RTD_USB2PHY) += phy-rtk-usb2.o
+obj-$(CONFIG_PHY_RTK_RTD_USB3PHY) += phy-rtk-usb3.o
diff --git a/drivers/phy/realtek/phy-rtk-usb2.c b/drivers/phy/realtek/phy-rtk-usb2.c
new file mode 100644
index 0000000000000..e3ad7cea51099
--- /dev/null
+++ b/drivers/phy/realtek/phy-rtk-usb2.c
@@ -0,0 +1,1312 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-rtk-usb2.c RTK USB 2.0 PHY driver
+ *
+ * Copyright (C) 2023 Realtek Semiconductor Corporation
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/regmap.h>
+#include <linux/sys_soc.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+#include <linux/usb.h>
+
+/* GUSB2PHYACCn register */
+#define PHY_NEW_REG_REQ BIT(25)
+#define PHY_VSTS_BUSY BIT(23)
+#define PHY_VCTRL_SHIFT 8
+#define PHY_REG_DATA_MASK 0xff
+
+#define GET_LOW_NIBBLE(addr) ((addr) & 0x0f)
+#define GET_HIGH_NIBBLE(addr) (((addr) & 0xf0) >> 4)
+
+#define EFUS_USB_DC_CAL_RATE 2
+#define EFUS_USB_DC_CAL_MAX 7
+
+#define EFUS_USB_DC_DIS_RATE 1
+#define EFUS_USB_DC_DIS_MAX 7
+
+#define MAX_PHY_DATA_SIZE 20
+#define OFFEST_PHY_READ 0x20
+
+#define MAX_USB_PHY_NUM 4
+#define MAX_USB_PHY_PAGE0_DATA_SIZE 16
+#define MAX_USB_PHY_PAGE1_DATA_SIZE 16
+#define MAX_USB_PHY_PAGE2_DATA_SIZE 8
+
+#define SET_PAGE_OFFSET 0xf4
+#define SET_PAGE_0 0x9b
+#define SET_PAGE_1 0xbb
+#define SET_PAGE_2 0xdb
+
+#define PAGE_START 0xe0
+#define PAGE0_0XE4 0xe4
+#define PAGE0_0XE6 0xe6
+#define PAGE0_0XE7 0xe7
+#define PAGE1_0XE0 0xe0
+#define PAGE1_0XE2 0xe2
+
+#define SENSITIVITY_CTRL (BIT(4) | BIT(5) | BIT(6))
+#define ENABLE_AUTO_SENSITIVITY_CALIBRATION BIT(2)
+#define DEFAULT_DC_DRIVING_VALUE (0x8)
+#define DEFAULT_DC_DISCONNECTION_VALUE (0x6)
+#define HS_CLK_SELECT BIT(6)
+
+struct phy_reg {
+ void __iomem *reg_wrap_vstatus;
+ void __iomem *reg_gusb2phyacc0;
+ int vstatus_index;
+};
+
+struct phy_data {
+ u8 addr;
+ u8 data;
+};
+
+struct phy_cfg {
+ int page0_size;
+ struct phy_data page0[MAX_USB_PHY_PAGE0_DATA_SIZE];
+ int page1_size;
+ struct phy_data page1[MAX_USB_PHY_PAGE1_DATA_SIZE];
+ int page2_size;
+ struct phy_data page2[MAX_USB_PHY_PAGE2_DATA_SIZE];
+
+ int num_phy;
+
+ bool check_efuse;
+ int check_efuse_version;
+#define CHECK_EFUSE_V1 1
+#define CHECK_EFUSE_V2 2
+ int efuse_dc_driving_rate;
+ int efuse_dc_disconnect_rate;
+ int dc_driving_mask;
+ int dc_disconnect_mask;
+ bool usb_dc_disconnect_at_page0;
+ int driving_updated_for_dev_dis;
+
+ bool do_toggle;
+ bool do_toggle_driving;
+ bool use_default_parameter;
+ bool is_double_sensitivity_mode;
+};
+
+struct phy_parameter {
+ struct phy_reg phy_reg;
+
+ /* Get from efuse */
+ s8 efuse_usb_dc_cal;
+ s8 efuse_usb_dc_dis;
+
+ /* Get from dts */
+ bool inverse_hstx_sync_clock;
+ u32 driving_level;
+ s32 driving_level_compensate;
+ s32 disconnection_compensate;
+};
+
+struct rtk_phy {
+ struct device *dev;
+
+ struct phy_cfg *phy_cfg;
+ int num_phy;
+ struct phy_parameter *phy_parameter;
+
+ struct dentry *debug_dir;
+};
+
+/* mapping 0xE0 to 0 ... 0xE7 to 7, 0xF0 to 8 ... 0xF7 to 15 */
+static inline int page_addr_to_array_index(u8 addr)
+{
+ return (int)((((addr) - PAGE_START) & 0x7) +
+ ((((addr) - PAGE_START) & 0x10) >> 1));
+}
+
+static inline u8 array_index_to_page_addr(int index)
+{
+ return ((((index) + PAGE_START) & 0x7) +
+ ((((index) & 0x8) << 1) + PAGE_START));
+}
+
+#define PHY_IO_TIMEOUT_USEC (50000)
+#define PHY_IO_DELAY_US (100)
+
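+/* Poll a PHY access register until the masked bits match the expected value, or time out. */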
+static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
+{
+ int ret;
+ unsigned int val;
+
+ ret = read_poll_timeout(readl, val, ((val & mask) == result),
+ PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
+ if (ret) {
+ pr_err("%s can't program USB phy\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
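+/* Indirect PHY register read via GUSB2PHYACC0: issue the low and high nibbles of the address, then latch the returned data byte. */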
+static char rtk_phy_read(struct phy_reg *phy_reg, char addr)
+{
+ void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0;
+ unsigned int val;
+ int ret = 0;
+
+ addr -= OFFEST_PHY_READ;
+
+ /* polling until VBusy == 0 */
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return (char)ret;
+
+ /* VCtrl = low nibble of addr, and set PHY_NEW_REG_REQ */
+ val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+ writel(val, reg_gusb2phyacc0);
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return (char)ret;
+
+ /* VCtrl = high nibble of addr, and set PHY_NEW_REG_REQ */
+ val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+ writel(val, reg_gusb2phyacc0);
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return (char)ret;
+
+ val = readl(reg_gusb2phyacc0);
+
+ return (char)(val & PHY_REG_DATA_MASK);
+}
+
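+/* Indirect PHY register write: place the data byte in the wrap VStatus register, then issue the low and high nibbles of the address. */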
+static int rtk_phy_write(struct phy_reg *phy_reg, char addr, char data)
+{
+ unsigned int val;
+ void __iomem *reg_wrap_vstatus = phy_reg->reg_wrap_vstatus;
+ void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0;
+ int shift_bits = phy_reg->vstatus_index * 8;
+ int ret = 0;
+
+ /* write data to VStatusOut2 (data output to phy) */
+ writel((u32)data << shift_bits, reg_wrap_vstatus);
+
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return ret;
+
+ /* VCtrl = low nibble of addr, set PHY_NEW_REG_REQ */
+ val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+
+ writel(val, reg_gusb2phyacc0);
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return ret;
+
+ /* VCtrl = high nibble of addr, set PHY_NEW_REG_REQ */
+ val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+
+ writel(val, reg_gusb2phyacc0);
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtk_phy_set_page(struct phy_reg *phy_reg, int page)
+{
+ switch (page) {
+ case 0:
+ return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_0);
+ case 1:
+ return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_1);
+ case 2:
+ return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_2);
+ default:
+ pr_err("%s error page=%d\n", __func__, page);
+ }
+
+ return -EINVAL;
+}
+
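+/* Compute the DC disconnect level field of page0 register 0xE4: add the efuse trim and the DT compensation to the current value and clamp to the mask. */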
+static u8 __updated_dc_disconnect_level_page0_0xe4(struct phy_cfg *phy_cfg,
+ struct phy_parameter *phy_parameter, u8 data)
+{
+ u8 ret;
+ s32 val;
+ s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+ int offset = 4;
+
+ val = (s32)((data >> offset) & dc_disconnect_mask)
+ + phy_parameter->efuse_usb_dc_dis
+ + phy_parameter->disconnection_compensate;
+
+ if (val > dc_disconnect_mask)
+ val = dc_disconnect_mask;
+ else if (val < 0)
+ val = 0;
+
+ ret = (data & (~(dc_disconnect_mask << offset))) |
+ (val & dc_disconnect_mask) << offset;
+
+ return ret;
+}
+
+/* update the DC disconnect level at page0 */
+static void update_dc_disconnect_level_at_page0(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, bool update)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+ struct phy_data *phy_data_page;
+ struct phy_data *phy_data;
+ u8 addr, data;
+ int offset = 4;
+ s32 dc_disconnect_mask;
+ int i;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_reg = &phy_parameter->phy_reg;
+
+ /* Set page 0 */
+ phy_data_page = phy_cfg->page0;
+ rtk_phy_set_page(phy_reg, 0);
+
+ i = page_addr_to_array_index(PAGE0_0XE4);
+ phy_data = phy_data_page + i;
+ if (!phy_data->addr) {
+ phy_data->addr = PAGE0_0XE4;
+ phy_data->data = rtk_phy_read(phy_reg, PAGE0_0XE4);
+ }
+
+ addr = phy_data->addr;
+ data = phy_data->data;
+ dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+
+ if (update)
+ data = __updated_dc_disconnect_level_page0_0xe4(phy_cfg, phy_parameter, data);
+ else
+ data = (data & ~(dc_disconnect_mask << offset)) |
+ (DEFAULT_DC_DISCONNECTION_VALUE << offset);
+
+ if (rtk_phy_write(phy_reg, addr, data))
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+}
+
+static u8 __updated_dc_disconnect_level_page1_0xe2(struct phy_cfg *phy_cfg,
+ struct phy_parameter *phy_parameter, u8 data)
+{
+ u8 ret;
+ s32 val;
+ s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+
+ if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+ val = (s32)(data & dc_disconnect_mask)
+ + phy_parameter->efuse_usb_dc_dis
+ + phy_parameter->disconnection_compensate;
+ } else { /* for CHECK_EFUSE_V2 or no efuse */
+ if (phy_parameter->efuse_usb_dc_dis)
+ val = (s32)(phy_parameter->efuse_usb_dc_dis +
+ phy_parameter->disconnection_compensate);
+ else
+ val = (s32)((data & dc_disconnect_mask) +
+ phy_parameter->disconnection_compensate);
+ }
+
+ if (val > dc_disconnect_mask)
+ val = dc_disconnect_mask;
+ else if (val < 0)
+ val = 0;
+
+ ret = (data & (~dc_disconnect_mask)) | (val & dc_disconnect_mask);
+
+ return ret;
+}
+
+/* update the DC disconnect level at page1 */
+static void update_dc_disconnect_level_at_page1(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, bool update)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_data *phy_data_page;
+ struct phy_data *phy_data;
+ struct phy_reg *phy_reg;
+ u8 addr, data;
+ s32 dc_disconnect_mask;
+ int i;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_reg = &phy_parameter->phy_reg;
+
+ /* Set page 1 */
+ phy_data_page = phy_cfg->page1;
+ rtk_phy_set_page(phy_reg, 1);
+
+ i = page_addr_to_array_index(PAGE1_0XE2);
+ phy_data = phy_data_page + i;
+ if (!phy_data->addr) {
+ phy_data->addr = PAGE1_0XE2;
+ phy_data->data = rtk_phy_read(phy_reg, PAGE1_0XE2);
+ }
+
+ addr = phy_data->addr;
+ data = phy_data->data;
+ dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+
+ if (update)
+ data = __updated_dc_disconnect_level_page1_0xe2(phy_cfg, phy_parameter, data);
+ else
+ data = (data & ~dc_disconnect_mask) | DEFAULT_DC_DISCONNECTION_VALUE;
+
+ if (rtk_phy_write(phy_reg, addr, data))
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+}
+
+static void update_dc_disconnect_level(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, bool update)
+{
+ struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+
+ if (phy_cfg->usb_dc_disconnect_at_page0)
+ update_dc_disconnect_level_at_page0(rtk_phy, phy_parameter, update);
+ else
+ update_dc_disconnect_level_at_page1(rtk_phy, phy_parameter, update);
+}
+
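+/* Compute the DC driving level field of page0 register 0xE4 from the efuse calibration (when present) and the DT compensation, clamped to the mask. */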
+static u8 __update_dc_driving_page0_0xe4(struct phy_cfg *phy_cfg,
+ struct phy_parameter *phy_parameter, u8 data)
+{
+ s32 driving_level_compensate = phy_parameter->driving_level_compensate;
+ s32 dc_driving_mask = phy_cfg->dc_driving_mask;
+ s32 val;
+ u8 ret;
+
+ if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+ val = (s32)(data & dc_driving_mask) + driving_level_compensate
+ + phy_parameter->efuse_usb_dc_cal;
+ } else { /* for CHECK_EFUSE_V2 or no efuse */
+ if (phy_parameter->efuse_usb_dc_cal)
+ val = (s32)((phy_parameter->efuse_usb_dc_cal & dc_driving_mask)
+ + driving_level_compensate);
+ else
+ val = (s32)(data & dc_driving_mask);
+ }
+
+ if (val > dc_driving_mask)
+ val = dc_driving_mask;
+ else if (val < 0)
+ val = 0;
+
+ ret = (data & (~dc_driving_mask)) | (val & dc_driving_mask);
+
+ return ret;
+}
+
+static void update_dc_driving_level(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+
+ phy_reg = &phy_parameter->phy_reg;
+ phy_cfg = rtk_phy->phy_cfg;
+ if (!phy_cfg->page0[4].addr) {
+ rtk_phy_set_page(phy_reg, 0);
+ phy_cfg->page0[4].addr = PAGE0_0XE4;
+ phy_cfg->page0[4].data = rtk_phy_read(phy_reg, PAGE0_0XE4);
+ }
+
+ if (phy_parameter->driving_level != DEFAULT_DC_DRIVING_VALUE) {
+ u32 dc_driving_mask;
+ u8 driving_level;
+ u8 data;
+
+ data = phy_cfg->page0[4].data;
+ dc_driving_mask = phy_cfg->dc_driving_mask;
+ driving_level = data & dc_driving_mask;
+
+ dev_dbg(rtk_phy->dev, "%s driving_level=%d => dts driving_level=%d\n",
+ __func__, driving_level, phy_parameter->driving_level);
+
+ phy_cfg->page0[4].data = (data & (~dc_driving_mask)) |
+ (phy_parameter->driving_level & dc_driving_mask);
+ }
+
+ phy_cfg->page0[4].data = __update_dc_driving_page0_0xe4(phy_cfg,
+ phy_parameter,
+ phy_cfg->page0[4].data);
+}
+
+static void update_hs_clk_select(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (phy_parameter->inverse_hstx_sync_clock) {
+ if (!phy_cfg->page0[6].addr) {
+ rtk_phy_set_page(phy_reg, 0);
+ phy_cfg->page0[6].addr = PAGE0_0XE6;
+ phy_cfg->page0[6].data = rtk_phy_read(phy_reg, PAGE0_0XE6);
+ }
+
+ phy_cfg->page0[6].data = phy_cfg->page0[6].data | HS_CLK_SELECT;
+ }
+}
+
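+/* Re-run the automatic sensitivity calibration (and optionally adjust the driving level) on connect/disconnect. */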
+static void do_rtk_phy_toggle(struct rtk_phy *rtk_phy,
+ int index, bool connect)
+{
+ struct phy_parameter *phy_parameter;
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+ struct phy_data *phy_data_page;
+ u8 addr, data;
+ int i;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (!phy_cfg->do_toggle)
+ goto out;
+
+ if (phy_cfg->is_double_sensitivity_mode)
+ goto do_toggle_driving;
+
+ /* Set page 0 */
+ rtk_phy_set_page(phy_reg, 0);
+
+ addr = PAGE0_0XE7;
+ data = rtk_phy_read(phy_reg, addr);
+
+ if (connect)
+ rtk_phy_write(phy_reg, addr, data & (~SENSITIVITY_CTRL));
+ else
+ rtk_phy_write(phy_reg, addr, data | (SENSITIVITY_CTRL));
+
+do_toggle_driving:
+
+ if (!phy_cfg->do_toggle_driving)
+ goto do_toggle;
+
+ /* Page 0 addr 0xE4 driving capability */
+
+ /* Set page 0 */
+ phy_data_page = phy_cfg->page0;
+ rtk_phy_set_page(phy_reg, 0);
+
+ i = page_addr_to_array_index(PAGE0_0XE4);
+ addr = phy_data_page[i].addr;
+ data = phy_data_page[i].data;
+
+ if (connect) {
+ rtk_phy_write(phy_reg, addr, data);
+ } else {
+ u8 value;
+ s32 tmp;
+ s32 driving_updated =
+ phy_cfg->driving_updated_for_dev_dis;
+ s32 dc_driving_mask = phy_cfg->dc_driving_mask;
+
+ tmp = (s32)(data & dc_driving_mask) + driving_updated;
+
+ if (tmp > dc_driving_mask)
+ tmp = dc_driving_mask;
+ else if (tmp < 0)
+ tmp = 0;
+
+ value = (data & (~dc_driving_mask)) | (tmp & dc_driving_mask);
+
+ rtk_phy_write(phy_reg, addr, value);
+ }
+
+do_toggle:
+ /* restore dc disconnect level before toggle */
+ update_dc_disconnect_level(rtk_phy, phy_parameter, false);
+
+ /* Set page 1 */
+ rtk_phy_set_page(phy_reg, 1);
+
+ addr = PAGE1_0XE0;
+ data = rtk_phy_read(phy_reg, addr);
+
+ rtk_phy_write(phy_reg, addr, data &
+ (~ENABLE_AUTO_SENSITIVITY_CALIBRATION));
+ mdelay(1);
+ rtk_phy_write(phy_reg, addr, data |
+ (ENABLE_AUTO_SENSITIVITY_CALIBRATION));
+
+ /* update dc disconnect level after toggle */
+ update_dc_disconnect_level(rtk_phy, phy_parameter, true);
+
+out:
+ return;
+}
+
+static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
+{
+ struct phy_parameter *phy_parameter;
+ struct phy_cfg *phy_cfg;
+ struct phy_data *phy_data_page;
+ struct phy_reg *phy_reg;
+ int i;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (phy_cfg->use_default_parameter) {
+ dev_dbg(rtk_phy->dev, "%s phy#%d use default parameter\n",
+ __func__, index);
+ goto do_toggle;
+ }
+
+ /* Set page 0 */
+ phy_data_page = phy_cfg->page0;
+ rtk_phy_set_page(phy_reg, 0);
+
+ for (i = 0; i < phy_cfg->page0_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = phy_data->addr;
+ u8 data = phy_data->data;
+
+ if (!addr)
+ continue;
+
+ if (rtk_phy_write(phy_reg, addr, data)) {
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page0 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+ return -EINVAL;
+ }
+ }
+
+ /* Set page 1 */
+ phy_data_page = phy_cfg->page1;
+ rtk_phy_set_page(phy_reg, 1);
+
+ for (i = 0; i < phy_cfg->page1_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = phy_data->addr;
+ u8 data = phy_data->data;
+
+ if (!addr)
+ continue;
+
+ if (rtk_phy_write(phy_reg, addr, data)) {
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+ return -EINVAL;
+ }
+ }
+
+ if (phy_cfg->page2_size == 0)
+ goto do_toggle;
+
+ /* Set page 2 */
+ phy_data_page = phy_cfg->page2;
+ rtk_phy_set_page(phy_reg, 2);
+
+ for (i = 0; i < phy_cfg->page2_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = phy_data->addr;
+ u8 data = phy_data->data;
+
+ if (!addr)
+ continue;
+
+ if (rtk_phy_write(phy_reg, addr, data)) {
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page2 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+ return -EINVAL;
+ }
+ }
+
+do_toggle:
+ do_rtk_phy_toggle(rtk_phy, index, false);
+
+ return 0;
+}
+
+static int rtk_phy_init(struct phy *phy)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+ unsigned long phy_init_time = jiffies;
+ int i, ret = 0;
+
+ if (!rtk_phy)
+ return -EINVAL;
+
+ for (i = 0; i < rtk_phy->num_phy; i++)
+ ret = do_rtk_phy_init(rtk_phy, i);
+
+ dev_dbg(rtk_phy->dev, "Initialized RTK USB 2.0 PHY (take %dms)\n",
+ jiffies_to_msecs(jiffies - phy_init_time));
+ return ret;
+}
+
+static int rtk_phy_exit(struct phy *phy)
+{
+ return 0;
+}
+
+static void rtk_phy_toggle(struct rtk_phy *rtk_phy, bool connect, int port)
+{
+ int index = port;
+
+ if (index >= rtk_phy->num_phy) {
+ dev_err(rtk_phy->dev, "%s: port=%d is out of range (num_phy=%d)\n",
+ __func__, index, rtk_phy->num_phy);
+ return;
+ }
+
+ do_rtk_phy_toggle(rtk_phy, index, connect);
+}
+
+static int rtk_phy_connect(struct phy *phy, int port)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+
+ dev_dbg(rtk_phy->dev, "%s port=%d\n", __func__, port);
+ rtk_phy_toggle(rtk_phy, true, port);
+
+ return 0;
+}
+
+static int rtk_phy_disconnect(struct phy *phy, int port)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+
+ dev_dbg(rtk_phy->dev, "%s port=%d\n", __func__, port);
+ rtk_phy_toggle(rtk_phy, false, port);
+
+ return 0;
+}
+
+static const struct phy_ops ops = {
+ .init = rtk_phy_init,
+ .exit = rtk_phy_exit,
+ .connect = rtk_phy_connect,
+ .disconnect = rtk_phy_disconnect,
+ .owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *create_phy_debug_root(void)
+{
+ struct dentry *phy_debug_root;
+
+ phy_debug_root = debugfs_lookup("phy", usb_debug_root);
+ if (!phy_debug_root)
+ phy_debug_root = debugfs_create_dir("phy", usb_debug_root);
+
+ return phy_debug_root;
+}
+
+static int rtk_usb2_parameter_show(struct seq_file *s, void *unused)
+{
+ struct rtk_phy *rtk_phy = s->private;
+ struct phy_cfg *phy_cfg;
+ int i, index;
+
+ phy_cfg = rtk_phy->phy_cfg;
+
+ seq_puts(s, "Property:\n");
+ seq_printf(s, " check_efuse: %s\n",
+ phy_cfg->check_efuse ? "Enable" : "Disable");
+ seq_printf(s, " check_efuse_version: %d\n",
+ phy_cfg->check_efuse_version);
+ seq_printf(s, " efuse_dc_driving_rate: %d\n",
+ phy_cfg->efuse_dc_driving_rate);
+ seq_printf(s, " dc_driving_mask: 0x%x\n",
+ phy_cfg->dc_driving_mask);
+ seq_printf(s, " efuse_dc_disconnect_rate: %d\n",
+ phy_cfg->efuse_dc_disconnect_rate);
+ seq_printf(s, " dc_disconnect_mask: 0x%x\n",
+ phy_cfg->dc_disconnect_mask);
+ seq_printf(s, " usb_dc_disconnect_at_page0: %s\n",
+ phy_cfg->usb_dc_disconnect_at_page0 ? "true" : "false");
+ seq_printf(s, " do_toggle: %s\n",
+ phy_cfg->do_toggle ? "Enable" : "Disable");
+ seq_printf(s, " do_toggle_driving: %s\n",
+ phy_cfg->do_toggle_driving ? "Enable" : "Disable");
+ seq_printf(s, " driving_updated_for_dev_dis: 0x%x\n",
+ phy_cfg->driving_updated_for_dev_dis);
+ seq_printf(s, " use_default_parameter: %s\n",
+ phy_cfg->use_default_parameter ? "Enable" : "Disable");
+ seq_printf(s, " is_double_sensitivity_mode: %s\n",
+ phy_cfg->is_double_sensitivity_mode ? "Enable" : "Disable");
+
+ for (index = 0; index < rtk_phy->num_phy; index++) {
+ struct phy_parameter *phy_parameter;
+ struct phy_reg *phy_reg;
+ struct phy_data *phy_data_page;
+
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ seq_printf(s, "PHY %d:\n", index);
+
+ seq_puts(s, "Page 0:\n");
+ /* Set page 0 */
+ phy_data_page = phy_cfg->page0;
+ rtk_phy_set_page(phy_reg, 0);
+
+ for (i = 0; i < phy_cfg->page0_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = array_index_to_page_addr(i);
+ u8 data = phy_data->data;
+ u8 value = rtk_phy_read(phy_reg, addr);
+
+ if (phy_data->addr)
+ seq_printf(s, " Page 0: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
+ addr, data, value);
+ else
+ seq_printf(s, " Page 0: addr=0x%x data=none ==> read value=0x%02x\n",
+ addr, value);
+ }
+
+ seq_puts(s, "Page 1:\n");
+ /* Set page 1 */
+ phy_data_page = phy_cfg->page1;
+ rtk_phy_set_page(phy_reg, 1);
+
+ for (i = 0; i < phy_cfg->page1_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = array_index_to_page_addr(i);
+ u8 data = phy_data->data;
+ u8 value = rtk_phy_read(phy_reg, addr);
+
+ if (phy_data->addr)
+ seq_printf(s, " Page 1: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
+ addr, data, value);
+ else
+ seq_printf(s, " Page 1: addr=0x%x data=none ==> read value=0x%02x\n",
+ addr, value);
+ }
+
+ if (phy_cfg->page2_size == 0)
+ goto out;
+
+ seq_puts(s, "Page 2:\n");
+ /* Set page 2 */
+ phy_data_page = phy_cfg->page2;
+ rtk_phy_set_page(phy_reg, 2);
+
+ for (i = 0; i < phy_cfg->page2_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = array_index_to_page_addr(i);
+ u8 data = phy_data->data;
+ u8 value = rtk_phy_read(phy_reg, addr);
+
+ if (phy_data->addr)
+ seq_printf(s, " Page 2: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
+ addr, data, value);
+ else
+ seq_printf(s, " Page 2: addr=0x%x data=none ==> read value=0x%02x\n",
+ addr, value);
+ }
+
+out:
+ seq_puts(s, "PHY Property:\n");
+ seq_printf(s, " efuse_usb_dc_cal: %d\n",
+ (int)phy_parameter->efuse_usb_dc_cal);
+ seq_printf(s, " efuse_usb_dc_dis: %d\n",
+ (int)phy_parameter->efuse_usb_dc_dis);
+ seq_printf(s, " inverse_hstx_sync_clock: %s\n",
+ phy_parameter->inverse_hstx_sync_clock ? "Enable" : "Disable");
+ seq_printf(s, " driving_level: %d\n",
+ phy_parameter->driving_level);
+ seq_printf(s, " driving_level_compensate: %d\n",
+ phy_parameter->driving_level_compensate);
+ seq_printf(s, " disconnection_compensate: %d\n",
+ phy_parameter->disconnection_compensate);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(rtk_usb2_parameter);
+
+static inline void create_debug_files(struct rtk_phy *rtk_phy)
+{
+ struct dentry *phy_debug_root = NULL;
+
+ phy_debug_root = create_phy_debug_root();
+ if (!phy_debug_root)
+ return;
+
+ rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev),
+ phy_debug_root);
+
+ debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
+ &rtk_usb2_parameter_fops);
+}
+
+static inline void remove_debug_files(struct rtk_phy *rtk_phy)
+{
+ debugfs_remove_recursive(rtk_phy->debug_dir);
+}
+#else
+static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
+static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
+#endif /* CONFIG_DEBUG_FS */
+
+static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, int index)
+{
+ struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+ u8 value = 0;
+ struct nvmem_cell *cell;
+ struct soc_device_attribute rtk_soc_groot[] = {
+ { .family = "Realtek Groot",},
+ { /* empty */ } };
+
+ if (!phy_cfg->check_efuse)
+ goto out;
+
+ /* Read efuse for usb dc cal */
+ cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-cal");
+ if (IS_ERR(cell)) {
+ dev_dbg(rtk_phy->dev, "%s no usb-dc-cal: %ld\n",
+ __func__, PTR_ERR(cell));
+ } else {
+ unsigned char *buf;
+ size_t buf_size;
+
+ buf = nvmem_cell_read(cell, &buf_size);
+ if (!IS_ERR(buf)) {
+ value = buf[0] & phy_cfg->dc_driving_mask;
+ kfree(buf);
+ }
+ nvmem_cell_put(cell);
+ }
+
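+ /*
+ * CHECK_EFUSE_V1 uses a sign-magnitude style encoding: values up to
+ * EFUS_USB_DC_CAL_MAX are positive compensation scaled by the driving
+ * rate, larger raw values carry a negative compensation of the masked
+ * magnitude.
+ */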
+ if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+ int rate = phy_cfg->efuse_dc_driving_rate;
+
+ if (value <= EFUS_USB_DC_CAL_MAX)
+ phy_parameter->efuse_usb_dc_cal = (int8_t)(value * rate);
+ else
+ phy_parameter->efuse_usb_dc_cal = -(int8_t)
+ ((EFUS_USB_DC_CAL_MAX & value) * rate);
+
+ if (soc_device_match(rtk_soc_groot)) {
+ dev_dbg(rtk_phy->dev, "For groot IC we need a workaround to adjust efuse_usb_dc_cal\n");
+
+ /* Don't multiply by dc_cal_rate=2 for positive dc cal compensation */
+ if (value <= EFUS_USB_DC_CAL_MAX)
+ phy_parameter->efuse_usb_dc_cal = (int8_t)(value);
+
+ /* Cap the dc cal compensation at 0x8 when the OTP value is 0x7 */
+ if (value == 0x7)
+ phy_parameter->efuse_usb_dc_cal = (int8_t)(value + 1);
+ }
+ } else { /* for CHECK_EFUSE_V2 */
+ phy_parameter->efuse_usb_dc_cal = value & phy_cfg->dc_driving_mask;
+ }
+
+ /* Read efuse for usb dc disconnect level */
+ value = 0;
+ cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-dis");
+ if (IS_ERR(cell)) {
+ dev_dbg(rtk_phy->dev, "%s no usb-dc-dis: %ld\n",
+ __func__, PTR_ERR(cell));
+ } else {
+ unsigned char *buf;
+ size_t buf_size;
+
+ buf = nvmem_cell_read(cell, &buf_size);
+ if (!IS_ERR(buf)) {
+ value = buf[0] & phy_cfg->dc_disconnect_mask;
+ kfree(buf);
+ }
+ nvmem_cell_put(cell);
+ }
+
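+ /* The disconnect level uses the same encoding as the driving calibration above. */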
+ if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+ int rate = phy_cfg->efuse_dc_disconnect_rate;
+
+ if (value <= EFUS_USB_DC_DIS_MAX)
+ phy_parameter->efuse_usb_dc_dis = (int8_t)(value * rate);
+ else
+ phy_parameter->efuse_usb_dc_dis = -(int8_t)
+ ((EFUS_USB_DC_DIS_MAX & value) * rate);
+ } else { /* for CHECK_EFUSE_V2 */
+ phy_parameter->efuse_usb_dc_dis = value & phy_cfg->dc_disconnect_mask;
+ }
+
+out:
+ return 0;
+}
+
+static int parse_phy_data(struct rtk_phy *rtk_phy)
+{
+ struct device *dev = rtk_phy->dev;
+ struct device_node *np = dev->of_node;
+ struct phy_parameter *phy_parameter;
+ int ret = 0;
+ int index;
+
+ rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
+ rtk_phy->num_phy, GFP_KERNEL);
+ if (!rtk_phy->phy_parameter)
+ return -ENOMEM;
+
+ for (index = 0; index < rtk_phy->num_phy; index++) {
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+
+ phy_parameter->phy_reg.reg_wrap_vstatus = of_iomap(np, 0);
+ phy_parameter->phy_reg.reg_gusb2phyacc0 = of_iomap(np, 1) + index;
+ phy_parameter->phy_reg.vstatus_index = index;
+
+ if (of_property_read_bool(np, "realtek,inverse-hstx-sync-clock"))
+ phy_parameter->inverse_hstx_sync_clock = true;
+ else
+ phy_parameter->inverse_hstx_sync_clock = false;
+
+ if (of_property_read_u32_index(np, "realtek,driving-level",
+ index, &phy_parameter->driving_level))
+ phy_parameter->driving_level = DEFAULT_DC_DRIVING_VALUE;
+
+ if (of_property_read_u32_index(np, "realtek,driving-level-compensate",
+ index, &phy_parameter->driving_level_compensate))
+ phy_parameter->driving_level_compensate = 0;
+
+ if (of_property_read_u32_index(np, "realtek,disconnection-compensate",
+ index, &phy_parameter->disconnection_compensate))
+ phy_parameter->disconnection_compensate = 0;
+
+ get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
+
+ update_dc_driving_level(rtk_phy, phy_parameter);
+
+ update_hs_clk_select(rtk_phy, phy_parameter);
+ }
+
+ return ret;
+}
+
+static int rtk_usb2phy_probe(struct platform_device *pdev)
+{
+ struct rtk_phy *rtk_phy;
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+ const struct phy_cfg *phy_cfg;
+ int ret = 0;
+
+ phy_cfg = of_device_get_match_data(dev);
+ if (!phy_cfg) {
+ dev_err(dev, "phy config are not assigned!\n");
+ return -EINVAL;
+ }
+
+ rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
+ if (!rtk_phy)
+ return -ENOMEM;
+
+ rtk_phy->dev = &pdev->dev;
+ rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
+ if (!rtk_phy->phy_cfg)
+ return -ENOMEM;
+
+ memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
+
+ rtk_phy->num_phy = phy_cfg->num_phy;
+
+ ret = parse_phy_data(rtk_phy);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, rtk_phy);
+
+ generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
+ if (IS_ERR(generic_phy))
+ return PTR_ERR(generic_phy);
+
+ phy_set_drvdata(generic_phy, rtk_phy);
+
+ phy_provider = devm_of_phy_provider_register(rtk_phy->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ create_debug_files(rtk_phy);
+
+err:
+ return ret;
+}
+
+static void rtk_usb2phy_remove(struct platform_device *pdev)
+{
+ struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);
+
+ remove_debug_files(rtk_phy);
+}
+
+static const struct phy_cfg rtd1295_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0x90},
+ [3] = {0xe3, 0x3a},
+ [4] = {0xe4, 0x68},
+ [6] = {0xe6, 0x91},
+ [13] = {0xf5, 0x81},
+ [15] = {0xf7, 0x02}, },
+ .page1_size = 8,
+ .page1 = { /* default parameter */ },
+ .page2_size = 0,
+ .page2 = { /* no parameter */ },
+ .num_phy = 1,
+ .check_efuse = false,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = false,
+};
+
+static const struct phy_cfg rtd1395_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [4] = {0xe4, 0xac},
+ [13] = {0xf5, 0x00},
+ [15] = {0xf7, 0x02}, },
+ .page1_size = 8,
+ .page1 = { /* default parameter */ },
+ .page2_size = 0,
+ .page2 = { /* no parameter */ },
+ .num_phy = 1,
+ .check_efuse = false,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = false,
+};
+
+static const struct phy_cfg rtd1395_phy_cfg_2port = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [4] = {0xe4, 0xac},
+ [13] = {0xf5, 0x00},
+ [15] = {0xf7, 0x02}, },
+ .page1_size = 8,
+ .page1 = { /* default parameter */ },
+ .page2_size = 0,
+ .page2 = { /* no parameter */ },
+ .num_phy = 2,
+ .check_efuse = false,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = false,
+};
+
+static const struct phy_cfg rtd1619_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [4] = {0xe4, 0x68}, },
+ .page1_size = 8,
+ .page1 = { /* default parameter */ },
+ .page2_size = 0,
+ .page2 = { /* no parameter */ },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = false,
+};
+
+static const struct phy_cfg rtd1319_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0x18},
+ [4] = {0xe4, 0x6a},
+ [7] = {0xe7, 0x71},
+ [13] = {0xf5, 0x15},
+ [15] = {0xf7, 0x32}, },
+ .page1_size = 8,
+ .page1 = { [3] = {0xe3, 0x44}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { [0] = {0xe0, 0x01}, },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = true,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct phy_cfg rtd1312c_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0x14},
+ [4] = {0xe4, 0x67},
+ [5] = {0xe5, 0x55}, },
+ .page1_size = 8,
+ .page1 = { [3] = {0xe3, 0x23},
+ [6] = {0xe6, 0x58}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { /* default parameter */ },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = true,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct phy_cfg rtd1619b_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0xa3},
+ [4] = {0xe4, 0xa8},
+ [5] = {0xe5, 0x4f},
+ [6] = {0xe6, 0x02}, },
+ .page1_size = 8,
+ .page1 = { [3] = {0xe3, 0x64}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { [7] = {0xe7, 0x45}, },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
+ .dc_driving_mask = 0x1f,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = false,
+ .do_toggle = true,
+ .do_toggle_driving = true,
+ .driving_updated_for_dev_dis = 0x8,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct phy_cfg rtd1319d_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0xa3},
+ [4] = {0xe4, 0x8e},
+ [5] = {0xe5, 0x4f},
+ [6] = {0xe6, 0x02}, },
+ .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE,
+ .page1 = { [14] = {0xf5, 0x1}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { [7] = {0xe7, 0x44}, },
+ .check_efuse = true,
+ .num_phy = 1,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
+ .dc_driving_mask = 0x1f,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = false,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0x8,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct phy_cfg rtd1315e_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0xa3},
+ [4] = {0xe4, 0x8c},
+ [5] = {0xe5, 0x4f},
+ [6] = {0xe6, 0x02}, },
+ .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE,
+ .page1 = { [3] = {0xe3, 0x7f},
+ [14] = {0xf5, 0x01}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { [7] = {0xe7, 0x44}, },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V2,
+ .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
+ .dc_driving_mask = 0x1f,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = false,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0x8,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct of_device_id usbphy_rtk_dt_match[] = {
+ { .compatible = "realtek,rtd1295-usb2phy", .data = &rtd1295_phy_cfg },
+ { .compatible = "realtek,rtd1312c-usb2phy", .data = &rtd1312c_phy_cfg },
+ { .compatible = "realtek,rtd1315e-usb2phy", .data = &rtd1315e_phy_cfg },
+ { .compatible = "realtek,rtd1319-usb2phy", .data = &rtd1319_phy_cfg },
+ { .compatible = "realtek,rtd1319d-usb2phy", .data = &rtd1319d_phy_cfg },
+ { .compatible = "realtek,rtd1395-usb2phy", .data = &rtd1395_phy_cfg },
+ { .compatible = "realtek,rtd1395-usb2phy-2port", .data = &rtd1395_phy_cfg_2port },
+ { .compatible = "realtek,rtd1619-usb2phy", .data = &rtd1619_phy_cfg },
+ { .compatible = "realtek,rtd1619b-usb2phy", .data = &rtd1619b_phy_cfg },
+ {},
+};
+MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
+
+static struct platform_driver rtk_usb2phy_driver = {
+ .probe = rtk_usb2phy_probe,
+ .remove_new = rtk_usb2phy_remove,
+ .driver = {
+ .name = "rtk-usb2phy",
+ .of_match_table = usbphy_rtk_dt_match,
+ },
+};
+
+module_platform_driver(rtk_usb2phy_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
+MODULE_DESCRIPTION("Realtek usb 2.0 phy driver");
diff --git a/drivers/phy/realtek/phy-rtk-usb3.c b/drivers/phy/realtek/phy-rtk-usb3.c
new file mode 100644
index 0000000000000..dfcf4b921bba6
--- /dev/null
+++ b/drivers/phy/realtek/phy-rtk-usb3.c
@@ -0,0 +1,748 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-rtk-usb3.c RTK USB 3.0 PHY driver
+ *
+ * Copyright (c) 2023 Realtek Semiconductor Corporation
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/regmap.h>
+#include <linux/sys_soc.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+#include <linux/usb.h>
+
+#define USB_MDIO_CTRL_PHY_BUSY BIT(7)
+#define USB_MDIO_CTRL_PHY_WRITE BIT(0)
+#define USB_MDIO_CTRL_PHY_ADDR_SHIFT 8
+#define USB_MDIO_CTRL_PHY_DATA_SHIFT 16
+
+#define MAX_USB_PHY_DATA_SIZE 0x30
+#define PHY_ADDR_0X09 0x09
+#define PHY_ADDR_0X0B 0x0b
+#define PHY_ADDR_0X0D 0x0d
+#define PHY_ADDR_0X10 0x10
+#define PHY_ADDR_0X1F 0x1f
+#define PHY_ADDR_0X20 0x20
+#define PHY_ADDR_0X21 0x21
+#define PHY_ADDR_0X30 0x30
+
+#define REG_0X09_FORCE_CALIBRATION BIT(9)
+#define REG_0X0B_RX_OFFSET_RANGE_MASK 0xc
+#define REG_0X0D_RX_DEBUG_TEST_EN BIT(6)
+#define REG_0X10_DEBUG_MODE_SETTING 0x3c0
+#define REG_0X10_DEBUG_MODE_SETTING_MASK 0x3f8
+#define REG_0X1F_RX_OFFSET_CODE_MASK 0x1e
+
+#define USB_U3_TX_LFPS_SWING_TRIM_SHIFT 4
+#define USB_U3_TX_LFPS_SWING_TRIM_MASK 0xf
+#define AMPLITUDE_CONTROL_COARSE_MASK 0xff
+#define AMPLITUDE_CONTROL_FINE_MASK 0xffff
+#define AMPLITUDE_CONTROL_COARSE_DEFAULT 0xff
+#define AMPLITUDE_CONTROL_FINE_DEFAULT 0xffff
+
+#define PHY_ADDR_MAP_ARRAY_INDEX(addr) (addr)
+#define ARRAY_INDEX_MAP_PHY_ADDR(index) (index)
+
+struct phy_reg {
+ void __iomem *reg_mdio_ctl;
+};
+
+struct phy_data {
+ u8 addr;
+ u16 data;
+};
+
+struct phy_cfg {
+ int param_size;
+ struct phy_data param[MAX_USB_PHY_DATA_SIZE];
+
+ bool check_efuse;
+ bool do_toggle;
+ bool do_toggle_once;
+ bool use_default_parameter;
+ bool check_rx_front_end_offset;
+};
+
+struct phy_parameter {
+ struct phy_reg phy_reg;
+
+ /* Get from efuse */
+ u8 efuse_usb_u3_tx_lfps_swing_trim;
+
+ /* Get from dts */
+ u32 amplitude_control_coarse;
+ u32 amplitude_control_fine;
+};
+
+struct rtk_phy {
+ struct device *dev;
+
+ struct phy_cfg *phy_cfg;
+ int num_phy;
+ struct phy_parameter *phy_parameter;
+
+ struct dentry *debug_dir;
+};
+
+#define PHY_IO_TIMEOUT_USEC (50000)
+#define PHY_IO_DELAY_US (100)
+
+static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
+{
+ int ret;
+ unsigned int val;
+
+ ret = read_poll_timeout(readl, val, ((val & mask) == result),
+ PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
+ if (ret) {
+ pr_err("%s can't program USB phy\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int rtk_phy3_wait_vbusy(struct phy_reg *phy_reg)
+{
+ return utmi_wait_register(phy_reg->reg_mdio_ctl, USB_MDIO_CTRL_PHY_BUSY, 0);
+}
+
+static u16 rtk_phy_read(struct phy_reg *phy_reg, char addr)
+{
+ unsigned int tmp;
+ u32 value;
+
+ tmp = (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT);
+
+ writel(tmp, phy_reg->reg_mdio_ctl);
+
+ rtk_phy3_wait_vbusy(phy_reg);
+
+ value = readl(phy_reg->reg_mdio_ctl);
+ value = value >> USB_MDIO_CTRL_PHY_DATA_SHIFT;
+
+ return (u16)value;
+}
+
+static int rtk_phy_write(struct phy_reg *phy_reg, char addr, u16 data)
+{
+ unsigned int val;
+
+ val = USB_MDIO_CTRL_PHY_WRITE |
+ (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT) |
+ (data << USB_MDIO_CTRL_PHY_DATA_SHIFT);
+
+ writel(val, phy_reg->reg_mdio_ctl);
+
+ rtk_phy3_wait_vbusy(phy_reg);
+
+ return 0;
+}
+
+static void do_rtk_usb3_phy_toggle(struct rtk_phy *rtk_phy, int index, bool connect)
+{
+ struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+ struct phy_reg *phy_reg;
+ struct phy_parameter *phy_parameter;
+ struct phy_data *phy_data;
+ u8 addr;
+ u16 data;
+ int i;
+
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (!phy_cfg->do_toggle)
+ return;
+
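+ /*
+ * Pulse the force-calibration bit of register 0x09, caching the
+ * register's current value in the parameter table on first use.
+ */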
+ i = PHY_ADDR_MAP_ARRAY_INDEX(PHY_ADDR_0X09);
+ phy_data = phy_cfg->param + i;
+ addr = phy_data->addr;
+ data = phy_data->data;
+
+ if (!addr && !data) {
+ addr = PHY_ADDR_0X09;
+ data = rtk_phy_read(phy_reg, addr);
+ phy_data->addr = addr;
+ phy_data->data = data;
+ }
+
+ rtk_phy_write(phy_reg, addr, data & (~REG_0X09_FORCE_CALIBRATION));
+ mdelay(1);
+ rtk_phy_write(phy_reg, addr, data | REG_0X09_FORCE_CALIBRATION);
+}
+
+static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+ struct phy_parameter *phy_parameter;
+ int i = 0;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (phy_cfg->use_default_parameter)
+ goto do_toggle;
+
+ for (i = 0; i < phy_cfg->param_size; i++) {
+ struct phy_data *phy_data = phy_cfg->param + i;
+ u8 addr = phy_data->addr;
+ u16 data = phy_data->data;
+
+ if (!addr && !data)
+ continue;
+
+ rtk_phy_write(phy_reg, addr, data);
+ }
+
+do_toggle:
+ if (phy_cfg->do_toggle_once)
+ phy_cfg->do_toggle = true;
+
+ do_rtk_usb3_phy_toggle(rtk_phy, index, false);
+
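+ /*
+ * For chips that toggle only once, verify the calibration result by
+ * enabling the PHY debug mode and polling bit 15 of register 0x30.
+ */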
+ if (phy_cfg->do_toggle_once) {
+ u16 check_value = 0;
+ int count = 10;
+ u16 value_0x0d, value_0x10;
+
+ /* Enable debug mode by setting 0x0D and 0x10 */
+ value_0x0d = rtk_phy_read(phy_reg, PHY_ADDR_0X0D);
+ value_0x10 = rtk_phy_read(phy_reg, PHY_ADDR_0X10);
+
+ rtk_phy_write(phy_reg, PHY_ADDR_0X0D,
+ value_0x0d | REG_0X0D_RX_DEBUG_TEST_EN);
+ rtk_phy_write(phy_reg, PHY_ADDR_0X10,
+ (value_0x10 & ~REG_0X10_DEBUG_MODE_SETTING_MASK) |
+ REG_0X10_DEBUG_MODE_SETTING);
+
+ check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
+
+ while (!(check_value & BIT(15))) {
+ check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
+ mdelay(1);
+ if (count-- < 0)
+ break;
+ }
+
+ if (!(check_value & BIT(15)))
+ dev_info(rtk_phy->dev, "toggle fail addr=0x%02x, data=0x%04x\n",
+ PHY_ADDR_0X30, check_value);
+
+ /* Disable debug mode by restoring 0x0D and 0x10 to their defaults */
+ rtk_phy_write(phy_reg, PHY_ADDR_0X0D, value_0x0d);
+ rtk_phy_write(phy_reg, PHY_ADDR_0X10, value_0x10);
+
+ phy_cfg->do_toggle = false;
+ }
+
+ if (phy_cfg->check_rx_front_end_offset) {
+ u16 rx_offset_code, rx_offset_range;
+ u16 code_mask = REG_0X1F_RX_OFFSET_CODE_MASK;
+ u16 range_mask = REG_0X0B_RX_OFFSET_RANGE_MASK;
+ bool do_update = false;
+
+ rx_offset_code = rtk_phy_read(phy_reg, PHY_ADDR_0X1F);
+ if (((rx_offset_code & code_mask) == 0x0) ||
+ ((rx_offset_code & code_mask) == code_mask))
+ do_update = true;
+
+ rx_offset_range = rtk_phy_read(phy_reg, PHY_ADDR_0X0B);
+ if (((rx_offset_range & range_mask) == range_mask) && do_update) {
+ dev_warn(rtk_phy->dev, "Don't update rx_offset_range (rx_offset_code=0x%x, rx_offset_range=0x%x)\n",
+ rx_offset_code, rx_offset_range);
+ do_update = false;
+ }
+
+ if (do_update) {
+ u16 tmp1, tmp2;
+
+ tmp1 = rx_offset_range & (~range_mask);
+ tmp2 = rx_offset_range & range_mask;
+ tmp2 += (1 << 2);
+ rx_offset_range = tmp1 | (tmp2 & range_mask);
+ rtk_phy_write(phy_reg, PHY_ADDR_0X0B, rx_offset_range);
+ goto do_toggle;
+ }
+ }
+
+ return 0;
+}
+
+static int rtk_phy_init(struct phy *phy)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+ int ret = 0;
+ int i;
+ unsigned long phy_init_time = jiffies;
+
+ for (i = 0; i < rtk_phy->num_phy; i++)
+ ret = do_rtk_phy_init(rtk_phy, i);
+
+ dev_dbg(rtk_phy->dev, "Initialized RTK USB 3.0 PHY (take %dms)\n",
+ jiffies_to_msecs(jiffies - phy_init_time));
+
+ return ret;
+}
+
+static int rtk_phy_exit(struct phy *phy)
+{
+ return 0;
+}
+
+static void rtk_phy_toggle(struct rtk_phy *rtk_phy, bool connect, int port)
+{
+ int index = port;
+
+ if (index >= rtk_phy->num_phy) {
+ dev_err(rtk_phy->dev, "%s: port=%d is out of range (num_phy=%d)\n",
+ __func__, index, rtk_phy->num_phy);
+ return;
+ }
+
+ do_rtk_usb3_phy_toggle(rtk_phy, index, connect);
+}
+
+static int rtk_phy_connect(struct phy *phy, int port)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+
+ dev_dbg(rtk_phy->dev, "%s port=%d\n", __func__, port);
+ rtk_phy_toggle(rtk_phy, true, port);
+
+ return 0;
+}
+
+static int rtk_phy_disconnect(struct phy *phy, int port)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+
+ dev_dbg(rtk_phy->dev, "%s port=%d\n", __func__, port);
+ rtk_phy_toggle(rtk_phy, false, port);
+
+ return 0;
+}
+
+static const struct phy_ops ops = {
+ .init = rtk_phy_init,
+ .exit = rtk_phy_exit,
+ .connect = rtk_phy_connect,
+ .disconnect = rtk_phy_disconnect,
+ .owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *create_phy_debug_root(void)
+{
+ struct dentry *phy_debug_root;
+
+ phy_debug_root = debugfs_lookup("phy", usb_debug_root);
+ if (!phy_debug_root)
+ phy_debug_root = debugfs_create_dir("phy", usb_debug_root);
+
+ return phy_debug_root;
+}
+
+static int rtk_usb3_parameter_show(struct seq_file *s, void *unused)
+{
+ struct rtk_phy *rtk_phy = s->private;
+ struct phy_cfg *phy_cfg;
+ int i, index;
+
+ phy_cfg = rtk_phy->phy_cfg;
+
+ seq_puts(s, "Property:\n");
+ seq_printf(s, " check_efuse: %s\n",
+ phy_cfg->check_efuse ? "Enable" : "Disable");
+ seq_printf(s, " do_toggle: %s\n",
+ phy_cfg->do_toggle ? "Enable" : "Disable");
+ seq_printf(s, " do_toggle_once: %s\n",
+ phy_cfg->do_toggle_once ? "Enable" : "Disable");
+ seq_printf(s, " use_default_parameter: %s\n",
+ phy_cfg->use_default_parameter ? "Enable" : "Disable");
+
+ for (index = 0; index < rtk_phy->num_phy; index++) {
+ struct phy_reg *phy_reg;
+ struct phy_parameter *phy_parameter;
+
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ seq_printf(s, "PHY %d:\n", index);
+
+ for (i = 0; i < phy_cfg->param_size; i++) {
+ struct phy_data *phy_data = phy_cfg->param + i;
+ u8 addr = ARRAY_INDEX_MAP_PHY_ADDR(i);
+ u16 data = phy_data->data;
+
+ if (!phy_data->addr && !data)
+ seq_printf(s, " addr = 0x%02x, data = none ==> read value = 0x%04x\n",
+ addr, rtk_phy_read(phy_reg, addr));
+ else
+ seq_printf(s, " addr = 0x%02x, data = 0x%04x ==> read value = 0x%04x\n",
+ addr, data, rtk_phy_read(phy_reg, addr));
+ }
+
+ seq_puts(s, "PHY Property:\n");
+ seq_printf(s, " efuse_usb_u3_tx_lfps_swing_trim: 0x%x\n",
+ (int)phy_parameter->efuse_usb_u3_tx_lfps_swing_trim);
+ seq_printf(s, " amplitude_control_coarse: 0x%x\n",
+ (int)phy_parameter->amplitude_control_coarse);
+ seq_printf(s, " amplitude_control_fine: 0x%x\n",
+ (int)phy_parameter->amplitude_control_fine);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(rtk_usb3_parameter);
+
+static inline void create_debug_files(struct rtk_phy *rtk_phy)
+{
+ struct dentry *phy_debug_root = NULL;
+
+ phy_debug_root = create_phy_debug_root();
+
+ if (!phy_debug_root)
+ return;
+
+ rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev), phy_debug_root);
+
+ debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
+ &rtk_usb3_parameter_fops);
+}
+
+static inline void remove_debug_files(struct rtk_phy *rtk_phy)
+{
+ debugfs_remove_recursive(rtk_phy->debug_dir);
+}
+#else
+static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
+static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
+#endif /* CONFIG_DEBUG_FS */
+
+static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, int index)
+{
+ struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+ u8 value = 0;
+ struct nvmem_cell *cell;
+
+ if (!phy_cfg->check_efuse)
+ goto out;
+
+ cell = nvmem_cell_get(rtk_phy->dev, "usb_u3_tx_lfps_swing_trim");
+ if (IS_ERR(cell)) {
+ dev_dbg(rtk_phy->dev, "%s no usb_u3_tx_lfps_swing_trim: %ld\n",
+ __func__, PTR_ERR(cell));
+ } else {
+ unsigned char *buf;
+ size_t buf_size;
+
+ buf = nvmem_cell_read(cell, &buf_size);
+ if (!IS_ERR(buf)) {
+ value = buf[0] & USB_U3_TX_LFPS_SWING_TRIM_MASK;
+ kfree(buf);
+ }
+ nvmem_cell_put(cell);
+ }
+
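+ /* Raise non-zero trim values below 0x8 to the minimum swing; zero keeps the default. */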
+ if (value > 0 && value < 0x8)
+ phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = 0x8;
+ else
+ phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = (u8)value;
+
+out:
+ return 0;
+}
+
+static void update_amplitude_control_value(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+
+ phy_reg = &phy_parameter->phy_reg;
+ phy_cfg = rtk_phy->phy_cfg;
+
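+ /*
+ * Fold the DT and efuse overrides into the parameter table so they are
+ * programmed along with the other defaults during PHY init.
+ */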
+ if (phy_parameter->amplitude_control_coarse != AMPLITUDE_CONTROL_COARSE_DEFAULT) {
+ u16 val_mask = AMPLITUDE_CONTROL_COARSE_MASK;
+ u16 data;
+
+ if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
+ phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
+ data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
+ } else {
+ data = phy_cfg->param[PHY_ADDR_0X20].data;
+ }
+
+ data &= (~val_mask);
+ data |= (phy_parameter->amplitude_control_coarse & val_mask);
+
+ phy_cfg->param[PHY_ADDR_0X20].data = data;
+ }
+
+ if (phy_parameter->efuse_usb_u3_tx_lfps_swing_trim) {
+ u8 efuse_val = phy_parameter->efuse_usb_u3_tx_lfps_swing_trim;
+ u16 val_mask = USB_U3_TX_LFPS_SWING_TRIM_MASK;
+ int val_shift = USB_U3_TX_LFPS_SWING_TRIM_SHIFT;
+ u16 data;
+
+ if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
+ phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
+ data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
+ } else {
+ data = phy_cfg->param[PHY_ADDR_0X20].data;
+ }
+
+ data &= ~(val_mask << val_shift);
+ data |= ((efuse_val & val_mask) << val_shift);
+
+ phy_cfg->param[PHY_ADDR_0X20].data = data;
+ }
+
+ if (phy_parameter->amplitude_control_fine != AMPLITUDE_CONTROL_FINE_DEFAULT) {
+ u16 val_mask = AMPLITUDE_CONTROL_FINE_MASK;
+
+ if (!phy_cfg->param[PHY_ADDR_0X21].addr && !phy_cfg->param[PHY_ADDR_0X21].data)
+ phy_cfg->param[PHY_ADDR_0X21].addr = PHY_ADDR_0X21;
+
+ phy_cfg->param[PHY_ADDR_0X21].data =
+ phy_parameter->amplitude_control_fine & val_mask;
+ }
+}
+
+static int parse_phy_data(struct rtk_phy *rtk_phy)
+{
+ struct device *dev = rtk_phy->dev;
+ struct phy_parameter *phy_parameter;
+ int ret = 0;
+ int index;
+
+ rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
+ rtk_phy->num_phy, GFP_KERNEL);
+ if (!rtk_phy->phy_parameter)
+ return -ENOMEM;
+
+ for (index = 0; index < rtk_phy->num_phy; index++) {
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+
+ phy_parameter->phy_reg.reg_mdio_ctl = of_iomap(dev->of_node, 0) + index;
+
+ /* Amplitude control address 0x20 bit 0 to bit 7 */
+ if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-coarse-tuning",
+ &phy_parameter->amplitude_control_coarse))
+ phy_parameter->amplitude_control_coarse = AMPLITUDE_CONTROL_COARSE_DEFAULT;
+
+ /* Amplitude control address 0x21 bit 0 to bit 15 */
+ if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-fine-tuning",
+ &phy_parameter->amplitude_control_fine))
+ phy_parameter->amplitude_control_fine = AMPLITUDE_CONTROL_FINE_DEFAULT;
+
+ get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
+
+ update_amplitude_control_value(rtk_phy, phy_parameter);
+ }
+
+ return ret;
+}
+
+static int rtk_usb3phy_probe(struct platform_device *pdev)
+{
+ struct rtk_phy *rtk_phy;
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+ const struct phy_cfg *phy_cfg;
+ int ret;
+
+ phy_cfg = of_device_get_match_data(dev);
+ if (!phy_cfg) {
+ dev_err(dev, "phy config are not assigned!\n");
+ return -EINVAL;
+ }
+
+ rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
+ if (!rtk_phy)
+ return -ENOMEM;
+
+ rtk_phy->dev = &pdev->dev;
+ rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
+ if (!rtk_phy->phy_cfg)
+ return -ENOMEM;
+
+ memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
+
+ rtk_phy->num_phy = 1;
+
+ ret = parse_phy_data(rtk_phy);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, rtk_phy);
+
+ generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
+ if (IS_ERR(generic_phy))
+ return PTR_ERR(generic_phy);
+
+ phy_set_drvdata(generic_phy, rtk_phy);
+
+ phy_provider = devm_of_phy_provider_register(rtk_phy->dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ create_debug_files(rtk_phy);
+
+err:
+ return ret;
+}
+
+static void rtk_usb3phy_remove(struct platform_device *pdev)
+{
+ struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);
+
+ remove_debug_files(rtk_phy);
+}
+
+static const struct phy_cfg rtd1295_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [0] = {0x01, 0x4008}, [1] = {0x01, 0xe046},
+ [2] = {0x02, 0x6046}, [3] = {0x03, 0x2779},
+ [4] = {0x04, 0x72f5}, [5] = {0x05, 0x2ad3},
+ [6] = {0x06, 0x000e}, [7] = {0x07, 0x2e00},
+ [8] = {0x08, 0x3591}, [9] = {0x09, 0x525c},
+ [10] = {0x0a, 0xa600}, [11] = {0x0b, 0xa904},
+ [12] = {0x0c, 0xc000}, [13] = {0x0d, 0xef1c},
+ [14] = {0x0e, 0x2000}, [15] = {0x0f, 0x0000},
+ [16] = {0x10, 0x000c}, [17] = {0x11, 0x4c00},
+ [18] = {0x12, 0xfc00}, [19] = {0x13, 0x0c81},
+ [20] = {0x14, 0xde01}, [21] = {0x15, 0x0000},
+ [22] = {0x16, 0x0000}, [23] = {0x17, 0x0000},
+ [24] = {0x18, 0x0000}, [25] = {0x19, 0x4004},
+ [26] = {0x1a, 0x1260}, [27] = {0x1b, 0xff00},
+ [28] = {0x1c, 0xcb00}, [29] = {0x1d, 0xa03f},
+ [30] = {0x1e, 0xc2e0}, [31] = {0x1f, 0x2807},
+ [32] = {0x20, 0x947a}, [33] = {0x21, 0x88aa},
+ [34] = {0x22, 0x0057}, [35] = {0x23, 0xab66},
+ [36] = {0x24, 0x0800}, [37] = {0x25, 0x0000},
+ [38] = {0x26, 0x040a}, [39] = {0x27, 0x01d6},
+ [40] = {0x28, 0xf8c2}, [41] = {0x29, 0x3080},
+ [42] = {0x2a, 0x3082}, [43] = {0x2b, 0x2078},
+ [44] = {0x2c, 0xffff}, [45] = {0x2d, 0xffff},
+ [46] = {0x2e, 0x0000}, [47] = {0x2f, 0x0040}, },
+ .check_efuse = false,
+ .do_toggle = true,
+ .do_toggle_once = false,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = false,
+};
+
+static const struct phy_cfg rtd1619_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [8] = {0x08, 0x3591},
+ [38] = {0x26, 0x840b},
+ [40] = {0x28, 0xf842}, },
+ .check_efuse = false,
+ .do_toggle = true,
+ .do_toggle_once = false,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = false,
+};
+
+static const struct phy_cfg rtd1319_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [1] = {0x01, 0xac86},
+ [6] = {0x06, 0x0003},
+ [9] = {0x09, 0x924c},
+ [10] = {0x0a, 0xa608},
+ [11] = {0x0b, 0xb905},
+ [14] = {0x0e, 0x2010},
+ [32] = {0x20, 0x705a},
+ [33] = {0x21, 0xf645},
+ [34] = {0x22, 0x0013},
+ [35] = {0x23, 0xcb66},
+ [41] = {0x29, 0xff00}, },
+ .check_efuse = true,
+ .do_toggle = true,
+ .do_toggle_once = false,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = false,
+};
+
+static const struct phy_cfg rtd1619b_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [1] = {0x01, 0xac8c},
+ [6] = {0x06, 0x0017},
+ [9] = {0x09, 0x724c},
+ [10] = {0x0a, 0xb610},
+ [11] = {0x0b, 0xb90d},
+ [13] = {0x0d, 0xef2a},
+ [15] = {0x0f, 0x9050},
+ [16] = {0x10, 0x000c},
+ [32] = {0x20, 0x70ff},
+ [34] = {0x22, 0x0013},
+ [35] = {0x23, 0xdb66},
+ [38] = {0x26, 0x8609},
+ [41] = {0x29, 0xff13},
+ [42] = {0x2a, 0x3070}, },
+ .check_efuse = true,
+ .do_toggle = false,
+ .do_toggle_once = true,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = false,
+};
+
+static const struct phy_cfg rtd1319d_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [1] = {0x01, 0xac89},
+ [4] = {0x04, 0xf2f5},
+ [6] = {0x06, 0x0017},
+ [9] = {0x09, 0x424c},
+ [10] = {0x0a, 0x9610},
+ [11] = {0x0b, 0x9901},
+ [12] = {0x0c, 0xf000},
+ [13] = {0x0d, 0xef2a},
+ [14] = {0x0e, 0x1000},
+ [15] = {0x0f, 0x9050},
+ [32] = {0x20, 0x7077},
+ [35] = {0x23, 0x0b62},
+ [37] = {0x25, 0x10ec},
+ [42] = {0x2a, 0x3070}, },
+ .check_efuse = true,
+ .do_toggle = false,
+ .do_toggle_once = true,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = true,
+};
+
+static const struct of_device_id usbphy_rtk_dt_match[] = {
+ { .compatible = "realtek,rtd1295-usb3phy", .data = &rtd1295_phy_cfg },
+ { .compatible = "realtek,rtd1319-usb3phy", .data = &rtd1319_phy_cfg },
+ { .compatible = "realtek,rtd1319d-usb3phy", .data = &rtd1319d_phy_cfg },
+ { .compatible = "realtek,rtd1619-usb3phy", .data = &rtd1619_phy_cfg },
+ { .compatible = "realtek,rtd1619b-usb3phy", .data = &rtd1619b_phy_cfg },
+ {},
+};
+MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
+
+static struct platform_driver rtk_usb3phy_driver = {
+ .probe = rtk_usb3phy_probe,
+ .remove_new = rtk_usb3phy_remove,
+ .driver = {
+ .name = "rtk-usb3phy",
+ .of_match_table = usbphy_rtk_dt_match,
+ },
+};
+
+module_platform_driver(rtk_usb3phy_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
+MODULE_DESCRIPTION("Realtek usb 3.0 phy driver");
diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c
index 507435af26567..c0221e7258c03 100644
--- a/drivers/phy/renesas/phy-rcar-gen2.c
+++ b/drivers/phy/renesas/phy-rcar-gen2.c
@@ -306,7 +306,7 @@ static const struct of_device_id rcar_gen2_phy_match_table[] = {
MODULE_DEVICE_TABLE(of, rcar_gen2_phy_match_table);
static struct phy *rcar_gen2_phy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct rcar_gen2_phy_driver *drv;
struct device_node *np = args->np;
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index 6387c0d34c551..fbab6ac0f0d10 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -608,7 +608,7 @@ static const unsigned int rcar_gen3_phy_cable[] = {
};
static struct phy *rcar_gen3_phy_usb2_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
diff --git a/drivers/phy/renesas/r8a779f0-ether-serdes.c b/drivers/phy/renesas/r8a779f0-ether-serdes.c
index fc6e398fa3bfb..f1f1da4a0b1fe 100644
--- a/drivers/phy/renesas/r8a779f0-ether-serdes.c
+++ b/drivers/phy/renesas/r8a779f0-ether-serdes.c
@@ -334,7 +334,7 @@ static const struct phy_ops r8a779f0_eth_serdes_ops = {
};
static struct phy *r8a779f0_eth_serdes_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct r8a779f0_eth_serdes_drv_data *dd = dev_get_drvdata(dev);
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index 94360fc96a6fb..a34f67bb7e61a 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -83,6 +83,14 @@ config PHY_ROCKCHIP_PCIE
help
Enable this to support the Rockchip PCIe PHY.
+config PHY_ROCKCHIP_SAMSUNG_HDPTX
+ tristate "Rockchip Samsung HDMI/eDP Combo PHY driver"
+ depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
+ select GENERIC_PHY
+ help
+ Enable this to support the Rockchip HDMI/eDP Combo PHY
+ with Samsung IP block.
+
config PHY_ROCKCHIP_SNPS_PCIE3
tristate "Rockchip Snps PCIe3 PHY Driver"
depends on (ARCH_ROCKCHIP && OF) || COMPILE_TEST
diff --git a/drivers/phy/rockchip/Makefile b/drivers/phy/rockchip/Makefile
index 7eab129230d17..3d911304e6543 100644
--- a/drivers/phy/rockchip/Makefile
+++ b/drivers/phy/rockchip/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_PHY_ROCKCHIP_INNO_HDMI) += phy-rockchip-inno-hdmi.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2) += phy-rockchip-inno-usb2.o
obj-$(CONFIG_PHY_ROCKCHIP_NANENG_COMBO_PHY) += phy-rockchip-naneng-combphy.o
obj-$(CONFIG_PHY_ROCKCHIP_PCIE) += phy-rockchip-pcie.o
+obj-$(CONFIG_PHY_ROCKCHIP_SAMSUNG_HDPTX) += phy-rockchip-samsung-hdptx.o
obj-$(CONFIG_PHY_ROCKCHIP_SNPS_PCIE3) += phy-rockchip-snps-pcie3.o
obj-$(CONFIG_PHY_ROCKCHIP_TYPEC) += phy-rockchip-typec.o
obj-$(CONFIG_PHY_ROCKCHIP_USB) += phy-rockchip-usb.o
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index 5de5e2e97ffa0..76b9cf417591d 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -251,7 +251,7 @@ static const struct phy_ops rochchip_combphy_ops = {
.owner = THIS_MODULE,
};
-static struct phy *rockchip_combphy_xlate(struct device *dev, struct of_phandle_args *args)
+static struct phy *rockchip_combphy_xlate(struct device *dev, const struct of_phandle_args *args)
{
struct rockchip_combphy_priv *priv = dev_get_drvdata(dev);
diff --git a/drivers/phy/rockchip/phy-rockchip-pcie.c b/drivers/phy/rockchip/phy-rockchip-pcie.c
index 1bbd6be2a5847..51cc5ece0e637 100644
--- a/drivers/phy/rockchip/phy-rockchip-pcie.c
+++ b/drivers/phy/rockchip/phy-rockchip-pcie.c
@@ -82,7 +82,7 @@ static struct rockchip_pcie_phy *to_pcie_phy(struct phy_pcie_instance *inst)
}
static struct phy *rockchip_pcie_phy_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct rockchip_pcie_phy *rk_phy = dev_get_drvdata(dev);
diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
new file mode 100644
index 0000000000000..946c01210ac8c
--- /dev/null
+++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
@@ -0,0 +1,1028 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021-2022 Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ *
+ * Author: Algea Cao <algea.cao@rock-chips.com>
+ * Author: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/rational.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#define GRF_HDPTX_CON0 0x00
+#define HDPTX_I_PLL_EN BIT(7)
+#define HDPTX_I_BIAS_EN BIT(6)
+#define HDPTX_I_BGR_EN BIT(5)
+#define GRF_HDPTX_STATUS 0x80
+#define HDPTX_O_PLL_LOCK_DONE BIT(3)
+#define HDPTX_O_PHY_CLK_RDY BIT(2)
+#define HDPTX_O_PHY_RDY BIT(1)
+#define HDPTX_O_SB_RDY BIT(0)
+
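+/*
+ * Convert a four-digit hex register index into a byte offset while
+ * enforcing the per-block range at compile time.
+ */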
+#define HDTPX_REG(_n, _min, _max) \
+ ( \
+ BUILD_BUG_ON_ZERO((0x##_n) < (0x##_min)) + \
+ BUILD_BUG_ON_ZERO((0x##_n) > (0x##_max)) + \
+ ((0x##_n) * 4) \
+ )
+
+#define CMN_REG(n) HDTPX_REG(n, 0000, 00a7)
+#define SB_REG(n) HDTPX_REG(n, 0100, 0129)
+#define LNTOP_REG(n) HDTPX_REG(n, 0200, 0229)
+#define LANE_REG(n) HDTPX_REG(n, 0300, 062d)
+
+/* CMN_REG(0008) */
+#define LCPLL_EN_MASK BIT(6)
+#define LCPLL_LCVCO_MODE_EN_MASK BIT(4)
+/* CMN_REG(001e) */
+#define LCPLL_PI_EN_MASK BIT(5)
+#define LCPLL_100M_CLK_EN_MASK BIT(0)
+/* CMN_REG(0025) */
+#define LCPLL_PMS_IQDIV_RSTN BIT(4)
+/* CMN_REG(0028) */
+#define LCPLL_SDC_FRAC_EN BIT(2)
+#define LCPLL_SDC_FRAC_RSTN BIT(0)
+/* CMN_REG(002d) */
+#define LCPLL_SDC_N_MASK GENMASK(3, 1)
+/* CMN_REG(002e) */
+#define LCPLL_SDC_NUMBERATOR_MASK GENMASK(5, 0)
+/* CMN_REG(002f) */
+#define LCPLL_SDC_DENOMINATOR_MASK GENMASK(7, 2)
+#define LCPLL_SDC_NDIV_RSTN BIT(0)
+/* CMN_REG(003d) */
+#define ROPLL_LCVCO_EN BIT(4)
+/* CMN_REG(004e) */
+#define ROPLL_PI_EN BIT(5)
+/* CMN_REG(005c) */
+#define ROPLL_PMS_IQDIV_RSTN BIT(5)
+/* CMN_REG(005e) */
+#define ROPLL_SDM_EN_MASK BIT(6)
+#define ROPLL_SDM_FRAC_EN_RBR BIT(3)
+#define ROPLL_SDM_FRAC_EN_HBR BIT(2)
+#define ROPLL_SDM_FRAC_EN_HBR2 BIT(1)
+#define ROPLL_SDM_FRAC_EN_HBR3 BIT(0)
+/* CMN_REG(0064) */
+#define ROPLL_SDM_NUM_SIGN_RBR_MASK BIT(3)
+/* CMN_REG(0069) */
+#define ROPLL_SDC_N_RBR_MASK GENMASK(2, 0)
+/* CMN_REG(0074) */
+#define ROPLL_SDC_NDIV_RSTN BIT(2)
+#define ROPLL_SSC_EN BIT(0)
+/* CMN_REG(0081) */
+#define OVRD_PLL_CD_CLK_EN BIT(8)
+#define PLL_CD_HSCLK_EAST_EN BIT(0)
+/* CMN_REG(0086) */
+#define PLL_PCG_POSTDIV_SEL_MASK GENMASK(7, 4)
+#define PLL_PCG_CLK_SEL_MASK GENMASK(3, 1)
+#define PLL_PCG_CLK_EN BIT(0)
+/* CMN_REG(0087) */
+#define PLL_FRL_MODE_EN BIT(3)
+#define PLL_TX_HS_CLK_EN BIT(2)
+/* CMN_REG(0089) */
+#define LCPLL_ALONE_MODE BIT(1)
+/* CMN_REG(0097) */
+#define DIG_CLK_SEL BIT(1)
+#define ROPLL_REF BIT(1)
+#define LCPLL_REF 0
+/* CMN_REG(0099) */
+#define CMN_ROPLL_ALONE_MODE BIT(2)
+#define ROPLL_ALONE_MODE BIT(2)
+/* CMN_REG(009a) */
+#define HS_SPEED_SEL BIT(0)
+#define DIV_10_CLOCK BIT(0)
+/* CMN_REG(009b) */
+#define IS_SPEED_SEL BIT(4)
+#define LINK_SYMBOL_CLOCK BIT(4)
+#define LINK_SYMBOL_CLOCK1_2 0
+
+/* SB_REG(0102) */
+#define OVRD_SB_RXTERM_EN_MASK BIT(5)
+#define SB_RXTERM_EN_MASK BIT(4)
+#define ANA_SB_RXTERM_OFFSP_MASK GENMASK(3, 0)
+/* SB_REG(0103) */
+#define ANA_SB_RXTERM_OFFSN_MASK GENMASK(6, 3)
+#define OVRD_SB_RX_RESCAL_DONE_MASK BIT(1)
+#define SB_RX_RESCAL_DONE_MASK BIT(0)
+/* SB_REG(0104) */
+#define OVRD_SB_EN_MASK BIT(5)
+#define SB_EN_MASK BIT(4)
+/* SB_REG(0105) */
+#define OVRD_SB_EARC_CMDC_EN_MASK BIT(6)
+#define SB_EARC_CMDC_EN_MASK BIT(5)
+#define ANA_SB_TX_HLVL_PROG_MASK GENMASK(2, 0)
+/* SB_REG(0106) */
+#define ANA_SB_TX_LLVL_PROG_MASK GENMASK(6, 4)
+/* SB_REG(0109) */
+#define ANA_SB_DMRX_AFC_DIV_RATIO_MASK GENMASK(2, 0)
+/* SB_REG(010f) */
+#define OVRD_SB_VREG_EN_MASK BIT(7)
+#define SB_VREG_EN_MASK BIT(6)
+#define OVRD_SB_VREG_LPF_BYPASS_MASK BIT(5)
+#define SB_VREG_LPF_BYPASS_MASK BIT(4)
+#define ANA_SB_VREG_GAIN_CTRL_MASK GENMASK(3, 0)
+/* SB_REG(0110) */
+#define ANA_SB_VREG_REF_SEL_MASK BIT(0)
+/* SB_REG(0113) */
+#define SB_RX_RCAL_OPT_CODE_MASK GENMASK(5, 4)
+#define SB_RX_RTERM_CTRL_MASK GENMASK(3, 0)
+/* SB_REG(0114) */
+#define SB_TG_SB_EN_DELAY_TIME_MASK GENMASK(5, 3)
+#define SB_TG_RXTERM_EN_DELAY_TIME_MASK GENMASK(2, 0)
+/* SB_REG(0115) */
+#define SB_READY_DELAY_TIME_MASK GENMASK(5, 3)
+#define SB_TG_OSC_EN_DELAY_TIME_MASK GENMASK(2, 0)
+/* SB_REG(0116) */
+#define AFC_RSTN_DELAY_TIME_MASK GENMASK(6, 4)
+/* SB_REG(0117) */
+#define FAST_PULSE_TIME_MASK GENMASK(3, 0)
+/* SB_REG(011b) */
+#define SB_EARC_SIG_DET_BYPASS_MASK BIT(4)
+#define SB_AFC_TOL_MASK GENMASK(3, 0)
+/* SB_REG(011f) */
+#define SB_PWM_AFC_CTRL_MASK GENMASK(7, 2)
+#define SB_RCAL_RSTN_MASK BIT(1)
+/* SB_REG(0120) */
+#define SB_EARC_EN_MASK BIT(1)
+#define SB_EARC_AFC_EN_MASK BIT(2)
+/* SB_REG(0123) */
+#define OVRD_SB_READY_MASK BIT(5)
+#define SB_READY_MASK BIT(4)
+
+/* LNTOP_REG(0200) */
+#define PROTOCOL_SEL BIT(2)
+#define HDMI_MODE BIT(2)
+#define HDMI_TMDS_FRL_SEL BIT(1)
+/* LNTOP_REG(0206) */
+#define DATA_BUS_SEL BIT(0)
+#define DATA_BUS_36_40 BIT(0)
+/* LNTOP_REG(0207) */
+#define LANE_EN 0xf
+#define ALL_LANE_EN 0xf
+
+/* LANE_REG(0312) */
+#define LN0_TX_SER_RATE_SEL_RBR BIT(5)
+#define LN0_TX_SER_RATE_SEL_HBR BIT(4)
+#define LN0_TX_SER_RATE_SEL_HBR2 BIT(3)
+#define LN0_TX_SER_RATE_SEL_HBR3 BIT(2)
+/* LANE_REG(0412) */
+#define LN1_TX_SER_RATE_SEL_RBR BIT(5)
+#define LN1_TX_SER_RATE_SEL_HBR BIT(4)
+#define LN1_TX_SER_RATE_SEL_HBR2 BIT(3)
+#define LN1_TX_SER_RATE_SEL_HBR3 BIT(2)
+/* LANE_REG(0512) */
+#define LN2_TX_SER_RATE_SEL_RBR BIT(5)
+#define LN2_TX_SER_RATE_SEL_HBR BIT(4)
+#define LN2_TX_SER_RATE_SEL_HBR2 BIT(3)
+#define LN2_TX_SER_RATE_SEL_HBR3 BIT(2)
+/* LANE_REG(0612) */
+#define LN3_TX_SER_RATE_SEL_RBR BIT(5)
+#define LN3_TX_SER_RATE_SEL_HBR BIT(4)
+#define LN3_TX_SER_RATE_SEL_HBR2 BIT(3)
+#define LN3_TX_SER_RATE_SEL_HBR3 BIT(2)
+
+struct lcpll_config {
+ u32 bit_rate;
+ u8 lcvco_mode_en;
+ u8 pi_en;
+ u8 clk_en_100m;
+ u8 pms_mdiv;
+ u8 pms_mdiv_afc;
+ u8 pms_pdiv;
+ u8 pms_refdiv;
+ u8 pms_sdiv;
+ u8 pi_cdiv_rstn;
+ u8 pi_cdiv_sel;
+ u8 sdm_en;
+ u8 sdm_rstn;
+ u8 sdc_frac_en;
+ u8 sdc_rstn;
+ u8 sdm_deno;
+ u8 sdm_num_sign;
+ u8 sdm_num;
+ u8 sdc_n;
+ u8 sdc_n2;
+ u8 sdc_num;
+ u8 sdc_deno;
+ u8 sdc_ndiv_rstn;
+ u8 ssc_en;
+ u8 ssc_fm_dev;
+ u8 ssc_fm_freq;
+ u8 ssc_clk_div_sel;
+ u8 cd_tx_ser_rate_sel;
+};
+
+struct ropll_config {
+ u32 bit_rate;
+ u8 pms_mdiv;
+ u8 pms_mdiv_afc;
+ u8 pms_pdiv;
+ u8 pms_refdiv;
+ u8 pms_sdiv;
+ u8 pms_iqdiv_rstn;
+ u8 ref_clk_sel;
+ u8 sdm_en;
+ u8 sdm_rstn;
+ u8 sdc_frac_en;
+ u8 sdc_rstn;
+ u8 sdm_clk_div;
+ u8 sdm_deno;
+ u8 sdm_num_sign;
+ u8 sdm_num;
+ u8 sdc_n;
+ u8 sdc_num;
+ u8 sdc_deno;
+ u8 sdc_ndiv_rstn;
+ u8 ssc_en;
+ u8 ssc_fm_dev;
+ u8 ssc_fm_freq;
+ u8 ssc_clk_div_sel;
+ u8 ana_cpp_ctrl;
+ u8 ana_lpf_c_sel;
+ u8 cd_tx_ser_rate_sel;
+};
+
+enum rk_hdptx_reset {
+ RST_PHY = 0,
+ RST_APB,
+ RST_INIT,
+ RST_CMN,
+ RST_LANE,
+ RST_ROPLL,
+ RST_LCPLL,
+ RST_MAX
+};
+
+struct rk_hdptx_phy {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap *grf;
+
+ struct phy *phy;
+ struct phy_config *phy_cfg;
+ struct clk_bulk_data *clks;
+ int nr_clks;
+ struct reset_control_bulk_data rsts[RST_MAX];
+};
+
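+/* Pre-computed ROPLL divider/SDM settings indexed by TMDS bit rate. */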
+static const struct ropll_config ropll_tmds_cfg[] = {
+ { 5940000, 124, 124, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 62, 1, 16, 5, 0,
+ 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 3712500, 155, 155, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 62, 1, 16, 5, 0,
+ 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 2970000, 124, 124, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 62, 1, 16, 5, 0,
+ 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 1620000, 135, 135, 1, 1, 3, 1, 1, 0, 1, 1, 1, 1, 4, 0, 3, 5, 5, 0x10,
+ 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 1856250, 155, 155, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 62, 1, 16, 5, 0,
+ 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 1540000, 193, 193, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 193, 1, 32, 2, 1,
+ 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 1485000, 0x7b, 0x7b, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 4, 0, 3, 5, 5,
+ 0x10, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 1462500, 122, 122, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 244, 1, 16, 2, 1, 1,
+ 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 1190000, 149, 149, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 149, 1, 16, 2, 1, 1,
+ 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 1065000, 89, 89, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 89, 1, 16, 1, 0, 1,
+ 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 1080000, 135, 135, 1, 1, 5, 1, 1, 0, 1, 0, 1, 1, 0x9, 0, 0x05, 0,
+ 0x14, 0x18, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 855000, 214, 214, 1, 1, 11, 1, 1, 1, 1, 1, 1, 1, 214, 1, 16, 2, 1,
+ 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 835000, 105, 105, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 42, 1, 16, 1, 0,
+ 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 928125, 155, 155, 1, 1, 7, 1, 1, 1, 1, 1, 1, 1, 62, 1, 16, 5, 0,
+ 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 742500, 124, 124, 1, 1, 7, 1, 1, 1, 1, 1, 1, 1, 62, 1, 16, 5, 0,
+ 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 650000, 162, 162, 1, 1, 11, 1, 1, 1, 1, 1, 1, 1, 54, 0, 16, 4, 1,
+ 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 337500, 0x70, 0x70, 1, 1, 0xf, 1, 1, 1, 1, 1, 1, 1, 0x2, 0, 0x01, 5,
+ 1, 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 400000, 100, 100, 1, 1, 11, 1, 1, 0, 1, 0, 1, 1, 0x9, 0, 0x05, 0,
+ 0x14, 0x18, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 270000, 0x5a, 0x5a, 1, 1, 0xf, 1, 1, 0, 1, 0, 1, 1, 0x9, 0, 0x05, 0,
+ 0x14, 0x18, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 251750, 84, 84, 1, 1, 0xf, 1, 1, 1, 1, 1, 1, 1, 168, 1, 16, 4, 1, 1,
+ 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+};
+
+static const struct reg_sequence rk_hdtpx_common_cmn_init_seq[] = {
+ REG_SEQ0(CMN_REG(0009), 0x0c),
+ REG_SEQ0(CMN_REG(000a), 0x83),
+ REG_SEQ0(CMN_REG(000b), 0x06),
+ REG_SEQ0(CMN_REG(000c), 0x20),
+ REG_SEQ0(CMN_REG(000d), 0xb8),
+ REG_SEQ0(CMN_REG(000e), 0x0f),
+ REG_SEQ0(CMN_REG(000f), 0x0f),
+ REG_SEQ0(CMN_REG(0010), 0x04),
+ REG_SEQ0(CMN_REG(0011), 0x00),
+ REG_SEQ0(CMN_REG(0012), 0x26),
+ REG_SEQ0(CMN_REG(0013), 0x22),
+ REG_SEQ0(CMN_REG(0014), 0x24),
+ REG_SEQ0(CMN_REG(0015), 0x77),
+ REG_SEQ0(CMN_REG(0016), 0x08),
+ REG_SEQ0(CMN_REG(0017), 0x00),
+ REG_SEQ0(CMN_REG(0018), 0x04),
+ REG_SEQ0(CMN_REG(0019), 0x48),
+ REG_SEQ0(CMN_REG(001a), 0x01),
+ REG_SEQ0(CMN_REG(001b), 0x00),
+ REG_SEQ0(CMN_REG(001c), 0x01),
+ REG_SEQ0(CMN_REG(001d), 0x64),
+ REG_SEQ0(CMN_REG(001f), 0x00),
+ REG_SEQ0(CMN_REG(0026), 0x53),
+ REG_SEQ0(CMN_REG(0029), 0x01),
+ REG_SEQ0(CMN_REG(0030), 0x00),
+ REG_SEQ0(CMN_REG(0031), 0x20),
+ REG_SEQ0(CMN_REG(0032), 0x30),
+ REG_SEQ0(CMN_REG(0033), 0x0b),
+ REG_SEQ0(CMN_REG(0034), 0x23),
+ REG_SEQ0(CMN_REG(0035), 0x00),
+ REG_SEQ0(CMN_REG(0038), 0x00),
+ REG_SEQ0(CMN_REG(0039), 0x00),
+ REG_SEQ0(CMN_REG(003a), 0x00),
+ REG_SEQ0(CMN_REG(003b), 0x00),
+ REG_SEQ0(CMN_REG(003c), 0x80),
+ REG_SEQ0(CMN_REG(003e), 0x0c),
+ REG_SEQ0(CMN_REG(003f), 0x83),
+ REG_SEQ0(CMN_REG(0040), 0x06),
+ REG_SEQ0(CMN_REG(0041), 0x20),
+ REG_SEQ0(CMN_REG(0042), 0xb8),
+ REG_SEQ0(CMN_REG(0043), 0x00),
+ REG_SEQ0(CMN_REG(0044), 0x46),
+ REG_SEQ0(CMN_REG(0045), 0x24),
+ REG_SEQ0(CMN_REG(0046), 0xff),
+ REG_SEQ0(CMN_REG(0047), 0x00),
+ REG_SEQ0(CMN_REG(0048), 0x44),
+ REG_SEQ0(CMN_REG(0049), 0xfa),
+ REG_SEQ0(CMN_REG(004a), 0x08),
+ REG_SEQ0(CMN_REG(004b), 0x00),
+ REG_SEQ0(CMN_REG(004c), 0x01),
+ REG_SEQ0(CMN_REG(004d), 0x64),
+ REG_SEQ0(CMN_REG(004e), 0x14),
+ REG_SEQ0(CMN_REG(004f), 0x00),
+ REG_SEQ0(CMN_REG(0050), 0x00),
+ REG_SEQ0(CMN_REG(005d), 0x0c),
+ REG_SEQ0(CMN_REG(005f), 0x01),
+ REG_SEQ0(CMN_REG(006b), 0x04),
+ REG_SEQ0(CMN_REG(0073), 0x30),
+ REG_SEQ0(CMN_REG(0074), 0x00),
+ REG_SEQ0(CMN_REG(0075), 0x20),
+ REG_SEQ0(CMN_REG(0076), 0x30),
+ REG_SEQ0(CMN_REG(0077), 0x08),
+ REG_SEQ0(CMN_REG(0078), 0x0c),
+ REG_SEQ0(CMN_REG(0079), 0x00),
+ REG_SEQ0(CMN_REG(007b), 0x00),
+ REG_SEQ0(CMN_REG(007c), 0x00),
+ REG_SEQ0(CMN_REG(007d), 0x00),
+ REG_SEQ0(CMN_REG(007e), 0x00),
+ REG_SEQ0(CMN_REG(007f), 0x00),
+ REG_SEQ0(CMN_REG(0080), 0x00),
+ REG_SEQ0(CMN_REG(0081), 0x09),
+ REG_SEQ0(CMN_REG(0082), 0x04),
+ REG_SEQ0(CMN_REG(0083), 0x24),
+ REG_SEQ0(CMN_REG(0084), 0x20),
+ REG_SEQ0(CMN_REG(0085), 0x03),
+ REG_SEQ0(CMN_REG(0086), 0x01),
+ REG_SEQ0(CMN_REG(0087), 0x0c),
+ REG_SEQ0(CMN_REG(008a), 0x55),
+ REG_SEQ0(CMN_REG(008b), 0x25),
+ REG_SEQ0(CMN_REG(008c), 0x2c),
+ REG_SEQ0(CMN_REG(008d), 0x22),
+ REG_SEQ0(CMN_REG(008e), 0x14),
+ REG_SEQ0(CMN_REG(008f), 0x20),
+ REG_SEQ0(CMN_REG(0090), 0x00),
+ REG_SEQ0(CMN_REG(0091), 0x00),
+ REG_SEQ0(CMN_REG(0092), 0x00),
+ REG_SEQ0(CMN_REG(0093), 0x00),
+ REG_SEQ0(CMN_REG(009a), 0x11),
+ REG_SEQ0(CMN_REG(009b), 0x10),
+};
+
+static const struct reg_sequence rk_hdtpx_tmds_cmn_init_seq[] = {
+ REG_SEQ0(CMN_REG(0008), 0x00),
+ REG_SEQ0(CMN_REG(0011), 0x01),
+ REG_SEQ0(CMN_REG(0017), 0x20),
+ REG_SEQ0(CMN_REG(001e), 0x14),
+ REG_SEQ0(CMN_REG(0020), 0x00),
+ REG_SEQ0(CMN_REG(0021), 0x00),
+ REG_SEQ0(CMN_REG(0022), 0x11),
+ REG_SEQ0(CMN_REG(0023), 0x00),
+ REG_SEQ0(CMN_REG(0024), 0x00),
+ REG_SEQ0(CMN_REG(0025), 0x53),
+ REG_SEQ0(CMN_REG(0026), 0x00),
+ REG_SEQ0(CMN_REG(0027), 0x00),
+ REG_SEQ0(CMN_REG(0028), 0x01),
+ REG_SEQ0(CMN_REG(002a), 0x00),
+ REG_SEQ0(CMN_REG(002b), 0x00),
+ REG_SEQ0(CMN_REG(002c), 0x00),
+ REG_SEQ0(CMN_REG(002d), 0x00),
+ REG_SEQ0(CMN_REG(002e), 0x04),
+ REG_SEQ0(CMN_REG(002f), 0x00),
+ REG_SEQ0(CMN_REG(0030), 0x20),
+ REG_SEQ0(CMN_REG(0031), 0x30),
+ REG_SEQ0(CMN_REG(0032), 0x0b),
+ REG_SEQ0(CMN_REG(0033), 0x23),
+ REG_SEQ0(CMN_REG(0034), 0x00),
+ REG_SEQ0(CMN_REG(003d), 0x40),
+ REG_SEQ0(CMN_REG(0042), 0x78),
+ REG_SEQ0(CMN_REG(004e), 0x34),
+ REG_SEQ0(CMN_REG(005c), 0x25),
+ REG_SEQ0(CMN_REG(005e), 0x4f),
+ REG_SEQ0(CMN_REG(0074), 0x04),
+ REG_SEQ0(CMN_REG(0081), 0x01),
+ REG_SEQ0(CMN_REG(0087), 0x04),
+ REG_SEQ0(CMN_REG(0089), 0x00),
+ REG_SEQ0(CMN_REG(0095), 0x00),
+ REG_SEQ0(CMN_REG(0097), 0x02),
+ REG_SEQ0(CMN_REG(0099), 0x04),
+ REG_SEQ0(CMN_REG(009b), 0x00),
+};
+
+static const struct reg_sequence rk_hdtpx_common_sb_init_seq[] = {
+ REG_SEQ0(SB_REG(0114), 0x00),
+ REG_SEQ0(SB_REG(0115), 0x00),
+ REG_SEQ0(SB_REG(0116), 0x00),
+ REG_SEQ0(SB_REG(0117), 0x00),
+};
+
+static const struct reg_sequence rk_hdtpx_tmds_lntop_highbr_seq[] = {
+ REG_SEQ0(LNTOP_REG(0201), 0x00),
+ REG_SEQ0(LNTOP_REG(0202), 0x00),
+ REG_SEQ0(LNTOP_REG(0203), 0x0f),
+ REG_SEQ0(LNTOP_REG(0204), 0xff),
+ REG_SEQ0(LNTOP_REG(0205), 0xff),
+};
+
+static const struct reg_sequence rk_hdtpx_tmds_lntop_lowbr_seq[] = {
+ REG_SEQ0(LNTOP_REG(0201), 0x07),
+ REG_SEQ0(LNTOP_REG(0202), 0xc1),
+ REG_SEQ0(LNTOP_REG(0203), 0xf0),
+ REG_SEQ0(LNTOP_REG(0204), 0x7c),
+ REG_SEQ0(LNTOP_REG(0205), 0x1f),
+};
+
+static const struct reg_sequence rk_hdtpx_common_lane_init_seq[] = {
+ REG_SEQ0(LANE_REG(0303), 0x0c),
+ REG_SEQ0(LANE_REG(0307), 0x20),
+ REG_SEQ0(LANE_REG(030a), 0x17),
+ REG_SEQ0(LANE_REG(030b), 0x77),
+ REG_SEQ0(LANE_REG(030c), 0x77),
+ REG_SEQ0(LANE_REG(030d), 0x77),
+ REG_SEQ0(LANE_REG(030e), 0x38),
+ REG_SEQ0(LANE_REG(0310), 0x03),
+ REG_SEQ0(LANE_REG(0311), 0x0f),
+ REG_SEQ0(LANE_REG(0316), 0x02),
+ REG_SEQ0(LANE_REG(031b), 0x01),
+ REG_SEQ0(LANE_REG(031f), 0x15),
+ REG_SEQ0(LANE_REG(0320), 0xa0),
+ REG_SEQ0(LANE_REG(0403), 0x0c),
+ REG_SEQ0(LANE_REG(0407), 0x20),
+ REG_SEQ0(LANE_REG(040a), 0x17),
+ REG_SEQ0(LANE_REG(040b), 0x77),
+ REG_SEQ0(LANE_REG(040c), 0x77),
+ REG_SEQ0(LANE_REG(040d), 0x77),
+ REG_SEQ0(LANE_REG(040e), 0x38),
+ REG_SEQ0(LANE_REG(0410), 0x03),
+ REG_SEQ0(LANE_REG(0411), 0x0f),
+ REG_SEQ0(LANE_REG(0416), 0x02),
+ REG_SEQ0(LANE_REG(041b), 0x01),
+ REG_SEQ0(LANE_REG(041f), 0x15),
+ REG_SEQ0(LANE_REG(0420), 0xa0),
+ REG_SEQ0(LANE_REG(0503), 0x0c),
+ REG_SEQ0(LANE_REG(0507), 0x20),
+ REG_SEQ0(LANE_REG(050a), 0x17),
+ REG_SEQ0(LANE_REG(050b), 0x77),
+ REG_SEQ0(LANE_REG(050c), 0x77),
+ REG_SEQ0(LANE_REG(050d), 0x77),
+ REG_SEQ0(LANE_REG(050e), 0x38),
+ REG_SEQ0(LANE_REG(0510), 0x03),
+ REG_SEQ0(LANE_REG(0511), 0x0f),
+ REG_SEQ0(LANE_REG(0516), 0x02),
+ REG_SEQ0(LANE_REG(051b), 0x01),
+ REG_SEQ0(LANE_REG(051f), 0x15),
+ REG_SEQ0(LANE_REG(0520), 0xa0),
+ REG_SEQ0(LANE_REG(0603), 0x0c),
+ REG_SEQ0(LANE_REG(0607), 0x20),
+ REG_SEQ0(LANE_REG(060a), 0x17),
+ REG_SEQ0(LANE_REG(060b), 0x77),
+ REG_SEQ0(LANE_REG(060c), 0x77),
+ REG_SEQ0(LANE_REG(060d), 0x77),
+ REG_SEQ0(LANE_REG(060e), 0x38),
+ REG_SEQ0(LANE_REG(0610), 0x03),
+ REG_SEQ0(LANE_REG(0611), 0x0f),
+ REG_SEQ0(LANE_REG(0616), 0x02),
+ REG_SEQ0(LANE_REG(061b), 0x01),
+ REG_SEQ0(LANE_REG(061f), 0x15),
+ REG_SEQ0(LANE_REG(0620), 0xa0),
+};
+
+static const struct reg_sequence rk_hdtpx_tmds_lane_init_seq[] = {
+ REG_SEQ0(LANE_REG(0312), 0x00),
+ REG_SEQ0(LANE_REG(031e), 0x00),
+ REG_SEQ0(LANE_REG(0412), 0x00),
+ REG_SEQ0(LANE_REG(041e), 0x00),
+ REG_SEQ0(LANE_REG(0512), 0x00),
+ REG_SEQ0(LANE_REG(051e), 0x00),
+ REG_SEQ0(LANE_REG(0612), 0x00),
+ REG_SEQ0(LANE_REG(061e), 0x08),
+ REG_SEQ0(LANE_REG(0303), 0x2f),
+ REG_SEQ0(LANE_REG(0403), 0x2f),
+ REG_SEQ0(LANE_REG(0503), 0x2f),
+ REG_SEQ0(LANE_REG(0603), 0x2f),
+ REG_SEQ0(LANE_REG(0305), 0x03),
+ REG_SEQ0(LANE_REG(0405), 0x03),
+ REG_SEQ0(LANE_REG(0505), 0x03),
+ REG_SEQ0(LANE_REG(0605), 0x03),
+ REG_SEQ0(LANE_REG(0306), 0x1c),
+ REG_SEQ0(LANE_REG(0406), 0x1c),
+ REG_SEQ0(LANE_REG(0506), 0x1c),
+ REG_SEQ0(LANE_REG(0606), 0x1c),
+};
+
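+/* Register ranges that are valid for read/write access through the regmap */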
+static bool rk_hdptx_phy_is_rw_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case 0x0000 ... 0x029c:
+ case 0x0400 ... 0x04a4:
+ case 0x0800 ... 0x08a4:
+ case 0x0c00 ... 0x0cb4:
+ case 0x1000 ... 0x10b4:
+ case 0x1400 ... 0x14b4:
+ case 0x1800 ... 0x18b4:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_config rk_hdptx_phy_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .writeable_reg = rk_hdptx_phy_is_rw_reg,
+ .readable_reg = rk_hdptx_phy_is_rw_reg,
+ .fast_io = true,
+ .max_register = 0x18b4,
+};
+
+#define rk_hdptx_multi_reg_write(hdptx, seq) \
+ regmap_multi_reg_write((hdptx)->regmap, seq, ARRAY_SIZE(seq))
+
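+/* Cycle the APB reset, hold lane/cmn/init in reset and gate PLL, bias and bandgap */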
+static void rk_hdptx_pre_power_up(struct rk_hdptx_phy *hdptx)
+{
+ u32 val;
+
+ reset_control_assert(hdptx->rsts[RST_APB].rstc);
+ usleep_range(20, 25);
+ reset_control_deassert(hdptx->rsts[RST_APB].rstc);
+
+ reset_control_assert(hdptx->rsts[RST_LANE].rstc);
+ reset_control_assert(hdptx->rsts[RST_CMN].rstc);
+ reset_control_assert(hdptx->rsts[RST_INIT].rstc);
+
+ val = (HDPTX_I_PLL_EN | HDPTX_I_BIAS_EN | HDPTX_I_BGR_EN) << 16;
+ regmap_write(hdptx->grf, GRF_HDPTX_CON0, val);
+}
+
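+/* Release the lane reset and wait for the GRF to report PHY ready and PLL lock */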
+static int rk_hdptx_post_enable_lane(struct rk_hdptx_phy *hdptx)
+{
+ u32 val;
+ int ret;
+
+ reset_control_deassert(hdptx->rsts[RST_LANE].rstc);
+
+ val = (HDPTX_I_BIAS_EN | HDPTX_I_BGR_EN) << 16 |
+ HDPTX_I_BIAS_EN | HDPTX_I_BGR_EN;
+ regmap_write(hdptx->grf, GRF_HDPTX_CON0, val);
+
+ ret = regmap_read_poll_timeout(hdptx->grf, GRF_HDPTX_STATUS, val,
+ (val & HDPTX_O_PHY_RDY) &&
+ (val & HDPTX_O_PLL_LOCK_DONE),
+ 100, 5000);
+ if (ret) {
+ dev_err(hdptx->dev, "Failed to get PHY lane lock: %d\n", ret);
+ return ret;
+ }
+
+ dev_dbg(hdptx->dev, "PHY lane locked\n");
+
+ return 0;
+}
+
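+/* Enable bias/bandgap and the PLL, release init/cmn resets and wait for the PHY clock */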
+static int rk_hdptx_post_enable_pll(struct rk_hdptx_phy *hdptx)
+{
+ u32 val;
+ int ret;
+
+ val = (HDPTX_I_BIAS_EN | HDPTX_I_BGR_EN) << 16 |
+ HDPTX_I_BIAS_EN | HDPTX_I_BGR_EN;
+ regmap_write(hdptx->grf, GRF_HDPTX_CON0, val);
+
+ usleep_range(10, 15);
+ reset_control_deassert(hdptx->rsts[RST_INIT].rstc);
+
+ usleep_range(10, 15);
+ val = HDPTX_I_PLL_EN << 16 | HDPTX_I_PLL_EN;
+ regmap_write(hdptx->grf, GRF_HDPTX_CON0, val);
+
+ usleep_range(10, 15);
+ reset_control_deassert(hdptx->rsts[RST_CMN].rstc);
+
+ ret = regmap_read_poll_timeout(hdptx->grf, GRF_HDPTX_STATUS, val,
+ val & HDPTX_O_PHY_CLK_RDY, 20, 400);
+ if (ret) {
+ dev_err(hdptx->dev, "Failed to get PHY clk ready: %d\n", ret);
+ return ret;
+ }
+
+ dev_dbg(hdptx->dev, "PHY clk ready\n");
+
+ return 0;
+}
+
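+/* Put the PHY back into reset and disable PLL, bias and bandgap via the GRF */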
+static void rk_hdptx_phy_disable(struct rk_hdptx_phy *hdptx)
+{
+ u32 val;
+
+ /* Reset the PHY and APB, otherwise the PHY locked flag may stay set */
+ reset_control_assert(hdptx->rsts[RST_PHY].rstc);
+ usleep_range(20, 30);
+ reset_control_deassert(hdptx->rsts[RST_PHY].rstc);
+
+ reset_control_assert(hdptx->rsts[RST_APB].rstc);
+ usleep_range(20, 30);
+ reset_control_deassert(hdptx->rsts[RST_APB].rstc);
+
+ regmap_write(hdptx->regmap, LANE_REG(0300), 0x82);
+ regmap_write(hdptx->regmap, SB_REG(010f), 0xc1);
+ regmap_write(hdptx->regmap, SB_REG(0110), 0x1);
+ regmap_write(hdptx->regmap, LANE_REG(0301), 0x80);
+ regmap_write(hdptx->regmap, LANE_REG(0401), 0x80);
+ regmap_write(hdptx->regmap, LANE_REG(0501), 0x80);
+ regmap_write(hdptx->regmap, LANE_REG(0601), 0x80);
+
+ reset_control_assert(hdptx->rsts[RST_LANE].rstc);
+ reset_control_assert(hdptx->rsts[RST_CMN].rstc);
+ reset_control_assert(hdptx->rsts[RST_INIT].rstc);
+
+ val = (HDPTX_I_PLL_EN | HDPTX_I_BIAS_EN | HDPTX_I_BGR_EN) << 16;
+ regmap_write(hdptx->grf, GRF_HDPTX_CON0, val);
+}
+
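+/*
+ * Compute ROPLL dividers and fractional (SDM/SDC) settings for a TMDS bit
+ * rate that has no entry in ropll_tmds_cfg, based on the 24 MHz reference.
+ */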
+static bool rk_hdptx_phy_clk_pll_calc(unsigned int data_rate,
+ struct ropll_config *cfg)
+{
+ const unsigned int fout = data_rate / 2, fref = 24000;
+ unsigned long k = 0, lc, k_sub, lc_sub;
+ unsigned int fvco, sdc;
+ u32 mdiv, sdiv, n = 8;
+
+ if (fout > 0xfffffff)
+ return false;
+
+ for (sdiv = 16; sdiv >= 1; sdiv--) {
+ if (sdiv % 2 && sdiv != 1)
+ continue;
+
+ fvco = fout * sdiv;
+
+ if (fvco < 2000000 || fvco > 4000000)
+ continue;
+
+ mdiv = DIV_ROUND_UP(fvco, fref);
+ if (mdiv < 20 || mdiv > 255)
+ continue;
+
+ if (fref * mdiv - fvco) {
+ for (sdc = 264000; sdc <= 750000; sdc += fref)
+ if (sdc * n > fref * mdiv)
+ break;
+
+ if (sdc > 750000)
+ continue;
+
+ rational_best_approximation(fref * mdiv - fvco,
+ sdc / 16,
+ GENMASK(6, 0),
+ GENMASK(7, 0),
+ &k, &lc);
+
+ rational_best_approximation(sdc * n - fref * mdiv,
+ sdc,
+ GENMASK(6, 0),
+ GENMASK(7, 0),
+ &k_sub, &lc_sub);
+ }
+
+ break;
+ }
+
+ if (sdiv < 1)
+ return false;
+
+ if (cfg) {
+ cfg->pms_mdiv = mdiv;
+ cfg->pms_mdiv_afc = mdiv;
+ cfg->pms_pdiv = 1;
+ cfg->pms_refdiv = 1;
+ cfg->pms_sdiv = sdiv - 1;
+
+ cfg->sdm_en = k > 0 ? 1 : 0;
+ if (cfg->sdm_en) {
+ cfg->sdm_deno = lc;
+ cfg->sdm_num_sign = 1;
+ cfg->sdm_num = k;
+ cfg->sdc_n = n - 3;
+ cfg->sdc_num = k_sub;
+ cfg->sdc_deno = lc_sub;
+ }
+ }
+
+ return true;
+}
+
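+/* Program the ROPLL common block for the given TMDS bit rate and start the PLL */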
+static int rk_hdptx_ropll_tmds_cmn_config(struct rk_hdptx_phy *hdptx,
+ unsigned int rate)
+{
+ const struct ropll_config *cfg = NULL;
+ struct ropll_config rc = {0};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ropll_tmds_cfg); i++)
+ if (rate == ropll_tmds_cfg[i].bit_rate) {
+ cfg = &ropll_tmds_cfg[i];
+ break;
+ }
+
+ if (!cfg) {
+ if (rk_hdptx_phy_clk_pll_calc(rate, &rc)) {
+ cfg = &rc;
+ } else {
+ dev_err(hdptx->dev, "%s cannot find pll cfg\n", __func__);
+ return -EINVAL;
+ }
+ }
+
+ dev_dbg(hdptx->dev, "mdiv=%u, sdiv=%u, sdm_en=%u, k_sign=%u, k=%u, lc=%u\n",
+ cfg->pms_mdiv, cfg->pms_sdiv + 1, cfg->sdm_en,
+ cfg->sdm_num_sign, cfg->sdm_num, cfg->sdm_deno);
+
+ rk_hdptx_pre_power_up(hdptx);
+
+ reset_control_assert(hdptx->rsts[RST_ROPLL].rstc);
+ usleep_range(20, 30);
+ reset_control_deassert(hdptx->rsts[RST_ROPLL].rstc);
+
+ rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_common_cmn_init_seq);
+ rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_tmds_cmn_init_seq);
+
+ regmap_write(hdptx->regmap, CMN_REG(0051), cfg->pms_mdiv);
+ regmap_write(hdptx->regmap, CMN_REG(0055), cfg->pms_mdiv_afc);
+ regmap_write(hdptx->regmap, CMN_REG(0059),
+ (cfg->pms_pdiv << 4) | cfg->pms_refdiv);
+ regmap_write(hdptx->regmap, CMN_REG(005a), cfg->pms_sdiv << 4);
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(005e), ROPLL_SDM_EN_MASK,
+ FIELD_PREP(ROPLL_SDM_EN_MASK, cfg->sdm_en));
+ if (!cfg->sdm_en)
+ regmap_update_bits(hdptx->regmap, CMN_REG(005e), 0xf, 0);
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(0064), ROPLL_SDM_NUM_SIGN_RBR_MASK,
+ FIELD_PREP(ROPLL_SDM_NUM_SIGN_RBR_MASK, cfg->sdm_num_sign));
+
+ regmap_write(hdptx->regmap, CMN_REG(0060), cfg->sdm_deno);
+ regmap_write(hdptx->regmap, CMN_REG(0065), cfg->sdm_num);
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(0069), ROPLL_SDC_N_RBR_MASK,
+ FIELD_PREP(ROPLL_SDC_N_RBR_MASK, cfg->sdc_n));
+
+ regmap_write(hdptx->regmap, CMN_REG(006c), cfg->sdc_num);
+ regmap_write(hdptx->regmap, CMN_REG(0070), cfg->sdc_deno);
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(0086), PLL_PCG_POSTDIV_SEL_MASK,
+ FIELD_PREP(PLL_PCG_POSTDIV_SEL_MASK, cfg->pms_sdiv));
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(0086), PLL_PCG_CLK_EN,
+ PLL_PCG_CLK_EN);
+
+ return rk_hdptx_post_enable_pll(hdptx);
+}
+
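+/*
+ * Configure the SB, LNTOP and LANE blocks for TMDS and enable the lanes,
+ * setting up the ROPLL first in case it is not already locked.
+ */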
+static int rk_hdptx_ropll_tmds_mode_config(struct rk_hdptx_phy *hdptx,
+ unsigned int rate)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & HDPTX_O_PLL_LOCK_DONE)) {
+ ret = rk_hdptx_ropll_tmds_cmn_config(hdptx, rate);
+ if (ret)
+ return ret;
+ }
+
+ rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_common_sb_init_seq);
+
+ regmap_write(hdptx->regmap, LNTOP_REG(0200), 0x06);
+
+ if (rate >= 3400000) {
+ /* For 1/40 bitrate clk */
+ rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_tmds_lntop_highbr_seq);
+ } else {
+ /* For 1/10 bitrate clk */
+ rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_tmds_lntop_lowbr_seq);
+ }
+
+ regmap_write(hdptx->regmap, LNTOP_REG(0206), 0x07);
+ regmap_write(hdptx->regmap, LNTOP_REG(0207), 0x0f);
+
+ rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_common_lane_init_seq);
+ rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_tmds_lane_init_seq);
+
+ return rk_hdptx_post_enable_lane(hdptx);
+}
+
+static int rk_hdptx_phy_power_on(struct phy *phy)
+{
+ struct rk_hdptx_phy *hdptx = phy_get_drvdata(phy);
+ int ret, bus_width = phy_get_bus_width(hdptx->phy);
+ /*
+ * FIXME: Temporary workaround to pass pixel_clk_rate
+ * from the HDMI bridge driver until phy_configure_opts_hdmi
+ * becomes available in the PHY API.
+ */
+ unsigned int rate = bus_width & 0xfffffff;
+
+ dev_dbg(hdptx->dev, "%s bus_width=%x rate=%u\n",
+ __func__, bus_width, rate);
+
+ ret = pm_runtime_resume_and_get(hdptx->dev);
+ if (ret) {
+ dev_err(hdptx->dev, "Failed to resume phy: %d\n", ret);
+ return ret;
+ }
+
+ ret = rk_hdptx_ropll_tmds_mode_config(hdptx, rate);
+ if (ret)
+ pm_runtime_put(hdptx->dev);
+
+ return ret;
+}
+
+static int rk_hdptx_phy_power_off(struct phy *phy)
+{
+ struct rk_hdptx_phy *hdptx = phy_get_drvdata(phy);
+ u32 val;
+ int ret;
+
+ ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &val);
+ if (ret == 0 && (val & HDPTX_O_PLL_LOCK_DONE))
+ rk_hdptx_phy_disable(hdptx);
+
+ pm_runtime_put(hdptx->dev);
+
+ return ret;
+}
+
+static const struct phy_ops rk_hdptx_phy_ops = {
+ .power_on = rk_hdptx_phy_power_on,
+ .power_off = rk_hdptx_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int rk_hdptx_phy_runtime_suspend(struct device *dev)
+{
+ struct rk_hdptx_phy *hdptx = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(hdptx->nr_clks, hdptx->clks);
+
+ return 0;
+}
+
+static int rk_hdptx_phy_runtime_resume(struct device *dev)
+{
+ struct rk_hdptx_phy *hdptx = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(hdptx->nr_clks, hdptx->clks);
+ if (ret)
+ dev_err(hdptx->dev, "Failed to enable clocks: %d\n", ret);
+
+ return ret;
+}
+
+static int rk_hdptx_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct rk_hdptx_phy *hdptx;
+ void __iomem *regs;
+ int ret;
+
+ hdptx = devm_kzalloc(dev, sizeof(*hdptx), GFP_KERNEL);
+ if (!hdptx)
+ return -ENOMEM;
+
+ hdptx->dev = dev;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs),
+ "Failed to ioremap resource\n");
+
+ ret = devm_clk_bulk_get_all(dev, &hdptx->clks);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
+ if (ret == 0)
+ return dev_err_probe(dev, -EINVAL, "Missing clocks\n");
+
+ hdptx->nr_clks = ret;
+
+ hdptx->regmap = devm_regmap_init_mmio(dev, regs,
+ &rk_hdptx_phy_regmap_config);
+ if (IS_ERR(hdptx->regmap))
+ return dev_err_probe(dev, PTR_ERR(hdptx->regmap),
+ "Failed to init regmap\n");
+
+ hdptx->rsts[RST_PHY].id = "phy";
+ hdptx->rsts[RST_APB].id = "apb";
+ hdptx->rsts[RST_INIT].id = "init";
+ hdptx->rsts[RST_CMN].id = "cmn";
+ hdptx->rsts[RST_LANE].id = "lane";
+ hdptx->rsts[RST_ROPLL].id = "ropll";
+ hdptx->rsts[RST_LCPLL].id = "lcpll";
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, RST_MAX, hdptx->rsts);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get resets\n");
+
+ hdptx->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,grf");
+ if (IS_ERR(hdptx->grf))
+ return dev_err_probe(dev, PTR_ERR(hdptx->grf),
+ "Could not get GRF syscon\n");
+
+ hdptx->phy = devm_phy_create(dev, NULL, &rk_hdptx_phy_ops);
+ if (IS_ERR(hdptx->phy))
+ return dev_err_probe(dev, PTR_ERR(hdptx->phy),
+ "Failed to create HDMI PHY\n");
+
+ platform_set_drvdata(pdev, hdptx);
+ phy_set_drvdata(hdptx->phy, hdptx);
+ phy_set_bus_width(hdptx->phy, 8);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return dev_err_probe(dev, PTR_ERR(phy_provider),
+ "Failed to register PHY provider\n");
+
+ reset_control_deassert(hdptx->rsts[RST_APB].rstc);
+ reset_control_deassert(hdptx->rsts[RST_CMN].rstc);
+ reset_control_deassert(hdptx->rsts[RST_INIT].rstc);
+
+ return 0;
+}
+
+static const struct dev_pm_ops rk_hdptx_phy_pm_ops = {
+ RUNTIME_PM_OPS(rk_hdptx_phy_runtime_suspend,
+ rk_hdptx_phy_runtime_resume, NULL)
+};
+
+static const struct of_device_id rk_hdptx_phy_of_match[] = {
+ { .compatible = "rockchip,rk3588-hdptx-phy", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rk_hdptx_phy_of_match);
+
+static struct platform_driver rk_hdptx_phy_driver = {
+ .probe = rk_hdptx_phy_probe,
+ .driver = {
+ .name = "rockchip-hdptx-phy",
+ .pm = &rk_hdptx_phy_pm_ops,
+ .of_match_table = rk_hdptx_phy_of_match,
+ },
+};
+module_platform_driver(rk_hdptx_phy_driver);
+
+MODULE_AUTHOR("Algea Cao <algea.cao@rock-chips.com>");
+MODULE_AUTHOR("Cristian Ciocaltea <cristian.ciocaltea@collabora.com>");
+MODULE_DESCRIPTION("Samsung HDMI/eDP Transmitter Combo PHY Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/samsung/phy-exynos-mipi-video.c b/drivers/phy/samsung/phy-exynos-mipi-video.c
index 592d8067e848e..f6756a609a9a0 100644
--- a/drivers/phy/samsung/phy-exynos-mipi-video.c
+++ b/drivers/phy/samsung/phy-exynos-mipi-video.c
@@ -274,7 +274,7 @@ static int exynos_mipi_video_phy_power_off(struct phy *phy)
}
static struct phy *exynos_mipi_video_phy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct exynos_mipi_video_phy *state = dev_get_drvdata(dev);
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index 3f310b28bfff7..04171eed5b16f 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -715,7 +715,7 @@ static int exynos5420_usbdrd_phy_calibrate(struct exynos5_usbdrd_phy *phy_drd)
}
static struct phy *exynos5_usbdrd_phy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev);
diff --git a/drivers/phy/samsung/phy-samsung-usb2.c b/drivers/phy/samsung/phy-samsung-usb2.c
index 68a174eca0ba8..9de744cd6f392 100644
--- a/drivers/phy/samsung/phy-samsung-usb2.c
+++ b/drivers/phy/samsung/phy-samsung-usb2.c
@@ -87,7 +87,7 @@ static const struct phy_ops samsung_usb2_phy_ops = {
};
static struct phy *samsung_usb2_phy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct samsung_usb2_phy_driver *drv;
diff --git a/drivers/phy/socionext/phy-uniphier-usb2.c b/drivers/phy/socionext/phy-uniphier-usb2.c
index 3f2086ed4fe4f..21c201717d952 100644
--- a/drivers/phy/socionext/phy-uniphier-usb2.c
+++ b/drivers/phy/socionext/phy-uniphier-usb2.c
@@ -81,7 +81,7 @@ static int uniphier_u2phy_init(struct phy *phy)
}
static struct phy *uniphier_u2phy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct uniphier_u2phy_priv *priv = dev_get_drvdata(dev);
diff --git a/drivers/phy/st/phy-miphy28lp.c b/drivers/phy/st/phy-miphy28lp.c
index e30305b77f0d1..063fc38788ed4 100644
--- a/drivers/phy/st/phy-miphy28lp.c
+++ b/drivers/phy/st/phy-miphy28lp.c
@@ -1074,7 +1074,7 @@ static int miphy28lp_get_addr(struct miphy28lp_phy *miphy_phy)
}
static struct phy *miphy28lp_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct miphy28lp_dev *miphy_dev = dev_get_drvdata(dev);
struct miphy28lp_phy *miphy_phy = NULL;
diff --git a/drivers/phy/st/phy-spear1310-miphy.c b/drivers/phy/st/phy-spear1310-miphy.c
index 35a9831b51610..c661ab63505f9 100644
--- a/drivers/phy/st/phy-spear1310-miphy.c
+++ b/drivers/phy/st/phy-spear1310-miphy.c
@@ -183,7 +183,7 @@ static const struct phy_ops spear1310_miphy_ops = {
};
static struct phy *spear1310_miphy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct spear1310_miphy_priv *priv = dev_get_drvdata(dev);
diff --git a/drivers/phy/st/phy-spear1340-miphy.c b/drivers/phy/st/phy-spear1340-miphy.c
index 34a1cf21015f5..85a60d64ebb7d 100644
--- a/drivers/phy/st/phy-spear1340-miphy.c
+++ b/drivers/phy/st/phy-spear1340-miphy.c
@@ -220,7 +220,7 @@ static SIMPLE_DEV_PM_OPS(spear1340_miphy_pm_ops, spear1340_miphy_suspend,
spear1340_miphy_resume);
static struct phy *spear1340_miphy_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct spear1340_miphy_priv *priv = dev_get_drvdata(dev);
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index d5e7e44000b56..9dbe60dcf3190 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -574,7 +574,7 @@ static void stm32_usbphyc_switch_setup(struct stm32_usbphyc *usbphyc,
}
static struct phy *stm32_usbphyc_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct stm32_usbphyc *usbphyc = dev_get_drvdata(dev);
struct stm32_usbphyc_phy *usbphyc_phy = NULL;
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 142ebe0247cc0..cfdb54b6070a4 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -22,7 +22,7 @@
#include "xusb.h"
static struct phy *tegra_xusb_pad_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct tegra_xusb_pad *pad = dev_get_drvdata(dev);
struct phy *phy = NULL;
@@ -1531,6 +1531,19 @@ int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_usb3_companion);
+int tegra_xusb_padctl_get_port_number(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane;
+
+ if (!phy)
+ return -ENODEV;
+
+ lane = phy_get_drvdata(phy);
+
+ return lane->index;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_port_number);
+
MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
index 3f1d43e8b7ad1..8b3b937de6248 100644
--- a/drivers/phy/ti/phy-am654-serdes.c
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -495,7 +495,7 @@ static void serdes_am654_release(struct phy *x)
}
static struct phy *serdes_am654_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct serdes_am654 *am654_phy;
struct phy *phy;
diff --git a/drivers/phy/ti/phy-da8xx-usb.c b/drivers/phy/ti/phy-da8xx-usb.c
index b7a9ef3f46544..0fe577f0d6c1e 100644
--- a/drivers/phy/ti/phy-da8xx-usb.c
+++ b/drivers/phy/ti/phy-da8xx-usb.c
@@ -119,7 +119,7 @@ static const struct phy_ops da8xx_usb20_phy_ops = {
};
static struct phy *da8xx_usb_phy_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct da8xx_usb_phy *d_phy = dev_get_drvdata(dev);
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index 0f4818adb4400..b30bf740e2e0d 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -297,7 +297,7 @@ static const struct phy_ops phy_gmii_sel_ops = {
};
static struct phy *phy_gmii_sel_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct phy_gmii_sel_priv *priv = dev_get_drvdata(dev);
int phy_id = args->args[0];
@@ -494,11 +494,35 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
return 0;
}
+static int phy_gmii_sel_resume_noirq(struct device *dev)
+{
+ struct phy_gmii_sel_priv *priv = dev_get_drvdata(dev);
+ struct phy_gmii_sel_phy_priv *if_phys = priv->if_phys;
+ int ret, i;
+
+ for (i = 0; i < priv->num_ports; i++) {
+ if (if_phys[i].phy_if_mode) {
+ ret = phy_gmii_sel_mode(if_phys[i].if_phy,
+ PHY_MODE_ETHERNET, if_phys[i].phy_if_mode);
+ if (ret) {
+ dev_err(dev, "port%u: restore mode fail %d\n",
+ if_phys[i].if_phy->id, ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static DEFINE_NOIRQ_DEV_PM_OPS(phy_gmii_sel_pm_ops, NULL, phy_gmii_sel_resume_noirq);
+
static struct platform_driver phy_gmii_sel_driver = {
.probe = phy_gmii_sel_probe,
.driver = {
.name = "phy-gmii-sel",
.of_match_table = phy_gmii_sel_id_table,
+ .pm = pm_sleep_ptr(&phy_gmii_sel_pm_ops),
},
};
module_platform_driver(phy_gmii_sel_driver);
diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
index b4881cb344759..13cd614e12a1d 100644
--- a/drivers/phy/ti/phy-tusb1210.c
+++ b/drivers/phy/ti/phy-tusb1210.c
@@ -17,6 +17,10 @@
#include <linux/property.h>
#include <linux/workqueue.h>
+#define TI_VENDOR_ID 0x0451
+#define TI_DEVICE_TUSB1210 0x1507
+#define TI_DEVICE_TUSB1211 0x1508
+
#define TUSB1211_POWER_CONTROL 0x3d
#define TUSB1211_POWER_CONTROL_SET 0x3e
#define TUSB1211_POWER_CONTROL_CLEAR 0x3f
@@ -52,7 +56,7 @@ enum tusb1210_chg_det_state {
};
struct tusb1210 {
- struct ulpi *ulpi;
+ struct device *dev;
struct phy *phy;
struct gpio_desc *gpio_reset;
struct gpio_desc *gpio_cs;
@@ -71,26 +75,27 @@ struct tusb1210 {
static int tusb1210_ulpi_write(struct tusb1210 *tusb, u8 reg, u8 val)
{
+ struct device *dev = tusb->dev;
int ret;
- ret = ulpi_write(tusb->ulpi, reg, val);
+ ret = ulpi_write(to_ulpi_dev(dev), reg, val);
if (ret)
- dev_err(&tusb->ulpi->dev, "error %d writing val 0x%02x to reg 0x%02x\n",
- ret, val, reg);
+ dev_err(dev, "error %d writing val 0x%02x to reg 0x%02x\n", ret, val, reg);
return ret;
}
static int tusb1210_ulpi_read(struct tusb1210 *tusb, u8 reg, u8 *val)
{
+ struct device *dev = tusb->dev;
int ret;
- ret = ulpi_read(tusb->ulpi, reg);
+ ret = ulpi_read(to_ulpi_dev(dev), reg);
if (ret >= 0) {
*val = ret;
ret = 0;
} else {
- dev_err(&tusb->ulpi->dev, "error %d reading reg 0x%02x\n", ret, reg);
+ dev_err(dev, "error %d reading reg 0x%02x\n", ret, reg);
}
return ret;
@@ -178,7 +183,7 @@ static void tusb1210_reset(struct tusb1210 *tusb)
static void tusb1210_chg_det_set_type(struct tusb1210 *tusb,
enum power_supply_usb_type type)
{
- dev_dbg(&tusb->ulpi->dev, "charger type: %d\n", type);
+ dev_dbg(tusb->dev, "charger type: %d\n", type);
tusb->chg_type = type;
tusb->chg_det_retries = 0;
power_supply_changed(tusb->psy);
@@ -189,7 +194,7 @@ static void tusb1210_chg_det_set_state(struct tusb1210 *tusb,
int delay_ms)
{
if (delay_ms)
- dev_dbg(&tusb->ulpi->dev, "chg_det new state %s in %d ms\n",
+ dev_dbg(tusb->dev, "chg_det new state %s in %d ms\n",
tusb1210_chg_det_states[new_state], delay_ms);
tusb->chg_det_state = new_state;
@@ -253,7 +258,7 @@ static void tusb1210_chg_det_work(struct work_struct *work)
int ret;
u8 val;
- dev_dbg(&tusb->ulpi->dev, "chg_det state %s vbus_present %d\n",
+ dev_dbg(tusb->dev, "chg_det state %s vbus_present %d\n",
tusb1210_chg_det_states[tusb->chg_det_state], vbus_present);
switch (tusb->chg_det_state) {
@@ -261,9 +266,9 @@ static void tusb1210_chg_det_work(struct work_struct *work)
tusb->chg_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
tusb->chg_det_retries = 0;
/* Power on USB controller for ulpi_read()/_write() */
- ret = pm_runtime_resume_and_get(tusb->ulpi->dev.parent);
+ ret = pm_runtime_resume_and_get(tusb->dev->parent);
if (ret < 0) {
- dev_err(&tusb->ulpi->dev, "error %d runtime-resuming\n", ret);
+ dev_err(tusb->dev, "error %d runtime-resuming\n", ret);
/* Should never happen, skip charger detection */
tusb1210_chg_det_set_state(tusb, TUSB1210_CHG_DET_CONNECTED, 0);
return;
@@ -332,7 +337,7 @@ static void tusb1210_chg_det_work(struct work_struct *work)
mutex_unlock(&tusb->phy->mutex);
- pm_runtime_put(tusb->ulpi->dev.parent);
+ pm_runtime_put(tusb->dev->parent);
tusb1210_chg_det_set_state(tusb, TUSB1210_CHG_DET_CONNECTED, 0);
break;
case TUSB1210_CHG_DET_CONNECTED:
@@ -428,13 +433,14 @@ static const struct power_supply_desc tusb1210_psy_desc = {
static void tusb1210_probe_charger_detect(struct tusb1210 *tusb)
{
struct power_supply_config psy_cfg = { .drv_data = tusb };
- struct device *dev = &tusb->ulpi->dev;
+ struct device *dev = tusb->dev;
+ struct ulpi *ulpi = to_ulpi_dev(dev);
int ret;
if (!device_property_read_bool(dev->parent, "linux,phy_charger_detect"))
return;
- if (tusb->ulpi->id.product != 0x1508) {
+ if (ulpi->id.product != TI_DEVICE_TUSB1211) {
dev_err(dev, "error charger detection is only supported on the TUSB1211\n");
return;
}
@@ -485,25 +491,24 @@ static const struct phy_ops phy_ops = {
static int tusb1210_probe(struct ulpi *ulpi)
{
+ struct device *dev = &ulpi->dev;
struct tusb1210 *tusb;
u8 val, reg;
int ret;
- tusb = devm_kzalloc(&ulpi->dev, sizeof(*tusb), GFP_KERNEL);
+ tusb = devm_kzalloc(dev, sizeof(*tusb), GFP_KERNEL);
if (!tusb)
return -ENOMEM;
- tusb->ulpi = ulpi;
+ tusb->dev = dev;
- tusb->gpio_reset = devm_gpiod_get_optional(&ulpi->dev, "reset",
- GPIOD_OUT_LOW);
+ tusb->gpio_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(tusb->gpio_reset))
return PTR_ERR(tusb->gpio_reset);
gpiod_set_value_cansleep(tusb->gpio_reset, 1);
- tusb->gpio_cs = devm_gpiod_get_optional(&ulpi->dev, "cs",
- GPIOD_OUT_LOW);
+ tusb->gpio_cs = devm_gpiod_get_optional(dev, "cs", GPIOD_OUT_LOW);
if (IS_ERR(tusb->gpio_cs))
return PTR_ERR(tusb->gpio_cs);
@@ -519,15 +524,15 @@ static int tusb1210_probe(struct ulpi *ulpi)
return ret;
/* High speed output drive strength configuration */
- if (!device_property_read_u8(&ulpi->dev, "ihstx", &val))
+ if (!device_property_read_u8(dev, "ihstx", &val))
u8p_replace_bits(&reg, val, (u8)TUSB1210_VENDOR_SPECIFIC2_IHSTX_MASK);
/* High speed output impedance configuration */
- if (!device_property_read_u8(&ulpi->dev, "zhsdrv", &val))
+ if (!device_property_read_u8(dev, "zhsdrv", &val))
u8p_replace_bits(&reg, val, (u8)TUSB1210_VENDOR_SPECIFIC2_ZHSDRV_MASK);
/* DP/DM swap control */
- if (!device_property_read_u8(&ulpi->dev, "datapolarity", &val))
+ if (!device_property_read_u8(dev, "datapolarity", &val))
u8p_replace_bits(&reg, val, (u8)TUSB1210_VENDOR_SPECIFIC2_DP_MASK);
ret = tusb1210_ulpi_write(tusb, TUSB1210_VENDOR_SPECIFIC2, reg);
@@ -561,11 +566,9 @@ static void tusb1210_remove(struct ulpi *ulpi)
tusb1210_remove_charger_detect(tusb);
}
-#define TI_VENDOR_ID 0x0451
-
static const struct ulpi_device_id tusb1210_ulpi_id[] = {
- { TI_VENDOR_ID, 0x1507, }, /* TUSB1210 */
- { TI_VENDOR_ID, 0x1508, }, /* TUSB1211 */
+ { TI_VENDOR_ID, TI_DEVICE_TUSB1210 },
+ { TI_VENDOR_ID, TI_DEVICE_TUSB1211 },
{ },
};
MODULE_DEVICE_TABLE(ulpi, tusb1210_ulpi_id);
diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
index 2559c6594cea2..f72c5257d7127 100644
--- a/drivers/phy/xilinx/phy-zynqmp.c
+++ b/drivers/phy/xilinx/phy-zynqmp.c
@@ -768,7 +768,7 @@ static const unsigned int icm_matrix[NUM_LANES][CONTROLLERS_PER_LANE] = {
/* Translate OF phandle and args to PHY instance. */
static struct phy *xpsgtr_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
struct xpsgtr_phy *gtr_phy;
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 8163a5983166a..d45657aa986ae 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -127,6 +127,24 @@ config PINCTRL_AXP209
selected.
Say Y to enable pinctrl and GPIO support for the AXP209 PMIC.
+config PINCTRL_AW9523
+ tristate "Awinic AW9523/AW9523B I2C GPIO expander pinctrl driver"
+ depends on OF && I2C
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select REGMAP
+ select REGMAP_I2C
+ help
+ The Awinic AW9523/AW9523B is a multi-function I2C GPIO
+ expander with PWM functionality. This driver bundles a
+ pinctrl driver to select the function muxing and a GPIO
+ driver to handle GPIO, when the GPIO function is selected.
+
+ Say yes to enable pinctrl and GPIO support for the AW9523(B).
+
config PINCTRL_BM1880
bool "Bitmain BM1880 Pinctrl driver"
depends on OF && (ARCH_BITMAIN || COMPILE_TEST)
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 1071f301cc70b..2152539b53d54 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_PINCTRL_ARTPEC6) += pinctrl-artpec6.o
obj-$(CONFIG_PINCTRL_AS3722) += pinctrl-as3722.o
obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
obj-$(CONFIG_PINCTRL_AT91PIO4) += pinctrl-at91-pio4.o
+obj-$(CONFIG_PINCTRL_AW9523) += pinctrl-aw9523.o
obj-$(CONFIG_PINCTRL_AXP209) += pinctrl-axp209.o
obj-$(CONFIG_PINCTRL_BM1880) += pinctrl-bm1880.o
obj-$(CONFIG_PINCTRL_CY8C95X0) += pinctrl-cy8c95x0.o
diff --git a/drivers/pinctrl/aspeed/Makefile b/drivers/pinctrl/aspeed/Makefile
index 489ea1778353f..db2a7600ae2bd 100644
--- a/drivers/pinctrl/aspeed/Makefile
+++ b/drivers/pinctrl/aspeed/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
# Aspeed pinctrl support
-ccflags-y += $(call cc-option,-Woverride-init)
+ccflags-y += -Woverride-init
obj-$(CONFIG_PINCTRL_ASPEED) += pinctrl-aspeed.o pinmux-aspeed.o
obj-$(CONFIG_PINCTRL_ASPEED_G4) += pinctrl-aspeed-g4.o
obj-$(CONFIG_PINCTRL_ASPEED_G5) += pinctrl-aspeed-g5.o
diff --git a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
index 012b0a3bad5a9..628b60ccc2b07 100644
--- a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
+++ b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
@@ -5,10 +5,10 @@
// Copyright (c) 2023 Cirrus Logic, Inc. and
// Cirrus Logic International Semiconductor Ltd.
+#include <linux/array_size.h>
#include <linux/bits.h>
#include <linux/build_bug.h>
#include <linux/err.h>
-#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/mfd/cs42l43.h>
#include <linux/mfd/cs42l43-regs.h>
@@ -17,7 +17,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <linux/string_helpers.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinctrl.h>
@@ -276,7 +276,7 @@ static const struct pinmux_ops cs42l43_pin_mux_ops = {
static const unsigned int cs42l43_pin_drv_str_ma[] = { 1, 2, 4, 8, 9, 10, 12, 16 };
-static inline int cs42l43_pin_get_drv_str(struct cs42l43_pin *priv, unsigned int pin)
+static int cs42l43_pin_get_drv_str(struct cs42l43_pin *priv, unsigned int pin)
{
const struct cs42l43_pin_data *pdat = cs42l43_pin_pins[pin].drv_data;
unsigned int val;
@@ -289,8 +289,8 @@ static inline int cs42l43_pin_get_drv_str(struct cs42l43_pin *priv, unsigned int
return cs42l43_pin_drv_str_ma[(val & pdat->mask) >> pdat->shift];
}
-static inline int cs42l43_pin_set_drv_str(struct cs42l43_pin *priv, unsigned int pin,
- unsigned int ma)
+static int cs42l43_pin_set_drv_str(struct cs42l43_pin *priv, unsigned int pin,
+ unsigned int ma)
{
const struct cs42l43_pin_data *pdat = cs42l43_pin_pins[pin].drv_data;
int i;
@@ -314,7 +314,7 @@ err:
return -EINVAL;
}
-static inline int cs42l43_pin_get_db(struct cs42l43_pin *priv, unsigned int pin)
+static int cs42l43_pin_get_db(struct cs42l43_pin *priv, unsigned int pin)
{
unsigned int val;
int ret;
@@ -332,8 +332,8 @@ static inline int cs42l43_pin_get_db(struct cs42l43_pin *priv, unsigned int pin)
return 85; // Debounce is roughly 85uS
}
-static inline int cs42l43_pin_set_db(struct cs42l43_pin *priv, unsigned int pin,
- unsigned int us)
+static int cs42l43_pin_set_db(struct cs42l43_pin *priv, unsigned int pin,
+ unsigned int us)
{
if (pin >= CS42L43_NUM_GPIOS)
return -ENOTSUPP;
@@ -490,7 +490,7 @@ static void cs42l43_gpio_set(struct gpio_chip *chip, unsigned int offset, int va
int ret;
dev_dbg(priv->dev, "Setting gpio%d to %s\n",
- offset + 1, value ? "high" : "low");
+ offset + 1, str_high_low(value));
ret = pm_runtime_resume_and_get(priv->dev);
if (ret) {
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index bbcdece83bf42..6649357637ff3 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -412,6 +412,10 @@ static int pinctrl_get_device_gpio_range(struct gpio_chip *gc,
* @pctldev: pin controller device to add the range to
* @range: the GPIO range to add
*
+ * DEPRECATED: Don't use this function in new code. See section 2 of
+ * Documentation/devicetree/bindings/gpio/gpio.txt on how to bind pinctrl and
+ * gpio drivers.
+ *
* This adds a range of GPIOs to be handled by a certain pin controller. Call
* this to register handled ranges after registering your pin controller.
*/
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7981.c b/drivers/pinctrl/mediatek/pinctrl-mt7981.c
index 7e59a44078590..ef61237658850 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7981.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7981.c
@@ -700,6 +700,15 @@ static int mt7981_drv_vbus_pins[] = { 14, };
static int mt7981_drv_vbus_funcs[] = { 1, };
/* EMMC */
+static int mt7981_emmc_reset_pins[] = { 15, };
+static int mt7981_emmc_reset_funcs[] = { 2, };
+
+static int mt7981_emmc_4_pins[] = { 16, 17, 18, 19, 24, 25, };
+static int mt7981_emmc_4_funcs[] = { 2, 2, 2, 2, 2, 2, };
+
+static int mt7981_emmc_8_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, };
+static int mt7981_emmc_8_funcs[] = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, };
+
static int mt7981_emmc_45_pins[] = { 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, };
static int mt7981_emmc_45_funcs[] = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, };
@@ -737,6 +746,9 @@ static int mt7981_uart1_1_funcs[] = { 2, 2, 2, 2, };
static int mt7981_uart1_2_pins[] = { 9, 10, };
static int mt7981_uart1_2_funcs[] = { 2, 2, };
+static int mt7981_uart1_3_pins[] = { 26, 27, };
+static int mt7981_uart1_3_funcs[] = { 2, 2, };
+
/* UART2 */
static int mt7981_uart2_1_pins[] = { 22, 23, 24, 25, };
static int mt7981_uart2_1_funcs[] = { 3, 3, 3, 3, };
@@ -851,6 +863,12 @@ static const struct group_desc mt7981_groups[] = {
PINCTRL_PIN_GROUP("udi", mt7981_udi),
/* @GPIO(14) DRV_VBUS(1) */
PINCTRL_PIN_GROUP("drv_vbus", mt7981_drv_vbus),
+ /* @GPIO(15): EMMC_RSTB(2) */
+ PINCTRL_PIN_GROUP("emmc_reset", mt7981_emmc_reset),
+ /* @GPIO(16,17,18,19,24,25): EMMC_DATx, EMMC_CLK, EMMC_CMD */
+ PINCTRL_PIN_GROUP("emmc_4", mt7981_emmc_4),
+ /* @GPIO(16,17,18,19,20,21,22,23,24,25): EMMC_DATx, EMMC_CLK, EMMC_CMD */
+ PINCTRL_PIN_GROUP("emmc_8", mt7981_emmc_8),
/* @GPIO(15,25): EMMC(2) */
PINCTRL_PIN_GROUP("emmc_45", mt7981_emmc_45),
/* @GPIO(16,21): SNFI(3) */
@@ -871,6 +889,8 @@ static const struct group_desc mt7981_groups[] = {
PINCTRL_PIN_GROUP("uart1_1", mt7981_uart1_1),
/* @GPIO(9,10): UART1(2) */
PINCTRL_PIN_GROUP("uart1_2", mt7981_uart1_2),
+ /* @GPIO(26,27): UART1(2) */
+ PINCTRL_PIN_GROUP("uart1_3", mt7981_uart1_3),
/* @GPIO(22,25): UART1(3) */
PINCTRL_PIN_GROUP("uart2_1", mt7981_uart2_1),
/* @GPIO(22,24) PTA_EXT(4) */
@@ -933,7 +953,7 @@ static const struct group_desc mt7981_groups[] = {
static const char *mt7981_wa_aice_groups[] = { "wa_aice1", "wa_aice2", "wm_aice1_1",
"wa_aice3", "wm_aice1_2", };
static const char *mt7981_uart_groups[] = { "net_wo0_uart_txd_0", "net_wo0_uart_txd_1",
- "net_wo0_uart_txd_2", "uart0", "uart1_0", "uart1_1", "uart1_2", "uart2_0",
+ "net_wo0_uart_txd_2", "uart0", "uart1_0", "uart1_1", "uart1_2", "uart1_3", "uart2_0",
"uart2_0_tx_rx", "uart2_1", "wm_uart_0", "wm_aurt_1", "wm_aurt_2", };
static const char *mt7981_dfd_groups[] = { "dfd", "dfd_ntrst", };
static const char *mt7981_wdt_groups[] = { "watchdog", "watchdog1", };
@@ -952,7 +972,7 @@ static const char *mt7981_i2c_groups[] = { "i2c0_0", "i2c0_1", "u2_phy_i2c",
static const char *mt7981_pcm_groups[] = { "pcm", };
static const char *mt7981_udi_groups[] = { "udi", };
static const char *mt7981_usb_groups[] = { "drv_vbus", };
-static const char *mt7981_flash_groups[] = { "emmc_45", "snfi", };
+static const char *mt7981_flash_groups[] = { "emmc_reset", "emmc_4", "emmc_8", "emmc_45", "snfi", };
static const char *mt7981_ethernet_groups[] = { "smi_mdc_mdio", "gbe_ext_mdc_mdio",
"wf0_mode1", "wf0_mode3", "mt7531_int", };
static const char *mt7981_ant_groups[] = { "ant_sel", };
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7986.c b/drivers/pinctrl/mediatek/pinctrl-mt7986.c
index acaac9b38aa8a..39e80fa644c1c 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7986.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7986.c
@@ -16,7 +16,7 @@
PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
_x_bits, 32, 0)
-/**
+/*
* enum - Locking variants of the iocfg bases
*
* MT7986 have multiple bases to program pin configuration listed as the below:
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8186.c b/drivers/pinctrl/mediatek/pinctrl-mt8186.c
index 7be591591cce5..dd19e74856a92 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8186.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8186.c
@@ -1198,7 +1198,6 @@ static const struct mtk_pin_reg_calc mt8186_reg_cals[PINCTRL_PIN_REG_MAX] = {
[PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8186_pin_dir_range),
[PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8186_pin_di_range),
[PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8186_pin_do_range),
- [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt8186_pin_dir_range),
[PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8186_pin_smt_range),
[PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8186_pin_ies_range),
[PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8186_pin_pu_range),
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8192.c b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
index e3a76381f7f4e..3f8a9dbcb7041 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8192.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
@@ -1379,7 +1379,6 @@ static const struct mtk_pin_reg_calc mt8192_reg_cals[PINCTRL_PIN_REG_MAX] = {
[PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8192_pin_dir_range),
[PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8192_pin_di_range),
[PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8192_pin_do_range),
- [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt8192_pin_dir_range),
[PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8192_pin_smt_range),
[PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8192_pin_ies_range),
[PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8192_pin_pu_range),
diff --git a/drivers/pinctrl/nomadik/Kconfig b/drivers/pinctrl/nomadik/Kconfig
index 0fea167c283fb..aafecf3486704 100644
--- a/drivers/pinctrl/nomadik/Kconfig
+++ b/drivers/pinctrl/nomadik/Kconfig
@@ -18,15 +18,15 @@ config PINCTRL_AB8505
endif
-if (ARCH_U8500 || ARCH_NOMADIK)
+if (ARCH_U8500 || ARCH_NOMADIK || COMPILE_TEST)
config PINCTRL_NOMADIK
bool "Nomadik pin controller driver"
- depends on OF && GPIOLIB
+ depends on OF
select PINMUX
select PINCONF
- select OF_GPIO
- select GPIOLIB_IRQCHIP
+ select GPIOLIB
+ select GPIO_NOMADIK
config PINCTRL_STN8815
bool "STN8815 pin controller driver"
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index 490e0959e8be6..0b4a3dd9d8c79 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
@@ -3,8 +3,9 @@
#include <linux/types.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/gpio/driver.h>
-#include "pinctrl-nomadik.h"
+#include <linux/gpio/gpio-nomadik.h>
/* All the pins that can be used for GPIO and some other functions */
#define _GPIO(offset) (offset)
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
index 1552222ac68e7..c5a52fcaba307 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
@@ -3,8 +3,9 @@
#include <linux/types.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/gpio/driver.h>
-#include "pinctrl-nomadik.h"
+#include <linux/gpio/gpio-nomadik.h>
/* All the pins that can be used for GPIO and some other functions */
#define _GPIO(offset) (offset)
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 7911353ac97d5..cb0f0d5a5e456 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -1,12 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Generic GPIO driver for logic cells found in the Nomadik SoC
+ * Pinmux & pinconf driver for the IP block found in the Nomadik SoC. This
+ * depends on gpio-nomadik and some handling is intertwined; see nmk_gpio_chips
+ * which is used by this driver to access the GPIO banks array.
*
* Copyright (C) 2008,2009 STMicroelectronics
* Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
* Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com>
* Copyright (C) 2011-2013 Linus Walleij <linus.walleij@linaro.org>
*/
+
#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
@@ -25,6 +28,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
/* Since we request GPIOs from ourself */
#include <linux/pinctrl/consumer.h>
@@ -36,15 +40,7 @@
#include "../core.h"
#include "../pinctrl-utils.h"
-#include "pinctrl-nomadik.h"
-
-/*
- * The GPIO module in the Nomadik family of Systems-on-Chip is an
- * AMBA device, managing 32 pins and alternate functions. The logic block
- * is currently used in the Nomadik and ux500.
- *
- * Symbols in this file are called "nmk_gpio" for "nomadik gpio"
- */
+#include <linux/gpio/gpio-nomadik.h>
/*
* pin configurations are represented by 32-bit integers:
@@ -76,8 +72,6 @@
* PIN_CFG - default config with alternate function
*/
-typedef unsigned long pin_cfg_t;
-
#define PIN_NUM_MASK 0x1ff
#define PIN_NUM(x) ((x) & PIN_NUM_MASK)
@@ -172,7 +166,6 @@ typedef unsigned long pin_cfg_t;
#define PIN_SLEEPMODE_DISABLED (0 << PIN_SLEEPMODE_SHIFT)
#define PIN_SLEEPMODE_ENABLED (1 << PIN_SLEEPMODE_SHIFT)
-
/* Shortcuts. Use these instead of separate DIR, PULL, and VAL. */
#define PIN_INPUT_PULLDOWN (PIN_DIR_INPUT | PIN_PULL_DOWN)
#define PIN_INPUT_PULLUP (PIN_DIR_INPUT | PIN_PULL_UP)
@@ -200,75 +193,6 @@ typedef unsigned long pin_cfg_t;
(PIN_CFG_DEFAULT |\
(PIN_NUM(num) | PIN_##alt | PIN_OUTPUT_##val))
-/*
- * "nmk_gpio" and "NMK_GPIO" stand for "Nomadik GPIO", leaving
- * the "gpio" namespace for generic and cross-machine functions
- */
-
-#define GPIO_BLOCK_SHIFT 5
-#define NMK_GPIO_PER_CHIP (1 << GPIO_BLOCK_SHIFT)
-#define NMK_MAX_BANKS DIV_ROUND_UP(512, NMK_GPIO_PER_CHIP)
-
-/* Register in the logic block */
-#define NMK_GPIO_DAT 0x00
-#define NMK_GPIO_DATS 0x04
-#define NMK_GPIO_DATC 0x08
-#define NMK_GPIO_PDIS 0x0c
-#define NMK_GPIO_DIR 0x10
-#define NMK_GPIO_DIRS 0x14
-#define NMK_GPIO_DIRC 0x18
-#define NMK_GPIO_SLPC 0x1c
-#define NMK_GPIO_AFSLA 0x20
-#define NMK_GPIO_AFSLB 0x24
-#define NMK_GPIO_LOWEMI 0x28
-
-#define NMK_GPIO_RIMSC 0x40
-#define NMK_GPIO_FIMSC 0x44
-#define NMK_GPIO_IS 0x48
-#define NMK_GPIO_IC 0x4c
-#define NMK_GPIO_RWIMSC 0x50
-#define NMK_GPIO_FWIMSC 0x54
-#define NMK_GPIO_WKS 0x58
-/* These appear in DB8540 and later ASICs */
-#define NMK_GPIO_EDGELEVEL 0x5C
-#define NMK_GPIO_LEVEL 0x60
-
-
-/* Pull up/down values */
-enum nmk_gpio_pull {
- NMK_GPIO_PULL_NONE,
- NMK_GPIO_PULL_UP,
- NMK_GPIO_PULL_DOWN,
-};
-
-/* Sleep mode */
-enum nmk_gpio_slpm {
- NMK_GPIO_SLPM_INPUT,
- NMK_GPIO_SLPM_WAKEUP_ENABLE = NMK_GPIO_SLPM_INPUT,
- NMK_GPIO_SLPM_NOCHANGE,
- NMK_GPIO_SLPM_WAKEUP_DISABLE = NMK_GPIO_SLPM_NOCHANGE,
-};
-
-struct nmk_gpio_chip {
- struct gpio_chip chip;
- void __iomem *addr;
- struct clk *clk;
- unsigned int bank;
- void (*set_ioforce)(bool enable);
- spinlock_t lock;
- bool sleepmode;
- /* Keep track of configured edges */
- u32 edge_rising;
- u32 edge_falling;
- u32 real_wake;
- u32 rwimsc;
- u32 fwimsc;
- u32 rimsc;
- u32 fimsc;
- u32 pull_up;
- u32 lowemi;
-};
-
/**
* struct nmk_pinctrl - state container for the Nomadik pin controller
* @dev: containing device pointer
@@ -283,14 +207,13 @@ struct nmk_pinctrl {
void __iomem *prcm_base;
};
-static struct nmk_gpio_chip *nmk_gpio_chips[NMK_MAX_BANKS];
+/* See nmk_gpio_populate_chip() that fills this array. */
+struct nmk_gpio_chip *nmk_gpio_chips[NMK_MAX_BANKS];
-static DEFINE_SPINLOCK(nmk_gpio_slpm_lock);
-
-#define NUM_BANKS ARRAY_SIZE(nmk_gpio_chips)
+DEFINE_SPINLOCK(nmk_gpio_slpm_lock);
static void __nmk_gpio_set_mode(struct nmk_gpio_chip *nmk_chip,
- unsigned offset, int gpio_mode)
+ unsigned int offset, int gpio_mode)
{
u32 afunc, bfunc;
@@ -304,21 +227,8 @@ static void __nmk_gpio_set_mode(struct nmk_gpio_chip *nmk_chip,
writel(bfunc, nmk_chip->addr + NMK_GPIO_AFSLB);
}
-static void __nmk_gpio_set_slpm(struct nmk_gpio_chip *nmk_chip,
- unsigned offset, enum nmk_gpio_slpm mode)
-{
- u32 slpm;
-
- slpm = readl(nmk_chip->addr + NMK_GPIO_SLPC);
- if (mode == NMK_GPIO_SLPM_NOCHANGE)
- slpm |= BIT(offset);
- else
- slpm &= ~BIT(offset);
- writel(slpm, nmk_chip->addr + NMK_GPIO_SLPC);
-}
-
static void __nmk_gpio_set_pull(struct nmk_gpio_chip *nmk_chip,
- unsigned offset, enum nmk_gpio_pull pull)
+ unsigned int offset, enum nmk_gpio_pull pull)
{
u32 pdis;
@@ -342,7 +252,7 @@ static void __nmk_gpio_set_pull(struct nmk_gpio_chip *nmk_chip,
}
static void __nmk_gpio_set_lowemi(struct nmk_gpio_chip *nmk_chip,
- unsigned offset, bool lowemi)
+ unsigned int offset, bool lowemi)
{
bool enabled = nmk_chip->lowemi & BIT(offset);
@@ -359,29 +269,13 @@ static void __nmk_gpio_set_lowemi(struct nmk_gpio_chip *nmk_chip,
}
static void __nmk_gpio_make_input(struct nmk_gpio_chip *nmk_chip,
- unsigned offset)
+ unsigned int offset)
{
writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DIRC);
}
-static void __nmk_gpio_set_output(struct nmk_gpio_chip *nmk_chip,
- unsigned offset, int val)
-{
- if (val)
- writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DATS);
- else
- writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DATC);
-}
-
-static void __nmk_gpio_make_output(struct nmk_gpio_chip *nmk_chip,
- unsigned offset, int val)
-{
- writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DIRS);
- __nmk_gpio_set_output(nmk_chip, offset, val);
-}
-
static void __nmk_gpio_set_mode_safe(struct nmk_gpio_chip *nmk_chip,
- unsigned offset, int gpio_mode,
+ unsigned int offset, int gpio_mode,
bool glitch)
{
u32 rwimsc = nmk_chip->rwimsc;
@@ -408,7 +302,7 @@ static void __nmk_gpio_set_mode_safe(struct nmk_gpio_chip *nmk_chip,
}
static void
-nmk_gpio_disable_lazy_irq(struct nmk_gpio_chip *nmk_chip, unsigned offset)
+nmk_gpio_disable_lazy_irq(struct nmk_gpio_chip *nmk_chip, unsigned int offset)
{
u32 falling = nmk_chip->fimsc & BIT(offset);
u32 rising = nmk_chip->rimsc & BIT(offset);
@@ -447,7 +341,7 @@ static void nmk_write_masked(void __iomem *reg, u32 mask, u32 value)
}
static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct,
- unsigned offset, unsigned alt_num)
+ unsigned int offset, unsigned int alt_num)
{
int i;
u16 reg;
@@ -484,14 +378,14 @@ static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct,
*/
if (!alt_num) {
for (i = 0 ; i < PRCM_IDX_GPIOCR_ALTC_MAX ; i++) {
- if (pin_desc->altcx[i].used == true) {
+ if (pin_desc->altcx[i].used) {
reg = gpiocr_regs[pin_desc->altcx[i].reg_index];
bit = pin_desc->altcx[i].control_bit;
if (readl(npct->prcm_base + reg) & BIT(bit)) {
nmk_write_masked(npct->prcm_base + reg, BIT(bit), 0);
dev_dbg(npct->dev,
"PRCM GPIOCR: pin %i: alternate-C%i has been disabled\n",
- offset, i+1);
+ offset, i + 1);
}
}
}
@@ -499,10 +393,10 @@ static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct,
}
alt_index = alt_num - 1;
- if (pin_desc->altcx[alt_index].used == false) {
+ if (!pin_desc->altcx[alt_index].used) {
dev_warn(npct->dev,
- "PRCM GPIOCR: pin %i: alternate-C%i does not exist\n",
- offset, alt_num);
+ "PRCM GPIOCR: pin %i: alternate-C%i does not exist\n",
+ offset, alt_num);
return;
}
@@ -513,14 +407,14 @@ static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct,
for (i = 0 ; i < PRCM_IDX_GPIOCR_ALTC_MAX ; i++) {
if (i == alt_index)
continue;
- if (pin_desc->altcx[i].used == true) {
+ if (pin_desc->altcx[i].used) {
reg = gpiocr_regs[pin_desc->altcx[i].reg_index];
bit = pin_desc->altcx[i].control_bit;
if (readl(npct->prcm_base + reg) & BIT(bit)) {
nmk_write_masked(npct->prcm_base + reg, BIT(bit), 0);
dev_dbg(npct->dev,
"PRCM GPIOCR: pin %i: alternate-C%i has been disabled\n",
- offset, i+1);
+ offset, i + 1);
}
}
}
@@ -528,7 +422,7 @@ static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct,
reg = gpiocr_regs[pin_desc->altcx[alt_index].reg_index];
bit = pin_desc->altcx[alt_index].control_bit;
dev_dbg(npct->dev, "PRCM GPIOCR: pin %i: alternate-C%i has been selected\n",
- offset, alt_index+1);
+ offset, alt_index + 1);
nmk_write_masked(npct->prcm_base + reg, BIT(bit), BIT(bit));
}
@@ -548,7 +442,7 @@ static void nmk_gpio_glitch_slpm_init(unsigned int *slpm)
{
int i;
- for (i = 0; i < NUM_BANKS; i++) {
+ for (i = 0; i < NMK_MAX_BANKS; i++) {
struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
unsigned int temp = slpm[i];
@@ -566,7 +460,7 @@ static void nmk_gpio_glitch_slpm_restore(unsigned int *slpm)
{
int i;
- for (i = 0; i < NUM_BANKS; i++) {
+ for (i = 0; i < NMK_MAX_BANKS; i++) {
struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
if (!chip)
@@ -578,7 +472,8 @@ static void nmk_gpio_glitch_slpm_restore(unsigned int *slpm)
}
}
-static int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio)
+/* Only called by gpio-nomadik but requires knowledge of struct nmk_pinctrl. */
+int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio)
{
int i;
u16 reg;
@@ -600,586 +495,16 @@ static int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev,
pin_desc = npct->soc->altcx_pins + i;
gpiocr_regs = npct->soc->prcm_gpiocr_registers;
for (i = 0; i < PRCM_IDX_GPIOCR_ALTC_MAX; i++) {
- if (pin_desc->altcx[i].used == true) {
+ if (pin_desc->altcx[i].used) {
reg = gpiocr_regs[pin_desc->altcx[i].reg_index];
bit = pin_desc->altcx[i].control_bit;
if (readl(npct->prcm_base + reg) & BIT(bit))
- return NMK_GPIO_ALT_C+i+1;
+ return NMK_GPIO_ALT_C + i + 1;
}
}
return NMK_GPIO_ALT_C;
}
-/* IRQ functions */
-
-static void nmk_gpio_irq_ack(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
-
- clk_enable(nmk_chip->clk);
- writel(BIT(d->hwirq), nmk_chip->addr + NMK_GPIO_IC);
- clk_disable(nmk_chip->clk);
-}
-
-enum nmk_gpio_irq_type {
- NORMAL,
- WAKE,
-};
-
-static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
- int offset, enum nmk_gpio_irq_type which,
- bool enable)
-{
- u32 *rimscval;
- u32 *fimscval;
- u32 rimscreg;
- u32 fimscreg;
-
- if (which == NORMAL) {
- rimscreg = NMK_GPIO_RIMSC;
- fimscreg = NMK_GPIO_FIMSC;
- rimscval = &nmk_chip->rimsc;
- fimscval = &nmk_chip->fimsc;
- } else {
- rimscreg = NMK_GPIO_RWIMSC;
- fimscreg = NMK_GPIO_FWIMSC;
- rimscval = &nmk_chip->rwimsc;
- fimscval = &nmk_chip->fwimsc;
- }
-
- /* we must individually set/clear the two edges */
- if (nmk_chip->edge_rising & BIT(offset)) {
- if (enable)
- *rimscval |= BIT(offset);
- else
- *rimscval &= ~BIT(offset);
- writel(*rimscval, nmk_chip->addr + rimscreg);
- }
- if (nmk_chip->edge_falling & BIT(offset)) {
- if (enable)
- *fimscval |= BIT(offset);
- else
- *fimscval &= ~BIT(offset);
- writel(*fimscval, nmk_chip->addr + fimscreg);
- }
-}
-
-static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
- int offset, bool on)
-{
- /*
- * Ensure WAKEUP_ENABLE is on. No need to disable it if wakeup is
- * disabled, since setting SLPM to 1 increases power consumption, and
- * wakeup is anyhow controlled by the RIMSC and FIMSC registers.
- */
- if (nmk_chip->sleepmode && on) {
- __nmk_gpio_set_slpm(nmk_chip, offset,
- NMK_GPIO_SLPM_WAKEUP_ENABLE);
- }
-
- __nmk_gpio_irq_modify(nmk_chip, offset, WAKE, on);
-}
-
-static void nmk_gpio_irq_maskunmask(struct nmk_gpio_chip *nmk_chip,
- struct irq_data *d, bool enable)
-{
- unsigned long flags;
-
- clk_enable(nmk_chip->clk);
- spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
- spin_lock(&nmk_chip->lock);
-
- __nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, enable);
-
- if (!(nmk_chip->real_wake & BIT(d->hwirq)))
- __nmk_gpio_set_wake(nmk_chip, d->hwirq, enable);
-
- spin_unlock(&nmk_chip->lock);
- spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
- clk_disable(nmk_chip->clk);
-}
-
-static void nmk_gpio_irq_mask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
-
- nmk_gpio_irq_maskunmask(nmk_chip, d, false);
- gpiochip_disable_irq(gc, irqd_to_hwirq(d));
-}
-
-static void nmk_gpio_irq_unmask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
-
- gpiochip_enable_irq(gc, irqd_to_hwirq(d));
- nmk_gpio_irq_maskunmask(nmk_chip, d, true);
-}
-
-static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
- unsigned long flags;
-
- clk_enable(nmk_chip->clk);
- spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
- spin_lock(&nmk_chip->lock);
-
- if (irqd_irq_disabled(d))
- __nmk_gpio_set_wake(nmk_chip, d->hwirq, on);
-
- if (on)
- nmk_chip->real_wake |= BIT(d->hwirq);
- else
- nmk_chip->real_wake &= ~BIT(d->hwirq);
-
- spin_unlock(&nmk_chip->lock);
- spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
- clk_disable(nmk_chip->clk);
-
- return 0;
-}
-
-static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
- bool enabled = !irqd_irq_disabled(d);
- bool wake = irqd_is_wakeup_set(d);
- unsigned long flags;
-
- if (type & IRQ_TYPE_LEVEL_HIGH)
- return -EINVAL;
- if (type & IRQ_TYPE_LEVEL_LOW)
- return -EINVAL;
-
- clk_enable(nmk_chip->clk);
- spin_lock_irqsave(&nmk_chip->lock, flags);
-
- if (enabled)
- __nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, false);
-
- if (enabled || wake)
- __nmk_gpio_irq_modify(nmk_chip, d->hwirq, WAKE, false);
-
- nmk_chip->edge_rising &= ~BIT(d->hwirq);
- if (type & IRQ_TYPE_EDGE_RISING)
- nmk_chip->edge_rising |= BIT(d->hwirq);
-
- nmk_chip->edge_falling &= ~BIT(d->hwirq);
- if (type & IRQ_TYPE_EDGE_FALLING)
- nmk_chip->edge_falling |= BIT(d->hwirq);
-
- if (enabled)
- __nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, true);
-
- if (enabled || wake)
- __nmk_gpio_irq_modify(nmk_chip, d->hwirq, WAKE, true);
-
- spin_unlock_irqrestore(&nmk_chip->lock, flags);
- clk_disable(nmk_chip->clk);
-
- return 0;
-}
-
-static unsigned int nmk_gpio_irq_startup(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
-
- clk_enable(nmk_chip->clk);
- nmk_gpio_irq_unmask(d);
- return 0;
-}
-
-static void nmk_gpio_irq_shutdown(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
-
- nmk_gpio_irq_mask(d);
- clk_disable(nmk_chip->clk);
-}
-
-static void nmk_gpio_irq_handler(struct irq_desc *desc)
-{
- struct irq_chip *host_chip = irq_desc_get_chip(desc);
- struct gpio_chip *chip = irq_desc_get_handler_data(desc);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
- u32 status;
-
- chained_irq_enter(host_chip, desc);
-
- clk_enable(nmk_chip->clk);
- status = readl(nmk_chip->addr + NMK_GPIO_IS);
- clk_disable(nmk_chip->clk);
-
- while (status) {
- int bit = __ffs(status);
-
- generic_handle_domain_irq(chip->irq.domain, bit);
- status &= ~BIT(bit);
- }
-
- chained_irq_exit(host_chip, desc);
-}
-
-/* I/O Functions */
-
-static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset)
-{
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
- int dir;
-
- clk_enable(nmk_chip->clk);
-
- dir = readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset);
-
- clk_disable(nmk_chip->clk);
-
- if (dir)
- return GPIO_LINE_DIRECTION_OUT;
-
- return GPIO_LINE_DIRECTION_IN;
-}
-
-static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned offset)
-{
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
-
- clk_enable(nmk_chip->clk);
-
- writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DIRC);
-
- clk_disable(nmk_chip->clk);
-
- return 0;
-}
-
-static int nmk_gpio_get_input(struct gpio_chip *chip, unsigned offset)
-{
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
- int value;
-
- clk_enable(nmk_chip->clk);
-
- value = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & BIT(offset));
-
- clk_disable(nmk_chip->clk);
-
- return value;
-}
-
-static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned offset,
- int val)
-{
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
-
- clk_enable(nmk_chip->clk);
-
- __nmk_gpio_set_output(nmk_chip, offset, val);
-
- clk_disable(nmk_chip->clk);
-}
-
-static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned offset,
- int val)
-{
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
-
- clk_enable(nmk_chip->clk);
-
- __nmk_gpio_make_output(nmk_chip, offset, val);
-
- clk_disable(nmk_chip->clk);
-
- return 0;
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int nmk_gpio_get_mode(struct nmk_gpio_chip *nmk_chip, int offset)
-{
- u32 afunc, bfunc;
-
- clk_enable(nmk_chip->clk);
-
- afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & BIT(offset);
- bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & BIT(offset);
-
- clk_disable(nmk_chip->clk);
-
- return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? NMK_GPIO_ALT_B : 0);
-}
-
-static void nmk_gpio_dbg_show_one(struct seq_file *s,
- struct pinctrl_dev *pctldev, struct gpio_chip *chip,
- unsigned offset, unsigned gpio)
-{
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
- int mode;
- bool is_out;
- bool data_out;
- bool pull;
- const char *modes[] = {
- [NMK_GPIO_ALT_GPIO] = "gpio",
- [NMK_GPIO_ALT_A] = "altA",
- [NMK_GPIO_ALT_B] = "altB",
- [NMK_GPIO_ALT_C] = "altC",
- [NMK_GPIO_ALT_C+1] = "altC1",
- [NMK_GPIO_ALT_C+2] = "altC2",
- [NMK_GPIO_ALT_C+3] = "altC3",
- [NMK_GPIO_ALT_C+4] = "altC4",
- };
-
- char *label = gpiochip_dup_line_label(chip, offset);
- if (IS_ERR(label))
- return;
-
- clk_enable(nmk_chip->clk);
- is_out = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
- pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & BIT(offset));
- data_out = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & BIT(offset));
- mode = nmk_gpio_get_mode(nmk_chip, offset);
- if ((mode == NMK_GPIO_ALT_C) && pctldev)
- mode = nmk_prcm_gpiocr_get_mode(pctldev, gpio);
-
- if (is_out) {
- seq_printf(s, " gpio-%-3d (%-20.20s) out %s %s",
- gpio,
- label ?: "(none)",
- data_out ? "hi" : "lo",
- (mode < 0) ? "unknown" : modes[mode]);
- } else {
- int irq = chip->to_irq(chip, offset);
- const int pullidx = pull ? 1 : 0;
- int val;
- static const char * const pulls[] = {
- "none ",
- "pull enabled",
- };
-
- seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
- gpio,
- label ?: "(none)",
- pulls[pullidx],
- (mode < 0) ? "unknown" : modes[mode]);
-
- val = nmk_gpio_get_input(chip, offset);
- seq_printf(s, " VAL %d", val);
-
- /*
- * This races with request_irq(), set_irq_type(),
- * and set_irq_wake() ... but those are "rare".
- */
- if (irq > 0 && irq_has_action(irq)) {
- char *trigger;
- bool wake;
-
- if (nmk_chip->edge_rising & BIT(offset))
- trigger = "edge-rising";
- else if (nmk_chip->edge_falling & BIT(offset))
- trigger = "edge-falling";
- else
- trigger = "edge-undefined";
-
- wake = !!(nmk_chip->real_wake & BIT(offset));
-
- seq_printf(s, " irq-%d %s%s",
- irq, trigger, wake ? " wakeup" : "");
- }
- }
- clk_disable(nmk_chip->clk);
-}
-
-static void nmk_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
-{
- unsigned i;
- unsigned gpio = chip->base;
-
- for (i = 0; i < chip->ngpio; i++, gpio++) {
- nmk_gpio_dbg_show_one(s, NULL, chip, i, gpio);
- seq_printf(s, "\n");
- }
-}
-
-#else
-static inline void nmk_gpio_dbg_show_one(struct seq_file *s,
- struct pinctrl_dev *pctldev,
- struct gpio_chip *chip,
- unsigned offset, unsigned gpio)
-{
-}
-#define nmk_gpio_dbg_show NULL
-#endif
-
-/*
- * We will allocate memory for the state container using devm* allocators
- * binding to the first device reaching this point, it doesn't matter if
- * it is the pin controller or GPIO driver. However we need to use the right
- * platform device when looking up resources so pay attention to pdev.
- */
-static struct nmk_gpio_chip *nmk_gpio_populate_chip(struct device_node *np,
- struct platform_device *pdev)
-{
- struct nmk_gpio_chip *nmk_chip;
- struct platform_device *gpio_pdev;
- struct gpio_chip *chip;
- struct resource *res;
- struct clk *clk;
- void __iomem *base;
- u32 id;
-
- gpio_pdev = of_find_device_by_node(np);
- if (!gpio_pdev) {
- pr_err("populate \"%pOFn\": device not found\n", np);
- return ERR_PTR(-ENODEV);
- }
- if (of_property_read_u32(np, "gpio-bank", &id)) {
- dev_err(&pdev->dev, "populate: gpio-bank property not found\n");
- platform_device_put(gpio_pdev);
- return ERR_PTR(-EINVAL);
- }
-
- /* Already populated? */
- nmk_chip = nmk_gpio_chips[id];
- if (nmk_chip) {
- platform_device_put(gpio_pdev);
- return nmk_chip;
- }
-
- nmk_chip = devm_kzalloc(&pdev->dev, sizeof(*nmk_chip), GFP_KERNEL);
- if (!nmk_chip) {
- platform_device_put(gpio_pdev);
- return ERR_PTR(-ENOMEM);
- }
-
- nmk_chip->bank = id;
- chip = &nmk_chip->chip;
- chip->base = id * NMK_GPIO_PER_CHIP;
- chip->ngpio = NMK_GPIO_PER_CHIP;
- chip->label = dev_name(&gpio_pdev->dev);
- chip->parent = &gpio_pdev->dev;
-
- res = platform_get_resource(gpio_pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base)) {
- platform_device_put(gpio_pdev);
- return ERR_CAST(base);
- }
- nmk_chip->addr = base;
-
- clk = clk_get(&gpio_pdev->dev, NULL);
- if (IS_ERR(clk)) {
- platform_device_put(gpio_pdev);
- return (void *) clk;
- }
- clk_prepare(clk);
- nmk_chip->clk = clk;
-
- BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips));
- nmk_gpio_chips[id] = nmk_chip;
- return nmk_chip;
-}
-
-static void nmk_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
-
- seq_printf(p, "nmk%u-%u-%u", nmk_chip->bank,
- gc->base, gc->base + gc->ngpio - 1);
-}
-
-static const struct irq_chip nmk_irq_chip = {
- .irq_ack = nmk_gpio_irq_ack,
- .irq_mask = nmk_gpio_irq_mask,
- .irq_unmask = nmk_gpio_irq_unmask,
- .irq_set_type = nmk_gpio_irq_set_type,
- .irq_set_wake = nmk_gpio_irq_set_wake,
- .irq_startup = nmk_gpio_irq_startup,
- .irq_shutdown = nmk_gpio_irq_shutdown,
- .irq_print_chip = nmk_gpio_irq_print_chip,
- .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
-};
-
-static int nmk_gpio_probe(struct platform_device *dev)
-{
- struct device_node *np = dev->dev.of_node;
- struct nmk_gpio_chip *nmk_chip;
- struct gpio_chip *chip;
- struct gpio_irq_chip *girq;
- bool supports_sleepmode;
- int irq;
- int ret;
-
- nmk_chip = nmk_gpio_populate_chip(np, dev);
- if (IS_ERR(nmk_chip)) {
- dev_err(&dev->dev, "could not populate nmk chip struct\n");
- return PTR_ERR(nmk_chip);
- }
-
- supports_sleepmode =
- of_property_read_bool(np, "st,supports-sleepmode");
-
- /* Correct platform device ID */
- dev->id = nmk_chip->bank;
-
- irq = platform_get_irq(dev, 0);
- if (irq < 0)
- return irq;
-
- /*
- * The virt address in nmk_chip->addr is in the nomadik register space,
- * so we can simply convert the resource address, without remapping
- */
- nmk_chip->sleepmode = supports_sleepmode;
- spin_lock_init(&nmk_chip->lock);
-
- chip = &nmk_chip->chip;
- chip->parent = &dev->dev;
- chip->request = gpiochip_generic_request;
- chip->free = gpiochip_generic_free;
- chip->get_direction = nmk_gpio_get_dir;
- chip->direction_input = nmk_gpio_make_input;
- chip->get = nmk_gpio_get_input;
- chip->direction_output = nmk_gpio_make_output;
- chip->set = nmk_gpio_set_output;
- chip->dbg_show = nmk_gpio_dbg_show;
- chip->can_sleep = false;
- chip->owner = THIS_MODULE;
-
- girq = &chip->irq;
- gpio_irq_chip_set_chip(girq, &nmk_irq_chip);
- girq->parent_handler = nmk_gpio_irq_handler;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(&dev->dev, 1,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- girq->parents[0] = irq;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_edge_irq;
-
- clk_enable(nmk_chip->clk);
- nmk_chip->lowemi = readl_relaxed(nmk_chip->addr + NMK_GPIO_LOWEMI);
- clk_disable(nmk_chip->clk);
-
- ret = gpiochip_add_data(chip, nmk_chip);
- if (ret)
- return ret;
-
- platform_set_drvdata(dev, nmk_chip);
-
- dev_info(&dev->dev, "chip registered\n");
-
- return 0;
-}
-
static int nmk_get_groups_cnt(struct pinctrl_dev *pctldev)
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
@@ -1188,43 +513,51 @@ static int nmk_get_groups_cnt(struct pinctrl_dev *pctldev)
}
static const char *nmk_get_group_name(struct pinctrl_dev *pctldev,
- unsigned selector)
+ unsigned int selector)
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
return npct->soc->groups[selector].grp.name;
}
-static int nmk_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
- const unsigned **pins,
- unsigned *npins)
+static int nmk_get_group_pins(struct pinctrl_dev *pctldev, unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
*pins = npct->soc->groups[selector].grp.pins;
- *npins = npct->soc->groups[selector].grp.npins;
+ *num_pins = npct->soc->groups[selector].grp.npins;
return 0;
}
-static struct nmk_gpio_chip *find_nmk_gpio_from_pin(unsigned pin)
+/*
+ * This makes the mapping from pin number to a GPIO chip. We also return the
+ * pin offset in the GPIO chip for convenience (and to avoid a second loop).
+ */
+static struct nmk_gpio_chip *find_nmk_gpio_from_pin(unsigned int pin,
+ unsigned int *offset)
{
- int i;
+ int i, j = 0;
struct nmk_gpio_chip *nmk_gpio;
- for(i = 0; i < NMK_MAX_BANKS; i++) {
+ /* We assume that pins are allocated in bank order. */
+ for (i = 0; i < NMK_MAX_BANKS; i++) {
nmk_gpio = nmk_gpio_chips[i];
if (!nmk_gpio)
continue;
- if (pin >= nmk_gpio->chip.base &&
- pin < nmk_gpio->chip.base + nmk_gpio->chip.ngpio)
+ if (pin >= j && pin < j + nmk_gpio->chip.ngpio) {
+ if (offset)
+ *offset = pin - j;
return nmk_gpio;
+ }
+ j += nmk_gpio->chip.ngpio;
}
return NULL;
}
-static struct gpio_chip *find_gc_from_pin(unsigned pin)
+static struct gpio_chip *find_gc_from_pin(unsigned int pin)
{
- struct nmk_gpio_chip *nmk_gpio = find_nmk_gpio_from_pin(pin);
+ struct nmk_gpio_chip *nmk_gpio = find_nmk_gpio_from_pin(pin, NULL);
if (nmk_gpio)
return &nmk_gpio->chip;
@@ -1232,7 +565,7 @@ static struct gpio_chip *find_gc_from_pin(unsigned pin)
}
static void nmk_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
- unsigned offset)
+ unsigned int offset)
{
struct gpio_chip *chip = find_gc_from_pin(offset);
@@ -1243,9 +576,9 @@ static void nmk_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
nmk_gpio_dbg_show_one(s, pctldev, chip, offset - chip->base, offset);
}
-static int nmk_dt_add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps,
- unsigned *num_maps, const char *group,
- const char *function)
+static int nmk_dt_add_map_mux(struct pinctrl_map **map, unsigned int *reserved_maps,
+ unsigned int *num_maps, const char *group,
+ const char *function)
{
if (*num_maps == *reserved_maps)
return -ENOSPC;
@@ -1259,9 +592,9 @@ static int nmk_dt_add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps,
}
static int nmk_dt_add_map_configs(struct pinctrl_map **map,
- unsigned *reserved_maps,
- unsigned *num_maps, const char *group,
- unsigned long *configs, unsigned num_configs)
+ unsigned int *reserved_maps,
+ unsigned int *num_maps, const char *group,
+ unsigned long *configs, unsigned int num_configs)
{
unsigned long *dup_configs;
@@ -1352,9 +685,9 @@ static const struct nmk_cfg_param nmk_cfg_params[] = {
static int nmk_dt_pin_config(int index, int val, unsigned long *config)
{
- if (nmk_cfg_params[index].choice == NULL)
+ if (!nmk_cfg_params[index].choice) {
*config = nmk_cfg_params[index].config;
- else {
+ } else {
/* test if out of range */
if (val < nmk_cfg_params[index].size) {
*config = nmk_cfg_params[index].config |
@@ -1377,15 +710,14 @@ static const char *nmk_find_pin_name(struct pinctrl_dev *pctldev, const char *pi
}
static bool nmk_pinctrl_dt_get_config(struct device_node *np,
- unsigned long *configs)
+ unsigned long *configs)
{
bool has_config = 0;
unsigned long cfg = 0;
int i, val, ret;
for (i = 0; i < ARRAY_SIZE(nmk_cfg_params); i++) {
- ret = of_property_read_u32(np,
- nmk_cfg_params[i].property, &val);
+ ret = of_property_read_u32(np, nmk_cfg_params[i].property, &val);
if (ret != -EINVAL) {
if (nmk_dt_pin_config(i, val, &cfg) == 0) {
*configs |= cfg;
@@ -1398,10 +730,10 @@ static bool nmk_pinctrl_dt_get_config(struct device_node *np,
}
static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np,
- struct pinctrl_map **map,
- unsigned *reserved_maps,
- unsigned *num_maps)
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *reserved_maps,
+ unsigned int *num_maps)
{
int ret;
const char *function = NULL;
@@ -1426,7 +758,7 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
of_property_for_each_string(np, "groups", prop, group) {
ret = nmk_dt_add_map_mux(map, reserved_maps, num_maps,
- group, function);
+ group, function);
if (ret < 0)
goto exit;
}
@@ -1467,10 +799,11 @@ exit:
}
static int nmk_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np_config,
- struct pinctrl_map **map, unsigned *num_maps)
+ struct device_node *np_config,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
{
- unsigned reserved_maps;
+ unsigned int reserved_maps;
struct device_node *np;
int ret;
@@ -1480,7 +813,7 @@ static int nmk_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
for_each_child_of_node(np_config, np) {
ret = nmk_pinctrl_dt_subnode_to_map(pctldev, np, map,
- &reserved_maps, num_maps);
+ &reserved_maps, num_maps);
if (ret < 0) {
pinctrl_utils_free_map(pctldev, *map, *num_maps);
of_node_put(np);
@@ -1508,7 +841,7 @@ static int nmk_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
}
static const char *nmk_pmx_get_func_name(struct pinctrl_dev *pctldev,
- unsigned function)
+ unsigned int function)
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
@@ -1516,7 +849,7 @@ static const char *nmk_pmx_get_func_name(struct pinctrl_dev *pctldev,
}
static int nmk_pmx_get_func_groups(struct pinctrl_dev *pctldev,
- unsigned function,
+ unsigned int function,
const char * const **groups,
unsigned * const num_groups)
{
@@ -1528,12 +861,12 @@ static int nmk_pmx_get_func_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
- unsigned group)
+static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned int function,
+ unsigned int group)
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
const struct nmk_pingroup *g;
- static unsigned int slpm[NUM_BANKS];
+ static unsigned int slpm[NMK_MAX_BANKS];
unsigned long flags = 0;
bool glitch;
int ret = -EINVAL;
@@ -1544,7 +877,7 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
if (g->altsetting < 0)
return -EINVAL;
- dev_dbg(npct->dev, "enable group %s, %u pins\n", g->grp.name, g->grp.npins);
+ dev_dbg(npct->dev, "enable group %s, %zu pins\n", g->grp.name, g->grp.npins);
/*
* If we're setting altfunc C by setting both AFSLA and AFSLB to 1,
@@ -1579,26 +912,38 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
* Then mask the pins that need to be sleeping now when we're
* switching to the ALT C function.
*/
- for (i = 0; i < g->grp.npins; i++)
- slpm[g->grp.pins[i] / NMK_GPIO_PER_CHIP] &= ~BIT(g->grp.pins[i]);
+ for (i = 0; i < g->grp.npins; i++) {
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned int bit;
+
+ nmk_chip = find_nmk_gpio_from_pin(g->grp.pins[i], &bit);
+ if (!nmk_chip) {
+ dev_err(npct->dev,
+ "invalid pin offset %d in group %s at index %d\n",
+ g->grp.pins[i], g->grp.name, i);
+ goto out_pre_slpm_init;
+ }
+
+ slpm[nmk_chip->bank] &= ~BIT(bit);
+ }
nmk_gpio_glitch_slpm_init(slpm);
}
for (i = 0; i < g->grp.npins; i++) {
struct nmk_gpio_chip *nmk_chip;
- unsigned bit;
+ unsigned int bit;
- nmk_chip = find_nmk_gpio_from_pin(g->grp.pins[i]);
+ nmk_chip = find_nmk_gpio_from_pin(g->grp.pins[i], &bit);
if (!nmk_chip) {
dev_err(npct->dev,
"invalid pin offset %d in group %s at index %d\n",
g->grp.pins[i], g->grp.name, i);
goto out_glitch;
}
- dev_dbg(npct->dev, "setting pin %d to altsetting %d\n", g->grp.pins[i], g->altsetting);
+ dev_dbg(npct->dev, "setting pin %d to altsetting %d\n",
+ g->grp.pins[i], g->altsetting);
clk_enable(nmk_chip->clk);
- bit = g->grp.pins[i] % NMK_GPIO_PER_CHIP;
/*
* If the pin is switching to altfunc, and there was an
* interrupt installed on it which has been lazy disabled,
@@ -1609,7 +954,7 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
nmk_gpio_disable_lazy_irq(nmk_chip, bit);
__nmk_gpio_set_mode_safe(nmk_chip, bit,
- (g->altsetting & NMK_GPIO_ALT_C), glitch);
+ (g->altsetting & NMK_GPIO_ALT_C), glitch);
clk_disable(nmk_chip->clk);
/*
@@ -1622,29 +967,30 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
*/
if ((g->altsetting & NMK_GPIO_ALT_C) == NMK_GPIO_ALT_C)
nmk_prcm_altcx_set_mode(npct, g->grp.pins[i],
- g->altsetting >> NMK_GPIO_ALT_CX_SHIFT);
+ g->altsetting >> NMK_GPIO_ALT_CX_SHIFT);
}
/* When all pins are successfully reconfigured we get here */
ret = 0;
out_glitch:
- if (glitch) {
+ if (glitch)
nmk_gpio_glitch_slpm_restore(slpm);
+out_pre_slpm_init:
+ if (glitch)
spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
- }
return ret;
}
static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset)
+ unsigned int pin)
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
struct nmk_gpio_chip *nmk_chip;
struct gpio_chip *chip;
- unsigned bit;
+ unsigned int bit;
if (!range) {
dev_err(npct->dev, "invalid range\n");
@@ -1657,10 +1003,11 @@ static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev,
chip = range->gc;
nmk_chip = gpiochip_get_data(chip);
- dev_dbg(npct->dev, "enable pin %u as GPIO\n", offset);
+ dev_dbg(npct->dev, "enable pin %u as GPIO\n", pin);
+
+ find_nmk_gpio_from_pin(pin, &bit);
clk_enable(nmk_chip->clk);
- bit = offset % NMK_GPIO_PER_CHIP;
/* There is no glitch when converting any pin to GPIO */
__nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO);
clk_disable(nmk_chip->clk);
@@ -1670,11 +1017,11 @@ static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev,
static void nmk_gpio_disable_free(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset)
+ unsigned int pin)
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
- dev_dbg(npct->dev, "disable pin %u as GPIO\n", offset);
+ dev_dbg(npct->dev, "disable pin %u as GPIO\n", pin);
/* Set the pin to some default state, GPIO is usually default */
}
@@ -1688,34 +1035,34 @@ static const struct pinmux_ops nmk_pinmux_ops = {
.strict = true,
};
-static int nmk_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin,
+static int nmk_pin_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *config)
{
/* Not implemented */
return -EINVAL;
}
-static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
- unsigned long *configs, unsigned num_configs)
+static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
{
- static const char *pullnames[] = {
+ static const char * const pullnames[] = {
[NMK_GPIO_PULL_NONE] = "none",
[NMK_GPIO_PULL_UP] = "up",
[NMK_GPIO_PULL_DOWN] = "down",
[3] /* illegal */ = "??"
};
- static const char *slpmnames[] = {
+ static const char * const slpmnames[] = {
[NMK_GPIO_SLPM_INPUT] = "input/wakeup",
[NMK_GPIO_SLPM_NOCHANGE] = "no-change/no-wakeup",
};
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
struct nmk_gpio_chip *nmk_chip;
- unsigned bit;
- pin_cfg_t cfg;
+ unsigned int bit;
+ unsigned long cfg;
int pull, slpm, output, val, i;
bool lowemi, gpiomode, sleep;
- nmk_chip = find_nmk_gpio_from_pin(pin);
+ nmk_chip = find_nmk_gpio_from_pin(pin, &bit);
if (!nmk_chip) {
dev_err(npct->dev,
"invalid pin offset %d\n", pin);
@@ -1728,7 +1075,7 @@ static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
* here we just ignore that part. It's being handled by the
* framework and pinmux callback respectively.
*/
- cfg = (pin_cfg_t) configs[i];
+ cfg = configs[i];
pull = PIN_PULL(cfg);
slpm = PIN_SLPM(cfg);
output = PIN_DIR(cfg);
@@ -1773,13 +1120,12 @@ static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
lowemi ? "on" : "off");
clk_enable(nmk_chip->clk);
- bit = pin % NMK_GPIO_PER_CHIP;
if (gpiomode)
/* No glitch when going to GPIO mode */
__nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO);
- if (output)
+ if (output) {
__nmk_gpio_make_output(nmk_chip, bit, val);
- else {
+ } else {
__nmk_gpio_make_input(nmk_chip, bit);
__nmk_gpio_set_pull(nmk_chip, bit, pull);
}
@@ -1844,17 +1190,17 @@ static int nmk_pinctrl_resume(struct device *dev)
static int nmk_pinctrl_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
- struct device_node *prcm_np;
+ struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev);
+ struct fwnode_handle *prcm_fwnode;
struct nmk_pinctrl *npct;
- unsigned int version = 0;
+ uintptr_t version = 0;
int i;
npct = devm_kzalloc(&pdev->dev, sizeof(*npct), GFP_KERNEL);
if (!npct)
return -ENOMEM;
- version = (unsigned int)device_get_match_data(&pdev->dev);
+ version = (uintptr_t)device_get_match_data(&pdev->dev);
/* Poke in other ASIC variants here */
if (version == PINCTRL_NMK_STN8815)
@@ -1870,33 +1216,33 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
* or after this point: it shouldn't matter as the APIs are orthogonal.
*/
for (i = 0; i < NMK_MAX_BANKS; i++) {
- struct device_node *gpio_np;
+ struct fwnode_handle *gpio_fwnode;
struct nmk_gpio_chip *nmk_chip;
- gpio_np = of_parse_phandle(np, "nomadik-gpio-chips", i);
- if (gpio_np) {
- dev_info(&pdev->dev,
- "populate NMK GPIO %d \"%pOFn\"\n",
- i, gpio_np);
- nmk_chip = nmk_gpio_populate_chip(gpio_np, pdev);
- if (IS_ERR(nmk_chip))
- dev_err(&pdev->dev,
- "could not populate nmk chip struct "
- "- continue anyway\n");
- of_node_put(gpio_np);
- }
+ gpio_fwnode = fwnode_find_reference(fwnode, "nomadik-gpio-chips", i);
+ if (IS_ERR(gpio_fwnode))
+ continue;
+
+ dev_info(&pdev->dev, "populate NMK GPIO %d \"%pfwP\"\n", i, gpio_fwnode);
+ nmk_chip = nmk_gpio_populate_chip(gpio_fwnode, pdev);
+ if (IS_ERR(nmk_chip))
+ dev_err(&pdev->dev,
+ "could not populate nmk chip struct - continue anyway\n");
+ else
+ /* We are NOT compatible with mobileye,eyeq5-gpio. */
+ BUG_ON(nmk_chip->is_mobileye_soc);
+ fwnode_handle_put(gpio_fwnode);
}
- prcm_np = of_parse_phandle(np, "prcm", 0);
- if (prcm_np) {
- npct->prcm_base = of_iomap(prcm_np, 0);
- of_node_put(prcm_np);
+ prcm_fwnode = fwnode_find_reference(fwnode, "prcm", 0);
+ if (!IS_ERR(prcm_fwnode)) {
+ npct->prcm_base = fwnode_iomap(prcm_fwnode, 0);
+ fwnode_handle_put(prcm_fwnode);
}
if (!npct->prcm_base) {
if (version == PINCTRL_NMK_STN8815) {
dev_info(&pdev->dev,
- "No PRCM base, "
- "assuming no ALT-Cx control is available\n");
+ "No PRCM base, assuming no ALT-Cx control is available\n");
} else {
dev_err(&pdev->dev, "missing PRCM base address\n");
return -EINVAL;
@@ -1919,19 +1265,6 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id nmk_gpio_match[] = {
- { .compatible = "st,nomadik-gpio", },
- {}
-};
-
-static struct platform_driver nmk_gpio_driver = {
- .driver = {
- .name = "gpio",
- .of_match_table = nmk_gpio_match,
- },
- .probe = nmk_gpio_probe,
-};
-
static SIMPLE_DEV_PM_OPS(nmk_pinctrl_pm_ops,
nmk_pinctrl_suspend,
nmk_pinctrl_resume);
@@ -1945,12 +1278,6 @@ static struct platform_driver nmk_pinctrl_driver = {
.probe = nmk_pinctrl_probe,
};
-static int __init nmk_gpio_init(void)
-{
- return platform_driver_register(&nmk_gpio_driver);
-}
-subsys_initcall(nmk_gpio_init);
-
static int __init nmk_pinctrl_init(void)
{
return platform_driver_register(&nmk_pinctrl_driver);
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.h b/drivers/pinctrl/nomadik/pinctrl-nomadik.h
deleted file mode 100644
index 1ef2559bc5710..0000000000000
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.h
+++ /dev/null
@@ -1,180 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef PINCTRL_PINCTRL_NOMADIK_H
-#define PINCTRL_PINCTRL_NOMADIK_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include <linux/pinctrl/pinctrl.h>
-
-/* Package definitions */
-#define PINCTRL_NMK_STN8815 0
-#define PINCTRL_NMK_DB8500 1
-
-/* Alternate functions: function C is set in hw by setting both A and B */
-#define NMK_GPIO_ALT_GPIO 0
-#define NMK_GPIO_ALT_A 1
-#define NMK_GPIO_ALT_B 2
-#define NMK_GPIO_ALT_C (NMK_GPIO_ALT_A | NMK_GPIO_ALT_B)
-
-#define NMK_GPIO_ALT_CX_SHIFT 2
-#define NMK_GPIO_ALT_C1 ((1<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
-#define NMK_GPIO_ALT_C2 ((2<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
-#define NMK_GPIO_ALT_C3 ((3<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
-#define NMK_GPIO_ALT_C4 ((4<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
-
-#define PRCM_GPIOCR_ALTCX(pin_num,\
- altc1_used, altc1_ri, altc1_cb,\
- altc2_used, altc2_ri, altc2_cb,\
- altc3_used, altc3_ri, altc3_cb,\
- altc4_used, altc4_ri, altc4_cb)\
-{\
- .pin = pin_num,\
- .altcx[PRCM_IDX_GPIOCR_ALTC1] = {\
- .used = altc1_used,\
- .reg_index = altc1_ri,\
- .control_bit = altc1_cb\
- },\
- .altcx[PRCM_IDX_GPIOCR_ALTC2] = {\
- .used = altc2_used,\
- .reg_index = altc2_ri,\
- .control_bit = altc2_cb\
- },\
- .altcx[PRCM_IDX_GPIOCR_ALTC3] = {\
- .used = altc3_used,\
- .reg_index = altc3_ri,\
- .control_bit = altc3_cb\
- },\
- .altcx[PRCM_IDX_GPIOCR_ALTC4] = {\
- .used = altc4_used,\
- .reg_index = altc4_ri,\
- .control_bit = altc4_cb\
- },\
-}
-
-/**
- * enum prcm_gpiocr_reg_index
- * Used to reference an PRCM GPIOCR register address.
- */
-enum prcm_gpiocr_reg_index {
- PRCM_IDX_GPIOCR1,
- PRCM_IDX_GPIOCR2,
- PRCM_IDX_GPIOCR3
-};
-/**
- * enum prcm_gpiocr_altcx_index
- * Used to reference an Other alternate-C function.
- */
-enum prcm_gpiocr_altcx_index {
- PRCM_IDX_GPIOCR_ALTC1,
- PRCM_IDX_GPIOCR_ALTC2,
- PRCM_IDX_GPIOCR_ALTC3,
- PRCM_IDX_GPIOCR_ALTC4,
- PRCM_IDX_GPIOCR_ALTC_MAX,
-};
-
-/**
- * struct prcm_gpio_altcx - Other alternate-C function
- * @used: other alternate-C function availability
- * @reg_index: PRCM GPIOCR register index used to control the function
- * @control_bit: PRCM GPIOCR bit used to control the function
- */
-struct prcm_gpiocr_altcx {
- bool used:1;
- u8 reg_index:2;
- u8 control_bit:5;
-} __packed;
-
-/**
- * struct prcm_gpio_altcx_pin_desc - Other alternate-C pin
- * @pin: The pin number
- * @altcx: array of other alternate-C[1-4] functions
- */
-struct prcm_gpiocr_altcx_pin_desc {
- unsigned short pin;
- struct prcm_gpiocr_altcx altcx[PRCM_IDX_GPIOCR_ALTC_MAX];
-};
-
-/**
- * struct nmk_function - Nomadik pinctrl mux function
- * @name: The name of the function, exported to pinctrl core.
- * @groups: An array of pin groups that may select this function.
- * @ngroups: The number of entries in @groups.
- */
-struct nmk_function {
- const char *name;
- const char * const *groups;
- unsigned ngroups;
-};
-
-/**
- * struct nmk_pingroup - describes a Nomadik pin group
- * @grp: Generic data of the pin group (name and pins)
- * @altsetting: the altsetting to apply to all pins in this group to
- * configure them to be used by a function
- */
-struct nmk_pingroup {
- struct pingroup grp;
- int altsetting;
-};
-
-#define NMK_PIN_GROUP(a, b) \
- { \
- .grp = PINCTRL_PINGROUP(#a, a##_pins, ARRAY_SIZE(a##_pins)), \
- .altsetting = b, \
- }
-
-/**
- * struct nmk_pinctrl_soc_data - Nomadik pin controller per-SoC configuration
- * @pins: An array describing all pins the pin controller affects.
- * All pins which are also GPIOs must be listed first within the
- * array, and be numbered identically to the GPIO controller's
- * numbering.
- * @npins: The number of entries in @pins.
- * @functions: The functions supported on this SoC.
- * @nfunction: The number of entries in @functions.
- * @groups: An array describing all pin groups the pin SoC supports.
- * @ngroups: The number of entries in @groups.
- * @altcx_pins: The pins that support Other alternate-C function on this SoC
- * @npins_altcx: The number of Other alternate-C pins
- * @prcm_gpiocr_registers: The array of PRCM GPIOCR registers on this SoC
- */
-struct nmk_pinctrl_soc_data {
- const struct pinctrl_pin_desc *pins;
- unsigned npins;
- const struct nmk_function *functions;
- unsigned nfunctions;
- const struct nmk_pingroup *groups;
- unsigned ngroups;
- const struct prcm_gpiocr_altcx_pin_desc *altcx_pins;
- unsigned npins_altcx;
- const u16 *prcm_gpiocr_registers;
-};
-
-#ifdef CONFIG_PINCTRL_STN8815
-
-void nmk_pinctrl_stn8815_init(const struct nmk_pinctrl_soc_data **soc);
-
-#else
-
-static inline void
-nmk_pinctrl_stn8815_init(const struct nmk_pinctrl_soc_data **soc)
-{
-}
-
-#endif
-
-#ifdef CONFIG_PINCTRL_DB8500
-
-void nmk_pinctrl_db8500_init(const struct nmk_pinctrl_soc_data **soc);
-
-#else
-
-static inline void
-nmk_pinctrl_db8500_init(const struct nmk_pinctrl_soc_data **soc)
-{
-}
-
-#endif
-
-#endif /* PINCTRL_PINCTRL_NOMADIK_H */
diff --git a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
index 4589900244c79..cdad4ef11a2f6 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
@@ -474,7 +474,7 @@ enum {
#undef WPCM450_GRP
};
-static struct pingroup wpcm450_groups[] = {
+static const struct pingroup wpcm450_groups[] = {
#define WPCM450_GRP(x) PINCTRL_PINGROUP(#x, x ## _pins, ARRAY_SIZE(x ## _pins))
WPCM450_GRPS
#undef WPCM450_GRP
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 49f89b70dcecb..7f66ec73199a9 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -1159,7 +1159,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
}
ret = devm_request_irq(&pdev->dev, gpio_dev->irq, amd_gpio_irq_handler,
- IRQF_SHARED | IRQF_ONESHOT, KBUILD_MODNAME, gpio_dev);
+ IRQF_SHARED | IRQF_COND_ONESHOT, KBUILD_MODNAME, gpio_dev);
if (ret)
goto out2;
diff --git a/drivers/pinctrl/pinctrl-aw9523.c b/drivers/pinctrl/pinctrl-aw9523.c
new file mode 100644
index 0000000000000..4edd371c469fb
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-aw9523.c
@@ -0,0 +1,1119 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Awinic AW9523B i2c pin controller driver
+ * Copyright (c) 2020, AngeloGioacchino Del Regno
+ * <angelogioacchino.delregno@somainline.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#define AW9523_MAX_FUNCS 2
+#define AW9523_NUM_PORTS 2
+#define AW9523_PINS_PER_PORT 8
+
+/*
+ * HW needs at least 20uS for reset and at least 1-2uS to recover from
+ * reset, but we also have to account for possible board quirks, if any:
+ * for this reason, keep reset asserted for 50uS and wait for 20uS
+ * to recover from the reset.
+ */
+#define AW9523_HW_RESET_US 50
+#define AW9523_HW_RESET_RECOVERY_US 20
+
+/* Port 0: P0_0...P0_7 - Port 1: P1_0...P1_7 */
+#define AW9523_PIN_TO_PORT(pin) ((pin) >> 3)
+#define AW9523_REG_IN_STATE(pin) (0x00 + AW9523_PIN_TO_PORT(pin))
+#define AW9523_REG_OUT_STATE(pin) (0x02 + AW9523_PIN_TO_PORT(pin))
+#define AW9523_REG_CONF_STATE(pin) (0x04 + AW9523_PIN_TO_PORT(pin))
+#define AW9523_REG_INTR_DIS(pin) (0x06 + AW9523_PIN_TO_PORT(pin))
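+/*
+ * Worked example of the macro arithmetic above (illustrative only): pin 10
+ * is P1_2, so AW9523_PIN_TO_PORT(10) = 1 and AW9523_REG_IN_STATE(10)
+ * resolves to 0x01, i.e. the Port 1 input state register.
+ */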
+#define AW9523_REG_CHIPID 0x10
+#define AW9523_VAL_EXPECTED_CHIPID 0x23
+
+#define AW9523_REG_GCR 0x11
+#define AW9523_GCR_ISEL_MASK GENMASK(1, 0)
+#define AW9523_GCR_GPOMD_MASK BIT(4)
+
+#define AW9523_REG_PORT_MODE(pin) (0x12 + AW9523_PIN_TO_PORT(pin))
+#define AW9523_REG_SOFT_RESET 0x7f
+#define AW9523_VAL_RESET 0x00
+
+/*
+ * struct aw9523_irq - Interrupt controller structure
+ * @lock: mutex locking for the irq bus
+ * @irqchip: structure holding irqchip params
+ * @cached_gpio: stores the previous gpio status for bit comparison
+ */
+struct aw9523_irq {
+ struct mutex lock;
+ struct irq_chip *irqchip;
+ u16 cached_gpio;
+};
+
+/*
+ * struct aw9523_pinmux - Pin mux params
+ * @name: Name of the mux
+ * @grps: Groups of the mux
+ * @num_grps: Number of entries in @grps
+ */
+struct aw9523_pinmux {
+ const char *name;
+ const char * const *grps;
+ const u8 num_grps;
+};
+
+/*
+ * struct aw9523 - Main driver structure
+ * @dev: device handle
+ * @regmap: regmap handle for current device
+ * @i2c_lock: Mutex lock for i2c operations
+ * @reset_gpio: Hardware reset (RSTN) signal GPIO
+ * @vio_vreg: VCC regulator (Optional)
+ * @pctl: pinctrl handle for current device
+ * @gpio: structure holding gpiochip params
+ * @irq: Interrupt controller structure
+ */
+struct aw9523 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex i2c_lock;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vio_vreg;
+ struct pinctrl_dev *pctl;
+ struct gpio_chip gpio;
+ struct aw9523_irq *irq;
+};
+
+static const struct pinctrl_pin_desc aw9523_pins[] = {
+ /* Port 0 */
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+
+ /* Port 1 */
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+};
+
+static int aw9523_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(aw9523_pins);
+}
+
+static const char *aw9523_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return aw9523_pins[selector].name;
+}
+
+static int aw9523_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ *pins = &aw9523_pins[selector].number;
+ *num_pins = 1;
+ return 0;
+}
+
+static const struct pinctrl_ops aw9523_pinctrl_ops = {
+ .get_groups_count = aw9523_pinctrl_get_groups_count,
+ .get_group_pins = aw9523_pinctrl_get_group_pins,
+ .get_group_name = aw9523_pinctrl_get_group_name,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static const char * const gpio_pwm_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5",
+ "gpio6", "gpio7", "gpio8", "gpio9", "gpio10", "gpio11",
+ "gpio12", "gpio13", "gpio14", "gpio15"
+};
+
+/* Warning: Do NOT reorder this array */
+static const struct aw9523_pinmux aw9523_pmx[] = {
+ {
+ .name = "pwm",
+ .grps = gpio_pwm_groups,
+ .num_grps = ARRAY_SIZE(gpio_pwm_groups),
+ },
+ {
+ .name = "gpio",
+ .grps = gpio_pwm_groups,
+ .num_grps = ARRAY_SIZE(gpio_pwm_groups),
+ },
+};
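+
+/*
+ * The index into this array is what aw9523_pmx_set_mux() programs into the
+ * per-pin port-mode bit: index 0 ("pwm") clears the bit, selecting the LED
+ * current-driver mode, while index 1 ("gpio") sets it, selecting GPIO mode.
+ * That is why the order above must not be changed.
+ */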
+
+static int aw9523_pmx_get_funcs_count(struct pinctrl_dev *pctl)
+{
+ return ARRAY_SIZE(aw9523_pmx);
+}
+
+static const char *aw9523_pmx_get_fname(struct pinctrl_dev *pctl,
+ unsigned int sel)
+{
+ return aw9523_pmx[sel].name;
+}
+
+static int aw9523_pmx_get_groups(struct pinctrl_dev *pctl, unsigned int sel,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ *groups = aw9523_pmx[sel].grps;
+ *num_groups = aw9523_pmx[sel].num_grps;
+ return 0;
+}
+
+static int aw9523_pmx_set_mux(struct pinctrl_dev *pctl, unsigned int fsel,
+ unsigned int grp)
+{
+ struct aw9523 *awi = pinctrl_dev_get_drvdata(pctl);
+ int ret, pin = aw9523_pins[grp].number % AW9523_PINS_PER_PORT;
+
+ if (fsel >= ARRAY_SIZE(aw9523_pmx))
+ return -EINVAL;
+
+ /*
+ * This maps directly to the aw9523_pmx array: programming a
+ * high bit means "gpio" and a low bit means "pwm".
+ */
+ mutex_lock(&awi->i2c_lock);
+ ret = regmap_update_bits(awi->regmap, AW9523_REG_PORT_MODE(pin),
+ BIT(pin), (fsel ? BIT(pin) : 0));
+ mutex_unlock(&awi->i2c_lock);
+ return ret;
+}
+
+static const struct pinmux_ops aw9523_pinmux_ops = {
+ .get_functions_count = aw9523_pmx_get_funcs_count,
+ .get_function_name = aw9523_pmx_get_fname,
+ .get_function_groups = aw9523_pmx_get_groups,
+ .set_mux = aw9523_pmx_set_mux,
+};
+
+static int aw9523_pcfg_param_to_reg(enum pin_config_param pcp, int pin, u8 *r)
+{
+ u8 reg;
+
+ switch (pcp) {
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ reg = AW9523_REG_IN_STATE(pin);
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ reg = AW9523_REG_GCR;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ reg = AW9523_REG_CONF_STATE(pin);
+ break;
+ case PIN_CONFIG_OUTPUT:
+ reg = AW9523_REG_OUT_STATE(pin);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ *r = reg;
+
+ return 0;
+}
+
+static int aw9523_pconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct aw9523 *awi = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ int regbit = pin % AW9523_PINS_PER_PORT;
+ unsigned int val;
+ u8 reg;
+ int rc;
+
+ rc = aw9523_pcfg_param_to_reg(param, pin, &reg);
+ if (rc)
+ return rc;
+
+ mutex_lock(&awi->i2c_lock);
+ rc = regmap_read(awi->regmap, reg, &val);
+ mutex_unlock(&awi->i2c_lock);
+ if (rc)
+ return rc;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_INPUT_ENABLE:
+ case PIN_CONFIG_OUTPUT:
+ val &= BIT(regbit);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ val &= BIT(regbit);
+ val = !val;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ if (pin >= AW9523_PINS_PER_PORT)
+ val = 0;
+ else
+ val = !FIELD_GET(AW9523_GCR_GPOMD_MASK, val);
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ if (pin >= AW9523_PINS_PER_PORT)
+ val = 1;
+ else
+ val = FIELD_GET(AW9523_GCR_GPOMD_MASK, val);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
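+	/*
+	 * Generic pinconf treats an -EINVAL return from .pin_config_get as
+	 * "this option is not active on the pin", so only asserted options
+	 * end up being reported.
+	 */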
+ if (val < 1)
+ return -EINVAL;
+
+ *config = pinconf_to_config_packed(param, !!val);
+
+ return rc;
+}
+
+static int aw9523_pconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct aw9523 *awi = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param;
+ int regbit = pin % AW9523_PINS_PER_PORT;
+ u32 arg;
+ u8 reg;
+ unsigned int mask, val;
+ int i, rc;
+
+ mutex_lock(&awi->i2c_lock);
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ rc = aw9523_pcfg_param_to_reg(param, pin, &reg);
+ if (rc)
+ goto end;
+
+ switch (param) {
+ case PIN_CONFIG_OUTPUT:
+ /* First, enable pin output */
+ rc = regmap_update_bits(awi->regmap,
+ AW9523_REG_CONF_STATE(pin),
+ BIT(regbit), 0);
+ if (rc)
+ goto end;
+
+ /* Then, fall through to config output level */
+ fallthrough;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ arg = !arg;
+ fallthrough;
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_INPUT_ENABLE:
+ mask = BIT(regbit);
+ val = arg ? BIT(regbit) : 0;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ /* Open-Drain is supported only on port 0 */
+ if (pin >= AW9523_PINS_PER_PORT) {
+ rc = -EOPNOTSUPP;
+ goto end;
+ }
+ mask = AW9523_GCR_GPOMD_MASK;
+ val = 0;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ /* Port 1 is always Push-Pull */
+ if (pin >= AW9523_PINS_PER_PORT) {
+ mask = 0;
+ val = 0;
+ continue;
+ }
+ mask = AW9523_GCR_GPOMD_MASK;
+ val = AW9523_GCR_GPOMD_MASK;
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ goto end;
+ }
+
+ rc = regmap_update_bits(awi->regmap, reg, mask, val);
+ if (rc)
+ goto end;
+ }
+end:
+ mutex_unlock(&awi->i2c_lock);
+ return rc;
+}
+
+static const struct pinconf_ops aw9523_pinconf_ops = {
+ .pin_config_get = aw9523_pconf_get,
+ .pin_config_set = aw9523_pconf_set,
+ .is_generic = true,
+};
+
+/*
+ * aw9523_get_pin_direction - Get pin direction
+ * @regmap: Regmap structure
+ * @pin: gpiolib pin number
+ * @n: pin index in port register
+ *
+ * Return: Pin direction for success or negative number for error
+ */
+static int aw9523_get_pin_direction(struct regmap *regmap, u8 pin, u8 n)
+{
+ int ret;
+
+ ret = regmap_test_bits(regmap, AW9523_REG_CONF_STATE(pin), BIT(n));
+ if (ret < 0)
+ return ret;
+
+ return ret ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
+}
+
+/*
+ * aw9523_get_port_state - Get input or output state for entire port
+ * @regmap: Regmap structure
+ * @pin: gpiolib pin number
+ * @regbit: hw pin index, used to retrieve port number
+ * @state: returned port state
+ *
+ * Return: Zero for success or negative number for error
+ */
+static int aw9523_get_port_state(struct regmap *regmap, u8 pin,
+ u8 regbit, unsigned int *state)
+{
+ u8 reg;
+ int dir;
+
+ dir = aw9523_get_pin_direction(regmap, pin, regbit);
+ if (dir < 0)
+ return dir;
+
+ if (dir == GPIO_LINE_DIRECTION_IN)
+ reg = AW9523_REG_IN_STATE(pin);
+ else
+ reg = AW9523_REG_OUT_STATE(pin);
+
+ return regmap_read(regmap, reg, state);
+}
+
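+/*
+ * The expander only signals an interrupt on any change of an unmasked input
+ * line; there is no per-edge configuration in hardware, so only
+ * IRQ_TYPE_NONE and IRQ_TYPE_EDGE_BOTH are accepted here.
+ */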
+static int aw9523_gpio_irq_type(struct irq_data *d, unsigned int type)
+{
+ switch (type) {
+ case IRQ_TYPE_NONE:
+ case IRQ_TYPE_EDGE_BOTH:
+ return 0;
+ default:
+ return -EINVAL;
+	}
+}
+
+/*
+ * aw9523_irq_mask - Mask interrupt
+ * @d: irq data
+ *
+ * Sets which interrupt to mask in the bitmap;
+ * The interrupt will be masked when unlocking the irq bus.
+ */
+static void aw9523_irq_mask(struct irq_data *d)
+{
+ struct aw9523 *awi = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ unsigned int n = d->hwirq % AW9523_PINS_PER_PORT;
+
+ regmap_update_bits(awi->regmap,
+ AW9523_REG_INTR_DIS(d->hwirq),
+ BIT(n), BIT(n));
+ gpiochip_disable_irq(&awi->gpio, irqd_to_hwirq(d));
+}
+
+/*
+ * aw9523_irq_unmask - Unmask interrupt
+ * @d: irq data
+ *
+ * Sets which interrupt to unmask in the bitmap;
+ * The interrupt will be masked when unlocking the irq bus.
+ */
+static void aw9523_irq_unmask(struct irq_data *d)
+{
+ struct aw9523 *awi = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ unsigned int n = d->hwirq % AW9523_PINS_PER_PORT;
+
+ gpiochip_enable_irq(&awi->gpio, irqd_to_hwirq(d));
+ regmap_update_bits(awi->regmap,
+ AW9523_REG_INTR_DIS(d->hwirq),
+ BIT(n), 0);
+}
+
+static irqreturn_t aw9523_irq_thread_func(int irq, void *dev_id)
+{
+ struct aw9523 *awi = (struct aw9523 *)dev_id;
+ unsigned long n, val = 0;
+ unsigned long changed_gpio;
+ unsigned int tmp, port_pin, i, ret;
+
+ for (i = 0; i < AW9523_NUM_PORTS; i++) {
+ port_pin = i * AW9523_PINS_PER_PORT;
+ ret = regmap_read(awi->regmap,
+ AW9523_REG_IN_STATE(port_pin),
+ &tmp);
+ if (ret)
+ return ret;
+ val |= (u8)tmp << (i * 8);
+ }
+
+ /* Handle GPIO input release interrupt as well */
+ changed_gpio = awi->irq->cached_gpio ^ val;
+ awi->irq->cached_gpio = val;
+
+ /*
+ * To avoid up to four *slow* i2c reads from any driver hooked
+ * up to our interrupts, just check for the irq_find_mapping
+ * result: if the interrupt is not mapped, then we don't want
+ * to care about it.
+ */
+ for_each_set_bit(n, &changed_gpio, awi->gpio.ngpio) {
+ tmp = irq_find_mapping(awi->gpio.irq.domain, n);
+ if (tmp <= 0)
+ continue;
+ handle_nested_irq(tmp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * aw9523_irq_bus_lock - Grab lock for interrupt operation
+ * @d: irq data
+ */
+static void aw9523_irq_bus_lock(struct irq_data *d)
+{
+ struct aw9523 *awi = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+
+ mutex_lock(&awi->irq->lock);
+ regcache_cache_only(awi->regmap, true);
+}
+
+/*
+ * aw9523_irq_bus_sync_unlock - Synchronize state and unlock
+ * @d: irq data
+ *
+ * Writes the interrupt mask bits (found in the bit map) to the
+ * hardware, then unlocks the bus.
+ */
+static void aw9523_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct aw9523 *awi = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+
+ regcache_cache_only(awi->regmap, false);
+ regcache_sync(awi->regmap);
+ mutex_unlock(&awi->irq->lock);
+}
+
+static int aw9523_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct aw9523 *awi = gpiochip_get_data(chip);
+ u8 regbit = offset % AW9523_PINS_PER_PORT;
+ int ret;
+
+ mutex_lock(&awi->i2c_lock);
+ ret = aw9523_get_pin_direction(awi->regmap, offset, regbit);
+ mutex_unlock(&awi->i2c_lock);
+
+ return ret;
+}
+
+static int aw9523_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct aw9523 *awi = gpiochip_get_data(chip);
+ u8 regbit = offset % AW9523_PINS_PER_PORT;
+ unsigned int val;
+ int ret;
+
+ mutex_lock(&awi->i2c_lock);
+ ret = aw9523_get_port_state(awi->regmap, offset, regbit, &val);
+ mutex_unlock(&awi->i2c_lock);
+ if (ret)
+ return ret;
+
+ return !!(val & BIT(regbit));
+}
+
+/**
+ * _aw9523_gpio_get_multiple - Get I/O state for an entire port
+ * @awi: Controller data
+ * @regbit: first pin of the port (0 or 8), used to select the port registers
+ * @state: returned port I/O state
+ * @mask: lines of the port to read
+ *
+ * Return: Zero for success or negative number for error
+ */
+static int _aw9523_gpio_get_multiple(struct aw9523 *awi, u8 regbit,
+ u8 *state, u8 mask)
+{
+ u32 dir_in, val;
+ u8 m;
+ int ret;
+
+ /* Registers are 8-bits wide */
+ ret = regmap_read(awi->regmap, AW9523_REG_CONF_STATE(regbit), &dir_in);
+ if (ret)
+ return ret;
+ *state = 0;
+
+ m = mask & dir_in;
+ if (m) {
+ ret = regmap_read(awi->regmap, AW9523_REG_IN_STATE(regbit),
+ &val);
+ if (ret)
+ return ret;
+ *state |= (u8)val & m;
+ }
+
+ m = mask & ~dir_in;
+ if (m) {
+ ret = regmap_read(awi->regmap, AW9523_REG_OUT_STATE(regbit),
+ &val);
+ if (ret)
+ return ret;
+ *state |= (u8)val & m;
+ }
+
+ return 0;
+}
+
+static int aw9523_gpio_get_multiple(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits)
+{
+ struct aw9523 *awi = gpiochip_get_data(chip);
+ u8 m, state = 0;
+ int ret;
+
+ mutex_lock(&awi->i2c_lock);
+
+ /* Port 0 (gpio 0-7) */
+ m = *mask & U8_MAX;
+ if (m) {
+ ret = _aw9523_gpio_get_multiple(awi, 0, &state, m);
+ if (ret)
+ goto out;
+ }
+ *bits = state;
+
+ /* Port 1 (gpio 8-15) */
+ m = (*mask >> 8) & U8_MAX;
+ if (m) {
+ ret = _aw9523_gpio_get_multiple(awi, AW9523_PINS_PER_PORT,
+ &state, m);
+ if (ret)
+ goto out;
+
+ *bits |= (state << 8);
+ }
+out:
+ mutex_unlock(&awi->i2c_lock);
+ return ret;
+}
+
+static void aw9523_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits)
+{
+ struct aw9523 *awi = gpiochip_get_data(chip);
+ u8 mask_lo, mask_hi, bits_lo, bits_hi;
+ unsigned int reg;
+ int ret = 0;
+
+ mask_lo = *mask & U8_MAX;
+ mask_hi = (*mask >> 8) & U8_MAX;
+ mutex_lock(&awi->i2c_lock);
+ if (mask_hi) {
+ reg = AW9523_REG_OUT_STATE(AW9523_PINS_PER_PORT);
+ bits_hi = (*bits >> 8) & U8_MAX;
+
+ ret = regmap_write_bits(awi->regmap, reg, mask_hi, bits_hi);
+ if (ret) {
+ dev_warn(awi->dev, "Cannot write port1 out level\n");
+ goto out;
+ }
+ }
+ if (mask_lo) {
+ reg = AW9523_REG_OUT_STATE(0);
+ bits_lo = *bits & U8_MAX;
+ ret = regmap_write_bits(awi->regmap, reg, mask_lo, bits_lo);
+ if (ret)
+ dev_warn(awi->dev, "Cannot write port0 out level\n");
+ }
+out:
+ mutex_unlock(&awi->i2c_lock);
+}
+
+static void aw9523_gpio_set(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct aw9523 *awi = gpiochip_get_data(chip);
+ u8 regbit = offset % AW9523_PINS_PER_PORT;
+
+ mutex_lock(&awi->i2c_lock);
+ regmap_update_bits(awi->regmap, AW9523_REG_OUT_STATE(offset),
+ BIT(regbit), value ? BIT(regbit) : 0);
+ mutex_unlock(&awi->i2c_lock);
+}
+
+
+static int aw9523_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ struct aw9523 *awi = gpiochip_get_data(chip);
+ u8 regbit = offset % AW9523_PINS_PER_PORT;
+ int ret;
+
+ mutex_lock(&awi->i2c_lock);
+ ret = regmap_update_bits(awi->regmap, AW9523_REG_CONF_STATE(offset),
+ BIT(regbit), BIT(regbit));
+ mutex_unlock(&awi->i2c_lock);
+
+ return ret;
+}
+
+static int aw9523_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct aw9523 *awi = gpiochip_get_data(chip);
+ u8 regbit = offset % AW9523_PINS_PER_PORT;
+ int ret;
+
+ mutex_lock(&awi->i2c_lock);
+ ret = regmap_update_bits(awi->regmap, AW9523_REG_OUT_STATE(offset),
+ BIT(regbit), value ? BIT(regbit) : 0);
+ if (ret)
+ goto end;
+
+ ret = regmap_update_bits(awi->regmap, AW9523_REG_CONF_STATE(offset),
+ BIT(regbit), 0);
+end:
+ mutex_unlock(&awi->i2c_lock);
+ return ret;
+}
+
+static int aw9523_drive_reset_gpio(struct aw9523 *awi)
+{
+ unsigned int chip_id;
+ int ret;
+
+ /*
+ * If the chip is already configured for any reason, then we
+ * will probably succeed in sending the soft reset signal to
+ * the hardware through I2C: this operation takes less time
+ * compared to a full HW reset and it gives the same results.
+ */
+ ret = regmap_write(awi->regmap, AW9523_REG_SOFT_RESET, 0);
+ if (ret == 0)
+ goto done;
+
+ dev_dbg(awi->dev, "Cannot execute soft reset: trying hard reset\n");
+ ret = gpiod_direction_output(awi->reset_gpio, 0);
+ if (ret)
+ return ret;
+
+ /* The reset pulse has to be longer than 20uS due to deglitch */
+ usleep_range(AW9523_HW_RESET_US, AW9523_HW_RESET_US + 1);
+
+ ret = gpiod_direction_output(awi->reset_gpio, 1);
+ if (ret)
+ return ret;
+done:
+ /* The HW needs at least 1uS to reliably recover after reset */
+ usleep_range(AW9523_HW_RESET_RECOVERY_US,
+ AW9523_HW_RESET_RECOVERY_US + 1);
+
+ /* Check the ChipID */
+ ret = regmap_read(awi->regmap, AW9523_REG_CHIPID, &chip_id);
+ if (ret) {
+ dev_err(awi->dev, "Cannot read Chip ID: %d\n", ret);
+ return ret;
+ }
+ if (chip_id != AW9523_VAL_EXPECTED_CHIPID) {
+ dev_err(awi->dev, "Bad ChipID; read 0x%x, expected 0x%x\n",
+ chip_id, AW9523_VAL_EXPECTED_CHIPID);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int aw9523_hw_reset(struct aw9523 *awi)
+{
+ int ret, max_retries = 2;
+
+ /* Sometimes the chip needs more than one reset cycle */
+ do {
+ ret = aw9523_drive_reset_gpio(awi);
+ if (ret == 0)
+ break;
+ max_retries--;
+ } while (max_retries);
+
+ return ret;
+}
+
+static int aw9523_init_gpiochip(struct aw9523 *awi, unsigned int npins)
+{
+ struct device *dev = awi->dev;
+ struct gpio_chip *gc = &awi->gpio;
+
+ gc->label = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!gc->label)
+ return -ENOMEM;
+
+ gc->base = -1;
+ gc->ngpio = npins;
+ gc->get_direction = aw9523_gpio_get_direction;
+ gc->direction_input = aw9523_direction_input;
+ gc->direction_output = aw9523_direction_output;
+ gc->get = aw9523_gpio_get;
+ gc->get_multiple = aw9523_gpio_get_multiple;
+ gc->set = aw9523_gpio_set;
+ gc->set_multiple = aw9523_gpio_set_multiple;
+ gc->set_config = gpiochip_generic_config;
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->can_sleep = false;
+
+ return 0;
+}
+
+static const struct irq_chip aw9523_irq_chip = {
+ .name = "aw9523",
+ .irq_mask = aw9523_irq_mask,
+ .irq_unmask = aw9523_irq_unmask,
+ .irq_bus_lock = aw9523_irq_bus_lock,
+ .irq_bus_sync_unlock = aw9523_irq_bus_sync_unlock,
+ .irq_set_type = aw9523_gpio_irq_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static int aw9523_init_irq(struct aw9523 *awi, int irq)
+{
+ struct device *dev = awi->dev;
+ struct gpio_irq_chip *girq;
+ struct irq_chip *irqchip;
+ int ret;
+
+ if (!device_property_read_bool(dev, "interrupt-controller"))
+ return 0;
+
+ irqchip = devm_kzalloc(dev, sizeof(*irqchip), GFP_KERNEL);
+ if (!irqchip)
+ return -ENOMEM;
+
+ awi->irq = devm_kzalloc(dev, sizeof(*awi->irq), GFP_KERNEL);
+ if (!awi->irq)
+ return -ENOMEM;
+
+ awi->irq->irqchip = irqchip;
+ mutex_init(&awi->irq->lock);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, aw9523_irq_thread_func,
+ IRQF_ONESHOT, dev_name(dev), awi);
+ if (ret) {
+ dev_err(dev, "Failed to request irq %d\n", irq);
+ return ret;
+ }
+
+ girq = &awi->gpio.irq;
+ gpio_irq_chip_set_chip(girq, &aw9523_irq_chip);
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_EDGE_BOTH;
+ girq->handler = handle_simple_irq;
+ girq->threaded = true;
+
+ return 0;
+}
+
+static bool aw9523_is_reg_hole(unsigned int reg)
+{
+ return (reg > AW9523_REG_PORT_MODE(AW9523_PINS_PER_PORT) &&
+ reg < AW9523_REG_SOFT_RESET) ||
+ (reg > AW9523_REG_INTR_DIS(AW9523_PINS_PER_PORT) &&
+ reg < AW9523_REG_CHIPID);
+}
+
+static bool aw9523_readable_reg(struct device *dev, unsigned int reg)
+{
+ /* All available registers (minus holes) can be read */
+ return !aw9523_is_reg_hole(reg);
+}
+
+static bool aw9523_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return aw9523_is_reg_hole(reg) ||
+ reg == AW9523_REG_IN_STATE(0) ||
+ reg == AW9523_REG_IN_STATE(AW9523_PINS_PER_PORT) ||
+ reg == AW9523_REG_CHIPID ||
+ reg == AW9523_REG_SOFT_RESET;
+}
+
+static bool aw9523_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return !aw9523_is_reg_hole(reg) && reg != AW9523_REG_CHIPID;
+}
+
+static bool aw9523_precious_reg(struct device *dev, unsigned int reg)
+{
+ /* Reading AW9523_REG_IN_STATE clears interrupt status */
+ return aw9523_is_reg_hole(reg) ||
+ reg == AW9523_REG_IN_STATE(0) ||
+ reg == AW9523_REG_IN_STATE(AW9523_PINS_PER_PORT);
+}
+
+static const struct regmap_config aw9523_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .reg_stride = 1,
+
+ .precious_reg = aw9523_precious_reg,
+ .readable_reg = aw9523_readable_reg,
+ .volatile_reg = aw9523_volatile_reg,
+ .writeable_reg = aw9523_writeable_reg,
+
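+	/*
+	 * regmap's internal locking is disabled because accesses are
+	 * serialized by the driver itself: i2c_lock for normal operations
+	 * and aw9523_irq.lock on the interrupt bus path.
+	 */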
+ .cache_type = REGCACHE_FLAT,
+ .disable_locking = true,
+
+ .num_reg_defaults_raw = AW9523_REG_SOFT_RESET,
+};
+
+static int aw9523_hw_init(struct aw9523 *awi)
+{
+ u8 p1_pin = AW9523_PINS_PER_PORT;
+ unsigned int val;
+ int ret;
+
+ /* No register caching during initialization */
+ regcache_cache_bypass(awi->regmap, true);
+
+ /* Bring up the chip */
+ ret = aw9523_hw_reset(awi);
+ if (ret) {
+ dev_err(awi->dev, "HW Reset failed: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * This is the expected chip and it is running: it's time to
+ * set a safe default configuration in case the user doesn't
+ * configure (all of the available) pins in this chip.
+ * P.S.: The order of the writes doesn't matter.
+ */
+
+ /* Set all pins as GPIO */
+ ret = regmap_write(awi->regmap, AW9523_REG_PORT_MODE(0), U8_MAX);
+ if (ret)
+ return ret;
+ ret = regmap_write(awi->regmap, AW9523_REG_PORT_MODE(p1_pin), U8_MAX);
+ if (ret)
+ return ret;
+
+ /* Set Open-Drain mode on Port 0 (Port 1 is always P-P) */
+ ret = regmap_write(awi->regmap, AW9523_REG_GCR, 0);
+ if (ret)
+ return ret;
+
+ /* Set all pins as inputs */
+ ret = regmap_write(awi->regmap, AW9523_REG_CONF_STATE(0), U8_MAX);
+ if (ret)
+ return ret;
+ ret = regmap_write(awi->regmap, AW9523_REG_CONF_STATE(p1_pin), U8_MAX);
+ if (ret)
+ return ret;
+
+	/* Disable all interrupts to avoid spurious wakeups */
+ ret = regmap_write(awi->regmap, AW9523_REG_INTR_DIS(0), U8_MAX);
+ if (ret)
+ return ret;
+ ret = regmap_write(awi->regmap, AW9523_REG_INTR_DIS(p1_pin), U8_MAX);
+ if (ret)
+ return ret;
+
+ /* Clear setup-generated interrupts by performing a port state read */
+ ret = aw9523_get_port_state(awi->regmap, 0, 0, &val);
+ if (ret)
+ return ret;
+ ret = aw9523_get_port_state(awi->regmap, p1_pin, 0, &val);
+ if (ret)
+ return ret;
+
+ /* Everything went fine: activate and reinitialize register cache */
+ regcache_cache_bypass(awi->regmap, false);
+ return regmap_reinit_cache(awi->regmap, &aw9523_regmap);
+}
+
+static int aw9523_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct pinctrl_desc *pdesc;
+ struct aw9523 *awi;
+ int ret;
+
+ awi = devm_kzalloc(dev, sizeof(*awi), GFP_KERNEL);
+ if (!awi)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, awi);
+
+ awi->dev = dev;
+ awi->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(awi->reset_gpio))
+ return PTR_ERR(awi->reset_gpio);
+ gpiod_set_consumer_name(awi->reset_gpio, "aw9523 reset");
+
+ awi->regmap = devm_regmap_init_i2c(client, &aw9523_regmap);
+ if (IS_ERR(awi->regmap))
+ return PTR_ERR(awi->regmap);
+
+ awi->vio_vreg = devm_regulator_get_optional(dev, "vio");
+ if (IS_ERR(awi->vio_vreg)) {
+ if (PTR_ERR(awi->vio_vreg) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ awi->vio_vreg = NULL;
+ } else {
+ ret = regulator_enable(awi->vio_vreg);
+ if (ret)
+ return ret;
+ }
+
+ mutex_init(&awi->i2c_lock);
+ lockdep_set_subclass(&awi->i2c_lock,
+ i2c_adapter_depth(client->adapter));
+
+ pdesc = devm_kzalloc(dev, sizeof(*pdesc), GFP_KERNEL);
+ if (!pdesc)
+ return -ENOMEM;
+
+ ret = aw9523_hw_init(awi);
+ if (ret)
+ goto err_disable_vregs;
+
+ pdesc->name = dev_name(dev);
+ pdesc->owner = THIS_MODULE;
+ pdesc->pctlops = &aw9523_pinctrl_ops;
+ pdesc->pmxops = &aw9523_pinmux_ops;
+ pdesc->confops = &aw9523_pinconf_ops;
+ pdesc->pins = aw9523_pins;
+ pdesc->npins = ARRAY_SIZE(aw9523_pins);
+
+ ret = aw9523_init_gpiochip(awi, pdesc->npins);
+ if (ret)
+ goto err_disable_vregs;
+
+ if (client->irq) {
+ ret = aw9523_init_irq(awi, client->irq);
+ if (ret)
+ goto err_disable_vregs;
+ }
+
+ awi->pctl = devm_pinctrl_register(dev, pdesc, awi);
+ if (IS_ERR(awi->pctl)) {
+ ret = PTR_ERR(awi->pctl);
+ dev_err(dev, "Cannot register pinctrl: %d", ret);
+ goto err_disable_vregs;
+ }
+
+ ret = devm_gpiochip_add_data(dev, &awi->gpio, awi);
+ if (ret)
+ goto err_disable_vregs;
+
+ return ret;
+
+err_disable_vregs:
+ if (awi->vio_vreg)
+ regulator_disable(awi->vio_vreg);
+ mutex_destroy(&awi->i2c_lock);
+ return ret;
+}
+
+static void aw9523_remove(struct i2c_client *client)
+{
+ struct aw9523 *awi = i2c_get_clientdata(client);
+ int ret;
+
+ if (!awi)
+ return;
+
+ /*
+ * If the chip VIO is connected to a regulator that we can turn
+ * off, life is easy... otherwise, reinitialize the chip and
+ * set the pins to hardware defaults before removing the driver
+ * to leave it in a clean, safe and predictable state.
+ */
+ if (awi->vio_vreg) {
+ regulator_disable(awi->vio_vreg);
+ } else {
+ mutex_lock(&awi->i2c_lock);
+ ret = aw9523_hw_init(awi);
+ mutex_unlock(&awi->i2c_lock);
+ if (ret)
+ return;
+ }
+
+ mutex_destroy(&awi->i2c_lock);
+}
+
+static const struct i2c_device_id aw9523_i2c_id_table[] = {
+ { "aw9523_i2c", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, aw9523_i2c_id_table);
+
+static const struct of_device_id of_aw9523_i2c_match[] = {
+ { .compatible = "awinic,aw9523-pinctrl", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_aw9523_i2c_match);
+
+static struct i2c_driver aw9523_driver = {
+ .driver = {
+ .name = "aw9523-pinctrl",
+ .of_match_table = of_aw9523_i2c_match,
+ },
+ .probe = aw9523_probe,
+ .remove = aw9523_remove,
+ .id_table = aw9523_i2c_id_table,
+};
+module_i2c_driver(aw9523_driver);
+
+MODULE_DESCRIPTION("Awinic AW9523 I2C GPIO Expander driver");
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-da9062.c b/drivers/pinctrl/pinctrl-da9062.c
index 3998b27cbe0e6..22e3cd2cc9632 100644
--- a/drivers/pinctrl/pinctrl-da9062.c
+++ b/drivers/pinctrl/pinctrl-da9062.c
@@ -281,10 +281,17 @@ static int da9062_pctl_probe(struct platform_device *pdev)
return devm_gpiochip_add_data(&pdev->dev, &pctl->gc, pctl);
}
+static const struct of_device_id da9062_compatible_reg_id_table[] = {
+ { .compatible = "dlg,da9062-gpio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, da9062_compatible_reg_id_table);
+
static struct platform_driver da9062_pctl_driver = {
.probe = da9062_pctl_probe,
.driver = {
.name = "da9062-gpio",
+ .of_match_table = da9062_compatible_reg_id_table,
},
};
module_platform_driver(da9062_pctl_driver);
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 4551575e4e7d7..38c3a14c8b586 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -375,7 +375,8 @@ mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value)
static irqreturn_t mcp23s08_irq(int irq, void *data)
{
struct mcp23s08 *mcp = data;
- int intcap, intcon, intf, i, gpio, gpio_orig, intcap_mask, defval;
+ int intcap, intcon, intf, i, gpio, gpio_orig, intcap_mask, defval, gpinten;
+ unsigned long int enabled_interrupts;
unsigned int child_irq;
bool intf_set, intcap_changed, gpio_bit_changed,
defval_changed, gpio_set;
@@ -395,6 +396,9 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
if (mcp_read(mcp, MCP_INTCON, &intcon))
goto unlock;
+ if (mcp_read(mcp, MCP_GPINTEN, &gpinten))
+ goto unlock;
+
if (mcp_read(mcp, MCP_DEFVAL, &defval))
goto unlock;
@@ -410,9 +414,12 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
"intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n",
intcap, intf, gpio_orig, gpio);
- for (i = 0; i < mcp->chip.ngpio; i++) {
- /* We must check all of the inputs on the chip,
- * otherwise we may not notice a change on >=2 pins.
+ enabled_interrupts = gpinten;
+ for_each_set_bit(i, &enabled_interrupts, mcp->chip.ngpio) {
+ /*
+ * We must check all of the inputs with enabled interrupts
+ * on the chip, otherwise we may not notice a change
+ * on more than one pin.
*
* On at least the mcp23s17, INTCAP is only updated
* one byte at a time(INTCAPA and INTCAPB are
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 52aadd6d72a80..be9b8c0101670 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -1401,7 +1401,6 @@ static int ocelot_hw_set_value(struct ocelot_pinctrl *info,
if (info->pincfg) {
const struct ocelot_pincfg_data *opd = info->pincfg_data;
- ret = 0;
switch (reg) {
case PINCONF_BIAS:
ret = ocelot_pincfg_clrsetbits(info, pin,
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 1485573b523c2..5d9abd6547d06 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -723,9 +723,8 @@ static int st_gpio_direction_output(struct gpio_chip *chip,
struct st_gpio_bank *bank = gpiochip_get_data(chip);
__st_gpio_set(bank, offset, value);
- pinctrl_gpio_direction_output(chip, offset);
- return 0;
+ return pinctrl_gpio_direction_output(chip, offset);
}
static int st_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
index f2be341f73e13..5c46b7d7ebcba 100644
--- a/drivers/pinctrl/pinctrl-zynqmp.c
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -562,7 +562,7 @@ static int zynqmp_pinctrl_prepare_func_groups(struct device *dev, u32 fid,
const char **fgroups;
int ret, index, i;
- fgroups = devm_kzalloc(dev, sizeof(*fgroups) * func->ngroups, GFP_KERNEL);
+ fgroups = devm_kcalloc(dev, func->ngroups, sizeof(*fgroups), GFP_KERNEL);
if (!fgroups)
return -ENOMEM;
@@ -754,7 +754,7 @@ static int zynqmp_pinctrl_prepare_function_info(struct device *dev,
if (ret)
return ret;
- funcs = devm_kzalloc(dev, sizeof(*funcs) * pctrl->nfuncs, GFP_KERNEL);
+ funcs = devm_kcalloc(dev, pctrl->nfuncs, sizeof(*funcs), GFP_KERNEL);
if (!funcs)
return -ENOMEM;
@@ -768,7 +768,7 @@ static int zynqmp_pinctrl_prepare_function_info(struct device *dev,
pctrl->ngroups += funcs[i].ngroups;
}
- groups = devm_kzalloc(dev, sizeof(*groups) * pctrl->ngroups, GFP_KERNEL);
+ groups = devm_kcalloc(dev, pctrl->ngroups, sizeof(*groups), GFP_KERNEL);
if (!groups)
return -ENOMEM;
@@ -830,7 +830,7 @@ static int zynqmp_pinctrl_prepare_pin_desc(struct device *dev,
if (ret)
return ret;
- pins = devm_kzalloc(dev, sizeof(*pins) * *npins, GFP_KERNEL);
+ pins = devm_kcalloc(dev, *npins, sizeof(*pins), GFP_KERNEL);
if (!pins)
return -ENOMEM;
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index abbb044d6acec..d924207d629b4 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -188,8 +188,8 @@ out_free_pin:
}
out:
if (status)
- dev_err(pctldev->dev, "pin-%d (%s) status %d\n",
- pin, owner, status);
+ dev_err_probe(pctldev->dev, status, "pin-%d (%s)\n",
+ pin, owner);
return status;
}
@@ -441,7 +441,7 @@ int pinmux_enable_setting(const struct pinctrl_setting *setting)
pname = desc ? desc->name : "non-existing";
gname = pctlops->get_group_name(pctldev,
setting->data.mux.group);
- dev_err(pctldev->dev,
+ dev_err_probe(pctldev->dev, ret,
"could not request pin %d (%s) from group %s "
" on device %s\n",
pins[i], pname, gname,
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index e0f2829c15d6a..24619e80b2cce 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -125,7 +125,7 @@ config PINCTRL_SM8550_LPASS_LPI
platform.
config PINCTRL_SM8650_LPASS_LPI
- tristate "Qualcomm Technologies Inc SM8550 LPASS LPI pin controller driver"
+ tristate "Qualcomm Technologies Inc SM8650 LPASS LPI pin controller driver"
depends on ARM64 || COMPILE_TEST
depends on PINCTRL_LPASS_LPI
help
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
index c8d519ca53eb7..14bd55d647319 100644
--- a/drivers/pinctrl/renesas/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -38,6 +38,7 @@ config PINCTRL_RENESAS
select PINCTRL_PFC_R8A779A0 if ARCH_R8A779A0
select PINCTRL_PFC_R8A779F0 if ARCH_R8A779F0
select PINCTRL_PFC_R8A779G0 if ARCH_R8A779G0
+ select PINCTRL_PFC_R8A779H0 if ARCH_R8A779H0
select PINCTRL_RZG2L if ARCH_RZG2L
select PINCTRL_RZV2M if ARCH_R9A09G011
select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203
@@ -154,6 +155,10 @@ config PINCTRL_PFC_R8A779G0
bool "pin control support for R-Car V4H" if COMPILE_TEST
select PINCTRL_SH_PFC
+config PINCTRL_PFC_R8A779H0
+ bool "pin control support for R-Car V4M" if COMPILE_TEST
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_R8A7740
bool "pin control support for R-Mobile A1" if COMPILE_TEST
select PINCTRL_SH_PFC_GPIO
@@ -187,9 +192,11 @@ config PINCTRL_RZG2L
bool "pin control support for RZ/{G2L,G2UL,V2L}" if COMPILE_TEST
depends on OF
select GPIOLIB
+ select GPIOLIB_IRQCHIP
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
+ select IRQ_DOMAIN_HIERARCHY
help
This selects GPIO and pinctrl driver for Renesas RZ/{G2L,G2UL,V2L}
platforms.
diff --git a/drivers/pinctrl/renesas/Makefile b/drivers/pinctrl/renesas/Makefile
index 3e776955bd4bc..2ba623e04bf8c 100644
--- a/drivers/pinctrl/renesas/Makefile
+++ b/drivers/pinctrl/renesas/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A77995) += pfc-r8a77995.o
obj-$(CONFIG_PINCTRL_PFC_R8A779A0) += pfc-r8a779a0.o
obj-$(CONFIG_PINCTRL_PFC_R8A779F0) += pfc-r8a779f0.o
obj-$(CONFIG_PINCTRL_PFC_R8A779G0) += pfc-r8a779g0.o
+obj-$(CONFIG_PINCTRL_PFC_R8A779H0) += pfc-r8a779h0.o
obj-$(CONFIG_PINCTRL_PFC_SH7203) += pfc-sh7203.o
obj-$(CONFIG_PINCTRL_PFC_SH7264) += pfc-sh7264.o
obj-$(CONFIG_PINCTRL_PFC_SH7269) += pfc-sh7269.o
diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
index 93e51abbf519a..96d6040a88714 100644
--- a/drivers/pinctrl/renesas/core.c
+++ b/drivers/pinctrl/renesas/core.c
@@ -638,6 +638,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a779g0_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A779H0
+ {
+ .compatible = "renesas,pfc-r8a779h0",
+ .data = &r8a779h0_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_SH73A0
{
.compatible = "renesas,pfc-sh73a0",
@@ -731,10 +737,12 @@ static int sh_pfc_resume_noirq(struct device *dev)
sh_pfc_walk_regs(pfc, sh_pfc_restore_reg);
return 0;
}
+#define pm_psci_sleep_ptr(_ptr) pm_sleep_ptr(_ptr)
#else
static int sh_pfc_suspend_init(struct sh_pfc *pfc) { return 0; }
static int sh_pfc_suspend_noirq(struct device *dev) { return 0; }
static int sh_pfc_resume_noirq(struct device *dev) { return 0; }
+#define pm_psci_sleep_ptr(_ptr) PTR_IF(false, (_ptr))
#endif /* CONFIG_ARM_PSCI_FW */
static DEFINE_NOIRQ_DEV_PM_OPS(sh_pfc_pm, sh_pfc_suspend_noirq, sh_pfc_resume_noirq);
@@ -907,9 +915,11 @@ static void __init sh_pfc_check_cfg_reg(const char *drvname,
sh_pfc_err("reg 0x%x: var_field_width declares %u instead of %u bits\n",
cfg_reg->reg, rw, cfg_reg->reg_width);
- if (n != cfg_reg->nr_enum_ids)
+ if (n != cfg_reg->nr_enum_ids) {
sh_pfc_err("reg 0x%x: enum_ids[] has %u instead of %u values\n",
cfg_reg->reg, cfg_reg->nr_enum_ids, n);
+ n = cfg_reg->nr_enum_ids;
+ }
check_enum_ids:
sh_pfc_check_reg_enums(drvname, cfg_reg->reg, cfg_reg->enum_ids, n);
@@ -1415,7 +1425,7 @@ static struct platform_driver sh_pfc_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(sh_pfc_of_table),
- .pm = pm_sleep_ptr(&sh_pfc_pm),
+ .pm = pm_psci_sleep_ptr(&sh_pfc_pm),
},
};
diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c
index acdea6ac15253..d2de526a3b588 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779g0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c
@@ -2384,6 +2384,14 @@ static const unsigned int scif_clk_mux[] = {
SCIF_CLK_MARK,
};
+static const unsigned int scif_clk2_pins[] = {
+ /* SCIF_CLK2 */
+ RCAR_GP_PIN(8, 11),
+};
+static const unsigned int scif_clk2_mux[] = {
+ SCIF_CLK2_MARK,
+};
+
/* - SSI ------------------------------------------------- */
static const unsigned int ssi_data_pins[] = {
/* SSI_SD */
@@ -2694,6 +2702,7 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(scif4_clk),
SH_PFC_PIN_GROUP(scif4_ctrl),
SH_PFC_PIN_GROUP(scif_clk),
+ SH_PFC_PIN_GROUP(scif_clk2),
SH_PFC_PIN_GROUP(ssi_data),
SH_PFC_PIN_GROUP(ssi_ctrl),
@@ -3015,6 +3024,10 @@ static const char * const scif_clk_groups[] = {
"scif_clk",
};
+static const char * const scif_clk2_groups[] = {
+ "scif_clk2",
+};
+
static const char * const ssi_groups[] = {
"ssi_data",
"ssi_ctrl",
@@ -3102,6 +3115,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(scif3),
SH_PFC_FUNCTION(scif4),
SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(scif_clk2),
SH_PFC_FUNCTION(ssi),
diff --git a/drivers/pinctrl/renesas/pfc-r8a779h0.c b/drivers/pinctrl/renesas/pfc-r8a779h0.c
new file mode 100644
index 0000000000000..afa8f06c85cf5
--- /dev/null
+++ b/drivers/pinctrl/renesas/pfc-r8a779h0.c
@@ -0,0 +1,3967 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R8A779H0 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ *
+ * This file is based on the drivers/pinctrl/renesas/pfc-r8a779a0.c
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+
+#include "sh_pfc.h"
+
+#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
+
+#define CPU_ALL_GP(fn, sfx) \
+ PORT_GP_CFG_19(0, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_29(1, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(1, 29, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_16(2, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(2, 17, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(2, 19, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_13(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(3, 13, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 14, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 15, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 16, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 17, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 18, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 19, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 20, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 21, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 22, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 23, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 24, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 25, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 26, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 27, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 28, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 29, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 30, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 31, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_14(4, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(4, 14, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(4, 15, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(4, 21, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(4, 23, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(4, 24, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(5, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(6, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(7, fn, sfx, CFG_FLAGS)
+
+#define CPU_ALL_NOGP(fn) \
+ PIN_NOGP_CFG(VDDQ_AVB0, "VDDQ_AVB0", fn, SH_PFC_PIN_CFG_IO_VOLTAGE_18_25), \
+ PIN_NOGP_CFG(VDDQ_AVB1, "VDDQ_AVB1", fn, SH_PFC_PIN_CFG_IO_VOLTAGE_18_25), \
+ PIN_NOGP_CFG(VDDQ_AVB2, "VDDQ_AVB2", fn, SH_PFC_PIN_CFG_IO_VOLTAGE_18_25)
+
+/*
+ * F_() : just information
+ * FM() : macro for FN_xxx / xxx_MARK
+ */
+
+/* GPSR0 */
+#define GPSR0_18 F_(MSIOF2_RXD, IP2SR0_11_8)
+#define GPSR0_17 F_(MSIOF2_SCK, IP2SR0_7_4)
+#define GPSR0_16 F_(MSIOF2_TXD, IP2SR0_3_0)
+#define GPSR0_15 F_(MSIOF2_SYNC, IP1SR0_31_28)
+#define GPSR0_14 F_(MSIOF2_SS1, IP1SR0_27_24)
+#define GPSR0_13 F_(MSIOF2_SS2, IP1SR0_23_20)
+#define GPSR0_12 F_(MSIOF5_RXD, IP1SR0_19_16)
+#define GPSR0_11 F_(MSIOF5_SCK, IP1SR0_15_12)
+#define GPSR0_10 F_(MSIOF5_TXD, IP1SR0_11_8)
+#define GPSR0_9 F_(MSIOF5_SYNC, IP1SR0_7_4)
+#define GPSR0_8 F_(MSIOF5_SS1, IP1SR0_3_0)
+#define GPSR0_7 F_(MSIOF5_SS2, IP0SR0_31_28)
+#define GPSR0_6 F_(IRQ0, IP0SR0_27_24)
+#define GPSR0_5 F_(IRQ1, IP0SR0_23_20)
+#define GPSR0_4 F_(IRQ2, IP0SR0_19_16)
+#define GPSR0_3 F_(IRQ3, IP0SR0_15_12)
+#define GPSR0_2 F_(GP0_02, IP0SR0_11_8)
+#define GPSR0_1 F_(GP0_01, IP0SR0_7_4)
+#define GPSR0_0 F_(GP0_00, IP0SR0_3_0)
+
+/* GPSR1 */
+#define GPSR1_29 F_(ERROROUTC_N_A, IP3SR1_23_20)
+#define GPSR1_28 F_(HTX3, IP3SR1_19_16)
+#define GPSR1_27 F_(HCTS3_N, IP3SR1_15_12)
+#define GPSR1_26 F_(HRTS3_N, IP3SR1_11_8)
+#define GPSR1_25 F_(HSCK3, IP3SR1_7_4)
+#define GPSR1_24 F_(HRX3, IP3SR1_3_0)
+#define GPSR1_23 F_(GP1_23, IP2SR1_31_28)
+#define GPSR1_22 F_(AUDIO_CLKIN, IP2SR1_27_24)
+#define GPSR1_21 F_(AUDIO_CLKOUT, IP2SR1_23_20)
+#define GPSR1_20 F_(SSI_SD, IP2SR1_19_16)
+#define GPSR1_19 F_(SSI_WS, IP2SR1_15_12)
+#define GPSR1_18 F_(SSI_SCK, IP2SR1_11_8)
+#define GPSR1_17 F_(SCIF_CLK, IP2SR1_7_4)
+#define GPSR1_16 F_(HRX0, IP2SR1_3_0)
+#define GPSR1_15 F_(HSCK0, IP1SR1_31_28)
+#define GPSR1_14 F_(HRTS0_N, IP1SR1_27_24)
+#define GPSR1_13 F_(HCTS0_N, IP1SR1_23_20)
+#define GPSR1_12 F_(HTX0, IP1SR1_19_16)
+#define GPSR1_11 F_(MSIOF0_RXD, IP1SR1_15_12)
+#define GPSR1_10 F_(MSIOF0_SCK, IP1SR1_11_8)
+#define GPSR1_9 F_(MSIOF0_TXD, IP1SR1_7_4)
+#define GPSR1_8 F_(MSIOF0_SYNC, IP1SR1_3_0)
+#define GPSR1_7 F_(MSIOF0_SS1, IP0SR1_31_28)
+#define GPSR1_6 F_(MSIOF0_SS2, IP0SR1_27_24)
+#define GPSR1_5 F_(MSIOF1_RXD, IP0SR1_23_20)
+#define GPSR1_4 F_(MSIOF1_TXD, IP0SR1_19_16)
+#define GPSR1_3 F_(MSIOF1_SCK, IP0SR1_15_12)
+#define GPSR1_2 F_(MSIOF1_SYNC, IP0SR1_11_8)
+#define GPSR1_1 F_(MSIOF1_SS1, IP0SR1_7_4)
+#define GPSR1_0 F_(MSIOF1_SS2, IP0SR1_3_0)
+
+/* GPSR2 */
+#define GPSR2_19 F_(CANFD1_RX, IP2SR2_15_12)
+#define GPSR2_17 F_(CANFD1_TX, IP2SR2_7_4)
+#define GPSR2_15 F_(CANFD3_RX, IP1SR2_31_28)
+#define GPSR2_14 F_(CANFD3_TX, IP1SR2_27_24)
+#define GPSR2_13 F_(CANFD2_RX, IP1SR2_23_20)
+#define GPSR2_12 F_(CANFD2_TX, IP1SR2_19_16)
+#define GPSR2_11 F_(CANFD0_RX, IP1SR2_15_12)
+#define GPSR2_10 F_(CANFD0_TX, IP1SR2_11_8)
+#define GPSR2_9 F_(CAN_CLK, IP1SR2_7_4)
+#define GPSR2_8 F_(TPU0TO0, IP1SR2_3_0)
+#define GPSR2_7 F_(TPU0TO1, IP0SR2_31_28)
+#define GPSR2_6 F_(FXR_TXDB, IP0SR2_27_24)
+#define GPSR2_5 F_(FXR_TXENB_N_A, IP0SR2_23_20)
+#define GPSR2_4 F_(RXDB_EXTFXR, IP0SR2_19_16)
+#define GPSR2_3 F_(CLK_EXTFXR, IP0SR2_15_12)
+#define GPSR2_2 F_(RXDA_EXTFXR, IP0SR2_11_8)
+#define GPSR2_1 F_(FXR_TXENA_N_A, IP0SR2_7_4)
+#define GPSR2_0 F_(FXR_TXDA, IP0SR2_3_0)
+
+/* GPSR3 */
+#define GPSR3_31 F_(TCLK4, IP3SR3_31_28)
+#define GPSR3_30 F_(TCLK3, IP3SR3_27_24)
+#define GPSR3_29 F_(RPC_INT_N, IP3SR3_23_20)
+#define GPSR3_28 F_(RPC_WP_N, IP3SR3_19_16)
+#define GPSR3_27 F_(RPC_RESET_N, IP3SR3_15_12)
+#define GPSR3_26 F_(QSPI1_IO3, IP3SR3_11_8)
+#define GPSR3_25 F_(QSPI1_SSL, IP3SR3_7_4)
+#define GPSR3_24 F_(QSPI1_IO2, IP3SR3_3_0)
+#define GPSR3_23 F_(QSPI1_MISO_IO1, IP2SR3_31_28)
+#define GPSR3_22 F_(QSPI1_SPCLK, IP2SR3_27_24)
+#define GPSR3_21 F_(QSPI1_MOSI_IO0, IP2SR3_23_20)
+#define GPSR3_20 F_(QSPI0_SPCLK, IP2SR3_19_16)
+#define GPSR3_19 F_(QSPI0_MOSI_IO0, IP2SR3_15_12)
+#define GPSR3_18 F_(QSPI0_MISO_IO1, IP2SR3_11_8)
+#define GPSR3_17 F_(QSPI0_IO2, IP2SR3_7_4)
+#define GPSR3_16 F_(QSPI0_IO3, IP2SR3_3_0)
+#define GPSR3_15 F_(QSPI0_SSL, IP1SR3_31_28)
+#define GPSR3_14 F_(PWM2, IP1SR3_27_24)
+#define GPSR3_13 F_(PWM1, IP1SR3_23_20)
+#define GPSR3_12 F_(SD_WP, IP1SR3_19_16)
+#define GPSR3_11 F_(SD_CD, IP1SR3_15_12)
+#define GPSR3_10 F_(MMC_SD_CMD, IP1SR3_11_8)
+#define GPSR3_9 F_(MMC_D6, IP1SR3_7_4)
+#define GPSR3_8 F_(MMC_D7, IP1SR3_3_0)
+#define GPSR3_7 F_(MMC_D4, IP0SR3_31_28)
+#define GPSR3_6 F_(MMC_D5, IP0SR3_27_24)
+#define GPSR3_5 F_(MMC_SD_D3, IP0SR3_23_20)
+#define GPSR3_4 F_(MMC_DS, IP0SR3_19_16)
+#define GPSR3_3 F_(MMC_SD_CLK, IP0SR3_15_12)
+#define GPSR3_2 F_(MMC_SD_D2, IP0SR3_11_8)
+#define GPSR3_1 F_(MMC_SD_D0, IP0SR3_7_4)
+#define GPSR3_0 F_(MMC_SD_D1, IP0SR3_3_0)
+
+/* GPSR4 */
+#define GPSR4_24 F_(AVS1, IP3SR4_3_0)
+#define GPSR4_23 F_(AVS0, IP2SR4_31_28)
+#define GPSR4_21 F_(PCIE0_CLKREQ_N, IP2SR4_23_20)
+#define GPSR4_15 F_(PWM4, IP1SR4_31_28)
+#define GPSR4_14 F_(PWM3, IP1SR4_27_24)
+#define GPSR4_13 F_(HSCK2, IP1SR4_23_20)
+#define GPSR4_12 F_(HCTS2_N, IP1SR4_19_16)
+#define GPSR4_11 F_(SCIF_CLK2, IP1SR4_15_12)
+#define GPSR4_10 F_(HRTS2_N, IP1SR4_11_8)
+#define GPSR4_9 F_(HTX2, IP1SR4_7_4)
+#define GPSR4_8 F_(HRX2, IP1SR4_3_0)
+#define GPSR4_7 F_(SDA3, IP0SR4_31_28)
+#define GPSR4_6 F_(SCL3, IP0SR4_27_24)
+#define GPSR4_5 F_(SDA2, IP0SR4_23_20)
+#define GPSR4_4 F_(SCL2, IP0SR4_19_16)
+#define GPSR4_3 F_(SDA1, IP0SR4_15_12)
+#define GPSR4_2 F_(SCL1, IP0SR4_11_8)
+#define GPSR4_1 F_(SDA0, IP0SR4_7_4)
+#define GPSR4_0 F_(SCL0, IP0SR4_3_0)
+
+/* GPSR 5 */
+#define GPSR5_20 F_(AVB2_RX_CTL, IP2SR5_19_16)
+#define GPSR5_19 F_(AVB2_TX_CTL, IP2SR5_15_12)
+#define GPSR5_18 F_(AVB2_RXC, IP2SR5_11_8)
+#define GPSR5_17 F_(AVB2_RD0, IP2SR5_7_4)
+#define GPSR5_16 F_(AVB2_TXC, IP2SR5_3_0)
+#define GPSR5_15 F_(AVB2_TD0, IP1SR5_31_28)
+#define GPSR5_14 F_(AVB2_RD1, IP1SR5_27_24)
+#define GPSR5_13 F_(AVB2_RD2, IP1SR5_23_20)
+#define GPSR5_12 F_(AVB2_TD1, IP1SR5_19_16)
+#define GPSR5_11 F_(AVB2_TD2, IP1SR5_15_12)
+#define GPSR5_10 F_(AVB2_MDIO, IP1SR5_11_8)
+#define GPSR5_9 F_(AVB2_RD3, IP1SR5_7_4)
+#define GPSR5_8 F_(AVB2_TD3, IP1SR5_3_0)
+#define GPSR5_7 F_(AVB2_TXCREFCLK, IP0SR5_31_28)
+#define GPSR5_6 F_(AVB2_MDC, IP0SR5_27_24)
+#define GPSR5_5 F_(AVB2_MAGIC, IP0SR5_23_20)
+#define GPSR5_4 F_(AVB2_PHY_INT, IP0SR5_19_16)
+#define GPSR5_3 F_(AVB2_LINK, IP0SR5_15_12)
+#define GPSR5_2 F_(AVB2_AVTP_MATCH, IP0SR5_11_8)
+#define GPSR5_1 F_(AVB2_AVTP_CAPTURE, IP0SR5_7_4)
+#define GPSR5_0 F_(AVB2_AVTP_PPS, IP0SR5_3_0)
+
+/* GPSR 6 */
+#define GPSR6_20 F_(AVB1_TXCREFCLK, IP2SR6_19_16)
+#define GPSR6_19 F_(AVB1_RD3, IP2SR6_15_12)
+#define GPSR6_18 F_(AVB1_TD3, IP2SR6_11_8)
+#define GPSR6_17 F_(AVB1_RD2, IP2SR6_7_4)
+#define GPSR6_16 F_(AVB1_TD2, IP2SR6_3_0)
+#define GPSR6_15 F_(AVB1_RD0, IP1SR6_31_28)
+#define GPSR6_14 F_(AVB1_RD1, IP1SR6_27_24)
+#define GPSR6_13 F_(AVB1_TD0, IP1SR6_23_20)
+#define GPSR6_12 F_(AVB1_TD1, IP1SR6_19_16)
+#define GPSR6_11 F_(AVB1_AVTP_CAPTURE, IP1SR6_15_12)
+#define GPSR6_10 F_(AVB1_AVTP_PPS, IP1SR6_11_8)
+#define GPSR6_9 F_(AVB1_RX_CTL, IP1SR6_7_4)
+#define GPSR6_8 F_(AVB1_RXC, IP1SR6_3_0)
+#define GPSR6_7 F_(AVB1_TX_CTL, IP0SR6_31_28)
+#define GPSR6_6 F_(AVB1_TXC, IP0SR6_27_24)
+#define GPSR6_5 F_(AVB1_AVTP_MATCH, IP0SR6_23_20)
+#define GPSR6_4 F_(AVB1_LINK, IP0SR6_19_16)
+#define GPSR6_3 F_(AVB1_PHY_INT, IP0SR6_15_12)
+#define GPSR6_2 F_(AVB1_MDC, IP0SR6_11_8)
+#define GPSR6_1 F_(AVB1_MAGIC, IP0SR6_7_4)
+#define GPSR6_0 F_(AVB1_MDIO, IP0SR6_3_0)
+
+/* GPSR7 */
+#define GPSR7_20 F_(AVB0_RX_CTL, IP2SR7_19_16)
+#define GPSR7_19 F_(AVB0_RXC, IP2SR7_15_12)
+#define GPSR7_18 F_(AVB0_RD0, IP2SR7_11_8)
+#define GPSR7_17 F_(AVB0_RD1, IP2SR7_7_4)
+#define GPSR7_16 F_(AVB0_TX_CTL, IP2SR7_3_0)
+#define GPSR7_15 F_(AVB0_TXC, IP1SR7_31_28)
+#define GPSR7_14 F_(AVB0_MDIO, IP1SR7_27_24)
+#define GPSR7_13 F_(AVB0_MDC, IP1SR7_23_20)
+#define GPSR7_12 F_(AVB0_RD2, IP1SR7_19_16)
+#define GPSR7_11 F_(AVB0_TD0, IP1SR7_15_12)
+#define GPSR7_10 F_(AVB0_MAGIC, IP1SR7_11_8)
+#define GPSR7_9 F_(AVB0_TXCREFCLK, IP1SR7_7_4)
+#define GPSR7_8 F_(AVB0_RD3, IP1SR7_3_0)
+#define GPSR7_7 F_(AVB0_TD1, IP0SR7_31_28)
+#define GPSR7_6 F_(AVB0_TD2, IP0SR7_27_24)
+#define GPSR7_5 F_(AVB0_PHY_INT, IP0SR7_23_20)
+#define GPSR7_4 F_(AVB0_LINK, IP0SR7_19_16)
+#define GPSR7_3 F_(AVB0_TD3, IP0SR7_15_12)
+#define GPSR7_2 F_(AVB0_AVTP_MATCH, IP0SR7_11_8)
+#define GPSR7_1 F_(AVB0_AVTP_CAPTURE, IP0SR7_7_4)
+#define GPSR7_0 F_(AVB0_AVTP_PPS, IP0SR7_3_0)
+
+
+/* SR0 */
+/* IP0SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR0_3_0 F_(0, 0) FM(ERROROUTC_N_B) FM(TCLK2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_7_4 F_(0, 0) FM(MSIOF3_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_11_8 F_(0, 0) FM(MSIOF3_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_15_12 FM(IRQ3) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_19_16 FM(IRQ2) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_23_20 FM(IRQ1) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_27_24 FM(IRQ0) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_31_28 FM(MSIOF5_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR0_3_0 FM(MSIOF5_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_7_4 FM(MSIOF5_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_11_8 FM(MSIOF5_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_15_12 FM(MSIOF5_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_19_16 FM(MSIOF5_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_23_20 FM(MSIOF2_SS2) FM(TCLK1_A) FM(IRQ2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_27_24 FM(MSIOF2_SS1) FM(HTX1_A) FM(TX1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_31_28 FM(MSIOF2_SYNC) FM(HRX1_A) FM(RX1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR0_3_0 FM(MSIOF2_TXD) FM(HCTS1_N_A) FM(CTS1_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR0_7_4 FM(MSIOF2_SCK) FM(HRTS1_N_A) FM(RTS1_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR0_11_8 FM(MSIOF2_RXD) FM(HSCK1_A) FM(SCK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR1 */
+/* IP0SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR1_3_0 FM(MSIOF1_SS2) FM(HTX3_B) FM(TX3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_7_4 FM(MSIOF1_SS1) FM(HCTS3_N_B) FM(RX3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_11_8 FM(MSIOF1_SYNC) FM(HRTS3_N_B) FM(RTS3_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_15_12 FM(MSIOF1_SCK) FM(HSCK3_B) FM(CTS3_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_19_16 FM(MSIOF1_TXD) FM(HRX3_B) FM(SCK3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_23_20 FM(MSIOF1_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_27_24 FM(MSIOF0_SS2) FM(HTX1_B) FM(TX1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_31_28 FM(MSIOF0_SS1) FM(HRX1_B) FM(RX1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR1_3_0 FM(MSIOF0_SYNC) FM(HCTS1_N_B) FM(CTS1_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_7_4 FM(MSIOF0_TXD) FM(HRTS1_N_B) FM(RTS1_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_11_8 FM(MSIOF0_SCK) FM(HSCK1_B) FM(SCK1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_15_12 FM(MSIOF0_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_19_16 FM(HTX0) FM(TX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_23_20 FM(HCTS0_N) FM(CTS0_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_27_24 FM(HRTS0_N) FM(RTS0_N) FM(PWM0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_31_28 FM(HSCK0) FM(SCK0) FM(PWM0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR1_3_0 FM(HRX0) FM(RX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_7_4 FM(SCIF_CLK) FM(IRQ4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_11_8 FM(SSI_SCK) FM(TCLK3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_15_12 FM(SSI_WS) FM(TCLK4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_19_16 FM(SSI_SD) FM(IRQ0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_23_20 FM(AUDIO_CLKOUT) FM(IRQ1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_27_24 FM(AUDIO_CLKIN) FM(PWM3_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_31_28 F_(0, 0) FM(TCLK2_A) FM(MSIOF4_SS1) FM(IRQ3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP3SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP3SR1_3_0 FM(HRX3_A) FM(SCK3_A) FM(MSIOF4_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_7_4 FM(HSCK3_A) FM(CTS3_N_A) FM(MSIOF4_SCK) FM(TPU0TO0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_11_8 FM(HRTS3_N_A) FM(RTS3_N_A) FM(MSIOF4_TXD) FM(TPU0TO1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_15_12 FM(HCTS3_N_A) FM(RX3_A) FM(MSIOF4_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_19_16 FM(HTX3_A) FM(TX3_A) FM(MSIOF4_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_23_20 FM(ERROROUTC_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR2 */
+/* IP0SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR2_3_0 FM(FXR_TXDA) F_(0, 0) FM(TPU0TO2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_7_4 FM(FXR_TXENA_N_A) F_(0, 0) FM(TPU0TO3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_11_8 FM(RXDA_EXTFXR) F_(0, 0) FM(IRQ5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_15_12 FM(CLK_EXTFXR) F_(0, 0) FM(IRQ4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_19_16 FM(RXDB_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_23_20 FM(FXR_TXENB_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_27_24 FM(FXR_TXDB) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_31_28 FM(TPU0TO1_A) F_(0, 0) F_(0, 0) FM(TCLK2_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR2_3_0 FM(TPU0TO0_A) F_(0, 0) F_(0, 0) FM(TCLK1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_7_4 FM(CAN_CLK) FM(FXR_TXENA_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_11_8 FM(CANFD0_TX) FM(FXR_TXENB_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_15_12 FM(CANFD0_RX) FM(STPWT_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_19_16 FM(CANFD2_TX) FM(TPU0TO2_A) F_(0, 0) FM(TCLK3_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_23_20 FM(CANFD2_RX) FM(TPU0TO3_A) FM(PWM1_B) FM(TCLK4_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_27_24 FM(CANFD3_TX) F_(0, 0) FM(PWM2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_31_28 FM(CANFD3_RX) F_(0, 0) FM(PWM3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR2_7_4 FM(CANFD1_TX) F_(0, 0) FM(PWM1_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_15_12 FM(CANFD1_RX) F_(0, 0) FM(PWM2_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR3 */
+/* IP0SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR3_3_0 FM(MMC_SD_D1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_7_4 FM(MMC_SD_D0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_11_8 FM(MMC_SD_D2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_15_12 FM(MMC_SD_CLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_19_16 FM(MMC_DS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_23_20 FM(MMC_SD_D3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_27_24 FM(MMC_D5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_31_28 FM(MMC_D4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR3_3_0 FM(MMC_D7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_7_4 FM(MMC_D6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_11_8 FM(MMC_SD_CMD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_15_12 FM(SD_CD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_19_16 FM(SD_WP) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_23_20 FM(PWM1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_27_24 FM(PWM2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_31_28 FM(QSPI0_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR3_3_0 FM(QSPI0_IO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_7_4 FM(QSPI0_IO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_11_8 FM(QSPI0_MISO_IO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_15_12 FM(QSPI0_MOSI_IO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_19_16 FM(QSPI0_SPCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_23_20 FM(QSPI1_MOSI_IO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_27_24 FM(QSPI1_SPCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_31_28 FM(QSPI1_MISO_IO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP3SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP3SR3_3_0 FM(QSPI1_IO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_7_4 FM(QSPI1_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_11_8 FM(QSPI1_IO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_15_12 FM(RPC_RESET_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_19_16 FM(RPC_WP_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_23_20 FM(RPC_INT_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_27_24 FM(TCLK3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_31_28 FM(TCLK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR4 */
+/* IP0SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR4_3_0 FM(SCL0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_7_4 FM(SDA0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_11_8 FM(SCL1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_15_12 FM(SDA1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_19_16 FM(SCL2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_23_20 FM(SDA2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_27_24 FM(SCL3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_31_28 FM(SDA3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR4_3_0 FM(HRX2) FM(SCK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_7_4 FM(HTX2) FM(CTS4_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_11_8 FM(HRTS2_N) FM(RTS4_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_15_12 FM(SCIF_CLK2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_19_16 FM(HCTS2_N) FM(TX4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_23_20 FM(HSCK2) FM(RX4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_27_24 FM(PWM3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_31_28 FM(PWM4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR4_23_20 FM(PCIE0_CLKREQ_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR4_31_28 FM(AVS0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP3SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP3SR4_3_0 FM(AVS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR5 */
+/* IP0SR5 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR5_3_0 FM(AVB2_AVTP_PPS) FM(Ether_GPTP_PPS0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_7_4 FM(AVB2_AVTP_CAPTURE) FM(Ether_GPTP_CAPTURE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_11_8 FM(AVB2_AVTP_MATCH) FM(Ether_GPTP_MATCH) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_15_12 FM(AVB2_LINK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_19_16 FM(AVB2_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_23_20 FM(AVB2_MAGIC) FM(Ether_GPTP_PPS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_27_24 FM(AVB2_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_31_28 FM(AVB2_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR5 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR5_3_0 FM(AVB2_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_7_4 FM(AVB2_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_11_8 FM(AVB2_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_15_12 FM(AVB2_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_19_16 FM(AVB2_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_23_20 FM(AVB2_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_27_24 FM(AVB2_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_31_28 FM(AVB2_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR5 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR5_3_0 FM(AVB2_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_7_4 FM(AVB2_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_11_8 FM(AVB2_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_15_12 FM(AVB2_TX_CTL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_19_16 FM(AVB2_RX_CTL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR6 */
+/* IP0SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR6_3_0 FM(AVB1_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_7_4 FM(AVB1_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_11_8 FM(AVB1_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_15_12 FM(AVB1_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_19_16 FM(AVB1_LINK) FM(AVB1_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_23_20 FM(AVB1_AVTP_MATCH) FM(AVB1_MII_RX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_27_24 FM(AVB1_TXC) FM(AVB1_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_31_28 FM(AVB1_TX_CTL) FM(AVB1_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR6_3_0 FM(AVB1_RXC) FM(AVB1_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_7_4 FM(AVB1_RX_CTL) FM(AVB1_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_11_8 FM(AVB1_AVTP_PPS) FM(AVB1_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_15_12 FM(AVB1_AVTP_CAPTURE) FM(AVB1_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_19_16 FM(AVB1_TD1) FM(AVB1_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_23_20 FM(AVB1_TD0) FM(AVB1_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_27_24 FM(AVB1_RD1) FM(AVB1_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_31_28 FM(AVB1_RD0) FM(AVB1_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR6_3_0 FM(AVB1_TD2) FM(AVB1_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_7_4 FM(AVB1_RD2) FM(AVB1_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_11_8 FM(AVB1_TD3) FM(AVB1_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_15_12 FM(AVB1_RD3) FM(AVB1_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_19_16 FM(AVB1_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR7 */
+/* IP0SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR7_3_0 FM(AVB0_AVTP_PPS) FM(AVB0_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_7_4 FM(AVB0_AVTP_CAPTURE) FM(AVB0_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_11_8 FM(AVB0_AVTP_MATCH) FM(AVB0_MII_RX_ER) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_15_12 FM(AVB0_TD3) FM(AVB0_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_19_16 FM(AVB0_LINK) FM(AVB0_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_23_20 FM(AVB0_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_27_24 FM(AVB0_TD2) FM(AVB0_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_31_28 FM(AVB0_TD1) FM(AVB0_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR7_3_0 FM(AVB0_RD3) FM(AVB0_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_7_4 FM(AVB0_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_11_8 FM(AVB0_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_15_12 FM(AVB0_TD0) FM(AVB0_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_19_16 FM(AVB0_RD2) FM(AVB0_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_23_20 FM(AVB0_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_27_24 FM(AVB0_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_31_28 FM(AVB0_TXC) FM(AVB0_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR7_3_0 FM(AVB0_TX_CTL) FM(AVB0_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_7_4 FM(AVB0_RD1) FM(AVB0_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_11_8 FM(AVB0_RD0) FM(AVB0_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_15_12 FM(AVB0_RXC) FM(AVB0_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_19_16 FM(AVB0_RX_CTL) FM(AVB0_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+#define PINMUX_GPSR \
+ GPSR3_31 \
+ GPSR3_30 \
+ GPSR1_29 GPSR3_29 \
+ GPSR1_28 GPSR3_28 \
+ GPSR1_27 GPSR3_27 \
+ GPSR1_26 GPSR3_26 \
+ GPSR1_25 GPSR3_25 \
+ GPSR1_24 GPSR3_24 GPSR4_24 \
+ GPSR1_23 GPSR3_23 GPSR4_23 \
+ GPSR1_22 GPSR3_22 \
+ GPSR1_21 GPSR3_21 GPSR4_21 \
+ GPSR1_20 GPSR3_20 GPSR5_20 GPSR6_20 GPSR7_20 \
+ GPSR1_19 GPSR2_19 GPSR3_19 GPSR5_19 GPSR6_19 GPSR7_19 \
+GPSR0_18 GPSR1_18 GPSR3_18 GPSR5_18 GPSR6_18 GPSR7_18 \
+GPSR0_17 GPSR1_17 GPSR2_17 GPSR3_17 GPSR5_17 GPSR6_17 GPSR7_17 \
+GPSR0_16 GPSR1_16 GPSR3_16 GPSR5_16 GPSR6_16 GPSR7_16 \
+GPSR0_15 GPSR1_15 GPSR2_15 GPSR3_15 GPSR4_15 GPSR5_15 GPSR6_15 GPSR7_15 \
+GPSR0_14 GPSR1_14 GPSR2_14 GPSR3_14 GPSR4_14 GPSR5_14 GPSR6_14 GPSR7_14 \
+GPSR0_13 GPSR1_13 GPSR2_13 GPSR3_13 GPSR4_13 GPSR5_13 GPSR6_13 GPSR7_13 \
+GPSR0_12 GPSR1_12 GPSR2_12 GPSR3_12 GPSR4_12 GPSR5_12 GPSR6_12 GPSR7_12 \
+GPSR0_11 GPSR1_11 GPSR2_11 GPSR3_11 GPSR4_11 GPSR5_11 GPSR6_11 GPSR7_11 \
+GPSR0_10 GPSR1_10 GPSR2_10 GPSR3_10 GPSR4_10 GPSR5_10 GPSR6_10 GPSR7_10 \
+GPSR0_9 GPSR1_9 GPSR2_9 GPSR3_9 GPSR4_9 GPSR5_9 GPSR6_9 GPSR7_9 \
+GPSR0_8 GPSR1_8 GPSR2_8 GPSR3_8 GPSR4_8 GPSR5_8 GPSR6_8 GPSR7_8 \
+GPSR0_7 GPSR1_7 GPSR2_7 GPSR3_7 GPSR4_7 GPSR5_7 GPSR6_7 GPSR7_7 \
+GPSR0_6 GPSR1_6 GPSR2_6 GPSR3_6 GPSR4_6 GPSR5_6 GPSR6_6 GPSR7_6 \
+GPSR0_5 GPSR1_5 GPSR2_5 GPSR3_5 GPSR4_5 GPSR5_5 GPSR6_5 GPSR7_5 \
+GPSR0_4 GPSR1_4 GPSR2_4 GPSR3_4 GPSR4_4 GPSR5_4 GPSR6_4 GPSR7_4 \
+GPSR0_3 GPSR1_3 GPSR2_3 GPSR3_3 GPSR4_3 GPSR5_3 GPSR6_3 GPSR7_3 \
+GPSR0_2 GPSR1_2 GPSR2_2 GPSR3_2 GPSR4_2 GPSR5_2 GPSR6_2 GPSR7_2 \
+GPSR0_1 GPSR1_1 GPSR2_1 GPSR3_1 GPSR4_1 GPSR5_1 GPSR6_1 GPSR7_1 \
+GPSR0_0 GPSR1_0 GPSR2_0 GPSR3_0 GPSR4_0 GPSR5_0 GPSR6_0 GPSR7_0
+
+#define PINMUX_IPSR \
+\
+FM(IP0SR0_3_0) IP0SR0_3_0 FM(IP1SR0_3_0) IP1SR0_3_0 FM(IP2SR0_3_0) IP2SR0_3_0 \
+FM(IP0SR0_7_4) IP0SR0_7_4 FM(IP1SR0_7_4) IP1SR0_7_4 FM(IP2SR0_7_4) IP2SR0_7_4 \
+FM(IP0SR0_11_8) IP0SR0_11_8 FM(IP1SR0_11_8) IP1SR0_11_8 FM(IP2SR0_11_8) IP2SR0_11_8 \
+FM(IP0SR0_15_12) IP0SR0_15_12 FM(IP1SR0_15_12) IP1SR0_15_12 \
+FM(IP0SR0_19_16) IP0SR0_19_16 FM(IP1SR0_19_16) IP1SR0_19_16 \
+FM(IP0SR0_23_20) IP0SR0_23_20 FM(IP1SR0_23_20) IP1SR0_23_20 \
+FM(IP0SR0_27_24) IP0SR0_27_24 FM(IP1SR0_27_24) IP1SR0_27_24 \
+FM(IP0SR0_31_28) IP0SR0_31_28 FM(IP1SR0_31_28) IP1SR0_31_28 \
+\
+FM(IP0SR1_3_0) IP0SR1_3_0 FM(IP1SR1_3_0) IP1SR1_3_0 FM(IP2SR1_3_0) IP2SR1_3_0 FM(IP3SR1_3_0) IP3SR1_3_0 \
+FM(IP0SR1_7_4) IP0SR1_7_4 FM(IP1SR1_7_4) IP1SR1_7_4 FM(IP2SR1_7_4) IP2SR1_7_4 FM(IP3SR1_7_4) IP3SR1_7_4 \
+FM(IP0SR1_11_8) IP0SR1_11_8 FM(IP1SR1_11_8) IP1SR1_11_8 FM(IP2SR1_11_8) IP2SR1_11_8 FM(IP3SR1_11_8) IP3SR1_11_8 \
+FM(IP0SR1_15_12) IP0SR1_15_12 FM(IP1SR1_15_12) IP1SR1_15_12 FM(IP2SR1_15_12) IP2SR1_15_12 FM(IP3SR1_15_12) IP3SR1_15_12 \
+FM(IP0SR1_19_16) IP0SR1_19_16 FM(IP1SR1_19_16) IP1SR1_19_16 FM(IP2SR1_19_16) IP2SR1_19_16 FM(IP3SR1_19_16) IP3SR1_19_16 \
+FM(IP0SR1_23_20) IP0SR1_23_20 FM(IP1SR1_23_20) IP1SR1_23_20 FM(IP2SR1_23_20) IP2SR1_23_20 FM(IP3SR1_23_20) IP3SR1_23_20 \
+FM(IP0SR1_27_24) IP0SR1_27_24 FM(IP1SR1_27_24) IP1SR1_27_24 FM(IP2SR1_27_24) IP2SR1_27_24 \
+FM(IP0SR1_31_28) IP0SR1_31_28 FM(IP1SR1_31_28) IP1SR1_31_28 FM(IP2SR1_31_28) IP2SR1_31_28 \
+\
+FM(IP0SR2_3_0) IP0SR2_3_0 FM(IP1SR2_3_0) IP1SR2_3_0 \
+FM(IP0SR2_7_4) IP0SR2_7_4 FM(IP1SR2_7_4) IP1SR2_7_4 FM(IP2SR2_7_4) IP2SR2_7_4 \
+FM(IP0SR2_11_8) IP0SR2_11_8 FM(IP1SR2_11_8) IP1SR2_11_8 \
+FM(IP0SR2_15_12) IP0SR2_15_12 FM(IP1SR2_15_12) IP1SR2_15_12 FM(IP2SR2_15_12) IP2SR2_15_12 \
+FM(IP0SR2_19_16) IP0SR2_19_16 FM(IP1SR2_19_16) IP1SR2_19_16 \
+FM(IP0SR2_23_20) IP0SR2_23_20 FM(IP1SR2_23_20) IP1SR2_23_20 \
+FM(IP0SR2_27_24) IP0SR2_27_24 FM(IP1SR2_27_24) IP1SR2_27_24 \
+FM(IP0SR2_31_28) IP0SR2_31_28 FM(IP1SR2_31_28) IP1SR2_31_28 \
+\
+FM(IP0SR3_3_0) IP0SR3_3_0 FM(IP1SR3_3_0) IP1SR3_3_0 FM(IP2SR3_3_0) IP2SR3_3_0 FM(IP3SR3_3_0) IP3SR3_3_0 \
+FM(IP0SR3_7_4) IP0SR3_7_4 FM(IP1SR3_7_4) IP1SR3_7_4 FM(IP2SR3_7_4) IP2SR3_7_4 FM(IP3SR3_7_4) IP3SR3_7_4 \
+FM(IP0SR3_11_8) IP0SR3_11_8 FM(IP1SR3_11_8) IP1SR3_11_8 FM(IP2SR3_11_8) IP2SR3_11_8 FM(IP3SR3_11_8) IP3SR3_11_8 \
+FM(IP0SR3_15_12) IP0SR3_15_12 FM(IP1SR3_15_12) IP1SR3_15_12 FM(IP2SR3_15_12) IP2SR3_15_12 FM(IP3SR3_15_12) IP3SR3_15_12 \
+FM(IP0SR3_19_16) IP0SR3_19_16 FM(IP1SR3_19_16) IP1SR3_19_16 FM(IP2SR3_19_16) IP2SR3_19_16 FM(IP3SR3_19_16) IP3SR3_19_16 \
+FM(IP0SR3_23_20) IP0SR3_23_20 FM(IP1SR3_23_20) IP1SR3_23_20 FM(IP2SR3_23_20) IP2SR3_23_20 FM(IP3SR3_23_20) IP3SR3_23_20 \
+FM(IP0SR3_27_24) IP0SR3_27_24 FM(IP1SR3_27_24) IP1SR3_27_24 FM(IP2SR3_27_24) IP2SR3_27_24 FM(IP3SR3_27_24) IP3SR3_27_24 \
+FM(IP0SR3_31_28) IP0SR3_31_28 FM(IP1SR3_31_28) IP1SR3_31_28 FM(IP2SR3_31_28) IP2SR3_31_28 FM(IP3SR3_31_28) IP3SR3_31_28 \
+\
+FM(IP0SR4_3_0) IP0SR4_3_0 FM(IP1SR4_3_0) IP1SR4_3_0 FM(IP3SR4_3_0) IP3SR4_3_0 \
+FM(IP0SR4_7_4) IP0SR4_7_4 FM(IP1SR4_7_4) IP1SR4_7_4 \
+FM(IP0SR4_11_8) IP0SR4_11_8 FM(IP1SR4_11_8) IP1SR4_11_8 \
+FM(IP0SR4_15_12) IP0SR4_15_12 FM(IP1SR4_15_12) IP1SR4_15_12 \
+FM(IP0SR4_19_16) IP0SR4_19_16 FM(IP1SR4_19_16) IP1SR4_19_16 \
+FM(IP0SR4_23_20) IP0SR4_23_20 FM(IP1SR4_23_20) IP1SR4_23_20 FM(IP2SR4_23_20) IP2SR4_23_20 \
+FM(IP0SR4_27_24) IP0SR4_27_24 FM(IP1SR4_27_24) IP1SR4_27_24 \
+FM(IP0SR4_31_28) IP0SR4_31_28 FM(IP1SR4_31_28) IP1SR4_31_28 FM(IP2SR4_31_28) IP2SR4_31_28 \
+\
+FM(IP0SR5_3_0) IP0SR5_3_0 FM(IP1SR5_3_0) IP1SR5_3_0 FM(IP2SR5_3_0) IP2SR5_3_0 \
+FM(IP0SR5_7_4) IP0SR5_7_4 FM(IP1SR5_7_4) IP1SR5_7_4 FM(IP2SR5_7_4) IP2SR5_7_4 \
+FM(IP0SR5_11_8) IP0SR5_11_8 FM(IP1SR5_11_8) IP1SR5_11_8 FM(IP2SR5_11_8) IP2SR5_11_8 \
+FM(IP0SR5_15_12) IP0SR5_15_12 FM(IP1SR5_15_12) IP1SR5_15_12 FM(IP2SR5_15_12) IP2SR5_15_12 \
+FM(IP0SR5_19_16) IP0SR5_19_16 FM(IP1SR5_19_16) IP1SR5_19_16 FM(IP2SR5_19_16) IP2SR5_19_16 \
+FM(IP0SR5_23_20) IP0SR5_23_20 FM(IP1SR5_23_20) IP1SR5_23_20 \
+FM(IP0SR5_27_24) IP0SR5_27_24 FM(IP1SR5_27_24) IP1SR5_27_24 \
+FM(IP0SR5_31_28) IP0SR5_31_28 FM(IP1SR5_31_28) IP1SR5_31_28 \
+\
+FM(IP0SR6_3_0) IP0SR6_3_0 FM(IP1SR6_3_0) IP1SR6_3_0 FM(IP2SR6_3_0) IP2SR6_3_0 \
+FM(IP0SR6_7_4) IP0SR6_7_4 FM(IP1SR6_7_4) IP1SR6_7_4 FM(IP2SR6_7_4) IP2SR6_7_4 \
+FM(IP0SR6_11_8) IP0SR6_11_8 FM(IP1SR6_11_8) IP1SR6_11_8 FM(IP2SR6_11_8) IP2SR6_11_8 \
+FM(IP0SR6_15_12) IP0SR6_15_12 FM(IP1SR6_15_12) IP1SR6_15_12 FM(IP2SR6_15_12) IP2SR6_15_12 \
+FM(IP0SR6_19_16) IP0SR6_19_16 FM(IP1SR6_19_16) IP1SR6_19_16 FM(IP2SR6_19_16) IP2SR6_19_16 \
+FM(IP0SR6_23_20) IP0SR6_23_20 FM(IP1SR6_23_20) IP1SR6_23_20 \
+FM(IP0SR6_27_24) IP0SR6_27_24 FM(IP1SR6_27_24) IP1SR6_27_24 \
+FM(IP0SR6_31_28) IP0SR6_31_28 FM(IP1SR6_31_28) IP1SR6_31_28 \
+\
+FM(IP0SR7_3_0) IP0SR7_3_0 FM(IP1SR7_3_0) IP1SR7_3_0 FM(IP2SR7_3_0) IP2SR7_3_0 \
+FM(IP0SR7_7_4) IP0SR7_7_4 FM(IP1SR7_7_4) IP1SR7_7_4 FM(IP2SR7_7_4) IP2SR7_7_4 \
+FM(IP0SR7_11_8) IP0SR7_11_8 FM(IP1SR7_11_8) IP1SR7_11_8 FM(IP2SR7_11_8) IP2SR7_11_8 \
+FM(IP0SR7_15_12) IP0SR7_15_12 FM(IP1SR7_15_12) IP1SR7_15_12 FM(IP2SR7_15_12) IP2SR7_15_12 \
+FM(IP0SR7_19_16) IP0SR7_19_16 FM(IP1SR7_19_16) IP1SR7_19_16 FM(IP2SR7_19_16) IP2SR7_19_16 \
+FM(IP0SR7_23_20) IP0SR7_23_20 FM(IP1SR7_23_20) IP1SR7_23_20 \
+FM(IP0SR7_27_24) IP0SR7_27_24 FM(IP1SR7_27_24) IP1SR7_27_24 \
+FM(IP0SR7_31_28) IP0SR7_31_28 FM(IP1SR7_31_28) IP1SR7_31_28 \
+
+/* MOD_SEL4 */ /* 0 */ /* 1 */
+#define MOD_SEL4_7 FM(SEL_SDA3_0) FM(SEL_SDA3_1)
+#define MOD_SEL4_6 FM(SEL_SCL3_0) FM(SEL_SCL3_1)
+#define MOD_SEL4_5 FM(SEL_SDA2_0) FM(SEL_SDA2_1)
+#define MOD_SEL4_4 FM(SEL_SCL2_0) FM(SEL_SCL2_1)
+#define MOD_SEL4_3 FM(SEL_SDA1_0) FM(SEL_SDA1_1)
+#define MOD_SEL4_2 FM(SEL_SCL1_0) FM(SEL_SCL1_1)
+#define MOD_SEL4_1 FM(SEL_SDA0_0) FM(SEL_SDA0_1)
+#define MOD_SEL4_0 FM(SEL_SCL0_0) FM(SEL_SCL0_1)
+
+#define PINMUX_MOD_SELS \
+\
+MOD_SEL4_7 \
+MOD_SEL4_6 \
+MOD_SEL4_5 \
+MOD_SEL4_4 \
+MOD_SEL4_3 \
+MOD_SEL4_2 \
+MOD_SEL4_1 \
+MOD_SEL4_0
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ GP_ALL(DATA),
+ PINMUX_DATA_END,
+
+#define F_(x, y)
+#define FM(x) FN_##x,
+ PINMUX_FUNCTION_BEGIN,
+ GP_ALL(FN),
+ PINMUX_GPSR
+ PINMUX_IPSR
+ PINMUX_MOD_SELS
+ PINMUX_FUNCTION_END,
+#undef F_
+#undef FM
+
+#define F_(x, y)
+#define FM(x) x##_MARK,
+ PINMUX_MARK_BEGIN,
+ PINMUX_GPSR
+ PINMUX_IPSR
+ PINMUX_MOD_SELS
+ PINMUX_MARK_END,
+#undef F_
+#undef FM
+};
+
+static const u16 pinmux_data[] = {
+ PINMUX_DATA_GP_ALL(),
+
+ /* IP0SR0 */
+ PINMUX_IPSR_GPSR(IP0SR0_3_0, ERROROUTC_N_B),
+ PINMUX_IPSR_GPSR(IP0SR0_3_0, TCLK2_B),
+
+ PINMUX_IPSR_GPSR(IP0SR0_7_4, MSIOF3_SS1),
+
+ PINMUX_IPSR_GPSR(IP0SR0_11_8, MSIOF3_SS2),
+
+ PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3),
+ PINMUX_IPSR_GPSR(IP0SR0_15_12, MSIOF3_SCK),
+
+ PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2),
+ PINMUX_IPSR_GPSR(IP0SR0_19_16, MSIOF3_TXD),
+
+ PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1),
+ PINMUX_IPSR_GPSR(IP0SR0_23_20, MSIOF3_RXD),
+
+ PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0),
+ PINMUX_IPSR_GPSR(IP0SR0_27_24, MSIOF3_SYNC),
+
+ PINMUX_IPSR_GPSR(IP0SR0_31_28, MSIOF5_SS2),
+
+ /* IP1SR0 */
+ PINMUX_IPSR_GPSR(IP1SR0_3_0, MSIOF5_SS1),
+
+ PINMUX_IPSR_GPSR(IP1SR0_7_4, MSIOF5_SYNC),
+
+ PINMUX_IPSR_GPSR(IP1SR0_11_8, MSIOF5_TXD),
+
+ PINMUX_IPSR_GPSR(IP1SR0_15_12, MSIOF5_SCK),
+
+ PINMUX_IPSR_GPSR(IP1SR0_19_16, MSIOF5_RXD),
+
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, MSIOF2_SS2),
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, TCLK1_A),
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, IRQ2_B),
+
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, MSIOF2_SS1),
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, HTX1_A),
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, TX1_A),
+
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, MSIOF2_SYNC),
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, HRX1_A),
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, RX1_A),
+
+ /* IP2SR0 */
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, MSIOF2_TXD),
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, HCTS1_N_A),
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, CTS1_N_A),
+
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, MSIOF2_SCK),
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, HRTS1_N_A),
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, RTS1_N_A),
+
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, MSIOF2_RXD),
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, HSCK1_A),
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, SCK1_A),
+
+ /* IP0SR1 */
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, MSIOF1_SS2),
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, HTX3_B),
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, TX3_B),
+
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, MSIOF1_SS1),
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, HCTS3_N_B),
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, RX3_B),
+
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, MSIOF1_SYNC),
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, HRTS3_N_B),
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, RTS3_N_B),
+
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, MSIOF1_SCK),
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, HSCK3_B),
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, CTS3_N_B),
+
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, MSIOF1_TXD),
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, HRX3_B),
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, SCK3_B),
+
+ PINMUX_IPSR_GPSR(IP0SR1_23_20, MSIOF1_RXD),
+
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, MSIOF0_SS2),
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, HTX1_B),
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, TX1_B),
+
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, MSIOF0_SS1),
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, HRX1_B),
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, RX1_B),
+
+ /* IP1SR1 */
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, MSIOF0_SYNC),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, HCTS1_N_B),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, CTS1_N_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, MSIOF0_TXD),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, HRTS1_N_B),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, RTS1_N_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, MSIOF0_SCK),
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, HSCK1_B),
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, SCK1_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_15_12, MSIOF0_RXD),
+
+ PINMUX_IPSR_GPSR(IP1SR1_19_16, HTX0),
+ PINMUX_IPSR_GPSR(IP1SR1_19_16, TX0),
+
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, HCTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, CTS0_N),
+
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, HRTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, RTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, PWM0_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, HSCK0),
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, SCK0),
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, PWM0_A),
+
+ /* IP2SR1 */
+ PINMUX_IPSR_GPSR(IP2SR1_3_0, HRX0),
+ PINMUX_IPSR_GPSR(IP2SR1_3_0, RX0),
+
+ PINMUX_IPSR_GPSR(IP2SR1_7_4, SCIF_CLK),
+ PINMUX_IPSR_GPSR(IP2SR1_7_4, IRQ4_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, SSI_SCK),
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, TCLK3_B),
+
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, SSI_WS),
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, TCLK4_B),
+
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, SSI_SD),
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, IRQ0_B),
+
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, AUDIO_CLKOUT),
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, IRQ1_B),
+
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, AUDIO_CLKIN),
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, PWM3_C),
+
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, TCLK2_A),
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, MSIOF4_SS1),
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, IRQ3_B),
+
+ /* IP3SR1 */
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, HRX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, SCK3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, MSIOF4_SS2),
+
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, HSCK3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, CTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, MSIOF4_SCK),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, TPU0TO0_B),
+
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, HRTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, RTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, MSIOF4_TXD),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, TPU0TO1_B),
+
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, HCTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, RX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, MSIOF4_RXD),
+
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, HTX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, TX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, MSIOF4_SYNC),
+
+ PINMUX_IPSR_GPSR(IP3SR1_23_20, ERROROUTC_N_A),
+
+ /* IP0SR2 */
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, FXR_TXDA),
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, TPU0TO2_B),
+
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, FXR_TXENA_N_A),
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, TPU0TO3_B),
+
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, RXDA_EXTFXR),
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, IRQ5),
+
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, CLK_EXTFXR),
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, IRQ4_B),
+
+ PINMUX_IPSR_GPSR(IP0SR2_19_16, RXDB_EXTFXR),
+
+ PINMUX_IPSR_GPSR(IP0SR2_23_20, FXR_TXENB_N_A),
+
+ PINMUX_IPSR_GPSR(IP0SR2_27_24, FXR_TXDB),
+
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, TPU0TO1_A),
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, TCLK2_C),
+
+ /* IP1SR2 */
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, TPU0TO0_A),
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, TCLK1_B),
+
+ PINMUX_IPSR_GPSR(IP1SR2_7_4, CAN_CLK),
+ PINMUX_IPSR_GPSR(IP1SR2_7_4, FXR_TXENA_N_B),
+
+ PINMUX_IPSR_GPSR(IP1SR2_11_8, CANFD0_TX),
+ PINMUX_IPSR_GPSR(IP1SR2_11_8, FXR_TXENB_N_B),
+
+ PINMUX_IPSR_GPSR(IP1SR2_15_12, CANFD0_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_15_12, STPWT_EXTFXR),
+
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, CANFD2_TX),
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, TPU0TO2_A),
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, TCLK3_C),
+
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, CANFD2_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, TPU0TO3_A),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, PWM1_B),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, TCLK4_C),
+
+ PINMUX_IPSR_GPSR(IP1SR2_27_24, CANFD3_TX),
+ PINMUX_IPSR_GPSR(IP1SR2_27_24, PWM2_B),
+
+ PINMUX_IPSR_GPSR(IP1SR2_31_28, CANFD3_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_31_28, PWM3_B),
+
+ /* IP2SR2 */
+ PINMUX_IPSR_GPSR(IP2SR2_7_4, CANFD1_TX),
+ PINMUX_IPSR_GPSR(IP2SR2_7_4, PWM1_C),
+
+ PINMUX_IPSR_GPSR(IP2SR2_15_12, CANFD1_RX),
+ PINMUX_IPSR_GPSR(IP2SR2_15_12, PWM2_C),
+
+ /* IP0SR3 */
+ PINMUX_IPSR_GPSR(IP0SR3_3_0, MMC_SD_D1),
+
+ PINMUX_IPSR_GPSR(IP0SR3_7_4, MMC_SD_D0),
+
+ PINMUX_IPSR_GPSR(IP0SR3_11_8, MMC_SD_D2),
+
+ PINMUX_IPSR_GPSR(IP0SR3_15_12, MMC_SD_CLK),
+
+ PINMUX_IPSR_GPSR(IP0SR3_19_16, MMC_DS),
+
+ PINMUX_IPSR_GPSR(IP0SR3_23_20, MMC_SD_D3),
+
+ PINMUX_IPSR_GPSR(IP0SR3_27_24, MMC_D5),
+
+ PINMUX_IPSR_GPSR(IP0SR3_31_28, MMC_D4),
+
+ /* IP1SR3 */
+ PINMUX_IPSR_GPSR(IP1SR3_3_0, MMC_D7),
+
+ PINMUX_IPSR_GPSR(IP1SR3_7_4, MMC_D6),
+
+ PINMUX_IPSR_GPSR(IP1SR3_11_8, MMC_SD_CMD),
+
+ PINMUX_IPSR_GPSR(IP1SR3_15_12, SD_CD),
+
+ PINMUX_IPSR_GPSR(IP1SR3_19_16, SD_WP),
+
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, PWM1_A),
+
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, PWM2_A),
+
+ PINMUX_IPSR_GPSR(IP1SR3_31_28, QSPI0_SSL),
+
+ /* IP2SR3 */
+ PINMUX_IPSR_GPSR(IP2SR3_3_0, QSPI0_IO3),
+
+ PINMUX_IPSR_GPSR(IP2SR3_7_4, QSPI0_IO2),
+
+ PINMUX_IPSR_GPSR(IP2SR3_11_8, QSPI0_MISO_IO1),
+
+ PINMUX_IPSR_GPSR(IP2SR3_15_12, QSPI0_MOSI_IO0),
+
+ PINMUX_IPSR_GPSR(IP2SR3_19_16, QSPI0_SPCLK),
+
+ PINMUX_IPSR_GPSR(IP2SR3_23_20, QSPI1_MOSI_IO0),
+
+ PINMUX_IPSR_GPSR(IP2SR3_27_24, QSPI1_SPCLK),
+
+ PINMUX_IPSR_GPSR(IP2SR3_31_28, QSPI1_MISO_IO1),
+
+ /* IP3SR3 */
+ PINMUX_IPSR_GPSR(IP3SR3_3_0, QSPI1_IO2),
+
+ PINMUX_IPSR_GPSR(IP3SR3_7_4, QSPI1_SSL),
+
+ PINMUX_IPSR_GPSR(IP3SR3_11_8, QSPI1_IO3),
+
+ PINMUX_IPSR_GPSR(IP3SR3_15_12, RPC_RESET_N),
+
+ PINMUX_IPSR_GPSR(IP3SR3_19_16, RPC_WP_N),
+
+ PINMUX_IPSR_GPSR(IP3SR3_23_20, RPC_INT_N),
+
+ PINMUX_IPSR_GPSR(IP3SR3_27_24, TCLK3_A),
+
+ PINMUX_IPSR_GPSR(IP3SR3_31_28, TCLK4_A),
+
+ /* IP0SR4 */
+ PINMUX_IPSR_MSEL(IP0SR4_3_0, SCL0, SEL_SCL0_0),
+
+ PINMUX_IPSR_MSEL(IP0SR4_7_4, SDA0, SEL_SDA0_0),
+
+ PINMUX_IPSR_MSEL(IP0SR4_11_8, SCL1, SEL_SCL1_0),
+
+ PINMUX_IPSR_MSEL(IP0SR4_15_12, SDA1, SEL_SDA1_0),
+
+ PINMUX_IPSR_MSEL(IP0SR4_19_16, SCL2, SEL_SCL2_0),
+
+ PINMUX_IPSR_MSEL(IP0SR4_23_20, SDA2, SEL_SDA2_0),
+
+ PINMUX_IPSR_MSEL(IP0SR4_27_24, SCL3, SEL_SCL3_0),
+
+ PINMUX_IPSR_MSEL(IP0SR4_31_28, SDA3, SEL_SDA3_0),
+
+ /* IP1SR4 */
+ PINMUX_IPSR_GPSR(IP1SR4_3_0, HRX2),
+ PINMUX_IPSR_GPSR(IP1SR4_3_0, SCK4),
+
+ PINMUX_IPSR_GPSR(IP1SR4_7_4, HTX2),
+ PINMUX_IPSR_GPSR(IP1SR4_7_4, CTS4_N),
+
+ PINMUX_IPSR_GPSR(IP1SR4_11_8, HRTS2_N),
+ PINMUX_IPSR_GPSR(IP1SR4_11_8, RTS4_N),
+
+ PINMUX_IPSR_GPSR(IP1SR4_15_12, SCIF_CLK2),
+
+ PINMUX_IPSR_GPSR(IP1SR4_19_16, HCTS2_N),
+ PINMUX_IPSR_GPSR(IP1SR4_19_16, TX4),
+
+ PINMUX_IPSR_GPSR(IP1SR4_23_20, HSCK2),
+ PINMUX_IPSR_GPSR(IP1SR4_23_20, RX4),
+
+ PINMUX_IPSR_GPSR(IP1SR4_27_24, PWM3_A),
+
+ PINMUX_IPSR_GPSR(IP1SR4_31_28, PWM4),
+
+ /* IP2SR4 */
+ PINMUX_IPSR_GPSR(IP2SR4_23_20, PCIE0_CLKREQ_N),
+
+ PINMUX_IPSR_GPSR(IP2SR4_31_28, AVS0),
+
+ /* IP3SR4 */
+ PINMUX_IPSR_GPSR(IP3SR4_3_0, AVS1),
+
+ /* IP0SR5 */
+ PINMUX_IPSR_GPSR(IP0SR5_3_0, AVB2_AVTP_PPS),
+ PINMUX_IPSR_GPSR(IP0SR5_3_0, Ether_GPTP_PPS0),
+
+ PINMUX_IPSR_GPSR(IP0SR5_7_4, AVB2_AVTP_CAPTURE),
+ PINMUX_IPSR_GPSR(IP0SR5_7_4, Ether_GPTP_CAPTURE),
+
+ PINMUX_IPSR_GPSR(IP0SR5_11_8, AVB2_AVTP_MATCH),
+ PINMUX_IPSR_GPSR(IP0SR5_11_8, Ether_GPTP_MATCH),
+
+ PINMUX_IPSR_GPSR(IP0SR5_15_12, AVB2_LINK),
+
+ PINMUX_IPSR_GPSR(IP0SR5_19_16, AVB2_PHY_INT),
+
+ PINMUX_IPSR_GPSR(IP0SR5_23_20, AVB2_MAGIC),
+ PINMUX_IPSR_GPSR(IP0SR5_23_20, Ether_GPTP_PPS1),
+
+ PINMUX_IPSR_GPSR(IP0SR5_27_24, AVB2_MDC),
+
+ PINMUX_IPSR_GPSR(IP0SR5_31_28, AVB2_TXCREFCLK),
+
+ /* IP1SR5 */
+ PINMUX_IPSR_GPSR(IP1SR5_3_0, AVB2_TD3),
+
+ PINMUX_IPSR_GPSR(IP1SR5_7_4, AVB2_RD3),
+
+ PINMUX_IPSR_GPSR(IP1SR5_11_8, AVB2_MDIO),
+
+ PINMUX_IPSR_GPSR(IP1SR5_15_12, AVB2_TD2),
+
+ PINMUX_IPSR_GPSR(IP1SR5_19_16, AVB2_TD1),
+
+ PINMUX_IPSR_GPSR(IP1SR5_23_20, AVB2_RD2),
+
+ PINMUX_IPSR_GPSR(IP1SR5_27_24, AVB2_RD1),
+
+ PINMUX_IPSR_GPSR(IP1SR5_31_28, AVB2_TD0),
+
+ /* IP2SR5 */
+ PINMUX_IPSR_GPSR(IP2SR5_3_0, AVB2_TXC),
+
+ PINMUX_IPSR_GPSR(IP2SR5_7_4, AVB2_RD0),
+
+ PINMUX_IPSR_GPSR(IP2SR5_11_8, AVB2_RXC),
+
+ PINMUX_IPSR_GPSR(IP2SR5_15_12, AVB2_TX_CTL),
+
+ PINMUX_IPSR_GPSR(IP2SR5_19_16, AVB2_RX_CTL),
+
+ /* IP0SR6 */
+ PINMUX_IPSR_GPSR(IP0SR6_3_0, AVB1_MDIO),
+
+ PINMUX_IPSR_GPSR(IP0SR6_7_4, AVB1_MAGIC),
+
+ PINMUX_IPSR_GPSR(IP0SR6_11_8, AVB1_MDC),
+
+ PINMUX_IPSR_GPSR(IP0SR6_15_12, AVB1_PHY_INT),
+
+ PINMUX_IPSR_GPSR(IP0SR6_19_16, AVB1_LINK),
+ PINMUX_IPSR_GPSR(IP0SR6_19_16, AVB1_MII_TX_ER),
+
+ PINMUX_IPSR_GPSR(IP0SR6_23_20, AVB1_AVTP_MATCH),
+ PINMUX_IPSR_GPSR(IP0SR6_23_20, AVB1_MII_RX_ER),
+
+ PINMUX_IPSR_GPSR(IP0SR6_27_24, AVB1_TXC),
+ PINMUX_IPSR_GPSR(IP0SR6_27_24, AVB1_MII_TXC),
+
+ PINMUX_IPSR_GPSR(IP0SR6_31_28, AVB1_TX_CTL),
+ PINMUX_IPSR_GPSR(IP0SR6_31_28, AVB1_MII_TX_EN),
+
+ /* IP1SR6 */
+ PINMUX_IPSR_GPSR(IP1SR6_3_0, AVB1_RXC),
+ PINMUX_IPSR_GPSR(IP1SR6_3_0, AVB1_MII_RXC),
+
+ PINMUX_IPSR_GPSR(IP1SR6_7_4, AVB1_RX_CTL),
+ PINMUX_IPSR_GPSR(IP1SR6_7_4, AVB1_MII_RX_DV),
+
+ PINMUX_IPSR_GPSR(IP1SR6_11_8, AVB1_AVTP_PPS),
+ PINMUX_IPSR_GPSR(IP1SR6_11_8, AVB1_MII_COL),
+
+ PINMUX_IPSR_GPSR(IP1SR6_15_12, AVB1_AVTP_CAPTURE),
+ PINMUX_IPSR_GPSR(IP1SR6_15_12, AVB1_MII_CRS),
+
+ PINMUX_IPSR_GPSR(IP1SR6_19_16, AVB1_TD1),
+ PINMUX_IPSR_GPSR(IP1SR6_19_16, AVB1_MII_TD1),
+
+ PINMUX_IPSR_GPSR(IP1SR6_23_20, AVB1_TD0),
+ PINMUX_IPSR_GPSR(IP1SR6_23_20, AVB1_MII_TD0),
+
+ PINMUX_IPSR_GPSR(IP1SR6_27_24, AVB1_RD1),
+ PINMUX_IPSR_GPSR(IP1SR6_27_24, AVB1_MII_RD1),
+
+ PINMUX_IPSR_GPSR(IP1SR6_31_28, AVB1_RD0),
+ PINMUX_IPSR_GPSR(IP1SR6_31_28, AVB1_MII_RD0),
+
+ /* IP2SR6 */
+ PINMUX_IPSR_GPSR(IP2SR6_3_0, AVB1_TD2),
+ PINMUX_IPSR_GPSR(IP2SR6_3_0, AVB1_MII_TD2),
+
+ PINMUX_IPSR_GPSR(IP2SR6_7_4, AVB1_RD2),
+ PINMUX_IPSR_GPSR(IP2SR6_7_4, AVB1_MII_RD2),
+
+ PINMUX_IPSR_GPSR(IP2SR6_11_8, AVB1_TD3),
+ PINMUX_IPSR_GPSR(IP2SR6_11_8, AVB1_MII_TD3),
+
+ PINMUX_IPSR_GPSR(IP2SR6_15_12, AVB1_RD3),
+ PINMUX_IPSR_GPSR(IP2SR6_15_12, AVB1_MII_RD3),
+
+ PINMUX_IPSR_GPSR(IP2SR6_19_16, AVB1_TXCREFCLK),
+
+ /* IP0SR7 */
+ PINMUX_IPSR_GPSR(IP0SR7_3_0, AVB0_AVTP_PPS),
+ PINMUX_IPSR_GPSR(IP0SR7_3_0, AVB0_MII_COL),
+
+ PINMUX_IPSR_GPSR(IP0SR7_7_4, AVB0_AVTP_CAPTURE),
+ PINMUX_IPSR_GPSR(IP0SR7_7_4, AVB0_MII_CRS),
+
+ PINMUX_IPSR_GPSR(IP0SR7_11_8, AVB0_AVTP_MATCH),
+ PINMUX_IPSR_GPSR(IP0SR7_11_8, AVB0_MII_RX_ER),
+ PINMUX_IPSR_GPSR(IP0SR7_11_8, CC5_OSCOUT),
+
+ PINMUX_IPSR_GPSR(IP0SR7_15_12, AVB0_TD3),
+ PINMUX_IPSR_GPSR(IP0SR7_15_12, AVB0_MII_TD3),
+
+ PINMUX_IPSR_GPSR(IP0SR7_19_16, AVB0_LINK),
+ PINMUX_IPSR_GPSR(IP0SR7_19_16, AVB0_MII_TX_ER),
+
+ PINMUX_IPSR_GPSR(IP0SR7_23_20, AVB0_PHY_INT),
+
+ PINMUX_IPSR_GPSR(IP0SR7_27_24, AVB0_TD2),
+ PINMUX_IPSR_GPSR(IP0SR7_27_24, AVB0_MII_TD2),
+
+ PINMUX_IPSR_GPSR(IP0SR7_31_28, AVB0_TD1),
+ PINMUX_IPSR_GPSR(IP0SR7_31_28, AVB0_MII_TD1),
+
+ /* IP1SR7 */
+ PINMUX_IPSR_GPSR(IP1SR7_3_0, AVB0_RD3),
+ PINMUX_IPSR_GPSR(IP1SR7_3_0, AVB0_MII_RD3),
+
+ PINMUX_IPSR_GPSR(IP1SR7_7_4, AVB0_TXCREFCLK),
+
+ PINMUX_IPSR_GPSR(IP1SR7_11_8, AVB0_MAGIC),
+
+ PINMUX_IPSR_GPSR(IP1SR7_15_12, AVB0_TD0),
+ PINMUX_IPSR_GPSR(IP1SR7_15_12, AVB0_MII_TD0),
+
+ PINMUX_IPSR_GPSR(IP1SR7_19_16, AVB0_RD2),
+ PINMUX_IPSR_GPSR(IP1SR7_19_16, AVB0_MII_RD2),
+
+ PINMUX_IPSR_GPSR(IP1SR7_23_20, AVB0_MDC),
+
+ PINMUX_IPSR_GPSR(IP1SR7_27_24, AVB0_MDIO),
+
+ PINMUX_IPSR_GPSR(IP1SR7_31_28, AVB0_TXC),
+ PINMUX_IPSR_GPSR(IP1SR7_31_28, AVB0_MII_TXC),
+
+ /* IP2SR7 */
+ PINMUX_IPSR_GPSR(IP2SR7_3_0, AVB0_TX_CTL),
+ PINMUX_IPSR_GPSR(IP2SR7_3_0, AVB0_MII_TX_EN),
+
+ PINMUX_IPSR_GPSR(IP2SR7_7_4, AVB0_RD1),
+ PINMUX_IPSR_GPSR(IP2SR7_7_4, AVB0_MII_RD1),
+
+ PINMUX_IPSR_GPSR(IP2SR7_11_8, AVB0_RD0),
+ PINMUX_IPSR_GPSR(IP2SR7_11_8, AVB0_MII_RD0),
+
+ PINMUX_IPSR_GPSR(IP2SR7_15_12, AVB0_RXC),
+ PINMUX_IPSR_GPSR(IP2SR7_15_12, AVB0_MII_RXC),
+
+ PINMUX_IPSR_GPSR(IP2SR7_19_16, AVB0_RX_CTL),
+ PINMUX_IPSR_GPSR(IP2SR7_19_16, AVB0_MII_RX_DV),
+};
+
+/*
+ * Pins not associated with a GPIO port.
+ */
+enum {
+ GP_ASSIGN_LAST(),
+ NOGP_ALL(),
+};
+
+static const struct sh_pfc_pin pinmux_pins[] = {
+ PINMUX_GPIO_GP_ALL(),
+ PINMUX_NOGP_ALL(),
+};
+
+/* - AUDIO CLOCK ----------------------------------------- */
+static const unsigned int audio_clkin_pins[] = {
+ /* CLK IN */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int audio_clkin_mux[] = {
+ AUDIO_CLKIN_MARK,
+};
+static const unsigned int audio_clkout_pins[] = {
+ /* CLK OUT */
+ RCAR_GP_PIN(1, 21),
+};
+static const unsigned int audio_clkout_mux[] = {
+ AUDIO_CLKOUT_MARK,
+};
+
+/* - AVB0 ------------------------------------------------ */
+static const unsigned int avb0_link_pins[] = {
+ /* AVB0_LINK */
+ RCAR_GP_PIN(7, 4),
+};
+static const unsigned int avb0_link_mux[] = {
+ AVB0_LINK_MARK,
+};
+static const unsigned int avb0_magic_pins[] = {
+ /* AVB0_MAGIC */
+ RCAR_GP_PIN(7, 10),
+};
+static const unsigned int avb0_magic_mux[] = {
+ AVB0_MAGIC_MARK,
+};
+static const unsigned int avb0_phy_int_pins[] = {
+ /* AVB0_PHY_INT */
+ RCAR_GP_PIN(7, 5),
+};
+static const unsigned int avb0_phy_int_mux[] = {
+ AVB0_PHY_INT_MARK,
+};
+static const unsigned int avb0_mdio_pins[] = {
+ /* AVB0_MDC, AVB0_MDIO */
+ RCAR_GP_PIN(7, 13), RCAR_GP_PIN(7, 14),
+};
+static const unsigned int avb0_mdio_mux[] = {
+ AVB0_MDC_MARK, AVB0_MDIO_MARK,
+};
+static const unsigned int avb0_rgmii_pins[] = {
+ /*
+ * AVB0_TX_CTL, AVB0_TXC, AVB0_TD0, AVB0_TD1, AVB0_TD2, AVB0_TD3,
+ * AVB0_RX_CTL, AVB0_RXC, AVB0_RD0, AVB0_RD1, AVB0_RD2, AVB0_RD3,
+ */
+ RCAR_GP_PIN(7, 16), RCAR_GP_PIN(7, 15),
+ RCAR_GP_PIN(7, 11), RCAR_GP_PIN(7, 7),
+ RCAR_GP_PIN(7, 6), RCAR_GP_PIN(7, 3),
+ RCAR_GP_PIN(7, 20), RCAR_GP_PIN(7, 19),
+ RCAR_GP_PIN(7, 18), RCAR_GP_PIN(7, 17),
+ RCAR_GP_PIN(7, 12), RCAR_GP_PIN(7, 8),
+};
+static const unsigned int avb0_rgmii_mux[] = {
+ AVB0_TX_CTL_MARK, AVB0_TXC_MARK,
+ AVB0_TD0_MARK, AVB0_TD1_MARK,
+ AVB0_TD2_MARK, AVB0_TD3_MARK,
+ AVB0_RX_CTL_MARK, AVB0_RXC_MARK,
+ AVB0_RD0_MARK, AVB0_RD1_MARK,
+ AVB0_RD2_MARK, AVB0_RD3_MARK,
+};
+static const unsigned int avb0_txcrefclk_pins[] = {
+ /* AVB0_TXCREFCLK */
+ RCAR_GP_PIN(7, 9),
+};
+static const unsigned int avb0_txcrefclk_mux[] = {
+ AVB0_TXCREFCLK_MARK,
+};
+static const unsigned int avb0_avtp_pps_pins[] = {
+ /* AVB0_AVTP_PPS */
+ RCAR_GP_PIN(7, 0),
+};
+static const unsigned int avb0_avtp_pps_mux[] = {
+ AVB0_AVTP_PPS_MARK,
+};
+static const unsigned int avb0_avtp_capture_pins[] = {
+ /* AVB0_AVTP_CAPTURE */
+ RCAR_GP_PIN(7, 1),
+};
+static const unsigned int avb0_avtp_capture_mux[] = {
+ AVB0_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb0_avtp_match_pins[] = {
+ /* AVB0_AVTP_MATCH */
+ RCAR_GP_PIN(7, 2),
+};
+static const unsigned int avb0_avtp_match_mux[] = {
+ AVB0_AVTP_MATCH_MARK,
+};
+
+/* - AVB1 ------------------------------------------------ */
+static const unsigned int avb1_link_pins[] = {
+ /* AVB1_LINK */
+ RCAR_GP_PIN(6, 4),
+};
+static const unsigned int avb1_link_mux[] = {
+ AVB1_LINK_MARK,
+};
+static const unsigned int avb1_magic_pins[] = {
+ /* AVB1_MAGIC */
+ RCAR_GP_PIN(6, 1),
+};
+static const unsigned int avb1_magic_mux[] = {
+ AVB1_MAGIC_MARK,
+};
+static const unsigned int avb1_phy_int_pins[] = {
+ /* AVB1_PHY_INT */
+ RCAR_GP_PIN(6, 3),
+};
+static const unsigned int avb1_phy_int_mux[] = {
+ AVB1_PHY_INT_MARK,
+};
+static const unsigned int avb1_mdio_pins[] = {
+ /* AVB1_MDC, AVB1_MDIO */
+ RCAR_GP_PIN(6, 2), RCAR_GP_PIN(6, 0),
+};
+static const unsigned int avb1_mdio_mux[] = {
+ AVB1_MDC_MARK, AVB1_MDIO_MARK,
+};
+static const unsigned int avb1_rgmii_pins[] = {
+ /*
+ * AVB1_TX_CTL, AVB1_TXC, AVB1_TD0, AVB1_TD1, AVB1_TD2, AVB1_TD3,
+ * AVB1_RX_CTL, AVB1_RXC, AVB1_RD0, AVB1_RD1, AVB1_RD2, AVB1_RD3,
+ */
+ RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
+ RCAR_GP_PIN(6, 13), RCAR_GP_PIN(6, 12),
+ RCAR_GP_PIN(6, 16), RCAR_GP_PIN(6, 18),
+ RCAR_GP_PIN(6, 9), RCAR_GP_PIN(6, 8),
+ RCAR_GP_PIN(6, 15), RCAR_GP_PIN(6, 14),
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 19),
+};
+static const unsigned int avb1_rgmii_mux[] = {
+ AVB1_TX_CTL_MARK, AVB1_TXC_MARK,
+ AVB1_TD0_MARK, AVB1_TD1_MARK,
+ AVB1_TD2_MARK, AVB1_TD3_MARK,
+ AVB1_RX_CTL_MARK, AVB1_RXC_MARK,
+ AVB1_RD0_MARK, AVB1_RD1_MARK,
+ AVB1_RD2_MARK, AVB1_RD3_MARK,
+};
+static const unsigned int avb1_txcrefclk_pins[] = {
+ /* AVB1_TXCREFCLK */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int avb1_txcrefclk_mux[] = {
+ AVB1_TXCREFCLK_MARK,
+};
+static const unsigned int avb1_avtp_pps_pins[] = {
+ /* AVB1_AVTP_PPS */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int avb1_avtp_pps_mux[] = {
+ AVB1_AVTP_PPS_MARK,
+};
+static const unsigned int avb1_avtp_capture_pins[] = {
+ /* AVB1_AVTP_CAPTURE */
+ RCAR_GP_PIN(6, 11),
+};
+static const unsigned int avb1_avtp_capture_mux[] = {
+ AVB1_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb1_avtp_match_pins[] = {
+ /* AVB1_AVTP_MATCH */
+ RCAR_GP_PIN(6, 5),
+};
+static const unsigned int avb1_avtp_match_mux[] = {
+ AVB1_AVTP_MATCH_MARK,
+};
+
+/* - AVB2 ------------------------------------------------ */
+static const unsigned int avb2_link_pins[] = {
+ /* AVB2_LINK */
+ RCAR_GP_PIN(5, 3),
+};
+static const unsigned int avb2_link_mux[] = {
+ AVB2_LINK_MARK,
+};
+static const unsigned int avb2_magic_pins[] = {
+ /* AVB2_MAGIC */
+ RCAR_GP_PIN(5, 5),
+};
+static const unsigned int avb2_magic_mux[] = {
+ AVB2_MAGIC_MARK,
+};
+static const unsigned int avb2_phy_int_pins[] = {
+ /* AVB2_PHY_INT */
+ RCAR_GP_PIN(5, 4),
+};
+static const unsigned int avb2_phy_int_mux[] = {
+ AVB2_PHY_INT_MARK,
+};
+static const unsigned int avb2_mdio_pins[] = {
+ /* AVB2_MDC, AVB2_MDIO */
+ RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int avb2_mdio_mux[] = {
+ AVB2_MDC_MARK, AVB2_MDIO_MARK,
+};
+static const unsigned int avb2_rgmii_pins[] = {
+ /*
+ * AVB2_TX_CTL, AVB2_TXC, AVB2_TD0, AVB2_TD1, AVB2_TD2, AVB2_TD3,
+ * AVB2_RX_CTL, AVB2_RXC, AVB2_RD0, AVB2_RD1, AVB2_RD2, AVB2_RD3,
+ */
+ RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 16),
+ RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 12),
+ RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 8),
+ RCAR_GP_PIN(5, 20), RCAR_GP_PIN(5, 18),
+ RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 14),
+ RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 9),
+};
+static const unsigned int avb2_rgmii_mux[] = {
+ AVB2_TX_CTL_MARK, AVB2_TXC_MARK,
+ AVB2_TD0_MARK, AVB2_TD1_MARK,
+ AVB2_TD2_MARK, AVB2_TD3_MARK,
+ AVB2_RX_CTL_MARK, AVB2_RXC_MARK,
+ AVB2_RD0_MARK, AVB2_RD1_MARK,
+ AVB2_RD2_MARK, AVB2_RD3_MARK,
+};
+static const unsigned int avb2_txcrefclk_pins[] = {
+ /* AVB2_TXCREFCLK */
+ RCAR_GP_PIN(5, 7),
+};
+static const unsigned int avb2_txcrefclk_mux[] = {
+ AVB2_TXCREFCLK_MARK,
+};
+static const unsigned int avb2_avtp_pps_pins[] = {
+ /* AVB2_AVTP_PPS */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int avb2_avtp_pps_mux[] = {
+ AVB2_AVTP_PPS_MARK,
+};
+static const unsigned int avb2_avtp_capture_pins[] = {
+ /* AVB2_AVTP_CAPTURE */
+ RCAR_GP_PIN(5, 1),
+};
+static const unsigned int avb2_avtp_capture_mux[] = {
+ AVB2_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb2_avtp_match_pins[] = {
+ /* AVB2_AVTP_MATCH */
+ RCAR_GP_PIN(5, 2),
+};
+static const unsigned int avb2_avtp_match_mux[] = {
+ AVB2_AVTP_MATCH_MARK,
+};
+
+/* - CANFD0 ----------------------------------------------------------------- */
+static const unsigned int canfd0_data_pins[] = {
+ /* CANFD0_TX, CANFD0_RX */
+ RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11),
+};
+static const unsigned int canfd0_data_mux[] = {
+ CANFD0_TX_MARK, CANFD0_RX_MARK,
+};
+
+/* - CANFD1 ----------------------------------------------------------------- */
+static const unsigned int canfd1_data_pins[] = {
+ /* CANFD1_TX, CANFD1_RX */
+ RCAR_GP_PIN(2, 17), RCAR_GP_PIN(2, 19),
+};
+static const unsigned int canfd1_data_mux[] = {
+ CANFD1_TX_MARK, CANFD1_RX_MARK,
+};
+
+/* - CANFD2 ----------------------------------------------------------------- */
+static const unsigned int canfd2_data_pins[] = {
+ /* CANFD2_TX, CANFD2_RX */
+ RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 13),
+};
+static const unsigned int canfd2_data_mux[] = {
+ CANFD2_TX_MARK, CANFD2_RX_MARK,
+};
+
+/* - CANFD3 ----------------------------------------------------------------- */
+static const unsigned int canfd3_data_pins[] = {
+ /* CANFD3_TX, CANFD3_RX */
+ RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 15),
+};
+static const unsigned int canfd3_data_mux[] = {
+ CANFD3_TX_MARK, CANFD3_RX_MARK,
+};
+
+/* - CANFD Clock ------------------------------------------------------------ */
+static const unsigned int can_clk_pins[] = {
+ /* CAN_CLK */
+ RCAR_GP_PIN(2, 9),
+};
+static const unsigned int can_clk_mux[] = {
+ CAN_CLK_MARK,
+};
+
+/* - HSCIF0 ----------------------------------------------------------------- */
+static const unsigned int hscif0_data_pins[] = {
+ /* HRX0, HTX0 */
+ RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 12),
+};
+static const unsigned int hscif0_data_mux[] = {
+ HRX0_MARK, HTX0_MARK,
+};
+static const unsigned int hscif0_clk_pins[] = {
+ /* HSCK0 */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int hscif0_clk_mux[] = {
+ HSCK0_MARK,
+};
+static const unsigned int hscif0_ctrl_pins[] = {
+ /* HRTS0_N, HCTS0_N */
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int hscif0_ctrl_mux[] = {
+ HRTS0_N_MARK, HCTS0_N_MARK,
+};
+
+/* - HSCIF1_A ----------------------------------------------------------------- */
+static const unsigned int hscif1_data_a_pins[] = {
+ /* HRX1_A, HTX1_A */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int hscif1_data_a_mux[] = {
+ HRX1_A_MARK, HTX1_A_MARK,
+};
+static const unsigned int hscif1_clk_a_pins[] = {
+ /* HSCK1_A */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int hscif1_clk_a_mux[] = {
+ HSCK1_A_MARK,
+};
+static const unsigned int hscif1_ctrl_a_pins[] = {
+ /* HRTS1_N_A, HCTS1_N_A */
+ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+};
+static const unsigned int hscif1_ctrl_a_mux[] = {
+ HRTS1_N_A_MARK, HCTS1_N_A_MARK,
+};
+
+/* - HSCIF1_B ---------------------------------------------------------------- */
+static const unsigned int hscif1_data_b_pins[] = {
+ /* HRX1_B, HTX1_B */
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int hscif1_data_b_mux[] = {
+ HRX1_B_MARK, HTX1_B_MARK,
+};
+static const unsigned int hscif1_clk_b_pins[] = {
+ /* HSCK1_B */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int hscif1_clk_b_mux[] = {
+ HSCK1_B_MARK,
+};
+static const unsigned int hscif1_ctrl_b_pins[] = {
+ /* HRTS1_N_B, HCTS1_N_B */
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int hscif1_ctrl_b_mux[] = {
+ HRTS1_N_B_MARK, HCTS1_N_B_MARK,
+};
+
+/* - HSCIF2 ----------------------------------------------------------------- */
+static const unsigned int hscif2_data_pins[] = {
+ /* HRX2, HTX2 */
+ RCAR_GP_PIN(4, 8), RCAR_GP_PIN(4, 9),
+};
+static const unsigned int hscif2_data_mux[] = {
+ HRX2_MARK, HTX2_MARK,
+};
+static const unsigned int hscif2_clk_pins[] = {
+ /* HSCK2 */
+ RCAR_GP_PIN(4, 13),
+};
+static const unsigned int hscif2_clk_mux[] = {
+ HSCK2_MARK,
+};
+static const unsigned int hscif2_ctrl_pins[] = {
+ /* HRTS2_N, HCTS2_N */
+ RCAR_GP_PIN(4, 10), RCAR_GP_PIN(4, 12),
+};
+static const unsigned int hscif2_ctrl_mux[] = {
+ HRTS2_N_MARK, HCTS2_N_MARK,
+};
+
+/* - HSCIF3_A ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_a_pins[] = {
+ /* HRX3_A, HTX3_A */
+ RCAR_GP_PIN(1, 24), RCAR_GP_PIN(1, 28),
+};
+static const unsigned int hscif3_data_a_mux[] = {
+ HRX3_A_MARK, HTX3_A_MARK,
+};
+static const unsigned int hscif3_clk_a_pins[] = {
+ /* HSCK3_A */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int hscif3_clk_a_mux[] = {
+ HSCK3_A_MARK,
+};
+static const unsigned int hscif3_ctrl_a_pins[] = {
+ /* HRTS3_N_A, HCTS3_N_A */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 27),
+};
+static const unsigned int hscif3_ctrl_a_mux[] = {
+ HRTS3_N_A_MARK, HCTS3_N_A_MARK,
+};
+
+/* - HSCIF3_B ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_b_pins[] = {
+ /* HRX3_B, HTX3_B */
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 0),
+};
+static const unsigned int hscif3_data_b_mux[] = {
+ HRX3_B_MARK, HTX3_B_MARK,
+};
+static const unsigned int hscif3_clk_b_pins[] = {
+ /* HSCK3_B */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int hscif3_clk_b_mux[] = {
+ HSCK3_B_MARK,
+};
+static const unsigned int hscif3_ctrl_b_pins[] = {
+ /* HRTS3_N_B, HCTS3_N_B */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 1),
+};
+static const unsigned int hscif3_ctrl_b_mux[] = {
+ HRTS3_N_B_MARK, HCTS3_N_B_MARK,
+};
+
+/* - I2C0 ------------------------------------------------------------------- */
+static const unsigned int i2c0_pins[] = {
+ /* SDA0, SCL0 */
+ RCAR_GP_PIN(4, 1), RCAR_GP_PIN(4, 0),
+};
+static const unsigned int i2c0_mux[] = {
+ SDA0_MARK, SCL0_MARK,
+};
+
+/* - I2C1 ------------------------------------------------------------------- */
+static const unsigned int i2c1_pins[] = {
+ /* SDA1, SCL1 */
+ RCAR_GP_PIN(4, 3), RCAR_GP_PIN(4, 2),
+};
+static const unsigned int i2c1_mux[] = {
+ SDA1_MARK, SCL1_MARK,
+};
+
+/* - I2C2 ------------------------------------------------------------------- */
+static const unsigned int i2c2_pins[] = {
+ /* SDA2, SCL2 */
+ RCAR_GP_PIN(4, 5), RCAR_GP_PIN(4, 4),
+};
+static const unsigned int i2c2_mux[] = {
+ SDA2_MARK, SCL2_MARK,
+};
+
+/* - I2C3 ------------------------------------------------------------------- */
+static const unsigned int i2c3_pins[] = {
+ /* SDA3, SCL3 */
+ RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 6),
+};
+static const unsigned int i2c3_mux[] = {
+ SDA3_MARK, SCL3_MARK,
+};
+
+/* - MMC -------------------------------------------------------------------- */
+static const unsigned int mmc_data_pins[] = {
+ /* MMC_SD_D[0:3], MMC_D[4:7] */
+ RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 0),
+ RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 5),
+ RCAR_GP_PIN(3, 7), RCAR_GP_PIN(3, 6),
+ RCAR_GP_PIN(3, 9), RCAR_GP_PIN(3, 8),
+};
+static const unsigned int mmc_data_mux[] = {
+ MMC_SD_D0_MARK, MMC_SD_D1_MARK,
+ MMC_SD_D2_MARK, MMC_SD_D3_MARK,
+ MMC_D4_MARK, MMC_D5_MARK,
+ MMC_D6_MARK, MMC_D7_MARK,
+};
+static const unsigned int mmc_ctrl_pins[] = {
+ /* MMC_SD_CLK, MMC_SD_CMD */
+ RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 10),
+};
+static const unsigned int mmc_ctrl_mux[] = {
+ MMC_SD_CLK_MARK, MMC_SD_CMD_MARK,
+};
+static const unsigned int mmc_cd_pins[] = {
+ /* SD_CD */
+ RCAR_GP_PIN(3, 11),
+};
+static const unsigned int mmc_cd_mux[] = {
+ SD_CD_MARK,
+};
+static const unsigned int mmc_wp_pins[] = {
+ /* SD_WP */
+ RCAR_GP_PIN(3, 12),
+};
+static const unsigned int mmc_wp_mux[] = {
+ SD_WP_MARK,
+};
+static const unsigned int mmc_ds_pins[] = {
+ /* MMC_DS */
+ RCAR_GP_PIN(3, 4),
+};
+static const unsigned int mmc_ds_mux[] = {
+ MMC_DS_MARK,
+};
+
+/* - MSIOF0 ----------------------------------------------------------------- */
+static const unsigned int msiof0_clk_pins[] = {
+ /* MSIOF0_SCK */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int msiof0_clk_mux[] = {
+ MSIOF0_SCK_MARK,
+};
+static const unsigned int msiof0_sync_pins[] = {
+ /* MSIOF0_SYNC */
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int msiof0_sync_mux[] = {
+ MSIOF0_SYNC_MARK,
+};
+static const unsigned int msiof0_ss1_pins[] = {
+ /* MSIOF0_SS1 */
+ RCAR_GP_PIN(1, 7),
+};
+static const unsigned int msiof0_ss1_mux[] = {
+ MSIOF0_SS1_MARK,
+};
+static const unsigned int msiof0_ss2_pins[] = {
+ /* MSIOF0_SS2 */
+ RCAR_GP_PIN(1, 6),
+};
+static const unsigned int msiof0_ss2_mux[] = {
+ MSIOF0_SS2_MARK,
+};
+static const unsigned int msiof0_txd_pins[] = {
+ /* MSIOF0_TXD */
+ RCAR_GP_PIN(1, 9),
+};
+static const unsigned int msiof0_txd_mux[] = {
+ MSIOF0_TXD_MARK,
+};
+static const unsigned int msiof0_rxd_pins[] = {
+ /* MSIOF0_RXD */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int msiof0_rxd_mux[] = {
+ MSIOF0_RXD_MARK,
+};
+
+/* - MSIOF1 ----------------------------------------------------------------- */
+static const unsigned int msiof1_clk_pins[] = {
+ /* MSIOF1_SCK */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int msiof1_clk_mux[] = {
+ MSIOF1_SCK_MARK,
+};
+static const unsigned int msiof1_sync_pins[] = {
+ /* MSIOF1_SYNC */
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int msiof1_sync_mux[] = {
+ MSIOF1_SYNC_MARK,
+};
+static const unsigned int msiof1_ss1_pins[] = {
+ /* MSIOF1_SS1 */
+ RCAR_GP_PIN(1, 1),
+};
+static const unsigned int msiof1_ss1_mux[] = {
+ MSIOF1_SS1_MARK,
+};
+static const unsigned int msiof1_ss2_pins[] = {
+ /* MSIOF1_SS2 */
+ RCAR_GP_PIN(1, 0),
+};
+static const unsigned int msiof1_ss2_mux[] = {
+ MSIOF1_SS2_MARK,
+};
+static const unsigned int msiof1_txd_pins[] = {
+ /* MSIOF1_TXD */
+ RCAR_GP_PIN(1, 4),
+};
+static const unsigned int msiof1_txd_mux[] = {
+ MSIOF1_TXD_MARK,
+};
+static const unsigned int msiof1_rxd_pins[] = {
+ /* MSIOF1_RXD */
+ RCAR_GP_PIN(1, 5),
+};
+static const unsigned int msiof1_rxd_mux[] = {
+ MSIOF1_RXD_MARK,
+};
+
+/* - MSIOF2 ----------------------------------------------------------------- */
+static const unsigned int msiof2_clk_pins[] = {
+ /* MSIOF2_SCK */
+ RCAR_GP_PIN(0, 17),
+};
+static const unsigned int msiof2_clk_mux[] = {
+ MSIOF2_SCK_MARK,
+};
+static const unsigned int msiof2_sync_pins[] = {
+ /* MSIOF2_SYNC */
+ RCAR_GP_PIN(0, 15),
+};
+static const unsigned int msiof2_sync_mux[] = {
+ MSIOF2_SYNC_MARK,
+};
+static const unsigned int msiof2_ss1_pins[] = {
+ /* MSIOF2_SS1 */
+ RCAR_GP_PIN(0, 14),
+};
+static const unsigned int msiof2_ss1_mux[] = {
+ MSIOF2_SS1_MARK,
+};
+static const unsigned int msiof2_ss2_pins[] = {
+ /* MSIOF2_SS2 */
+ RCAR_GP_PIN(0, 13),
+};
+static const unsigned int msiof2_ss2_mux[] = {
+ MSIOF2_SS2_MARK,
+};
+static const unsigned int msiof2_txd_pins[] = {
+ /* MSIOF2_TXD */
+ RCAR_GP_PIN(0, 16),
+};
+static const unsigned int msiof2_txd_mux[] = {
+ MSIOF2_TXD_MARK,
+};
+static const unsigned int msiof2_rxd_pins[] = {
+ /* MSIOF2_RXD */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int msiof2_rxd_mux[] = {
+ MSIOF2_RXD_MARK,
+};
+
+/* - MSIOF3 ----------------------------------------------------------------- */
+static const unsigned int msiof3_clk_pins[] = {
+ /* MSIOF3_SCK */
+ RCAR_GP_PIN(0, 3),
+};
+static const unsigned int msiof3_clk_mux[] = {
+ MSIOF3_SCK_MARK,
+};
+static const unsigned int msiof3_sync_pins[] = {
+ /* MSIOF3_SYNC */
+ RCAR_GP_PIN(0, 6),
+};
+static const unsigned int msiof3_sync_mux[] = {
+ MSIOF3_SYNC_MARK,
+};
+static const unsigned int msiof3_ss1_pins[] = {
+ /* MSIOF3_SS1 */
+ RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof3_ss1_mux[] = {
+ MSIOF3_SS1_MARK,
+};
+static const unsigned int msiof3_ss2_pins[] = {
+ /* MSIOF3_SS2 */
+ RCAR_GP_PIN(0, 2),
+};
+static const unsigned int msiof3_ss2_mux[] = {
+ MSIOF3_SS2_MARK,
+};
+static const unsigned int msiof3_txd_pins[] = {
+ /* MSIOF3_TXD */
+ RCAR_GP_PIN(0, 4),
+};
+static const unsigned int msiof3_txd_mux[] = {
+ MSIOF3_TXD_MARK,
+};
+static const unsigned int msiof3_rxd_pins[] = {
+ /* MSIOF3_RXD */
+ RCAR_GP_PIN(0, 5),
+};
+static const unsigned int msiof3_rxd_mux[] = {
+ MSIOF3_RXD_MARK,
+};
+
+/* - MSIOF4 ----------------------------------------------------------------- */
+static const unsigned int msiof4_clk_pins[] = {
+ /* MSIOF4_SCK */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int msiof4_clk_mux[] = {
+ MSIOF4_SCK_MARK,
+};
+static const unsigned int msiof4_sync_pins[] = {
+ /* MSIOF4_SYNC */
+ RCAR_GP_PIN(1, 28),
+};
+static const unsigned int msiof4_sync_mux[] = {
+ MSIOF4_SYNC_MARK,
+};
+static const unsigned int msiof4_ss1_pins[] = {
+ /* MSIOF4_SS1 */
+ RCAR_GP_PIN(1, 23),
+};
+static const unsigned int msiof4_ss1_mux[] = {
+ MSIOF4_SS1_MARK,
+};
+static const unsigned int msiof4_ss2_pins[] = {
+ /* MSIOF4_SS2 */
+ RCAR_GP_PIN(1, 24),
+};
+static const unsigned int msiof4_ss2_mux[] = {
+ MSIOF4_SS2_MARK,
+};
+static const unsigned int msiof4_txd_pins[] = {
+ /* MSIOF4_TXD */
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int msiof4_txd_mux[] = {
+ MSIOF4_TXD_MARK,
+};
+static const unsigned int msiof4_rxd_pins[] = {
+ /* MSIOF4_RXD */
+ RCAR_GP_PIN(1, 27),
+};
+static const unsigned int msiof4_rxd_mux[] = {
+ MSIOF4_RXD_MARK,
+};
+
+/* - MSIOF5 ----------------------------------------------------------------- */
+static const unsigned int msiof5_clk_pins[] = {
+ /* MSIOF5_SCK */
+ RCAR_GP_PIN(0, 11),
+};
+static const unsigned int msiof5_clk_mux[] = {
+ MSIOF5_SCK_MARK,
+};
+static const unsigned int msiof5_sync_pins[] = {
+ /* MSIOF5_SYNC */
+ RCAR_GP_PIN(0, 9),
+};
+static const unsigned int msiof5_sync_mux[] = {
+ MSIOF5_SYNC_MARK,
+};
+static const unsigned int msiof5_ss1_pins[] = {
+ /* MSIOF5_SS1 */
+ RCAR_GP_PIN(0, 8),
+};
+static const unsigned int msiof5_ss1_mux[] = {
+ MSIOF5_SS1_MARK,
+};
+static const unsigned int msiof5_ss2_pins[] = {
+ /* MSIOF5_SS2 */
+ RCAR_GP_PIN(0, 7),
+};
+static const unsigned int msiof5_ss2_mux[] = {
+ MSIOF5_SS2_MARK,
+};
+static const unsigned int msiof5_txd_pins[] = {
+ /* MSIOF5_TXD */
+ RCAR_GP_PIN(0, 10),
+};
+static const unsigned int msiof5_txd_mux[] = {
+ MSIOF5_TXD_MARK,
+};
+static const unsigned int msiof5_rxd_pins[] = {
+ /* MSIOF5_RXD */
+ RCAR_GP_PIN(0, 12),
+};
+static const unsigned int msiof5_rxd_mux[] = {
+ MSIOF5_RXD_MARK,
+};
+
+/* - PCIE ------------------------------------------------------------------- */
+static const unsigned int pcie0_clkreq_n_pins[] = {
+ /* PCIE0_CLKREQ_N */
+ RCAR_GP_PIN(4, 21),
+};
+static const unsigned int pcie0_clkreq_n_mux[] = {
+ PCIE0_CLKREQ_N_MARK,
+};
+
+/* - PWM0_A ------------------------------------------------------------------- */
+static const unsigned int pwm0_a_pins[] = {
+ /* PWM0_A */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int pwm0_a_mux[] = {
+ PWM0_A_MARK,
+};
+
+/* - PWM0_B ------------------------------------------------------------------- */
+static const unsigned int pwm0_b_pins[] = {
+ /* PWM0_B */
+ RCAR_GP_PIN(1, 14),
+};
+static const unsigned int pwm0_b_mux[] = {
+ PWM0_B_MARK,
+};
+
+/* - PWM1_A ------------------------------------------------------------------- */
+static const unsigned int pwm1_a_pins[] = {
+ /* PWM1_A */
+ RCAR_GP_PIN(3, 13),
+};
+static const unsigned int pwm1_a_mux[] = {
+ PWM1_A_MARK,
+};
+
+/* - PWM1_B ------------------------------------------------------------------- */
+static const unsigned int pwm1_b_pins[] = {
+ /* PWM1_B */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int pwm1_b_mux[] = {
+ PWM1_B_MARK,
+};
+
+/* - PWM1_C ------------------------------------------------------------------- */
+static const unsigned int pwm1_c_pins[] = {
+ /* PWM1_C */
+ RCAR_GP_PIN(2, 17),
+};
+static const unsigned int pwm1_c_mux[] = {
+ PWM1_C_MARK,
+};
+
+/* - PWM2_A ------------------------------------------------------------------- */
+static const unsigned int pwm2_a_pins[] = {
+ /* PWM2_A */
+ RCAR_GP_PIN(3, 14),
+};
+static const unsigned int pwm2_a_mux[] = {
+ PWM2_A_MARK,
+};
+
+/* - PWM2_B ------------------------------------------------------------------- */
+static const unsigned int pwm2_b_pins[] = {
+ /* PWM2_B */
+ RCAR_GP_PIN(2, 14),
+};
+static const unsigned int pwm2_b_mux[] = {
+ PWM2_B_MARK,
+};
+
+/* - PWM2_C ------------------------------------------------------------------- */
+static const unsigned int pwm2_c_pins[] = {
+ /* PWM2_C */
+ RCAR_GP_PIN(2, 19),
+};
+static const unsigned int pwm2_c_mux[] = {
+ PWM2_C_MARK,
+};
+
+/* - PWM3_A ------------------------------------------------------------------- */
+static const unsigned int pwm3_a_pins[] = {
+ /* PWM3_A */
+ RCAR_GP_PIN(4, 14),
+};
+static const unsigned int pwm3_a_mux[] = {
+ PWM3_A_MARK,
+};
+
+/* - PWM3_B ------------------------------------------------------------------- */
+static const unsigned int pwm3_b_pins[] = {
+ /* PWM3_B */
+ RCAR_GP_PIN(2, 15),
+};
+static const unsigned int pwm3_b_mux[] = {
+ PWM3_B_MARK,
+};
+
+/* - PWM3_C ------------------------------------------------------------------- */
+static const unsigned int pwm3_c_pins[] = {
+ /* PWM3_C */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int pwm3_c_mux[] = {
+ PWM3_C_MARK,
+};
+
+/* - PWM4 ------------------------------------------------------------------- */
+static const unsigned int pwm4_pins[] = {
+ /* PWM4 */
+ RCAR_GP_PIN(4, 15),
+};
+static const unsigned int pwm4_mux[] = {
+ PWM4_MARK,
+};
+
+/* - QSPI0 ------------------------------------------------------------------ */
+static const unsigned int qspi0_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(3, 20), RCAR_GP_PIN(3, 15),
+};
+static const unsigned int qspi0_ctrl_mux[] = {
+ QSPI0_SPCLK_MARK, QSPI0_SSL_MARK,
+};
+static const unsigned int qspi0_data_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(3, 19), RCAR_GP_PIN(3, 18),
+ RCAR_GP_PIN(3, 17), RCAR_GP_PIN(3, 16),
+};
+static const unsigned int qspi0_data_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+ QSPI0_IO2_MARK, QSPI0_IO3_MARK
+};
+
+/* - QSPI1 ------------------------------------------------------------------ */
+static const unsigned int qspi1_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 25),
+};
+static const unsigned int qspi1_ctrl_mux[] = {
+ QSPI1_SPCLK_MARK, QSPI1_SSL_MARK,
+};
+static const unsigned int qspi1_data_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(3, 21), RCAR_GP_PIN(3, 23),
+ RCAR_GP_PIN(3, 24), RCAR_GP_PIN(3, 26),
+};
+static const unsigned int qspi1_data_mux[] = {
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+ QSPI1_IO2_MARK, QSPI1_IO3_MARK
+};
+
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_pins[] = {
+ /* RX0, TX0 */
+ RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 12),
+};
+static const unsigned int scif0_data_mux[] = {
+ RX0_MARK, TX0_MARK,
+};
+static const unsigned int scif0_clk_pins[] = {
+ /* SCK0 */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int scif0_clk_mux[] = {
+ SCK0_MARK,
+};
+static const unsigned int scif0_ctrl_pins[] = {
+ /* RTS0_N, CTS0_N */
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int scif0_ctrl_mux[] = {
+ RTS0_N_MARK, CTS0_N_MARK,
+};
+
+/* - SCIF1_A ------------------------------------------------------------------ */
+static const unsigned int scif1_data_a_pins[] = {
+ /* RX1_A, TX1_A */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int scif1_data_a_mux[] = {
+ RX1_A_MARK, TX1_A_MARK,
+};
+static const unsigned int scif1_clk_a_pins[] = {
+ /* SCK1_A */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int scif1_clk_a_mux[] = {
+ SCK1_A_MARK,
+};
+static const unsigned int scif1_ctrl_a_pins[] = {
+ /* RTS1_N_A, CTS1_N_A */
+ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+};
+static const unsigned int scif1_ctrl_a_mux[] = {
+ RTS1_N_A_MARK, CTS1_N_A_MARK,
+};
+
+/* - SCIF1_B ------------------------------------------------------------------ */
+static const unsigned int scif1_data_b_pins[] = {
+ /* RX1_B, TX1_B */
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int scif1_data_b_mux[] = {
+ RX1_B_MARK, TX1_B_MARK,
+};
+static const unsigned int scif1_clk_b_pins[] = {
+ /* SCK1_B */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int scif1_clk_b_mux[] = {
+ SCK1_B_MARK,
+};
+static const unsigned int scif1_ctrl_b_pins[] = {
+ /* RTS1_N_B, CTS1_N_B */
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int scif1_ctrl_b_mux[] = {
+ RTS1_N_B_MARK, CTS1_N_B_MARK,
+};
+
+/* - SCIF3_A ------------------------------------------------------------------ */
+static const unsigned int scif3_data_a_pins[] = {
+ /* RX3_A, TX3_A */
+ RCAR_GP_PIN(1, 27), RCAR_GP_PIN(1, 28),
+};
+static const unsigned int scif3_data_a_mux[] = {
+ RX3_A_MARK, TX3_A_MARK,
+};
+static const unsigned int scif3_clk_a_pins[] = {
+ /* SCK3_A */
+ RCAR_GP_PIN(1, 24),
+};
+static const unsigned int scif3_clk_a_mux[] = {
+ SCK3_A_MARK,
+};
+static const unsigned int scif3_ctrl_a_pins[] = {
+ /* RTS3_N_A, CTS3_N_A */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int scif3_ctrl_a_mux[] = {
+ RTS3_N_A_MARK, CTS3_N_A_MARK,
+};
+
+/* - SCIF3_B ------------------------------------------------------------------ */
+static const unsigned int scif3_data_b_pins[] = {
+ /* RX3_B, TX3_B */
+ RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
+};
+static const unsigned int scif3_data_b_mux[] = {
+ RX3_B_MARK, TX3_B_MARK,
+};
+static const unsigned int scif3_clk_b_pins[] = {
+ /* SCK3_B */
+ RCAR_GP_PIN(1, 4),
+};
+static const unsigned int scif3_clk_b_mux[] = {
+ SCK3_B_MARK,
+};
+static const unsigned int scif3_ctrl_b_pins[] = {
+ /* RTS3_N_B, CTS3_N_B */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+};
+static const unsigned int scif3_ctrl_b_mux[] = {
+ RTS3_N_B_MARK, CTS3_N_B_MARK,
+};
+
+/* - SCIF4 ------------------------------------------------------------------ */
+static const unsigned int scif4_data_pins[] = {
+ /* RX4, TX4 */
+ RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 12),
+};
+static const unsigned int scif4_data_mux[] = {
+ RX4_MARK, TX4_MARK,
+};
+static const unsigned int scif4_clk_pins[] = {
+ /* SCK4 */
+ RCAR_GP_PIN(4, 8),
+};
+static const unsigned int scif4_clk_mux[] = {
+ SCK4_MARK,
+};
+static const unsigned int scif4_ctrl_pins[] = {
+ /* RTS4_N, CTS4_N */
+ RCAR_GP_PIN(4, 10), RCAR_GP_PIN(4, 9),
+};
+static const unsigned int scif4_ctrl_mux[] = {
+ RTS4_N_MARK, CTS4_N_MARK,
+};
+
+/* - SCIF Clock ------------------------------------------------------------- */
+static const unsigned int scif_clk_pins[] = {
+ /* SCIF_CLK */
+ RCAR_GP_PIN(1, 17),
+};
+static const unsigned int scif_clk_mux[] = {
+ SCIF_CLK_MARK,
+};
+
+static const unsigned int scif_clk2_pins[] = {
+ /* SCIF_CLK2 */
+ RCAR_GP_PIN(4, 11),
+};
+static const unsigned int scif_clk2_mux[] = {
+ SCIF_CLK2_MARK,
+};
+
+/* - SSI ------------------------------------------------- */
+static const unsigned int ssi_data_pins[] = {
+ /* SSI_SD */
+ RCAR_GP_PIN(1, 20),
+};
+static const unsigned int ssi_data_mux[] = {
+ SSI_SD_MARK,
+};
+static const unsigned int ssi_ctrl_pins[] = {
+ /* SSI_SCK, SSI_WS */
+ RCAR_GP_PIN(1, 18), RCAR_GP_PIN(1, 19),
+};
+static const unsigned int ssi_ctrl_mux[] = {
+ SSI_SCK_MARK, SSI_WS_MARK,
+};
+
+/* - TPU_A ------------------------------------------------------------------- */
+static const unsigned int tpu_to0_a_pins[] = {
+ /* TPU0TO0_A */
+ RCAR_GP_PIN(2, 8),
+};
+static const unsigned int tpu_to0_a_mux[] = {
+ TPU0TO0_A_MARK,
+};
+static const unsigned int tpu_to1_a_pins[] = {
+ /* TPU0TO1_A */
+ RCAR_GP_PIN(2, 7),
+};
+static const unsigned int tpu_to1_a_mux[] = {
+ TPU0TO1_A_MARK,
+};
+static const unsigned int tpu_to2_a_pins[] = {
+ /* TPU0TO2_A */
+ RCAR_GP_PIN(2, 12),
+};
+static const unsigned int tpu_to2_a_mux[] = {
+ TPU0TO2_A_MARK,
+};
+static const unsigned int tpu_to3_a_pins[] = {
+ /* TPU0TO3_A */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int tpu_to3_a_mux[] = {
+ TPU0TO3_A_MARK,
+};
+
+/* - TPU_B ------------------------------------------------------------------- */
+static const unsigned int tpu_to0_b_pins[] = {
+ /* TPU0TO0_B */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int tpu_to0_b_mux[] = {
+ TPU0TO0_B_MARK,
+};
+static const unsigned int tpu_to1_b_pins[] = {
+ /* TPU0TO1_B */
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int tpu_to1_b_mux[] = {
+ TPU0TO1_B_MARK,
+};
+static const unsigned int tpu_to2_b_pins[] = {
+ /* TPU0TO2_B */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int tpu_to2_b_mux[] = {
+ TPU0TO2_B_MARK,
+};
+static const unsigned int tpu_to3_b_pins[] = {
+ /* TPU0TO3_B */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int tpu_to3_b_mux[] = {
+ TPU0TO3_B_MARK,
+};
+
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(audio_clkin),
+ SH_PFC_PIN_GROUP(audio_clkout),
+
+ SH_PFC_PIN_GROUP(avb0_link),
+ SH_PFC_PIN_GROUP(avb0_magic),
+ SH_PFC_PIN_GROUP(avb0_phy_int),
+ SH_PFC_PIN_GROUP(avb0_mdio),
+ SH_PFC_PIN_GROUP(avb0_rgmii),
+ SH_PFC_PIN_GROUP(avb0_txcrefclk),
+ SH_PFC_PIN_GROUP(avb0_avtp_pps),
+ SH_PFC_PIN_GROUP(avb0_avtp_capture),
+ SH_PFC_PIN_GROUP(avb0_avtp_match),
+
+ SH_PFC_PIN_GROUP(avb1_link),
+ SH_PFC_PIN_GROUP(avb1_magic),
+ SH_PFC_PIN_GROUP(avb1_phy_int),
+ SH_PFC_PIN_GROUP(avb1_mdio),
+ SH_PFC_PIN_GROUP(avb1_rgmii),
+ SH_PFC_PIN_GROUP(avb1_txcrefclk),
+ SH_PFC_PIN_GROUP(avb1_avtp_pps),
+ SH_PFC_PIN_GROUP(avb1_avtp_capture),
+ SH_PFC_PIN_GROUP(avb1_avtp_match),
+
+ SH_PFC_PIN_GROUP(avb2_link),
+ SH_PFC_PIN_GROUP(avb2_magic),
+ SH_PFC_PIN_GROUP(avb2_phy_int),
+ SH_PFC_PIN_GROUP(avb2_mdio),
+ SH_PFC_PIN_GROUP(avb2_rgmii),
+ SH_PFC_PIN_GROUP(avb2_txcrefclk),
+ SH_PFC_PIN_GROUP(avb2_avtp_pps),
+ SH_PFC_PIN_GROUP(avb2_avtp_capture),
+ SH_PFC_PIN_GROUP(avb2_avtp_match),
+
+ SH_PFC_PIN_GROUP(canfd0_data),
+ SH_PFC_PIN_GROUP(canfd1_data),
+ SH_PFC_PIN_GROUP(canfd2_data),
+ SH_PFC_PIN_GROUP(canfd3_data),
+ SH_PFC_PIN_GROUP(can_clk),
+
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data_a),
+ SH_PFC_PIN_GROUP(hscif1_clk_a),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_clk_b),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data),
+ SH_PFC_PIN_GROUP(hscif2_clk),
+ SH_PFC_PIN_GROUP(hscif2_ctrl),
+ SH_PFC_PIN_GROUP(hscif3_data_a),
+ SH_PFC_PIN_GROUP(hscif3_clk_a),
+ SH_PFC_PIN_GROUP(hscif3_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif3_data_b),
+ SH_PFC_PIN_GROUP(hscif3_clk_b),
+ SH_PFC_PIN_GROUP(hscif3_ctrl_b),
+
+ SH_PFC_PIN_GROUP(i2c0),
+ SH_PFC_PIN_GROUP(i2c1),
+ SH_PFC_PIN_GROUP(i2c2),
+ SH_PFC_PIN_GROUP(i2c3),
+
+ BUS_DATA_PIN_GROUP(mmc_data, 1),
+ BUS_DATA_PIN_GROUP(mmc_data, 4),
+ BUS_DATA_PIN_GROUP(mmc_data, 8),
+ SH_PFC_PIN_GROUP(mmc_ctrl),
+ SH_PFC_PIN_GROUP(mmc_cd),
+ SH_PFC_PIN_GROUP(mmc_wp),
+ SH_PFC_PIN_GROUP(mmc_ds),
+
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_txd),
+ SH_PFC_PIN_GROUP(msiof0_rxd),
+
+ SH_PFC_PIN_GROUP(msiof1_clk),
+ SH_PFC_PIN_GROUP(msiof1_sync),
+ SH_PFC_PIN_GROUP(msiof1_ss1),
+ SH_PFC_PIN_GROUP(msiof1_ss2),
+ SH_PFC_PIN_GROUP(msiof1_txd),
+ SH_PFC_PIN_GROUP(msiof1_rxd),
+
+ SH_PFC_PIN_GROUP(msiof2_clk),
+ SH_PFC_PIN_GROUP(msiof2_sync),
+ SH_PFC_PIN_GROUP(msiof2_ss1),
+ SH_PFC_PIN_GROUP(msiof2_ss2),
+ SH_PFC_PIN_GROUP(msiof2_txd),
+ SH_PFC_PIN_GROUP(msiof2_rxd),
+
+ SH_PFC_PIN_GROUP(msiof3_clk),
+ SH_PFC_PIN_GROUP(msiof3_sync),
+ SH_PFC_PIN_GROUP(msiof3_ss1),
+ SH_PFC_PIN_GROUP(msiof3_ss2),
+ SH_PFC_PIN_GROUP(msiof3_txd),
+ SH_PFC_PIN_GROUP(msiof3_rxd),
+
+ SH_PFC_PIN_GROUP(msiof4_clk),
+ SH_PFC_PIN_GROUP(msiof4_sync),
+ SH_PFC_PIN_GROUP(msiof4_ss1),
+ SH_PFC_PIN_GROUP(msiof4_ss2),
+ SH_PFC_PIN_GROUP(msiof4_txd),
+ SH_PFC_PIN_GROUP(msiof4_rxd),
+
+ SH_PFC_PIN_GROUP(msiof5_clk),
+ SH_PFC_PIN_GROUP(msiof5_sync),
+ SH_PFC_PIN_GROUP(msiof5_ss1),
+ SH_PFC_PIN_GROUP(msiof5_ss2),
+ SH_PFC_PIN_GROUP(msiof5_txd),
+ SH_PFC_PIN_GROUP(msiof5_rxd),
+
+ SH_PFC_PIN_GROUP(pcie0_clkreq_n),
+
+ SH_PFC_PIN_GROUP(pwm0_a),
+ SH_PFC_PIN_GROUP(pwm0_b),
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm1_c),
+ SH_PFC_PIN_GROUP(pwm2_a),
+ SH_PFC_PIN_GROUP(pwm2_b),
+ SH_PFC_PIN_GROUP(pwm2_c),
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm3_c),
+ SH_PFC_PIN_GROUP(pwm4),
+
+ SH_PFC_PIN_GROUP(qspi0_ctrl),
+ BUS_DATA_PIN_GROUP(qspi0_data, 2),
+ BUS_DATA_PIN_GROUP(qspi0_data, 4),
+ SH_PFC_PIN_GROUP(qspi1_ctrl),
+ BUS_DATA_PIN_GROUP(qspi1_data, 2),
+ BUS_DATA_PIN_GROUP(qspi1_data, 4),
+
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_clk),
+ SH_PFC_PIN_GROUP(scif0_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data_a),
+ SH_PFC_PIN_GROUP(scif1_clk_a),
+ SH_PFC_PIN_GROUP(scif1_ctrl_a),
+ SH_PFC_PIN_GROUP(scif1_data_b),
+ SH_PFC_PIN_GROUP(scif1_clk_b),
+ SH_PFC_PIN_GROUP(scif1_ctrl_b),
+ SH_PFC_PIN_GROUP(scif3_data_a),
+ SH_PFC_PIN_GROUP(scif3_clk_a),
+ SH_PFC_PIN_GROUP(scif3_ctrl_a),
+ SH_PFC_PIN_GROUP(scif3_data_b),
+ SH_PFC_PIN_GROUP(scif3_clk_b),
+ SH_PFC_PIN_GROUP(scif3_ctrl_b),
+ SH_PFC_PIN_GROUP(scif4_data),
+ SH_PFC_PIN_GROUP(scif4_clk),
+ SH_PFC_PIN_GROUP(scif4_ctrl),
+ SH_PFC_PIN_GROUP(scif_clk),
+ SH_PFC_PIN_GROUP(scif_clk2),
+
+ SH_PFC_PIN_GROUP(ssi_data),
+ SH_PFC_PIN_GROUP(ssi_ctrl),
+
+ SH_PFC_PIN_GROUP(tpu_to0_a),
+ SH_PFC_PIN_GROUP(tpu_to0_b),
+ SH_PFC_PIN_GROUP(tpu_to1_a),
+ SH_PFC_PIN_GROUP(tpu_to1_b),
+ SH_PFC_PIN_GROUP(tpu_to2_a),
+ SH_PFC_PIN_GROUP(tpu_to2_b),
+ SH_PFC_PIN_GROUP(tpu_to3_a),
+ SH_PFC_PIN_GROUP(tpu_to3_b),
+};
+
+static const char * const audio_clk_groups[] = {
+ "audio_clkin",
+ "audio_clkout",
+};
+
+static const char * const avb0_groups[] = {
+ "avb0_link",
+ "avb0_magic",
+ "avb0_phy_int",
+ "avb0_mdio",
+ "avb0_rgmii",
+ "avb0_txcrefclk",
+ "avb0_avtp_pps",
+ "avb0_avtp_capture",
+ "avb0_avtp_match",
+};
+
+static const char * const avb1_groups[] = {
+ "avb1_link",
+ "avb1_magic",
+ "avb1_phy_int",
+ "avb1_mdio",
+ "avb1_rgmii",
+ "avb1_txcrefclk",
+ "avb1_avtp_pps",
+ "avb1_avtp_capture",
+ "avb1_avtp_match",
+};
+
+static const char * const avb2_groups[] = {
+ "avb2_link",
+ "avb2_magic",
+ "avb2_phy_int",
+ "avb2_mdio",
+ "avb2_rgmii",
+ "avb2_txcrefclk",
+ "avb2_avtp_pps",
+ "avb2_avtp_capture",
+ "avb2_avtp_match",
+};
+
+static const char * const canfd0_groups[] = {
+ "canfd0_data",
+};
+
+static const char * const canfd1_groups[] = {
+ "canfd1_data",
+};
+
+static const char * const canfd2_groups[] = {
+ "canfd2_data",
+};
+
+static const char * const canfd3_groups[] = {
+ "canfd3_data",
+};
+
+static const char * const can_clk_groups[] = {
+ "can_clk",
+};
+
+static const char * const hscif0_groups[] = {
+ "hscif0_data",
+ "hscif0_clk",
+ "hscif0_ctrl",
+};
+
+static const char * const hscif1_groups[] = {
+ "hscif1_data_a",
+ "hscif1_clk_a",
+ "hscif1_ctrl_a",
+ "hscif1_data_b",
+ "hscif1_clk_b",
+ "hscif1_ctrl_b",
+};
+
+static const char * const hscif2_groups[] = {
+ "hscif2_data",
+ "hscif2_clk",
+ "hscif2_ctrl",
+};
+
+static const char * const hscif3_groups[] = {
+ "hscif3_data_a",
+ "hscif3_clk_a",
+ "hscif3_ctrl_a",
+ "hscif3_data_b",
+ "hscif3_clk_b",
+ "hscif3_ctrl_b",
+};
+
+static const char * const i2c0_groups[] = {
+ "i2c0",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2",
+};
+
+static const char * const i2c3_groups[] = {
+ "i2c3",
+};
+
+static const char * const mmc_groups[] = {
+ "mmc_data1",
+ "mmc_data4",
+ "mmc_data8",
+ "mmc_ctrl",
+ "mmc_cd",
+ "mmc_wp",
+ "mmc_ds",
+};
+
+static const char * const msiof0_groups[] = {
+ "msiof0_clk",
+ "msiof0_sync",
+ "msiof0_ss1",
+ "msiof0_ss2",
+ "msiof0_txd",
+ "msiof0_rxd",
+};
+
+static const char * const msiof1_groups[] = {
+ "msiof1_clk",
+ "msiof1_sync",
+ "msiof1_ss1",
+ "msiof1_ss2",
+ "msiof1_txd",
+ "msiof1_rxd",
+};
+
+static const char * const msiof2_groups[] = {
+ "msiof2_clk",
+ "msiof2_sync",
+ "msiof2_ss1",
+ "msiof2_ss2",
+ "msiof2_txd",
+ "msiof2_rxd",
+};
+
+static const char * const msiof3_groups[] = {
+ "msiof3_clk",
+ "msiof3_sync",
+ "msiof3_ss1",
+ "msiof3_ss2",
+ "msiof3_txd",
+ "msiof3_rxd",
+};
+
+static const char * const msiof4_groups[] = {
+ "msiof4_clk",
+ "msiof4_sync",
+ "msiof4_ss1",
+ "msiof4_ss2",
+ "msiof4_txd",
+ "msiof4_rxd",
+};
+
+static const char * const msiof5_groups[] = {
+ "msiof5_clk",
+ "msiof5_sync",
+ "msiof5_ss1",
+ "msiof5_ss2",
+ "msiof5_txd",
+ "msiof5_rxd",
+};
+
+static const char * const pcie_groups[] = {
+ "pcie0_clkreq_n",
+};
+
+static const char * const pwm0_groups[] = {
+ "pwm0_a",
+ "pwm0_b",
+};
+
+static const char * const pwm1_groups[] = {
+ "pwm1_a",
+ "pwm1_b",
+ "pwm1_c",
+};
+
+static const char * const pwm2_groups[] = {
+ "pwm2_a",
+ "pwm2_b",
+ "pwm2_c",
+};
+
+static const char * const pwm3_groups[] = {
+ "pwm3_a",
+ "pwm3_b",
+ "pwm3_c",
+};
+
+static const char * const pwm4_groups[] = {
+ "pwm4",
+};
+
+static const char * const qspi0_groups[] = {
+ "qspi0_ctrl",
+ "qspi0_data2",
+ "qspi0_data4",
+};
+
+static const char * const qspi1_groups[] = {
+ "qspi1_ctrl",
+ "qspi1_data2",
+ "qspi1_data4",
+};
+
+static const char * const scif0_groups[] = {
+ "scif0_data",
+ "scif0_clk",
+ "scif0_ctrl",
+};
+
+static const char * const scif1_groups[] = {
+ "scif1_data_a",
+ "scif1_clk_a",
+ "scif1_ctrl_a",
+ "scif1_data_b",
+ "scif1_clk_b",
+ "scif1_ctrl_b",
+};
+
+static const char * const scif3_groups[] = {
+ "scif3_data_a",
+ "scif3_clk_a",
+ "scif3_ctrl_a",
+ "scif3_data_b",
+ "scif3_clk_b",
+ "scif3_ctrl_b",
+};
+
+static const char * const scif4_groups[] = {
+ "scif4_data",
+ "scif4_clk",
+ "scif4_ctrl",
+};
+
+static const char * const scif_clk_groups[] = {
+ "scif_clk",
+};
+
+static const char * const scif_clk2_groups[] = {
+ "scif_clk2",
+};
+
+static const char * const ssi_groups[] = {
+ "ssi_data",
+ "ssi_ctrl",
+};
+
+static const char * const tpu_groups[] = {
+ "tpu_to0_a",
+ "tpu_to0_b",
+ "tpu_to1_a",
+ "tpu_to1_b",
+ "tpu_to2_a",
+ "tpu_to2_b",
+ "tpu_to3_a",
+ "tpu_to3_b",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(audio_clk),
+
+ SH_PFC_FUNCTION(avb0),
+ SH_PFC_FUNCTION(avb1),
+ SH_PFC_FUNCTION(avb2),
+
+ SH_PFC_FUNCTION(canfd0),
+ SH_PFC_FUNCTION(canfd1),
+ SH_PFC_FUNCTION(canfd2),
+ SH_PFC_FUNCTION(canfd3),
+ SH_PFC_FUNCTION(can_clk),
+
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(hscif3),
+
+ SH_PFC_FUNCTION(i2c0),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
+
+ SH_PFC_FUNCTION(mmc),
+
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(msiof4),
+ SH_PFC_FUNCTION(msiof5),
+
+ SH_PFC_FUNCTION(pcie),
+
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+
+ SH_PFC_FUNCTION(qspi0),
+ SH_PFC_FUNCTION(qspi1),
+
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(scif_clk2),
+
+ SH_PFC_FUNCTION(ssi),
+
+ SH_PFC_FUNCTION(tpu),
+};
+
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+#define F_(x, y) FN_##y
+#define FM(x) FN_##x
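+	/*
+	 * Editorial note: in the variable-width GROUP() field lists below, a
+	 * negative width marks a reserved bit range of that many bits.
+	 */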
+ { PINMUX_CFG_REG_VAR("GPSR0", 0xE6050040, 32,
+ GROUP(-13, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP0_31_19 RESERVED */
+ GP_0_18_FN, GPSR0_18,
+ GP_0_17_FN, GPSR0_17,
+ GP_0_16_FN, GPSR0_16,
+ GP_0_15_FN, GPSR0_15,
+ GP_0_14_FN, GPSR0_14,
+ GP_0_13_FN, GPSR0_13,
+ GP_0_12_FN, GPSR0_12,
+ GP_0_11_FN, GPSR0_11,
+ GP_0_10_FN, GPSR0_10,
+ GP_0_9_FN, GPSR0_9,
+ GP_0_8_FN, GPSR0_8,
+ GP_0_7_FN, GPSR0_7,
+ GP_0_6_FN, GPSR0_6,
+ GP_0_5_FN, GPSR0_5,
+ GP_0_4_FN, GPSR0_4,
+ GP_0_3_FN, GPSR0_3,
+ GP_0_2_FN, GPSR0_2,
+ GP_0_1_FN, GPSR0_1,
+ GP_0_0_FN, GPSR0_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR1", 0xE6050840, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ GP_1_29_FN, GPSR1_29,
+ GP_1_28_FN, GPSR1_28,
+ GP_1_27_FN, GPSR1_27,
+ GP_1_26_FN, GPSR1_26,
+ GP_1_25_FN, GPSR1_25,
+ GP_1_24_FN, GPSR1_24,
+ GP_1_23_FN, GPSR1_23,
+ GP_1_22_FN, GPSR1_22,
+ GP_1_21_FN, GPSR1_21,
+ GP_1_20_FN, GPSR1_20,
+ GP_1_19_FN, GPSR1_19,
+ GP_1_18_FN, GPSR1_18,
+ GP_1_17_FN, GPSR1_17,
+ GP_1_16_FN, GPSR1_16,
+ GP_1_15_FN, GPSR1_15,
+ GP_1_14_FN, GPSR1_14,
+ GP_1_13_FN, GPSR1_13,
+ GP_1_12_FN, GPSR1_12,
+ GP_1_11_FN, GPSR1_11,
+ GP_1_10_FN, GPSR1_10,
+ GP_1_9_FN, GPSR1_9,
+ GP_1_8_FN, GPSR1_8,
+ GP_1_7_FN, GPSR1_7,
+ GP_1_6_FN, GPSR1_6,
+ GP_1_5_FN, GPSR1_5,
+ GP_1_4_FN, GPSR1_4,
+ GP_1_3_FN, GPSR1_3,
+ GP_1_2_FN, GPSR1_2,
+ GP_1_1_FN, GPSR1_1,
+ GP_1_0_FN, GPSR1_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR2", 0xE6058040, 32,
+ GROUP(-12, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP2_31_20 RESERVED */
+ GP_2_19_FN, GPSR2_19,
+ /* GP2_18 RESERVED */
+ GP_2_17_FN, GPSR2_17,
+ /* GP2_16 RESERVED */
+ GP_2_15_FN, GPSR2_15,
+ GP_2_14_FN, GPSR2_14,
+ GP_2_13_FN, GPSR2_13,
+ GP_2_12_FN, GPSR2_12,
+ GP_2_11_FN, GPSR2_11,
+ GP_2_10_FN, GPSR2_10,
+ GP_2_9_FN, GPSR2_9,
+ GP_2_8_FN, GPSR2_8,
+ GP_2_7_FN, GPSR2_7,
+ GP_2_6_FN, GPSR2_6,
+ GP_2_5_FN, GPSR2_5,
+ GP_2_4_FN, GPSR2_4,
+ GP_2_3_FN, GPSR2_3,
+ GP_2_2_FN, GPSR2_2,
+ GP_2_1_FN, GPSR2_1,
+ GP_2_0_FN, GPSR2_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xE6058840, 32, 1, GROUP(
+ GP_3_31_FN, GPSR3_31,
+ GP_3_30_FN, GPSR3_30,
+ GP_3_29_FN, GPSR3_29,
+ GP_3_28_FN, GPSR3_28,
+ GP_3_27_FN, GPSR3_27,
+ GP_3_26_FN, GPSR3_26,
+ GP_3_25_FN, GPSR3_25,
+ GP_3_24_FN, GPSR3_24,
+ GP_3_23_FN, GPSR3_23,
+ GP_3_22_FN, GPSR3_22,
+ GP_3_21_FN, GPSR3_21,
+ GP_3_20_FN, GPSR3_20,
+ GP_3_19_FN, GPSR3_19,
+ GP_3_18_FN, GPSR3_18,
+ GP_3_17_FN, GPSR3_17,
+ GP_3_16_FN, GPSR3_16,
+ GP_3_15_FN, GPSR3_15,
+ GP_3_14_FN, GPSR3_14,
+ GP_3_13_FN, GPSR3_13,
+ GP_3_12_FN, GPSR3_12,
+ GP_3_11_FN, GPSR3_11,
+ GP_3_10_FN, GPSR3_10,
+ GP_3_9_FN, GPSR3_9,
+ GP_3_8_FN, GPSR3_8,
+ GP_3_7_FN, GPSR3_7,
+ GP_3_6_FN, GPSR3_6,
+ GP_3_5_FN, GPSR3_5,
+ GP_3_4_FN, GPSR3_4,
+ GP_3_3_FN, GPSR3_3,
+ GP_3_2_FN, GPSR3_2,
+ GP_3_1_FN, GPSR3_1,
+ GP_3_0_FN, GPSR3_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR4", 0xE6060040, 32,
+ GROUP(-7, 1, 1, -1, 1, -5, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP4_31_25 RESERVED */
+ GP_4_24_FN, GPSR4_24,
+ GP_4_23_FN, GPSR4_23,
+ /* GP4_22 RESERVED */
+ GP_4_21_FN, GPSR4_21,
+ /* GP4_20_16 RESERVED */
+ GP_4_15_FN, GPSR4_15,
+ GP_4_14_FN, GPSR4_14,
+ GP_4_13_FN, GPSR4_13,
+ GP_4_12_FN, GPSR4_12,
+ GP_4_11_FN, GPSR4_11,
+ GP_4_10_FN, GPSR4_10,
+ GP_4_9_FN, GPSR4_9,
+ GP_4_8_FN, GPSR4_8,
+ GP_4_7_FN, GPSR4_7,
+ GP_4_6_FN, GPSR4_6,
+ GP_4_5_FN, GPSR4_5,
+ GP_4_4_FN, GPSR4_4,
+ GP_4_3_FN, GPSR4_3,
+ GP_4_2_FN, GPSR4_2,
+ GP_4_1_FN, GPSR4_1,
+ GP_4_0_FN, GPSR4_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR5", 0xE6060840, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP5_31_21 RESERVED */
+ GP_5_20_FN, GPSR5_20,
+ GP_5_19_FN, GPSR5_19,
+ GP_5_18_FN, GPSR5_18,
+ GP_5_17_FN, GPSR5_17,
+ GP_5_16_FN, GPSR5_16,
+ GP_5_15_FN, GPSR5_15,
+ GP_5_14_FN, GPSR5_14,
+ GP_5_13_FN, GPSR5_13,
+ GP_5_12_FN, GPSR5_12,
+ GP_5_11_FN, GPSR5_11,
+ GP_5_10_FN, GPSR5_10,
+ GP_5_9_FN, GPSR5_9,
+ GP_5_8_FN, GPSR5_8,
+ GP_5_7_FN, GPSR5_7,
+ GP_5_6_FN, GPSR5_6,
+ GP_5_5_FN, GPSR5_5,
+ GP_5_4_FN, GPSR5_4,
+ GP_5_3_FN, GPSR5_3,
+ GP_5_2_FN, GPSR5_2,
+ GP_5_1_FN, GPSR5_1,
+ GP_5_0_FN, GPSR5_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR6", 0xE6061040, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP6_31_21 RESERVED */
+ GP_6_20_FN, GPSR6_20,
+ GP_6_19_FN, GPSR6_19,
+ GP_6_18_FN, GPSR6_18,
+ GP_6_17_FN, GPSR6_17,
+ GP_6_16_FN, GPSR6_16,
+ GP_6_15_FN, GPSR6_15,
+ GP_6_14_FN, GPSR6_14,
+ GP_6_13_FN, GPSR6_13,
+ GP_6_12_FN, GPSR6_12,
+ GP_6_11_FN, GPSR6_11,
+ GP_6_10_FN, GPSR6_10,
+ GP_6_9_FN, GPSR6_9,
+ GP_6_8_FN, GPSR6_8,
+ GP_6_7_FN, GPSR6_7,
+ GP_6_6_FN, GPSR6_6,
+ GP_6_5_FN, GPSR6_5,
+ GP_6_4_FN, GPSR6_4,
+ GP_6_3_FN, GPSR6_3,
+ GP_6_2_FN, GPSR6_2,
+ GP_6_1_FN, GPSR6_1,
+ GP_6_0_FN, GPSR6_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR7", 0xE6061840, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP7_31_21 RESERVED */
+ GP_7_20_FN, GPSR7_20,
+ GP_7_19_FN, GPSR7_19,
+ GP_7_18_FN, GPSR7_18,
+ GP_7_17_FN, GPSR7_17,
+ GP_7_16_FN, GPSR7_16,
+ GP_7_15_FN, GPSR7_15,
+ GP_7_14_FN, GPSR7_14,
+ GP_7_13_FN, GPSR7_13,
+ GP_7_12_FN, GPSR7_12,
+ GP_7_11_FN, GPSR7_11,
+ GP_7_10_FN, GPSR7_10,
+ GP_7_9_FN, GPSR7_9,
+ GP_7_8_FN, GPSR7_8,
+ GP_7_7_FN, GPSR7_7,
+ GP_7_6_FN, GPSR7_6,
+ GP_7_5_FN, GPSR7_5,
+ GP_7_4_FN, GPSR7_4,
+ GP_7_3_FN, GPSR7_3,
+ GP_7_2_FN, GPSR7_2,
+ GP_7_1_FN, GPSR7_1,
+ GP_7_0_FN, GPSR7_0, ))
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG("IP0SR0", 0xE6050060, 32, 4, GROUP(
+ IP0SR0_31_28
+ IP0SR0_27_24
+ IP0SR0_23_20
+ IP0SR0_19_16
+ IP0SR0_15_12
+ IP0SR0_11_8
+ IP0SR0_7_4
+ IP0SR0_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR0", 0xE6050064, 32, 4, GROUP(
+ IP1SR0_31_28
+ IP1SR0_27_24
+ IP1SR0_23_20
+ IP1SR0_19_16
+ IP1SR0_15_12
+ IP1SR0_11_8
+ IP1SR0_7_4
+ IP1SR0_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR0", 0xE6050068, 32,
+ GROUP(-20, 4, 4, 4),
+ GROUP(
+ /* IP2SR0_31_12 RESERVED */
+ IP2SR0_11_8
+ IP2SR0_7_4
+ IP2SR0_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR1", 0xE6050860, 32, 4, GROUP(
+ IP0SR1_31_28
+ IP0SR1_27_24
+ IP0SR1_23_20
+ IP0SR1_19_16
+ IP0SR1_15_12
+ IP0SR1_11_8
+ IP0SR1_7_4
+ IP0SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR1", 0xE6050864, 32, 4, GROUP(
+ IP1SR1_31_28
+ IP1SR1_27_24
+ IP1SR1_23_20
+ IP1SR1_19_16
+ IP1SR1_15_12
+ IP1SR1_11_8
+ IP1SR1_7_4
+ IP1SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP2SR1", 0xE6050868, 32, 4, GROUP(
+ IP2SR1_31_28
+ IP2SR1_27_24
+ IP2SR1_23_20
+ IP2SR1_19_16
+ IP2SR1_15_12
+ IP2SR1_11_8
+ IP2SR1_7_4
+ IP2SR1_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP3SR1", 0xE605086C, 32,
+ GROUP(-8, 4, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP3SR1_31_24 RESERVED */
+ IP3SR1_23_20
+ IP3SR1_19_16
+ IP3SR1_15_12
+ IP3SR1_11_8
+ IP3SR1_7_4
+ IP3SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR2", 0xE6058060, 32, 4, GROUP(
+ IP0SR2_31_28
+ IP0SR2_27_24
+ IP0SR2_23_20
+ IP0SR2_19_16
+ IP0SR2_15_12
+ IP0SR2_11_8
+ IP0SR2_7_4
+ IP0SR2_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR2", 0xE6058064, 32, 4, GROUP(
+ IP1SR2_31_28
+ IP1SR2_27_24
+ IP1SR2_23_20
+ IP1SR2_19_16
+ IP1SR2_15_12
+ IP1SR2_11_8
+ IP1SR2_7_4
+ IP1SR2_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR2", 0xE6058068, 32,
+ GROUP(-16, 4, -4, 4, -4),
+ GROUP(
+ /* IP2SR2_31_16 RESERVED */
+ IP2SR2_15_12
+ /* IP2SR2_11_8 RESERVED */
+ IP2SR2_7_4
+ /* IP2SR2_3_0 RESERVED */))
+ },
+ { PINMUX_CFG_REG("IP0SR3", 0xE6058860, 32, 4, GROUP(
+ IP0SR3_31_28
+ IP0SR3_27_24
+ IP0SR3_23_20
+ IP0SR3_19_16
+ IP0SR3_15_12
+ IP0SR3_11_8
+ IP0SR3_7_4
+ IP0SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR3", 0xE6058864, 32, 4, GROUP(
+ IP1SR3_31_28
+ IP1SR3_27_24
+ IP1SR3_23_20
+ IP1SR3_19_16
+ IP1SR3_15_12
+ IP1SR3_11_8
+ IP1SR3_7_4
+ IP1SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP2SR3", 0xE6058868, 32, 4, GROUP(
+ IP2SR3_31_28
+ IP2SR3_27_24
+ IP2SR3_23_20
+ IP2SR3_19_16
+ IP2SR3_15_12
+ IP2SR3_11_8
+ IP2SR3_7_4
+ IP2SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP3SR3", 0xE605886C, 32, 4, GROUP(
+ IP3SR3_31_28
+ IP3SR3_27_24
+ IP3SR3_23_20
+ IP3SR3_19_16
+ IP3SR3_15_12
+ IP3SR3_11_8
+ IP3SR3_7_4
+ IP3SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR4", 0xE6060060, 32, 4, GROUP(
+ IP0SR4_31_28
+ IP0SR4_27_24
+ IP0SR4_23_20
+ IP0SR4_19_16
+ IP0SR4_15_12
+ IP0SR4_11_8
+ IP0SR4_7_4
+ IP0SR4_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR4", 0xE6060064, 32, 4, GROUP(
+ IP1SR4_31_28
+ IP1SR4_27_24
+ IP1SR4_23_20
+ IP1SR4_19_16
+ IP1SR4_15_12
+ IP1SR4_11_8
+ IP1SR4_7_4
+ IP1SR4_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR4", 0xE6060068, 32,
+ GROUP(4, -4, 4, -20),
+ GROUP(
+ IP2SR4_31_28
+ /* IP2SR4_27_24 RESERVED */
+ IP2SR4_23_20
+ /* IP2SR4_19_0 RESERVED */))
+ },
+ { PINMUX_CFG_REG_VAR("IP3SR4", 0xE606006C, 32,
+ GROUP(-28, 4),
+ GROUP(
+ /* IP3SR4_31_4 RESERVED */
+ IP3SR4_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR5", 0xE6060860, 32, 4, GROUP(
+ IP0SR5_31_28
+ IP0SR5_27_24
+ IP0SR5_23_20
+ IP0SR5_19_16
+ IP0SR5_15_12
+ IP0SR5_11_8
+ IP0SR5_7_4
+ IP0SR5_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR5", 0xE6060864, 32, 4, GROUP(
+ IP1SR5_31_28
+ IP1SR5_27_24
+ IP1SR5_23_20
+ IP1SR5_19_16
+ IP1SR5_15_12
+ IP1SR5_11_8
+ IP1SR5_7_4
+ IP1SR5_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR5", 0xE6060868, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR5_31_20 RESERVED */
+ IP2SR5_19_16
+ IP2SR5_15_12
+ IP2SR5_11_8
+ IP2SR5_7_4
+ IP2SR5_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR6", 0xE6061060, 32, 4, GROUP(
+ IP0SR6_31_28
+ IP0SR6_27_24
+ IP0SR6_23_20
+ IP0SR6_19_16
+ IP0SR6_15_12
+ IP0SR6_11_8
+ IP0SR6_7_4
+ IP0SR6_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR6", 0xE6061064, 32, 4, GROUP(
+ IP1SR6_31_28
+ IP1SR6_27_24
+ IP1SR6_23_20
+ IP1SR6_19_16
+ IP1SR6_15_12
+ IP1SR6_11_8
+ IP1SR6_7_4
+ IP1SR6_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR6", 0xE6061068, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR6_31_20 RESERVED */
+ IP2SR6_19_16
+ IP2SR6_15_12
+ IP2SR6_11_8
+ IP2SR6_7_4
+ IP2SR6_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR7", 0xE6061860, 32, 4, GROUP(
+ IP0SR7_31_28
+ IP0SR7_27_24
+ IP0SR7_23_20
+ IP0SR7_19_16
+ IP0SR7_15_12
+ IP0SR7_11_8
+ IP0SR7_7_4
+ IP0SR7_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR7", 0xE6061864, 32, 4, GROUP(
+ IP1SR7_31_28
+ IP1SR7_27_24
+ IP1SR7_23_20
+ IP1SR7_19_16
+ IP1SR7_15_12
+ IP1SR7_11_8
+ IP1SR7_7_4
+ IP1SR7_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR7", 0xE6061868, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR7_31_20 RESERVED */
+ IP2SR7_19_16
+ IP2SR7_15_12
+ IP2SR7_11_8
+ IP2SR7_7_4
+ IP2SR7_3_0))
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG_VAR("MOD_SEL4", 0xE6060100, 32,
+ GROUP(-24, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* RESERVED 31-8 */
+ MOD_SEL4_7
+ MOD_SEL4_6
+ MOD_SEL4_5
+ MOD_SEL4_4
+ MOD_SEL4_3
+ MOD_SEL4_2
+ MOD_SEL4_1
+ MOD_SEL4_0))
+ },
+ { },
+};
+
+static const struct pinmux_drive_reg pinmux_drive_regs[] = {
+ { PINMUX_DRIVE_REG("DRV0CTRL0", 0xE6050080) {
+ { RCAR_GP_PIN(0, 7), 28, 3 }, /* MSIOF5_SS2 */
+ { RCAR_GP_PIN(0, 6), 24, 3 }, /* IRQ0 */
+ { RCAR_GP_PIN(0, 5), 20, 3 }, /* IRQ1 */
+ { RCAR_GP_PIN(0, 4), 16, 3 }, /* IRQ2 */
+ { RCAR_GP_PIN(0, 3), 12, 3 }, /* IRQ3 */
+ { RCAR_GP_PIN(0, 2), 8, 3 }, /* GP0_02 */
+ { RCAR_GP_PIN(0, 1), 4, 3 }, /* GP0_01 */
+ { RCAR_GP_PIN(0, 0), 0, 3 }, /* GP0_00 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL0", 0xE6050084) {
+ { RCAR_GP_PIN(0, 15), 28, 3 }, /* MSIOF2_SYNC */
+ { RCAR_GP_PIN(0, 14), 24, 3 }, /* MSIOF2_SS1 */
+ { RCAR_GP_PIN(0, 13), 20, 3 }, /* MSIOF2_SS2 */
+ { RCAR_GP_PIN(0, 12), 16, 3 }, /* MSIOF5_RXD */
+ { RCAR_GP_PIN(0, 11), 12, 3 }, /* MSIOF5_SCK */
+ { RCAR_GP_PIN(0, 10), 8, 3 }, /* MSIOF5_TXD */
+ { RCAR_GP_PIN(0, 9), 4, 3 }, /* MSIOF5_SYNC */
+ { RCAR_GP_PIN(0, 8), 0, 3 }, /* MSIOF5_SS1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL0", 0xE6050088) {
+ { RCAR_GP_PIN(0, 18), 8, 3 }, /* MSIOF2_RXD */
+ { RCAR_GP_PIN(0, 17), 4, 3 }, /* MSIOF2_SCK */
+ { RCAR_GP_PIN(0, 16), 0, 3 }, /* MSIOF2_TXD */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL1", 0xE6050880) {
+ { RCAR_GP_PIN(1, 7), 28, 3 }, /* MSIOF0_SS1 */
+ { RCAR_GP_PIN(1, 6), 24, 3 }, /* MSIOF0_SS2 */
+ { RCAR_GP_PIN(1, 5), 20, 3 }, /* MSIOF1_RXD */
+ { RCAR_GP_PIN(1, 4), 16, 3 }, /* MSIOF1_TXD */
+ { RCAR_GP_PIN(1, 3), 12, 3 }, /* MSIOF1_SCK */
+ { RCAR_GP_PIN(1, 2), 8, 3 }, /* MSIOF1_SYNC */
+ { RCAR_GP_PIN(1, 1), 4, 3 }, /* MSIOF1_SS1 */
+ { RCAR_GP_PIN(1, 0), 0, 3 }, /* MSIOF1_SS2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL1", 0xE6050884) {
+ { RCAR_GP_PIN(1, 15), 28, 3 }, /* HSCK0 */
+ { RCAR_GP_PIN(1, 14), 24, 3 }, /* HRTS0_N */
+ { RCAR_GP_PIN(1, 13), 20, 3 }, /* HCTS0_N */
+ { RCAR_GP_PIN(1, 12), 16, 3 }, /* HTX0 */
+ { RCAR_GP_PIN(1, 11), 12, 3 }, /* MSIOF0_RXD */
+ { RCAR_GP_PIN(1, 10), 8, 3 }, /* MSIOF0_SCK */
+ { RCAR_GP_PIN(1, 9), 4, 3 }, /* MSIOF0_TXD */
+ { RCAR_GP_PIN(1, 8), 0, 3 }, /* MSIOF0_SYNC */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL1", 0xE6050888) {
+ { RCAR_GP_PIN(1, 23), 28, 3 }, /* GP1_23 */
+ { RCAR_GP_PIN(1, 22), 24, 3 }, /* AUDIO_CLKIN */
+ { RCAR_GP_PIN(1, 21), 20, 3 }, /* AUDIO_CLKOUT */
+ { RCAR_GP_PIN(1, 20), 16, 3 }, /* SSI_SD */
+ { RCAR_GP_PIN(1, 19), 12, 3 }, /* SSI_WS */
+ { RCAR_GP_PIN(1, 18), 8, 3 }, /* SSI_SCK */
+ { RCAR_GP_PIN(1, 17), 4, 3 }, /* SCIF_CLK */
+ { RCAR_GP_PIN(1, 16), 0, 3 }, /* HRX0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL1", 0xE605088C) {
+ { RCAR_GP_PIN(1, 29), 20, 2 }, /* ERROROUTC_N */
+ { RCAR_GP_PIN(1, 28), 16, 3 }, /* HTX3 */
+ { RCAR_GP_PIN(1, 27), 12, 3 }, /* HCTS3_N */
+ { RCAR_GP_PIN(1, 26), 8, 3 }, /* HRTS3_N */
+ { RCAR_GP_PIN(1, 25), 4, 3 }, /* HSCK3 */
+ { RCAR_GP_PIN(1, 24), 0, 3 }, /* HRX3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL2", 0xE6058080) {
+ { RCAR_GP_PIN(2, 7), 28, 3 }, /* TPU0TO1 */
+ { RCAR_GP_PIN(2, 6), 24, 3 }, /* FXR_TXDB */
+ { RCAR_GP_PIN(2, 5), 20, 3 }, /* FXR_TXENB_N */
+ { RCAR_GP_PIN(2, 4), 16, 3 }, /* RXDB_EXTFXR */
+ { RCAR_GP_PIN(2, 3), 12, 3 }, /* CLK_EXTFXR */
+ { RCAR_GP_PIN(2, 2), 8, 3 }, /* RXDA_EXTFXR */
+ { RCAR_GP_PIN(2, 1), 4, 3 }, /* FXR_TXENA_N */
+ { RCAR_GP_PIN(2, 0), 0, 3 }, /* FXR_TXDA */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL2", 0xE6058084) {
+ { RCAR_GP_PIN(2, 15), 28, 3 }, /* CANFD3_RX */
+ { RCAR_GP_PIN(2, 14), 24, 3 }, /* CANFD3_TX */
+ { RCAR_GP_PIN(2, 13), 20, 3 }, /* CANFD2_RX */
+ { RCAR_GP_PIN(2, 12), 16, 3 }, /* CANFD2_TX */
+ { RCAR_GP_PIN(2, 11), 12, 3 }, /* CANFD0_RX */
+ { RCAR_GP_PIN(2, 10), 8, 3 }, /* CANFD0_TX */
+ { RCAR_GP_PIN(2, 9), 4, 3 }, /* CAN_CLK */
+ { RCAR_GP_PIN(2, 8), 0, 3 }, /* TPU0TO0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL2", 0xE6058088) {
+ { RCAR_GP_PIN(2, 19), 12, 3 }, /* CANFD1_RX */
+ { RCAR_GP_PIN(2, 17), 4, 3 }, /* CANFD1_TX */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL3", 0xE6058880) {
+ { RCAR_GP_PIN(3, 7), 28, 3 }, /* MMC_D4 */
+ { RCAR_GP_PIN(3, 6), 24, 3 }, /* MMC_D5 */
+ { RCAR_GP_PIN(3, 5), 20, 3 }, /* MMC_SD_D3 */
+ { RCAR_GP_PIN(3, 4), 16, 3 }, /* MMC_DS */
+ { RCAR_GP_PIN(3, 3), 12, 3 }, /* MMC_SD_CLK */
+ { RCAR_GP_PIN(3, 2), 8, 3 }, /* MMC_SD_D2 */
+ { RCAR_GP_PIN(3, 1), 4, 3 }, /* MMC_SD_D0 */
+ { RCAR_GP_PIN(3, 0), 0, 3 }, /* MMC_SD_D1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL3", 0xE6058884) {
+ { RCAR_GP_PIN(3, 15), 28, 2 }, /* QSPI0_SSL */
+ { RCAR_GP_PIN(3, 14), 24, 2 }, /* PWM2 */
+ { RCAR_GP_PIN(3, 13), 20, 2 }, /* PWM1 */
+ { RCAR_GP_PIN(3, 12), 16, 3 }, /* SD_WP */
+ { RCAR_GP_PIN(3, 11), 12, 3 }, /* SD_CD */
+ { RCAR_GP_PIN(3, 10), 8, 3 }, /* MMC_SD_CMD */
+ { RCAR_GP_PIN(3, 9), 4, 3 }, /* MMC_D6 */
+ { RCAR_GP_PIN(3, 8), 0, 3 }, /* MMC_D7 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL3", 0xE6058888) {
+ { RCAR_GP_PIN(3, 23), 28, 2 }, /* QSPI1_MISO_IO1 */
+ { RCAR_GP_PIN(3, 22), 24, 2 }, /* QSPI1_SPCLK */
+ { RCAR_GP_PIN(3, 21), 20, 2 }, /* QSPI1_MOSI_IO0 */
+ { RCAR_GP_PIN(3, 20), 16, 2 }, /* QSPI0_SPCLK */
+ { RCAR_GP_PIN(3, 19), 12, 2 }, /* QSPI0_MOSI_IO0 */
+ { RCAR_GP_PIN(3, 18), 8, 2 }, /* QSPI0_MISO_IO1 */
+ { RCAR_GP_PIN(3, 17), 4, 2 }, /* QSPI0_IO2 */
+ { RCAR_GP_PIN(3, 16), 0, 2 }, /* QSPI0_IO3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL3", 0xE605888C) {
+ { RCAR_GP_PIN(3, 31), 28, 2 }, /* TCLK4 */
+ { RCAR_GP_PIN(3, 30), 24, 2 }, /* TCLK3 */
+ { RCAR_GP_PIN(3, 29), 20, 2 }, /* RPC_INT_N */
+ { RCAR_GP_PIN(3, 28), 16, 2 }, /* RPC_WP_N */
+ { RCAR_GP_PIN(3, 27), 12, 2 }, /* RPC_RESET_N */
+ { RCAR_GP_PIN(3, 26), 8, 2 }, /* QSPI1_IO3 */
+ { RCAR_GP_PIN(3, 25), 4, 2 }, /* QSPI1_SSL */
+ { RCAR_GP_PIN(3, 24), 0, 2 }, /* QSPI1_IO2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL4", 0xE6060080) {
+ { RCAR_GP_PIN(4, 7), 28, 3 }, /* SDA3 */
+ { RCAR_GP_PIN(4, 6), 24, 3 }, /* SCL3 */
+ { RCAR_GP_PIN(4, 5), 20, 3 }, /* SDA2 */
+ { RCAR_GP_PIN(4, 4), 16, 3 }, /* SCL2 */
+ { RCAR_GP_PIN(4, 3), 12, 3 }, /* SDA1 */
+ { RCAR_GP_PIN(4, 2), 8, 3 }, /* SCL1 */
+ { RCAR_GP_PIN(4, 1), 4, 3 }, /* SDA0 */
+ { RCAR_GP_PIN(4, 0), 0, 3 }, /* SCL0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL4", 0xE6060084) {
+ { RCAR_GP_PIN(4, 15), 28, 3 }, /* PWM4 */
+ { RCAR_GP_PIN(4, 14), 24, 3 }, /* PWM3 */
+ { RCAR_GP_PIN(4, 13), 20, 3 }, /* HSCK2 */
+ { RCAR_GP_PIN(4, 12), 16, 3 }, /* HCTS2_N */
+ { RCAR_GP_PIN(4, 11), 12, 3 }, /* SCIF_CLK2 */
+ { RCAR_GP_PIN(4, 10), 8, 3 }, /* HRTS2_N */
+ { RCAR_GP_PIN(4, 9), 4, 3 }, /* HTX2 */
+ { RCAR_GP_PIN(4, 8), 0, 3 }, /* HRX2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL4", 0xE6060088) {
+ { RCAR_GP_PIN(4, 23), 28, 3 }, /* AVS0 */
+ { RCAR_GP_PIN(4, 21), 20, 3 }, /* PCIE0_CLKREQ_N */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL4", 0xE606008C) {
+ { RCAR_GP_PIN(4, 24), 0, 3 }, /* AVS1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL5", 0xE6060880) {
+ { RCAR_GP_PIN(5, 7), 28, 3 }, /* AVB2_TXCREFCLK */
+ { RCAR_GP_PIN(5, 6), 24, 3 }, /* AVB2_MDC */
+ { RCAR_GP_PIN(5, 5), 20, 3 }, /* AVB2_MAGIC */
+ { RCAR_GP_PIN(5, 4), 16, 3 }, /* AVB2_PHY_INT */
+ { RCAR_GP_PIN(5, 3), 12, 3 }, /* AVB2_LINK */
+ { RCAR_GP_PIN(5, 2), 8, 3 }, /* AVB2_AVTP_MATCH */
+ { RCAR_GP_PIN(5, 1), 4, 3 }, /* AVB2_AVTP_CAPTURE */
+ { RCAR_GP_PIN(5, 0), 0, 3 }, /* AVB2_AVTP_PPS */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL5", 0xE6060884) {
+ { RCAR_GP_PIN(5, 15), 28, 3 }, /* AVB2_TD0 */
+ { RCAR_GP_PIN(5, 14), 24, 3 }, /* AVB2_RD1 */
+ { RCAR_GP_PIN(5, 13), 20, 3 }, /* AVB2_RD2 */
+ { RCAR_GP_PIN(5, 12), 16, 3 }, /* AVB2_TD1 */
+ { RCAR_GP_PIN(5, 11), 12, 3 }, /* AVB2_TD2 */
+ { RCAR_GP_PIN(5, 10), 8, 3 }, /* AVB2_MDIO */
+ { RCAR_GP_PIN(5, 9), 4, 3 }, /* AVB2_RD3 */
+ { RCAR_GP_PIN(5, 8), 0, 3 }, /* AVB2_TD3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL5", 0xE6060888) {
+ { RCAR_GP_PIN(5, 20), 16, 3 }, /* AVB2_RX_CTL */
+ { RCAR_GP_PIN(5, 19), 12, 3 }, /* AVB2_TX_CTL */
+ { RCAR_GP_PIN(5, 18), 8, 3 }, /* AVB2_RXC */
+ { RCAR_GP_PIN(5, 17), 4, 3 }, /* AVB2_RD0 */
+ { RCAR_GP_PIN(5, 16), 0, 3 }, /* AVB2_TXC */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL6", 0xE6061080) {
+ { RCAR_GP_PIN(6, 7), 28, 3 }, /* AVB1_TX_CTL */
+ { RCAR_GP_PIN(6, 6), 24, 3 }, /* AVB1_TXC */
+ { RCAR_GP_PIN(6, 5), 20, 3 }, /* AVB1_AVTP_MATCH */
+ { RCAR_GP_PIN(6, 4), 16, 3 }, /* AVB1_LINK */
+ { RCAR_GP_PIN(6, 3), 12, 3 }, /* AVB1_PHY_INT */
+ { RCAR_GP_PIN(6, 2), 8, 3 }, /* AVB1_MDC */
+ { RCAR_GP_PIN(6, 1), 4, 3 }, /* AVB1_MAGIC */
+ { RCAR_GP_PIN(6, 0), 0, 3 }, /* AVB1_MDIO */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL6", 0xE6061084) {
+ { RCAR_GP_PIN(6, 15), 28, 3 }, /* AVB1_RD0 */
+ { RCAR_GP_PIN(6, 14), 24, 3 }, /* AVB1_RD1 */
+ { RCAR_GP_PIN(6, 13), 20, 3 }, /* AVB1_TD0 */
+ { RCAR_GP_PIN(6, 12), 16, 3 }, /* AVB1_TD1 */
+ { RCAR_GP_PIN(6, 11), 12, 3 }, /* AVB1_AVTP_CAPTURE */
+ { RCAR_GP_PIN(6, 10), 8, 3 }, /* AVB1_AVTP_PPS */
+ { RCAR_GP_PIN(6, 9), 4, 3 }, /* AVB1_RX_CTL */
+ { RCAR_GP_PIN(6, 8), 0, 3 }, /* AVB1_RXC */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL6", 0xE6061088) {
+ { RCAR_GP_PIN(6, 20), 16, 3 }, /* AVB1_TXCREFCLK */
+ { RCAR_GP_PIN(6, 19), 12, 3 }, /* AVB1_RD3 */
+ { RCAR_GP_PIN(6, 18), 8, 3 }, /* AVB1_TD3 */
+ { RCAR_GP_PIN(6, 17), 4, 3 }, /* AVB1_RD2 */
+ { RCAR_GP_PIN(6, 16), 0, 3 }, /* AVB1_TD2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL7", 0xE6061880) {
+ { RCAR_GP_PIN(7, 7), 28, 3 }, /* AVB0_TD1 */
+ { RCAR_GP_PIN(7, 6), 24, 3 }, /* AVB0_TD2 */
+ { RCAR_GP_PIN(7, 5), 20, 3 }, /* AVB0_PHY_INT */
+ { RCAR_GP_PIN(7, 4), 16, 3 }, /* AVB0_LINK */
+ { RCAR_GP_PIN(7, 3), 12, 3 }, /* AVB0_TD3 */
+ { RCAR_GP_PIN(7, 2), 8, 3 }, /* AVB0_AVTP_MATCH */
+ { RCAR_GP_PIN(7, 1), 4, 3 }, /* AVB0_AVTP_CAPTURE */
+ { RCAR_GP_PIN(7, 0), 0, 3 }, /* AVB0_AVTP_PPS */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL7", 0xE6061884) {
+ { RCAR_GP_PIN(7, 15), 28, 3 }, /* AVB0_TXC */
+ { RCAR_GP_PIN(7, 14), 24, 3 }, /* AVB0_MDIO */
+ { RCAR_GP_PIN(7, 13), 20, 3 }, /* AVB0_MDC */
+ { RCAR_GP_PIN(7, 12), 16, 3 }, /* AVB0_RD2 */
+ { RCAR_GP_PIN(7, 11), 12, 3 }, /* AVB0_TD0 */
+ { RCAR_GP_PIN(7, 10), 8, 3 }, /* AVB0_MAGIC */
+ { RCAR_GP_PIN(7, 9), 4, 3 }, /* AVB0_TXCREFCLK */
+ { RCAR_GP_PIN(7, 8), 0, 3 }, /* AVB0_RD3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL7", 0xE6061888) {
+ { RCAR_GP_PIN(7, 20), 16, 3 }, /* AVB0_RX_CTL */
+ { RCAR_GP_PIN(7, 19), 12, 3 }, /* AVB0_RXC */
+ { RCAR_GP_PIN(7, 18), 8, 3 }, /* AVB0_RD0 */
+ { RCAR_GP_PIN(7, 17), 4, 3 }, /* AVB0_RD1 */
+ { RCAR_GP_PIN(7, 16), 0, 3 }, /* AVB0_TX_CTL */
+ } },
+ { },
+};
+
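+/* No POC2 entry: the driver exposes no I/O voltage control for GP2 pins. */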
+enum ioctrl_regs {
+ POC0,
+ POC1,
+ POC3,
+ POC4,
+ POC5,
+ POC6,
+ POC7,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [POC0] = { 0xE60500A0, },
+ [POC1] = { 0xE60508A0, },
+ [POC3] = { 0xE60588A0, },
+ [POC4] = { 0xE60600A0, },
+ [POC5] = { 0xE60608A0, },
+ [POC6] = { 0xE60610A0, },
+ [POC7] = { 0xE60618A0, },
+ { /* sentinel */ },
+};
+
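+/*
+ * Map a pin to its POC control register: store the register address in
+ * *pocctrl and return the bit position within it, or -EINVAL if the pin
+ * has no I/O voltage control.
+ */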
+static int r8a779h0_pin_to_pocctrl(unsigned int pin, u32 *pocctrl)
+{
+ int bit = pin & 0x1f;
+
+ switch (pin) {
+ case RCAR_GP_PIN(0, 0) ... RCAR_GP_PIN(0, 18):
+ *pocctrl = pinmux_ioctrl_regs[POC0].reg;
+ return bit;
+
+ case RCAR_GP_PIN(1, 0) ... RCAR_GP_PIN(1, 28):
+ *pocctrl = pinmux_ioctrl_regs[POC1].reg;
+ return bit;
+
+ case RCAR_GP_PIN(3, 0) ... RCAR_GP_PIN(3, 12):
+ *pocctrl = pinmux_ioctrl_regs[POC3].reg;
+ return bit;
+
+ case RCAR_GP_PIN(4, 0) ... RCAR_GP_PIN(4, 13):
+ *pocctrl = pinmux_ioctrl_regs[POC4].reg;
+ return bit;
+
+ case PIN_VDDQ_AVB2:
+ *pocctrl = pinmux_ioctrl_regs[POC5].reg;
+ return 0;
+
+ case PIN_VDDQ_AVB1:
+ *pocctrl = pinmux_ioctrl_regs[POC6].reg;
+ return 0;
+
+ case PIN_VDDQ_AVB0:
+ *pocctrl = pinmux_ioctrl_regs[POC7].reg;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xE60500C0, "PUD0", 0xE60500E0) {
+ [ 0] = RCAR_GP_PIN(0, 0), /* GP0_00 */
+ [ 1] = RCAR_GP_PIN(0, 1), /* GP0_01 */
+ [ 2] = RCAR_GP_PIN(0, 2), /* GP0_02 */
+ [ 3] = RCAR_GP_PIN(0, 3), /* IRQ3 */
+ [ 4] = RCAR_GP_PIN(0, 4), /* IRQ2 */
+ [ 5] = RCAR_GP_PIN(0, 5), /* IRQ1 */
+ [ 6] = RCAR_GP_PIN(0, 6), /* IRQ0 */
+ [ 7] = RCAR_GP_PIN(0, 7), /* MSIOF5_SS2 */
+ [ 8] = RCAR_GP_PIN(0, 8), /* MSIOF5_SS1 */
+ [ 9] = RCAR_GP_PIN(0, 9), /* MSIOF5_SYNC */
+ [10] = RCAR_GP_PIN(0, 10), /* MSIOF5_TXD */
+ [11] = RCAR_GP_PIN(0, 11), /* MSIOF5_SCK */
+ [12] = RCAR_GP_PIN(0, 12), /* MSIOF5_RXD */
+ [13] = RCAR_GP_PIN(0, 13), /* MSIOF2_SS2 */
+ [14] = RCAR_GP_PIN(0, 14), /* MSIOF2_SS1 */
+ [15] = RCAR_GP_PIN(0, 15), /* MSIOF2_SYNC */
+ [16] = RCAR_GP_PIN(0, 16), /* MSIOF2_TXD */
+ [17] = RCAR_GP_PIN(0, 17), /* MSIOF2_SCK */
+ [18] = RCAR_GP_PIN(0, 18), /* MSIOF2_RXD */
+ [19] = SH_PFC_PIN_NONE,
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xE60508C0, "PUD1", 0xE60508E0) {
+ [ 0] = RCAR_GP_PIN(1, 0), /* MSIOF1_SS2 */
+ [ 1] = RCAR_GP_PIN(1, 1), /* MSIOF1_SS1 */
+ [ 2] = RCAR_GP_PIN(1, 2), /* MSIOF1_SYNC */
+ [ 3] = RCAR_GP_PIN(1, 3), /* MSIOF1_SCK */
+ [ 4] = RCAR_GP_PIN(1, 4), /* MSIOF1_TXD */
+ [ 5] = RCAR_GP_PIN(1, 5), /* MSIOF1_RXD */
+ [ 6] = RCAR_GP_PIN(1, 6), /* MSIOF0_SS2 */
+ [ 7] = RCAR_GP_PIN(1, 7), /* MSIOF0_SS1 */
+ [ 8] = RCAR_GP_PIN(1, 8), /* MSIOF0_SYNC */
+ [ 9] = RCAR_GP_PIN(1, 9), /* MSIOF0_TXD */
+ [10] = RCAR_GP_PIN(1, 10), /* MSIOF0_SCK */
+ [11] = RCAR_GP_PIN(1, 11), /* MSIOF0_RXD */
+ [12] = RCAR_GP_PIN(1, 12), /* HTX0 */
+ [13] = RCAR_GP_PIN(1, 13), /* HCTS0_N */
+ [14] = RCAR_GP_PIN(1, 14), /* HRTS0_N */
+ [15] = RCAR_GP_PIN(1, 15), /* HSCK0 */
+ [16] = RCAR_GP_PIN(1, 16), /* HRX0 */
+ [17] = RCAR_GP_PIN(1, 17), /* SCIF_CLK */
+ [18] = RCAR_GP_PIN(1, 18), /* SSI_SCK */
+ [19] = RCAR_GP_PIN(1, 19), /* SSI_WS */
+ [20] = RCAR_GP_PIN(1, 20), /* SSI_SD */
+ [21] = RCAR_GP_PIN(1, 21), /* AUDIO_CLKOUT */
+ [22] = RCAR_GP_PIN(1, 22), /* AUDIO_CLKIN */
+ [23] = RCAR_GP_PIN(1, 23), /* GP1_23 */
+ [24] = RCAR_GP_PIN(1, 24), /* HRX3 */
+ [25] = RCAR_GP_PIN(1, 25), /* HSCK3 */
+ [26] = RCAR_GP_PIN(1, 26), /* HRTS3_N */
+ [27] = RCAR_GP_PIN(1, 27), /* HCTS3_N */
+ [28] = RCAR_GP_PIN(1, 28), /* HTX3 */
+ [29] = RCAR_GP_PIN(1, 29), /* ERROROUTC_N */
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xE60580C0, "PUD2", 0xE60580E0) {
+ [ 0] = RCAR_GP_PIN(2, 0), /* FXR_TXDA */
+ [ 1] = RCAR_GP_PIN(2, 1), /* FXR_TXENA_N */
+ [ 2] = RCAR_GP_PIN(2, 2), /* RXDA_EXTFXR */
+ [ 3] = RCAR_GP_PIN(2, 3), /* CLK_EXTFXR */
+ [ 4] = RCAR_GP_PIN(2, 4), /* RXDB_EXTFXR */
+ [ 5] = RCAR_GP_PIN(2, 5), /* FXR_TXENB_N */
+ [ 6] = RCAR_GP_PIN(2, 6), /* FXR_TXDB */
+ [ 7] = RCAR_GP_PIN(2, 7), /* TPU0TO1 */
+ [ 8] = RCAR_GP_PIN(2, 8), /* TPU0TO0 */
+ [ 9] = RCAR_GP_PIN(2, 9), /* CAN_CLK */
+ [10] = RCAR_GP_PIN(2, 10), /* CANFD0_TX */
+ [11] = RCAR_GP_PIN(2, 11), /* CANFD0_RX */
+ [12] = RCAR_GP_PIN(2, 12), /* CANFD2_TX */
+ [13] = RCAR_GP_PIN(2, 13), /* CANFD2_RX */
+ [14] = RCAR_GP_PIN(2, 14), /* CANFD3_TX */
+ [15] = RCAR_GP_PIN(2, 15), /* CANFD3_RX */
+ [16] = SH_PFC_PIN_NONE,
+ [17] = RCAR_GP_PIN(2, 17), /* CANFD1_TX */
+ [18] = SH_PFC_PIN_NONE,
+ [19] = RCAR_GP_PIN(2, 19), /* CANFD1_RX */
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xE60588C0, "PUD3", 0xE60588E0) {
+ [ 0] = RCAR_GP_PIN(3, 0), /* MMC_SD_D1 */
+ [ 1] = RCAR_GP_PIN(3, 1), /* MMC_SD_D0 */
+ [ 2] = RCAR_GP_PIN(3, 2), /* MMC_SD_D2 */
+ [ 3] = RCAR_GP_PIN(3, 3), /* MMC_SD_CLK */
+ [ 4] = RCAR_GP_PIN(3, 4), /* MMC_DS */
+ [ 5] = RCAR_GP_PIN(3, 5), /* MMC_SD_D3 */
+ [ 6] = RCAR_GP_PIN(3, 6), /* MMC_D5 */
+ [ 7] = RCAR_GP_PIN(3, 7), /* MMC_D4 */
+ [ 8] = RCAR_GP_PIN(3, 8), /* MMC_D7 */
+ [ 9] = RCAR_GP_PIN(3, 9), /* MMC_D6 */
+ [10] = RCAR_GP_PIN(3, 10), /* MMC_SD_CMD */
+ [11] = RCAR_GP_PIN(3, 11), /* SD_CD */
+ [12] = RCAR_GP_PIN(3, 12), /* SD_WP */
+ [13] = RCAR_GP_PIN(3, 13), /* PWM1 */
+ [14] = RCAR_GP_PIN(3, 14), /* PWM2 */
+ [15] = RCAR_GP_PIN(3, 15), /* QSPI0_SSL */
+ [16] = RCAR_GP_PIN(3, 16), /* QSPI0_IO3 */
+ [17] = RCAR_GP_PIN(3, 17), /* QSPI0_IO2 */
+ [18] = RCAR_GP_PIN(3, 18), /* QSPI0_MISO_IO1 */
+ [19] = RCAR_GP_PIN(3, 19), /* QSPI0_MOSI_IO0 */
+ [20] = RCAR_GP_PIN(3, 20), /* QSPI0_SPCLK */
+ [21] = RCAR_GP_PIN(3, 21), /* QSPI1_MOSI_IO0 */
+ [22] = RCAR_GP_PIN(3, 22), /* QSPI1_SPCLK */
+ [23] = RCAR_GP_PIN(3, 23), /* QSPI1_MISO_IO1 */
+ [24] = RCAR_GP_PIN(3, 24), /* QSPI1_IO2 */
+ [25] = RCAR_GP_PIN(3, 25), /* QSPI1_SSL */
+ [26] = RCAR_GP_PIN(3, 26), /* QSPI1_IO3 */
+ [27] = RCAR_GP_PIN(3, 27), /* RPC_RESET_N */
+ [28] = RCAR_GP_PIN(3, 28), /* RPC_WP_N */
+ [29] = RCAR_GP_PIN(3, 29), /* RPC_INT_N */
+ [30] = RCAR_GP_PIN(3, 30), /* TCLK3 */
+ [31] = RCAR_GP_PIN(3, 31), /* TCLK4 */
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xE60600C0, "PUD4", 0xE60600E0) {
+ [ 0] = RCAR_GP_PIN(4, 0), /* SCL0 */
+ [ 1] = RCAR_GP_PIN(4, 1), /* SDA0 */
+ [ 2] = RCAR_GP_PIN(4, 2), /* SCL1 */
+ [ 3] = RCAR_GP_PIN(4, 3), /* SDA1 */
+ [ 4] = RCAR_GP_PIN(4, 4), /* SCL2 */
+ [ 5] = RCAR_GP_PIN(4, 5), /* SDA2 */
+ [ 6] = RCAR_GP_PIN(4, 6), /* SCL3 */
+ [ 7] = RCAR_GP_PIN(4, 7), /* SDA3 */
+ [ 8] = RCAR_GP_PIN(4, 8), /* HRX2 */
+ [ 9] = RCAR_GP_PIN(4, 9), /* HTX2 */
+ [10] = RCAR_GP_PIN(4, 10), /* HRTS2_N */
+ [11] = RCAR_GP_PIN(4, 11), /* SCIF_CLK2 */
+ [12] = RCAR_GP_PIN(4, 12), /* HCTS2_N */
+ [13] = RCAR_GP_PIN(4, 13), /* HSCK2 */
+ [14] = RCAR_GP_PIN(4, 14), /* PWM3 */
+ [15] = RCAR_GP_PIN(4, 15), /* PWM4 */
+ [16] = SH_PFC_PIN_NONE,
+ [17] = SH_PFC_PIN_NONE,
+ [18] = SH_PFC_PIN_NONE,
+ [19] = SH_PFC_PIN_NONE,
+ [20] = SH_PFC_PIN_NONE,
+ [21] = RCAR_GP_PIN(4, 21), /* PCIE0_CLKREQ_N */
+ [22] = SH_PFC_PIN_NONE,
+ [23] = RCAR_GP_PIN(4, 23), /* AVS0 */
+ [24] = RCAR_GP_PIN(4, 24), /* AVS1 */
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xE60608C0, "PUD5", 0xE60608E0) {
+ [ 0] = RCAR_GP_PIN(5, 0), /* AVB2_AVTP_PPS */
+ [ 1] = RCAR_GP_PIN(5, 1), /* AVB2_AVTP_CAPTURE */
+ [ 2] = RCAR_GP_PIN(5, 2), /* AVB2_AVTP_MATCH */
+ [ 3] = RCAR_GP_PIN(5, 3), /* AVB2_LINK */
+ [ 4] = RCAR_GP_PIN(5, 4), /* AVB2_PHY_INT */
+ [ 5] = RCAR_GP_PIN(5, 5), /* AVB2_MAGIC */
+ [ 6] = RCAR_GP_PIN(5, 6), /* AVB2_MDC */
+ [ 7] = RCAR_GP_PIN(5, 7), /* AVB2_TXCREFCLK */
+ [ 8] = RCAR_GP_PIN(5, 8), /* AVB2_TD3 */
+ [ 9] = RCAR_GP_PIN(5, 9), /* AVB2_RD3 */
+ [10] = RCAR_GP_PIN(5, 10), /* AVB2_MDIO */
+ [11] = RCAR_GP_PIN(5, 11), /* AVB2_TD2 */
+ [12] = RCAR_GP_PIN(5, 12), /* AVB2_TD1 */
+ [13] = RCAR_GP_PIN(5, 13), /* AVB2_RD2 */
+ [14] = RCAR_GP_PIN(5, 14), /* AVB2_RD1 */
+ [15] = RCAR_GP_PIN(5, 15), /* AVB2_TD0 */
+ [16] = RCAR_GP_PIN(5, 16), /* AVB2_TXC */
+ [17] = RCAR_GP_PIN(5, 17), /* AVB2_RD0 */
+ [18] = RCAR_GP_PIN(5, 18), /* AVB2_RXC */
+ [19] = RCAR_GP_PIN(5, 19), /* AVB2_TX_CTL */
+ [20] = RCAR_GP_PIN(5, 20), /* AVB2_RX_CTL */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN6", 0xE60610C0, "PUD6", 0xE60610E0) {
+ [ 0] = RCAR_GP_PIN(6, 0), /* AVB1_MDIO */
+ [ 1] = RCAR_GP_PIN(6, 1), /* AVB1_MAGIC */
+ [ 2] = RCAR_GP_PIN(6, 2), /* AVB1_MDC */
+ [ 3] = RCAR_GP_PIN(6, 3), /* AVB1_PHY_INT */
+ [ 4] = RCAR_GP_PIN(6, 4), /* AVB1_LINK */
+ [ 5] = RCAR_GP_PIN(6, 5), /* AVB1_AVTP_MATCH */
+ [ 6] = RCAR_GP_PIN(6, 6), /* AVB1_TXC */
+ [ 7] = RCAR_GP_PIN(6, 7), /* AVB1_TX_CTL */
+ [ 8] = RCAR_GP_PIN(6, 8), /* AVB1_RXC */
+ [ 9] = RCAR_GP_PIN(6, 9), /* AVB1_RX_CTL */
+ [10] = RCAR_GP_PIN(6, 10), /* AVB1_AVTP_PPS */
+ [11] = RCAR_GP_PIN(6, 11), /* AVB1_AVTP_CAPTURE */
+ [12] = RCAR_GP_PIN(6, 12), /* AVB1_TD1 */
+ [13] = RCAR_GP_PIN(6, 13), /* AVB1_TD0 */
+ [14] = RCAR_GP_PIN(6, 14), /* AVB1_RD1 */
+ [15] = RCAR_GP_PIN(6, 15), /* AVB1_RD0 */
+ [16] = RCAR_GP_PIN(6, 16), /* AVB1_TD2 */
+ [17] = RCAR_GP_PIN(6, 17), /* AVB1_RD2 */
+ [18] = RCAR_GP_PIN(6, 18), /* AVB1_TD3 */
+ [19] = RCAR_GP_PIN(6, 19), /* AVB1_RD3 */
+ [20] = RCAR_GP_PIN(6, 20), /* AVB1_TXCREFCLK */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN7", 0xE60618C0, "PUD7", 0xE60618E0) {
+ [ 0] = RCAR_GP_PIN(7, 0), /* AVB0_AVTP_PPS */
+ [ 1] = RCAR_GP_PIN(7, 1), /* AVB0_AVTP_CAPTURE */
+ [ 2] = RCAR_GP_PIN(7, 2), /* AVB0_AVTP_MATCH */
+ [ 3] = RCAR_GP_PIN(7, 3), /* AVB0_TD3 */
+ [ 4] = RCAR_GP_PIN(7, 4), /* AVB0_LINK */
+ [ 5] = RCAR_GP_PIN(7, 5), /* AVB0_PHY_INT */
+ [ 6] = RCAR_GP_PIN(7, 6), /* AVB0_TD2 */
+ [ 7] = RCAR_GP_PIN(7, 7), /* AVB0_TD1 */
+ [ 8] = RCAR_GP_PIN(7, 8), /* AVB0_RD3 */
+ [ 9] = RCAR_GP_PIN(7, 9), /* AVB0_TXCREFCLK */
+ [10] = RCAR_GP_PIN(7, 10), /* AVB0_MAGIC */
+ [11] = RCAR_GP_PIN(7, 11), /* AVB0_TD0 */
+ [12] = RCAR_GP_PIN(7, 12), /* AVB0_RD2 */
+ [13] = RCAR_GP_PIN(7, 13), /* AVB0_MDC */
+ [14] = RCAR_GP_PIN(7, 14), /* AVB0_MDIO */
+ [15] = RCAR_GP_PIN(7, 15), /* AVB0_TXC */
+ [16] = RCAR_GP_PIN(7, 16), /* AVB0_TX_CTL */
+ [17] = RCAR_GP_PIN(7, 17), /* AVB0_RD1 */
+ [18] = RCAR_GP_PIN(7, 18), /* AVB0_RD0 */
+ [19] = RCAR_GP_PIN(7, 19), /* AVB0_RXC */
+ [20] = RCAR_GP_PIN(7, 20), /* AVB0_RX_CTL */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { /* sentinel */ },
+};
+
+static const struct sh_pfc_soc_operations r8a779h0_pin_ops = {
+ .pin_to_pocctrl = r8a779h0_pin_to_pocctrl,
+ .get_bias = rcar_pinmux_get_bias,
+ .set_bias = rcar_pinmux_set_bias,
+};
+
+const struct sh_pfc_soc_info r8a779h0_pinmux_info = {
+ .name = "r8a779h0_pfc",
+ .ops = &r8a779h0_pin_ops,
+ .unlock_reg = 0x1ff, /* PMMRn mask */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index 80fb5011c7bbc..eb5a8c6542606 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -5,6 +5,7 @@
* Copyright (C) 2021 Renesas Electronics Corporation.
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/gpio/driver.h>
@@ -38,8 +39,6 @@
*/
#define MUX_PIN_ID_MASK GENMASK(15, 0)
#define MUX_FUNC_MASK GENMASK(31, 16)
-#define MUX_FUNC_OFFS 16
-#define MUX_FUNC(pinconf) (((pinconf) & MUX_FUNC_MASK) >> MUX_FUNC_OFFS)
/* PIN capabilities */
#define PIN_CFG_IOLH_A BIT(0)
@@ -58,6 +57,8 @@
#define PIN_CFG_IOLH_C BIT(13)
#define PIN_CFG_SOFT_PS BIT(14)
#define PIN_CFG_OEN BIT(15)
+#define PIN_CFG_VARIABLE BIT(16)
+#define PIN_CFG_NOGPIO_INT BIT(17)
#define RZG2L_MPXED_COMMON_PIN_FUNCS(group) \
(PIN_CFG_IOLH_##group | \
@@ -77,27 +78,41 @@
PIN_CFG_FILNUM | \
PIN_CFG_FILCLKSEL)
+#define PIN_CFG_PIN_MAP_MASK GENMASK_ULL(35, 28)
+#define PIN_CFG_PIN_REG_MASK GENMASK(27, 20)
+#define PIN_CFG_MASK GENMASK(19, 0)
+
+/*
+ * m is the bitmap of supported pins, a is the register index,
+ * and f is the set of pin configuration capabilities supported.
+ */
+#define RZG2L_GPIO_PORT_SPARSE_PACK(m, a, f) (FIELD_PREP_CONST(PIN_CFG_PIN_MAP_MASK, (m)) | \
+ FIELD_PREP_CONST(PIN_CFG_PIN_REG_MASK, (a)) | \
+ FIELD_PREP_CONST(PIN_CFG_MASK, (f)))
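+/*
+ * e.g. RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x06, ...) describes register index
+ * 0x06 with only pin 1 populated (bitmap 0b10).
+ */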
+
/*
* n indicates number of pins in the port, a is the register index
* and f is pin configuration capabilities supported.
*/
-#define RZG2L_GPIO_PORT_PACK(n, a, f) (((n) << 28) | ((a) << 20) | (f))
-#define RZG2L_GPIO_PORT_GET_PINCNT(x) (((x) & GENMASK(30, 28)) >> 28)
+#define RZG2L_GPIO_PORT_PACK(n, a, f) RZG2L_GPIO_PORT_SPARSE_PACK((1ULL << (n)) - 1, (a), (f))
/*
- * BIT(31) indicates dedicated pin, p is the register index while
+ * BIT(63) indicates dedicated pin, p is the register index while
* referencing to SR/IEN/IOLH/FILxx registers, b is the register bits
* (b * 8) and f is the pin configuration capabilities supported.
*/
-#define RZG2L_SINGLE_PIN BIT(31)
+#define RZG2L_SINGLE_PIN BIT_ULL(63)
+#define RZG2L_SINGLE_PIN_INDEX_MASK GENMASK(30, 24)
+#define RZG2L_SINGLE_PIN_BITS_MASK GENMASK(22, 20)
+
#define RZG2L_SINGLE_PIN_PACK(p, b, f) (RZG2L_SINGLE_PIN | \
- ((p) << 24) | ((b) << 20) | (f))
-#define RZG2L_SINGLE_PIN_GET_BIT(x) (((x) & GENMASK(22, 20)) >> 20)
+ FIELD_PREP_CONST(RZG2L_SINGLE_PIN_INDEX_MASK, (p)) | \
+ FIELD_PREP_CONST(RZG2L_SINGLE_PIN_BITS_MASK, (b)) | \
+ FIELD_PREP_CONST(PIN_CFG_MASK, (f)))
-#define RZG2L_PIN_CFG_TO_CAPS(cfg) ((cfg) & GENMASK(19, 0))
#define RZG2L_PIN_CFG_TO_PORT_OFFSET(cfg) ((cfg) & RZG2L_SINGLE_PIN ? \
- (((cfg) & GENMASK(30, 24)) >> 24) : \
- (((cfg) & GENMASK(26, 20)) >> 20))
+ FIELD_GET(RZG2L_SINGLE_PIN_INDEX_MASK, (cfg)) : \
+ FIELD_GET(PIN_CFG_PIN_REG_MASK, (cfg)))
#define P(off) (0x0000 + (off))
#define PM(off) (0x0100 + (off) * 2)
@@ -134,6 +149,33 @@
#define RZG2L_TINT_IRQ_START_INDEX 9
#define RZG2L_PACK_HWIRQ(t, i) (((t) << 16) | (i))
+/* Read/write 8 bits register */
+#define RZG2L_PCTRL_REG_ACCESS8(_read, _addr, _val) \
+ do { \
+ if (_read) \
+ _val = readb(_addr); \
+ else \
+ writeb(_val, _addr); \
+ } while (0)
+
+/* Read/write 16 bits register */
+#define RZG2L_PCTRL_REG_ACCESS16(_read, _addr, _val) \
+ do { \
+ if (_read) \
+ _val = readw(_addr); \
+ else \
+ writew(_val, _addr); \
+ } while (0)
+
+/* Read/write 32 bits register */
+#define RZG2L_PCTRL_REG_ACCESS32(_read, _addr, _val) \
+ do { \
+ if (_read) \
+ _val = readl(_addr); \
+ else \
+ writel(_val, _addr); \
+ } while (0)
+
/**
* struct rzg2l_register_offsets - specific register offsets
* @pwpr: PWPR register offset
@@ -189,17 +231,31 @@ struct rzg2l_hwcfg {
struct rzg2l_dedicated_configs {
const char *name;
- u32 config;
+ u64 config;
+};
+
+/**
+ * struct rzg2l_variable_pin_cfg - pin data cfg
+ * @cfg: port pin configuration
+ * @port: port number
+ * @pin: port pin
+ */
+struct rzg2l_variable_pin_cfg {
+ u32 cfg:20;
+ u32 port:5;
+ u32 pin:3;
};
struct rzg2l_pinctrl_data {
const char * const *port_pins;
- const u32 *port_pin_configs;
+ const u64 *port_pin_configs;
unsigned int n_ports;
const struct rzg2l_dedicated_configs *dedicated_pins;
unsigned int n_port_pins;
unsigned int n_dedicated_pins;
const struct rzg2l_hwcfg *hwcfg;
+ const struct rzg2l_variable_pin_cfg *variable_pin_cfg;
+ unsigned int n_variable_pin_cfg;
};
/**
@@ -212,6 +268,32 @@ struct rzg2l_pinctrl_pin_settings {
u16 drive_strength_ua;
};
+/**
+ * struct rzg2l_pinctrl_reg_cache - register cache structure (to be used in suspend/resume)
+ * @p: P registers cache
+ * @pm: PM registers cache
+ * @pmc: PMC registers cache
+ * @pfc: PFC registers cache
+ * @iolh: IOLH registers cache
+ * @ien: IEN registers cache
+ * @sd_ch: SD_CH registers cache
+ * @eth_poc: ETH_POC registers cache
+ * @eth_mode: ETH_MODE register cache
+ * @qspi: QSPI registers cache
+ */
+struct rzg2l_pinctrl_reg_cache {
+ u8 *p;
+ u16 *pm;
+ u8 *pmc;
+ u32 *pfc;
+ u32 *iolh[2];
+ u32 *ien[2];
+ u8 sd_ch[2];
+ u8 eth_poc[2];
+ u8 eth_mode;
+ u8 qspi;
+};
+
struct rzg2l_pinctrl {
struct pinctrl_dev *pctl;
struct pinctrl_desc desc;
@@ -221,6 +303,8 @@ struct rzg2l_pinctrl {
void __iomem *base;
struct device *dev;
+ struct clk *clk;
+
struct gpio_chip gpio_chip;
struct pinctrl_gpio_range gpio_range;
DECLARE_BITMAP(tint_slot, RZG2L_TINT_MAX_INTERRUPT);
@@ -231,10 +315,150 @@ struct rzg2l_pinctrl {
struct mutex mutex; /* serialize adding groups and functions */
struct rzg2l_pinctrl_pin_settings *settings;
+ struct rzg2l_pinctrl_reg_cache *cache;
+ struct rzg2l_pinctrl_reg_cache *dedicated_cache;
+ atomic_t wakeup_path;
};
static const u16 available_ps[] = { 1800, 2500, 3300 };
+#ifdef CONFIG_RISCV
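+/*
+ * Resolve a PIN_CFG_VARIABLE placeholder: return the actual capabilities for
+ * @port/@pin from the per-SoC variable pin table, or 0 if no entry exists.
+ */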
+static u64 rzg2l_pinctrl_get_variable_pin_cfg(struct rzg2l_pinctrl *pctrl,
+ u64 pincfg,
+ unsigned int port,
+ u8 pin)
+{
+ unsigned int i;
+
+ for (i = 0; i < pctrl->data->n_variable_pin_cfg; i++) {
+ if (pctrl->data->variable_pin_cfg[i].port == port &&
+ pctrl->data->variable_pin_cfg[i].pin == pin)
+ return (pincfg & ~PIN_CFG_VARIABLE) | pctrl->data->variable_pin_cfg[i].cfg;
+ }
+
+ return 0;
+}
+
+static const struct rzg2l_variable_pin_cfg r9a07g043f_variable_pin_cfg[] = {
+ {
+ .port = 20,
+ .pin = 0,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 20,
+ .pin = 1,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 20,
+ .pin = 2,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 20,
+ .pin = 3,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 20,
+ .pin = 4,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 20,
+ .pin = 5,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 20,
+ .pin = 6,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 20,
+ .pin = 7,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 23,
+ .pin = 1,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT
+ },
+ {
+ .port = 23,
+ .pin = 2,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 23,
+ .pin = 3,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 23,
+ .pin = 4,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 23,
+ .pin = 5,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 24,
+ .pin = 0,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 24,
+ .pin = 1,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 24,
+ .pin = 2,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 24,
+ .pin = 3,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 24,
+ .pin = 4,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 24,
+ .pin = 5,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_NOGPIO_INT,
+ },
+};
+#endif
+
static void rzg2l_pinctrl_set_pfc_mode(struct rzg2l_pinctrl *pctrl,
u8 pin, u8 off, u8 func)
{
@@ -295,7 +519,7 @@ static int rzg2l_pinctrl_set_mux(struct pinctrl_dev *pctldev,
pins = group->grp.pins;
for (i = 0; i < group->grp.npins; i++) {
- unsigned int *pin_data = pctrl->desc.pins[pins[i]].drv_data;
+ u64 *pin_data = pctrl->desc.pins[pins[i]].drv_data;
u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u32 pin = RZG2L_PIN_ID_TO_PIN(pins[i]);
@@ -432,8 +656,8 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
ret = of_property_read_u32_index(np, "pinmux", i, &value);
if (ret)
goto done;
- pins[i] = value & MUX_PIN_ID_MASK;
- psel_val[i] = MUX_FUNC(value);
+ pins[i] = FIELD_GET(MUX_PIN_ID_MASK, value);
+ psel_val[i] = FIELD_GET(MUX_FUNC_MASK, value);
}
if (parent) {
@@ -447,6 +671,16 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
name = np->name;
}
+ if (num_configs) {
+ ret = rzg2l_map_add_config(&maps[idx], name,
+ PIN_MAP_TYPE_CONFIGS_GROUP,
+ configs, num_configs);
+ if (ret < 0)
+ goto done;
+
+ idx++;
+ }
+
mutex_lock(&pctrl->mutex);
/* Register a single pin group listing all the pins we read from DT */
@@ -474,16 +708,6 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
maps[idx].data.mux.function = name;
idx++;
- if (num_configs) {
- ret = rzg2l_map_add_config(&maps[idx], name,
- PIN_MAP_TYPE_CONFIGS_GROUP,
- configs, num_configs);
- if (ret < 0)
- goto remove_group;
-
- idx++;
- }
-
dev_dbg(pctrl->dev, "Parsed %pOF with %d pins\n", np, num_pinmux);
ret = 0;
goto done;
@@ -558,13 +782,13 @@ done:
}
static int rzg2l_validate_gpio_pin(struct rzg2l_pinctrl *pctrl,
- u32 cfg, u32 port, u8 bit)
+ u64 cfg, u32 port, u8 bit)
{
- u8 pincount = RZG2L_GPIO_PORT_GET_PINCNT(cfg);
+ u8 pinmap = FIELD_GET(PIN_CFG_PIN_MAP_MASK, cfg);
u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(cfg);
- u32 data;
+ u64 data;
- if (bit >= pincount || port >= pctrl->data->n_port_pins)
+ if (!(pinmap & BIT(bit)) || port >= pctrl->data->n_port_pins)
return -EINVAL;
data = pctrl->data->port_pin_configs[port];
@@ -856,7 +1080,7 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
enum pin_config_param param = pinconf_to_config_param(*config);
const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin];
- unsigned int *pin_data = pin->drv_data;
+ u64 *pin_data = pin->drv_data;
unsigned int arg = 0;
u32 off, cfg;
int ret;
@@ -866,9 +1090,9 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
return -EINVAL;
off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
- cfg = RZG2L_PIN_CFG_TO_CAPS(*pin_data);
+ cfg = FIELD_GET(PIN_CFG_MASK, *pin_data);
if (*pin_data & RZG2L_SINGLE_PIN) {
- bit = RZG2L_SINGLE_PIN_GET_BIT(*pin_data);
+ bit = FIELD_GET(RZG2L_SINGLE_PIN_BITS_MASK, *pin_data);
} else {
bit = RZG2L_PIN_ID_TO_PIN(_pin);
@@ -959,7 +1183,7 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin];
const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
struct rzg2l_pinctrl_pin_settings settings = pctrl->settings[_pin];
- unsigned int *pin_data = pin->drv_data;
+ u64 *pin_data = pin->drv_data;
enum pin_config_param param;
unsigned int i, arg, index;
u32 cfg, off;
@@ -970,9 +1194,9 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
return -EINVAL;
off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
- cfg = RZG2L_PIN_CFG_TO_CAPS(*pin_data);
+ cfg = FIELD_GET(PIN_CFG_MASK, *pin_data);
if (*pin_data & RZG2L_SINGLE_PIN) {
- bit = RZG2L_SINGLE_PIN_GET_BIT(*pin_data);
+ bit = FIELD_GET(RZG2L_SINGLE_PIN_BITS_MASK, *pin_data);
} else {
bit = RZG2L_PIN_ID_TO_PIN(_pin);
@@ -1164,7 +1388,7 @@ static int rzg2l_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[offset];
- u32 *pin_data = pin_desc->drv_data;
+ u64 *pin_data = pin_desc->drv_data;
u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u32 port = RZG2L_PIN_ID_TO_PORT(offset);
u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
@@ -1196,7 +1420,7 @@ static void rzg2l_gpio_set_direction(struct rzg2l_pinctrl *pctrl, u32 offset,
bool output)
{
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[offset];
- unsigned int *pin_data = pin_desc->drv_data;
+ u64 *pin_data = pin_desc->drv_data;
u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
unsigned long flags;
@@ -1217,7 +1441,7 @@ static int rzg2l_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[offset];
- unsigned int *pin_data = pin_desc->drv_data;
+ u64 *pin_data = pin_desc->drv_data;
u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
@@ -1248,7 +1472,7 @@ static void rzg2l_gpio_set(struct gpio_chip *chip, unsigned int offset,
{
struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[offset];
- unsigned int *pin_data = pin_desc->drv_data;
+ u64 *pin_data = pin_desc->drv_data;
u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
unsigned long flags;
@@ -1281,7 +1505,7 @@ static int rzg2l_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[offset];
- unsigned int *pin_data = pin_desc->drv_data;
+ u64 *pin_data = pin_desc->drv_data;
u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
u16 reg16;
@@ -1366,7 +1590,7 @@ static const char * const rzg2l_gpio_names[] = {
"P48_0", "P48_1", "P48_2", "P48_3", "P48_4", "P48_5", "P48_6", "P48_7",
};
-static const u32 r9a07g044_gpio_configs[] = {
+static const u64 r9a07g044_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(2, 0x10, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(2, 0x11, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(2, 0x12, RZG2L_MPXED_PIN_FUNCS),
@@ -1418,7 +1642,7 @@ static const u32 r9a07g044_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(5, 0x40, RZG2L_MPXED_PIN_FUNCS),
};
-static const u32 r9a07g043_gpio_configs[] = {
+static const u64 r9a07g043_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(4, 0x10, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(5, 0x11, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
RZG2L_GPIO_PORT_PACK(4, 0x12, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
@@ -1438,9 +1662,28 @@ static const u32 r9a07g043_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(2, 0x20, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(4, 0x21, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(6, 0x22, RZG2L_MPXED_PIN_FUNCS),
+#ifdef CONFIG_RISCV
+ /* The additional port pins below (P19 - P28) are available only on the RZ/Five SoC */
+ RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x06, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P19 */
+ RZG2L_GPIO_PORT_PACK(8, 0x07, PIN_CFG_VARIABLE), /* P20 */
+ RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x08, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P21 */
+ RZG2L_GPIO_PORT_PACK(4, 0x09, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P22 */
+ RZG2L_GPIO_PORT_SPARSE_PACK(0x3e, 0x0a, PIN_CFG_VARIABLE), /* P23 */
+ RZG2L_GPIO_PORT_PACK(6, 0x0b, PIN_CFG_VARIABLE), /* P24 */
+ RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x0c, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_FILONOFF |
+ PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_NOGPIO_INT), /* P25 */
+ 0x0, /* P26 */
+ 0x0, /* P27 */
+ RZG2L_GPIO_PORT_PACK(6, 0x0f, RZG2L_MPXED_PIN_FUNCS | PIN_CFG_NOGPIO_INT), /* P28 */
+#endif
};
-static const u32 r9a08g045_gpio_configs[] = {
+static const u64 r9a08g045_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(4, 0x20, RZG3S_MPXED_PIN_FUNCS(A)), /* P0 */
RZG2L_GPIO_PORT_PACK(5, 0x30, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
PIN_CFG_IO_VMC_ETH0)) |
@@ -1598,40 +1841,42 @@ static const struct rzg2l_dedicated_configs rzg3s_dedicated_pins[] = {
PIN_CFG_IO_VMC_SD1)) },
};
-static int rzg2l_gpio_get_gpioint(unsigned int virq, const struct rzg2l_pinctrl_data *data)
+static int rzg2l_gpio_get_gpioint(unsigned int virq, struct rzg2l_pinctrl *pctrl)
{
+ const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[virq];
+ const struct rzg2l_pinctrl_data *data = pctrl->data;
+ u64 *pin_data = pin_desc->drv_data;
unsigned int gpioint;
unsigned int i;
u32 port, bit;
+ if (*pin_data & PIN_CFG_NOGPIO_INT)
+ return -EINVAL;
+
port = virq / 8;
bit = virq % 8;
if (port >= data->n_ports ||
- bit >= RZG2L_GPIO_PORT_GET_PINCNT(data->port_pin_configs[port]))
+ bit >= hweight8(FIELD_GET(PIN_CFG_PIN_MAP_MASK, data->port_pin_configs[port])))
return -EINVAL;
gpioint = bit;
for (i = 0; i < port; i++)
- gpioint += RZG2L_GPIO_PORT_GET_PINCNT(data->port_pin_configs[i]);
+ gpioint += hweight8(FIELD_GET(PIN_CFG_PIN_MAP_MASK, data->port_pin_configs[i]));
return gpioint;
}
-static void rzg2l_gpio_irq_disable(struct irq_data *d)
+static void rzg2l_gpio_irq_endisable(struct rzg2l_pinctrl *pctrl,
+ unsigned int hwirq, bool enable)
{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip);
- unsigned int hwirq = irqd_to_hwirq(d);
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[hwirq];
- unsigned int *pin_data = pin_desc->drv_data;
+ u64 *pin_data = pin_desc->drv_data;
u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u8 bit = RZG2L_PIN_ID_TO_PIN(hwirq);
unsigned long flags;
void __iomem *addr;
- irq_chip_disable_parent(d);
-
addr = pctrl->base + ISEL(off);
if (bit >= 4) {
bit -= 4;
@@ -1639,36 +1884,28 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d)
}
spin_lock_irqsave(&pctrl->lock, flags);
- writel(readl(addr) & ~BIT(bit * 8), addr);
+ if (enable)
+ writel(readl(addr) | BIT(bit * 8), addr);
+ else
+ writel(readl(addr) & ~BIT(bit * 8), addr);
spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+static void rzg2l_gpio_irq_disable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+
+ irq_chip_disable_parent(d);
gpiochip_disable_irq(gc, hwirq);
}
static void rzg2l_gpio_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip);
unsigned int hwirq = irqd_to_hwirq(d);
- const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[hwirq];
- unsigned int *pin_data = pin_desc->drv_data;
- u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
- u8 bit = RZG2L_PIN_ID_TO_PIN(hwirq);
- unsigned long flags;
- void __iomem *addr;
gpiochip_enable_irq(gc, hwirq);
-
- addr = pctrl->base + ISEL(off);
- if (bit >= 4) {
- bit -= 4;
- addr += 4;
- }
-
- spin_lock_irqsave(&pctrl->lock, flags);
- writel(readl(addr) | BIT(bit * 8), addr);
- spin_unlock_irqrestore(&pctrl->lock, flags);
-
irq_chip_enable_parent(d);
}
@@ -1689,6 +1926,28 @@ static void rzg2l_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p)
seq_printf(p, dev_name(gc->parent));
}
+static int rzg2l_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip);
+ int ret;
+
+ /* This should never happen. */
+ if (!data->parent_data)
+ return -EOPNOTSUPP;
+
+ ret = irq_chip_set_wake_parent(data, on);
+ if (ret)
+ return ret;
+
+ if (on)
+ atomic_inc(&pctrl->wakeup_path);
+ else
+ atomic_dec(&pctrl->wakeup_path);
+
+ return 0;
+}
+
static const struct irq_chip rzg2l_gpio_irqchip = {
.name = "rzg2l-gpio",
.irq_disable = rzg2l_gpio_irq_disable,
@@ -1699,10 +1958,31 @@ static const struct irq_chip rzg2l_gpio_irqchip = {
.irq_eoi = rzg2l_gpio_irqc_eoi,
.irq_print_chip = rzg2l_gpio_irq_print_chip,
.irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_wake = rzg2l_gpio_irq_set_wake,
.flags = IRQCHIP_IMMUTABLE,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
+static int rzg2l_gpio_interrupt_input_mode(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[offset];
+ u64 *pin_data = pin_desc->drv_data;
+ u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
+ u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
+ u8 reg8;
+ int ret;
+
+ reg8 = readb(pctrl->base + PMC(off));
+ if (reg8 & BIT(bit)) {
+ ret = rzg2l_gpio_request(chip, offset);
+ if (ret)
+ return ret;
+ }
+
+ return rzg2l_gpio_direction_input(chip, offset);
+}
+
static int rzg2l_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
unsigned int child,
unsigned int child_type,
@@ -1712,16 +1992,25 @@ static int rzg2l_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
struct rzg2l_pinctrl *pctrl = gpiochip_get_data(gc);
unsigned long flags;
int gpioint, irq;
+ int ret;
- gpioint = rzg2l_gpio_get_gpioint(child, pctrl->data);
+ gpioint = rzg2l_gpio_get_gpioint(child, pctrl);
if (gpioint < 0)
return gpioint;
+ ret = rzg2l_gpio_interrupt_input_mode(gc, child);
+ if (ret)
+ return ret;
+
spin_lock_irqsave(&pctrl->bitmap_lock, flags);
irq = bitmap_find_free_region(pctrl->tint_slot, RZG2L_TINT_MAX_INTERRUPT, get_order(1));
spin_unlock_irqrestore(&pctrl->bitmap_lock, flags);
- if (irq < 0)
- return -ENOSPC;
+ if (irq < 0) {
+ ret = -ENOSPC;
+ goto err;
+ }
+
+ rzg2l_gpio_irq_endisable(pctrl, child, true);
pctrl->hwirq[irq] = child;
irq += RZG2L_TINT_IRQ_START_INDEX;
@@ -1729,6 +2018,10 @@ static int rzg2l_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
*parent_type = IRQ_TYPE_LEVEL_HIGH;
*parent = RZG2L_PACK_HWIRQ(gpioint, irq);
return 0;
+
+err:
+ rzg2l_gpio_free(gc, child);
+ return ret;
}
static int rzg2l_gpio_populate_parent_fwspec(struct gpio_chip *chip,
@@ -1746,6 +2039,35 @@ static int rzg2l_gpio_populate_parent_fwspec(struct gpio_chip *chip,
return 0;
}
+static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl)
+{
+ struct irq_domain *domain = pctrl->gpio_chip.irq.domain;
+
+ for (unsigned int i = 0; i < RZG2L_TINT_MAX_INTERRUPT; i++) {
+ struct irq_data *data;
+ unsigned int virq;
+
+ if (!pctrl->hwirq[i])
+ continue;
+
+ virq = irq_find_mapping(domain, pctrl->hwirq[i]);
+ if (!virq) {
+ dev_crit(pctrl->dev, "Failed to find IRQ mapping for hwirq %u\n",
+ pctrl->hwirq[i]);
+ continue;
+ }
+
+ data = irq_domain_get_irq_data(domain, virq);
+ if (!data) {
+ dev_crit(pctrl->dev, "Failed to get IRQ data for virq=%u\n", virq);
+ continue;
+ }
+
+ if (!irqd_irq_disabled(data))
+ rzg2l_gpio_irq_enable(data);
+ }
+}
+
static void rzg2l_gpio_irq_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
@@ -1761,6 +2083,8 @@ static void rzg2l_gpio_irq_domain_free(struct irq_domain *domain, unsigned int v
for (i = 0; i < RZG2L_TINT_MAX_INTERRUPT; i++) {
if (pctrl->hwirq[i] == hwirq) {
+ rzg2l_gpio_irq_endisable(pctrl, hwirq, false);
+ rzg2l_gpio_free(gc, hwirq);
spin_lock_irqsave(&pctrl->bitmap_lock, flags);
bitmap_release_region(pctrl->tint_slot, i, get_order(1));
spin_unlock_irqrestore(&pctrl->bitmap_lock, flags);
@@ -1788,11 +2112,74 @@ static void rzg2l_init_irq_valid_mask(struct gpio_chip *gc,
bit = offset % 8;
if (port >= pctrl->data->n_ports ||
- bit >= RZG2L_GPIO_PORT_GET_PINCNT(pctrl->data->port_pin_configs[port]))
+ bit >= hweight8(FIELD_GET(PIN_CFG_PIN_MAP_MASK,
+ pctrl->data->port_pin_configs[port])))
clear_bit(offset, valid_mask);
}
}
+static int rzg2l_pinctrl_reg_cache_alloc(struct rzg2l_pinctrl *pctrl)
+{
+ u32 nports = pctrl->data->n_port_pins / RZG2L_PINS_PER_PORT;
+ struct rzg2l_pinctrl_reg_cache *cache, *dedicated_cache;
+
+ cache = devm_kzalloc(pctrl->dev, sizeof(*cache), GFP_KERNEL);
+ if (!cache)
+ return -ENOMEM;
+
+ dedicated_cache = devm_kzalloc(pctrl->dev, sizeof(*dedicated_cache), GFP_KERNEL);
+ if (!dedicated_cache)
+ return -ENOMEM;
+
+ cache->p = devm_kcalloc(pctrl->dev, nports, sizeof(*cache->p), GFP_KERNEL);
+ if (!cache->p)
+ return -ENOMEM;
+
+ cache->pm = devm_kcalloc(pctrl->dev, nports, sizeof(*cache->pm), GFP_KERNEL);
+ if (!cache->pm)
+ return -ENOMEM;
+
+ cache->pmc = devm_kcalloc(pctrl->dev, nports, sizeof(*cache->pmc), GFP_KERNEL);
+ if (!cache->pmc)
+ return -ENOMEM;
+
+ cache->pfc = devm_kcalloc(pctrl->dev, nports, sizeof(*cache->pfc), GFP_KERNEL);
+ if (!cache->pfc)
+ return -ENOMEM;
+
+ for (u8 i = 0; i < 2; i++) {
+ u32 n_dedicated_pins = pctrl->data->n_dedicated_pins;
+
+ cache->iolh[i] = devm_kcalloc(pctrl->dev, nports, sizeof(*cache->iolh[i]),
+ GFP_KERNEL);
+ if (!cache->iolh[i])
+ return -ENOMEM;
+
+ cache->ien[i] = devm_kcalloc(pctrl->dev, nports, sizeof(*cache->ien[i]),
+ GFP_KERNEL);
+ if (!cache->ien[i])
+ return -ENOMEM;
+
+ /* Allocate dedicated cache. */
+ dedicated_cache->iolh[i] = devm_kcalloc(pctrl->dev, n_dedicated_pins,
+ sizeof(*dedicated_cache->iolh[i]),
+ GFP_KERNEL);
+ if (!dedicated_cache->iolh[i])
+ return -ENOMEM;
+
+ dedicated_cache->ien[i] = devm_kcalloc(pctrl->dev, n_dedicated_pins,
+ sizeof(*dedicated_cache->ien[i]),
+ GFP_KERNEL);
+ if (!dedicated_cache->ien[i])
+ return -ENOMEM;
+ }
+
+ pctrl->cache = cache;
+ pctrl->dedicated_cache = dedicated_cache;
+
+ return 0;
+}
+
static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl)
{
struct device_node *np = pctrl->dev->of_node;
@@ -1870,7 +2257,7 @@ static int rzg2l_pinctrl_register(struct rzg2l_pinctrl *pctrl)
const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
struct pinctrl_pin_desc *pins;
unsigned int i, j;
- u32 *pin_data;
+ u64 *pin_data;
int ret;
pctrl->desc.name = DRV_NAME;
@@ -1898,6 +2285,13 @@ static int rzg2l_pinctrl_register(struct rzg2l_pinctrl *pctrl)
if (i && !(i % RZG2L_PINS_PER_PORT))
j++;
pin_data[i] = pctrl->data->port_pin_configs[j];
+#ifdef CONFIG_RISCV
+ if (pin_data[i] & PIN_CFG_VARIABLE)
+ pin_data[i] = rzg2l_pinctrl_get_variable_pin_cfg(pctrl,
+ pin_data[i],
+ j,
+ i % RZG2L_PINS_PER_PORT);
+#endif
pins[i].drv_data = &pin_data[i];
}
@@ -1926,6 +2320,10 @@ static int rzg2l_pinctrl_register(struct rzg2l_pinctrl *pctrl)
}
}
+ ret = rzg2l_pinctrl_reg_cache_alloc(pctrl);
+ if (ret)
+ return ret;
+
ret = devm_pinctrl_register_and_init(pctrl->dev, &pctrl->desc, pctrl,
&pctrl->pctl);
if (ret) {
@@ -1951,7 +2349,6 @@ static int rzg2l_pinctrl_register(struct rzg2l_pinctrl *pctrl)
static int rzg2l_pinctrl_probe(struct platform_device *pdev)
{
struct rzg2l_pinctrl *pctrl;
- struct clk *clk;
int ret;
BUILD_BUG_ON(ARRAY_SIZE(r9a07g044_gpio_configs) * RZG2L_PINS_PER_PORT >
@@ -1977,14 +2374,16 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
if (IS_ERR(pctrl->base))
return PTR_ERR(pctrl->base);
- clk = devm_clk_get_enabled(pctrl->dev, NULL);
- if (IS_ERR(clk))
- return dev_err_probe(pctrl->dev, PTR_ERR(clk),
+ pctrl->clk = devm_clk_get_enabled(pctrl->dev, NULL);
+ if (IS_ERR(pctrl->clk)) {
+ return dev_err_probe(pctrl->dev, PTR_ERR(pctrl->clk),
"failed to enable GPIO clk\n");
+ }
spin_lock_init(&pctrl->lock);
spin_lock_init(&pctrl->bitmap_lock);
mutex_init(&pctrl->mutex);
+ atomic_set(&pctrl->wakeup_path, 0);
platform_set_drvdata(pdev, pctrl);
@@ -1996,6 +2395,224 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
return 0;
}
+static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspend)
+{
+ u32 nports = pctrl->data->n_port_pins / RZG2L_PINS_PER_PORT;
+ struct rzg2l_pinctrl_reg_cache *cache = pctrl->cache;
+
+ for (u32 port = 0; port < nports; port++) {
+ bool has_iolh, has_ien;
+ u32 off, caps;
+ u8 pincnt;
+ u64 cfg;
+
+ cfg = pctrl->data->port_pin_configs[port];
+ off = RZG2L_PIN_CFG_TO_PORT_OFFSET(cfg);
+ pincnt = hweight8(FIELD_GET(PIN_CFG_PIN_MAP_MASK, cfg));
+
+ caps = FIELD_GET(PIN_CFG_MASK, cfg);
+ has_iolh = !!(caps & (PIN_CFG_IOLH_A | PIN_CFG_IOLH_B | PIN_CFG_IOLH_C));
+ has_ien = !!(caps & PIN_CFG_IEN);
+
+ if (suspend)
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + PFC(off), cache->pfc[port]);
+
+ /*
+ * Now cache the registers or set them in the order suggested by the
+ * HW manual (section "Operation for GPIO Function").
+ */
+ RZG2L_PCTRL_REG_ACCESS8(suspend, pctrl->base + PMC(off), cache->pmc[port]);
+ if (has_iolh) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + IOLH(off),
+ cache->iolh[0][port]);
+ if (pincnt >= 4) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + IOLH(off) + 4,
+ cache->iolh[1][port]);
+ }
+ }
+
+ RZG2L_PCTRL_REG_ACCESS16(suspend, pctrl->base + PM(off), cache->pm[port]);
+ RZG2L_PCTRL_REG_ACCESS8(suspend, pctrl->base + P(off), cache->p[port]);
+
+ if (has_ien) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + IEN(off),
+ cache->ien[0][port]);
+ if (pincnt >= 4) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + IEN(off) + 4,
+ cache->ien[1][port]);
+ }
+ }
+ }
+}
+
+static void rzg2l_pinctrl_pm_setup_dedicated_regs(struct rzg2l_pinctrl *pctrl, bool suspend)
+{
+ struct rzg2l_pinctrl_reg_cache *cache = pctrl->dedicated_cache;
+
+ /*
+ * Make sure entries in pctrl->data->dedicated_pins[] with the same
+ * port offset are close together.
+ */
+ for (u32 i = 0, caps = 0; i < pctrl->data->n_dedicated_pins; i++) {
+ bool has_iolh, has_ien;
+ u32 off, next_off = 0;
+ u64 cfg, next_cfg;
+ u8 pincnt;
+
+ cfg = pctrl->data->dedicated_pins[i].config;
+ off = RZG2L_PIN_CFG_TO_PORT_OFFSET(cfg);
+ if (i + 1 < pctrl->data->n_dedicated_pins) {
+ next_cfg = pctrl->data->dedicated_pins[i + 1].config;
+ next_off = RZG2L_PIN_CFG_TO_PORT_OFFSET(next_cfg);
+ }
+
+ if (off == next_off) {
+ /* Gather caps of all port pins. */
+ caps |= FIELD_GET(PIN_CFG_MASK, cfg);
+ continue;
+ }
+
+ /* And apply them in a single shot. */
+ has_iolh = !!(caps & (PIN_CFG_IOLH_A | PIN_CFG_IOLH_B | PIN_CFG_IOLH_C));
+ has_ien = !!(caps & PIN_CFG_IEN);
+ pincnt = hweight8(FIELD_GET(RZG2L_SINGLE_PIN_BITS_MASK, cfg));
+
+ if (has_iolh) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + IOLH(off),
+ cache->iolh[0][i]);
+ }
+ if (has_ien) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + IEN(off),
+ cache->ien[0][i]);
+ }
+
+ if (pincnt >= 4) {
+ if (has_iolh) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend,
+ pctrl->base + IOLH(off) + 4,
+ cache->iolh[1][i]);
+ }
+ if (has_ien) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend,
+ pctrl->base + IEN(off) + 4,
+ cache->ien[1][i]);
+ }
+ }
+ caps = 0;
+ }
+}
+
+static void rzg2l_pinctrl_pm_setup_pfc(struct rzg2l_pinctrl *pctrl)
+{
+ u32 nports = pctrl->data->n_port_pins / RZG2L_PINS_PER_PORT;
+ const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
+ const struct rzg2l_register_offsets *regs = &hwcfg->regs;
+
+ /* Set the PWPR register to allow writes to the PFC register. */
+ writel(0x0, pctrl->base + regs->pwpr); /* B0WI=0, PFCWE=0 */
+ writel(PWPR_PFCWE, pctrl->base + regs->pwpr); /* B0WI=0, PFCWE=1 */
+
+ /* Restore port registers. */
+ for (u32 port = 0; port < nports; port++) {
+ unsigned long pinmap;
+ u8 pmc = 0, max_pin;
+ u32 off, pfc = 0;
+ u64 cfg;
+ u16 pm;
+ u8 pin;
+
+ cfg = pctrl->data->port_pin_configs[port];
+ off = RZG2L_PIN_CFG_TO_PORT_OFFSET(cfg);
+ pinmap = FIELD_GET(PIN_CFG_PIN_MAP_MASK, cfg);
+ max_pin = fls(pinmap);
+
+ pm = readw(pctrl->base + PM(off));
+ for_each_set_bit(pin, &pinmap, max_pin) {
+ struct rzg2l_pinctrl_reg_cache *cache = pctrl->cache;
+
+ /* Nothing to do if PFC was not configured before. */
+ if (!(cache->pmc[port] & BIT(pin)))
+ continue;
+
+ /* Set pin to 'Non-use (Hi-Z input protection)' */
+ pm &= ~(PM_MASK << (pin * 2));
+ writew(pm, pctrl->base + PM(off));
+
+ /* Temporarily switch to GPIO mode with PMC register */
+ pmc &= ~BIT(pin);
+ writeb(pmc, pctrl->base + PMC(off));
+
+ /* Select Pin function mode. */
+ pfc &= ~(PFC_MASK << (pin * 4));
+ pfc |= (cache->pfc[port] & (PFC_MASK << (pin * 4)));
+ writel(pfc, pctrl->base + PFC(off));
+
+ /* Switch to Peripheral pin function. */
+ pmc |= BIT(pin);
+ writeb(pmc, pctrl->base + PMC(off));
+ }
+ }
+
+ /* Set the PWPR register to be write-protected. */
+ writel(0x0, pctrl->base + regs->pwpr); /* B0WI=0, PFCWE=0 */
+ writel(PWPR_B0WI, pctrl->base + regs->pwpr); /* B0WI=1, PFCWE=0 */
+}
+
+static int rzg2l_pinctrl_suspend_noirq(struct device *dev)
+{
+ struct rzg2l_pinctrl *pctrl = dev_get_drvdata(dev);
+ const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
+ const struct rzg2l_register_offsets *regs = &hwcfg->regs;
+ struct rzg2l_pinctrl_reg_cache *cache = pctrl->cache;
+
+ rzg2l_pinctrl_pm_setup_regs(pctrl, true);
+ rzg2l_pinctrl_pm_setup_dedicated_regs(pctrl, true);
+
+ for (u8 i = 0; i < 2; i++) {
+ cache->sd_ch[i] = readb(pctrl->base + SD_CH(regs->sd_ch, i));
+ cache->eth_poc[i] = readb(pctrl->base + ETH_POC(regs->eth_poc, i));
+ }
+
+ cache->qspi = readb(pctrl->base + QSPI);
+ cache->eth_mode = readb(pctrl->base + ETH_MODE);
+
+ if (!atomic_read(&pctrl->wakeup_path))
+ clk_disable_unprepare(pctrl->clk);
+ else
+ device_set_wakeup_path(dev);
+
+ return 0;
+}
+
+static int rzg2l_pinctrl_resume_noirq(struct device *dev)
+{
+ struct rzg2l_pinctrl *pctrl = dev_get_drvdata(dev);
+ const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
+ const struct rzg2l_register_offsets *regs = &hwcfg->regs;
+ struct rzg2l_pinctrl_reg_cache *cache = pctrl->cache;
+ int ret;
+
+ if (!atomic_read(&pctrl->wakeup_path)) {
+ ret = clk_prepare_enable(pctrl->clk);
+ if (ret)
+ return ret;
+ }
+
+ writeb(cache->qspi, pctrl->base + QSPI);
+ writeb(cache->eth_mode, pctrl->base + ETH_MODE);
+ for (u8 i = 0; i < 2; i++) {
+ writeb(cache->sd_ch[i], pctrl->base + SD_CH(regs->sd_ch, i));
+ writeb(cache->eth_poc[i], pctrl->base + ETH_POC(regs->eth_poc, i));
+ }
+
+ rzg2l_pinctrl_pm_setup_pfc(pctrl);
+ rzg2l_pinctrl_pm_setup_regs(pctrl, false);
+ rzg2l_pinctrl_pm_setup_dedicated_regs(pctrl, false);
+ rzg2l_gpio_irq_restore(pctrl);
+
+ return 0;
+}
+
static const struct rzg2l_hwcfg rzg2l_hwcfg = {
.regs = {
.pwpr = 0x3014,
@@ -2049,6 +2666,10 @@ static struct rzg2l_pinctrl_data r9a07g043_data = {
.n_port_pins = ARRAY_SIZE(r9a07g043_gpio_configs) * RZG2L_PINS_PER_PORT,
.n_dedicated_pins = ARRAY_SIZE(rzg2l_dedicated_pins.common),
.hwcfg = &rzg2l_hwcfg,
+#ifdef CONFIG_RISCV
+ .variable_pin_cfg = r9a07g043f_variable_pin_cfg,
+ .n_variable_pin_cfg = ARRAY_SIZE(r9a07g043f_variable_pin_cfg),
+#endif
};
static struct rzg2l_pinctrl_data r9a07g044_data = {
@@ -2088,10 +2709,15 @@ static const struct of_device_id rzg2l_pinctrl_of_table[] = {
{ /* sentinel */ }
};
+static const struct dev_pm_ops rzg2l_pinctrl_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(rzg2l_pinctrl_suspend_noirq, rzg2l_pinctrl_resume_noirq)
+};
+
static struct platform_driver rzg2l_pinctrl_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(rzg2l_pinctrl_of_table),
+ .pm = pm_sleep_ptr(&rzg2l_pinctrl_pm_ops),
},
.probe = rzg2l_pinctrl_probe,
};
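
Side note on the rzg2l changes above: the packed pin configuration moves from a plain pin count to a pin bitmap so that ports with non-contiguous pins (the extra RZ/Five ports) can be described. Below is a minimal, self-contained sketch of that pattern only; the mask name and values are illustrative assumptions, not the driver's actual definitions.

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/types.h>

/* Illustrative mask only; the driver's real bit layout differs. */
#define EX_PIN_MAP_MASK		GENMASK_ULL(7, 0)

/* Pack a sparse pin map (e.g. BIT(1) | BIT(5)) into the per-port config word. */
static u64 ex_pack_port(u8 pinmap)
{
	return FIELD_PREP(EX_PIN_MAP_MASK, pinmap);
}

/* Valid if the requested bit is present in the pin map (replaces "bit < pincount"). */
static bool ex_pin_is_valid(u64 cfg, u8 bit)
{
	return FIELD_GET(EX_PIN_MAP_MASK, cfg) & BIT(bit);
}

/* Number of populated pins on the port (replaces the stored pin count). */
static unsigned int ex_pin_count(u64 cfg)
{
	return hweight8(FIELD_GET(EX_PIN_MAP_MASK, cfg));
}
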
diff --git a/drivers/pinctrl/renesas/sh_pfc.h b/drivers/pinctrl/renesas/sh_pfc.h
index 8dc7a66009ad8..0061e96400598 100644
--- a/drivers/pinctrl/renesas/sh_pfc.h
+++ b/drivers/pinctrl/renesas/sh_pfc.h
@@ -322,6 +322,7 @@ extern const struct sh_pfc_soc_info r8a77995_pinmux_info;
extern const struct sh_pfc_soc_info r8a779a0_pinmux_info;
extern const struct sh_pfc_soc_info r8a779f0_pinmux_info;
extern const struct sh_pfc_soc_info r8a779g0_pinmux_info;
+extern const struct sh_pfc_soc_info r8a779h0_pinmux_info;
extern const struct sh_pfc_soc_info sh7203_pinmux_info;
extern const struct sh_pfc_soc_info sh7264_pinmux_info;
extern const struct sh_pfc_soc_info sh7269_pinmux_info;
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
index 7641848be4def..96ef57a7d385c 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
@@ -685,7 +685,7 @@ static const struct phy_ops sata_phy_ops = {
};
static struct phy *tegra_xusb_padctl_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct tegra_xusb_padctl *padctl = dev_get_drvdata(dev);
unsigned int index = args->args[0];
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 2b2f14a1b7119..4d305876ec08f 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -24,6 +24,23 @@
#define DP_PORT_VDO (DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D)) | \
DP_CAP_DFP_D | DP_CAP_RECEPTACLE)
+static void cros_typec_role_switch_quirk(struct fwnode_handle *fwnode)
+{
+#ifdef CONFIG_ACPI
+ struct fwnode_handle *switch_fwnode;
+
+ /* Supply the USB role switch with the correct pld_crc if it's missing. */
+ switch_fwnode = fwnode_find_reference(fwnode, "usb-role-switch", 0);
+ if (!IS_ERR_OR_NULL(switch_fwnode)) {
+ struct acpi_device *adev = to_acpi_device_node(switch_fwnode);
+
+ if (adev && !adev->pld_crc)
+ adev->pld_crc = to_acpi_device_node(fwnode)->pld_crc;
+ fwnode_handle_put(switch_fwnode);
+ }
+#endif
+}
+
static int cros_typec_parse_port_props(struct typec_capability *cap,
struct fwnode_handle *fwnode,
struct device *dev)
@@ -66,6 +83,8 @@ static int cros_typec_parse_port_props(struct typec_capability *cap,
cap->prefer_role = ret;
}
+ cros_typec_role_switch_quirk(fwnode);
+
cap->fwnode = fwnode;
return 0;
diff --git a/drivers/platform/chrome/cros_ec_uart.c b/drivers/platform/chrome/cros_ec_uart.c
index 68d80559fddc2..62bc24f6dcc7a 100644
--- a/drivers/platform/chrome/cros_ec_uart.c
+++ b/drivers/platform/chrome/cros_ec_uart.c
@@ -81,8 +81,8 @@ struct cros_ec_uart {
struct response_info response;
};
-static ssize_t cros_ec_uart_rx_bytes(struct serdev_device *serdev,
- const u8 *data, size_t count)
+static size_t cros_ec_uart_rx_bytes(struct serdev_device *serdev,
+ const u8 *data, size_t count)
{
struct ec_host_response *host_response;
struct cros_ec_device *ec_dev = serdev_device_get_drvdata(serdev);
@@ -263,12 +263,6 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
if (!ec_dev)
return -ENOMEM;
- ret = devm_serdev_device_open(dev, serdev);
- if (ret) {
- dev_err(dev, "Unable to open UART device");
- return ret;
- }
-
serdev_device_set_drvdata(serdev, ec_dev);
init_waitqueue_head(&ec_uart->response.wait_queue);
@@ -280,14 +274,6 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
return ret;
}
- ret = serdev_device_set_baudrate(serdev, ec_uart->baudrate);
- if (ret < 0) {
- dev_err(dev, "Failed to set up host baud rate (%d)", ret);
- return ret;
- }
-
- serdev_device_set_flow_control(serdev, ec_uart->flowcontrol);
-
/* Initialize ec_dev for cros_ec */
ec_dev->phys_name = dev_name(dev);
ec_dev->dev = dev;
@@ -301,6 +287,20 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
serdev_device_set_client_ops(serdev, &cros_ec_uart_client_ops);
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret) {
+ dev_err(dev, "Unable to open UART device");
+ return ret;
+ }
+
+ ret = serdev_device_set_baudrate(serdev, ec_uart->baudrate);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set up host baud rate (%d)", ret);
+ return ret;
+ }
+
+ serdev_device_set_flow_control(serdev, ec_uart->flowcontrol);
+
return cros_ec_register(ec_dev);
}
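
A general note on the cros_ec_uart reordering above: for serdev clients, the receive callbacks should be registered before the port is opened, otherwise incoming bytes can race with a partially initialised driver. The sketch below shows that ordering in isolation; the ops structure, baud rate, and names are placeholders, not the driver's actual values.

#include <linux/serdev.h>

/* Placeholder ops; a real client would fill in .receive_buf etc. */
static const struct serdev_device_ops ex_client_ops;

static int ex_serdev_probe(struct serdev_device *serdev)
{
	int ret;

	/* Register the client ops first so the RX path has valid callbacks... */
	serdev_device_set_client_ops(serdev, &ex_client_ops);

	/* ...and only then open and configure the port. */
	ret = devm_serdev_device_open(&serdev->dev, serdev);
	if (ret)
		return ret;

	ret = serdev_device_set_baudrate(serdev, 115200);
	if (ret < 0)
		return ret;

	serdev_device_set_flow_control(serdev, false);

	return 0;
}
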
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index f3d09b1631e3e..03ca5bf19f985 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -2,6 +2,7 @@
menuconfig GOLDFISH
bool "Platform support for Goldfish virtual devices"
depends on HAS_IOMEM && HAS_DMA
+ default X86_GOLDFISH
help
Say Y here to get to see options for the Goldfish virtual platform.
This option alone does not add any kernel code.
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
index c1aef3a8fb2de..dd5f370c31682 100644
--- a/drivers/platform/mellanox/mlxbf-bootctl.c
+++ b/drivers/platform/mellanox/mlxbf-bootctl.c
@@ -463,7 +463,7 @@ static ssize_t large_icm_show(struct device *dev,
if (res.a0)
return -EPERM;
- return snprintf(buf, PAGE_SIZE, "0x%lx", res.a1);
+ return sysfs_emit(buf, "0x%lx", res.a1);
}
static ssize_t large_icm_store(struct device *dev,
@@ -581,7 +581,7 @@ static ssize_t opn_show(struct device *dev,
}
mutex_unlock(&mfg_ops_lock);
- return snprintf(buf, PAGE_SIZE, "%s", (char *)opn_data);
+ return sysfs_emit(buf, "%s", (char *)opn_data);
}
static ssize_t opn_store(struct device *dev,
@@ -632,7 +632,7 @@ static ssize_t sku_show(struct device *dev,
}
mutex_unlock(&mfg_ops_lock);
- return snprintf(buf, PAGE_SIZE, "%s", (char *)sku_data);
+ return sysfs_emit(buf, "%s", (char *)sku_data);
}
static ssize_t sku_store(struct device *dev,
@@ -683,7 +683,7 @@ static ssize_t modl_show(struct device *dev,
}
mutex_unlock(&mfg_ops_lock);
- return snprintf(buf, PAGE_SIZE, "%s", (char *)modl_data);
+ return sysfs_emit(buf, "%s", (char *)modl_data);
}
static ssize_t modl_store(struct device *dev,
@@ -734,7 +734,7 @@ static ssize_t sn_show(struct device *dev,
}
mutex_unlock(&mfg_ops_lock);
- return snprintf(buf, PAGE_SIZE, "%s", (char *)sn_data);
+ return sysfs_emit(buf, "%s", (char *)sn_data);
}
static ssize_t sn_store(struct device *dev,
@@ -785,7 +785,7 @@ static ssize_t uuid_show(struct device *dev,
}
mutex_unlock(&mfg_ops_lock);
- return snprintf(buf, PAGE_SIZE, "%s", (char *)uuid_data);
+ return sysfs_emit(buf, "%s", (char *)uuid_data);
}
static ssize_t uuid_store(struct device *dev,
@@ -836,7 +836,7 @@ static ssize_t rev_show(struct device *dev,
}
mutex_unlock(&mfg_ops_lock);
- return snprintf(buf, PAGE_SIZE, "%s", (char *)rev_data);
+ return sysfs_emit(buf, "%s", (char *)rev_data);
}
static ssize_t rev_store(struct device *dev,
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index b1995ac268d77..4ed9c7fd2b62a 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -99,8 +99,8 @@
*/
struct mlxbf_pmc_attribute {
struct device_attribute dev_attr;
- int index;
- int nr;
+ unsigned int index;
+ unsigned int nr;
};
/**
@@ -121,7 +121,7 @@ struct mlxbf_pmc_block_info {
void __iomem *mmio_base;
size_t blk_size;
size_t counters;
- int type;
+ unsigned int type;
struct mlxbf_pmc_attribute *attr_counter;
struct mlxbf_pmc_attribute *attr_event;
struct mlxbf_pmc_attribute attr_event_list;
@@ -149,17 +149,17 @@ struct mlxbf_pmc_block_info {
*/
struct mlxbf_pmc_context {
struct platform_device *pdev;
- uint32_t total_blocks;
- uint32_t tile_count;
- uint8_t llt_enable;
- uint8_t mss_enable;
- uint32_t group_num;
+ u32 total_blocks;
+ u32 tile_count;
+ u8 llt_enable;
+ u8 mss_enable;
+ u32 group_num;
struct device *hwmon_dev;
const char *block_name[MLXBF_PMC_MAX_BLOCKS];
struct mlxbf_pmc_block_info block[MLXBF_PMC_MAX_BLOCKS];
const struct attribute_group *groups[MLXBF_PMC_MAX_BLOCKS];
bool svc_sreg_support;
- uint32_t sreg_tbl_perf;
+ u32 sreg_tbl_perf;
unsigned int event_set;
};
@@ -169,7 +169,7 @@ struct mlxbf_pmc_context {
* @evt_name: Name of the event
*/
struct mlxbf_pmc_events {
- int evt_num;
+ u32 evt_num;
char *evt_name;
};
@@ -865,8 +865,7 @@ static struct mlxbf_pmc_context *pmc;
static const char *mlxbf_pmc_svc_uuid_str = "89c036b4-e7d7-11e6-8797-001aca00bfc4";
/* Calls an SMC to access a performance register */
-static int mlxbf_pmc_secure_read(void __iomem *addr, uint32_t command,
- uint64_t *result)
+static int mlxbf_pmc_secure_read(void __iomem *addr, u32 command, u64 *result)
{
struct arm_smccc_res res;
int status, err = 0;
@@ -892,8 +891,7 @@ static int mlxbf_pmc_secure_read(void __iomem *addr, uint32_t command,
}
/* Read from a performance counter */
-static int mlxbf_pmc_read(void __iomem *addr, uint32_t command,
- uint64_t *result)
+static int mlxbf_pmc_read(void __iomem *addr, u32 command, u64 *result)
{
if (pmc->svc_sreg_support)
return mlxbf_pmc_secure_read(addr, command, result);
@@ -907,22 +905,21 @@ static int mlxbf_pmc_read(void __iomem *addr, uint32_t command,
}
/* Convenience function for 32-bit reads */
-static int mlxbf_pmc_readl(void __iomem *addr, uint32_t *result)
+static int mlxbf_pmc_readl(void __iomem *addr, u32 *result)
{
- uint64_t read_out;
+ u64 read_out;
int status;
status = mlxbf_pmc_read(addr, MLXBF_PMC_READ_REG_32, &read_out);
if (status)
return status;
- *result = (uint32_t)read_out;
+ *result = (u32)read_out;
return 0;
}
/* Calls an SMC to access a performance register */
-static int mlxbf_pmc_secure_write(void __iomem *addr, uint32_t command,
- uint64_t value)
+static int mlxbf_pmc_secure_write(void __iomem *addr, u32 command, u64 value)
{
struct arm_smccc_res res;
int status, err = 0;
@@ -945,7 +942,7 @@ static int mlxbf_pmc_secure_write(void __iomem *addr, uint32_t command,
}
/* Write to a performance counter */
-static int mlxbf_pmc_write(void __iomem *addr, int command, uint64_t value)
+static int mlxbf_pmc_write(void __iomem *addr, int command, u64 value)
{
if (pmc->svc_sreg_support)
return mlxbf_pmc_secure_write(addr, command, value);
@@ -959,7 +956,7 @@ static int mlxbf_pmc_write(void __iomem *addr, int command, uint64_t value)
}
/* Check if the register offset is within the mapped region for the block */
-static bool mlxbf_pmc_valid_range(int blk_num, uint32_t offset)
+static bool mlxbf_pmc_valid_range(unsigned int blk_num, u32 offset)
{
if ((offset >= 0) && !(offset % MLXBF_PMC_REG_SIZE) &&
(offset + MLXBF_PMC_REG_SIZE <= pmc->block[blk_num].blk_size))
@@ -969,33 +966,33 @@ static bool mlxbf_pmc_valid_range(int blk_num, uint32_t offset)
}
/* Get the event list corresponding to a certain block */
-static const struct mlxbf_pmc_events *mlxbf_pmc_event_list(const char *blk,
- int *size)
+static const struct mlxbf_pmc_events *mlxbf_pmc_event_list(const char *blk, size_t *psize)
{
const struct mlxbf_pmc_events *events;
+ size_t size;
if (strstr(blk, "tilenet")) {
events = mlxbf_pmc_hnfnet_events;
- *size = ARRAY_SIZE(mlxbf_pmc_hnfnet_events);
+ size = ARRAY_SIZE(mlxbf_pmc_hnfnet_events);
} else if (strstr(blk, "tile")) {
events = mlxbf_pmc_hnf_events;
- *size = ARRAY_SIZE(mlxbf_pmc_hnf_events);
+ size = ARRAY_SIZE(mlxbf_pmc_hnf_events);
} else if (strstr(blk, "triogen")) {
events = mlxbf_pmc_smgen_events;
- *size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
+ size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
} else if (strstr(blk, "trio")) {
switch (pmc->event_set) {
case MLXBF_PMC_EVENT_SET_BF1:
events = mlxbf_pmc_trio_events_1;
- *size = ARRAY_SIZE(mlxbf_pmc_trio_events_1);
+ size = ARRAY_SIZE(mlxbf_pmc_trio_events_1);
break;
case MLXBF_PMC_EVENT_SET_BF2:
events = mlxbf_pmc_trio_events_2;
- *size = ARRAY_SIZE(mlxbf_pmc_trio_events_2);
+ size = ARRAY_SIZE(mlxbf_pmc_trio_events_2);
break;
default:
events = NULL;
- *size = 0;
+ size = 0;
break;
}
} else if (strstr(blk, "mss")) {
@@ -1003,51 +1000,60 @@ static const struct mlxbf_pmc_events *mlxbf_pmc_event_list(const char *blk,
case MLXBF_PMC_EVENT_SET_BF1:
case MLXBF_PMC_EVENT_SET_BF2:
events = mlxbf_pmc_mss_events_1;
- *size = ARRAY_SIZE(mlxbf_pmc_mss_events_1);
+ size = ARRAY_SIZE(mlxbf_pmc_mss_events_1);
break;
case MLXBF_PMC_EVENT_SET_BF3:
events = mlxbf_pmc_mss_events_3;
- *size = ARRAY_SIZE(mlxbf_pmc_mss_events_3);
+ size = ARRAY_SIZE(mlxbf_pmc_mss_events_3);
break;
default:
events = NULL;
- *size = 0;
+ size = 0;
break;
}
} else if (strstr(blk, "ecc")) {
events = mlxbf_pmc_ecc_events;
- *size = ARRAY_SIZE(mlxbf_pmc_ecc_events);
+ size = ARRAY_SIZE(mlxbf_pmc_ecc_events);
} else if (strstr(blk, "pcie")) {
events = mlxbf_pmc_pcie_events;
- *size = ARRAY_SIZE(mlxbf_pmc_pcie_events);
+ size = ARRAY_SIZE(mlxbf_pmc_pcie_events);
} else if (strstr(blk, "l3cache")) {
events = mlxbf_pmc_l3c_events;
- *size = ARRAY_SIZE(mlxbf_pmc_l3c_events);
+ size = ARRAY_SIZE(mlxbf_pmc_l3c_events);
} else if (strstr(blk, "gic")) {
events = mlxbf_pmc_smgen_events;
- *size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
+ size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
} else if (strstr(blk, "smmu")) {
events = mlxbf_pmc_smgen_events;
- *size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
+ size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
} else if (strstr(blk, "llt_miss")) {
events = mlxbf_pmc_llt_miss_events;
- *size = ARRAY_SIZE(mlxbf_pmc_llt_miss_events);
+ size = ARRAY_SIZE(mlxbf_pmc_llt_miss_events);
} else if (strstr(blk, "llt")) {
events = mlxbf_pmc_llt_events;
- *size = ARRAY_SIZE(mlxbf_pmc_llt_events);
+ size = ARRAY_SIZE(mlxbf_pmc_llt_events);
} else {
events = NULL;
- *size = 0;
+ size = 0;
}
+ if (psize)
+ *psize = size;
+
return events;
}
+static bool mlxbf_pmc_event_supported(const char *blk)
+{
+ return !!mlxbf_pmc_event_list(blk, NULL);
+}
+
/* Get the event number given the name */
static int mlxbf_pmc_get_event_num(const char *blk, const char *evt)
{
const struct mlxbf_pmc_events *events;
- int i, size;
+ unsigned int i;
+ size_t size;
events = mlxbf_pmc_event_list(blk, &size);
if (!events)
@@ -1062,10 +1068,11 @@ static int mlxbf_pmc_get_event_num(const char *blk, const char *evt)
}
/* Get the event name given the event number */
-static char *mlxbf_pmc_get_event_name(const char *blk, int evt)
+static char *mlxbf_pmc_get_event_name(const char *blk, u32 evt)
{
const struct mlxbf_pmc_events *events;
- int i, size;
+ unsigned int i;
+ size_t size;
events = mlxbf_pmc_event_list(blk, &size);
if (!events)
@@ -1080,9 +1087,9 @@ static char *mlxbf_pmc_get_event_name(const char *blk, int evt)
}
/* Method to enable/disable/reset l3cache counters */
-static int mlxbf_pmc_config_l3_counters(int blk_num, bool enable, bool reset)
+static int mlxbf_pmc_config_l3_counters(unsigned int blk_num, bool enable, bool reset)
{
- uint32_t perfcnt_cfg = 0;
+ u32 perfcnt_cfg = 0;
if (enable)
perfcnt_cfg |= MLXBF_PMC_L3C_PERF_CNT_CFG_EN;
@@ -1095,12 +1102,9 @@ static int mlxbf_pmc_config_l3_counters(int blk_num, bool enable, bool reset)
}
/* Method to handle l3cache counter programming */
-static int mlxbf_pmc_program_l3_counter(int blk_num, uint32_t cnt_num,
- uint32_t evt)
+static int mlxbf_pmc_program_l3_counter(unsigned int blk_num, u32 cnt_num, u32 evt)
{
- uint32_t perfcnt_sel_1 = 0;
- uint32_t perfcnt_sel = 0;
- uint32_t *wordaddr;
+ u32 perfcnt_sel_1 = 0, perfcnt_sel = 0, *wordaddr;
void __iomem *pmcaddr;
int ret;
@@ -1162,11 +1166,10 @@ static int mlxbf_pmc_program_l3_counter(int blk_num, uint32_t cnt_num,
}
/* Method to handle crspace counter programming */
-static int mlxbf_pmc_program_crspace_counter(int blk_num, uint32_t cnt_num,
- uint32_t evt)
+static int mlxbf_pmc_program_crspace_counter(unsigned int blk_num, u32 cnt_num, u32 evt)
{
- uint32_t word;
void *addr;
+ u32 word;
int ret;
addr = pmc->block[blk_num].mmio_base +
@@ -1187,7 +1190,7 @@ static int mlxbf_pmc_program_crspace_counter(int blk_num, uint32_t cnt_num,
}
/* Method to clear crspace counter value */
-static int mlxbf_pmc_clear_crspace_counter(int blk_num, uint32_t cnt_num)
+static int mlxbf_pmc_clear_crspace_counter(unsigned int blk_num, u32 cnt_num)
{
void *addr;
@@ -1199,10 +1202,9 @@ static int mlxbf_pmc_clear_crspace_counter(int blk_num, uint32_t cnt_num)
}
/* Method to program a counter to monitor an event */
-static int mlxbf_pmc_program_counter(int blk_num, uint32_t cnt_num,
- uint32_t evt, bool is_l3)
+static int mlxbf_pmc_program_counter(unsigned int blk_num, u32 cnt_num, u32 evt, bool is_l3)
{
- uint64_t perfctl, perfevt, perfmon_cfg;
+ u64 perfctl, perfevt, perfmon_cfg;
if (cnt_num >= pmc->block[blk_num].counters)
return -ENODEV;
@@ -1263,12 +1265,11 @@ static int mlxbf_pmc_program_counter(int blk_num, uint32_t cnt_num,
}
/* Method to handle l3 counter reads */
-static int mlxbf_pmc_read_l3_counter(int blk_num, uint32_t cnt_num,
- uint64_t *result)
+static int mlxbf_pmc_read_l3_counter(unsigned int blk_num, u32 cnt_num, u64 *result)
{
- uint32_t perfcnt_low = 0, perfcnt_high = 0;
- uint64_t value;
+ u32 perfcnt_low = 0, perfcnt_high = 0;
int status;
+ u64 value;
status = mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
MLXBF_PMC_L3C_PERF_CNT_LOW +
@@ -1295,11 +1296,10 @@ static int mlxbf_pmc_read_l3_counter(int blk_num, uint32_t cnt_num,
}
/* Method to handle crspace counter reads */
-static int mlxbf_pmc_read_crspace_counter(int blk_num, uint32_t cnt_num,
- uint64_t *result)
+static int mlxbf_pmc_read_crspace_counter(unsigned int blk_num, u32 cnt_num, u64 *result)
{
- uint32_t value;
int status = 0;
+ u32 value;
status = mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
MLXBF_PMC_CRSPACE_PERFMON_VAL0(pmc->block[blk_num].counters) +
@@ -1313,11 +1313,10 @@ static int mlxbf_pmc_read_crspace_counter(int blk_num, uint32_t cnt_num,
}
/* Method to read the counter value */
-static int mlxbf_pmc_read_counter(int blk_num, uint32_t cnt_num, bool is_l3,
- uint64_t *result)
+static int mlxbf_pmc_read_counter(unsigned int blk_num, u32 cnt_num, bool is_l3, u64 *result)
{
- uint32_t perfcfg_offset, perfval_offset;
- uint64_t perfmon_cfg;
+ u32 perfcfg_offset, perfval_offset;
+ u64 perfmon_cfg;
int status;
if (cnt_num >= pmc->block[blk_num].counters)
@@ -1351,13 +1350,11 @@ static int mlxbf_pmc_read_counter(int blk_num, uint32_t cnt_num, bool is_l3,
}
/* Method to read L3 block event */
-static int mlxbf_pmc_read_l3_event(int blk_num, uint32_t cnt_num,
- uint64_t *result)
+static int mlxbf_pmc_read_l3_event(unsigned int blk_num, u32 cnt_num, u64 *result)
{
- uint32_t perfcnt_sel = 0, perfcnt_sel_1 = 0;
- uint32_t *wordaddr;
+ u32 perfcnt_sel = 0, perfcnt_sel_1 = 0, *wordaddr;
void __iomem *pmcaddr;
- uint64_t evt;
+ u64 evt;
/* Select appropriate register information */
switch (cnt_num) {
@@ -1405,10 +1402,9 @@ static int mlxbf_pmc_read_l3_event(int blk_num, uint32_t cnt_num,
}
/* Method to read crspace block event */
-static int mlxbf_pmc_read_crspace_event(int blk_num, uint32_t cnt_num,
- uint64_t *result)
+static int mlxbf_pmc_read_crspace_event(unsigned int blk_num, u32 cnt_num, u64 *result)
{
- uint32_t word, evt;
+ u32 word, evt;
void *addr;
int ret;
@@ -1429,11 +1425,10 @@ static int mlxbf_pmc_read_crspace_event(int blk_num, uint32_t cnt_num,
}
/* Method to find the event currently being monitored by a counter */
-static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
- uint64_t *result)
+static int mlxbf_pmc_read_event(unsigned int blk_num, u32 cnt_num, bool is_l3, u64 *result)
{
- uint32_t perfcfg_offset, perfval_offset;
- uint64_t perfmon_cfg, perfevt;
+ u32 perfcfg_offset, perfval_offset;
+ u64 perfmon_cfg, perfevt;
if (cnt_num >= pmc->block[blk_num].counters)
return -EINVAL;
@@ -1469,9 +1464,9 @@ static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
}
/* Method to read a register */
-static int mlxbf_pmc_read_reg(int blk_num, uint32_t offset, uint64_t *result)
+static int mlxbf_pmc_read_reg(unsigned int blk_num, u32 offset, u64 *result)
{
- uint32_t ecc_out;
+ u32 ecc_out;
if (strstr(pmc->block_name[blk_num], "ecc")) {
if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base + offset,
@@ -1490,7 +1485,7 @@ static int mlxbf_pmc_read_reg(int blk_num, uint32_t offset, uint64_t *result)
}
/* Method to write to a register */
-static int mlxbf_pmc_write_reg(int blk_num, uint32_t offset, uint64_t data)
+static int mlxbf_pmc_write_reg(unsigned int blk_num, u32 offset, u64 data)
{
if (strstr(pmc->block_name[blk_num], "ecc")) {
return mlxbf_pmc_write(pmc->block[blk_num].mmio_base + offset,
@@ -1510,9 +1505,10 @@ static ssize_t mlxbf_pmc_counter_show(struct device *dev,
{
struct mlxbf_pmc_attribute *attr_counter = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
- int blk_num, cnt_num, offset;
+ unsigned int blk_num, cnt_num;
bool is_l3 = false;
- uint64_t value;
+ int offset;
+ u64 value;
blk_num = attr_counter->nr;
cnt_num = attr_counter->index;
@@ -1544,14 +1540,16 @@ static ssize_t mlxbf_pmc_counter_store(struct device *dev,
{
struct mlxbf_pmc_attribute *attr_counter = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
- int blk_num, cnt_num, offset, err, data;
+ unsigned int blk_num, cnt_num, data;
bool is_l3 = false;
- uint64_t evt_num;
+ u64 evt_num;
+ int offset;
+ int err;
blk_num = attr_counter->nr;
cnt_num = attr_counter->index;
- err = kstrtoint(buf, 0, &data);
+ err = kstrtouint(buf, 0, &data);
if (err < 0)
return err;
@@ -1580,7 +1578,7 @@ static ssize_t mlxbf_pmc_counter_store(struct device *dev,
if (err)
return err;
} else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_CRSPACE) {
- if (sscanf(attr->attr.name, "counter%d", &cnt_num) != 1)
+ if (sscanf(attr->attr.name, "counter%u", &cnt_num) != 1)
return -EINVAL;
err = mlxbf_pmc_clear_crspace_counter(blk_num, cnt_num);
} else
@@ -1595,10 +1593,11 @@ static ssize_t mlxbf_pmc_event_show(struct device *dev,
{
struct mlxbf_pmc_attribute *attr_event = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
- int blk_num, cnt_num, err;
+ unsigned int blk_num, cnt_num;
bool is_l3 = false;
- uint64_t evt_num;
char *evt_name;
+ u64 evt_num;
+ int err;
blk_num = attr_event->nr;
cnt_num = attr_event->index;
@@ -1624,8 +1623,10 @@ static ssize_t mlxbf_pmc_event_store(struct device *dev,
{
struct mlxbf_pmc_attribute *attr_event = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
- int blk_num, cnt_num, evt_num, err;
+ unsigned int blk_num, cnt_num;
bool is_l3 = false;
+ int evt_num;
+ int err;
blk_num = attr_event->nr;
cnt_num = attr_event->index;
@@ -1636,7 +1637,7 @@ static ssize_t mlxbf_pmc_event_store(struct device *dev,
if (evt_num < 0)
return -EINVAL;
} else {
- err = kstrtoint(buf, 0, &evt_num);
+ err = kstrtouint(buf, 0, &evt_num);
if (err < 0)
return err;
}
@@ -1658,9 +1659,11 @@ static ssize_t mlxbf_pmc_event_list_show(struct device *dev,
{
struct mlxbf_pmc_attribute *attr_event_list = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
- int blk_num, i, size, len = 0, ret = 0;
const struct mlxbf_pmc_events *events;
char e_info[MLXBF_PMC_EVENT_INFO_LEN];
+ unsigned int blk_num, i, len = 0;
+ size_t size;
+ int ret = 0;
blk_num = attr_event_list->nr;
@@ -1686,8 +1689,8 @@ static ssize_t mlxbf_pmc_enable_show(struct device *dev,
{
struct mlxbf_pmc_attribute *attr_enable = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
- uint32_t perfcnt_cfg, word;
- int blk_num, value;
+ unsigned int blk_num, value;
+ u32 perfcnt_cfg, word;
blk_num = attr_enable->nr;
@@ -1707,7 +1710,7 @@ static ssize_t mlxbf_pmc_enable_show(struct device *dev,
value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg);
}
- return sysfs_emit(buf, "%d\n", value);
+ return sysfs_emit(buf, "%u\n", value);
}
/* Store function for "enable" sysfs files - only for l3cache & crspace */
@@ -1717,12 +1720,13 @@ static ssize_t mlxbf_pmc_enable_store(struct device *dev,
{
struct mlxbf_pmc_attribute *attr_enable = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
- int err, en, blk_num;
- uint32_t word;
+ unsigned int en, blk_num;
+ u32 word;
+ int err;
blk_num = attr_enable->nr;
- err = kstrtoint(buf, 0, &en);
+ err = kstrtouint(buf, 0, &en);
if (err < 0)
return err;
@@ -1760,10 +1764,13 @@ static ssize_t mlxbf_pmc_enable_store(struct device *dev,
}
/* Populate attributes for blocks with counters to monitor performance */
-static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
+static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_num)
{
struct mlxbf_pmc_attribute *attr;
- int i = 0, j = 0;
+ unsigned int i = 0, j = 0;
+
+ if (!mlxbf_pmc_event_supported(pmc->block_name[blk_num]))
+ return -ENOENT;
/* "event_list" sysfs to list events supported by the block */
attr = &pmc->block[blk_num].attr_event_list;
@@ -1812,8 +1819,7 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
attr->dev_attr.store = mlxbf_pmc_counter_store;
attr->index = j;
attr->nr = blk_num;
- attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
- "counter%d", j);
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, "counter%u", j);
if (!attr->dev_attr.attr.name)
return -ENOMEM;
pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
@@ -1825,8 +1831,7 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
attr->dev_attr.store = mlxbf_pmc_event_store;
attr->index = j;
attr->nr = blk_num;
- attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
- "event%d", j);
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, "event%u", j);
if (!attr->dev_attr.attr.name)
return -ENOMEM;
pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
@@ -1837,30 +1842,31 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
}
/* Populate attributes for blocks with registers to monitor performance */
-static int mlxbf_pmc_init_perftype_reg(struct device *dev, int blk_num)
+static int mlxbf_pmc_init_perftype_reg(struct device *dev, unsigned int blk_num)
{
- struct mlxbf_pmc_attribute *attr;
const struct mlxbf_pmc_events *events;
- int i = 0, j = 0;
+ struct mlxbf_pmc_attribute *attr;
+ unsigned int i = 0;
+ size_t count = 0;
- events = mlxbf_pmc_event_list(pmc->block_name[blk_num], &j);
+ events = mlxbf_pmc_event_list(pmc->block_name[blk_num], &count);
if (!events)
- return -EINVAL;
+ return -ENOENT;
pmc->block[blk_num].attr_event = devm_kcalloc(
- dev, j, sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
+ dev, count, sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
if (!pmc->block[blk_num].attr_event)
return -ENOMEM;
- while (j > 0) {
- --j;
- attr = &pmc->block[blk_num].attr_event[j];
+ while (count > 0) {
+ --count;
+ attr = &pmc->block[blk_num].attr_event[count];
attr->dev_attr.attr.mode = 0644;
attr->dev_attr.show = mlxbf_pmc_counter_show;
attr->dev_attr.store = mlxbf_pmc_counter_store;
attr->nr = blk_num;
attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
- events[j].evt_name);
+ events[count].evt_name);
if (!attr->dev_attr.attr.name)
return -ENOMEM;
pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
@@ -1872,7 +1878,7 @@ static int mlxbf_pmc_init_perftype_reg(struct device *dev, int blk_num)
}
/* Helper to create the bfperf sysfs sub-directories and files */
-static int mlxbf_pmc_create_groups(struct device *dev, int blk_num)
+static int mlxbf_pmc_create_groups(struct device *dev, unsigned int blk_num)
{
int err;
@@ -1883,7 +1889,7 @@ static int mlxbf_pmc_create_groups(struct device *dev, int blk_num)
else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER)
err = mlxbf_pmc_init_perftype_reg(dev, blk_num);
else
- err = -EINVAL;
+ err = -ENOENT;
if (err)
return err;
@@ -1914,19 +1920,20 @@ static bool mlxbf_pmc_guid_match(const guid_t *guid,
/* Helper to map the Performance Counters from the various blocks */
static int mlxbf_pmc_map_counters(struct device *dev)
{
- uint64_t info[MLXBF_PMC_INFO_SZ];
- int i, tile_num, ret;
+ u64 info[MLXBF_PMC_INFO_SZ];
+ unsigned int tile_num, i;
+ int ret;
for (i = 0; i < pmc->total_blocks; ++i) {
/* Create sysfs for tiles only if block number < tile_count */
if (strstr(pmc->block_name[i], "tilenet")) {
- if (sscanf(pmc->block_name[i], "tilenet%d", &tile_num) != 1)
+ if (sscanf(pmc->block_name[i], "tilenet%u", &tile_num) != 1)
continue;
if (tile_num >= pmc->tile_count)
continue;
} else if (strstr(pmc->block_name[i], "tile")) {
- if (sscanf(pmc->block_name[i], "tile%d", &tile_num) != 1)
+ if (sscanf(pmc->block_name[i], "tile%u", &tile_num) != 1)
continue;
if (tile_num >= pmc->tile_count)
@@ -1936,9 +1943,9 @@ static int mlxbf_pmc_map_counters(struct device *dev)
/* Create sysfs only for enabled MSS blocks */
if (strstr(pmc->block_name[i], "mss") &&
pmc->event_set == MLXBF_PMC_EVENT_SET_BF3) {
- int mss_num;
+ unsigned int mss_num;
- if (sscanf(pmc->block_name[i], "mss%d", &mss_num) != 1)
+ if (sscanf(pmc->block_name[i], "mss%u", &mss_num) != 1)
continue;
if (!((pmc->mss_enable >> mss_num) & 0x1))
@@ -1947,17 +1954,17 @@ static int mlxbf_pmc_map_counters(struct device *dev)
/* Create sysfs only for enabled LLT blocks */
if (strstr(pmc->block_name[i], "llt_miss")) {
- int llt_num;
+ unsigned int llt_num;
- if (sscanf(pmc->block_name[i], "llt_miss%d", &llt_num) != 1)
+ if (sscanf(pmc->block_name[i], "llt_miss%u", &llt_num) != 1)
continue;
if (!((pmc->llt_enable >> llt_num) & 0x1))
continue;
} else if (strstr(pmc->block_name[i], "llt")) {
- int llt_num;
+ unsigned int llt_num;
- if (sscanf(pmc->block_name[i], "llt%d", &llt_num) != 1)
+ if (sscanf(pmc->block_name[i], "llt%u", &llt_num) != 1)
continue;
if (!((pmc->llt_enable >> llt_num) & 0x1))
@@ -1987,6 +1994,10 @@ static int mlxbf_pmc_map_counters(struct device *dev)
return -ENOMEM;
ret = mlxbf_pmc_create_groups(dev, i);
+ if (ret == -ENOENT) {
+ dev_warn(dev, "ignoring unsupported block: '%s'\n", pmc->block_name[i]);
+ continue;
+ }
if (ret)
return ret;
}
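
On the mlxbf_pmc_event_list() change above: making the size an optional out-parameter lets a simple "is this block supported?" helper reuse the same lookup without a dummy variable. A minimal sketch of the pattern, with made-up table contents:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

struct ex_event {
	u32 num;
	const char *name;
};

static const struct ex_event ex_tile_events[] = {
	{ 0x0, "CYCLES" },
	{ 0x1, "STALLS" },
};

/* Return the event table for a block; write its size only if the caller asks. */
static const struct ex_event *ex_event_list(const char *blk, size_t *psize)
{
	const struct ex_event *events = NULL;
	size_t size = 0;

	if (strstr(blk, "tile")) {
		events = ex_tile_events;
		size = ARRAY_SIZE(ex_tile_events);
	}

	if (psize)
		*psize = size;

	return events;
}

static bool ex_event_supported(const char *blk)
{
	return !!ex_event_list(blk, NULL);
}
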
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index 5c022b258f91a..0ce9fff1f7d4d 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -348,20 +348,6 @@ mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
u32 regval, bit;
int ret;
- /*
- * Validate if item related to received signal type is valid.
- * It should never happen, excepted the situation when some
- * piece of hardware is broken. In such situation just produce
- * error message and return. Caller must continue to handle the
- * signals from other devices if any.
- */
- if (unlikely(!item)) {
- dev_err(priv->dev, "False signal: at offset:mask 0x%02x:0x%02x.\n",
- item->reg, item->mask);
-
- return;
- }
-
/* Mask event. */
ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
0);
diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
index 9591a28bc38a9..ba550eaa06fcf 100644
--- a/drivers/platform/surface/aggregator/core.c
+++ b/drivers/platform/surface/aggregator/core.c
@@ -227,8 +227,8 @@ EXPORT_SYMBOL_GPL(ssam_client_bind);
/* -- Glue layer (serdev_device -> ssam_controller). ------------------------ */
-static ssize_t ssam_receive_buf(struct serdev_device *dev, const u8 *buf,
- size_t n)
+static size_t ssam_receive_buf(struct serdev_device *dev, const u8 *buf,
+ size_t n)
{
struct ssam_controller *ctrl;
int ret;
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index aeb3feae40ff3..035d6b4105cd6 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -74,6 +74,12 @@ static const struct software_node ssam_node_tmp_pprof = {
.parent = &ssam_node_root,
};
+/* Fan speed function. */
+static const struct software_node ssam_node_fan_speed = {
+ .name = "ssam:01:05:01:01:01",
+ .parent = &ssam_node_root,
+};
+
/* Tablet-mode switch via KIP subsystem. */
static const struct software_node ssam_node_kip_tablet_switch = {
.name = "ssam:01:0e:01:00:01",
@@ -305,6 +311,7 @@ static const struct software_node *ssam_node_group_sp9[] = {
&ssam_node_bat_ac,
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
+ &ssam_node_fan_speed,
&ssam_node_pos_tablet_switch,
&ssam_node_hid_kip_keyboard,
&ssam_node_hid_kip_penstash,
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index bdd302274b9ab..7e9251fc33416 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -56,8 +56,6 @@ config HUAWEI_WMI
depends on INPUT
select INPUT_SPARSEKMAP
select LEDS_CLASS
- select LEDS_TRIGGERS
- select LEDS_TRIGGER_AUDIO
select NEW_LEDS
help
This driver provides support for Huawei WMI hotkeys, battery charge
@@ -269,8 +267,6 @@ config ASUS_WMI
select INPUT_SPARSEKMAP
select LEDS_CLASS
select NEW_LEDS
- select LEDS_TRIGGERS
- select LEDS_TRIGGER_AUDIO
select ACPI_PLATFORM_PROFILE
help
Say Y here if you have a WMI aware Asus laptop (like Eee PCs or new
@@ -374,6 +370,7 @@ config FUJITSU_LAPTOP
depends on ACPI
depends on INPUT
depends on BACKLIGHT_CLASS_DEVICE
+ depends on ACPI_BATTERY
depends on ACPI_VIDEO || ACPI_VIDEO = n
select INPUT_SPARSEKMAP
select NEW_LEDS
@@ -507,8 +504,6 @@ config THINKPAD_ACPI
select NVRAM
select NEW_LEDS
select LEDS_CLASS
- select LEDS_TRIGGERS
- select LEDS_TRIGGER_AUDIO
help
This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
support for Fn-Fx key combinations, Bluetooth control, video
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 88b826e88ebd7..38c932df6446a 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -276,6 +276,7 @@ static bool has_type_aa;
static u16 commun_func_bitmap;
static u8 commun_fn_key_number;
static bool cycle_gaming_thermal_profile = true;
+static bool predator_v4;
module_param(mailled, int, 0444);
module_param(brightness, int, 0444);
@@ -284,6 +285,7 @@ module_param(force_series, int, 0444);
module_param(force_caps, int, 0444);
module_param(ec_raw_mode, bool, 0444);
module_param(cycle_gaming_thermal_profile, bool, 0644);
+module_param(predator_v4, bool, 0444);
MODULE_PARM_DESC(mailled, "Set initial state of Mail LED");
MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness");
MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware");
@@ -292,6 +294,8 @@ MODULE_PARM_DESC(force_caps, "Force the capability bitmask to this value");
MODULE_PARM_DESC(ec_raw_mode, "Enable EC raw mode");
MODULE_PARM_DESC(cycle_gaming_thermal_profile,
"Set thermal mode key in cycle mode. Disabling it sets the mode key in turbo toggle mode");
+MODULE_PARM_DESC(predator_v4,
+ "Enable features for predator laptops that use predator sense v4");
struct acer_data {
int mailled;
@@ -585,6 +589,24 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
.driver_data = &quirk_acer_predator_v4,
},
{
+ .callback = dmi_matched,
+ .ident = "Acer Predator PH16-71",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Predator PH16-71"),
+ },
+ .driver_data = &quirk_acer_predator_v4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Predator PH18-71",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Predator PH18-71"),
+ },
+ .driver_data = &quirk_acer_predator_v4,
+ },
+ {
.callback = set_force_caps,
.ident = "Acer Aspire Switch 10E SW3-016",
.matches = {
@@ -725,7 +747,9 @@ enum acer_predator_v4_thermal_profile_wmi {
 /* Find which quirks are needed for a particular vendor/model pair */
static void __init find_quirks(void)
{
- if (!force_series) {
+ if (predator_v4) {
+ quirks = &quirk_acer_predator_v4;
+ } else if (!force_series) {
dmi_check_system(acer_quirks);
dmi_check_system(non_acer_quirks);
} else if (force_series == 2490) {
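For models not yet covered by the DMI table above, the new parameter can force the quirk_acer_predator_v4 handling at load time; since it is declared with mode 0444 it is read-only at runtime, so it has to be passed when the module is loaded (for example predator_v4=1 as a modprobe option, or acer_wmi.predator_v4=1 on the kernel command line).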
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
index 54753213cc61c..f88682d36447c 100644
--- a/drivers/platform/x86/amd/Kconfig
+++ b/drivers/platform/x86/amd/Kconfig
@@ -8,7 +8,7 @@ source "drivers/platform/x86/amd/pmc/Kconfig"
config AMD_HSMP
tristate "AMD HSMP Driver"
- depends on AMD_NB && X86_64
+ depends on AMD_NB && X86_64 && ACPI
help
The driver provides a way for user space tools to monitor and manage
system management functionality on EPYC server CPUs from AMD.
diff --git a/drivers/platform/x86/amd/hsmp.c b/drivers/platform/x86/amd/hsmp.c
index b55d80e291393..1927be901108e 100644
--- a/drivers/platform/x86/amd/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp.c
@@ -18,9 +18,11 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/semaphore.h>
+#include <linux/acpi.h>
#define DRIVER_NAME "amd_hsmp"
-#define DRIVER_VERSION "2.0"
+#define DRIVER_VERSION "2.2"
+#define ACPI_HSMP_DEVICE_HID "AMDI0097"
/* HSMP Status / Error codes */
#define HSMP_STATUS_NOT_READY 0x00
@@ -40,9 +42,11 @@
* register into the SMN_INDEX register, and reads/writes the SMN_DATA reg.
* Below are required SMN address for HSMP Mailbox register offsets in SMU address space
*/
-#define SMN_HSMP_MSG_ID 0x3B10534
-#define SMN_HSMP_MSG_RESP 0x3B10980
-#define SMN_HSMP_MSG_DATA 0x3B109E0
+#define SMN_HSMP_BASE 0x3B00000
+#define SMN_HSMP_MSG_ID 0x0010534
+#define SMN_HSMP_MSG_ID_F1A_M0H 0x0010934
+#define SMN_HSMP_MSG_RESP 0x0010980
+#define SMN_HSMP_MSG_DATA 0x00109E0
#define HSMP_INDEX_REG 0xc4
#define HSMP_DATA_REG 0xc8
@@ -53,41 +57,86 @@
#define HSMP_ATTR_GRP_NAME_SIZE 10
+/* These are the strings specified in the ACPI table */
+#define MSG_IDOFF_STR "MsgIdOffset"
+#define MSG_ARGOFF_STR "MsgArgOffset"
+#define MSG_RESPOFF_STR "MsgRspOffset"
+
+#define MAX_AMD_SOCKETS 8
+
+struct hsmp_mbaddr_info {
+ u32 base_addr;
+ u32 msg_id_off;
+ u32 msg_resp_off;
+ u32 msg_arg_off;
+ u32 size;
+};
+
struct hsmp_socket {
struct bin_attribute hsmp_attr;
+ struct hsmp_mbaddr_info mbinfo;
void __iomem *metric_tbl_addr;
+ void __iomem *virt_base_addr;
struct semaphore hsmp_sem;
char name[HSMP_ATTR_GRP_NAME_SIZE];
+ struct pci_dev *root;
+ struct device *dev;
u16 sock_ind;
};
struct hsmp_plat_device {
struct miscdevice hsmp_device;
struct hsmp_socket *sock;
- struct device *dev;
u32 proto_ver;
u16 num_sockets;
+ bool is_acpi_device;
+ bool is_probed;
};
static struct hsmp_plat_device plat_dev;
-static int amd_hsmp_rdwr(struct pci_dev *root, u32 address,
- u32 *value, bool write)
+static int amd_hsmp_pci_rdwr(struct hsmp_socket *sock, u32 offset,
+ u32 *value, bool write)
{
int ret;
- ret = pci_write_config_dword(root, HSMP_INDEX_REG, address);
+ if (!sock->root)
+ return -ENODEV;
+
+ ret = pci_write_config_dword(sock->root, HSMP_INDEX_REG,
+ sock->mbinfo.base_addr + offset);
if (ret)
return ret;
- ret = (write ? pci_write_config_dword(root, HSMP_DATA_REG, *value)
- : pci_read_config_dword(root, HSMP_DATA_REG, value));
+ ret = (write ? pci_write_config_dword(sock->root, HSMP_DATA_REG, *value)
+ : pci_read_config_dword(sock->root, HSMP_DATA_REG, value));
return ret;
}
+static void amd_hsmp_acpi_rdwr(struct hsmp_socket *sock, u32 offset,
+ u32 *value, bool write)
+{
+ if (write)
+ iowrite32(*value, sock->virt_base_addr + offset);
+ else
+ *value = ioread32(sock->virt_base_addr + offset);
+}
+
+static int amd_hsmp_rdwr(struct hsmp_socket *sock, u32 offset,
+ u32 *value, bool write)
+{
+ if (plat_dev.is_acpi_device)
+ amd_hsmp_acpi_rdwr(sock, offset, value, write);
+ else
+ return amd_hsmp_pci_rdwr(sock, offset, value, write);
+
+ return 0;
+}
+
/*
- * Send a message to the HSMP port via PCI-e config space registers.
+ * Send a message to the HSMP port via PCI-e config space registers
+ * or by writing to MMIO space.
*
* The caller is expected to zero out any unused arguments.
* If a response is expected, the number of response words should be greater than 0.
@@ -95,16 +144,19 @@ static int amd_hsmp_rdwr(struct pci_dev *root, u32 address,
* Returns 0 for success and populates the requested number of arguments.
* Returns a negative error code for failure.
*/
-static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
+static int __hsmp_send_message(struct hsmp_socket *sock, struct hsmp_message *msg)
{
+ struct hsmp_mbaddr_info *mbinfo;
unsigned long timeout, short_sleep;
u32 mbox_status;
u32 index;
int ret;
+ mbinfo = &sock->mbinfo;
+
/* Clear the status register */
mbox_status = HSMP_STATUS_NOT_READY;
- ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_RESP, &mbox_status, HSMP_WR);
+ ret = amd_hsmp_rdwr(sock, mbinfo->msg_resp_off, &mbox_status, HSMP_WR);
if (ret) {
pr_err("Error %d clearing mailbox status register\n", ret);
return ret;
@@ -113,7 +165,7 @@ static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
index = 0;
/* Write any message arguments */
while (index < msg->num_args) {
- ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_DATA + (index << 2),
+ ret = amd_hsmp_rdwr(sock, mbinfo->msg_arg_off + (index << 2),
&msg->args[index], HSMP_WR);
if (ret) {
pr_err("Error %d writing message argument %d\n", ret, index);
@@ -123,7 +175,7 @@ static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
}
/* Write the message ID which starts the operation */
- ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_ID, &msg->msg_id, HSMP_WR);
+ ret = amd_hsmp_rdwr(sock, mbinfo->msg_id_off, &msg->msg_id, HSMP_WR);
if (ret) {
pr_err("Error %d writing message ID %u\n", ret, msg->msg_id);
return ret;
@@ -140,7 +192,7 @@ static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
timeout = jiffies + msecs_to_jiffies(HSMP_MSG_TIMEOUT);
while (time_before(jiffies, timeout)) {
- ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_RESP, &mbox_status, HSMP_RD);
+ ret = amd_hsmp_rdwr(sock, mbinfo->msg_resp_off, &mbox_status, HSMP_RD);
if (ret) {
pr_err("Error %d reading mailbox status\n", ret);
return ret;
@@ -175,7 +227,7 @@ static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
*/
index = 0;
while (index < msg->response_sz) {
- ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_DATA + (index << 2),
+ ret = amd_hsmp_rdwr(sock, mbinfo->msg_arg_off + (index << 2),
&msg->args[index], HSMP_RD);
if (ret) {
pr_err("Error %d reading response %u for message ID:%u\n",
@@ -208,21 +260,19 @@ static int validate_message(struct hsmp_message *msg)
int hsmp_send_message(struct hsmp_message *msg)
{
- struct hsmp_socket *sock = &plat_dev.sock[msg->sock_ind];
- struct amd_northbridge *nb;
+ struct hsmp_socket *sock;
int ret;
if (!msg)
return -EINVAL;
-
- nb = node_to_amd_nb(msg->sock_ind);
- if (!nb || !nb->root)
- return -ENODEV;
-
ret = validate_message(msg);
if (ret)
return ret;
+ if (!plat_dev.sock || msg->sock_ind >= plat_dev.num_sockets)
+ return -ENODEV;
+ sock = &plat_dev.sock[msg->sock_ind];
+
/*
 * The time taken by an SMU operation to complete is between
 * 10us and 1ms. Sometimes it may take longer.
@@ -233,7 +283,7 @@ int hsmp_send_message(struct hsmp_message *msg)
if (ret < 0)
return ret;
- ret = __hsmp_send_message(nb->root, msg);
+ ret = __hsmp_send_message(sock, msg);
up(&sock->hsmp_sem);
@@ -244,12 +294,7 @@ EXPORT_SYMBOL_GPL(hsmp_send_message);
static int hsmp_test(u16 sock_ind, u32 value)
{
struct hsmp_message msg = { 0 };
- struct amd_northbridge *nb;
- int ret = -ENODEV;
-
- nb = node_to_amd_nb(sock_ind);
- if (!nb || !nb->root)
- return ret;
+ int ret;
/*
* Test the hsmp port by performing TEST command. The test message
@@ -261,14 +306,15 @@ static int hsmp_test(u16 sock_ind, u32 value)
msg.args[0] = value;
msg.sock_ind = sock_ind;
- ret = __hsmp_send_message(nb->root, &msg);
+ ret = hsmp_send_message(&msg);
if (ret)
return ret;
/* Check the response value */
if (msg.args[0] != (value + 1)) {
- pr_err("Socket %d test message failed, Expected 0x%08X, received 0x%08X\n",
- sock_ind, (value + 1), msg.args[0]);
+ dev_err(plat_dev.sock[sock_ind].dev,
+ "Socket %d test message failed, Expected 0x%08X, received 0x%08X\n",
+ sock_ind, (value + 1), msg.args[0]);
return -EBADE;
}
@@ -337,6 +383,181 @@ static const struct file_operations hsmp_fops = {
.compat_ioctl = hsmp_ioctl,
};
+/* This is the UUID used for HSMP */
+static const guid_t acpi_hsmp_uuid = GUID_INIT(0xb74d619d, 0x5707, 0x48bd,
+ 0xa6, 0x9f, 0x4e, 0xa2,
+ 0x87, 0x1f, 0xc2, 0xf6);
+
+static inline bool is_acpi_hsmp_uuid(union acpi_object *obj)
+{
+ if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == UUID_SIZE)
+ return guid_equal((guid_t *)obj->buffer.pointer, &acpi_hsmp_uuid);
+
+ return false;
+}
+
+static inline int hsmp_get_uid(struct device *dev, u16 *sock_ind)
+{
+ char *uid;
+
+ /*
+ * UID (ID00, ID01..IDXX) is used for differentiating sockets:
+ * read it, strip the "ID" prefix, and convert the remaining
+ * digits to an integer.
+ */
+ uid = acpi_device_uid(ACPI_COMPANION(dev));
+
+ return kstrtou16(uid + 2, 10, sock_ind);
+}
+
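As a concrete example (not part of the patch): a socket device whose ACPI _UID is "ID02" ends up with sock_ind == 2, since the two-character "ID" prefix is skipped and the remaining digits are parsed in base 10 by kstrtou16().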
+static acpi_status hsmp_resource(struct acpi_resource *res, void *data)
+{
+ struct hsmp_socket *sock = data;
+ struct resource r;
+
+ switch (res->type) {
+ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ if (!acpi_dev_resource_memory(res, &r))
+ return AE_ERROR;
+ if (!r.start || r.end < r.start || !(r.flags & IORESOURCE_MEM_WRITEABLE))
+ return AE_ERROR;
+ sock->mbinfo.base_addr = r.start;
+ sock->mbinfo.size = resource_size(&r);
+ break;
+ case ACPI_RESOURCE_TYPE_END_TAG:
+ break;
+ default:
+ return AE_ERROR;
+ }
+
+ return AE_OK;
+}
+
+static int hsmp_read_acpi_dsd(struct hsmp_socket *sock)
+{
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *guid, *mailbox_package;
+ union acpi_object *dsd;
+ acpi_status status;
+ int ret = 0;
+ int j;
+
+ status = acpi_evaluate_object_typed(ACPI_HANDLE(sock->dev), "_DSD", NULL,
+ &buf, ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status)) {
+ dev_err(sock->dev, "Failed to read mailbox reg offsets from DSD table, err: %s\n",
+ acpi_format_exception(status));
+ return -ENODEV;
+ }
+
+ dsd = buf.pointer;
+
+ /* The HSMP _DSD property should contain 2 objects:
+ * 1. A GUID, which is an ACPI object of type ACPI_TYPE_BUFFER.
+ * 2. A mailbox, which is an ACPI object of type ACPI_TYPE_PACKAGE.
+ * This mailbox object contains 3 more ACPI objects of type
+ * ACPI_TYPE_PACKAGE holding the msgid, msgresp and msgarg offsets;
+ * these packages in turn contain 2 ACPI objects of type
+ * ACPI_TYPE_STRING and ACPI_TYPE_INTEGER.
+ */
+ if (!dsd || dsd->type != ACPI_TYPE_PACKAGE || dsd->package.count != 2) {
+ ret = -EINVAL;
+ goto free_buf;
+ }
+
+ guid = &dsd->package.elements[0];
+ mailbox_package = &dsd->package.elements[1];
+ if (!is_acpi_hsmp_uuid(guid) || mailbox_package->type != ACPI_TYPE_PACKAGE) {
+ dev_err(sock->dev, "Invalid hsmp _DSD table data\n");
+ ret = -EINVAL;
+ goto free_buf;
+ }
+
+ for (j = 0; j < mailbox_package->package.count; j++) {
+ union acpi_object *msgobj, *msgstr, *msgint;
+
+ msgobj = &mailbox_package->package.elements[j];
+ msgstr = &msgobj->package.elements[0];
+ msgint = &msgobj->package.elements[1];
+
+ /* package should have 1 string and 1 integer object */
+ if (msgobj->type != ACPI_TYPE_PACKAGE ||
+ msgstr->type != ACPI_TYPE_STRING ||
+ msgint->type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto free_buf;
+ }
+
+ if (!strncmp(msgstr->string.pointer, MSG_IDOFF_STR,
+ msgstr->string.length)) {
+ sock->mbinfo.msg_id_off = msgint->integer.value;
+ } else if (!strncmp(msgstr->string.pointer, MSG_RESPOFF_STR,
+ msgstr->string.length)) {
+ sock->mbinfo.msg_resp_off = msgint->integer.value;
+ } else if (!strncmp(msgstr->string.pointer, MSG_ARGOFF_STR,
+ msgstr->string.length)) {
+ sock->mbinfo.msg_arg_off = msgint->integer.value;
+ } else {
+ ret = -ENOENT;
+ goto free_buf;
+ }
+ }
+
+ if (!sock->mbinfo.msg_id_off || !sock->mbinfo.msg_resp_off ||
+ !sock->mbinfo.msg_arg_off)
+ ret = -EINVAL;
+
+free_buf:
+ ACPI_FREE(buf.pointer);
+ return ret;
+}
+
+static int hsmp_read_acpi_crs(struct hsmp_socket *sock)
+{
+ acpi_status status;
+
+ status = acpi_walk_resources(ACPI_HANDLE(sock->dev), METHOD_NAME__CRS,
+ hsmp_resource, sock);
+ if (ACPI_FAILURE(status)) {
+ dev_err(sock->dev, "Failed to look up MP1 base address from CRS method, err: %s\n",
+ acpi_format_exception(status));
+ return -EINVAL;
+ }
+ if (!sock->mbinfo.base_addr || !sock->mbinfo.size)
+ return -EINVAL;
+
+ /* The mapped region should be uncached */
+ sock->virt_base_addr = devm_ioremap_uc(sock->dev, sock->mbinfo.base_addr,
+ sock->mbinfo.size);
+ if (!sock->virt_base_addr) {
+ dev_err(sock->dev, "Failed to ioremap MP1 base address\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Parse the ACPI table to read the data */
+static int hsmp_parse_acpi_table(struct device *dev, u16 sock_ind)
+{
+ struct hsmp_socket *sock = &plat_dev.sock[sock_ind];
+ int ret;
+
+ sock->sock_ind = sock_ind;
+ sock->dev = dev;
+ plat_dev.is_acpi_device = true;
+
+ sema_init(&sock->hsmp_sem, 1);
+
+ /* Read MP1 base address from CRS method */
+ ret = hsmp_read_acpi_crs(sock);
+ if (ret)
+ return ret;
+
+ /* Read mailbox offsets from DSD table */
+ return hsmp_read_acpi_dsd(sock);
+}
+
static ssize_t hsmp_metric_tbl_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
@@ -345,14 +566,12 @@ static ssize_t hsmp_metric_tbl_read(struct file *filp, struct kobject *kobj,
struct hsmp_message msg = { 0 };
int ret;
- /* Do not support lseek(), reads entire metric table */
- if (count < bin_attr->size) {
- dev_err(plat_dev.dev, "Wrong buffer size\n");
+ if (!sock)
return -EINVAL;
- }
- if (!sock) {
- dev_err(plat_dev.dev, "Failed to read attribute private data\n");
+ /* Do not support lseek(), reads entire metric table */
+ if (count < bin_attr->size) {
+ dev_err(sock->dev, "Wrong buffer size\n");
return -EINVAL;
}
@@ -388,13 +607,13 @@ static int hsmp_get_tbl_dram_base(u16 sock_ind)
*/
dram_addr = msg.args[0] | ((u64)(msg.args[1]) << 32);
if (!dram_addr) {
- dev_err(plat_dev.dev, "Invalid DRAM address for metric table\n");
+ dev_err(sock->dev, "Invalid DRAM address for metric table\n");
return -ENOMEM;
}
- sock->metric_tbl_addr = devm_ioremap(plat_dev.dev, dram_addr,
+ sock->metric_tbl_addr = devm_ioremap(sock->dev, dram_addr,
sizeof(struct hsmp_metric_table));
if (!sock->metric_tbl_addr) {
- dev_err(plat_dev.dev, "Failed to ioremap metric table addr\n");
+ dev_err(sock->dev, "Failed to ioremap metric table addr\n");
return -ENOMEM;
}
return 0;
@@ -422,65 +641,91 @@ static int hsmp_init_metric_tbl_bin_attr(struct bin_attribute **hattrs, u16 sock
hattrs[0] = hattr;
if (plat_dev.proto_ver == HSMP_PROTO_VER6)
- return (hsmp_get_tbl_dram_base(sock_ind));
+ return hsmp_get_tbl_dram_base(sock_ind);
else
return 0;
}
-/* One bin sysfs for metrics table*/
+/* One bin sysfs for metrics table */
#define NUM_HSMP_ATTRS 1
-static int hsmp_create_sysfs_interface(void)
+static int hsmp_create_attr_list(struct attribute_group *attr_grp,
+ struct device *dev, u16 sock_ind)
{
- const struct attribute_group **hsmp_attr_grps;
struct bin_attribute **hsmp_bin_attrs;
+
+ /* Null terminated list of attributes */
+ hsmp_bin_attrs = devm_kcalloc(dev, NUM_HSMP_ATTRS + 1,
+ sizeof(*hsmp_bin_attrs),
+ GFP_KERNEL);
+ if (!hsmp_bin_attrs)
+ return -ENOMEM;
+
+ attr_grp->bin_attrs = hsmp_bin_attrs;
+
+ return hsmp_init_metric_tbl_bin_attr(hsmp_bin_attrs, sock_ind);
+}
+
+static int hsmp_create_non_acpi_sysfs_if(struct device *dev)
+{
+ const struct attribute_group **hsmp_attr_grps;
struct attribute_group *attr_grp;
- int ret;
u16 i;
- /* String formatting is currently limited to u8 sockets */
- if (WARN_ON(plat_dev.num_sockets > U8_MAX))
- return -ERANGE;
-
- hsmp_attr_grps = devm_kzalloc(plat_dev.dev, sizeof(struct attribute_group *) *
- (plat_dev.num_sockets + 1), GFP_KERNEL);
+ hsmp_attr_grps = devm_kcalloc(dev, plat_dev.num_sockets + 1,
+ sizeof(*hsmp_attr_grps),
+ GFP_KERNEL);
if (!hsmp_attr_grps)
return -ENOMEM;
/* Create a sysfs directory for each socket */
for (i = 0; i < plat_dev.num_sockets; i++) {
- attr_grp = devm_kzalloc(plat_dev.dev, sizeof(struct attribute_group), GFP_KERNEL);
+ attr_grp = devm_kzalloc(dev, sizeof(struct attribute_group),
+ GFP_KERNEL);
if (!attr_grp)
return -ENOMEM;
snprintf(plat_dev.sock[i].name, HSMP_ATTR_GRP_NAME_SIZE, "socket%u", (u8)i);
- attr_grp->name = plat_dev.sock[i].name;
-
- /* Null terminated list of attributes */
- hsmp_bin_attrs = devm_kzalloc(plat_dev.dev, sizeof(struct bin_attribute *) *
- (NUM_HSMP_ATTRS + 1), GFP_KERNEL);
- if (!hsmp_bin_attrs)
- return -ENOMEM;
-
- attr_grp->bin_attrs = hsmp_bin_attrs;
+ attr_grp->name = plat_dev.sock[i].name;
attr_grp->is_bin_visible = hsmp_is_sock_attr_visible;
hsmp_attr_grps[i] = attr_grp;
- /* Now create the leaf nodes */
- ret = hsmp_init_metric_tbl_bin_attr(hsmp_bin_attrs, i);
- if (ret)
- return ret;
+ hsmp_create_attr_list(attr_grp, dev, i);
}
- return devm_device_add_groups(plat_dev.dev, hsmp_attr_grps);
+
+ return devm_device_add_groups(dev, hsmp_attr_grps);
+}
+
+static int hsmp_create_acpi_sysfs_if(struct device *dev)
+{
+ struct attribute_group *attr_grp;
+ u16 sock_ind;
+ int ret;
+
+ attr_grp = devm_kzalloc(dev, sizeof(struct attribute_group), GFP_KERNEL);
+ if (!attr_grp)
+ return -ENOMEM;
+
+ attr_grp->is_bin_visible = hsmp_is_sock_attr_visible;
+
+ ret = hsmp_get_uid(dev, &sock_ind);
+ if (ret)
+ return ret;
+
+ ret = hsmp_create_attr_list(attr_grp, dev, sock_ind);
+ if (ret)
+ return ret;
+
+ return devm_device_add_group(dev, attr_grp);
}
-static int hsmp_cache_proto_ver(void)
+static int hsmp_cache_proto_ver(u16 sock_ind)
{
struct hsmp_message msg = { 0 };
int ret;
msg.msg_id = HSMP_GET_PROTO_VER;
- msg.sock_ind = 0;
+ msg.sock_ind = sock_ind;
msg.response_sz = hsmp_msg_desc_table[HSMP_GET_PROTO_VER].response_sz;
ret = hsmp_send_message(&msg);
@@ -490,45 +735,150 @@ static int hsmp_cache_proto_ver(void)
return ret;
}
-static int hsmp_pltdrv_probe(struct platform_device *pdev)
+static inline bool is_f1a_m0h(void)
{
- int ret, i;
+ if (boot_cpu_data.x86 == 0x1A && boot_cpu_data.x86_model <= 0x0F)
+ return true;
- plat_dev.sock = devm_kzalloc(&pdev->dev,
- (plat_dev.num_sockets * sizeof(struct hsmp_socket)),
- GFP_KERNEL);
- if (!plat_dev.sock)
- return -ENOMEM;
- plat_dev.dev = &pdev->dev;
+ return false;
+}
+
+static int init_platform_device(struct device *dev)
+{
+ struct hsmp_socket *sock;
+ int ret, i;
for (i = 0; i < plat_dev.num_sockets; i++) {
- sema_init(&plat_dev.sock[i].hsmp_sem, 1);
- plat_dev.sock[i].sock_ind = i;
+ if (!node_to_amd_nb(i))
+ return -ENODEV;
+ sock = &plat_dev.sock[i];
+ sock->root = node_to_amd_nb(i)->root;
+ sock->sock_ind = i;
+ sock->dev = dev;
+ sock->mbinfo.base_addr = SMN_HSMP_BASE;
+
+ /*
+ * This is a transitional change from non-ACPI to ACPI; only the
+ * family 0x1A, model 0x00 platform is supported by both the ACPI and non-ACPI paths.
+ */
+ if (is_f1a_m0h())
+ sock->mbinfo.msg_id_off = SMN_HSMP_MSG_ID_F1A_M0H;
+ else
+ sock->mbinfo.msg_id_off = SMN_HSMP_MSG_ID;
+
+ sock->mbinfo.msg_resp_off = SMN_HSMP_MSG_RESP;
+ sock->mbinfo.msg_arg_off = SMN_HSMP_MSG_DATA;
+ sema_init(&sock->hsmp_sem, 1);
+
+ /* Test the hsmp interface on each socket */
+ ret = hsmp_test(i, 0xDEADBEEF);
+ if (ret) {
+ dev_err(dev, "HSMP test message failed on Fam:%x model:%x\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ dev_err(dev, "Is HSMP disabled in BIOS ?\n");
+ return ret;
+ }
}
- plat_dev.hsmp_device.name = HSMP_CDEV_NAME;
- plat_dev.hsmp_device.minor = MISC_DYNAMIC_MINOR;
- plat_dev.hsmp_device.fops = &hsmp_fops;
- plat_dev.hsmp_device.parent = &pdev->dev;
- plat_dev.hsmp_device.nodename = HSMP_DEVNODE_NAME;
- plat_dev.hsmp_device.mode = 0644;
+ return 0;
+}
+
+static const struct acpi_device_id amd_hsmp_acpi_ids[] = {
+ {ACPI_HSMP_DEVICE_HID, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, amd_hsmp_acpi_ids);
+
+static int hsmp_pltdrv_probe(struct platform_device *pdev)
+{
+ struct acpi_device *adev;
+ u16 sock_ind = 0;
+ int ret;
+
+ /*
+ * On an ACPI-enabled BIOS there is one ACPI HSMP device per socket,
+ * so probing happens per socket. The memory for all sockets must be
+ * contiguous so that it can be accessed as an array, hence it is
+ * allocated once here instead of on each probe.
+ */
+ if (!plat_dev.is_probed) {
+ plat_dev.sock = devm_kcalloc(&pdev->dev, plat_dev.num_sockets,
+ sizeof(*plat_dev.sock),
+ GFP_KERNEL);
+ if (!plat_dev.sock)
+ return -ENOMEM;
+ }
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (adev && !acpi_match_device_ids(adev, amd_hsmp_acpi_ids)) {
+ ret = hsmp_get_uid(&pdev->dev, &sock_ind);
+ if (ret)
+ return ret;
+ if (sock_ind >= plat_dev.num_sockets)
+ return -EINVAL;
+ ret = hsmp_parse_acpi_table(&pdev->dev, sock_ind);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to parse ACPI table\n");
+ return ret;
+ }
+ /* Test the hsmp interface */
+ ret = hsmp_test(sock_ind, 0xDEADBEEF);
+ if (ret) {
+ dev_err(&pdev->dev, "HSMP test message failed on Fam:%x model:%x\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ dev_err(&pdev->dev, "Is HSMP disabled in BIOS ?\n");
+ return ret;
+ }
+ } else {
+ ret = init_platform_device(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init HSMP mailbox\n");
+ return ret;
+ }
+ }
- ret = hsmp_cache_proto_ver();
+ ret = hsmp_cache_proto_ver(sock_ind);
if (ret) {
- dev_err(plat_dev.dev, "Failed to read HSMP protocol version\n");
+ dev_err(&pdev->dev, "Failed to read HSMP protocol version\n");
return ret;
}
- ret = hsmp_create_sysfs_interface();
+ if (plat_dev.is_acpi_device)
+ ret = hsmp_create_acpi_sysfs_if(&pdev->dev);
+ else
+ ret = hsmp_create_non_acpi_sysfs_if(&pdev->dev);
if (ret)
- dev_err(plat_dev.dev, "Failed to create HSMP sysfs interface\n");
+ dev_err(&pdev->dev, "Failed to create HSMP sysfs interface\n");
+
+ if (!plat_dev.is_probed) {
+ plat_dev.hsmp_device.name = HSMP_CDEV_NAME;
+ plat_dev.hsmp_device.minor = MISC_DYNAMIC_MINOR;
+ plat_dev.hsmp_device.fops = &hsmp_fops;
+ plat_dev.hsmp_device.parent = &pdev->dev;
+ plat_dev.hsmp_device.nodename = HSMP_DEVNODE_NAME;
+ plat_dev.hsmp_device.mode = 0644;
+
+ ret = misc_register(&plat_dev.hsmp_device);
+ if (ret)
+ return ret;
+
+ plat_dev.is_probed = true;
+ }
+
+ return 0;
- return misc_register(&plat_dev.hsmp_device);
}
static void hsmp_pltdrv_remove(struct platform_device *pdev)
{
- misc_deregister(&plat_dev.hsmp_device);
+ /*
+ * We register only one misc_device even on a multi-socket system,
+ * so deregistration should also happen only once.
+ */
+ if (plat_dev.is_probed) {
+ misc_deregister(&plat_dev.hsmp_device);
+ plat_dev.is_probed = false;
+ }
}
static struct platform_driver amd_hsmp_driver = {
@@ -536,15 +886,30 @@ static struct platform_driver amd_hsmp_driver = {
.remove_new = hsmp_pltdrv_remove,
.driver = {
.name = DRIVER_NAME,
+ .acpi_match_table = amd_hsmp_acpi_ids,
},
};
static struct platform_device *amd_hsmp_platdev;
+static int hsmp_plat_dev_register(void)
+{
+ int ret;
+
+ amd_hsmp_platdev = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
+ if (!amd_hsmp_platdev)
+ return -ENOMEM;
+
+ ret = platform_device_add(amd_hsmp_platdev);
+ if (ret)
+ platform_device_put(amd_hsmp_platdev);
+
+ return ret;
+}
+
static int __init hsmp_plt_init(void)
{
int ret = -ENODEV;
- int i;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD || boot_cpu_data.x86 < 0x19) {
pr_err("HSMP is not supported on Family:%x model:%x\n",
@@ -557,40 +922,19 @@ static int __init hsmp_plt_init(void)
* if we have N SMN/DF interfaces that ideally means N sockets
*/
plat_dev.num_sockets = amd_nb_num();
- if (plat_dev.num_sockets == 0)
+ if (plat_dev.num_sockets == 0 || plat_dev.num_sockets > MAX_AMD_SOCKETS)
return ret;
- /* Test the hsmp interface on each socket */
- for (i = 0; i < plat_dev.num_sockets; i++) {
- ret = hsmp_test(i, 0xDEADBEEF);
- if (ret) {
- pr_err("HSMP test message failed on Fam:%x model:%x\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
- pr_err("Is HSMP disabled in BIOS ?\n");
- return ret;
- }
- }
-
ret = platform_driver_register(&amd_hsmp_driver);
if (ret)
return ret;
- amd_hsmp_platdev = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
- if (!amd_hsmp_platdev) {
- ret = -ENOMEM;
- goto drv_unregister;
- }
-
- ret = platform_device_add(amd_hsmp_platdev);
- if (ret) {
- platform_device_put(amd_hsmp_platdev);
- goto drv_unregister;
+ if (!plat_dev.is_acpi_device) {
+ ret = hsmp_plat_dev_register();
+ if (ret)
+ platform_driver_unregister(&amd_hsmp_driver);
}
- return 0;
-
-drv_unregister:
- platform_driver_unregister(&amd_hsmp_driver);
return ret;
}
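For orientation, a minimal sketch (not part of the patch) of an in-kernel caller of the exported mailbox interface after this change; the choice between the PCI config-space path and the ACPI-described MMIO mailbox is made inside the send path, so the caller only fills in a struct hsmp_message, reusing names from the hunks above:

	struct hsmp_message msg = { 0 };
	int ret;

	msg.msg_id = HSMP_GET_PROTO_VER;
	msg.sock_ind = 0;	/* index into plat_dev.sock[] */
	msg.response_sz = hsmp_msg_desc_table[HSMP_GET_PROTO_VER].response_sz;

	ret = hsmp_send_message(&msg);
	if (!ret)
		pr_info("HSMP protocol version: %u\n", msg.args[0]);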
diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
index b456370166b6b..b4f49720c87f6 100644
--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
@@ -208,6 +208,15 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_BIOS_VERSION, "03.03"),
}
},
+ {
+ .ident = "Framework Laptop 13 (Phoenix)",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"),
+ DMI_MATCH(DMI_BIOS_VERSION, "03.05"),
+ }
+ },
{}
};
diff --git a/drivers/platform/x86/amd/pmf/Makefile b/drivers/platform/x86/amd/pmf/Makefile
index 6b26e48ce8ad2..7d6079b02589c 100644
--- a/drivers/platform/x86/amd/pmf/Makefile
+++ b/drivers/platform/x86/amd/pmf/Makefile
@@ -7,4 +7,4 @@
obj-$(CONFIG_AMD_PMF) += amd-pmf.o
amd-pmf-objs := core.o acpi.o sps.o \
auto-mode.o cnqf.o \
- tee-if.o spc.o
+ tee-if.o spc.o pmf-quirks.o
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
index f2eb07ef855af..1157ec148880b 100644
--- a/drivers/platform/x86/amd/pmf/acpi.c
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -90,12 +90,96 @@ out:
return err;
}
+static union acpi_object *apts_if_call(struct amd_pmf_dev *pdev, u32 state_index)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_handle ahandle = ACPI_HANDLE(pdev->dev);
+ struct acpi_object_list apts_if_arg_list;
+ union acpi_object apts_if_args[3];
+ acpi_status status;
+
+ apts_if_arg_list.count = 3;
+ apts_if_arg_list.pointer = &apts_if_args[0];
+
+ apts_if_args[0].type = ACPI_TYPE_INTEGER;
+ apts_if_args[0].integer.value = 1;
+ apts_if_args[1].type = ACPI_TYPE_INTEGER;
+ apts_if_args[1].integer.value = state_index;
+ apts_if_args[2].type = ACPI_TYPE_INTEGER;
+ apts_if_args[2].integer.value = 0;
+
+ status = acpi_evaluate_object(ahandle, "APTS", &apts_if_arg_list, &buffer);
+ if (ACPI_FAILURE(status)) {
+ dev_err(pdev->dev, "APTS state_idx:%u call failed\n", state_index);
+ kfree(buffer.pointer);
+ return NULL;
+ }
+
+ return buffer.pointer;
+}
+
+static int apts_if_call_store_buffer(struct amd_pmf_dev *pdev,
+ u32 index, void *data, size_t out_sz)
+{
+ union acpi_object *info;
+ size_t size;
+ int err = 0;
+
+ info = apts_if_call(pdev, index);
+ if (!info)
+ return -EIO;
+
+ if (info->type != ACPI_TYPE_BUFFER) {
+ dev_err(pdev->dev, "object is not a buffer\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ size = *(u16 *)info->buffer.pointer;
+ if (info->buffer.length < size) {
+ dev_err(pdev->dev, "buffer smaller than header size %u < %zu\n",
+ info->buffer.length, size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (size < out_sz) {
+ dev_err(pdev->dev, "buffer too small %zu\n", size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(data, info->buffer.pointer, out_sz);
+out:
+ kfree(info);
+ return err;
+}
+
int is_apmf_func_supported(struct amd_pmf_dev *pdev, unsigned long index)
{
/* If bit-n is set, that indicates function n+1 is supported */
return !!(pdev->supported_func & BIT(index - 1));
}
+int apts_get_static_slider_granular_v2(struct amd_pmf_dev *pdev,
+ struct amd_pmf_apts_granular_output *data, u32 apts_idx)
+{
+ if (!is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ return -EINVAL;
+
+ return apts_if_call_store_buffer(pdev, apts_idx, data, sizeof(*data));
+}
+
+int apmf_get_static_slider_granular_v2(struct amd_pmf_dev *pdev,
+ struct apmf_static_slider_granular_output_v2 *data)
+{
+ if (!is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ return -EINVAL;
+
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR,
+ data, sizeof(*data));
+}
+
int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
struct apmf_static_slider_granular_output *data)
{
@@ -140,6 +224,43 @@ static void apmf_sbios_heartbeat_notify(struct work_struct *work)
kfree(info);
}
+int amd_pmf_notify_sbios_heartbeat_event_v2(struct amd_pmf_dev *dev, u8 flag)
+{
+ struct sbios_hb_event_v2 args = { };
+ struct acpi_buffer params;
+ union acpi_object *info;
+
+ args.size = sizeof(args);
+
+ switch (flag) {
+ case ON_LOAD:
+ args.load = 1;
+ break;
+ case ON_UNLOAD:
+ args.unload = 1;
+ break;
+ case ON_SUSPEND:
+ args.suspend = 1;
+ break;
+ case ON_RESUME:
+ args.resume = 1;
+ break;
+ default:
+ dev_dbg(dev->dev, "Failed to send v2 heartbeat event, flag:0x%x\n", flag);
+ return -EINVAL;
+ }
+
+ params.length = sizeof(args);
+ params.pointer = &args;
+
+ info = apmf_if_call(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2, &params);
+ if (!info)
+ return -EIO;
+
+ kfree(info);
+ return 0;
+}
+
int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx)
{
union acpi_object *info;
@@ -166,6 +287,11 @@ int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data
return apmf_if_call_store_buffer(pdev, APMF_FUNC_AUTO_MODE, data, sizeof(*data));
}
+int apmf_get_sbios_requests_v2(struct amd_pmf_dev *pdev, struct apmf_sbios_req_v2 *req)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS, req, sizeof(*req));
+}
+
int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req)
{
return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS,
@@ -217,9 +343,14 @@ static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
if (err)
return err;
- pdev->supported_func = output.supported_functions;
- dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x\n",
- output.supported_functions, output.notification_mask);
+ /* only set if not already set by a quirk */
+ if (!pdev->supported_func)
+ pdev->supported_func = output.supported_functions;
+
+ dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x version:%u\n",
+ output.supported_functions, output.notification_mask, output.version);
+
+ pdev->pmf_if_version = output.version;
return 0;
}
@@ -309,7 +440,7 @@ int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev)
status = acpi_walk_resources(ahandle, METHOD_NAME__CRS, apmf_walk_resources, pmf_dev);
if (ACPI_FAILURE(status)) {
- dev_err(pmf_dev->dev, "acpi_walk_resources failed :%d\n", status);
+ dev_dbg(pmf_dev->dev, "acpi_walk_resources failed :%d\n", status);
return -EINVAL;
}
@@ -320,7 +451,7 @@ void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev)
{
acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
- if (pmf_dev->hb_interval)
+ if (pmf_dev->hb_interval && pmf_dev->pmf_if_version == PMF_IF_V1)
cancel_delayed_work_sync(&pmf_dev->heart_beat);
if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) &&
@@ -344,7 +475,7 @@ int apmf_acpi_init(struct amd_pmf_dev *pmf_dev)
goto out;
}
- if (pmf_dev->hb_interval) {
+ if (pmf_dev->hb_interval && pmf_dev->pmf_if_version == PMF_IF_V1) {
/* send heartbeats only if the interval is not zero */
INIT_DELAYED_WORK(&pmf_dev->heart_beat, apmf_sbios_heartbeat_notify);
schedule_delayed_work(&pmf_dev->heart_beat, 0);
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index 4f734e049f4a4..64e6e34a2a9ac 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -113,8 +113,9 @@ static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev)
static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev)
{
dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL);
- debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
- &current_power_limits_fops);
+ if (dev->pmf_if_version == PMF_IF_V1)
+ debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
+ &current_power_limits_fops);
}
int amd_pmf_get_power_source(void)
@@ -299,6 +300,9 @@ static int amd_pmf_suspend_handler(struct device *dev)
if (pdev->smart_pc_enabled)
cancel_delayed_work_sync(&pdev->pb_work);
+ if (is_apmf_func_supported(pdev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
+ amd_pmf_notify_sbios_heartbeat_event_v2(pdev, ON_SUSPEND);
+
return 0;
}
@@ -313,6 +317,9 @@ static int amd_pmf_resume_handler(struct device *dev)
return ret;
}
+ if (is_apmf_func_supported(pdev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
+ amd_pmf_notify_sbios_heartbeat_event_v2(pdev, ON_RESUME);
+
if (pdev->smart_pc_enabled)
schedule_delayed_work(&pdev->pb_work, msecs_to_jiffies(2000));
@@ -438,11 +445,14 @@ static int amd_pmf_probe(struct platform_device *pdev)
mutex_init(&dev->lock);
mutex_init(&dev->update_mutex);
+ amd_pmf_quirks_init(dev);
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
amd_pmf_dbgfs_register(dev);
amd_pmf_init_features(dev);
apmf_install_handler(dev);
+ if (is_apmf_func_supported(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
+ amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_LOAD);
dev_info(dev->dev, "registered PMF device successfully\n");
@@ -454,6 +464,8 @@ static void amd_pmf_remove(struct platform_device *pdev)
struct amd_pmf_dev *dev = platform_get_drvdata(pdev);
amd_pmf_deinit_features(dev);
+ if (is_apmf_func_supported(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
+ amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_UNLOAD);
apmf_acpi_deinit(dev);
amd_pmf_dbgfs_unregister(dev);
mutex_destroy(&dev->lock);
diff --git a/drivers/platform/x86/amd/pmf/pmf-quirks.c b/drivers/platform/x86/amd/pmf/pmf-quirks.c
new file mode 100644
index 0000000000000..0b2eb0ae85feb
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/pmf-quirks.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Platform Management Framework Driver Quirks
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#include <linux/dmi.h>
+
+#include "pmf.h"
+
+struct quirk_entry {
+ u32 supported_func;
+};
+
+static struct quirk_entry quirk_no_sps_bug = {
+ .supported_func = 0x4003,
+};
+
+static const struct dmi_system_id fwbug_list[] = {
+ {
+ .ident = "ROG Zephyrus G14",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GA403UV"),
+ },
+ .driver_data = &quirk_no_sps_bug,
+ },
+ {}
+};
+
+void amd_pmf_quirks_init(struct amd_pmf_dev *dev)
+{
+ const struct dmi_system_id *dmi_id;
+ struct quirk_entry *quirks;
+
+ dmi_id = dmi_first_match(fwbug_list);
+ if (!dmi_id)
+ return;
+
+ quirks = dmi_id->driver_data;
+ if (quirks->supported_func) {
+ dev->supported_func = quirks->supported_func;
+ pr_info("Using supported funcs quirk to avoid %s platform firmware bug\n",
+ dmi_id->ident);
+ }
+}
+
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index 66cae1cca73cc..eeedd0c0395a8 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -17,7 +17,11 @@
#define POLICY_BUF_MAX_SZ 0x4b000
#define POLICY_SIGN_COOKIE 0x31535024
#define POLICY_COOKIE_OFFSET 0x10
-#define POLICY_COOKIE_LEN 0x14
+
+struct cookie_header {
+ u32 sign;
+ u32 length;
+} __packed;
/* APMF Functions */
#define APMF_FUNC_VERIFY_INTERFACE 0
@@ -30,6 +34,7 @@
#define APMF_FUNC_STATIC_SLIDER_GRANULAR 9
#define APMF_FUNC_DYN_SLIDER_AC 11
#define APMF_FUNC_DYN_SLIDER_DC 12
+#define APMF_FUNC_SBIOS_HEARTBEAT_V2 16
/* Message Definitions */
#define SET_SPL 0x03 /* SPL: Sustained Power Limit */
@@ -50,6 +55,8 @@
#define GET_STT_LIMIT_APU 0x20
#define GET_STT_LIMIT_HS2 0x21
#define SET_P3T 0x23 /* P3T: Peak Package Power Limit */
+#define SET_PMF_PPT 0x25
+#define SET_PMF_PPT_APU_ONLY 0x26
/* OS slider update notification */
#define DC_BEST_PERF 0
@@ -83,6 +90,47 @@
#define TA_OUTPUT_RESERVED_MEM 906
#define MAX_OPERATION_PARAMS 4
+#define PMF_IF_V1 1
+#define PMF_IF_V2 2
+
+#define APTS_MAX_STATES 16
+
+/* APTS PMF BIOS Interface */
+struct amd_pmf_apts_output {
+ u16 table_version;
+ u32 fan_table_idx;
+ u32 pmf_ppt;
+ u32 ppt_pmf_apu_only;
+ u32 stt_min_limit;
+ u8 stt_skin_temp_limit_apu;
+ u8 stt_skin_temp_limit_hs2;
+} __packed;
+
+struct amd_pmf_apts_granular_output {
+ u16 size;
+ struct amd_pmf_apts_output val;
+} __packed;
+
+struct amd_pmf_apts_granular {
+ u16 size;
+ struct amd_pmf_apts_output val[APTS_MAX_STATES];
+};
+
+struct sbios_hb_event_v2 {
+ u16 size;
+ u8 load;
+ u8 unload;
+ u8 suspend;
+ u8 resume;
+} __packed;
+
+enum sbios_hb_v2 {
+ ON_LOAD,
+ ON_UNLOAD,
+ ON_SUSPEND,
+ ON_RESUME,
+};
+
/* AMD PMF BIOS interfaces */
struct apmf_verify_interface {
u16 size;
@@ -114,6 +162,18 @@ struct apmf_sbios_req {
u8 skin_temp_hs2;
} __packed;
+struct apmf_sbios_req_v2 {
+ u16 size;
+ u32 pending_req;
+ u8 rsd;
+ u32 ppt_pmf;
+ u32 ppt_pmf_apu_only;
+ u32 stt_min_limit;
+ u8 skin_temp_apu;
+ u8 skin_temp_hs2;
+ u32 custom_policy[10];
+} __packed;
+
struct apmf_fan_idx {
u16 size;
u8 fan_ctl_mode;
@@ -194,6 +254,14 @@ enum power_modes {
POWER_MODE_MAX,
};
+enum power_modes_v2 {
+ POWER_MODE_BEST_PERFORMANCE,
+ POWER_MODE_BALANCED,
+ POWER_MODE_BEST_POWER_EFFICIENCY,
+ POWER_MODE_ENERGY_SAVE,
+ POWER_MODE_V2_MAX,
+};
+
struct amd_pmf_dev {
void __iomem *regbase;
void __iomem *smu_virt_addr;
@@ -229,10 +297,15 @@ struct amd_pmf_dev {
struct delayed_work pb_work;
struct pmf_action_table *prev_data;
u64 policy_addr;
- void *policy_base;
+ void __iomem *policy_base;
bool smart_pc_enabled;
+ u16 pmf_if_version;
};
+struct apmf_sps_prop_granular_v2 {
+ u8 power_states[POWER_SOURCE_MAX][POWER_MODE_V2_MAX];
+} __packed;
+
struct apmf_sps_prop_granular {
u32 fppt;
u32 sppt;
@@ -254,6 +327,16 @@ struct amd_pmf_static_slider_granular {
struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX][POWER_MODE_MAX];
};
+struct apmf_static_slider_granular_output_v2 {
+ u16 size;
+ struct apmf_sps_prop_granular_v2 sps_idx;
+} __packed;
+
+struct amd_pmf_static_slider_granular_v2 {
+ u16 size;
+ struct apmf_sps_prop_granular_v2 sps_idx;
+};
+
struct os_power_slider {
u16 size;
u8 slider_event;
@@ -585,6 +668,7 @@ int amd_pmf_get_power_source(void);
int apmf_install_handler(struct amd_pmf_dev *pmf_dev);
int apmf_os_power_slider_update(struct amd_pmf_dev *dev, u8 flag);
int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer);
+int amd_pmf_notify_sbios_heartbeat_event_v2(struct amd_pmf_dev *dev, u8 flag);
/* SPS Layer */
int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
@@ -602,6 +686,10 @@ const char *amd_pmf_source_as_str(unsigned int state);
int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf);
+int apmf_get_static_slider_granular_v2(struct amd_pmf_dev *dev,
+ struct apmf_static_slider_granular_output_v2 *data);
+int apts_get_static_slider_granular_v2(struct amd_pmf_dev *pdev,
+ struct amd_pmf_apts_granular_output *data, u32 apts_idx);
/* Auto Mode Layer */
int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data);
@@ -609,6 +697,7 @@ void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev);
void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev);
void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms);
int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req);
+int apmf_get_sbios_requests_v2(struct amd_pmf_dev *pdev, struct apmf_sbios_req_v2 *req);
void amd_pmf_update_2_cql(struct amd_pmf_dev *dev, bool is_cql_event);
int amd_pmf_reset_amt(struct amd_pmf_dev *dev);
@@ -631,4 +720,7 @@ int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev);
void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
+/* Quirk infrastructure */
+void amd_pmf_quirks_init(struct amd_pmf_dev *dev);
+
#endif /* PMF_H */
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index 33e23e25c8b1e..92f7fb22277dc 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -10,9 +10,27 @@
#include "pmf.h"
+static struct amd_pmf_static_slider_granular_v2 config_store_v2;
static struct amd_pmf_static_slider_granular config_store;
+static struct amd_pmf_apts_granular apts_config_store;
#ifdef CONFIG_AMD_PMF_DEBUG
+static const char *slider_v2_as_str(unsigned int state)
+{
+ switch (state) {
+ case POWER_MODE_BEST_PERFORMANCE:
+ return "Best Performance";
+ case POWER_MODE_BALANCED:
+ return "Balanced";
+ case POWER_MODE_BEST_POWER_EFFICIENCY:
+ return "Best Power Efficiency";
+ case POWER_MODE_ENERGY_SAVE:
+ return "Energy Save";
+ default:
+ return "Unknown Power Mode";
+ }
+}
+
static const char *slider_as_str(unsigned int state)
{
switch (state) {
@@ -63,10 +81,88 @@ static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *dat
pr_debug("Static Slider Data - END\n");
}
+
+static void amd_pmf_dump_sps_defaults_v2(struct amd_pmf_static_slider_granular_v2 *data)
+{
+ unsigned int i, j;
+
+ pr_debug("Static Slider APTS state index data - BEGIN");
+ pr_debug("size: %u\n", data->size);
+
+ for (i = 0; i < POWER_SOURCE_MAX; i++)
+ for (j = 0; j < POWER_MODE_V2_MAX; j++)
+ pr_debug("%s %s: %u\n", amd_pmf_source_as_str(i), slider_v2_as_str(j),
+ data->sps_idx.power_states[i][j]);
+
+ pr_debug("Static Slider APTS state index data - END\n");
+}
+
+static void amd_pmf_dump_apts_sps_defaults(struct amd_pmf_apts_granular *info)
+{
+ int i;
+
+ pr_debug("Static Slider APTS index default values data - BEGIN");
+
+ for (i = 0; i < APTS_MAX_STATES; i++) {
+ pr_debug("Table Version[%d] = %u\n", i, info->val[i].table_version);
+ pr_debug("Fan Index[%d] = %u\n", i, info->val[i].fan_table_idx);
+ pr_debug("PPT[%d] = %u\n", i, info->val[i].pmf_ppt);
+ pr_debug("PPT APU[%d] = %u\n", i, info->val[i].ppt_pmf_apu_only);
+ pr_debug("STT Min[%d] = %u\n", i, info->val[i].stt_min_limit);
+ pr_debug("STT APU[%d] = %u\n", i, info->val[i].stt_skin_temp_limit_apu);
+ pr_debug("STT HS2[%d] = %u\n", i, info->val[i].stt_skin_temp_limit_hs2);
+ }
+
+ pr_debug("Static Slider APTS index default values data - END");
+}
#else
static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data) {}
+static void amd_pmf_dump_sps_defaults_v2(struct amd_pmf_static_slider_granular_v2 *data) {}
+static void amd_pmf_dump_apts_sps_defaults(struct amd_pmf_apts_granular *info) {}
#endif
+static void amd_pmf_load_apts_defaults_sps_v2(struct amd_pmf_dev *pdev)
+{
+ struct amd_pmf_apts_granular_output output;
+ struct amd_pmf_apts_output *ps;
+ int i;
+
+ memset(&apts_config_store, 0, sizeof(apts_config_store));
+
+ ps = apts_config_store.val;
+
+ for (i = 0; i < APTS_MAX_STATES; i++) {
+ apts_get_static_slider_granular_v2(pdev, &output, i);
+ ps[i].table_version = output.val.table_version;
+ ps[i].fan_table_idx = output.val.fan_table_idx;
+ ps[i].pmf_ppt = output.val.pmf_ppt;
+ ps[i].ppt_pmf_apu_only = output.val.ppt_pmf_apu_only;
+ ps[i].stt_min_limit = output.val.stt_min_limit;
+ ps[i].stt_skin_temp_limit_apu = output.val.stt_skin_temp_limit_apu;
+ ps[i].stt_skin_temp_limit_hs2 = output.val.stt_skin_temp_limit_hs2;
+ }
+
+ amd_pmf_dump_apts_sps_defaults(&apts_config_store);
+}
+
+static void amd_pmf_load_defaults_sps_v2(struct amd_pmf_dev *dev)
+{
+ struct apmf_static_slider_granular_output_v2 output;
+ unsigned int i, j;
+
+ memset(&config_store_v2, 0, sizeof(config_store_v2));
+ apmf_get_static_slider_granular_v2(dev, &output);
+
+ config_store_v2.size = output.size;
+
+ for (i = 0; i < POWER_SOURCE_MAX; i++)
+ for (j = 0; j < POWER_MODE_V2_MAX; j++)
+ config_store_v2.sps_idx.power_states[i][j] =
+ output.sps_idx.power_states[i][j];
+
+ amd_pmf_dump_sps_defaults_v2(&config_store_v2);
+}
+
static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
{
struct apmf_static_slider_granular_output output;
@@ -94,6 +190,19 @@ static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
amd_pmf_dump_sps_defaults(&config_store);
}
+static void amd_pmf_update_slider_v2(struct amd_pmf_dev *dev, int idx)
+{
+ amd_pmf_send_cmd(dev, SET_PMF_PPT, false, apts_config_store.val[idx].pmf_ppt, NULL);
+ amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, false,
+ apts_config_store.val[idx].ppt_pmf_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
+ apts_config_store.val[idx].stt_min_limit, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ apts_config_store.val[idx].stt_skin_temp_limit_apu, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ apts_config_store.val[idx].stt_skin_temp_limit_hs2, NULL);
+}
+
void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
struct amd_pmf_static_slider_granular *table)
{
@@ -126,6 +235,32 @@ void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
}
}
+static int amd_pmf_update_sps_power_limits_v2(struct amd_pmf_dev *pdev, int pwr_mode)
+{
+ int src, index;
+
+ src = amd_pmf_get_power_source();
+
+ switch (pwr_mode) {
+ case POWER_MODE_PERFORMANCE:
+ index = config_store_v2.sps_idx.power_states[src][POWER_MODE_BEST_PERFORMANCE];
+ amd_pmf_update_slider_v2(pdev, index);
+ break;
+ case POWER_MODE_BALANCED_POWER:
+ index = config_store_v2.sps_idx.power_states[src][POWER_MODE_BALANCED];
+ amd_pmf_update_slider_v2(pdev, index);
+ break;
+ case POWER_MODE_POWER_SAVER:
+ index = config_store_v2.sps_idx.power_states[src][POWER_MODE_BEST_POWER_EFFICIENCY];
+ amd_pmf_update_slider_v2(pdev, index);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
{
int mode;
@@ -134,6 +269,9 @@ int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
if (mode < 0)
return mode;
+ if (pmf->pmf_if_version == PMF_IF_V2)
+ return amd_pmf_update_sps_power_limits_v2(pmf, mode);
+
amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);
return 0;
@@ -256,7 +394,12 @@ int amd_pmf_init_sps(struct amd_pmf_dev *dev)
dev->current_profile = PLATFORM_PROFILE_BALANCED;
if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
- amd_pmf_load_defaults_sps(dev);
+ if (dev->pmf_if_version == PMF_IF_V2) {
+ amd_pmf_load_defaults_sps_v2(dev);
+ amd_pmf_load_apts_defaults_sps_v2(dev);
+ } else {
+ amd_pmf_load_defaults_sps(dev);
+ }
/* update SPS balanced power mode thermals */
amd_pmf_set_sps_power_limits(dev);
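To recap the v2 flow added above (an illustrative sketch, not part of the patch): a platform-profile change is first mapped through the BIOS-provided slider table to an APTS state index, and that index then selects the limits programmed to the SMU, roughly:

	int src, idx;

	src = amd_pmf_get_power_source();
	idx = config_store_v2.sps_idx.power_states[src][POWER_MODE_BALANCED];
	amd_pmf_update_slider_v2(dev, idx);	/* sends SET_PMF_PPT, SET_STT_* for that APTS entry */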
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index dcbe8f85e1229..b438de4d6bfce 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -246,19 +246,24 @@ static void amd_pmf_invoke_cmd(struct work_struct *work)
static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
{
- u32 cookie, length;
+ struct cookie_header *header;
int res;
- cookie = readl(dev->policy_buf + POLICY_COOKIE_OFFSET);
- length = readl(dev->policy_buf + POLICY_COOKIE_LEN);
+ if (dev->policy_sz < POLICY_COOKIE_OFFSET + sizeof(*header))
+ return -EINVAL;
+
+ header = (struct cookie_header *)(dev->policy_buf + POLICY_COOKIE_OFFSET);
- if (cookie != POLICY_SIGN_COOKIE || !length) {
+ if (header->sign != POLICY_SIGN_COOKIE || !header->length) {
dev_dbg(dev->dev, "cookie doesn't match\n");
return -EINVAL;
}
+ if (dev->policy_sz < header->length + 512)
+ return -EINVAL;
+
/* Update the actual length */
- dev->policy_sz = length + 512;
+ dev->policy_sz = header->length + 512;
res = amd_pmf_invoke_cmd_init(dev);
if (res == TA_PMF_TYPE_SUCCESS) {
/* Now its safe to announce that smart pc is enabled */
@@ -271,7 +276,7 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
} else {
dev_err(dev->dev, "ta invoke cmd init failed err: %x\n", res);
dev->smart_pc_enabled = false;
- return res;
+ return -EIO;
}
return 0;
@@ -311,8 +316,8 @@ static ssize_t amd_pmf_get_pb_data(struct file *filp, const char __user *buf,
amd_pmf_hex_dump_pb(dev);
ret = amd_pmf_start_policy_engine(dev);
- if (ret)
- return -EINVAL;
+ if (ret < 0)
+ return ret;
return length;
}
@@ -453,7 +458,7 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
goto error;
}
- memcpy(dev->policy_buf, dev->policy_base, dev->policy_sz);
+ memcpy_fromio(dev->policy_buf, dev->policy_base, dev->policy_sz);
amd_pmf_hex_dump_pb(dev);
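With the packed cookie_header in place, the policy blob is validated before it is trusted: policy_sz must cover POLICY_COOKIE_OFFSET plus the header, the signature must equal POLICY_SIGN_COOKIE (0x31535024, the ASCII bytes "$PS1" in little-endian byte order), and header->length + 512 must still fit within the buffer before it is adopted as the new policy_sz.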
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 18be35fdb3814..3f07bbf809ef0 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -101,13 +101,6 @@ module_param(fnlock_default, bool, 0444);
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
#define ASUS_ACPI_UID_ASUSWMI "ASUSWMI"
-#define ASUS_ACPI_UID_ATK "ATK"
-
-#define WMI_EVENT_QUEUE_SIZE 0x10
-#define WMI_EVENT_QUEUE_END 0x1
-#define WMI_EVENT_MASK 0xFFFF
-/* The WMI hotkey event value is always the same. */
-#define WMI_EVENT_VALUE_ATK 0xFF
#define WMI_EVENT_MASK 0xFFFF
@@ -219,7 +212,6 @@ struct asus_wmi {
int dsts_id;
int spec;
int sfun;
- bool wmi_event_queue;
struct input_dev *inputdev;
struct backlight_device *backlight_device;
@@ -489,7 +481,17 @@ static int asus_wmi_evaluate_method_agfn(const struct acpi_buffer args)
static int asus_wmi_get_devstate(struct asus_wmi *asus, u32 dev_id, u32 *retval)
{
- return asus_wmi_evaluate_method(asus->dsts_id, dev_id, 0, retval);
+ int err;
+
+ err = asus_wmi_evaluate_method(asus->dsts_id, dev_id, 0, retval);
+
+ if (err)
+ return err;
+
+ if (*retval == ~0)
+ return -ENODEV;
+
+ return 0;
}
static int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param,
@@ -1620,7 +1622,6 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MICMUTE_LED)) {
asus->micmute_led.name = "platform::micmute";
asus->micmute_led.max_brightness = 1;
- asus->micmute_led.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
asus->micmute_led.brightness_set_blocking = micmute_led_set;
asus->micmute_led.default_trigger = "audio-micmute";
@@ -4020,50 +4021,14 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
static void asus_wmi_notify(u32 value, void *context)
{
struct asus_wmi *asus = context;
- int code;
- int i;
-
- for (i = 0; i < WMI_EVENT_QUEUE_SIZE + 1; i++) {
- code = asus_wmi_get_event_code(value);
- if (code < 0) {
- pr_warn("Failed to get notify code: %d\n", code);
- return;
- }
-
- if (code == WMI_EVENT_QUEUE_END || code == WMI_EVENT_MASK)
- return;
+ int code = asus_wmi_get_event_code(value);
- asus_wmi_handle_event_code(code, asus);
-
- /*
- * Double check that queue is present:
- * ATK (with queue) uses 0xff, ASUSWMI (without) 0xd2.
- */
- if (!asus->wmi_event_queue || value != WMI_EVENT_VALUE_ATK)
- return;
- }
-
- pr_warn("Failed to process event queue, last code: 0x%x\n", code);
-}
-
-static int asus_wmi_notify_queue_flush(struct asus_wmi *asus)
-{
- int code;
- int i;
-
- for (i = 0; i < WMI_EVENT_QUEUE_SIZE + 1; i++) {
- code = asus_wmi_get_event_code(WMI_EVENT_VALUE_ATK);
- if (code < 0) {
- pr_warn("Failed to get event during flush: %d\n", code);
- return code;
- }
-
- if (code == WMI_EVENT_QUEUE_END || code == WMI_EVENT_MASK)
- return 0;
+ if (code < 0) {
+ pr_warn("Failed to get notify code: %d\n", code);
+ return;
}
- pr_warn("Failed to flush event queue\n");
- return -EIO;
+ asus_wmi_handle_event_code(code, asus);
}
/* Sysfs **********************************************************************/
@@ -4303,23 +4268,6 @@ static int asus_wmi_platform_init(struct asus_wmi *asus)
asus->dsts_id = ASUS_WMI_METHODID_DSTS;
}
- /*
- * Some devices can have multiple event codes stored in a queue before
- * the module load if it was unloaded intermittently after calling
- * the INIT method (enables event handling). The WMI notify handler is
- * expected to retrieve all event codes until a retrieved code equals
- * queue end marker (One or Ones). Old codes are flushed from the queue
- * upon module load. Not enabling this when it should be has minimal
- * visible impact so fall back if anything goes wrong.
- */
- wmi_uid = wmi_get_acpi_device_uid(asus->driver->event_guid);
- if (wmi_uid && !strcmp(wmi_uid, ASUS_ACPI_UID_ATK)) {
- dev_info(dev, "Detected ATK, enable event queue\n");
-
- if (!asus_wmi_notify_queue_flush(asus))
- asus->wmi_event_queue = true;
- }
-
 /* CWAP allows defining the behavior of the Fn+F2 key;
 * this method doesn't seem to be present on Eee PCs */
if (asus->driver->quirks->wapf >= 0)
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 61c745490d714..5546fb1894913 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -1107,7 +1107,7 @@ module_init(compal_init);
module_exit(compal_cleanup);
MODULE_AUTHOR("Cezary Jackiewicz");
-MODULE_AUTHOR("Roald Frederickx (roald.frederickx@gmail.com)");
+MODULE_AUTHOR("Roald Frederickx <roald.frederickx@gmail.com>");
MODULE_DESCRIPTION("Compal Laptop Support");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index e712df67fa6be..bd9f445974cc6 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -57,8 +57,6 @@ config DELL_LAPTOP
select POWER_SUPPLY
select LEDS_CLASS
select NEW_LEDS
- select LEDS_TRIGGERS
- select LEDS_TRIGGER_AUDIO
help
This driver adds support for rfkill and backlight control to Dell
laptops (except for some models covered by the Compal driver).
@@ -165,7 +163,6 @@ config DELL_WMI
config DELL_WMI_PRIVACY
bool "Dell WMI Hardware Privacy Support"
- depends on LEDS_TRIGGER_AUDIO = y || DELL_WMI = LEDS_TRIGGER_AUDIO
depends on DELL_WMI
help
This option adds integration with the "Dell Hardware Privacy"
diff --git a/drivers/platform/x86/dell/dell-laptop.c b/drivers/platform/x86/dell/dell-laptop.c
index 6586438356de7..42f7de2b45223 100644
--- a/drivers/platform/x86/dell/dell-laptop.c
+++ b/drivers/platform/x86/dell/dell-laptop.c
@@ -2252,7 +2252,6 @@ static int __init dell_init(void)
if (dell_smbios_find_token(GLOBAL_MIC_MUTE_DISABLE) &&
dell_smbios_find_token(GLOBAL_MIC_MUTE_ENABLE) &&
!dell_privacy_has_mic_mute()) {
- micmute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
ret = led_classdev_register(&platform_device->dev, &micmute_led_cdev);
if (ret < 0)
goto fail_led;
@@ -2261,7 +2260,6 @@ static int __init dell_init(void)
if (dell_smbios_find_token(GLOBAL_MUTE_DISABLE) &&
dell_smbios_find_token(GLOBAL_MUTE_ENABLE)) {
- mute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MUTE);
ret = led_classdev_register(&platform_device->dev, &mute_led_cdev);
if (ret < 0)
goto fail_backlight;
diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c
index db1e9240dd02c..0b2299f7a2de5 100644
--- a/drivers/platform/x86/dell/dell-wmi-ddv.c
+++ b/drivers/platform/x86/dell/dell-wmi-ddv.c
@@ -882,6 +882,7 @@ static struct wmi_driver dell_wmi_ddv_driver = {
},
.id_table = dell_wmi_ddv_id_table,
.probe = dell_wmi_ddv_probe,
+ .no_singleton = true,
};
module_wmi_driver(dell_wmi_ddv_driver);
diff --git a/drivers/platform/x86/dell/dell-wmi-privacy.c b/drivers/platform/x86/dell/dell-wmi-privacy.c
index c517bd45dd32e..4b65e1655d42a 100644
--- a/drivers/platform/x86/dell/dell-wmi-privacy.c
+++ b/drivers/platform/x86/dell/dell-wmi-privacy.c
@@ -288,7 +288,6 @@ static int dell_privacy_leds_setup(struct device *dev)
priv->cdev.max_brightness = 1;
priv->cdev.brightness_set_blocking = dell_privacy_micmute_led_set;
priv->cdev.default_trigger = "audio-micmute";
- priv->cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
return devm_led_classdev_register(dev, &priv->cdev);
}
@@ -298,10 +297,6 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
struct key_entry *keymap;
int ret, i, j;
- ret = wmi_has_guid(DELL_PRIVACY_GUID);
- if (!ret)
- pr_debug("Unable to detect available Dell privacy devices!\n");
-
priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
index b929b4f824205..9def7983d7d66 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
@@ -25,7 +25,7 @@ struct wmi_sysman_priv wmi_priv = {
/* reset bios to defaults */
static const char * const reset_types[] = {"builtinsafe", "lastknowngood", "factory", "custom"};
static int reset_option = -1;
-static struct class *fw_attr_class;
+static const struct class *fw_attr_class;
/**
diff --git a/drivers/platform/x86/firmware_attributes_class.c b/drivers/platform/x86/firmware_attributes_class.c
index fafe8eaf6e3e4..dd8240009565d 100644
--- a/drivers/platform/x86/firmware_attributes_class.c
+++ b/drivers/platform/x86/firmware_attributes_class.c
@@ -10,11 +10,11 @@
static DEFINE_MUTEX(fw_attr_lock);
static int fw_attr_inuse;
-static struct class firmware_attributes_class = {
+static const struct class firmware_attributes_class = {
.name = "firmware-attributes",
};
-int fw_attributes_class_get(struct class **fw_attr_class)
+int fw_attributes_class_get(const struct class **fw_attr_class)
{
int err;
diff --git a/drivers/platform/x86/firmware_attributes_class.h b/drivers/platform/x86/firmware_attributes_class.h
index 486485cb1f54e..363c75f1ac1b8 100644
--- a/drivers/platform/x86/firmware_attributes_class.h
+++ b/drivers/platform/x86/firmware_attributes_class.h
@@ -5,7 +5,7 @@
#ifndef FW_ATTR_CLASS_H
#define FW_ATTR_CLASS_H
-int fw_attributes_class_get(struct class **fw_attr_class);
+int fw_attributes_class_get(const struct class **fw_attr_class);
int fw_attributes_class_put(void);
#endif /* FW_ATTR_CLASS_H */
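
For illustration, a minimal sketch (names hypothetical, not part of this patch) of how a caller adapts to the constified API above:

	#include "firmware_attributes_class.h"

	static const struct class *fw_attr_class;

	static int example_init(void)
	{
		int err;

		/* now takes a const struct class ** */
		err = fw_attributes_class_get(&fw_attr_class);
		if (err)
			return err;

		/* ... create devices under fw_attr_class as before ... */
		return 0;
	}

	static void example_exit(void)
	{
		fw_attributes_class_put();
	}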
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 085e044e888e4..94480af494678 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -49,6 +49,8 @@
#include <linux/kfifo.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <acpi/battery.h>
#include <acpi/video.h>
#define FUJITSU_DRIVER_VERSION "0.6.0"
@@ -97,6 +99,10 @@
#define BACKLIGHT_OFF (BIT(0) | BIT(1))
#define BACKLIGHT_ON 0
+/* FUNC interface - battery control interface */
+#define FUNC_S006_METHOD 0x1006
+#define CHARGE_CONTROL_RW 0x21
+
/* Scancodes read from the GIRB register */
#define KEY1_CODE 0x410
#define KEY2_CODE 0x411
@@ -132,6 +138,7 @@ struct fujitsu_laptop {
spinlock_t fifo_lock;
int flags_supported;
int flags_state;
+ bool charge_control_supported;
};
static struct acpi_device *fext;
@@ -164,6 +171,110 @@ static int call_fext_func(struct acpi_device *device,
return value;
}
+/* Battery charge control code */
+static ssize_t charge_control_end_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int cc_end_value, s006_cc_return;
+ int value, ret;
+
+ ret = kstrtouint(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ if (value < 50 || value > 100)
+ return -EINVAL;
+
+ cc_end_value = value * 0x100 + 0x20;
+ s006_cc_return = call_fext_func(fext, FUNC_S006_METHOD,
+ CHARGE_CONTROL_RW, cc_end_value, 0x0);
+ if (s006_cc_return < 0)
+ return s006_cc_return;
+ /*
+ * The S006 0x21 method returns 0x00 in case the provided value
+ * is invalid.
+ */
+ if (s006_cc_return == 0x00)
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t charge_control_end_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int status;
+
+ status = call_fext_func(fext, FUNC_S006_METHOD,
+ CHARGE_CONTROL_RW, 0x21, 0x0);
+ if (status < 0)
+ return status;
+
+ return sysfs_emit(buf, "%d\n", status);
+}
+
+static DEVICE_ATTR_RW(charge_control_end_threshold);
+
+/* ACPI battery hook */
+static int fujitsu_battery_add_hook(struct power_supply *battery,
+ struct acpi_battery_hook *hook)
+{
+ return device_create_file(&battery->dev,
+ &dev_attr_charge_control_end_threshold);
+}
+
+static int fujitsu_battery_remove_hook(struct power_supply *battery,
+ struct acpi_battery_hook *hook)
+{
+ device_remove_file(&battery->dev,
+ &dev_attr_charge_control_end_threshold);
+
+ return 0;
+}
+
+static struct acpi_battery_hook battery_hook = {
+ .add_battery = fujitsu_battery_add_hook,
+ .remove_battery = fujitsu_battery_remove_hook,
+ .name = "Fujitsu Battery Extension",
+};
+
+/*
+ * These functions are intended to be called from acpi_fujitsu_laptop_add and
+ * acpi_fujitsu_laptop_remove.
+ */
+static int fujitsu_battery_charge_control_add(struct acpi_device *device)
+{
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
+ int s006_cc_return;
+
+ priv->charge_control_supported = false;
+ /*
+ * Check if the S006 0x21 method exists by trying to get the current
+ * battery charge limit.
+ */
+ s006_cc_return = call_fext_func(fext, FUNC_S006_METHOD,
+ CHARGE_CONTROL_RW, 0x21, 0x0);
+ if (s006_cc_return < 0)
+ return s006_cc_return;
+ if (s006_cc_return == UNSUPPORTED_CMD)
+ return -ENODEV;
+
+ priv->charge_control_supported = true;
+ battery_hook_register(&battery_hook);
+
+ return 0;
+}
+
+static void fujitsu_battery_charge_control_remove(struct acpi_device *device)
+{
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
+
+ if (priv->charge_control_supported)
+ battery_hook_unregister(&battery_hook);
+}
+
/* Hardware access for LCD brightness control */
static int set_lcd_level(struct acpi_device *device, int level)
@@ -839,6 +950,10 @@ static int acpi_fujitsu_laptop_add(struct acpi_device *device)
if (ret)
goto err_free_fifo;
+ ret = fujitsu_battery_charge_control_add(device);
+ if (ret < 0)
+ pr_warn("Unable to register battery charge control: %d\n", ret);
+
return 0;
err_free_fifo:
@@ -851,6 +966,8 @@ static void acpi_fujitsu_laptop_remove(struct acpi_device *device)
{
struct fujitsu_laptop *priv = acpi_driver_data(device);
+ fujitsu_battery_charge_control_remove(device);
+
fujitsu_laptop_platform_remove(device);
kfifo_free(&priv->fifo);
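
A short note on the S006 encoding used in the store handler above: the threshold percentage lands in bits 15:8 of the argument while the low byte 0x20 presumably selects the charge-control sub-function, so writing 80 to the standard charge_control_end_threshold attribute on the battery yields cc_end_value = 0x5020. A tiny sketch of that arithmetic, matching the code above:

	/* encode a 50-100% threshold for FUNC_S006_METHOD / CHARGE_CONTROL_RW */
	static unsigned int fujitsu_cc_encode(unsigned int percent)
	{
		return percent * 0x100 + 0x20;	/* e.g. 80 -> 0x5020 */
	}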
diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
index 8c9f4f3227fc6..2dc50152158a3 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
@@ -24,7 +24,7 @@ struct bioscfg_priv bioscfg_drv = {
.mutex = __MUTEX_INITIALIZER(bioscfg_drv.mutex),
};
-static struct class *fw_attr_class;
+static const struct class *fw_attr_class;
ssize_t display_name_language_code_show(struct kobject *kobj,
struct kobj_attribute *attr,
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index e536604225c51..630519c086171 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -29,15 +29,19 @@
#include <linux/dmi.h>
MODULE_AUTHOR("Matthew Garrett <mjg59@srcf.ucam.org>");
-MODULE_DESCRIPTION("HP laptop WMI hotkeys driver");
+MODULE_DESCRIPTION("HP laptop WMI driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
-MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
+MODULE_ALIAS("wmi:5FB7F034-2C63-45E9-BE91-3D44E2C707E4");
#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
-#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
+#define HPWMI_BIOS_GUID "5FB7F034-2C63-45E9-BE91-3D44E2C707E4"
+
+#define HP_OMEN_EC_THERMAL_PROFILE_FLAGS_OFFSET 0x62
+#define HP_OMEN_EC_THERMAL_PROFILE_TIMER_OFFSET 0x63
#define HP_OMEN_EC_THERMAL_PROFILE_OFFSET 0x95
+
#define zero_if_sup(tmp) (zero_insize_support?0:sizeof(tmp)) // use when zero insize is required
/* DMI board names of devices that should use the omen specific path for
@@ -55,17 +59,25 @@ static const char * const omen_thermal_profile_boards[] = {
"874A", "8603", "8604", "8748", "886B", "886C", "878A", "878B", "878C",
"88C8", "88CB", "8786", "8787", "8788", "88D1", "88D2", "88F4", "88FD",
"88F5", "88F6", "88F7", "88FE", "88FF", "8900", "8901", "8902", "8912",
- "8917", "8918", "8949", "894A", "89EB"
+ "8917", "8918", "8949", "894A", "89EB", "8BAD", "8A42"
};
/* DMI Board names of Omen laptops that are specifically set to be thermal
* profile version 0 by the Omen Command Center app, regardless of what
* the get system design information WMI call returns
*/
-static const char *const omen_thermal_profile_force_v0_boards[] = {
+static const char * const omen_thermal_profile_force_v0_boards[] = {
"8607", "8746", "8747", "8749", "874A", "8748"
};
+/* DMI board names of Omen laptops that have a thermal profile timer which will
+ * cause the embedded controller to set the thermal profile back to
+ * "balanced" when reaching zero.
+ */
+static const char * const omen_timed_thermal_profile_boards[] = {
+ "8BAD", "8A42"
+};
+
/* DMI Board names of Victus laptops */
static const char * const victus_thermal_profile_boards[] = {
"8A25"
@@ -182,6 +194,12 @@ enum hp_thermal_profile_omen_v1 {
HP_OMEN_V1_THERMAL_PROFILE_COOL = 0x50,
};
+enum hp_thermal_profile_omen_flags {
+ HP_OMEN_EC_FLAGS_TURBO = 0x04,
+ HP_OMEN_EC_FLAGS_NOTIMER = 0x02,
+ HP_OMEN_EC_FLAGS_JUSTSET = 0x01,
+};
+
enum hp_thermal_profile_victus {
HP_VICTUS_THERMAL_PROFILE_DEFAULT = 0x00,
HP_VICTUS_THERMAL_PROFILE_PERFORMANCE = 0x01,
@@ -449,7 +467,11 @@ static int hp_wmi_get_tablet_mode(void)
static int omen_thermal_profile_set(int mode)
{
- char buffer[2] = {0, mode};
+ /* The Omen Control Center actively sets the first byte of the buffer to
+ * 255, so let's mimic this behaviour to be as close as possible to
+ * the original software.
+ */
+ char buffer[2] = {-1, mode};
int ret;
ret = hp_wmi_perform_query(HPWMI_SET_PERFORMANCE_MODE, HPWMI_GM,
@@ -1201,10 +1223,33 @@ static int platform_profile_omen_get(struct platform_profile_handler *pprof,
return 0;
}
+static bool has_omen_thermal_profile_ec_timer(void)
+{
+ const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
+
+ if (!board_name)
+ return false;
+
+ return match_string(omen_timed_thermal_profile_boards,
+ ARRAY_SIZE(omen_timed_thermal_profile_boards),
+ board_name) >= 0;
+}
+
+static inline int omen_thermal_profile_ec_flags_set(enum hp_thermal_profile_omen_flags flags)
+{
+ return ec_write(HP_OMEN_EC_THERMAL_PROFILE_FLAGS_OFFSET, flags);
+}
+
+static inline int omen_thermal_profile_ec_timer_set(u8 value)
+{
+ return ec_write(HP_OMEN_EC_THERMAL_PROFILE_TIMER_OFFSET, value);
+}
+
static int platform_profile_omen_set(struct platform_profile_handler *pprof,
enum platform_profile_option profile)
{
int err, tp, tp_version;
+ enum hp_thermal_profile_omen_flags flags = 0;
tp_version = omen_get_thermal_policy_version();
@@ -1238,6 +1283,20 @@ static int platform_profile_omen_set(struct platform_profile_handler *pprof,
if (err < 0)
return err;
+ if (has_omen_thermal_profile_ec_timer()) {
+ err = omen_thermal_profile_ec_timer_set(0);
+ if (err < 0)
+ return err;
+
+ if (profile == PLATFORM_PROFILE_PERFORMANCE)
+ flags = HP_OMEN_EC_FLAGS_NOTIMER |
+ HP_OMEN_EC_FLAGS_TURBO;
+
+ err = omen_thermal_profile_ec_flags_set(flags);
+ if (err < 0)
+ return err;
+ }
+
return 0;
}
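
For reference, the timer/flag handling added above boils down to two EC writes; a condensed sketch of the performance-mode case (register and flag names as defined earlier in this patch):

	/* disable the EC count-down and keep turbo active while in performance mode */
	static int omen_force_performance(void)
	{
		int err;

		err = ec_write(HP_OMEN_EC_THERMAL_PROFILE_TIMER_OFFSET, 0);
		if (err < 0)
			return err;

		/* 0x02 | 0x04 = 0x06 */
		return ec_write(HP_OMEN_EC_THERMAL_PROFILE_FLAGS_OFFSET,
				HP_OMEN_EC_FLAGS_NOTIMER | HP_OMEN_EC_FLAGS_TURBO);
	}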
diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c
index 0ef1c46b617b6..dde139c69945e 100644
--- a/drivers/platform/x86/huawei-wmi.c
+++ b/drivers/platform/x86/huawei-wmi.c
@@ -310,7 +310,6 @@ static void huawei_wmi_leds_setup(struct device *dev)
huawei->cdev.max_brightness = 1;
huawei->cdev.brightness_set_blocking = &huawei_wmi_micmute_led_set;
huawei->cdev.default_trigger = "audio-micmute";
- huawei->cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
huawei->cdev.dev = dev;
huawei->cdev.flags = LED_CORE_SUSPENDRESUME;
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
index 2ab7d9ac542d1..1d4bbae115f1f 100644
--- a/drivers/platform/x86/ibm_rtl.c
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -179,7 +179,7 @@ static ssize_t rtl_set_state(struct device *dev,
return ret;
}
-static struct bus_type rtl_subsys = {
+static const struct bus_type rtl_subsys = {
.name = "ibm_rtl",
.dev_name = "ibm_rtl",
};
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 88eefccb6ed27..901849810ce2e 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1091,6 +1091,8 @@ static const struct key_entry ideapad_keymap[] = {
{ KE_KEY, 0x07 | IDEAPAD_WMI_KEY, { KEY_HELP } },
{ KE_KEY, 0x0e | IDEAPAD_WMI_KEY, { KEY_PICKUP_PHONE } },
{ KE_KEY, 0x0f | IDEAPAD_WMI_KEY, { KEY_HANGUP_PHONE } },
+ /* Refresh Rate Toggle (Fn+R) */
+ { KE_KEY, 0x10 | IDEAPAD_WMI_KEY, { KEY_REFRESH_RATE_TOGGLE } },
/* Dark mode toggle */
{ KE_KEY, 0x13 | IDEAPAD_WMI_KEY, { KEY_PROG1 } },
/* Sound profile switch */
@@ -1100,7 +1102,7 @@ static const struct key_entry ideapad_keymap[] = {
/* Lenovo Support */
{ KE_KEY, 0x27 | IDEAPAD_WMI_KEY, { KEY_HELP } },
/* Refresh Rate Toggle */
- { KE_KEY, 0x0a | IDEAPAD_WMI_KEY, { KEY_DISPLAYTOGGLE } },
+ { KE_KEY, 0x0a | IDEAPAD_WMI_KEY, { KEY_REFRESH_RATE_TOGGLE } },
{ KE_END },
};
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 7457ca2b27a60..c7a8276458640 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -49,6 +49,8 @@ static const struct acpi_device_id intel_hid_ids[] = {
{"INTC1076", 0},
{"INTC1077", 0},
{"INTC1078", 0},
+ {"INTC107B", 0},
+ {"INTC10CB", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
@@ -504,6 +506,7 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
struct platform_device *device = context;
struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
unsigned long long ev_index;
+ struct key_entry *ke;
int err;
/*
@@ -545,11 +548,15 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
if (event == 0xc0 || !priv->array)
return;
- if (!sparse_keymap_entry_from_scancode(priv->array, event)) {
+ ke = sparse_keymap_entry_from_scancode(priv->array, event);
+ if (!ke) {
dev_info(&device->dev, "unknown event 0x%x\n", event);
return;
}
+ if (ke->type == KE_IGNORE)
+ return;
+
wakeup:
pm_wakeup_hard_event(&device->dev);
diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c
index 2cf3b4a8813f9..584c44387e103 100644
--- a/drivers/platform/x86/intel/ifs/load.c
+++ b/drivers/platform/x86/intel/ifs/load.c
@@ -383,7 +383,7 @@ int ifs_load_firmware(struct device *dev)
unsigned int expected_size;
const struct firmware *fw;
char scan_path[64];
- int ret = -EINVAL;
+ int ret;
snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.scan",
test->test_num, boot_cpu_data.x86, boot_cpu_data.x86_model,
diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c
index 13ecd55c66680..95b4b71fab537 100644
--- a/drivers/platform/x86/intel/ifs/runtest.c
+++ b/drivers/platform/x86/intel/ifs/runtest.c
@@ -23,6 +23,12 @@
/* Max retries on the same chunk */
#define MAX_IFS_RETRIES 5
+struct run_params {
+ struct ifs_data *ifsd;
+ union ifs_scan *activate;
+ union ifs_status status;
+};
+
/*
* Number of TSC cycles that a logical CPU will wait for the other
* logical CPU on the core in the WRMSR(ACTIVATE_SCAN).
@@ -134,19 +140,56 @@ static bool can_restart(union ifs_status status)
return false;
}
+#define SPINUNIT 100 /* 100 nsec */
+static atomic_t array_cpus_in;
+static atomic_t scan_cpus_in;
+
+/*
+ * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus()
+ */
+static void wait_for_sibling_cpu(atomic_t *t, long long timeout)
+{
+ int cpu = smp_processor_id();
+ const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+ int all_cpus = cpumask_weight(smt_mask);
+
+ atomic_inc(t);
+ while (atomic_read(t) < all_cpus) {
+ if (timeout < SPINUNIT)
+ return;
+ ndelay(SPINUNIT);
+ timeout -= SPINUNIT;
+ touch_nmi_watchdog();
+ }
+}
+
/*
* Execute the scan. Called "simultaneously" on all threads of a core
* at high priority using the stop_cpus mechanism.
*/
static int doscan(void *data)
{
- int cpu = smp_processor_id();
- u64 *msrs = data;
+ int cpu = smp_processor_id(), start, stop;
+ struct run_params *params = data;
+ union ifs_status status;
+ struct ifs_data *ifsd;
int first;
+ ifsd = params->ifsd;
+
+ if (ifsd->generation) {
+ start = params->activate->gen2.start;
+ stop = params->activate->gen2.stop;
+ } else {
+ start = params->activate->gen0.start;
+ stop = params->activate->gen0.stop;
+ }
+
/* Only the first logical CPU on a core reports result */
first = cpumask_first(cpu_smt_mask(cpu));
+ wait_for_sibling_cpu(&scan_cpus_in, NSEC_PER_SEC);
+
/*
* This WRMSR will wait for other HT threads to also write
* to this MSR (at most for activate.delay cycles). Then it
@@ -155,12 +198,14 @@ static int doscan(void *data)
* take up to 200 milliseconds (in the case where all chunks
* are processed in a single pass) before it retires.
*/
- wrmsrl(MSR_ACTIVATE_SCAN, msrs[0]);
+ wrmsrl(MSR_ACTIVATE_SCAN, params->activate->data);
+ rdmsrl(MSR_SCAN_STATUS, status.data);
- if (cpu == first) {
- /* Pass back the result of the scan */
- rdmsrl(MSR_SCAN_STATUS, msrs[1]);
- }
+ trace_ifs_status(ifsd->cur_batch, start, stop, status.data);
+
+ /* Pass back the result of the scan */
+ if (cpu == first)
+ params->status = status;
return 0;
}
@@ -179,7 +224,7 @@ static void ifs_test_core(int cpu, struct device *dev)
struct ifs_data *ifsd;
int to_start, to_stop;
int status_chunk;
- u64 msrvals[2];
+ struct run_params params;
int retries;
ifsd = ifs_get_data(dev);
@@ -190,6 +235,8 @@ static void ifs_test_core(int cpu, struct device *dev)
to_start = 0;
to_stop = ifsd->valid_chunks - 1;
+ params.ifsd = ifs_get_data(dev);
+
if (ifsd->generation) {
activate.gen2.start = to_start;
activate.gen2.stop = to_stop;
@@ -207,12 +254,11 @@ static void ifs_test_core(int cpu, struct device *dev)
break;
}
- msrvals[0] = activate.data;
- stop_core_cpuslocked(cpu, doscan, msrvals);
-
- status.data = msrvals[1];
+ params.activate = &activate;
+ atomic_set(&scan_cpus_in, 0);
+ stop_core_cpuslocked(cpu, doscan, &params);
- trace_ifs_status(cpu, to_start, to_stop, status.data);
+ status = params.status;
/* Some cases can be retried, give up for others */
if (!can_restart(status))
@@ -250,34 +296,14 @@ static void ifs_test_core(int cpu, struct device *dev)
}
}
-#define SPINUNIT 100 /* 100 nsec */
-static atomic_t array_cpus_out;
-
-/*
- * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus()
- */
-static void wait_for_sibling_cpu(atomic_t *t, long long timeout)
-{
- int cpu = smp_processor_id();
- const struct cpumask *smt_mask = cpu_smt_mask(cpu);
- int all_cpus = cpumask_weight(smt_mask);
-
- atomic_inc(t);
- while (atomic_read(t) < all_cpus) {
- if (timeout < SPINUNIT)
- return;
- ndelay(SPINUNIT);
- timeout -= SPINUNIT;
- touch_nmi_watchdog();
- }
-}
-
static int do_array_test(void *data)
{
union ifs_array *command = data;
int cpu = smp_processor_id();
int first;
+ wait_for_sibling_cpu(&array_cpus_in, NSEC_PER_SEC);
+
/*
* Only one logical CPU on a core needs to trigger the Array test via MSR write.
*/
@@ -289,9 +315,6 @@ static int do_array_test(void *data)
rdmsrl(MSR_ARRAY_BIST, command->data);
}
- /* Tests complete faster if the sibling is spinning here */
- wait_for_sibling_cpu(&array_cpus_out, NSEC_PER_SEC);
-
return 0;
}
@@ -312,7 +335,7 @@ static void ifs_array_test_core(int cpu, struct device *dev)
timed_out = true;
break;
}
- atomic_set(&array_cpus_out, 0);
+ atomic_set(&array_cpus_in, 0);
stop_core_cpuslocked(cpu, do_array_test, &command);
if (command.ctrl_result)
diff --git a/drivers/platform/x86/intel/oaktrail.c b/drivers/platform/x86/intel/oaktrail.c
index fa720967e69bf..217630f40c3f8 100644
--- a/drivers/platform/x86/intel/oaktrail.c
+++ b/drivers/platform/x86/intel/oaktrail.c
@@ -365,7 +365,7 @@ static void __exit oaktrail_cleanup(void)
module_init(oaktrail_init);
module_exit(oaktrail_cleanup);
-MODULE_AUTHOR("Yin Kangkai (kangkai.yin@intel.com)");
+MODULE_AUTHOR("Yin Kangkai <kangkai.yin@intel.com>");
MODULE_DESCRIPTION("Intel Oaktrail Platform ACPI Extras");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/pmc/arl.c b/drivers/platform/x86/intel/pmc/arl.c
index 683ae828276b8..34b4cd23bfe59 100644
--- a/drivers/platform/x86/intel/pmc/arl.c
+++ b/drivers/platform/x86/intel/pmc/arl.c
@@ -673,6 +673,7 @@ static struct pmc_info arl_pmc_info_list[] = {
};
#define ARL_NPU_PCI_DEV 0xad1d
+#define ARL_GNA_PCI_DEV 0xae4c
/*
* Set power state of select devices that do not have drivers to D3
* so that they do not block Package C entry.
@@ -680,6 +681,7 @@ static struct pmc_info arl_pmc_info_list[] = {
static void arl_d3_fixup(void)
{
pmc_core_set_device_d3(ARL_NPU_PCI_DEV);
+ pmc_core_set_device_d3(ARL_GNA_PCI_DEV);
}
static int arl_resume(struct pmc_dev *pmcdev)
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 8f9c036809c79..10c96c1a850af 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -1389,6 +1389,15 @@ static int pmc_core_probe(struct platform_device *pdev)
return -ENOMEM;
pmcdev->pmcs[PMC_IDX_MAIN] = primary_pmc;
+ /* The last element in msr_map is empty */
+ pmcdev->num_of_pkgc = ARRAY_SIZE(msr_map) - 1;
+ pmcdev->pkgc_res_cnt = devm_kcalloc(&pdev->dev,
+ pmcdev->num_of_pkgc,
+ sizeof(*pmcdev->pkgc_res_cnt),
+ GFP_KERNEL);
+ if (!pmcdev->pkgc_res_cnt)
+ return -ENOMEM;
+
/*
* Coffee Lake has CPU ID of Kaby Lake and Cannon Lake PCH. So here
* Sunrisepoint PCH regmap can't be used. Use Cannon Lake PCH regmap
@@ -1432,6 +1441,7 @@ static __maybe_unused int pmc_core_suspend(struct device *dev)
{
struct pmc_dev *pmcdev = dev_get_drvdata(dev);
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ unsigned int i;
if (pmcdev->suspend)
pmcdev->suspend(pmcdev);
@@ -1440,9 +1450,11 @@ static __maybe_unused int pmc_core_suspend(struct device *dev)
if (pm_suspend_via_firmware())
return 0;
- /* Save PC10 residency for checking later */
- if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter))
- return -EIO;
+ /* Save PKGC residency for checking later */
+ for (i = 0; i < pmcdev->num_of_pkgc; i++) {
+ if (rdmsrl_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i]))
+ return -EIO;
+ }
/* Save S0ix residency for checking later */
if (pmc_core_dev_state_get(pmc, &pmcdev->s0ix_counter))
@@ -1451,14 +1463,15 @@ static __maybe_unused int pmc_core_suspend(struct device *dev)
return 0;
}
-static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev)
+static inline bool pmc_core_is_deepest_pkgc_failed(struct pmc_dev *pmcdev)
{
- u64 pc10_counter;
+ u32 deepest_pkgc_msr = msr_map[pmcdev->num_of_pkgc - 1].bit_mask;
+ u64 deepest_pkgc_residency;
- if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter))
+ if (rdmsrl_safe(deepest_pkgc_msr, &deepest_pkgc_residency))
return false;
- if (pc10_counter == pmcdev->pc10_counter)
+ if (deepest_pkgc_residency == pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1])
return true;
return false;
@@ -1497,10 +1510,22 @@ int pmc_core_resume_common(struct pmc_dev *pmcdev)
if (!warn_on_s0ix_failures)
return 0;
- if (pmc_core_is_pc10_failed(pmcdev)) {
- /* S0ix failed because of PC10 entry failure */
- dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n",
- pmcdev->pc10_counter);
+ if (pmc_core_is_deepest_pkgc_failed(pmcdev)) {
+ /* S0ix failed because of deepest PKGC entry failure */
+ dev_info(dev, "CPU did not enter %s!!! (%s cnt=0x%llx)\n",
+ msr_map[pmcdev->num_of_pkgc - 1].name,
+ msr_map[pmcdev->num_of_pkgc - 1].name,
+ pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1]);
+
+ for (i = 0; i < pmcdev->num_of_pkgc; i++) {
+ u64 pc_cnt;
+
+ if (!rdmsrl_safe(msr_map[i].bit_mask, &pc_cnt)) {
+ dev_info(dev, "Prev %s cnt = 0x%llx, Current %s cnt = 0x%llx\n",
+ msr_map[i].name, pmcdev->pkgc_res_cnt[i],
+ msr_map[i].name, pc_cnt);
+ }
+ }
return 0;
}
diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
index 54137faaae2b2..83504c49a0e31 100644
--- a/drivers/platform/x86/intel/pmc/core.h
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -385,7 +385,8 @@ struct pmc {
* @pmc_xram_read_bit: flag to indicate whether PMC XRAM shadow registers
* used to read MPHY PG and PLL status are available
* @mutex_lock: mutex to complete one transaction
- * @pc10_counter: PC10 residency counter
+ * @pkgc_res_cnt: Array of PKGC residency counters
+ * @num_of_pkgc: Number of PKGC
* @s0ix_counter: S0ix residency (step adjusted)
* @num_lpm_modes: Count of enabled modes
* @lpm_en_modes: Array of enabled modes from lowest to highest priority
@@ -403,13 +404,15 @@ struct pmc_dev {
int pmc_xram_read_bit;
struct mutex lock; /* generic mutex lock for PMC Core */
- u64 pc10_counter;
u64 s0ix_counter;
int num_lpm_modes;
int lpm_en_modes[LPM_MAX_NUM_MODES];
void (*suspend)(struct pmc_dev *pmcdev);
int (*resume)(struct pmc_dev *pmcdev);
+ u64 *pkgc_res_cnt;
+ u8 num_of_pkgc;
+
bool has_die_c6;
u32 die_c6_offset;
struct telem_endpoint *punit_ep;
diff --git a/drivers/platform/x86/intel/pmc/lnl.c b/drivers/platform/x86/intel/pmc/lnl.c
index abad17cdd3d78..068d72504683f 100644
--- a/drivers/platform/x86/intel/pmc/lnl.c
+++ b/drivers/platform/x86/intel/pmc/lnl.c
@@ -13,21 +13,6 @@
#include "core.h"
-#define SOCM_LPM_REQ_GUID 0x11594920
-
-#define PMC_DEVID_SOCM 0xa87f
-
-static const u8 LNL_LPM_REG_INDEX[] = {0, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20};
-
-static struct pmc_info lnl_pmc_info_list[] = {
- {
- .guid = SOCM_LPM_REQ_GUID,
- .devid = PMC_DEVID_SOCM,
- .map = &lnl_socm_reg_map,
- },
- {}
-};
-
const struct pmc_bit_map lnl_ltr_show_map[] = {
{"SOUTHPORT_A", CNP_PMC_LTR_SPA},
{"SOUTHPORT_B", CNP_PMC_LTR_SPB},
@@ -490,7 +475,6 @@ const struct pmc_reg_map lnl_socm_reg_map = {
.lpm_sts = lnl_lpm_maps,
.lpm_status_offset = MTL_LPM_STATUS_OFFSET,
.lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
- .lpm_reg_index = LNL_LPM_REG_INDEX,
};
#define LNL_NPU_PCI_DEV 0x643e
@@ -517,33 +501,19 @@ static int lnl_resume(struct pmc_dev *pmcdev)
int lnl_core_init(struct pmc_dev *pmcdev)
{
int ret;
- int func = 2;
- bool ssram_init = true;
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_SOC];
lnl_d3_fixup();
pmcdev->suspend = cnl_suspend;
pmcdev->resume = lnl_resume;
- pmcdev->regmap_list = lnl_pmc_info_list;
- ret = pmc_core_ssram_init(pmcdev, func);
-
- /* If regbase not assigned, set map and discover using legacy method */
- if (ret) {
- ssram_init = false;
- pmc->map = &lnl_socm_reg_map;
- ret = get_primary_reg_base(pmc);
- if (ret)
- return ret;
- }
- pmc_core_get_low_power_modes(pmcdev);
+ pmc->map = &lnl_socm_reg_map;
+ ret = get_primary_reg_base(pmc);
+ if (ret)
+ return ret;
- if (ssram_init) {
- ret = pmc_core_ssram_get_lpm_reqs(pmcdev);
- if (ret)
- return ret;
- }
+ pmc_core_get_low_power_modes(pmcdev);
return 0;
}
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index 08df9494603c5..30951f7131cd9 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -719,6 +719,7 @@ static struct miscdevice isst_if_char_driver = {
};
static const struct x86_cpu_id hpm_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, NULL),
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, NULL),
{}
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
index 2662fbbddf0cc..1d918000d72b4 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
@@ -462,10 +462,10 @@ static long isst_if_core_power_state(void __user *argp)
struct tpmi_per_power_domain_info *power_domain_info;
struct isst_core_power core_power;
- if (disable_dynamic_sst_features())
+ if (copy_from_user(&core_power, argp, sizeof(core_power)))
return -EFAULT;
- if (copy_from_user(&core_power, argp, sizeof(core_power)))
+ if (core_power.get_set && disable_dynamic_sst_features())
return -EFAULT;
power_domain_info = get_instance(core_power.socket_id, core_power.power_domain_id);
diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c
index e73cdea67fff8..910df7c654f48 100644
--- a/drivers/platform/x86/intel/tpmi.c
+++ b/drivers/platform/x86/intel/tpmi.c
@@ -96,7 +96,7 @@ struct intel_tpmi_pfs_entry {
*/
struct intel_tpmi_pm_feature {
struct intel_tpmi_pfs_entry pfs_header;
- unsigned int vsec_offset;
+ u64 vsec_offset;
struct intel_vsec_device *vsec_dev;
};
@@ -376,7 +376,7 @@ static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused)
read_blocked = feature_state.read_blocked ? 'Y' : 'N';
write_blocked = feature_state.write_blocked ? 'Y' : 'N';
}
- seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%08x\t%c\t%c\t\t%c\t\t%c\n",
+ seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%016llx\t%c\t%c\t\t%c\t\t%c\n",
pfs->pfs_header.tpmi_id, pfs->pfs_header.num_entries,
pfs->pfs_header.entry_size, pfs->pfs_header.cap_offset,
pfs->pfs_header.attribute, pfs->vsec_offset, locked, disabled,
@@ -395,7 +395,8 @@ static int tpmi_mem_dump_show(struct seq_file *s, void *unused)
struct intel_tpmi_pm_feature *pfs = s->private;
int count, ret = 0;
void __iomem *mem;
- u32 off, size;
+ u32 size;
+ u64 off;
u8 *buffer;
size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
@@ -411,7 +412,7 @@ static int tpmi_mem_dump_show(struct seq_file *s, void *unused)
mutex_lock(&tpmi_dev_lock);
for (count = 0; count < pfs->pfs_header.num_entries; ++count) {
- seq_printf(s, "TPMI Instance:%d offset:0x%x\n", count, off);
+ seq_printf(s, "TPMI Instance:%d offset:0x%llx\n", count, off);
mem = ioremap(off, size);
if (!mem) {
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
index bd75d61ff8a66..ef730200a04bd 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
@@ -29,7 +29,7 @@
#include "uncore-frequency-common.h"
#define UNCORE_MAJOR_VERSION 0
-#define UNCORE_MINOR_VERSION 1
+#define UNCORE_MINOR_VERSION 2
#define UNCORE_HEADER_INDEX 0
#define UNCORE_FABRIC_CLUSTER_OFFSET 8
@@ -329,7 +329,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
goto remove_clusters;
}
- if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) != UNCORE_MINOR_VERSION)
+ if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) > UNCORE_MINOR_VERSION)
dev_info(&auxdev->dev, "Uncore: Ignore: Unsupported minor version:%lx\n",
TPMI_MINOR_VERSION(pd_info->ufs_header_ver));
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
index 084c355c86f5f..79bb2c801daa9 100644
--- a/drivers/platform/x86/intel/vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -136,8 +136,6 @@ static int intel_vbtn_input_setup(struct platform_device *device)
priv->switches_dev->id.bustype = BUS_HOST;
if (priv->has_switches) {
- detect_tablet_mode(&device->dev);
-
ret = input_register_device(priv->switches_dev);
if (ret)
return ret;
@@ -258,9 +256,6 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel)
{
- unsigned long long vgbs;
- acpi_status status;
-
/* See dual_accel_detect.h for more info */
if (dual_accel)
return false;
@@ -268,8 +263,7 @@ static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel)
if (!dmi_check_system(dmi_switches_allow_list))
return false;
- status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs);
- return ACPI_SUCCESS(status);
+ return acpi_has_method(handle, "VGBS");
}
static int intel_vbtn_probe(struct platform_device *device)
@@ -316,6 +310,9 @@ static int intel_vbtn_probe(struct platform_device *device)
if (ACPI_FAILURE(status))
dev_err(&device->dev, "Error VBDL failed with ACPI status %d\n", status);
}
+ // Check switches after buttons since VBDL may have side effects.
+ if (has_switches)
+ detect_tablet_mode(&device->dev);
device_init_wakeup(&device->dev, true);
/*
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index 778eb0aa3479a..0fdfaf3a4f5cd 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -236,10 +236,7 @@ static bool intel_vsec_walk_header(struct pci_dev *pdev,
for ( ; *header; header++) {
ret = intel_vsec_add_dev(pdev, *header, info);
- if (ret)
- dev_info(&pdev->dev, "Could not add device for VSEC id %d\n",
- (*header)->id);
- else
+ if (!ret)
have_devices = true;
}
diff --git a/drivers/platform/x86/intel/wmi/sbl-fw-update.c b/drivers/platform/x86/intel/wmi/sbl-fw-update.c
index 040153ad67c1c..75c82c08117f5 100644
--- a/drivers/platform/x86/intel/wmi/sbl-fw-update.c
+++ b/drivers/platform/x86/intel/wmi/sbl-fw-update.c
@@ -131,6 +131,7 @@ static struct wmi_driver intel_wmi_sbl_fw_update_driver = {
.probe = intel_wmi_sbl_fw_update_probe,
.remove = intel_wmi_sbl_fw_update_remove,
.id_table = intel_wmi_sbl_id_table,
+ .no_singleton = true,
};
module_wmi_driver(intel_wmi_sbl_fw_update_driver);
diff --git a/drivers/platform/x86/intel/wmi/thunderbolt.c b/drivers/platform/x86/intel/wmi/thunderbolt.c
index e2ad3f46f3569..08df560a2c7a8 100644
--- a/drivers/platform/x86/intel/wmi/thunderbolt.c
+++ b/drivers/platform/x86/intel/wmi/thunderbolt.c
@@ -63,6 +63,7 @@ static struct wmi_driver intel_wmi_thunderbolt_driver = {
.dev_groups = tbt_groups,
},
.id_table = intel_wmi_thunderbolt_id_table,
+ .no_singleton = true,
};
module_wmi_driver(intel_wmi_thunderbolt_driver);
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
index b7c10c15a3d63..7d87cbd4b9c63 100644
--- a/drivers/platform/x86/intel_scu_ipcutil.c
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -22,7 +22,7 @@
static int major;
-struct intel_scu_ipc_dev *scu;
+static struct intel_scu_ipc_dev *scu;
static DEFINE_MUTEX(scu_lock);
/* IOCTL commands */
diff --git a/drivers/platform/x86/intel_scu_pcidrv.c b/drivers/platform/x86/intel_scu_pcidrv.c
index d904fad499aa5..dbf0310448da9 100644
--- a/drivers/platform/x86/intel_scu_pcidrv.c
+++ b/drivers/platform/x86/intel_scu_pcidrv.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/pci.h>
-#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>
static int intel_scu_pci_probe(struct pci_dev *pdev,
diff --git a/drivers/platform/x86/intel_scu_wdt.c b/drivers/platform/x86/intel_scu_wdt.c
index c2479777a1d60..a5031a25632e6 100644
--- a/drivers/platform/x86/intel_scu_wdt.c
+++ b/drivers/platform/x86/intel_scu_wdt.c
@@ -13,7 +13,6 @@
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
-#include <asm/intel-mid.h>
#include <asm/io_apic.h>
#include <asm/hw_irq.h>
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index ad3c39e9e9f58..e714ee6298dda 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -736,7 +736,7 @@ static int acpi_add(struct acpi_device *device)
default:
year = 2019;
}
- pr_info("product: %s year: %d\n", product, year);
+ pr_info("product: %s year: %d\n", product ?: "unknown", year);
if (year >= 2019)
battery_limit_use_wmbb = 1;
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 32981e2ad3b39..9d70146fd7420 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -6659,6 +6659,6 @@ static void __exit mlxplat_exit(void)
}
module_exit(mlxplat_exit);
-MODULE_AUTHOR("Vadim Pasternak (vadimp@mellanox.com)");
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
MODULE_DESCRIPTION("Mellanox platform driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index 93a6414c66114..0aa7076bc9cc7 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dmi.h>
@@ -17,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
+#include <linux/suspend.h>
struct pmc_bit_map {
const char *name;
@@ -448,6 +450,82 @@ static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
return 0;
}
+#ifdef CONFIG_SUSPEND
+static void pmc_dev_state_check(u32 sts, const struct pmc_bit_map *sts_map,
+ u32 fd, const struct pmc_bit_map *fd_map,
+ u32 sts_possible_false_pos)
+{
+ int index;
+
+ for (index = 0; sts_map[index].name; index++) {
+ if (!(fd_map[index].bit_mask & fd) &&
+ !(sts_map[index].bit_mask & sts)) {
+ if (sts_map[index].bit_mask & sts_possible_false_pos)
+ pm_pr_dbg("%s is in D0 prior to s2idle\n",
+ sts_map[index].name);
+ else
+ pr_err("%s is in D0 prior to s2idle\n",
+ sts_map[index].name);
+ }
+ }
+}
+
+static void pmc_s2idle_check(void)
+{
+ struct pmc_dev *pmc = &pmc_device;
+ const struct pmc_reg_map *m = pmc->map;
+ u32 func_dis, func_dis_2;
+ u32 d3_sts_0, d3_sts_1;
+ u32 false_pos_sts_0, false_pos_sts_1;
+ int i;
+
+ func_dis = pmc_reg_read(pmc, PMC_FUNC_DIS);
+ func_dis_2 = pmc_reg_read(pmc, PMC_FUNC_DIS_2);
+ d3_sts_0 = pmc_reg_read(pmc, PMC_D3_STS_0);
+ d3_sts_1 = pmc_reg_read(pmc, PMC_D3_STS_1);
+
+ /*
+ * Some blocks are not used on lower-featured versions of the SoC and
+ * always report D0; add these to the false_pos mask so they are only logged at debug level.
+ */
+ if (m->d3_sts_1 == byt_d3_sts_1_map) {
+ /* Bay Trail */
+ false_pos_sts_0 = BIT_GBE | BIT_SATA | BIT_PCIE_PORT0 |
+ BIT_PCIE_PORT1 | BIT_PCIE_PORT2 | BIT_PCIE_PORT3 |
+ BIT_LPSS2_F5_I2C5;
+ false_pos_sts_1 = BIT_SMB | BIT_USH_SS_PHY | BIT_DFX;
+ } else {
+ /* Cherry Trail */
+ false_pos_sts_0 = BIT_GBE | BIT_SATA | BIT_LPSS2_F7_I2C7;
+ false_pos_sts_1 = BIT_SMB | BIT_STS_ISH;
+ }
+
+ pmc_dev_state_check(d3_sts_0, m->d3_sts_0, func_dis, m->func_dis, false_pos_sts_0);
+ pmc_dev_state_check(d3_sts_1, m->d3_sts_1, func_dis_2, m->func_dis_2, false_pos_sts_1);
+
+ /* Forced-on PMC clocks prevent S0i3 */
+ for (i = 0; i < PMC_CLK_NUM; i++) {
+ u32 ctl = pmc_reg_read(pmc, PMC_CLK_CTL_OFFSET + 4 * i);
+
+ if ((ctl & PMC_MASK_CLK_CTL) != PMC_CLK_CTL_FORCE_ON)
+ continue;
+
+ pr_err("clock %d is ON prior to freeze (ctl 0x%08x)\n", i, ctl);
+ }
+}
+
+static struct acpi_s2idle_dev_ops pmc_s2idle_ops = {
+ .check = pmc_s2idle_check,
+};
+
+static void pmc_s2idle_check_register(void)
+{
+ acpi_register_lps0_dev(&pmc_s2idle_ops);
+}
+#else
+static void pmc_s2idle_check_register(void) {}
+#endif
+
static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct pmc_dev *pmc = &pmc_device;
@@ -485,6 +563,7 @@ static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_warn(&pdev->dev, "platform clocks register failed: %d\n",
ret);
+ pmc_s2idle_check_register();
pmc->init = true;
return ret;
}
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
index 8158e3cf5d6de..97b9c63922303 100644
--- a/drivers/platform/x86/serial-multi-instantiate.c
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -329,6 +329,19 @@ static const struct smi_node cs35l41_hda = {
.bus_type = SMI_AUTO_DETECT,
};
+static const struct smi_node cs35l54_hda = {
+ .instances = {
+ { "cs35l54-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l54-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l54-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l54-hda", IRQ_RESOURCE_AUTO, 0 },
+ /* a 5th entry is an alias address, not a real device */
+ { "cs35l54-hda_dummy_dev" },
+ {}
+ },
+ .bus_type = SMI_AUTO_DETECT,
+};
+
static const struct smi_node cs35l56_hda = {
.instances = {
{ "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
@@ -342,6 +355,19 @@ static const struct smi_node cs35l56_hda = {
.bus_type = SMI_AUTO_DETECT,
};
+static const struct smi_node cs35l57_hda = {
+ .instances = {
+ { "cs35l57-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l57-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l57-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l57-hda", IRQ_RESOURCE_AUTO, 0 },
+ /* a 5th entry is an alias address, not a real device */
+ { "cs35l57-hda_dummy_dev" },
+ {}
+ },
+ .bus_type = SMI_AUTO_DETECT,
+};
+
/*
* Note new device-ids must also be added to ignore_serial_bus_ids in
* drivers/acpi/scan.c: acpi_device_enumeration_by_parent().
@@ -350,7 +376,9 @@ static const struct acpi_device_id smi_acpi_ids[] = {
{ "BSG1160", (unsigned long)&bsg1160_data },
{ "BSG2150", (unsigned long)&bsg2150_data },
{ "CSC3551", (unsigned long)&cs35l41_hda },
+ { "CSC3554", (unsigned long)&cs35l54_hda },
{ "CSC3556", (unsigned long)&cs35l56_hda },
+ { "CSC3557", (unsigned long)&cs35l57_hda },
{ "INT3515", (unsigned long)&int3515_data },
/* Non-conforming _HID for Cirrus Logic already released */
{ "CLSA0100", (unsigned long)&cs35l41_hda },
diff --git a/drivers/platform/x86/silicom-platform.c b/drivers/platform/x86/silicom-platform.c
index 6ce43ccb3112e..c0910af16a3ac 100644
--- a/drivers/platform/x86/silicom-platform.c
+++ b/drivers/platform/x86/silicom-platform.c
@@ -256,12 +256,7 @@ static void silicom_gpio_set(struct gpio_chip *gc,
if (direction == GPIO_LINE_DIRECTION_IN)
return;
- if (value)
- silicom_mec_port_set(channel, 0);
- else if (value == 0)
- silicom_mec_port_set(channel, 1);
- else
- pr_err("Wrong argument value: %d\n", value);
+ silicom_mec_port_set(channel, !value);
}
static int silicom_gpio_direction_output(struct gpio_chip *gc,
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index ce3e08815a8e6..9345316b45dbe 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -195,7 +195,7 @@ static const char * const level_options[] = {
[TLMI_LEVEL_MASTER] = "master",
};
static struct think_lmi tlmi_priv;
-static struct class *fw_attr_class;
+static const struct class *fw_attr_class;
static DEFINE_MUTEX(tlmi_mutex);
/* Convert BIOS WMI error string to suitable error code */
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 5ecd9d33250d7..82429e59999da 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -69,6 +69,7 @@
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
+#include <linux/units.h>
#include <linux/workqueue.h>
#include <acpi/battery.h>
@@ -166,6 +167,7 @@ enum tpacpi_hkey_event_t {
TP_HKEY_EV_VOL_MUTE = 0x1017, /* Mixer output mute */
TP_HKEY_EV_PRIVACYGUARD_TOGGLE = 0x130f, /* Toggle priv.guard on/off */
TP_HKEY_EV_AMT_TOGGLE = 0x131a, /* Toggle AMT on/off */
+ TP_HKEY_EV_PROFILE_TOGGLE = 0x131f, /* Toggle platform profile */
/* Reasons for waking up from S3/S4 */
TP_HKEY_EV_WKUP_S3_UNDOCK = 0x2304, /* undock requested, S3 */
@@ -3731,6 +3733,7 @@ static bool hotkey_notify_extended_hotkey(const u32 hkey)
switch (hkey) {
case TP_HKEY_EV_PRIVACYGUARD_TOGGLE:
case TP_HKEY_EV_AMT_TOGGLE:
+ case TP_HKEY_EV_PROFILE_TOGGLE:
tpacpi_driver_event(hkey);
return true;
}
@@ -6126,12 +6129,15 @@ enum thermal_access_mode {
TPACPI_THERMAL_ACPI_TMP07, /* Use ACPI TMP0-7 */
TPACPI_THERMAL_ACPI_UPDT, /* Use ACPI TMP0-7 with UPDT */
TPACPI_THERMAL_TPEC_8, /* Use ACPI EC regs, 8 sensors */
+ TPACPI_THERMAL_TPEC_12, /* Use ACPI EC regs, 12 sensors */
TPACPI_THERMAL_TPEC_16, /* Use ACPI EC regs, 16 sensors */
};
enum { /* TPACPI_THERMAL_TPEC_* */
TP_EC_THERMAL_TMP0 = 0x78, /* ACPI EC regs TMP 0..7 */
TP_EC_THERMAL_TMP8 = 0xC0, /* ACPI EC regs TMP 8..15 */
+ TP_EC_THERMAL_TMP0_NS = 0xA8, /* ACPI EC Non-Standard regs TMP 0..7 */
+ TP_EC_THERMAL_TMP8_NS = 0xB8, /* ACPI EC Non-standard regs TMP 8..11 */
TP_EC_FUNCREV = 0xEF, /* ACPI EC Functional revision */
TP_EC_THERMAL_TMP_NA = -128, /* ACPI EC sensor not available */
@@ -6144,8 +6150,104 @@ struct ibm_thermal_sensors_struct {
s32 temp[TPACPI_MAX_THERMAL_SENSORS];
};
+static const struct tpacpi_quirk thermal_quirk_table[] __initconst = {
+ /* Non-standard address for thermal registers on some ThinkPads */
+ TPACPI_Q_LNV3('R', '1', 'F', true), /* L13 Yoga Gen 2 */
+ TPACPI_Q_LNV3('N', '2', 'U', true), /* X13 Yoga Gen 2 */
+ TPACPI_Q_LNV3('R', '0', 'R', true), /* L380 */
+ TPACPI_Q_LNV3('R', '1', '5', true), /* L13 Yoga Gen 1 */
+ TPACPI_Q_LNV3('R', '1', '0', true), /* L390 */
+ TPACPI_Q_LNV3('N', '2', 'L', true), /* X13 Yoga Gen 1 */
+ TPACPI_Q_LNV3('R', '0', 'T', true), /* 11e Gen5 GL */
+ TPACPI_Q_LNV3('R', '1', 'D', true), /* 11e Gen5 GL-R */
+ TPACPI_Q_LNV3('R', '0', 'V', true), /* 11e Gen5 KL-Y */
+};
+
static enum thermal_access_mode thermal_read_mode;
static bool thermal_use_labels;
+static bool thermal_with_ns_address; /* Non-standard thermal reg address */
+
+/* Function to check thermal read mode */
+static enum thermal_access_mode __init thermal_read_mode_check(void)
+{
+ u8 t, ta1, ta2, ver = 0;
+ int i;
+ int acpi_tmp7;
+
+ acpi_tmp7 = acpi_evalf(ec_handle, NULL, "TMP7", "qv");
+
+ if (thinkpad_id.ec_model) {
+ /*
+ * Direct EC access mode: sensors at registers 0x78-0x7F,
+ * 0xC0-0xC7. Registers return 0x00 for non-implemented,
+ * thermal sensors return 0x80 when not available.
+ *
+ * In some special cases (when the Power Supply ID is 0xC2) the
+ * above rule causes thermal control issues. Offset 0xEF determines
+ * the EC version; registers 0xC0-0xC7 are not thermal registers in
+ * version 3.
+ */
+ if (!acpi_ec_read(TP_EC_FUNCREV, &ver))
+ pr_warn("Thinkpad ACPI EC unable to access EC version\n");
+
+ /* Quirks to check non-standard EC */
+ thermal_with_ns_address = tpacpi_check_quirks(thermal_quirk_table,
+ ARRAY_SIZE(thermal_quirk_table));
+
+ /* Support for Thinkpads with non-standard address */
+ if (thermal_with_ns_address) {
+ pr_info("ECFW with non-standard thermal registers found\n");
+ return TPACPI_THERMAL_TPEC_12;
+ }
+
+ ta1 = ta2 = 0;
+ for (i = 0; i < 8; i++) {
+ if (acpi_ec_read(TP_EC_THERMAL_TMP0 + i, &t)) {
+ ta1 |= t;
+ } else {
+ ta1 = 0;
+ break;
+ }
+ if (ver < 3) {
+ if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
+ ta2 |= t;
+ } else {
+ ta1 = 0;
+ break;
+ }
+ }
+ }
+
+ if (ta1 == 0) {
+ /* This is sheer paranoia, but we handle it anyway */
+ if (acpi_tmp7) {
+ pr_err("ThinkPad ACPI EC access misbehaving, falling back to ACPI TMPx access mode\n");
+ return TPACPI_THERMAL_ACPI_TMP07;
+ }
+ pr_err("ThinkPad ACPI EC access misbehaving, disabling thermal sensors access\n");
+ return TPACPI_THERMAL_NONE;
+ }
+
+ if (ver >= 3) {
+ thermal_use_labels = true;
+ return TPACPI_THERMAL_TPEC_8;
+ }
+
+ return (ta2 != 0) ? TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
+ }
+
+ if (acpi_tmp7) {
+ if (tpacpi_is_ibm() && acpi_evalf(ec_handle, NULL, "UPDT", "qv")) {
+ /* 600e/x, 770e, 770x */
+ return TPACPI_THERMAL_ACPI_UPDT;
+ }
+ /* IBM/LENOVO DSDT EC.TMPx access, max 8 sensors */
+ return TPACPI_THERMAL_ACPI_TMP07;
+ }
+
+ /* temperatures not supported on 570, G4x, R30, R31, R32 */
+ return TPACPI_THERMAL_NONE;
+}
/* idx is zero-based */
static int thermal_get_sensor(int idx, s32 *value)
@@ -6174,6 +6276,20 @@ static int thermal_get_sensor(int idx, s32 *value)
}
break;
+ /* The Non-standard EC uses 12 Thermal areas */
+ case TPACPI_THERMAL_TPEC_12:
+ if (idx >= 12)
+ return -EINVAL;
+
+ t = idx < 8 ? TP_EC_THERMAL_TMP0_NS + idx :
+ TP_EC_THERMAL_TMP8_NS + (idx - 8);
+
+ if (!acpi_ec_read(t, &tmp))
+ return -EIO;
+
+ *value = tmp * MILLIDEGREE_PER_DEGREE;
+ return 0;
+
case TPACPI_THERMAL_ACPI_UPDT:
if (idx <= 7) {
snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx);
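
To make the non-standard register layout concrete, here is a small helper (purely illustrative, not part of the patch) mirroring the idx-to-register mapping used in the TPEC_12 case above:

	/* sensors 0..7 live at 0xA8 + idx, sensors 8..11 at 0xB8 + (idx - 8) */
	static u8 tpacpi_ns_thermal_reg(int idx)
	{
		return idx < 8 ? TP_EC_THERMAL_TMP0_NS + idx
			       : TP_EC_THERMAL_TMP8_NS + (idx - 8);
	}

Sensor 10, for example, is read from EC offset 0xBA, and the raw byte is scaled by MILLIDEGREE_PER_DEGREE for hwmon.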
@@ -6208,17 +6324,17 @@ static int thermal_get_sensor(int idx, s32 *value)
static int thermal_get_sensors(struct ibm_thermal_sensors_struct *s)
{
- int res, i;
- int n;
-
- n = 8;
- i = 0;
+ int res, i, n;
if (!s)
return -EINVAL;
if (thermal_read_mode == TPACPI_THERMAL_TPEC_16)
n = 16;
+ else if (thermal_read_mode == TPACPI_THERMAL_TPEC_12)
+ n = 12;
+ else
+ n = 8;
for (i = 0 ; i < n; i++) {
res = thermal_get_sensor(i, &s->temp[i]);
@@ -6317,18 +6433,36 @@ static struct attribute *thermal_temp_input_attr[] = {
NULL
};
+#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
+
static umode_t thermal_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- if (thermal_read_mode == TPACPI_THERMAL_NONE)
+ struct device_attribute *dev_attr = to_dev_attr(attr);
+ struct sensor_device_attribute *sensor_attr =
+ to_sensor_dev_attr(dev_attr);
+
+ int idx = sensor_attr->index;
+
+ switch (thermal_read_mode) {
+ case TPACPI_THERMAL_NONE:
return 0;
- if (attr == THERMAL_ATTRS(8) || attr == THERMAL_ATTRS(9) ||
- attr == THERMAL_ATTRS(10) || attr == THERMAL_ATTRS(11) ||
- attr == THERMAL_ATTRS(12) || attr == THERMAL_ATTRS(13) ||
- attr == THERMAL_ATTRS(14) || attr == THERMAL_ATTRS(15)) {
- if (thermal_read_mode != TPACPI_THERMAL_TPEC_16)
+ case TPACPI_THERMAL_ACPI_TMP07:
+ case TPACPI_THERMAL_ACPI_UPDT:
+ case TPACPI_THERMAL_TPEC_8:
+ if (idx >= 8)
+ return 0;
+ break;
+
+ case TPACPI_THERMAL_TPEC_12:
+ if (idx >= 12)
return 0;
+ break;
+
+ default:
+ break;
+
}
return attr->mode;
@@ -6375,78 +6509,9 @@ static const struct attribute_group temp_label_attr_group = {
static int __init thermal_init(struct ibm_init_struct *iibm)
{
- u8 t, ta1, ta2, ver = 0;
- int i;
- int acpi_tmp7;
-
vdbg_printk(TPACPI_DBG_INIT, "initializing thermal subdriver\n");
- acpi_tmp7 = acpi_evalf(ec_handle, NULL, "TMP7", "qv");
-
- if (thinkpad_id.ec_model) {
- /*
- * Direct EC access mode: sensors at registers
- * 0x78-0x7F, 0xC0-0xC7. Registers return 0x00 for
- * non-implemented, thermal sensors return 0x80 when
- * not available
- * The above rule is unfortunately flawed. This has been seen with
- * 0xC2 (power supply ID) causing thermal control problems.
- * The EC version can be determined by offset 0xEF and at least for
- * version 3 the Lenovo firmware team confirmed that registers 0xC0-0xC7
- * are not thermal registers.
- */
- if (!acpi_ec_read(TP_EC_FUNCREV, &ver))
- pr_warn("Thinkpad ACPI EC unable to access EC version\n");
-
- ta1 = ta2 = 0;
- for (i = 0; i < 8; i++) {
- if (acpi_ec_read(TP_EC_THERMAL_TMP0 + i, &t)) {
- ta1 |= t;
- } else {
- ta1 = 0;
- break;
- }
- if (ver < 3) {
- if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
- ta2 |= t;
- } else {
- ta1 = 0;
- break;
- }
- }
- }
- if (ta1 == 0) {
- /* This is sheer paranoia, but we handle it anyway */
- if (acpi_tmp7) {
- pr_err("ThinkPad ACPI EC access misbehaving, falling back to ACPI TMPx access mode\n");
- thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;
- } else {
- pr_err("ThinkPad ACPI EC access misbehaving, disabling thermal sensors access\n");
- thermal_read_mode = TPACPI_THERMAL_NONE;
- }
- } else {
- if (ver >= 3) {
- thermal_read_mode = TPACPI_THERMAL_TPEC_8;
- thermal_use_labels = true;
- } else {
- thermal_read_mode =
- (ta2 != 0) ?
- TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
- }
- }
- } else if (acpi_tmp7) {
- if (tpacpi_is_ibm() &&
- acpi_evalf(ec_handle, NULL, "UPDT", "qv")) {
- /* 600e/x, 770e, 770x */
- thermal_read_mode = TPACPI_THERMAL_ACPI_UPDT;
- } else {
- /* IBM/LENOVO DSDT EC.TMPx access, max 8 sensors */
- thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;
- }
- } else {
- /* temperatures not supported on 570, G4x, R30, R31, R32 */
- thermal_read_mode = TPACPI_THERMAL_NONE;
- }
+ thermal_read_mode = thermal_read_mode_check();
vdbg_printk(TPACPI_DBG_INIT, "thermal is %s, mode %d\n",
str_supported(thermal_read_mode != TPACPI_THERMAL_NONE),
@@ -8767,6 +8832,13 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('N', '3', '7', TPACPI_FAN_2CTL), /* T15g (2nd gen) */
TPACPI_Q_LNV3('R', '1', 'F', TPACPI_FAN_NS), /* L13 Yoga Gen 2 */
TPACPI_Q_LNV3('N', '2', 'U', TPACPI_FAN_NS), /* X13 Yoga Gen 2*/
+ TPACPI_Q_LNV3('R', '0', 'R', TPACPI_FAN_NS), /* L380 */
+ TPACPI_Q_LNV3('R', '1', '5', TPACPI_FAN_NS), /* L13 Yoga Gen 1 */
+ TPACPI_Q_LNV3('R', '1', '0', TPACPI_FAN_NS), /* L390 */
+ TPACPI_Q_LNV3('N', '2', 'L', TPACPI_FAN_NS), /* X13 Yoga Gen 1 */
+ TPACPI_Q_LNV3('R', '0', 'T', TPACPI_FAN_NS), /* 11e Gen5 GL */
+ TPACPI_Q_LNV3('R', '1', 'D', TPACPI_FAN_NS), /* 11e Gen5 GL-R */
+ TPACPI_Q_LNV3('R', '0', 'V', TPACPI_FAN_NS), /* 11e Gen5 KL-Y */
TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */
};
@@ -9285,7 +9357,6 @@ static int mute_led_init(struct ibm_init_struct *iibm)
continue;
}
- mute_led_cdev[i].brightness = ledtrig_audio_get(i);
err = led_classdev_register(&tpacpi_pdev->dev, &mute_led_cdev[i]);
if (err < 0) {
while (i--)
@@ -11119,7 +11190,23 @@ static void tpacpi_driver_event(const unsigned int hkey_event)
else
dytc_control_amt(!dytc_amt_active);
}
-
+ if (hkey_event == TP_HKEY_EV_PROFILE_TOGGLE) {
+ switch (dytc_current_profile) {
+ case PLATFORM_PROFILE_LOW_POWER:
+ dytc_profile_set(NULL, PLATFORM_PROFILE_BALANCED);
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ dytc_profile_set(NULL, PLATFORM_PROFILE_PERFORMANCE);
+ break;
+ case PLATFORM_PROFILE_PERFORMANCE:
+ dytc_profile_set(NULL, PLATFORM_PROFILE_LOW_POWER);
+ break;
+ default:
+ pr_warn("Profile HKEY unexpected profile %d", dytc_current_profile);
+ }
+ /* Notify user space the profile changed */
+ platform_profile_notify();
+ }
}
static void hotkey_driver_event(const unsigned int scancode)
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 291f14ef67024..77244c9aa60d2 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -264,6 +264,7 @@ static const struct key_entry toshiba_acpi_keymap[] = {
{ KE_KEY, 0xb32, { KEY_NEXTSONG } },
{ KE_KEY, 0xb33, { KEY_PLAYPAUSE } },
{ KE_KEY, 0xb5a, { KEY_MEDIA } },
+ { KE_IGNORE, 0x0e00, { KEY_RESERVED } }, /* Wake from sleep */
{ KE_IGNORE, 0x1430, { KEY_RESERVED } }, /* Wake from sleep */
{ KE_IGNORE, 0x1501, { KEY_RESERVED } }, /* Output changed */
{ KE_IGNORE, 0x1502, { KEY_RESERVED } }, /* HDMI plugged/unplugged */
@@ -3523,9 +3524,10 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
(dev->kbd_mode == SCI_KBD_MODE_ON) ?
LED_FULL : LED_OFF);
break;
+ case 0x8e: /* Power button pressed */
+ break;
case 0x85: /* Unknown */
case 0x8d: /* Unknown */
- case 0x8e: /* Unknown */
case 0x94: /* Unknown */
case 0x95: /* Unknown */
default:
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 975cf24ae359a..c6a10ec2c83f6 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -1218,6 +1218,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Chuwi Vi8 dual-boot (CWI506) */
+ .driver_data = (void *)&chuwi_vi8_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
+ DMI_MATCH(DMI_BIOS_VERSION, "CHUWI2.D86JHBNR02"),
+ },
+ },
+ {
/* Chuwi Vi8 Plus (CWI519) */
.driver_data = (void *)&chuwi_vi8_plus_data,
.matches = {
diff --git a/drivers/platform/x86/wmi-bmof.c b/drivers/platform/x86/wmi-bmof.c
index 644d2fd889c08..df6f0ae6e6c79 100644
--- a/drivers/platform/x86/wmi-bmof.c
+++ b/drivers/platform/x86/wmi-bmof.c
@@ -94,6 +94,7 @@ static struct wmi_driver wmi_bmof_driver = {
.probe = wmi_bmof_probe,
.remove = wmi_bmof_remove,
.id_table = wmi_bmof_id_table,
+ .no_singleton = true,
};
module_wmi_driver(wmi_bmof_driver);
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 3c288e8f404be..1920e115da893 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -57,6 +57,8 @@ static_assert(__alignof__(struct guid_block) == 1);
enum { /* wmi_block flags */
WMI_READ_TAKES_NO_ARGS,
+ WMI_GUID_DUPLICATED,
+ WMI_NO_EVENT_DATA,
};
struct wmi_block {
@@ -88,16 +90,6 @@ static const struct acpi_device_id wmi_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, wmi_device_ids);
-/* allow duplicate GUIDs as these device drivers use struct wmi_driver */
-static const char * const allow_duplicates[] = {
- "05901221-D566-11D1-B2F0-00A0C9062910", /* wmi-bmof */
- "8A42EA14-4F2A-FD45-6422-0087F7A7E608", /* dell-wmi-ddv */
- "44FADEB1-B204-40F2-8581-394BBDC1B651", /* intel-wmi-sbl-fw-update */
- "86CCFD48-205E-4A77-9C48-2021CBEDE341", /* intel-wmi-thunderbolt */
- "F1DDEE52-063C-4784-A11E-8A06684B9B01", /* dell-smm-hwmon */
- NULL
-};
-
#define dev_to_wblock(__dev) container_of_const(__dev, struct wmi_block, dev.dev)
#define dev_to_wdev(__dev) container_of_const(__dev, struct wmi_device, dev)
@@ -132,26 +124,6 @@ static const void *find_guid_context(struct wmi_block *wblock,
return NULL;
}
-static int get_subobj_info(acpi_handle handle, const char *pathname,
- struct acpi_device_info **info)
-{
- acpi_handle subobj_handle;
- acpi_status status;
-
- status = acpi_get_handle(handle, pathname, &subobj_handle);
- if (status == AE_NOT_FOUND)
- return -ENOENT;
-
- if (ACPI_FAILURE(status))
- return -EIO;
-
- status = acpi_get_object_info(subobj_handle, info);
- if (ACPI_FAILURE(status))
- return -EIO;
-
- return 0;
-}
-
static acpi_status wmi_method_enable(struct wmi_block *wblock, bool enable)
{
struct guid_block *block;
@@ -215,6 +187,12 @@ static int wmidev_match_guid(struct device *dev, const void *data)
struct wmi_block *wblock = dev_to_wblock(dev);
const guid_t *guid = data;
+ /* Legacy GUID-based functions are restricted to only see
+ * a single WMI device for each GUID.
+ */
+ if (test_bit(WMI_GUID_DUPLICATED, &wblock->flags))
+ return 0;
+
if (guid_equal(guid, &wblock->gblock.guid))
return 1;
@@ -226,13 +204,19 @@ static int wmidev_match_notify_id(struct device *dev, const void *data)
struct wmi_block *wblock = dev_to_wblock(dev);
const u32 *notify_id = data;
+ /* Legacy GUID-based functions are restricted to only see
+ * a single WMI device for each GUID.
+ */
+ if (test_bit(WMI_GUID_DUPLICATED, &wblock->flags))
+ return 0;
+
if (wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *notify_id)
return 1;
return 0;
}
-static struct bus_type wmi_bus_type;
+static const struct bus_type wmi_bus_type;
static struct wmi_device *wmi_find_device_by_guid(const char *guid_string)
{
@@ -316,7 +300,7 @@ EXPORT_SYMBOL_GPL(wmidev_instance_count);
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
* @instance: Instance index
* @method_id: Method ID to call
- * @in: Buffer containing input for the method call
+ * @in: Mandatory buffer containing input for the method call
* @out: Empty buffer to return the method results
*
* Call an ACPI-WMI method, the caller must free @out.
@@ -346,7 +330,7 @@ EXPORT_SYMBOL_GPL(wmi_evaluate_method);
* @wdev: A wmi bus device from a driver
* @instance: Instance index
* @method_id: Method ID to call
- * @in: Buffer containing input for the method call
+ * @in: Mandatory buffer containing input for the method call
* @out: Empty buffer to return the method results
*
* Call an ACPI-WMI method, the caller must free @out.
@@ -367,26 +351,25 @@ acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance, u32 met
block = &wblock->gblock;
handle = wblock->acpi_device->handle;
+ if (!in)
+ return AE_BAD_DATA;
+
if (!(block->flags & ACPI_WMI_METHOD))
return AE_BAD_DATA;
if (block->instance_count <= instance)
return AE_BAD_PARAMETER;
- input.count = 2;
+ input.count = 3;
input.pointer = params;
+
params[0].type = ACPI_TYPE_INTEGER;
params[0].integer.value = instance;
params[1].type = ACPI_TYPE_INTEGER;
params[1].integer.value = method_id;
-
- if (in) {
- input.count = 3;
-
- params[2].type = get_param_acpi_type(wblock);
- params[2].buffer.length = in->length;
- params[2].buffer.pointer = in->pointer;
- }
+ params[2].type = get_param_acpi_type(wblock);
+ params[2].buffer.length = in->length;
+ params[2].buffer.pointer = in->pointer;
get_acpi_method_name(wblock, 'M', method);
@@ -890,6 +873,23 @@ static int wmi_dev_probe(struct device *dev)
struct wmi_driver *wdriver = drv_to_wdrv(dev->driver);
int ret = 0;
+ /* Some older WMI drivers will break if instantiated multiple times,
+ * so they are blocked from probing WMI devices with a duplicated GUID.
+ *
+ * New WMI drivers should support being instantiated multiple times.
+ */
+ if (test_bit(WMI_GUID_DUPLICATED, &wblock->flags) && !wdriver->no_singleton) {
+ dev_warn(dev, "Legacy driver %s cannot be instantiated multiple times\n",
+ dev->driver->name);
+
+ return -ENODEV;
+ }
+
+ if (wdriver->notify) {
+ if (test_bit(WMI_NO_EVENT_DATA, &wblock->flags) && !wdriver->no_notify_data)
+ return -ENODEV;
+ }
+
if (ACPI_FAILURE(wmi_method_enable(wblock, true)))
dev_warn(dev, "failed to enable device -- probing anyway\n");
@@ -931,7 +931,7 @@ static struct class wmi_bus_class = {
.name = "wmi_bus",
};
-static struct bus_type wmi_bus_type = {
+static const struct bus_type wmi_bus_type = {
.name = "wmi",
.dev_groups = wmi_groups,
.match = wmi_dev_match,
@@ -979,9 +979,10 @@ static int wmi_create_device(struct device *wmi_bus_dev,
struct wmi_block *wblock,
struct acpi_device *device)
{
- struct acpi_device_info *info;
char method[WMI_ACPI_METHOD_NAME_SIZE];
- int result;
+ struct acpi_device_info *info;
+ acpi_handle method_handle;
+ acpi_status status;
uint count;
if (wblock->gblock.flags & ACPI_WMI_EVENT) {
@@ -990,6 +991,15 @@ static int wmi_create_device(struct device *wmi_bus_dev,
}
if (wblock->gblock.flags & ACPI_WMI_METHOD) {
+ get_acpi_method_name(wblock, 'M', method);
+ if (!acpi_has_method(device->handle, method)) {
+ dev_warn(wmi_bus_dev,
+ FW_BUG "%s method block execution control method not found\n",
+ method);
+
+ return -ENXIO;
+ }
+
wblock->dev.dev.type = &wmi_type_method;
goto out_init;
}
@@ -1000,15 +1010,19 @@ static int wmi_create_device(struct device *wmi_bus_dev,
* we ignore this data block.
*/
get_acpi_method_name(wblock, 'Q', method);
- result = get_subobj_info(device->handle, method, &info);
-
- if (result) {
+ status = acpi_get_handle(device->handle, method, &method_handle);
+ if (ACPI_FAILURE(status)) {
dev_warn(wmi_bus_dev,
- "%s data block query control method not found\n",
+ FW_BUG "%s data block query control method not found\n",
method);
- return result;
+
+ return -ENXIO;
}
+ status = acpi_get_object_info(method_handle, &info);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
wblock->dev.dev.type = &wmi_type_data;
/*
@@ -1037,10 +1051,12 @@ static int wmi_create_device(struct device *wmi_bus_dev,
wblock->dev.dev.parent = wmi_bus_dev;
count = guid_count(&wblock->gblock.guid);
- if (count)
+ if (count) {
dev_set_name(&wblock->dev.dev, "%pUL-%d", &wblock->gblock.guid, count);
- else
+ set_bit(WMI_GUID_DUPLICATED, &wblock->flags);
+ } else {
dev_set_name(&wblock->dev.dev, "%pUL", &wblock->gblock.guid);
+ }
device_initialize(&wblock->dev.dev);
@@ -1067,32 +1083,6 @@ static int wmi_add_device(struct platform_device *pdev, struct wmi_device *wdev)
return device_add(&wdev->dev);
}
-static bool guid_already_parsed_for_legacy(struct acpi_device *device, const guid_t *guid)
-{
- struct wmi_block *wblock;
-
- list_for_each_entry(wblock, &wmi_block_list, list) {
- /* skip warning and register if we know the driver will use struct wmi_driver */
- for (int i = 0; allow_duplicates[i] != NULL; i++) {
- if (guid_parse_and_compare(allow_duplicates[i], guid))
- return false;
- }
- if (guid_equal(&wblock->gblock.guid, guid)) {
- /*
- * Because we historically didn't track the relationship
- * between GUIDs and ACPI nodes, we don't know whether
- * we need to suppress GUIDs that are unique on a
- * given node but duplicated across nodes.
- */
- dev_warn(&device->dev, "duplicate WMI GUID %pUL (first instance was on %s)\n",
- guid, dev_name(&wblock->acpi_device->dev));
- return true;
- }
- }
-
- return false;
-}
-
/*
* Parse the _WDG method for the GUID data blocks
*/
@@ -1101,6 +1091,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct platform_device *pdev)
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
const struct guid_block *gblock;
+ bool event_data_available;
struct wmi_block *wblock;
union acpi_object *obj;
acpi_status status;
@@ -1120,6 +1111,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct platform_device *pdev)
return -ENXIO;
}
+ event_data_available = acpi_has_method(device->handle, "_WED");
gblock = (const struct guid_block *)obj->buffer.pointer;
total = obj->buffer.length / sizeof(struct guid_block);
@@ -1129,17 +1121,14 @@ static int parse_wdg(struct device *wmi_bus_dev, struct platform_device *pdev)
continue;
}
- if (guid_already_parsed_for_legacy(device, &gblock[i].guid))
- continue;
-
wblock = kzalloc(sizeof(*wblock), GFP_KERNEL);
- if (!wblock) {
- dev_err(wmi_bus_dev, "Failed to allocate %pUL\n", &gblock[i].guid);
+ if (!wblock)
continue;
- }
wblock->acpi_device = device;
wblock->gblock = gblock[i];
+ if (gblock[i].flags & ACPI_WMI_EVENT && !event_data_available)
+ set_bit(WMI_NO_EVENT_DATA, &wblock->flags);
retval = wmi_create_device(wmi_bus_dev, wblock, device);
if (retval) {
@@ -1205,30 +1194,46 @@ acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
}
}
-static void wmi_notify_driver(struct wmi_block *wblock)
+static int wmi_get_notify_data(struct wmi_block *wblock, union acpi_object **obj)
{
- struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
struct acpi_buffer data = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status;
- if (!driver->no_notify_data) {
- status = get_event_data(wblock, &data);
- if (ACPI_FAILURE(status)) {
- dev_warn(&wblock->dev.dev, "Failed to get event data\n");
- return;
- }
+ if (test_bit(WMI_NO_EVENT_DATA, &wblock->flags)) {
+ *obj = NULL;
+ return 0;
}
- if (driver->notify)
- driver->notify(&wblock->dev, data.pointer);
+ status = get_event_data(wblock, &data);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(&wblock->dev.dev, "Failed to get event data\n");
+ return -EIO;
+ }
+
+ *obj = data.pointer;
- kfree(data.pointer);
+ return 0;
+}
+
+static void wmi_notify_driver(struct wmi_block *wblock, union acpi_object *obj)
+{
+ struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
+
+ if (!obj && !driver->no_notify_data) {
+ dev_warn(&wblock->dev.dev, "Event contains no event data\n");
+ return;
+ }
+
+ if (driver->notify)
+ driver->notify(&wblock->dev, obj);
}
static int wmi_notify_device(struct device *dev, void *data)
{
struct wmi_block *wblock = dev_to_wblock(dev);
+ union acpi_object *obj;
u32 *event = data;
+ int ret;
if (!(wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *event))
return 0;
@@ -1238,15 +1243,36 @@ static int wmi_notify_device(struct device *dev, void *data)
* Because of this the WMI driver notify handler takes precedence.
*/
if (wblock->dev.dev.driver && wblock->driver_ready) {
- wmi_notify_driver(wblock);
+ ret = wmi_get_notify_data(wblock, &obj);
+ if (ret >= 0) {
+ wmi_notify_driver(wblock, obj);
+ kfree(obj);
+ }
} else {
- if (wblock->handler)
+ if (wblock->handler) {
wblock->handler(*event, wblock->handler_data);
+ } else {
+ /* The ACPI WMI specification says that _WED should be
+ * evaluated every time a notification is received, even
+ * if no consumers are present.
+ *
+ * Some firmware implementations actually depend on this
+ * by using a queue for events which will fill up if the
+ * WMI driver core stops evaluating _WED due to missing
+ * WMI event consumers.
+ *
+ * Because of this, we need this seemingly useless call to
+ * wmi_get_notify_data(), which in turn evaluates _WED.
+ */
+ ret = wmi_get_notify_data(wblock, &obj);
+ if (ret >= 0)
+ kfree(obj);
+ }
+
}
up_read(&wblock->notify_lock);
- acpi_bus_generate_netlink_event(wblock->acpi_device->pnp.device_class,
- dev_name(&wblock->dev.dev), *event, 0);
+ acpi_bus_generate_netlink_event("wmi", acpi_dev_name(wblock->acpi_device), *event, 0);
return -EBUSY;
}
@@ -1347,7 +1373,7 @@ static int acpi_wmi_probe(struct platform_device *device)
error = parse_wdg(wmi_bus_dev, device);
if (error) {
- pr_err("Failed to parse WDG method\n");
+ dev_err(&device->dev, "Failed to parse _WDG method\n");
return error;
}
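The wmi.c change above makes the @in argument of wmidev_evaluate_method() mandatory, so ACPI method arguments are always passed explicitly. The following caller sketch is illustrative only and not part of the patch; the method ID and argument value are invented:

#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/wmi.h>

static acpi_status example_wmi_call(struct wmi_device *wdev)
{
	u32 arg = 0;	/* hypothetical method argument */
	struct acpi_buffer in = { sizeof(arg), &arg };
	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	/* passing NULL for @in now fails with AE_BAD_DATA */
	status = wmidev_evaluate_method(wdev, 0, 0x1, &in, &out);
	if (ACPI_FAILURE(status))
		return status;

	kfree(out.pointer);	/* per the kernel-doc above, the caller must free @out */
	return AE_OK;
}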
diff --git a/drivers/power/reset/as3722-poweroff.c b/drivers/power/reset/as3722-poweroff.c
index ab3350ce2d621..bb26fa6fa67ca 100644
--- a/drivers/power/reset/as3722-poweroff.c
+++ b/drivers/power/reset/as3722-poweroff.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/reboot.h>
#include <linux/slab.h>
struct as3722_poweroff {
@@ -18,22 +19,18 @@ struct as3722_poweroff {
struct as3722 *as3722;
};
-static struct as3722_poweroff *as3722_pm_poweroff;
-
-static void as3722_pm_power_off(void)
+static int as3722_pm_power_off(struct sys_off_data *data)
{
+ struct as3722_poweroff *as3722_pm_poweroff = data->cb_data;
int ret;
- if (!as3722_pm_poweroff) {
- pr_err("AS3722 poweroff is not initialised\n");
- return;
- }
-
ret = as3722_update_bits(as3722_pm_poweroff->as3722,
AS3722_RESET_CONTROL_REG, AS3722_POWER_OFF, AS3722_POWER_OFF);
if (ret < 0)
dev_err(as3722_pm_poweroff->dev,
"RESET_CONTROL_REG update failed, %d\n", ret);
+
+ return NOTIFY_DONE;
}
static int as3722_poweroff_probe(struct platform_device *pdev)
@@ -54,18 +51,14 @@ static int as3722_poweroff_probe(struct platform_device *pdev)
as3722_poweroff->as3722 = dev_get_drvdata(pdev->dev.parent);
as3722_poweroff->dev = &pdev->dev;
- as3722_pm_poweroff = as3722_poweroff;
- if (!pm_power_off)
- pm_power_off = as3722_pm_power_off;
- return 0;
-}
+ return devm_register_sys_off_handler(as3722_poweroff->dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_DEFAULT,
+ as3722_pm_power_off,
+ as3722_poweroff);
-static void as3722_poweroff_remove(struct platform_device *pdev)
-{
- if (pm_power_off == as3722_pm_power_off)
- pm_power_off = NULL;
- as3722_pm_poweroff = NULL;
+ return 0;
}
static struct platform_driver as3722_poweroff_driver = {
@@ -73,7 +66,6 @@ static struct platform_driver as3722_poweroff_driver = {
.name = "as3722-power-off",
},
.probe = as3722_poweroff_probe,
- .remove_new = as3722_poweroff_remove,
};
module_platform_driver(as3722_poweroff_driver);
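This as3722 hunk is the first of several conversions below that drop the global pm_power_off pointer and the open-coded restart notifier blocks in favour of devm_register_sys_off_handler(). A minimal sketch of the pattern, with hypothetical foo_* names, is shown here; the restart-handler conversions further down use the same call with SYS_OFF_MODE_RESTART and an explicit priority instead:

#include <linux/platform_device.h>
#include <linux/reboot.h>

struct foo_pwrc {
	struct device *dev;
};

static int foo_power_off(struct sys_off_data *data)
{
	struct foo_pwrc *pwrc = data->cb_data;	/* per-instance context, no global */

	dev_emerg(pwrc->dev, "powering off\n");
	/* ... poke the hardware here ... */

	return NOTIFY_DONE;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_pwrc *pwrc;

	pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL);
	if (!pwrc)
		return -ENOMEM;
	pwrc->dev = &pdev->dev;

	/* devm unregisters the handler on unbind, so no remove callback is needed */
	return devm_register_sys_off_handler(&pdev->dev,
					     SYS_OFF_MODE_POWER_OFF,
					     SYS_OFF_PRIO_DEFAULT,
					     foo_power_off, pwrc);
}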
diff --git a/drivers/power/reset/atc260x-poweroff.c b/drivers/power/reset/atc260x-poweroff.c
index b4aa50e9685e1..e3e4621ccb1dd 100644
--- a/drivers/power/reset/atc260x-poweroff.c
+++ b/drivers/power/reset/atc260x-poweroff.c
@@ -16,13 +16,9 @@
struct atc260x_pwrc {
struct device *dev;
struct regmap *regmap;
- struct notifier_block restart_nb;
int (*do_poweroff)(const struct atc260x_pwrc *pwrc, bool restart);
};
-/* Global variable needed only for pm_power_off */
-static struct atc260x_pwrc *atc260x_pwrc_data;
-
static int atc2603c_do_poweroff(const struct atc260x_pwrc *pwrc, bool restart)
{
int ret, deep_sleep = 0;
@@ -165,18 +161,20 @@ static int atc2609a_init(const struct atc260x_pwrc *pwrc)
return ret;
}
-static void atc260x_pwrc_pm_handler(void)
+static int atc260x_pwrc_pm_handler(struct sys_off_data *data)
{
- atc260x_pwrc_data->do_poweroff(atc260x_pwrc_data, false);
+ struct atc260x_pwrc *pwrc = data->cb_data;
+
+ pwrc->do_poweroff(pwrc, false);
WARN_ONCE(1, "Unable to power off system\n");
+
+ return NOTIFY_DONE;
}
-static int atc260x_pwrc_restart_handler(struct notifier_block *nb,
- unsigned long mode, void *cmd)
+static int atc260x_pwrc_restart_handler(struct sys_off_data *data)
{
- struct atc260x_pwrc *pwrc = container_of(nb, struct atc260x_pwrc,
- restart_nb);
+ struct atc260x_pwrc *pwrc = data->cb_data;
pwrc->do_poweroff(pwrc, true);
return NOTIFY_DONE;
@@ -194,8 +192,6 @@ static int atc260x_pwrc_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
priv->regmap = atc260x->regmap;
- priv->restart_nb.notifier_call = atc260x_pwrc_restart_handler;
- priv->restart_nb.priority = 192;
switch (atc260x->ic_type) {
case ATC2603C:
@@ -216,16 +212,20 @@ static int atc260x_pwrc_probe(struct platform_device *pdev)
if (ret)
return ret;
- platform_set_drvdata(pdev, priv);
-
- if (!pm_power_off) {
- atc260x_pwrc_data = priv;
- pm_power_off = atc260x_pwrc_pm_handler;
- } else {
- dev_warn(priv->dev, "Poweroff callback already assigned\n");
- }
+ ret = devm_register_sys_off_handler(priv->dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_DEFAULT,
+ atc260x_pwrc_pm_handler,
+ priv);
+ if (ret)
+ dev_err(priv->dev, "failed to register power-off handler: %d\n",
+ ret);
- ret = register_restart_handler(&priv->restart_nb);
+ ret = devm_register_sys_off_handler(priv->dev,
+ SYS_OFF_MODE_RESTART,
+ SYS_OFF_PRIO_HIGH,
+ atc260x_pwrc_restart_handler,
+ priv);
if (ret)
dev_err(priv->dev, "failed to register restart handler: %d\n",
ret);
@@ -233,21 +233,8 @@ static int atc260x_pwrc_probe(struct platform_device *pdev)
return ret;
}
-static void atc260x_pwrc_remove(struct platform_device *pdev)
-{
- struct atc260x_pwrc *priv = platform_get_drvdata(pdev);
-
- if (atc260x_pwrc_data == priv) {
- pm_power_off = NULL;
- atc260x_pwrc_data = NULL;
- }
-
- unregister_restart_handler(&priv->restart_nb);
-}
-
static struct platform_driver atc260x_pwrc_driver = {
.probe = atc260x_pwrc_probe,
- .remove_new = atc260x_pwrc_remove,
.driver = {
.name = "atc260x-pwrc",
},
diff --git a/drivers/power/reset/axxia-reset.c b/drivers/power/reset/axxia-reset.c
index 24946766760c3..797bf6773860e 100644
--- a/drivers/power/reset/axxia-reset.c
+++ b/drivers/power/reset/axxia-reset.c
@@ -26,11 +26,10 @@
#define SC_EFUSE_INT_STATUS 0x180c
#define EFUSE_READ_DONE (1<<31)
-static struct regmap *syscon;
-
-static int axxia_restart_handler(struct notifier_block *this,
- unsigned long mode, void *cmd)
+static int axxia_restart_handler(struct sys_off_data *data)
{
+ struct regmap *syscon = data->cb_data;
+
/* Access Key (0xab) */
regmap_write(syscon, SC_CRIT_WRITE_KEY, 0xab);
/* Select internal boot from 0xffff0000 */
@@ -44,14 +43,10 @@ static int axxia_restart_handler(struct notifier_block *this,
return NOTIFY_DONE;
}
-static struct notifier_block axxia_restart_nb = {
- .notifier_call = axxia_restart_handler,
- .priority = 128,
-};
-
static int axxia_reset_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct regmap *syscon;
int err;
syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
@@ -60,7 +55,8 @@ static int axxia_reset_probe(struct platform_device *pdev)
return PTR_ERR(syscon);
}
- err = register_restart_handler(&axxia_restart_nb);
+ err = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART,
+ 128, axxia_restart_handler, syscon);
if (err)
dev_err(dev, "cannot register restart handler (err=%d)\n", err);
diff --git a/drivers/power/reset/brcm-kona-reset.c b/drivers/power/reset/brcm-kona-reset.c
index d05728b1db097..ee3f1bb976530 100644
--- a/drivers/power/reset/brcm-kona-reset.c
+++ b/drivers/power/reset/brcm-kona-reset.c
@@ -15,8 +15,7 @@
static void __iomem *kona_reset_base;
-static int kona_reset_handler(struct notifier_block *this,
- unsigned long mode, void *cmd)
+static int kona_reset_handler(struct sys_off_data *data)
{
/*
* A soft reset is triggered by writing a 0 to bit 0 of the soft reset
@@ -31,18 +30,14 @@ static int kona_reset_handler(struct notifier_block *this,
return NOTIFY_DONE;
}
-static struct notifier_block kona_reset_nb = {
- .notifier_call = kona_reset_handler,
- .priority = 128,
-};
-
static int kona_reset_probe(struct platform_device *pdev)
{
kona_reset_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kona_reset_base))
return PTR_ERR(kona_reset_base);
- return register_restart_handler(&kona_reset_nb);
+ return devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART,
+ 128, kona_reset_handler, NULL);
}
static const struct of_device_id of_match[] = {
diff --git a/drivers/power/reset/gemini-poweroff.c b/drivers/power/reset/gemini-poweroff.c
index d309b610142ce..06d6992dec892 100644
--- a/drivers/power/reset/gemini-poweroff.c
+++ b/drivers/power/reset/gemini-poweroff.c
@@ -70,12 +70,9 @@ static irqreturn_t gemini_powerbutton_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-/* This callback needs this static local as it has void as argument */
-static struct gemini_powercon *gpw_poweroff;
-
-static void gemini_poweroff(void)
+static int gemini_poweroff(struct sys_off_data *data)
{
- struct gemini_powercon *gpw = gpw_poweroff;
+ struct gemini_powercon *gpw = data->cb_data;
u32 val;
dev_crit(gpw->dev, "Gemini power off\n");
@@ -86,6 +83,8 @@ static void gemini_poweroff(void)
val &= ~GEMINI_CTRL_ENABLE;
val |= GEMINI_CTRL_SHUTDOWN;
writel(val, gpw->base + GEMINI_PWC_CTRLREG);
+
+ return NOTIFY_DONE;
}
static int gemini_poweroff_probe(struct platform_device *pdev)
@@ -148,8 +147,11 @@ static int gemini_poweroff_probe(struct platform_device *pdev)
if (ret)
return ret;
- pm_power_off = gemini_poweroff;
- gpw_poweroff = gpw;
+ ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_DEFAULT,
+ gemini_poweroff, gpw);
+ if (ret)
+ return ret;
dev_info(dev, "Gemini poweroff driver registered\n");
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index d96d248a6e25b..c7eb6dc8e90ae 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -14,8 +14,8 @@
#include <linux/pm.h>
static void __iomem *msm_ps_hold;
-static int deassert_pshold(struct notifier_block *nb, unsigned long action,
- void *data)
+
+static int do_msm_poweroff(struct sys_off_data *data)
{
writel(0, msm_ps_hold);
mdelay(10000);
@@ -23,25 +23,18 @@ static int deassert_pshold(struct notifier_block *nb, unsigned long action,
return NOTIFY_DONE;
}
-static struct notifier_block restart_nb = {
- .notifier_call = deassert_pshold,
- .priority = 128,
-};
-
-static void do_msm_poweroff(void)
-{
- deassert_pshold(&restart_nb, 0, NULL);
-}
-
static int msm_restart_probe(struct platform_device *pdev)
{
msm_ps_hold = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(msm_ps_hold))
return PTR_ERR(msm_ps_hold);
- register_restart_handler(&restart_nb);
+ devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART,
+ 128, do_msm_poweroff, NULL);
- pm_power_off = do_msm_poweroff;
+ devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_DEFAULT, do_msm_poweroff,
+ NULL);
return 0;
}
diff --git a/drivers/power/reset/mt6323-poweroff.c b/drivers/power/reset/mt6323-poweroff.c
index 57a63c0ab7fb7..c663347547f97 100644
--- a/drivers/power/reset/mt6323-poweroff.c
+++ b/drivers/power/reset/mt6323-poweroff.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6397/rtc.h>
+#include <linux/reboot.h>
struct mt6323_pwrc {
struct device *dev;
@@ -21,11 +22,9 @@ struct mt6323_pwrc {
u32 base;
};
-static struct mt6323_pwrc *mt_pwrc;
-
-static void mt6323_do_pwroff(void)
+static int mt6323_do_pwroff(struct sys_off_data *data)
{
- struct mt6323_pwrc *pwrc = mt_pwrc;
+ struct mt6323_pwrc *pwrc = data->cb_data;
unsigned int val;
int ret;
@@ -44,6 +43,8 @@ static void mt6323_do_pwroff(void)
mdelay(1000);
WARN_ONCE(1, "Unable to power off system\n");
+
+ return NOTIFY_DONE;
}
static int mt6323_pwrc_probe(struct platform_device *pdev)
@@ -51,6 +52,7 @@ static int mt6323_pwrc_probe(struct platform_device *pdev)
struct mt6397_chip *mt6397_chip = dev_get_drvdata(pdev->dev.parent);
struct mt6323_pwrc *pwrc;
struct resource *res;
+ int ret;
pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL);
if (!pwrc)
@@ -63,19 +65,18 @@ static int mt6323_pwrc_probe(struct platform_device *pdev)
pwrc->base = res->start;
pwrc->regmap = mt6397_chip->regmap;
pwrc->dev = &pdev->dev;
- mt_pwrc = pwrc;
- pm_power_off = &mt6323_do_pwroff;
+ ret = devm_register_sys_off_handler(pwrc->dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_DEFAULT,
+ mt6323_do_pwroff,
+ pwrc);
+ if (ret)
+ return dev_err_probe(pwrc->dev, ret, "failed to register power-off handler\n");
return 0;
}
-static void mt6323_pwrc_remove(struct platform_device *pdev)
-{
- if (pm_power_off == &mt6323_do_pwroff)
- pm_power_off = NULL;
-}
-
static const struct of_device_id mt6323_pwrc_dt_match[] = {
{ .compatible = "mediatek,mt6323-pwrc" },
{},
@@ -84,7 +85,6 @@ MODULE_DEVICE_TABLE(of, mt6323_pwrc_dt_match);
static struct platform_driver mt6323_pwrc_driver = {
.probe = mt6323_pwrc_probe,
- .remove_new = mt6323_pwrc_remove,
.driver = {
.name = "mt6323-pwrc",
.of_match_table = mt6323_pwrc_dt_match,
diff --git a/drivers/power/reset/regulator-poweroff.c b/drivers/power/reset/regulator-poweroff.c
index 15160809c423a..fed4978e38580 100644
--- a/drivers/power/reset/regulator-poweroff.c
+++ b/drivers/power/reset/regulator-poweroff.c
@@ -13,18 +13,15 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/reboot.h>
#include <linux/regulator/consumer.h>
#define TIMEOUT_MS 3000
-/*
- * Hold configuration here, cannot be more than one instance of the driver
- * since pm_power_off itself is global.
- */
-static struct regulator *cpu_regulator;
-
-static void regulator_poweroff_do_poweroff(void)
+static int regulator_poweroff_do_poweroff(struct sys_off_data *data)
{
+ struct regulator *cpu_regulator = data->cb_data;
+
if (cpu_regulator && regulator_is_enabled(cpu_regulator))
regulator_force_disable(cpu_regulator);
@@ -32,30 +29,24 @@ static void regulator_poweroff_do_poweroff(void)
mdelay(TIMEOUT_MS);
WARN_ON(1);
+
+ return NOTIFY_DONE;
}
static int regulator_poweroff_probe(struct platform_device *pdev)
{
- /* If a pm_power_off function has already been added, leave it alone */
- if (pm_power_off != NULL) {
- dev_err(&pdev->dev,
- "%s: pm_power_off function already registered\n",
- __func__);
- return -EBUSY;
- }
+ struct regulator *cpu_regulator;
cpu_regulator = devm_regulator_get(&pdev->dev, "cpu");
if (IS_ERR(cpu_regulator))
return PTR_ERR(cpu_regulator);
- pm_power_off = &regulator_poweroff_do_poweroff;
- return 0;
-}
-
-static void regulator_poweroff_remove(struct platform_device *pdev)
-{
- if (pm_power_off == &regulator_poweroff_do_poweroff)
- pm_power_off = NULL;
+ /* Set this handler to low priority to not override an existing handler */
+ return devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_LOW,
+ regulator_poweroff_do_poweroff,
+ cpu_regulator);
}
static const struct of_device_id of_regulator_poweroff_match[] = {
@@ -66,7 +57,6 @@ MODULE_DEVICE_TABLE(of, of_regulator_poweroff_match);
static struct platform_driver regulator_poweroff_driver = {
.probe = regulator_poweroff_probe,
- .remove_new = regulator_poweroff_remove,
.driver = {
.name = "poweroff-regulator",
.of_match_table = of_regulator_poweroff_match,
diff --git a/drivers/power/reset/restart-poweroff.c b/drivers/power/reset/restart-poweroff.c
index f4d6004793d3a..fcd588f9ae9d3 100644
--- a/drivers/power/reset/restart-poweroff.c
+++ b/drivers/power/reset/restart-poweroff.c
@@ -14,29 +14,21 @@
#include <linux/module.h>
#include <linux/reboot.h>
-static void restart_poweroff_do_poweroff(void)
+static int restart_poweroff_do_poweroff(struct sys_off_data *data)
{
reboot_mode = REBOOT_HARD;
machine_restart(NULL);
+ return NOTIFY_DONE;
}
static int restart_poweroff_probe(struct platform_device *pdev)
{
- /* If a pm_power_off function has already been added, leave it alone */
- if (pm_power_off != NULL) {
- dev_err(&pdev->dev,
- "pm_power_off function already registered");
- return -EBUSY;
- }
-
- pm_power_off = &restart_poweroff_do_poweroff;
- return 0;
-}
-
-static void restart_poweroff_remove(struct platform_device *pdev)
-{
- if (pm_power_off == &restart_poweroff_do_poweroff)
- pm_power_off = NULL;
+ /* Set this handler to low priority to not override an existing handler */
+ return devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_LOW,
+ restart_poweroff_do_poweroff,
+ NULL);
}
static const struct of_device_id of_restart_poweroff_match[] = {
@@ -47,7 +39,6 @@ MODULE_DEVICE_TABLE(of, of_restart_poweroff_match);
static struct platform_driver restart_poweroff_driver = {
.probe = restart_poweroff_probe,
- .remove_new = restart_poweroff_remove,
.driver = {
.name = "poweroff-restart",
.of_match_table = of_restart_poweroff_match,
diff --git a/drivers/power/reset/rmobile-reset.c b/drivers/power/reset/rmobile-reset.c
index 5df9b41c68c79..7dbc51c32b0eb 100644
--- a/drivers/power/reset/rmobile-reset.c
+++ b/drivers/power/reset/rmobile-reset.c
@@ -19,12 +19,9 @@
/* Reset Control Register 2 */
#define RESCNT2_PRES 0x80000000 /* Soft power-on reset */
-static void __iomem *sysc_base2;
-
-static int rmobile_reset_handler(struct notifier_block *this,
- unsigned long mode, void *cmd)
+static int rmobile_reset_handler(struct sys_off_data *data)
{
- pr_debug("%s %lu\n", __func__, mode);
+ void __iomem *sysc_base2 = (void __iomem *)data->cb_data;
/* Let's assume we have acquired the HPB semaphore */
writel(RESCNT2_PRES, sysc_base2 + RESCNT2);
@@ -32,37 +29,27 @@ static int rmobile_reset_handler(struct notifier_block *this,
return NOTIFY_DONE;
}
-static struct notifier_block rmobile_reset_nb = {
- .notifier_call = rmobile_reset_handler,
- .priority = 192,
-};
-
static int rmobile_reset_probe(struct platform_device *pdev)
{
+ void __iomem *sysc_base2;
int error;
- sysc_base2 = of_iomap(pdev->dev.of_node, 1);
- if (!sysc_base2)
- return -ENODEV;
+ sysc_base2 = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(sysc_base2))
+ return PTR_ERR(sysc_base2);
- error = register_restart_handler(&rmobile_reset_nb);
+ error = devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_RESTART,
+ SYS_OFF_PRIO_HIGH,
+ rmobile_reset_handler,
+ (__force void *)sysc_base2);
if (error) {
dev_err(&pdev->dev,
"cannot register restart handler (err=%d)\n", error);
- goto fail_unmap;
+ return error;
}
return 0;
-
-fail_unmap:
- iounmap(sysc_base2);
- return error;
-}
-
-static void rmobile_reset_remove(struct platform_device *pdev)
-{
- unregister_restart_handler(&rmobile_reset_nb);
- iounmap(sysc_base2);
}
static const struct of_device_id rmobile_reset_of_match[] = {
@@ -73,7 +60,6 @@ MODULE_DEVICE_TABLE(of, rmobile_reset_of_match);
static struct platform_driver rmobile_reset_driver = {
.probe = rmobile_reset_probe,
- .remove_new = rmobile_reset_remove,
.driver = {
.name = "rmobile_reset",
.of_match_table = rmobile_reset_of_match,
diff --git a/drivers/power/reset/syscon-poweroff.c b/drivers/power/reset/syscon-poweroff.c
index 1b2ce7734260c..203936f4c544f 100644
--- a/drivers/power/reset/syscon-poweroff.c
+++ b/drivers/power/reset/syscon-poweroff.c
@@ -13,44 +13,56 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/reboot.h>
#include <linux/regmap.h>
-static struct regmap *map;
-static u32 offset;
-static u32 value;
-static u32 mask;
+struct syscon_poweroff_data {
+ struct regmap *map;
+ u32 offset;
+ u32 value;
+ u32 mask;
+};
-static void syscon_poweroff(void)
+static int syscon_poweroff(struct sys_off_data *off_data)
{
+ struct syscon_poweroff_data *data = off_data->cb_data;
+
/* Issue the poweroff */
- regmap_update_bits(map, offset, mask, value);
+ regmap_update_bits(data->map, data->offset, data->mask, data->value);
mdelay(1000);
pr_emerg("Unable to poweroff system\n");
+
+ return NOTIFY_DONE;
}
static int syscon_poweroff_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct syscon_poweroff_data *data;
int mask_err, value_err;
- map = syscon_regmap_lookup_by_phandle(dev->of_node, "regmap");
- if (IS_ERR(map)) {
- map = syscon_node_to_regmap(dev->parent->of_node);
- if (IS_ERR(map)) {
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->map = syscon_regmap_lookup_by_phandle(dev->of_node, "regmap");
+ if (IS_ERR(data->map)) {
+ data->map = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(data->map)) {
dev_err(dev, "unable to get syscon");
- return PTR_ERR(map);
+ return PTR_ERR(data->map);
}
}
- if (of_property_read_u32(dev->of_node, "offset", &offset)) {
+ if (of_property_read_u32(dev->of_node, "offset", &data->offset)) {
dev_err(dev, "unable to read 'offset'");
return -EINVAL;
}
- value_err = of_property_read_u32(dev->of_node, "value", &value);
- mask_err = of_property_read_u32(dev->of_node, "mask", &mask);
+ value_err = of_property_read_u32(dev->of_node, "value", &data->value);
+ mask_err = of_property_read_u32(dev->of_node, "mask", &data->mask);
if (value_err && mask_err) {
dev_err(dev, "unable to read 'value' and 'mask'");
return -EINVAL;
@@ -58,28 +70,17 @@ static int syscon_poweroff_probe(struct platform_device *pdev)
if (value_err) {
/* support old binding */
- value = mask;
- mask = 0xFFFFFFFF;
+ data->value = data->mask;
+ data->mask = 0xFFFFFFFF;
} else if (mask_err) {
/* support value without mask*/
- mask = 0xFFFFFFFF;
- }
-
- if (pm_power_off) {
- dev_err(dev, "pm_power_off already claimed for %ps",
- pm_power_off);
- return -EBUSY;
+ data->mask = 0xFFFFFFFF;
}
- pm_power_off = syscon_poweroff;
-
- return 0;
-}
-
-static void syscon_poweroff_remove(struct platform_device *pdev)
-{
- if (pm_power_off == syscon_poweroff)
- pm_power_off = NULL;
+ return devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_DEFAULT,
+ syscon_poweroff, data);
}
static const struct of_device_id syscon_poweroff_of_match[] = {
@@ -89,7 +90,6 @@ static const struct of_device_id syscon_poweroff_of_match[] = {
static struct platform_driver syscon_poweroff_driver = {
.probe = syscon_poweroff_probe,
- .remove_new = syscon_poweroff_remove,
.driver = {
.name = "syscon-poweroff",
.of_match_table = syscon_poweroff_of_match,
diff --git a/drivers/power/reset/tps65086-restart.c b/drivers/power/reset/tps65086-restart.c
index ee8e9f4b837ea..6976dbcac74fa 100644
--- a/drivers/power/reset/tps65086-restart.c
+++ b/drivers/power/reset/tps65086-restart.c
@@ -9,22 +9,14 @@
#include <linux/platform_device.h>
#include <linux/reboot.h>
-struct tps65086_restart {
- struct notifier_block handler;
- struct device *dev;
-};
-
-static int tps65086_restart_notify(struct notifier_block *this,
- unsigned long mode, void *cmd)
+static int tps65086_restart_notify(struct sys_off_data *data)
{
- struct tps65086_restart *tps65086_restart =
- container_of(this, struct tps65086_restart, handler);
- struct tps65086 *tps65086 = dev_get_drvdata(tps65086_restart->dev->parent);
+ struct tps65086 *tps65086 = data->cb_data;
int ret;
ret = regmap_write(tps65086->regmap, TPS65086_FORCESHUTDN, 1);
if (ret) {
- dev_err(tps65086_restart->dev, "%s: error writing to tps65086 pmic: %d\n",
+ dev_err(tps65086->dev, "%s: error writing to tps65086 pmic: %d\n",
__func__, ret);
return NOTIFY_DONE;
}
@@ -39,44 +31,13 @@ static int tps65086_restart_notify(struct notifier_block *this,
static int tps65086_restart_probe(struct platform_device *pdev)
{
- struct tps65086_restart *tps65086_restart;
- int ret;
-
- tps65086_restart = devm_kzalloc(&pdev->dev, sizeof(*tps65086_restart), GFP_KERNEL);
- if (!tps65086_restart)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, tps65086_restart);
-
- tps65086_restart->handler.notifier_call = tps65086_restart_notify;
- tps65086_restart->handler.priority = 192;
- tps65086_restart->dev = &pdev->dev;
-
- ret = register_restart_handler(&tps65086_restart->handler);
- if (ret) {
- dev_err(&pdev->dev, "%s: cannot register restart handler: %d\n",
- __func__, ret);
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void tps65086_restart_remove(struct platform_device *pdev)
-{
- struct tps65086_restart *tps65086_restart = platform_get_drvdata(pdev);
- int ret;
+ struct tps65086 *tps65086 = dev_get_drvdata(pdev->dev.parent);
- ret = unregister_restart_handler(&tps65086_restart->handler);
- if (ret) {
- /*
- * tps65086_restart_probe() registered the restart handler. So
- * unregistering should work fine. Checking the error code
- * shouldn't be needed, still doing it for completeness.
- */
- dev_err(&pdev->dev, "%s: cannot unregister restart handler: %d\n",
- __func__, ret);
- }
+ return devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_RESTART,
+ SYS_OFF_PRIO_HIGH,
+ tps65086_restart_notify,
+ tps65086);
}
static const struct platform_device_id tps65086_restart_id_table[] = {
@@ -90,7 +51,6 @@ static struct platform_driver tps65086_restart_driver = {
.name = "tps65086-restart",
},
.probe = tps65086_restart_probe,
- .remove_new = tps65086_restart_remove,
.id_table = tps65086_restart_id_table,
};
module_platform_driver(tps65086_restart_driver);
diff --git a/drivers/power/reset/xgene-reboot.c b/drivers/power/reset/xgene-reboot.c
index c2e5a99940d37..b5eee19bac426 100644
--- a/drivers/power/reset/xgene-reboot.c
+++ b/drivers/power/reset/xgene-reboot.c
@@ -22,17 +22,13 @@
struct xgene_reboot_context {
struct device *dev;
- void *csr;
+ void __iomem *csr;
u32 mask;
- struct notifier_block restart_handler;
};
-static int xgene_restart_handler(struct notifier_block *this,
- unsigned long mode, void *cmd)
+static int xgene_restart_handler(struct sys_off_data *data)
{
- struct xgene_reboot_context *ctx =
- container_of(this, struct xgene_reboot_context,
- restart_handler);
+ struct xgene_reboot_context *ctx = data->cb_data;
/* Issue the reboot */
writel(ctx->mask, ctx->csr);
@@ -54,23 +50,20 @@ static int xgene_reboot_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- ctx->csr = of_iomap(dev->of_node, 0);
- if (!ctx->csr) {
+ ctx->csr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctx->csr)) {
dev_err(dev, "can not map resource\n");
- return -ENODEV;
+ return PTR_ERR(ctx->csr);
}
if (of_property_read_u32(dev->of_node, "mask", &ctx->mask))
ctx->mask = 0xFFFFFFFF;
ctx->dev = dev;
- ctx->restart_handler.notifier_call = xgene_restart_handler;
- ctx->restart_handler.priority = 128;
- err = register_restart_handler(&ctx->restart_handler);
- if (err) {
- iounmap(ctx->csr);
+ err = devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART, 128,
+ xgene_restart_handler, ctx);
+ if (err)
dev_err(dev, "cannot register restart handler (err=%d)\n", err);
- }
return err;
}
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index 7905eba93dead..56f136b2d071d 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -617,8 +617,7 @@ static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
*/
static void ab8500_btemp_external_power_changed(struct power_supply *psy)
{
- class_for_each_device(power_supply_class, NULL, psy,
- ab8500_btemp_get_ext_psy_data);
+ power_supply_for_each_device(psy, ab8500_btemp_get_ext_psy_data);
}
/* ab8500 btemp driver interrupts and their respective isr */
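This hunk, together with the ab8500_chargalg/charger/fg and apm_power hunks below, replaces direct class_for_each_device() iteration over power_supply_class with the power_supply_for_each_device() helper, so drivers no longer need the class pointer. A hedged sketch of a callback for the helper, with invented example_* names, might look like this:

#include <linux/power_supply.h>

static int example_count_batteries(struct device *dev, void *data)
{
	struct power_supply *psy = dev_get_drvdata(dev);
	int *count = data;

	if (psy->desc->type == POWER_SUPPLY_TYPE_BATTERY)
		(*count)++;

	return 0;	/* a non-zero return stops the iteration */
}

static int example_battery_count(void)
{
	int count = 0;

	power_supply_for_each_device(&count, example_count_batteries);

	return count;
}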
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
index de912658facb9..55ab7a28056e5 100644
--- a/drivers/power/supply/ab8500_chargalg.c
+++ b/drivers/power/supply/ab8500_chargalg.c
@@ -1231,8 +1231,7 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
int ret;
/* Collect data from all power_supply class devices */
- class_for_each_device(power_supply_class, NULL,
- di->chargalg_psy, ab8500_chargalg_get_ext_psy_data);
+ power_supply_for_each_device(di->chargalg_psy, ab8500_chargalg_get_ext_psy_data);
ab8500_chargalg_end_of_charge(di);
ab8500_chargalg_check_temp(di);
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index d72f32c663bc0..9b34d1a60f662 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -1949,8 +1949,7 @@ static void ab8500_charger_check_vbat_work(struct work_struct *work)
struct ab8500_charger *di = container_of(work,
struct ab8500_charger, check_vbat_work.work);
- class_for_each_device(power_supply_class, NULL,
- &di->usb_chg, ab8500_charger_get_ext_psy_data);
+ power_supply_for_each_device(&di->usb_chg, ab8500_charger_get_ext_psy_data);
/* First run old_vbat is 0. */
if (di->old_vbat == 0)
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 8c593fbdd45a7..2ccaf6116c09a 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2407,8 +2407,7 @@ out:
*/
static void ab8500_fg_external_power_changed(struct power_supply *psy)
{
- class_for_each_device(power_supply_class, NULL, psy,
- ab8500_fg_get_ext_psy_data);
+ power_supply_for_each_device(psy, ab8500_fg_get_ext_psy_data);
}
/**
diff --git a/drivers/power/supply/apm_power.c b/drivers/power/supply/apm_power.c
index 9d1a7fbcaed42..8ef1b6f1f7879 100644
--- a/drivers/power/supply/apm_power.c
+++ b/drivers/power/supply/apm_power.c
@@ -79,8 +79,7 @@ static void find_main_battery(void)
main_battery = NULL;
bp.main = main_battery;
- error = class_for_each_device(power_supply_class, NULL, &bp,
- __find_main_battery);
+ error = power_supply_for_each_device(&bp, __find_main_battery);
if (error) {
main_battery = bp.main;
return;
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index e23308ad4cc79..dae7e5cfc54e1 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -50,20 +50,24 @@ struct axp_data {
const char * const *irq_names;
unsigned int num_irq_names;
const int *curr_lim_table;
+ int curr_lim_table_size;
struct reg_field curr_lim_fld;
struct reg_field vbus_valid_bit;
struct reg_field vbus_mon_bit;
struct reg_field usb_bc_en_bit;
+ struct reg_field usb_bc_det_fld;
struct reg_field vbus_disable_bit;
bool vbus_needs_polling: 1;
};
struct axp20x_usb_power {
+ struct device *dev;
struct regmap *regmap;
struct regmap_field *curr_lim_fld;
struct regmap_field *vbus_valid_bit;
struct regmap_field *vbus_mon_bit;
struct regmap_field *usb_bc_en_bit;
+ struct regmap_field *usb_bc_det_fld;
struct regmap_field *vbus_disable_bit;
struct power_supply *supply;
const struct axp_data *axp_data;
@@ -115,6 +119,15 @@ static void axp20x_usb_power_poll_vbus(struct work_struct *work)
if (val != power->old_status)
power_supply_changed(power->supply);
+ if (power->usb_bc_en_bit && (val & AXP20X_PWR_STATUS_VBUS_PRESENT) !=
+ (power->old_status & AXP20X_PWR_STATUS_VBUS_PRESENT)) {
+ dev_dbg(power->dev, "Cable status changed, re-enabling USB BC");
+ ret = regmap_field_write(power->usb_bc_en_bit, 1);
+ if (ret)
+ dev_err(power->dev, "failed to enable USB BC: errno %d",
+ ret);
+ }
+
power->old_status = val;
power->online = val & AXP20X_PWR_STATUS_VBUS_USED;
@@ -123,6 +136,37 @@ out:
mod_delayed_work(system_power_efficient_wq, &power->vbus_detect, DEBOUNCE_TIME);
}
+static int axp20x_get_usb_type(struct axp20x_usb_power *power,
+ union power_supply_propval *val)
+{
+ unsigned int reg;
+ int ret;
+
+ if (!power->usb_bc_det_fld)
+ return -EINVAL;
+
+ ret = regmap_field_read(power->usb_bc_det_fld, &reg);
+ if (ret)
+ return ret;
+
+ switch (reg) {
+ case 1:
+ val->intval = POWER_SUPPLY_USB_TYPE_SDP;
+ break;
+ case 2:
+ val->intval = POWER_SUPPLY_USB_TYPE_CDP;
+ break;
+ case 3:
+ val->intval = POWER_SUPPLY_USB_TYPE_DCP;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
static int axp20x_usb_power_get_property(struct power_supply *psy,
enum power_supply_property psp, union power_supply_propval *val)
{
@@ -160,12 +204,16 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
val->intval = ret * 1700; /* 1 step = 1.7 mV */
return 0;
- case POWER_SUPPLY_PROP_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
ret = regmap_field_read(power->curr_lim_fld, &v);
if (ret)
return ret;
- val->intval = power->axp_data->curr_lim_table[v];
+ if (v < power->axp_data->curr_lim_table_size)
+ val->intval = power->axp_data->curr_lim_table[v];
+ else
+ val->intval = power->axp_data->curr_lim_table[
+ power->axp_data->curr_lim_table_size - 1];
return 0;
case POWER_SUPPLY_PROP_CURRENT_NOW:
if (IS_ENABLED(CONFIG_AXP20X_ADC)) {
@@ -189,6 +237,9 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
val->intval = ret * 375; /* 1 step = 0.375 mA */
return 0;
+
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ return axp20x_get_usb_type(power, val);
default:
break;
}
@@ -256,19 +307,37 @@ static int axp20x_usb_power_set_voltage_min(struct axp20x_usb_power *power,
return -EINVAL;
}
-static int axp20x_usb_power_set_current_max(struct axp20x_usb_power *power, int intval)
+static int axp20x_usb_power_set_input_current_limit(struct axp20x_usb_power *power,
+ int intval)
{
- const unsigned int max = GENMASK(power->axp_data->curr_lim_fld.msb,
- power->axp_data->curr_lim_fld.lsb);
+ int ret;
+ unsigned int reg;
+ const unsigned int max = power->axp_data->curr_lim_table_size;
if (intval == -1)
return -EINVAL;
- for (unsigned int i = 0; i <= max; ++i)
- if (power->axp_data->curr_lim_table[i] == intval)
- return regmap_field_write(power->curr_lim_fld, i);
+ /*
+ * BC1.2 detection can cause a race condition if we try to set a current
+ * limit while it's in progress. When it finishes, it will overwrite the
+ * current limit we just set.
+ */
+ if (power->usb_bc_en_bit) {
+ dev_dbg(power->dev,
+ "disabling BC1.2 detection because current limit was set");
+ ret = regmap_field_write(power->usb_bc_en_bit, 0);
+ if (ret)
+ return ret;
+ }
+
+ for (reg = max - 1; reg > 0; reg--)
+ if (power->axp_data->curr_lim_table[reg] <= intval)
+ break;
+
+ dev_dbg(power->dev, "setting input current limit reg to %d (%d uA), requested %d uA",
+ reg, power->axp_data->curr_lim_table[reg], intval);
- return -EINVAL;
+ return regmap_field_write(power->curr_lim_fld, reg);
}
static int axp20x_usb_power_set_property(struct power_supply *psy,
@@ -287,8 +356,8 @@ static int axp20x_usb_power_set_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_MIN:
return axp20x_usb_power_set_voltage_min(power, val->intval);
- case POWER_SUPPLY_PROP_CURRENT_MAX:
- return axp20x_usb_power_set_current_max(power, val->intval);
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return axp20x_usb_power_set_input_current_limit(power, val->intval);
default:
return -EINVAL;
@@ -313,7 +382,7 @@ static int axp20x_usb_power_prop_writeable(struct power_supply *psy,
return power->vbus_disable_bit != NULL;
return psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
- psp == POWER_SUPPLY_PROP_CURRENT_MAX;
+ psp == POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT;
}
static enum power_supply_property axp20x_usb_power_properties[] = {
@@ -322,7 +391,7 @@ static enum power_supply_property axp20x_usb_power_properties[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_VOLTAGE_MIN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
POWER_SUPPLY_PROP_CURRENT_NOW,
};
@@ -331,7 +400,23 @@ static enum power_supply_property axp22x_usb_power_properties[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_VOLTAGE_MIN,
- POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+};
+
+static enum power_supply_property axp813_usb_power_properties[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_USB_TYPE,
+};
+
+static enum power_supply_usb_type axp813_usb_types[] = {
+ POWER_SUPPLY_USB_TYPE_SDP,
+ POWER_SUPPLY_USB_TYPE_DCP,
+ POWER_SUPPLY_USB_TYPE_CDP,
+ POWER_SUPPLY_USB_TYPE_UNKNOWN,
};
static const struct power_supply_desc axp20x_usb_power_desc = {
@@ -354,6 +439,18 @@ static const struct power_supply_desc axp22x_usb_power_desc = {
.set_property = axp20x_usb_power_set_property,
};
+static const struct power_supply_desc axp813_usb_power_desc = {
+ .name = "axp20x-usb",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = axp813_usb_power_properties,
+ .num_properties = ARRAY_SIZE(axp813_usb_power_properties),
+ .property_is_writeable = axp20x_usb_power_prop_writeable,
+ .get_property = axp20x_usb_power_get_property,
+ .set_property = axp20x_usb_power_set_property,
+ .usb_types = axp813_usb_types,
+ .num_usb_types = ARRAY_SIZE(axp813_usb_types),
+};
+
static const char * const axp20x_irq_names[] = {
"VBUS_PLUGIN",
"VBUS_REMOVAL",
@@ -388,10 +485,15 @@ static int axp221_usb_curr_lim_table[] = {
};
static int axp813_usb_curr_lim_table[] = {
+ 100000,
+ 500000,
900000,
1500000,
2000000,
2500000,
+ 3000000,
+ 3500000,
+ 4000000,
};
static const struct axp_data axp192_data = {
@@ -399,6 +501,7 @@ static const struct axp_data axp192_data = {
.irq_names = axp20x_irq_names,
.num_irq_names = ARRAY_SIZE(axp20x_irq_names),
.curr_lim_table = axp192_usb_curr_lim_table,
+ .curr_lim_table_size = ARRAY_SIZE(axp192_usb_curr_lim_table),
.curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
.vbus_valid_bit = REG_FIELD(AXP192_USB_OTG_STATUS, 2, 2),
.vbus_mon_bit = REG_FIELD(AXP20X_VBUS_MON, 3, 3),
@@ -409,6 +512,7 @@ static const struct axp_data axp202_data = {
.irq_names = axp20x_irq_names,
.num_irq_names = ARRAY_SIZE(axp20x_irq_names),
.curr_lim_table = axp20x_usb_curr_lim_table,
+ .curr_lim_table_size = ARRAY_SIZE(axp20x_usb_curr_lim_table),
.curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
.vbus_valid_bit = REG_FIELD(AXP20X_USB_OTG_STATUS, 2, 2),
.vbus_mon_bit = REG_FIELD(AXP20X_VBUS_MON, 3, 3),
@@ -419,6 +523,7 @@ static const struct axp_data axp221_data = {
.irq_names = axp22x_irq_names,
.num_irq_names = ARRAY_SIZE(axp22x_irq_names),
.curr_lim_table = axp221_usb_curr_lim_table,
+ .curr_lim_table_size = ARRAY_SIZE(axp221_usb_curr_lim_table),
.curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
.vbus_needs_polling = true,
};
@@ -428,17 +533,20 @@ static const struct axp_data axp223_data = {
.irq_names = axp22x_irq_names,
.num_irq_names = ARRAY_SIZE(axp22x_irq_names),
.curr_lim_table = axp20x_usb_curr_lim_table,
+ .curr_lim_table_size = ARRAY_SIZE(axp20x_usb_curr_lim_table),
.curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
.vbus_needs_polling = true,
};
static const struct axp_data axp813_data = {
- .power_desc = &axp22x_usb_power_desc,
+ .power_desc = &axp813_usb_power_desc,
.irq_names = axp22x_irq_names,
.num_irq_names = ARRAY_SIZE(axp22x_irq_names),
.curr_lim_table = axp813_usb_curr_lim_table,
- .curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
+ .curr_lim_table_size = ARRAY_SIZE(axp813_usb_curr_lim_table),
+ .curr_lim_fld = REG_FIELD(AXP22X_CHRG_CTRL3, 4, 7),
.usb_bc_en_bit = REG_FIELD(AXP288_BC_GLOBAL, 0, 0),
+ .usb_bc_det_fld = REG_FIELD(AXP288_BC_DET_STAT, 5, 7),
.vbus_disable_bit = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 7, 7),
.vbus_needs_polling = true,
};
@@ -558,6 +666,7 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, power);
+ power->dev = &pdev->dev;
power->axp_data = axp_data;
power->regmap = axp20x->regmap;
power->num_irqs = axp_data->num_irq_names;
@@ -586,6 +695,12 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
return ret;
ret = axp20x_regmap_field_alloc_optional(&pdev->dev, power->regmap,
+ axp_data->usb_bc_det_fld,
+ &power->usb_bc_det_fld);
+ if (ret)
+ return ret;
+
+ ret = axp20x_regmap_field_alloc_optional(&pdev->dev, power->regmap,
axp_data->vbus_disable_bit,
&power->vbus_disable_bit);
if (ret)
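With the change above, writing POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT no longer requires an exact match against the current-limit table: the setter walks the table downwards and selects the largest supported limit that does not exceed the request (for the axp813 table, a request of 1800000 uA therefore programs 1500000 uA). A standalone sketch of that rounding, mirroring the loop in the hunk:

/* Illustrative only: returns the register index for a requested limit in uA */
static unsigned int example_pick_limit(const int *table, unsigned int size, int req_ua)
{
	unsigned int reg;

	for (reg = size - 1; reg > 0; reg--)
		if (table[reg] <= req_ua)
			break;

	return reg;	/* index 0 acts as the floor when nothing smaller matches */
}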
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 3be6f3b10ea42..95d9a35243c2d 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -550,18 +550,20 @@ static const struct dmi_system_id axp288_quirks[] = {
.driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
},
{
- /* Intel Cherry Trail Compute Stick, Windows version */
+ /* Intel Bay Trail Compute Stick */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
- DMI_MATCH(DMI_PRODUCT_NAME, "STK1AW32SC"),
+ /* Partial match for the STCK1A32WFC, STCK1A32FC and STCK1A8LFC variants */
+ DMI_MATCH(DMI_PRODUCT_NAME, "STCK1A"),
},
.driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
},
{
- /* Intel Cherry Trail Compute Stick, version without an OS */
+ /* Intel Cherry Trail Compute Stick */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
- DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"),
+ /* Partial match for STK1AW32SC and STK1A32SC variants */
+ DMI_MATCH(DMI_PRODUCT_NAME, "STK1A"),
},
.driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
},
@@ -600,6 +602,14 @@ static const struct dmi_system_id axp288_quirks[] = {
.driver_data = NULL,
},
{
+ /* Radxa ROCK Pi X Single Board Computer */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "ROCK Pi X"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "Radxa"),
+ },
+ .driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
+ },
+ {
/*
* Various Ace PC/Meegopad/MinisForum/Wintel Mini-PCs/HDMI-sticks
* This entry must be last because it is generic, this allows
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 6a4798a62588b..25e28dac900de 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -991,6 +991,7 @@ static enum power_supply_property bq2415x_power_supply_props[] = {
/* TODO: maybe add more power supply properties */
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_ONLINE,
};
static int bq2415x_power_supply_get_property(struct power_supply *psy,
@@ -1017,6 +1018,15 @@ static int bq2415x_power_supply_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = bq->model;
break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ /* VBUS is present for all charging and fault states,
+ * except the 'Ready' state.
+ */
+ ret = bq2415x_exec_command(bq, BQ2415X_CHARGE_STATUS);
+ if (ret < 0)
+ return ret;
+ val->intval = ret > 0;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 1c4a9d1377442..abca568344686 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1595,17 +1595,24 @@ static inline int bq27xxx_battery_read_fcc(struct bq27xxx_device_info *di)
* Return the Design Capacity in µAh
* Or < 0 if something fails.
*/
-static int bq27xxx_battery_read_dcap(struct bq27xxx_device_info *di)
+static int bq27xxx_battery_read_dcap(struct bq27xxx_device_info *di,
+ union power_supply_propval *val)
{
int dcap;
+ /* We only have to read charge design full once */
+ if (di->charge_design_full > 0) {
+ val->intval = di->charge_design_full;
+ return 0;
+ }
+
if (di->opts & BQ27XXX_O_ZERO)
dcap = bq27xxx_read(di, BQ27XXX_REG_DCAP, true);
else
dcap = bq27xxx_read(di, BQ27XXX_REG_DCAP, false);
if (dcap < 0) {
- dev_dbg(di->dev, "error reading initial last measured discharge\n");
+ dev_dbg(di->dev, "error reading design capacity\n");
return dcap;
}
@@ -1614,7 +1621,12 @@ static int bq27xxx_battery_read_dcap(struct bq27xxx_device_info *di)
else
dcap *= 1000;
- return dcap;
+ /* Save for later reads */
+ di->charge_design_full = dcap;
+
+ val->intval = dcap;
+
+ return 0;
}
/*
@@ -1816,17 +1828,14 @@ static int bq27xxx_battery_current_and_status(
val_curr->intval = curr;
if (val_status) {
- if (curr > 0) {
+ if (bq27xxx_battery_is_full(di, flags))
+ val_status->intval = POWER_SUPPLY_STATUS_FULL;
+ else if (curr > 0)
val_status->intval = POWER_SUPPLY_STATUS_CHARGING;
- } else if (curr < 0) {
+ else if (curr < 0)
val_status->intval = POWER_SUPPLY_STATUS_DISCHARGING;
- } else {
- if (bq27xxx_battery_is_full(di, flags))
- val_status->intval = POWER_SUPPLY_STATUS_FULL;
- else
- val_status->intval =
- POWER_SUPPLY_STATUS_NOT_CHARGING;
- }
+ else
+ val_status->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
}
return 0;
@@ -1865,10 +1874,6 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
*/
if (!(di->opts & BQ27XXX_O_ZERO))
bq27xxx_battery_current_and_status(di, NULL, &status, &cache);
-
- /* We only have to read charge design full once */
- if (di->charge_design_full <= 0)
- di->charge_design_full = bq27xxx_battery_read_dcap(di);
}
if ((di->cache.capacity != cache.capacity) ||
@@ -2062,7 +2067,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
ret = bq27xxx_simple_value(di->cache.charge_full, val);
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- ret = bq27xxx_simple_value(di->charge_design_full, val);
+ ret = bq27xxx_battery_read_dcap(di, val);
break;
/*
* TODO: Implement these to make registers set from
@@ -2101,6 +2106,13 @@ static void bq27xxx_external_power_changed(struct power_supply *psy)
mod_delayed_work(system_wq, &di->work, HZ / 2);
}
+static void bq27xxx_battery_mutex_destroy(void *data)
+{
+ struct mutex *lock = data;
+
+ mutex_destroy(lock);
+}
+
int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
{
struct power_supply_desc *psy_desc;
@@ -2108,9 +2120,14 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
.of_node = di->dev->of_node,
.drv_data = di,
};
+ int ret;
INIT_DELAYED_WORK(&di->work, bq27xxx_battery_poll);
mutex_init(&di->lock);
+ ret = devm_add_action_or_reset(di->dev, bq27xxx_battery_mutex_destroy,
+ &di->lock);
+ if (ret)
+ return ret;
di->regs = bq27xxx_chip_data[di->chip].regs;
di->unseal_key = bq27xxx_chip_data[di->chip].unseal_key;
@@ -2128,7 +2145,7 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
psy_desc->get_property = bq27xxx_battery_get_property;
psy_desc->external_power_changed = bq27xxx_external_power_changed;
- di->bat = power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg);
+ di->bat = devm_power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg);
if (IS_ERR(di->bat))
return dev_err_probe(di->dev, PTR_ERR(di->bat),
"failed to register battery\n");
@@ -2156,9 +2173,6 @@ void bq27xxx_battery_teardown(struct bq27xxx_device_info *di)
mutex_unlock(&di->lock);
cancel_delayed_work_sync(&di->work);
-
- power_supply_unregister(di->bat);
- mutex_destroy(&di->lock);
}
EXPORT_SYMBOL_GPL(bq27xxx_battery_teardown);
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 9910c600743eb..c1737f964840a 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -13,8 +13,7 @@
#include <linux/power/bq27xxx_battery.h>
-static DEFINE_IDR(battery_id);
-static DEFINE_MUTEX(battery_mutex);
+static DEFINE_IDA(battery_id);
static irqreturn_t bq27xxx_battery_irq_handler_thread(int irq, void *data)
{
@@ -136,30 +135,39 @@ static int bq27xxx_battery_i2c_bulk_write(struct bq27xxx_device_info *di,
return 0;
}
+static void bq27xxx_battery_i2c_devm_ida_free(void *data)
+{
+ int num = (long)data;
+
+ ida_free(&battery_id, num);
+}
+
static int bq27xxx_battery_i2c_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct bq27xxx_device_info *di;
int ret;
char *name;
- int num;
+ long num;
/* Get new ID for the new battery device */
- mutex_lock(&battery_mutex);
- num = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
- mutex_unlock(&battery_mutex);
+ num = ida_alloc(&battery_id, GFP_KERNEL);
if (num < 0)
return num;
+ ret = devm_add_action_or_reset(&client->dev,
+ bq27xxx_battery_i2c_devm_ida_free,
+ (void *)num);
+ if (ret)
+ return ret;
- name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s-%d", id->name, num);
+ name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s-%ld", id->name, num);
if (!name)
- goto err_mem;
+ return -ENOMEM;
di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL);
if (!di)
- goto err_mem;
+ return -ENOMEM;
- di->id = num;
di->dev = &client->dev;
di->chip = id->driver_data;
di->name = name;
@@ -171,7 +179,7 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client)
ret = bq27xxx_battery_setup(di);
if (ret)
- goto err_failed;
+ return ret;
/* Schedule a polling after about 1 min */
schedule_delayed_work(&di->work, 60 * HZ);
@@ -188,21 +196,11 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client)
"Unable to register IRQ %d error %d\n",
client->irq, ret);
bq27xxx_battery_teardown(di);
- goto err_failed;
+ return ret;
}
}
return 0;
-
-err_mem:
- ret = -ENOMEM;
-
-err_failed:
- mutex_lock(&battery_mutex);
- idr_remove(&battery_id, num);
- mutex_unlock(&battery_mutex);
-
- return ret;
}
static void bq27xxx_battery_i2c_remove(struct i2c_client *client)
@@ -213,10 +211,6 @@ static void bq27xxx_battery_i2c_remove(struct i2c_client *client)
free_irq(client->irq, di);
bq27xxx_battery_teardown(di);
-
- mutex_lock(&battery_mutex);
- idr_remove(&battery_id, di->id);
- mutex_unlock(&battery_mutex);
}
static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
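The I2C probe conversion above swaps the IDR-plus-mutex pair for an IDA, which does its own locking, and hands the allocated ID to a devm action so every later failure path releases it automatically. A condensed sketch of that allocation step, names hypothetical:

	#include <linux/device.h>
	#include <linux/idr.h>

	static DEFINE_IDA(foo_ida);

	static void foo_ida_free(void *data)
	{
		ida_free(&foo_ida, (long)data);
	}

	static int foo_alloc_id(struct device *dev)
	{
		long num;
		int ret;

		num = ida_alloc(&foo_ida, GFP_KERNEL);	/* smallest free ID >= 0 */
		if (num < 0)
			return num;

		/* Free the ID on unbind or on any subsequent probe failure */
		ret = devm_add_action_or_reset(dev, foo_ida_free, (void *)num);
		if (ret)
			return ret;

		return num;
	}

Passing the ID through the action's data pointer, as the patch does, avoids allocating a separate cookie just to remember the number.
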
diff --git a/drivers/power/supply/da9030_battery.c b/drivers/power/supply/da9030_battery.c
index 581cf956d2d25..04e0f4162d42b 100644
--- a/drivers/power/supply/da9030_battery.c
+++ b/drivers/power/supply/da9030_battery.c
@@ -530,8 +530,9 @@ static int da9030_battery_probe(struct platform_device *pdev)
da9030_battery_setup_psy(charger);
psy_cfg.drv_data = charger;
- charger->psy = power_supply_register(&pdev->dev, &charger->psy_desc,
- &psy_cfg);
+ charger->psy = devm_power_supply_register(&pdev->dev,
+ &charger->psy_desc,
+ &psy_cfg);
if (IS_ERR(charger->psy)) {
ret = PTR_ERR(charger->psy);
goto err_ps_register;
@@ -563,7 +564,6 @@ static void da9030_battery_remove(struct platform_device *dev)
DA9030_EVENT_CHIOVER | DA9030_EVENT_TBAT);
cancel_delayed_work_sync(&charger->work);
da9030_set_charge(charger, 0);
- power_supply_unregister(charger->psy);
}
static struct platform_driver da903x_battery_driver = {
diff --git a/drivers/power/supply/da9052-battery.c b/drivers/power/supply/da9052-battery.c
index 6f7c58a41e918..0d84c42c624e0 100644
--- a/drivers/power/supply/da9052-battery.c
+++ b/drivers/power/supply/da9052-battery.c
@@ -622,7 +622,7 @@ static s32 da9052_bat_probe(struct platform_device *pdev)
}
}
- bat->psy = power_supply_register(&pdev->dev, &psy_desc, &psy_cfg);
+ bat->psy = devm_power_supply_register(&pdev->dev, &psy_desc, &psy_cfg);
if (IS_ERR(bat->psy)) {
ret = PTR_ERR(bat->psy);
goto err;
@@ -644,8 +644,6 @@ static void da9052_bat_remove(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++)
da9052_free_irq(bat->da9052, da9052_bat_irq_bits[i], bat);
-
- power_supply_unregister(bat->psy);
}
static struct platform_driver da9052_bat_driver = {
diff --git a/drivers/power/supply/da9150-charger.c b/drivers/power/supply/da9150-charger.c
index 37db9e4ed7f30..b13cecd84f589 100644
--- a/drivers/power/supply/da9150-charger.c
+++ b/drivers/power/supply/da9150-charger.c
@@ -521,42 +521,30 @@ static int da9150_charger_probe(struct platform_device *pdev)
charger->dev = dev;
/* Acquire ADC channels */
- charger->ibus_chan = iio_channel_get(dev, "CHAN_IBUS");
- if (IS_ERR(charger->ibus_chan)) {
- ret = PTR_ERR(charger->ibus_chan);
- goto ibus_chan_fail;
- }
+ charger->ibus_chan = devm_iio_channel_get(dev, "CHAN_IBUS");
+ if (IS_ERR(charger->ibus_chan))
+ return PTR_ERR(charger->ibus_chan);
- charger->vbus_chan = iio_channel_get(dev, "CHAN_VBUS");
- if (IS_ERR(charger->vbus_chan)) {
- ret = PTR_ERR(charger->vbus_chan);
- goto vbus_chan_fail;
- }
+ charger->vbus_chan = devm_iio_channel_get(dev, "CHAN_VBUS");
+ if (IS_ERR(charger->vbus_chan))
+ return PTR_ERR(charger->vbus_chan);
- charger->tjunc_chan = iio_channel_get(dev, "CHAN_TJUNC");
- if (IS_ERR(charger->tjunc_chan)) {
- ret = PTR_ERR(charger->tjunc_chan);
- goto tjunc_chan_fail;
- }
+ charger->tjunc_chan = devm_iio_channel_get(dev, "CHAN_TJUNC");
+ if (IS_ERR(charger->tjunc_chan))
+ return PTR_ERR(charger->tjunc_chan);
- charger->vbat_chan = iio_channel_get(dev, "CHAN_VBAT");
- if (IS_ERR(charger->vbat_chan)) {
- ret = PTR_ERR(charger->vbat_chan);
- goto vbat_chan_fail;
- }
+ charger->vbat_chan = devm_iio_channel_get(dev, "CHAN_VBAT");
+ if (IS_ERR(charger->vbat_chan))
+ return PTR_ERR(charger->vbat_chan);
/* Register power supplies */
- charger->usb = power_supply_register(dev, &usb_desc, NULL);
- if (IS_ERR(charger->usb)) {
- ret = PTR_ERR(charger->usb);
- goto usb_fail;
- }
+ charger->usb = devm_power_supply_register(dev, &usb_desc, NULL);
+ if (IS_ERR(charger->usb))
+ return PTR_ERR(charger->usb);
- charger->battery = power_supply_register(dev, &battery_desc, NULL);
- if (IS_ERR(charger->battery)) {
- ret = PTR_ERR(charger->battery);
- goto battery_fail;
- }
+ charger->battery = devm_power_supply_register(dev, &battery_desc, NULL);
+ if (IS_ERR(charger->battery))
+ return PTR_ERR(charger->battery);
/* Get initial online supply */
reg = da9150_reg_read(da9150, DA9150_STATUS_H);
@@ -616,22 +604,7 @@ tjunc_irq_fail:
chg_irq_fail:
if (!IS_ERR_OR_NULL(charger->usb_phy))
usb_unregister_notifier(charger->usb_phy, &charger->otg_nb);
-battery_fail:
- power_supply_unregister(charger->usb);
-usb_fail:
- iio_channel_release(charger->vbat_chan);
-
-vbat_chan_fail:
- iio_channel_release(charger->tjunc_chan);
-
-tjunc_chan_fail:
- iio_channel_release(charger->vbus_chan);
-
-vbus_chan_fail:
- iio_channel_release(charger->ibus_chan);
-
-ibus_chan_fail:
return ret;
}
@@ -656,15 +629,6 @@ static void da9150_charger_remove(struct platform_device *pdev)
if (!IS_ERR_OR_NULL(charger->usb_phy))
usb_unregister_notifier(charger->usb_phy, &charger->otg_nb);
cancel_work_sync(&charger->otg_work);
-
- power_supply_unregister(charger->battery);
- power_supply_unregister(charger->usb);
-
- /* Release ADC channels */
- iio_channel_release(charger->ibus_chan);
- iio_channel_release(charger->vbus_chan);
- iio_channel_release(charger->tjunc_chan);
- iio_channel_release(charger->vbat_chan);
}
static struct platform_driver da9150_charger_driver = {
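da9150, and most of the drivers that follow, apply the same two conversions: devm_iio_channel_get() and devm_power_supply_register() let each acquisition return its error directly, so the unwind labels and the remove-time cleanup disappear. A stripped-down probe illustrating the shape, with hypothetical foo_ names and the property list reduced to one entry for brevity:

	#include <linux/iio/consumer.h>
	#include <linux/platform_device.h>
	#include <linux/power_supply.h>

	struct foo_charger {
		struct iio_channel *vbus_chan;
		struct power_supply *usb;
	};

	static enum power_supply_property foo_props[] = {
		POWER_SUPPLY_PROP_ONLINE,
	};

	static int foo_get_property(struct power_supply *psy,
				    enum power_supply_property psp,
				    union power_supply_propval *val)
	{
		val->intval = 1;
		return 0;
	}

	static const struct power_supply_desc foo_usb_desc = {
		.name		= "foo-usb",
		.type		= POWER_SUPPLY_TYPE_USB,
		.properties	= foo_props,
		.num_properties	= ARRAY_SIZE(foo_props),
		.get_property	= foo_get_property,
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct foo_charger *charger;

		charger = devm_kzalloc(dev, sizeof(*charger), GFP_KERNEL);
		if (!charger)
			return -ENOMEM;

		/* Both resources are released by the driver core on unbind */
		charger->vbus_chan = devm_iio_channel_get(dev, "CHAN_VBUS");
		if (IS_ERR(charger->vbus_chan))
			return PTR_ERR(charger->vbus_chan);

		charger->usb = devm_power_supply_register(dev, &foo_usb_desc, NULL);
		return PTR_ERR_OR_ZERO(charger->usb);
	}
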
diff --git a/drivers/power/supply/ds2760_battery.c b/drivers/power/supply/ds2760_battery.c
index 40fba31be1744..7cf4ea06b5003 100644
--- a/drivers/power/supply/ds2760_battery.c
+++ b/drivers/power/supply/ds2760_battery.c
@@ -739,7 +739,7 @@ static int w1_ds2760_add_slave(struct w1_slave *sl)
if (current_accum)
ds2760_battery_set_current_accum(di, current_accum);
- di->bat = power_supply_register(dev, &di->bat_desc, &psy_cfg);
+ di->bat = devm_power_supply_register(dev, &di->bat_desc, &psy_cfg);
if (IS_ERR(di->bat)) {
dev_err(di->dev, "failed to register battery\n");
retval = PTR_ERR(di->bat);
@@ -762,7 +762,6 @@ static int w1_ds2760_add_slave(struct w1_slave *sl)
goto success;
workqueue_failed:
- power_supply_unregister(di->bat);
batt_failed:
di_alloc_failed:
success:
@@ -777,7 +776,6 @@ static void w1_ds2760_remove_slave(struct w1_slave *sl)
cancel_delayed_work_sync(&di->monitor_work);
cancel_delayed_work_sync(&di->set_charged_work);
destroy_workqueue(di->monitor_wqueue);
- power_supply_unregister(di->bat);
}
#ifdef CONFIG_OF
diff --git a/drivers/power/supply/goldfish_battery.c b/drivers/power/supply/goldfish_battery.c
index 8bb645ad1e5d5..479195e35d734 100644
--- a/drivers/power/supply/goldfish_battery.c
+++ b/drivers/power/supply/goldfish_battery.c
@@ -232,31 +232,22 @@ static int goldfish_battery_probe(struct platform_device *pdev)
psy_cfg.drv_data = data;
- data->ac = power_supply_register(&pdev->dev, &ac_desc, &psy_cfg);
+ data->ac = devm_power_supply_register(&pdev->dev,
+ &ac_desc,
+ &psy_cfg);
if (IS_ERR(data->ac))
return PTR_ERR(data->ac);
- data->battery = power_supply_register(&pdev->dev, &battery_desc,
- &psy_cfg);
- if (IS_ERR(data->battery)) {
- power_supply_unregister(data->ac);
+ data->battery = devm_power_supply_register(&pdev->dev,
+ &battery_desc,
+ &psy_cfg);
+ if (IS_ERR(data->battery))
return PTR_ERR(data->battery);
- }
-
- platform_set_drvdata(pdev, data);
GOLDFISH_BATTERY_WRITE(data, BATTERY_INT_ENABLE, BATTERY_INT_MASK);
return 0;
}
-static void goldfish_battery_remove(struct platform_device *pdev)
-{
- struct goldfish_battery_data *data = platform_get_drvdata(pdev);
-
- power_supply_unregister(data->battery);
- power_supply_unregister(data->ac);
-}
-
static const struct of_device_id goldfish_battery_of_match[] = {
{ .compatible = "google,goldfish-battery", },
{},
@@ -273,7 +264,6 @@ MODULE_DEVICE_TABLE(acpi, goldfish_battery_acpi_match);
static struct platform_driver goldfish_battery_device = {
.probe = goldfish_battery_probe,
- .remove_new = goldfish_battery_remove,
.driver = {
.name = "goldfish-battery",
.of_match_table = goldfish_battery_of_match,
diff --git a/drivers/power/supply/lp8727_charger.c b/drivers/power/supply/lp8727_charger.c
index 0875391f7ac6b..34548a4da90b2 100644
--- a/drivers/power/supply/lp8727_charger.c
+++ b/drivers/power/supply/lp8727_charger.c
@@ -453,39 +453,20 @@ static int lp8727_register_psy(struct lp8727_chg *pchg)
psy_cfg.supplied_to = battery_supplied_to;
psy_cfg.num_supplicants = ARRAY_SIZE(battery_supplied_to);
- psy->ac = power_supply_register(pchg->dev, &lp8727_ac_desc, &psy_cfg);
+ psy->ac = devm_power_supply_register(pchg->dev, &lp8727_ac_desc, &psy_cfg);
if (IS_ERR(psy->ac))
- goto err_psy_ac;
+ return -EPERM;
- psy->usb = power_supply_register(pchg->dev, &lp8727_usb_desc,
- &psy_cfg);
+ psy->usb = devm_power_supply_register(pchg->dev, &lp8727_usb_desc,
+ &psy_cfg);
if (IS_ERR(psy->usb))
- goto err_psy_usb;
+ return -EPERM;
- psy->batt = power_supply_register(pchg->dev, &lp8727_batt_desc, NULL);
+ psy->batt = devm_power_supply_register(pchg->dev, &lp8727_batt_desc, NULL);
if (IS_ERR(psy->batt))
- goto err_psy_batt;
+ return -EPERM;
return 0;
-
-err_psy_batt:
- power_supply_unregister(psy->usb);
-err_psy_usb:
- power_supply_unregister(psy->ac);
-err_psy_ac:
- return -EPERM;
-}
-
-static void lp8727_unregister_psy(struct lp8727_chg *pchg)
-{
- struct lp8727_psy *psy = pchg->psy;
-
- if (!psy)
- return;
-
- power_supply_unregister(psy->ac);
- power_supply_unregister(psy->usb);
- power_supply_unregister(psy->batt);
}
#ifdef CONFIG_OF
@@ -583,7 +564,6 @@ static int lp8727_probe(struct i2c_client *cl)
ret = lp8727_setup_irq(pchg);
if (ret) {
dev_err(pchg->dev, "irq handler err: %d", ret);
- lp8727_unregister_psy(pchg);
return ret;
}
@@ -595,7 +575,6 @@ static void lp8727_remove(struct i2c_client *cl)
struct lp8727_chg *pchg = i2c_get_clientdata(cl);
lp8727_release_irq(pchg);
- lp8727_unregister_psy(pchg);
}
static const struct of_device_id lp8727_dt_ids[] __maybe_unused = {
diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
index 2c81be82a41a4..72b170b4ac46a 100644
--- a/drivers/power/supply/lp8788-charger.c
+++ b/drivers/power/supply/lp8788-charger.c
@@ -406,12 +406,6 @@ static const struct power_supply_desc lp8788_psy_battery_desc = {
.get_property = lp8788_battery_get_property,
};
-static void lp8788_psy_unregister(struct lp8788_charger *pchg)
-{
- power_supply_unregister(pchg->battery);
- power_supply_unregister(pchg->charger);
-}
-
static void lp8788_charger_event(struct work_struct *work)
{
struct lp8788_charger *pchg =
@@ -666,18 +660,16 @@ static int lp8788_psy_register(struct platform_device *pdev,
charger_cfg.supplied_to = battery_supplied_to;
charger_cfg.num_supplicants = ARRAY_SIZE(battery_supplied_to);
- pchg->charger = power_supply_register(&pdev->dev,
- &lp8788_psy_charger_desc,
- &charger_cfg);
+ pchg->charger = devm_power_supply_register(&pdev->dev,
+ &lp8788_psy_charger_desc,
+ &charger_cfg);
if (IS_ERR(pchg->charger))
return -EPERM;
- pchg->battery = power_supply_register(&pdev->dev,
- &lp8788_psy_battery_desc, NULL);
- if (IS_ERR(pchg->battery)) {
- power_supply_unregister(pchg->charger);
+ pchg->battery = devm_power_supply_register(&pdev->dev,
+ &lp8788_psy_battery_desc, NULL);
+ if (IS_ERR(pchg->battery))
return -EPERM;
- }
return 0;
}
@@ -720,7 +712,6 @@ static void lp8788_charger_remove(struct platform_device *pdev)
flush_work(&pchg->charger_work);
lp8788_irq_unregister(pdev, pchg);
- lp8788_psy_unregister(pchg);
}
static struct platform_driver lp8788_charger_driver = {
diff --git a/drivers/power/supply/max14577_charger.c b/drivers/power/supply/max14577_charger.c
index 7c23fa89ea199..b28c04157709a 100644
--- a/drivers/power/supply/max14577_charger.c
+++ b/drivers/power/supply/max14577_charger.c
@@ -586,8 +586,9 @@ static int max14577_charger_probe(struct platform_device *pdev)
}
psy_cfg.drv_data = chg;
- chg->charger = power_supply_register(&pdev->dev, &max14577_charger_desc,
- &psy_cfg);
+ chg->charger = devm_power_supply_register(&pdev->dev,
+ &max14577_charger_desc,
+ &psy_cfg);
if (IS_ERR(chg->charger)) {
dev_err(&pdev->dev, "failed: power supply register\n");
ret = PTR_ERR(chg->charger);
@@ -608,10 +609,7 @@ err:
static void max14577_charger_remove(struct platform_device *pdev)
{
- struct max14577_charger *chg = platform_get_drvdata(pdev);
-
device_remove_file(&pdev->dev, &dev_attr_fast_charge_timer);
- power_supply_unregister(chg->charger);
}
static const struct platform_device_id max14577_charger_id[] = {
diff --git a/drivers/power/supply/max77693_charger.c b/drivers/power/supply/max77693_charger.c
index d0157e63b8b50..2001e12c9f7de 100644
--- a/drivers/power/supply/max77693_charger.c
+++ b/drivers/power/supply/max77693_charger.c
@@ -709,9 +709,9 @@ static int max77693_charger_probe(struct platform_device *pdev)
goto err;
}
- chg->charger = power_supply_register(&pdev->dev,
- &max77693_charger_desc,
- &psy_cfg);
+ chg->charger = devm_power_supply_register(&pdev->dev,
+ &max77693_charger_desc,
+ &psy_cfg);
if (IS_ERR(chg->charger)) {
dev_err(&pdev->dev, "failed: power supply register\n");
ret = PTR_ERR(chg->charger);
@@ -730,13 +730,9 @@ err:
static void max77693_charger_remove(struct platform_device *pdev)
{
- struct max77693_charger *chg = platform_get_drvdata(pdev);
-
device_remove_file(&pdev->dev, &dev_attr_top_off_timer);
device_remove_file(&pdev->dev, &dev_attr_top_off_threshold_current);
device_remove_file(&pdev->dev, &dev_attr_fast_charge_timer);
-
- power_supply_unregister(chg->charger);
}
static const struct platform_device_id max77693_charger_id[] = {
diff --git a/drivers/power/supply/max8925_power.c b/drivers/power/supply/max8925_power.c
index 4a2d6894f94ee..621a006d52a96 100644
--- a/drivers/power/supply/max8925_power.c
+++ b/drivers/power/supply/max8925_power.c
@@ -507,7 +507,6 @@ static int max8925_power_probe(struct platform_device *pdev)
struct power_supply_config psy_cfg = {}; /* Only for ac and usb */
struct max8925_power_pdata *pdata = NULL;
struct max8925_power_info *info;
- int ret;
pdata = max8925_power_dt_init(pdev);
if (!pdata) {
@@ -528,25 +527,19 @@ static int max8925_power_probe(struct platform_device *pdev)
psy_cfg.supplied_to = pdata->supplied_to;
psy_cfg.num_supplicants = pdata->num_supplicants;
- info->ac = power_supply_register(&pdev->dev, &ac_desc, &psy_cfg);
- if (IS_ERR(info->ac)) {
- ret = PTR_ERR(info->ac);
- goto out;
- }
+ info->ac = devm_power_supply_register(&pdev->dev, &ac_desc, &psy_cfg);
+ if (IS_ERR(info->ac))
+ return PTR_ERR(info->ac);
info->ac->dev.parent = &pdev->dev;
- info->usb = power_supply_register(&pdev->dev, &usb_desc, &psy_cfg);
- if (IS_ERR(info->usb)) {
- ret = PTR_ERR(info->usb);
- goto out_unregister_ac;
- }
+ info->usb = devm_power_supply_register(&pdev->dev, &usb_desc, &psy_cfg);
+ if (IS_ERR(info->usb))
+ return PTR_ERR(info->usb);
info->usb->dev.parent = &pdev->dev;
- info->battery = power_supply_register(&pdev->dev, &battery_desc, NULL);
- if (IS_ERR(info->battery)) {
- ret = PTR_ERR(info->battery);
- goto out_unregister_usb;
- }
+ info->battery = devm_power_supply_register(&pdev->dev, &battery_desc, NULL);
+ if (IS_ERR(info->battery))
+ return PTR_ERR(info->battery);
info->battery->dev.parent = &pdev->dev;
info->batt_detect = pdata->batt_detect;
@@ -558,24 +551,14 @@ static int max8925_power_probe(struct platform_device *pdev)
max8925_init_charger(chip, info);
return 0;
-out_unregister_usb:
- power_supply_unregister(info->usb);
-out_unregister_ac:
- power_supply_unregister(info->ac);
-out:
- return ret;
}
static void max8925_power_remove(struct platform_device *pdev)
{
struct max8925_power_info *info = platform_get_drvdata(pdev);
- if (info) {
- power_supply_unregister(info->ac);
- power_supply_unregister(info->usb);
- power_supply_unregister(info->battery);
+ if (info)
max8925_deinit_charger(info);
- }
}
static struct platform_driver max8925_power_driver = {
diff --git a/drivers/power/supply/mm8013.c b/drivers/power/supply/mm8013.c
index caa272b035649..20c1651ca38e0 100644
--- a/drivers/power/supply/mm8013.c
+++ b/drivers/power/supply/mm8013.c
@@ -71,7 +71,6 @@ static int mm8013_checkdevice(struct mm8013_chip *chip)
static enum power_supply_property mm8013_battery_props[] = {
POWER_SUPPLY_PROP_CAPACITY,
- POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CHARGE_NOW,
@@ -103,16 +102,6 @@ static int mm8013_get_property(struct power_supply *psy,
val->intval = regval;
break;
- case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
- ret = regmap_read(chip->regmap, REG_FLAGS, &regval);
- if (ret < 0)
- return ret;
-
- if (regval & MM8013_FLAG_CHG_INH)
- val->intval = POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE;
- else
- val->intval = POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO;
- break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
ret = regmap_read(chip->regmap, REG_FULL_CHARGE_CAPACITY, &regval);
if (ret < 0)
@@ -187,6 +176,8 @@ static int mm8013_get_property(struct power_supply *psy,
if (regval & MM8013_FLAG_DSG)
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else if (regval & MM8013_FLAG_CHG_INH)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
else if (regval & MM8013_FLAG_CHG)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (regval & MM8013_FLAG_FC)
diff --git a/drivers/power/supply/pcf50633-charger.c b/drivers/power/supply/pcf50633-charger.c
index 950e30917c638..0e980522fee51 100644
--- a/drivers/power/supply/pcf50633-charger.c
+++ b/drivers/power/supply/pcf50633-charger.c
@@ -404,9 +404,9 @@ static int pcf50633_mbc_probe(struct platform_device *pdev)
psy_cfg.drv_data = mbc;
/* Create power supplies */
- mbc->adapter = power_supply_register(&pdev->dev,
- &pcf50633_mbc_adapter_desc,
- &psy_cfg);
+ mbc->adapter = devm_power_supply_register(&pdev->dev,
+ &pcf50633_mbc_adapter_desc,
+ &psy_cfg);
if (IS_ERR(mbc->adapter)) {
dev_err(mbc->pcf->dev, "failed to register adapter\n");
return PTR_ERR(mbc->adapter);
@@ -415,20 +415,19 @@ static int pcf50633_mbc_probe(struct platform_device *pdev)
usb_psy_cfg = psy_cfg;
usb_psy_cfg.attr_grp = pcf50633_mbc_sysfs_groups;
- mbc->usb = power_supply_register(&pdev->dev, &pcf50633_mbc_usb_desc,
- &usb_psy_cfg);
+ mbc->usb = devm_power_supply_register(&pdev->dev,
+ &pcf50633_mbc_usb_desc,
+ &usb_psy_cfg);
if (IS_ERR(mbc->usb)) {
dev_err(mbc->pcf->dev, "failed to register usb\n");
- power_supply_unregister(mbc->adapter);
return PTR_ERR(mbc->usb);
}
- mbc->ac = power_supply_register(&pdev->dev, &pcf50633_mbc_ac_desc,
- &psy_cfg);
+ mbc->ac = devm_power_supply_register(&pdev->dev,
+ &pcf50633_mbc_ac_desc,
+ &psy_cfg);
if (IS_ERR(mbc->ac)) {
dev_err(mbc->pcf->dev, "failed to register ac\n");
- power_supply_unregister(mbc->adapter);
- power_supply_unregister(mbc->usb);
return PTR_ERR(mbc->ac);
}
@@ -449,10 +448,6 @@ static void pcf50633_mbc_remove(struct platform_device *pdev)
/* Remove IRQ handlers */
for (i = 0; i < ARRAY_SIZE(mbc_irq_handlers); i++)
pcf50633_free_irq(mbc->pcf, mbc_irq_handlers[i]);
-
- power_supply_unregister(mbc->usb);
- power_supply_unregister(mbc->adapter);
- power_supply_unregister(mbc->ac);
}
static struct platform_driver pcf50633_mbc_driver = {
diff --git a/drivers/power/supply/power_supply.h b/drivers/power/supply/power_supply.h
index 645eee4d6b6ae..3cbafc58bdad0 100644
--- a/drivers/power/supply/power_supply.h
+++ b/drivers/power/supply/power_supply.h
@@ -15,12 +15,14 @@ struct power_supply;
#ifdef CONFIG_SYSFS
-extern void power_supply_init_attrs(struct device_type *dev_type);
+extern void power_supply_init_attrs(void);
extern int power_supply_uevent(const struct device *dev, struct kobj_uevent_env *env);
+extern const struct attribute_group *power_supply_attr_groups[];
#else
-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
+static inline void power_supply_init_attrs(void) {}
+#define power_supply_attr_groups NULL
#define power_supply_uevent NULL
#endif /* CONFIG_SYSFS */
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index ecef35ac3b7e4..fefe938c93424 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -25,13 +25,17 @@
#include "power_supply.h"
#include "samsung-sdi-battery.h"
-/* exported for the APM Power driver, APM emulation */
-struct class *power_supply_class;
-EXPORT_SYMBOL_GPL(power_supply_class);
+static const struct class power_supply_class = {
+ .name = "power_supply",
+ .dev_uevent = power_supply_uevent,
+};
static BLOCKING_NOTIFIER_HEAD(power_supply_notifier);
-static struct device_type power_supply_dev_type;
+static const struct device_type power_supply_dev_type = {
+ .name = "power_supply",
+ .groups = power_supply_attr_groups,
+};
#define POWER_SUPPLY_DEFERRED_REGISTER_TIME msecs_to_jiffies(10)
@@ -93,8 +97,7 @@ static void power_supply_changed_work(struct work_struct *work)
if (likely(psy->changed)) {
psy->changed = false;
spin_unlock_irqrestore(&psy->changed_lock, flags);
- class_for_each_device(power_supply_class, NULL, psy,
- __power_supply_changed_work);
+ power_supply_for_each_device(psy, __power_supply_changed_work);
power_supply_update_leds(psy);
blocking_notifier_call_chain(&power_supply_notifier,
PSY_EVENT_PROP_CHANGED, psy);
@@ -112,6 +115,12 @@ static void power_supply_changed_work(struct work_struct *work)
spin_unlock_irqrestore(&psy->changed_lock, flags);
}
+int power_supply_for_each_device(void *data, int (*fn)(struct device *dev, void *data))
+{
+ return class_for_each_device(&power_supply_class, NULL, data, fn);
+}
+EXPORT_SYMBOL_GPL(power_supply_for_each_device);
+
void power_supply_changed(struct power_supply *psy)
{
unsigned long flags;
@@ -187,8 +196,7 @@ static int power_supply_populate_supplied_from(struct power_supply *psy)
{
int error;
- error = class_for_each_device(power_supply_class, NULL, psy,
- __power_supply_populate_supplied_from);
+ error = power_supply_for_each_device(psy, __power_supply_populate_supplied_from);
dev_dbg(&psy->dev, "%s %d\n", __func__, error);
@@ -201,7 +209,7 @@ static int __power_supply_find_supply_from_node(struct device *dev,
struct device_node *np = data;
struct power_supply *epsy = dev_get_drvdata(dev);
- /* returning non-zero breaks out of class_for_each_device loop */
+ /* returning non-zero breaks out of power_supply_for_each_device loop */
if (epsy->of_node == np)
return 1;
@@ -213,17 +221,16 @@ static int power_supply_find_supply_from_node(struct device_node *supply_node)
int error;
/*
- * class_for_each_device() either returns its own errors or values
+ * power_supply_for_each_device() either returns its own errors or values
* returned by __power_supply_find_supply_from_node().
*
* __power_supply_find_supply_from_node() will return 0 (no match)
* or 1 (match).
*
- * We return 0 if class_for_each_device() returned 1, -EPROBE_DEFER if
+ * We return 0 if power_supply_for_each_device() returned 1, -EPROBE_DEFER if
* it returned 0, or error as returned by it.
*/
- error = class_for_each_device(power_supply_class, NULL, supply_node,
- __power_supply_find_supply_from_node);
+ error = power_supply_for_each_device(supply_node, __power_supply_find_supply_from_node);
return error ? (error == 1 ? 0 : error) : -EPROBE_DEFER;
}
@@ -329,8 +336,7 @@ int power_supply_am_i_supplied(struct power_supply *psy)
struct psy_am_i_supplied_data data = { psy, 0 };
int error;
- error = class_for_each_device(power_supply_class, NULL, &data,
- __power_supply_am_i_supplied);
+ error = power_supply_for_each_device(&data, __power_supply_am_i_supplied);
dev_dbg(&psy->dev, "%s count %u err %d\n", __func__, data.count, error);
@@ -365,8 +371,7 @@ int power_supply_is_system_supplied(void)
int error;
unsigned int count = 0;
- error = class_for_each_device(power_supply_class, NULL, &count,
- __power_supply_is_system_supplied);
+ error = power_supply_for_each_device(&count, __power_supply_is_system_supplied);
/*
* If no system scope power class device was found at all, most probably we
@@ -412,8 +417,7 @@ int power_supply_get_property_from_supplier(struct power_supply *psy,
* This function is not intended for use with a supply with multiple
* suppliers, we simply pick the first supply to report the psp.
*/
- ret = class_for_each_device(power_supply_class, NULL, &data,
- __power_supply_get_supplier_property);
+ ret = power_supply_for_each_device(&data, __power_supply_get_supplier_property);
if (ret < 0)
return ret;
if (ret == 0)
@@ -458,8 +462,8 @@ static int power_supply_match_device_by_name(struct device *dev, const void *dat
struct power_supply *power_supply_get_by_name(const char *name)
{
struct power_supply *psy = NULL;
- struct device *dev = class_find_device(power_supply_class, NULL, name,
- power_supply_match_device_by_name);
+ struct device *dev = class_find_device(&power_supply_class, NULL, name,
+ power_supply_match_device_by_name);
if (dev) {
psy = dev_get_drvdata(dev);
@@ -515,8 +519,8 @@ struct power_supply *power_supply_get_by_phandle(struct device_node *np,
if (!power_supply_np)
return ERR_PTR(-ENODEV);
- dev = class_find_device(power_supply_class, NULL, power_supply_np,
- power_supply_match_device_node);
+ dev = class_find_device(&power_supply_class, NULL, power_supply_np,
+ power_supply_match_device_node);
of_node_put(power_supply_np);
@@ -1369,7 +1373,7 @@ __power_supply_register(struct device *parent,
device_initialize(dev);
- dev->class = power_supply_class;
+ dev->class = &power_supply_class;
dev->type = &power_supply_dev_type;
dev->parent = parent;
dev->release = power_supply_dev_release;
@@ -1617,20 +1621,13 @@ EXPORT_SYMBOL_GPL(power_supply_get_drvdata);
static int __init power_supply_class_init(void)
{
- power_supply_class = class_create("power_supply");
-
- if (IS_ERR(power_supply_class))
- return PTR_ERR(power_supply_class);
-
- power_supply_class->dev_uevent = power_supply_uevent;
- power_supply_init_attrs(&power_supply_dev_type);
-
- return 0;
+ power_supply_init_attrs();
+ return class_register(&power_supply_class);
}
static void __exit power_supply_class_exit(void)
{
- class_destroy(power_supply_class);
+ class_unregister(&power_supply_class);
}
subsys_initcall(power_supply_class_init);
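The power-supply core change above defines the class statically as a const struct class and registers it with class_register(); lookups and device setup then take the class by address rather than through a pointer created at runtime. The registration pattern in isolation, for a hypothetical class:

	#include <linux/device/class.h>
	#include <linux/module.h>

	static const struct class foo_class = {
		.name = "foo",
	};

	static int __init foo_class_init(void)
	{
		/* Registers the statically defined class with the driver core */
		return class_register(&foo_class);
	}

	static void __exit foo_class_exit(void)
	{
		class_unregister(&foo_class);
	}

	module_init(foo_class_init);
	module_exit(foo_class_exit);

Making the class const is what forces the uevent callback and attribute groups to be wired up at definition time instead of being patched in after class_create(), which is exactly the shuffle visible in power_supply_init_attrs() above.
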
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 977611e163739..0d2c3724d0bc0 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -271,6 +271,23 @@ static ssize_t power_supply_show_usb_type(struct device *dev,
return count;
}
+static ssize_t power_supply_show_charge_behaviour(struct device *dev,
+ struct power_supply *psy,
+ union power_supply_propval *value,
+ char *buf)
+{
+ int ret;
+
+ ret = power_supply_get_property(psy,
+ POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
+ value);
+ if (ret < 0)
+ return ret;
+
+ return power_supply_charge_behaviour_show(dev, psy->desc->charge_behaviours,
+ value->intval, buf);
+}
+
static ssize_t power_supply_show_property(struct device *dev,
struct device_attribute *attr,
char *buf) {
@@ -298,21 +315,24 @@ static ssize_t power_supply_show_property(struct device *dev,
}
}
- if (ps_attr->text_values_len > 0 &&
- value.intval < ps_attr->text_values_len && value.intval >= 0) {
- return sysfs_emit(buf, "%s\n", ps_attr->text_values[value.intval]);
- }
-
switch (psp) {
case POWER_SUPPLY_PROP_USB_TYPE:
ret = power_supply_show_usb_type(dev, psy->desc,
&value, buf);
break;
+ case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
+ ret = power_supply_show_charge_behaviour(dev, psy, &value, buf);
+ break;
case POWER_SUPPLY_PROP_MODEL_NAME ... POWER_SUPPLY_PROP_SERIAL_NUMBER:
ret = sysfs_emit(buf, "%s\n", value.strval);
break;
default:
- ret = sysfs_emit(buf, "%d\n", value.intval);
+ if (ps_attr->text_values_len > 0 &&
+ value.intval < ps_attr->text_values_len && value.intval >= 0) {
+ ret = sysfs_emit(buf, "%s\n", ps_attr->text_values[value.intval]);
+ } else {
+ ret = sysfs_emit(buf, "%d\n", value.intval);
+ }
}
return ret;
@@ -394,17 +414,15 @@ static const struct attribute_group power_supply_attr_group = {
.is_visible = power_supply_attr_is_visible,
};
-static const struct attribute_group *power_supply_attr_groups[] = {
+const struct attribute_group *power_supply_attr_groups[] = {
&power_supply_attr_group,
- NULL,
+ NULL
};
-void power_supply_init_attrs(struct device_type *dev_type)
+void power_supply_init_attrs(void)
{
int i;
- dev_type->groups = power_supply_attr_groups;
-
for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++) {
struct device_attribute *attr;
diff --git a/drivers/power/supply/rt5033_battery.c b/drivers/power/supply/rt5033_battery.c
index d90b96df8e73d..32eafe2c00af5 100644
--- a/drivers/power/supply/rt5033_battery.c
+++ b/drivers/power/supply/rt5033_battery.c
@@ -159,12 +159,12 @@ static int rt5033_battery_probe(struct i2c_client *client)
return -EINVAL;
}
- i2c_set_clientdata(client, battery);
psy_cfg.of_node = client->dev.of_node;
psy_cfg.drv_data = battery;
- battery->psy = power_supply_register(&client->dev,
- &rt5033_battery_desc, &psy_cfg);
+ battery->psy = devm_power_supply_register(&client->dev,
+ &rt5033_battery_desc,
+ &psy_cfg);
if (IS_ERR(battery->psy))
return dev_err_probe(&client->dev, PTR_ERR(battery->psy),
"Failed to register power supply\n");
@@ -172,13 +172,6 @@ static int rt5033_battery_probe(struct i2c_client *client)
return 0;
}
-static void rt5033_battery_remove(struct i2c_client *client)
-{
- struct rt5033_battery *battery = i2c_get_clientdata(client);
-
- power_supply_unregister(battery->psy);
-}
-
static const struct i2c_device_id rt5033_battery_id[] = {
{ "rt5033-battery", },
{ }
@@ -197,7 +190,6 @@ static struct i2c_driver rt5033_battery_driver = {
.of_match_table = rt5033_battery_of_match,
},
.probe = rt5033_battery_probe,
- .remove = rt5033_battery_remove,
.id_table = rt5033_battery_id,
};
module_i2c_driver(rt5033_battery_driver);
diff --git a/drivers/power/supply/rx51_battery.c b/drivers/power/supply/rx51_battery.c
index e2bfc81f0fd97..7cdcd415e8684 100644
--- a/drivers/power/supply/rx51_battery.c
+++ b/drivers/power/supply/rx51_battery.c
@@ -192,14 +192,11 @@ static int rx51_battery_probe(struct platform_device *pdev)
{
struct power_supply_config psy_cfg = {};
struct rx51_device_info *di;
- int ret;
di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
if (!di)
return -ENOMEM;
- platform_set_drvdata(pdev, di);
-
di->dev = &pdev->dev;
di->bat_desc.name = "rx51-battery";
di->bat_desc.type = POWER_SUPPLY_TYPE_BATTERY;
@@ -209,52 +206,23 @@ static int rx51_battery_probe(struct platform_device *pdev)
psy_cfg.drv_data = di;
- di->channel_temp = iio_channel_get(di->dev, "temp");
- if (IS_ERR(di->channel_temp)) {
- ret = PTR_ERR(di->channel_temp);
- goto error;
- }
+ di->channel_temp = devm_iio_channel_get(di->dev, "temp");
+ if (IS_ERR(di->channel_temp))
+ return PTR_ERR(di->channel_temp);
- di->channel_bsi = iio_channel_get(di->dev, "bsi");
- if (IS_ERR(di->channel_bsi)) {
- ret = PTR_ERR(di->channel_bsi);
- goto error_channel_temp;
- }
+ di->channel_bsi = devm_iio_channel_get(di->dev, "bsi");
+ if (IS_ERR(di->channel_bsi))
+ return PTR_ERR(di->channel_bsi);
- di->channel_vbat = iio_channel_get(di->dev, "vbat");
- if (IS_ERR(di->channel_vbat)) {
- ret = PTR_ERR(di->channel_vbat);
- goto error_channel_bsi;
- }
+ di->channel_vbat = devm_iio_channel_get(di->dev, "vbat");
+ if (IS_ERR(di->channel_vbat))
+ return PTR_ERR(di->channel_vbat);
- di->bat = power_supply_register(di->dev, &di->bat_desc, &psy_cfg);
- if (IS_ERR(di->bat)) {
- ret = PTR_ERR(di->bat);
- goto error_channel_vbat;
- }
+ di->bat = devm_power_supply_register(di->dev, &di->bat_desc, &psy_cfg);
+ if (IS_ERR(di->bat))
+ return PTR_ERR(di->bat);
return 0;
-
-error_channel_vbat:
- iio_channel_release(di->channel_vbat);
-error_channel_bsi:
- iio_channel_release(di->channel_bsi);
-error_channel_temp:
- iio_channel_release(di->channel_temp);
-error:
-
- return ret;
-}
-
-static void rx51_battery_remove(struct platform_device *pdev)
-{
- struct rx51_device_info *di = platform_get_drvdata(pdev);
-
- power_supply_unregister(di->bat);
-
- iio_channel_release(di->channel_vbat);
- iio_channel_release(di->channel_bsi);
- iio_channel_release(di->channel_temp);
}
#ifdef CONFIG_OF
@@ -267,7 +235,6 @@ MODULE_DEVICE_TABLE(of, n900_battery_of_match);
static struct platform_driver rx51_battery_driver = {
.probe = rx51_battery_probe,
- .remove_new = rx51_battery_remove,
.driver = {
.name = "rx51-battery",
.of_match_table = of_match_ptr(n900_battery_of_match),
diff --git a/drivers/power/supply/tps65090-charger.c b/drivers/power/supply/tps65090-charger.c
index c59197d2aa87d..d41595764caa5 100644
--- a/drivers/power/supply/tps65090-charger.c
+++ b/drivers/power/supply/tps65090-charger.c
@@ -262,7 +262,7 @@ static int tps65090_charger_probe(struct platform_device *pdev)
psy_cfg.of_node = pdev->dev.of_node;
psy_cfg.drv_data = cdata;
- cdata->ac = power_supply_register(&pdev->dev, &tps65090_charger_desc,
+ cdata->ac = devm_power_supply_register(&pdev->dev, &tps65090_charger_desc,
&psy_cfg);
if (IS_ERR(cdata->ac)) {
dev_err(&pdev->dev, "failed: power supply register\n");
@@ -277,7 +277,7 @@ static int tps65090_charger_probe(struct platform_device *pdev)
ret = tps65090_config_charger(cdata);
if (ret < 0) {
dev_err(&pdev->dev, "charger config failed, err %d\n", ret);
- goto fail_unregister_supply;
+ return ret;
}
/* Check for charger presence */
@@ -286,14 +286,14 @@ static int tps65090_charger_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(cdata->dev, "%s(): Error in reading reg 0x%x", __func__,
TPS65090_REG_CG_STATUS1);
- goto fail_unregister_supply;
+ return ret;
}
if (status1 != 0) {
ret = tps65090_enable_charging(cdata);
if (ret < 0) {
dev_err(cdata->dev, "error enabling charger\n");
- goto fail_unregister_supply;
+ return ret;
}
cdata->ac_online = 1;
power_supply_changed(cdata->ac);
@@ -306,7 +306,7 @@ static int tps65090_charger_probe(struct platform_device *pdev)
dev_err(cdata->dev,
"Unable to register irq %d err %d\n", irq,
ret);
- goto fail_unregister_supply;
+ return ret;
}
} else {
cdata->poll_task = kthread_run(tps65090_charger_poll_task,
@@ -316,16 +316,11 @@ static int tps65090_charger_probe(struct platform_device *pdev)
ret = PTR_ERR(cdata->poll_task);
dev_err(cdata->dev,
"Unable to run kthread err %d\n", ret);
- goto fail_unregister_supply;
+ return ret;
}
}
return 0;
-
-fail_unregister_supply:
- power_supply_unregister(cdata->ac);
-
- return ret;
}
static void tps65090_charger_remove(struct platform_device *pdev)
@@ -334,7 +329,6 @@ static void tps65090_charger_remove(struct platform_device *pdev)
if (cdata->irq == -ENXIO)
kthread_stop(cdata->poll_task);
- power_supply_unregister(cdata->ac);
}
static const struct of_device_id of_tps65090_charger_match[] = {
diff --git a/drivers/power/supply/twl4030_madc_battery.c b/drivers/power/supply/twl4030_madc_battery.c
index 33106476bea2c..3935162e350b5 100644
--- a/drivers/power/supply/twl4030_madc_battery.c
+++ b/drivers/power/supply/twl4030_madc_battery.c
@@ -188,30 +188,23 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
struct twl4030_madc_battery *twl4030_madc_bat;
struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data;
struct power_supply_config psy_cfg = {};
- int ret = 0;
twl4030_madc_bat = devm_kzalloc(&pdev->dev, sizeof(*twl4030_madc_bat),
GFP_KERNEL);
if (!twl4030_madc_bat)
return -ENOMEM;
- twl4030_madc_bat->channel_temp = iio_channel_get(&pdev->dev, "temp");
- if (IS_ERR(twl4030_madc_bat->channel_temp)) {
- ret = PTR_ERR(twl4030_madc_bat->channel_temp);
- goto err;
- }
+ twl4030_madc_bat->channel_temp = devm_iio_channel_get(&pdev->dev, "temp");
+ if (IS_ERR(twl4030_madc_bat->channel_temp))
+ return PTR_ERR(twl4030_madc_bat->channel_temp);
- twl4030_madc_bat->channel_ichg = iio_channel_get(&pdev->dev, "ichg");
- if (IS_ERR(twl4030_madc_bat->channel_ichg)) {
- ret = PTR_ERR(twl4030_madc_bat->channel_ichg);
- goto err_temp;
- }
+ twl4030_madc_bat->channel_ichg = devm_iio_channel_get(&pdev->dev, "ichg");
+ if (IS_ERR(twl4030_madc_bat->channel_ichg))
+ return PTR_ERR(twl4030_madc_bat->channel_ichg);
- twl4030_madc_bat->channel_vbat = iio_channel_get(&pdev->dev, "vbat");
- if (IS_ERR(twl4030_madc_bat->channel_vbat)) {
- ret = PTR_ERR(twl4030_madc_bat->channel_vbat);
- goto err_ichg;
- }
+ twl4030_madc_bat->channel_vbat = devm_iio_channel_get(&pdev->dev, "vbat");
+ if (IS_ERR(twl4030_madc_bat->channel_vbat))
+ return PTR_ERR(twl4030_madc_bat->channel_vbat);
/* sort charging and discharging calibration data */
sort(pdata->charging, pdata->charging_size,
@@ -222,37 +215,14 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
twl4030_cmp, NULL);
twl4030_madc_bat->pdata = pdata;
- platform_set_drvdata(pdev, twl4030_madc_bat);
psy_cfg.drv_data = twl4030_madc_bat;
- twl4030_madc_bat->psy = power_supply_register(&pdev->dev,
- &twl4030_madc_bat_desc,
- &psy_cfg);
- if (IS_ERR(twl4030_madc_bat->psy)) {
- ret = PTR_ERR(twl4030_madc_bat->psy);
- goto err_vbat;
- }
+ twl4030_madc_bat->psy = devm_power_supply_register(&pdev->dev,
+ &twl4030_madc_bat_desc,
+ &psy_cfg);
+ if (IS_ERR(twl4030_madc_bat->psy))
+ return PTR_ERR(twl4030_madc_bat->psy);
return 0;
-
-err_vbat:
- iio_channel_release(twl4030_madc_bat->channel_vbat);
-err_ichg:
- iio_channel_release(twl4030_madc_bat->channel_ichg);
-err_temp:
- iio_channel_release(twl4030_madc_bat->channel_temp);
-err:
- return ret;
-}
-
-static void twl4030_madc_battery_remove(struct platform_device *pdev)
-{
- struct twl4030_madc_battery *bat = platform_get_drvdata(pdev);
-
- power_supply_unregister(bat->psy);
-
- iio_channel_release(bat->channel_vbat);
- iio_channel_release(bat->channel_ichg);
- iio_channel_release(bat->channel_temp);
}
static struct platform_driver twl4030_madc_battery_driver = {
@@ -260,7 +230,6 @@ static struct platform_driver twl4030_madc_battery_driver = {
.name = "twl4030_madc_battery",
},
.probe = twl4030_madc_battery_probe,
- .remove_new = twl4030_madc_battery_remove,
};
module_platform_driver(twl4030_madc_battery_driver);
diff --git a/drivers/power/supply/wm831x_backup.c b/drivers/power/supply/wm831x_backup.c
index 1a7265660adeb..9673fcf7f3afb 100644
--- a/drivers/power/supply/wm831x_backup.c
+++ b/drivers/power/supply/wm831x_backup.c
@@ -171,7 +171,6 @@ static int wm831x_backup_probe(struct platform_device *pdev)
return -ENOMEM;
devdata->wm831x = wm831x;
- platform_set_drvdata(pdev, devdata);
/* We ignore configuration failures since we can still read
* back the status without enabling the charger (which may
@@ -191,22 +190,14 @@ static int wm831x_backup_probe(struct platform_device *pdev)
devdata->backup_desc.properties = wm831x_backup_props;
devdata->backup_desc.num_properties = ARRAY_SIZE(wm831x_backup_props);
devdata->backup_desc.get_property = wm831x_backup_get_prop;
- devdata->backup = power_supply_register(&pdev->dev,
- &devdata->backup_desc, NULL);
+ devdata->backup = devm_power_supply_register(&pdev->dev,
+ &devdata->backup_desc, NULL);
return PTR_ERR_OR_ZERO(devdata->backup);
}
-static void wm831x_backup_remove(struct platform_device *pdev)
-{
- struct wm831x_backup *devdata = platform_get_drvdata(pdev);
-
- power_supply_unregister(devdata->backup);
-}
-
static struct platform_driver wm831x_backup_driver = {
.probe = wm831x_backup_probe,
- .remove_new = wm831x_backup_remove,
.driver = {
.name = "wm831x-backup",
},
diff --git a/drivers/power/supply/wm831x_power.c b/drivers/power/supply/wm831x_power.c
index e49b01ee5f3ef..d56e499ac59fb 100644
--- a/drivers/power/supply/wm831x_power.c
+++ b/drivers/power/supply/wm831x_power.c
@@ -570,8 +570,9 @@ static int wm831x_power_probe(struct platform_device *pdev)
power->wall_desc.properties = wm831x_wall_props;
power->wall_desc.num_properties = ARRAY_SIZE(wm831x_wall_props);
power->wall_desc.get_property = wm831x_wall_get_prop;
- power->wall = power_supply_register(&pdev->dev, &power->wall_desc,
- NULL);
+ power->wall = devm_power_supply_register(&pdev->dev,
+ &power->wall_desc,
+ NULL);
if (IS_ERR(power->wall)) {
ret = PTR_ERR(power->wall);
goto err;
@@ -582,7 +583,9 @@ static int wm831x_power_probe(struct platform_device *pdev)
power->usb_desc.properties = wm831x_usb_props;
power->usb_desc.num_properties = ARRAY_SIZE(wm831x_usb_props);
power->usb_desc.get_property = wm831x_usb_get_prop;
- power->usb = power_supply_register(&pdev->dev, &power->usb_desc, NULL);
+ power->usb = devm_power_supply_register(&pdev->dev,
+ &power->usb_desc,
+ NULL);
if (IS_ERR(power->usb)) {
ret = PTR_ERR(power->usb);
goto err_wall;
@@ -599,9 +602,9 @@ static int wm831x_power_probe(struct platform_device *pdev)
power->battery_desc.num_properties = ARRAY_SIZE(wm831x_bat_props);
power->battery_desc.get_property = wm831x_bat_get_prop;
power->battery_desc.use_for_apm = 1;
- power->battery = power_supply_register(&pdev->dev,
- &power->battery_desc,
- NULL);
+ power->battery = devm_power_supply_register(&pdev->dev,
+ &power->battery_desc,
+ NULL);
if (IS_ERR(power->battery)) {
ret = PTR_ERR(power->battery);
goto err_usb;
@@ -684,12 +687,8 @@ err_syslo:
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
free_irq(irq, power);
err_battery:
- if (power->have_battery)
- power_supply_unregister(power->battery);
err_usb:
- power_supply_unregister(power->usb);
err_wall:
- power_supply_unregister(power->wall);
err:
return ret;
}
@@ -717,11 +716,6 @@ static void wm831x_power_remove(struct platform_device *pdev)
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
free_irq(irq, wm831x_power);
-
- if (wm831x_power->have_battery)
- power_supply_unregister(wm831x_power->battery);
- power_supply_unregister(wm831x_power->wall);
- power_supply_unregister(wm831x_power->usb);
}
static struct platform_driver wm831x_power_driver = {
diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c
index f23b4f5343bc1..3f79ab6f6abf1 100644
--- a/drivers/power/supply/wm8350_power.c
+++ b/drivers/power/supply/wm8350_power.c
@@ -540,22 +540,17 @@ static int wm8350_power_probe(struct platform_device *pdev)
struct wm8350_charger_policy *policy = power->policy;
int ret;
- power->ac = power_supply_register(&pdev->dev, &wm8350_ac_desc, NULL);
+ power->ac = devm_power_supply_register(&pdev->dev, &wm8350_ac_desc, NULL);
if (IS_ERR(power->ac))
return PTR_ERR(power->ac);
- power->battery = power_supply_register(&pdev->dev, &wm8350_battery_desc,
- NULL);
- if (IS_ERR(power->battery)) {
- ret = PTR_ERR(power->battery);
- goto battery_failed;
- }
+ power->battery = devm_power_supply_register(&pdev->dev, &wm8350_battery_desc, NULL);
+ if (IS_ERR(power->battery))
+ return PTR_ERR(power->battery);
- power->usb = power_supply_register(&pdev->dev, &wm8350_usb_desc, NULL);
- if (IS_ERR(power->usb)) {
- ret = PTR_ERR(power->usb);
- goto usb_failed;
- }
+ power->usb = devm_power_supply_register(&pdev->dev, &wm8350_usb_desc, NULL);
+ if (IS_ERR(power->usb))
+ return PTR_ERR(power->usb);
ret = device_create_file(&pdev->dev, &dev_attr_charger_state);
if (ret < 0)
@@ -570,25 +565,14 @@ static int wm8350_power_probe(struct platform_device *pdev)
}
return ret;
-
-usb_failed:
- power_supply_unregister(power->battery);
-battery_failed:
- power_supply_unregister(power->ac);
-
- return ret;
}
static void wm8350_power_remove(struct platform_device *pdev)
{
struct wm8350 *wm8350 = platform_get_drvdata(pdev);
- struct wm8350_power *power = &wm8350->power;
free_charger_irq(wm8350);
device_remove_file(&pdev->dev, &dev_attr_charger_state);
- power_supply_unregister(power->battery);
- power_supply_unregister(power->ac);
- power_supply_unregister(power->usb);
}
static struct platform_driver wm8350_power_driver = {
diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
index b4b6930cacb0b..35cb152fa9aa7 100644
--- a/drivers/powercap/intel_rapl_msr.c
+++ b/drivers/powercap/intel_rapl_msr.c
@@ -197,11 +197,10 @@ out:
return ret;
}
-static int rapl_msr_remove(struct platform_device *pdev)
+static void rapl_msr_remove(struct platform_device *pdev)
{
cpuhp_remove_state(rapl_msr_priv->pcap_rapl_online);
powercap_unregister_control_type(rapl_msr_priv->control_type);
- return 0;
}
static const struct platform_device_id rapl_msr_ids[] = {
@@ -212,7 +211,7 @@ MODULE_DEVICE_TABLE(platform, rapl_msr_ids);
static struct platform_driver intel_rapl_msr_driver = {
.probe = rapl_msr_probe,
- .remove = rapl_msr_remove,
+ .remove_new = rapl_msr_remove,
.id_table = rapl_msr_ids,
.driver = {
.name = "intel_rapl_msr",
diff --git a/drivers/pps/generators/Makefile b/drivers/pps/generators/Makefile
index 2d56dd0495d59..2589fd0f2481e 100644
--- a/drivers/pps/generators/Makefile
+++ b/drivers/pps/generators/Makefile
@@ -5,6 +5,4 @@
obj-$(CONFIG_PPS_GENERATOR_PARPORT) += pps_gen_parport.o
-ifeq ($(CONFIG_PPS_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_PPS_DEBUG) := -DDEBUG
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index d70f793ce4b38..403525cc17833 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -443,7 +443,7 @@ of_pwm_single_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
if (IS_ERR(pwm))
return pwm;
- if (args->args_count > 1)
+ if (args->args_count > 0)
pwm->args.period = args->args[0];
pwm->args.polarity = PWM_POLARITY_NORMAL;
diff --git a/drivers/pwm/pwm-dwc-core.c b/drivers/pwm/pwm-dwc-core.c
index 043736972cb92..c8425493b95d8 100644
--- a/drivers/pwm/pwm-dwc-core.c
+++ b/drivers/pwm/pwm-dwc-core.c
@@ -172,7 +172,6 @@ struct pwm_chip *dwc_pwm_alloc(struct device *dev)
dwc->clk_ns = 10;
chip->ops = &dwc_pwm_ops;
- dev_set_drvdata(dev, chip);
return chip;
}
EXPORT_SYMBOL_GPL(dwc_pwm_alloc);
diff --git a/drivers/pwm/pwm-dwc.c b/drivers/pwm/pwm-dwc.c
index 676eaf8d7a53f..fb3eadf6fbc46 100644
--- a/drivers/pwm/pwm-dwc.c
+++ b/drivers/pwm/pwm-dwc.c
@@ -31,26 +31,34 @@ static const struct dwc_pwm_info ehl_pwm_info = {
.size = 0x1000,
};
-static int dwc_pwm_init_one(struct device *dev, void __iomem *base, unsigned int offset)
+static int dwc_pwm_init_one(struct device *dev, struct dwc_pwm_drvdata *ddata, unsigned int idx)
{
struct pwm_chip *chip;
struct dwc_pwm *dwc;
+ int ret;
chip = dwc_pwm_alloc(dev);
if (IS_ERR(chip))
return PTR_ERR(chip);
dwc = to_dwc_pwm(chip);
- dwc->base = base + offset;
+ dwc->base = ddata->io_base + (ddata->info->size * idx);
- return devm_pwmchip_add(dev, chip);
+ ret = devm_pwmchip_add(dev, chip);
+ if (ret)
+ return ret;
+
+ ddata->chips[idx] = chip;
+ return 0;
}
static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
const struct dwc_pwm_info *info;
struct device *dev = &pci->dev;
- int i, ret;
+ struct dwc_pwm_drvdata *ddata;
+ unsigned int idx;
+ int ret;
ret = pcim_enable_device(pci);
if (ret)
@@ -63,17 +71,25 @@ static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
return dev_err_probe(dev, ret, "Failed to iomap PCI BAR\n");
info = (const struct dwc_pwm_info *)id->driver_data;
-
- for (i = 0; i < info->nr; i++) {
- /*
- * No need to check for pcim_iomap_table() failure,
- * pcim_iomap_regions() already does it for us.
- */
- ret = dwc_pwm_init_one(dev, pcim_iomap_table(pci)[0], i * info->size);
+ ddata = devm_kzalloc(dev, struct_size(ddata, chips, info->nr), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ /*
+ * No need to check for pcim_iomap_table() failure,
+ * pcim_iomap_regions() already does it for us.
+ */
+ ddata->io_base = pcim_iomap_table(pci)[0];
+ ddata->info = info;
+
+ for (idx = 0; idx < ddata->info->nr; idx++) {
+ ret = dwc_pwm_init_one(dev, ddata, idx);
if (ret)
return ret;
}
+ dev_set_drvdata(dev, ddata);
+
pm_runtime_put(dev);
pm_runtime_allow(dev);
@@ -88,19 +104,24 @@ static void dwc_pwm_remove(struct pci_dev *pci)
static int dwc_pwm_suspend(struct device *dev)
{
- struct pwm_chip *chip = dev_get_drvdata(dev);
- struct dwc_pwm *dwc = to_dwc_pwm(chip);
- int i;
-
- for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
- if (chip->pwms[i].state.enabled) {
- dev_err(dev, "PWM %u in use by consumer (%s)\n",
- i, chip->pwms[i].label);
- return -EBUSY;
+ struct dwc_pwm_drvdata *ddata = dev_get_drvdata(dev);
+ unsigned int idx;
+
+ for (idx = 0; idx < ddata->info->nr; idx++) {
+ struct pwm_chip *chip = ddata->chips[idx];
+ struct dwc_pwm *dwc = to_dwc_pwm(chip);
+ unsigned int i;
+
+ for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
+ if (chip->pwms[i].state.enabled) {
+ dev_err(dev, "PWM %u in use by consumer (%s)\n",
+ i, chip->pwms[i].label);
+ return -EBUSY;
+ }
+ dwc->ctx[i].cnt = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(i));
+ dwc->ctx[i].cnt2 = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT2(i));
+ dwc->ctx[i].ctrl = dwc_pwm_readl(dwc, DWC_TIM_CTRL(i));
}
- dwc->ctx[i].cnt = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(i));
- dwc->ctx[i].cnt2 = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT2(i));
- dwc->ctx[i].ctrl = dwc_pwm_readl(dwc, DWC_TIM_CTRL(i));
}
return 0;
@@ -108,14 +129,19 @@ static int dwc_pwm_suspend(struct device *dev)
static int dwc_pwm_resume(struct device *dev)
{
- struct pwm_chip *chip = dev_get_drvdata(dev);
- struct dwc_pwm *dwc = to_dwc_pwm(chip);
- int i;
-
- for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
- dwc_pwm_writel(dwc, dwc->ctx[i].cnt, DWC_TIM_LD_CNT(i));
- dwc_pwm_writel(dwc, dwc->ctx[i].cnt2, DWC_TIM_LD_CNT2(i));
- dwc_pwm_writel(dwc, dwc->ctx[i].ctrl, DWC_TIM_CTRL(i));
+ struct dwc_pwm_drvdata *ddata = dev_get_drvdata(dev);
+ unsigned int idx;
+
+ for (idx = 0; idx < ddata->info->nr; idx++) {
+ struct pwm_chip *chip = ddata->chips[idx];
+ struct dwc_pwm *dwc = to_dwc_pwm(chip);
+ unsigned int i;
+
+ for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
+ dwc_pwm_writel(dwc, dwc->ctx[i].cnt, DWC_TIM_LD_CNT(i));
+ dwc_pwm_writel(dwc, dwc->ctx[i].cnt2, DWC_TIM_LD_CNT2(i));
+ dwc_pwm_writel(dwc, dwc->ctx[i].ctrl, DWC_TIM_CTRL(i));
+ }
}
return 0;
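The dwc-pwm rework keeps every registered chip in one driver-data structure whose trailing array is sized with struct_size(), which is what lets the suspend and resume paths above iterate all instances. The allocation idiom on its own, with hypothetical names:

	#include <linux/device.h>
	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct foo_drvdata {
		unsigned int nr;
		void *chips[];		/* flexible array, one slot per instance */
	};

	static struct foo_drvdata *foo_alloc_drvdata(struct device *dev,
						     unsigned int nr)
	{
		struct foo_drvdata *ddata;

		/*
		 * struct_size() computes sizeof(*ddata) plus nr elements of
		 * ddata->chips[], with overflow checking.
		 */
		ddata = devm_kzalloc(dev, struct_size(ddata, chips, nr),
				     GFP_KERNEL);
		if (!ddata)
			return NULL;

		ddata->nr = nr;
		return ddata;
	}
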
diff --git a/drivers/pwm/pwm-dwc.h b/drivers/pwm/pwm-dwc.h
index a8b074841ae80..c6e2df5a61227 100644
--- a/drivers/pwm/pwm-dwc.h
+++ b/drivers/pwm/pwm-dwc.h
@@ -38,6 +38,12 @@ struct dwc_pwm_info {
unsigned int size;
};
+struct dwc_pwm_drvdata {
+ const struct dwc_pwm_info *info;
+ void __iomem *io_base;
+ struct pwm_chip *chips[];
+};
+
struct dwc_pwm_ctx {
u32 cnt;
u32 cnt2;
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index d79a96679a26c..d6596583ed4e7 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -284,9 +284,9 @@ static int img_pwm_probe(struct platform_device *pdev)
return PTR_ERR(imgchip->sys_clk);
}
- imgchip->pwm_clk = devm_clk_get(&pdev->dev, "imgchip");
+ imgchip->pwm_clk = devm_clk_get(&pdev->dev, "pwm");
if (IS_ERR(imgchip->pwm_clk)) {
- dev_err(&pdev->dev, "failed to get imgchip clock\n");
+ dev_err(&pdev->dev, "failed to get pwm clock\n");
return PTR_ERR(imgchip->pwm_clk);
}
diff --git a/drivers/ras/amd/fmpm.c b/drivers/ras/amd/fmpm.c
index 2f4ac9591c8f5..271dfad05d683 100644
--- a/drivers/ras/amd/fmpm.c
+++ b/drivers/ras/amd/fmpm.c
@@ -150,6 +150,8 @@ static unsigned int max_nr_fru;
/* Total length of record including headers and list of descriptor entries. */
static size_t max_rec_len;
+#define FMPM_MAX_REC_LEN (sizeof(struct fru_rec) + (sizeof(struct cper_fru_poison_desc) * 255))
+
/* Total number of SPA entries across all FRUs. */
static unsigned int spa_nr_entries;
@@ -475,6 +477,16 @@ static void set_rec_fields(struct fru_rec *rec)
struct cper_section_descriptor *sec_desc = &rec->sec_desc;
struct cper_record_header *hdr = &rec->hdr;
+ /*
+ * This is a saved record created with fewer max_nr_entries.
+ * Update the record lengths and keep everything else as-is.
+ */
+ if (hdr->record_length && hdr->record_length < max_rec_len) {
+ pr_debug("Growing record 0x%016llx from %u to %zu bytes\n",
+ hdr->record_id, hdr->record_length, max_rec_len);
+ goto update_lengths;
+ }
+
memcpy(hdr->signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
hdr->revision = CPER_RECORD_REV;
hdr->signature_end = CPER_SIG_END;
@@ -489,19 +501,21 @@ static void set_rec_fields(struct fru_rec *rec)
hdr->error_severity = CPER_SEV_RECOVERABLE;
hdr->validation_bits = 0;
- hdr->record_length = max_rec_len;
hdr->creator_id = CPER_CREATOR_FMP;
hdr->notification_type = CPER_NOTIFY_MCE;
hdr->record_id = cper_next_record_id();
hdr->flags = CPER_HW_ERROR_FLAGS_PREVERR;
sec_desc->section_offset = sizeof(struct cper_record_header);
- sec_desc->section_length = max_rec_len - sizeof(struct cper_record_header);
sec_desc->revision = CPER_SEC_REV;
sec_desc->validation_bits = 0;
sec_desc->flags = CPER_SEC_PRIMARY;
sec_desc->section_type = CPER_SECTION_TYPE_FMP;
sec_desc->section_severity = CPER_SEV_RECOVERABLE;
+
+update_lengths:
+ hdr->record_length = max_rec_len;
+ sec_desc->section_length = max_rec_len - sizeof(struct cper_record_header);
}
static int save_new_records(void)
@@ -512,16 +526,18 @@ static int save_new_records(void)
int ret = 0;
for_each_fru(i, rec) {
- if (rec->hdr.record_length)
+ /* No need to update saved records that match the current record size. */
+ if (rec->hdr.record_length == max_rec_len)
continue;
+ if (!rec->hdr.record_length)
+ set_bit(i, new_records);
+
set_rec_fields(rec);
ret = update_record_on_storage(rec);
if (ret)
goto out_clear;
-
- set_bit(i, new_records);
}
return ret;
@@ -641,12 +657,7 @@ static int get_saved_records(void)
int ret, pos;
ssize_t len;
- /*
- * Assume saved records match current max size.
- *
- * However, this may not be true depending on module parameters.
- */
- old = kmalloc(max_rec_len, GFP_KERNEL);
+ old = kmalloc(FMPM_MAX_REC_LEN, GFP_KERNEL);
if (!old) {
ret = -ENOMEM;
goto out;
@@ -663,21 +674,31 @@ static int get_saved_records(void)
* Make sure to clear temporary buffer between reads to avoid
* leftover data from records of various sizes.
*/
- memset(old, 0, max_rec_len);
+ memset(old, 0, FMPM_MAX_REC_LEN);
- len = erst_read_record(record_id, &old->hdr, max_rec_len,
+ len = erst_read_record(record_id, &old->hdr, FMPM_MAX_REC_LEN,
sizeof(struct fru_rec), &CPER_CREATOR_FMP);
if (len < 0)
continue;
- if (len > max_rec_len) {
- pr_debug("Found record larger than max_rec_len\n");
+ new = get_valid_record(old);
+ if (!new) {
+ erst_clear(record_id);
continue;
}
- new = get_valid_record(old);
- if (!new)
- erst_clear(record_id);
+ if (len > max_rec_len) {
+ unsigned int saved_nr_entries;
+
+ saved_nr_entries = len - sizeof(struct fru_rec);
+ saved_nr_entries /= sizeof(struct cper_fru_poison_desc);
+
+ pr_warn("Saved record found with %u entries.\n", saved_nr_entries);
+ pr_warn("Please increase max_nr_entries to %u.\n", saved_nr_entries);
+
+ ret = -EINVAL;
+ goto out_end;
+ }
/* Restore the record */
memcpy(new, old, len);
diff --git a/drivers/ras/debugfs.h b/drivers/ras/debugfs.h
index 4749ccdeeba12..5a2f48439258c 100644
--- a/drivers/ras/debugfs.h
+++ b/drivers/ras/debugfs.h
@@ -4,6 +4,10 @@
#include <linux/debugfs.h>
+#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *ras_get_debugfs_root(void);
+#else
+static inline struct dentry *ras_get_debugfs_root(void) { return NULL; }
+#endif /* DEBUG_FS */
#endif /* __RAS_DEBUGFS_H__ */
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 550145f82726e..7db0a29b5b8dc 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -288,7 +288,7 @@ config REGULATOR_CROS_EC
config REGULATOR_DA903X
tristate "Dialog Semiconductor DA9030/DA9034 regulators"
depends on PMIC_DA903X
- depends on !CC_IS_CLANG # https://bugs.llvm.org/show_bug.cgi?id=38789
+ depends on !CC_IS_CLANG # https://llvm.org/pr38789
help
Say y here to support the BUCKs and LDOs regulators found on
Dialog Semiconductor DA9030/DA9034 PMIC.
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d019ca6dee9bf..dabac9772741f 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2274,6 +2274,17 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
if (ret > 0) {
rdev->use_count = 1;
regulator->enable_count = 1;
+
+ /* Propagate the regulator state to its supply */
+ if (rdev->supply) {
+ ret = regulator_enable(rdev->supply);
+ if (ret < 0) {
+ destroy_regulator(regulator);
+ module_put(rdev->owner);
+ put_device(&rdev->dev);
+ return ERR_PTR(ret);
+ }
+ }
} else {
rdev->use_count = 0;
regulator->enable_count = 0;
diff --git a/drivers/regulator/tps65132-regulator.c b/drivers/regulator/tps65132-regulator.c
index a06f5f2d79329..9c2f0dd42613d 100644
--- a/drivers/regulator/tps65132-regulator.c
+++ b/drivers/regulator/tps65132-regulator.c
@@ -267,10 +267,17 @@ static const struct i2c_device_id tps65132_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tps65132_id);
+static const struct of_device_id __maybe_unused tps65132_of_match[] = {
+ { .compatible = "ti,tps65132" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tps65132_of_match);
+
static struct i2c_driver tps65132_i2c_driver = {
.driver = {
.name = "tps65132",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = of_match_ptr(tps65132_of_match),
},
.probe = tps65132_probe,
.id_table = tps65132_id,
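For context, of_match_ptr() used above evaluates to NULL on !CONFIG_OF builds (definition reproduced from <linux/of.h>), which is why the match table also carries the __maybe_unused annotation to avoid an unused-variable warning:

#ifdef CONFIG_OF
#define of_match_ptr(_ptr)	(_ptr)	/* table referenced by the driver core */
#else
#define of_match_ptr(_ptr)	NULL	/* table otherwise unreferenced */
#endif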
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index d73727a5828a4..087506e215080 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -1040,8 +1040,8 @@ static int imx_dsp_rproc_probe(struct platform_device *pdev)
return ret;
}
- rproc = rproc_alloc(dev, "imx-dsp-rproc", &imx_dsp_rproc_ops, fw_name,
- sizeof(*priv));
+ rproc = devm_rproc_alloc(dev, "imx-dsp-rproc", &imx_dsp_rproc_ops,
+ fw_name, sizeof(*priv));
if (!rproc)
return -ENOMEM;
@@ -1061,14 +1061,14 @@ static int imx_dsp_rproc_probe(struct platform_device *pdev)
ret = imx_dsp_rproc_detect_mode(priv);
if (ret) {
dev_err(dev, "failed on imx_dsp_rproc_detect_mode\n");
- goto err_put_rproc;
+ return ret;
}
/* There are multiple power domains required by DSP on some platforms */
ret = imx_dsp_attach_pm_domains(priv);
if (ret) {
dev_err(dev, "failed on imx_dsp_attach_pm_domains\n");
- goto err_put_rproc;
+ return ret;
}
/* Get clocks */
ret = imx_dsp_rproc_clk_get(priv);
@@ -1091,8 +1091,6 @@ static int imx_dsp_rproc_probe(struct platform_device *pdev)
err_detach_domains:
dev_pm_domain_detach_list(priv->pd_list);
-err_put_rproc:
- rproc_free(rproc);
return ret;
}
@@ -1105,7 +1103,6 @@ static void imx_dsp_rproc_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
rproc_del(rproc);
dev_pm_domain_detach_list(priv->pd_list);
- rproc_free(rproc);
}
/* pm runtime functions */
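A minimal probe sketch (hypothetical foo_rproc driver with assumed ops/priv names) of the devm_rproc_alloc() pattern these remoteproc hunks move to: the rproc lifetime is tied to the device, so error paths simply return and remove() no longer needs rproc_free(). Some drivers below also pair it with devm_rproc_add().

static int foo_rproc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rproc *rproc;

	rproc = devm_rproc_alloc(dev, dev_name(dev), &foo_rproc_ops,
				 "foo_fw.elf", sizeof(struct foo_rproc_priv));
	if (!rproc)
		return -ENOMEM;

	/* ...further resource setup may just 'return ret;' on failure... */

	return devm_rproc_add(dev, rproc);	/* undone automatically on unbind */
}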
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 3161f14442bc2..5a3fb902acc9f 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -1049,16 +1049,14 @@ static int imx_rproc_probe(struct platform_device *pdev)
int ret;
/* set some other name than imx */
- rproc = rproc_alloc(dev, "imx-rproc", &imx_rproc_ops,
- NULL, sizeof(*priv));
+ rproc = devm_rproc_alloc(dev, "imx-rproc", &imx_rproc_ops,
+ NULL, sizeof(*priv));
if (!rproc)
return -ENOMEM;
dcfg = of_device_get_match_data(dev);
- if (!dcfg) {
- ret = -EINVAL;
- goto err_put_rproc;
- }
+ if (!dcfg)
+ return -EINVAL;
priv = rproc->priv;
priv->rproc = rproc;
@@ -1069,8 +1067,7 @@ static int imx_rproc_probe(struct platform_device *pdev)
priv->workqueue = create_workqueue(dev_name(dev));
if (!priv->workqueue) {
dev_err(dev, "cannot create workqueue\n");
- ret = -ENOMEM;
- goto err_put_rproc;
+ return -ENOMEM;
}
ret = imx_rproc_xtr_mbox_init(rproc);
@@ -1112,8 +1109,6 @@ err_put_mbox:
imx_rproc_free_mbox(rproc);
err_put_wkq:
destroy_workqueue(priv->workqueue);
-err_put_rproc:
- rproc_free(rproc);
return ret;
}
@@ -1128,7 +1123,6 @@ static void imx_rproc_remove(struct platform_device *pdev)
imx_rproc_put_scu(rproc);
imx_rproc_free_mbox(rproc);
destroy_workqueue(priv->workqueue);
- rproc_free(rproc);
}
static const struct of_device_id imx_rproc_of_match[] = {
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index 93f9a1537ec60..1d24c9b656a82 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -674,8 +674,8 @@ static int adsp_probe(struct platform_device *pdev)
return ret;
}
- rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops,
- firmware_name, sizeof(*adsp));
+ rproc = devm_rproc_alloc(&pdev->dev, pdev->name, &adsp_ops,
+ firmware_name, sizeof(*adsp));
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
return -ENOMEM;
@@ -700,16 +700,16 @@ static int adsp_probe(struct platform_device *pdev)
ret = adsp_alloc_memory_region(adsp);
if (ret)
- goto free_rproc;
+ return ret;
ret = adsp_init_clock(adsp, desc->clk_ids);
if (ret)
- goto free_rproc;
+ return ret;
ret = qcom_rproc_pds_attach(adsp, desc->pd_names, desc->num_pds);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to attach proxy power domains\n");
- goto free_rproc;
+ return ret;
}
ret = adsp_init_reset(adsp);
@@ -744,9 +744,6 @@ static int adsp_probe(struct platform_device *pdev)
disable_pm:
qcom_rproc_pds_detach(adsp);
-free_rproc:
- rproc_free(rproc);
-
return ret;
}
@@ -761,7 +758,6 @@ static void adsp_remove(struct platform_device *pdev)
qcom_remove_sysmon_subdev(adsp->sysmon);
qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
qcom_rproc_pds_detach(adsp);
- rproc_free(adsp->rproc);
}
static const struct adsp_pil_data adsp_resource_init = {
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 394b2c1cb5e21..1779fc890e102 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -1990,8 +1990,8 @@ static int q6v5_probe(struct platform_device *pdev)
return ret;
}
- rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
- mba_image, sizeof(*qproc));
+ rproc = devm_rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
+ mba_image, sizeof(*qproc));
if (!rproc) {
dev_err(&pdev->dev, "failed to allocate rproc\n");
return -ENOMEM;
@@ -2008,7 +2008,7 @@ static int q6v5_probe(struct platform_device *pdev)
1, &qproc->hexagon_mdt_image);
if (ret < 0 && ret != -EINVAL) {
dev_err(&pdev->dev, "unable to read mpss firmware-name\n");
- goto free_rproc;
+ return ret;
}
platform_set_drvdata(pdev, qproc);
@@ -2019,17 +2019,17 @@ static int q6v5_probe(struct platform_device *pdev)
qproc->has_spare_reg = desc->has_spare_reg;
ret = q6v5_init_mem(qproc, pdev);
if (ret)
- goto free_rproc;
+ return ret;
ret = q6v5_alloc_memory_region(qproc);
if (ret)
- goto free_rproc;
+ return ret;
ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
desc->proxy_clk_names);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
- goto free_rproc;
+ return ret;
}
qproc->proxy_clk_count = ret;
@@ -2037,7 +2037,7 @@ static int q6v5_probe(struct platform_device *pdev)
desc->reset_clk_names);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get reset clocks.\n");
- goto free_rproc;
+ return ret;
}
qproc->reset_clk_count = ret;
@@ -2045,7 +2045,7 @@ static int q6v5_probe(struct platform_device *pdev)
desc->active_clk_names);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get active clocks.\n");
- goto free_rproc;
+ return ret;
}
qproc->active_clk_count = ret;
@@ -2053,7 +2053,7 @@ static int q6v5_probe(struct platform_device *pdev)
desc->proxy_supply);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
- goto free_rproc;
+ return ret;
}
qproc->proxy_reg_count = ret;
@@ -2061,7 +2061,7 @@ static int q6v5_probe(struct platform_device *pdev)
desc->active_supply);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get active regulators.\n");
- goto free_rproc;
+ return ret;
}
qproc->active_reg_count = ret;
@@ -2074,12 +2074,12 @@ static int q6v5_probe(struct platform_device *pdev)
desc->fallback_proxy_supply);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get fallback proxy regulators.\n");
- goto free_rproc;
+ return ret;
}
qproc->fallback_proxy_reg_count = ret;
} else if (ret < 0) {
dev_err(&pdev->dev, "Failed to init power domains\n");
- goto free_rproc;
+ return ret;
} else {
qproc->proxy_pd_count = ret;
}
@@ -2127,8 +2127,6 @@ remove_subdevs:
qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
detach_proxy_pds:
q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
-free_rproc:
- rproc_free(rproc);
return ret;
}
@@ -2149,8 +2147,6 @@ static void q6v5_remove(struct platform_device *pdev)
qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
-
- rproc_free(rproc);
}
static const struct rproc_hexagon_res sc7180_mss = {
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index a9dd58608052c..54d8005d40a34 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -33,12 +33,15 @@
#define ADSP_DECRYPT_SHUTDOWN_DELAY_MS 100
+#define MAX_ASSIGN_COUNT 3
+
struct adsp_data {
int crash_reason_smem;
const char *firmware_name;
const char *dtb_firmware_name;
int pas_id;
int dtb_pas_id;
+ int lite_pas_id;
unsigned int minidump_id;
bool auto_boot;
bool decrypt_shutdown;
@@ -51,6 +54,9 @@ struct adsp_data {
int ssctl_id;
int region_assign_idx;
+ int region_assign_count;
+ bool region_assign_shared;
+ int region_assign_vmid;
};
struct qcom_adsp {
@@ -72,6 +78,7 @@ struct qcom_adsp {
const char *dtb_firmware_name;
int pas_id;
int dtb_pas_id;
+ int lite_pas_id;
unsigned int minidump_id;
int crash_reason_smem;
bool decrypt_shutdown;
@@ -87,15 +94,18 @@ struct qcom_adsp {
phys_addr_t dtb_mem_phys;
phys_addr_t mem_reloc;
phys_addr_t dtb_mem_reloc;
- phys_addr_t region_assign_phys;
+ phys_addr_t region_assign_phys[MAX_ASSIGN_COUNT];
void *mem_region;
void *dtb_mem_region;
size_t mem_size;
size_t dtb_mem_size;
- size_t region_assign_size;
+ size_t region_assign_size[MAX_ASSIGN_COUNT];
int region_assign_idx;
- u64 region_assign_perms;
+ int region_assign_count;
+ bool region_assign_shared;
+ int region_assign_vmid;
+ u64 region_assign_owners[MAX_ASSIGN_COUNT];
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_subdev smd_subdev;
@@ -210,6 +220,9 @@ static int adsp_load(struct rproc *rproc, const struct firmware *fw)
/* Store firmware handle to be used in adsp_start() */
adsp->firmware = fw;
+ if (adsp->lite_pas_id)
+ ret = qcom_scm_pas_shutdown(adsp->lite_pas_id);
+
if (adsp->dtb_pas_id) {
ret = request_firmware(&adsp->dtb_firmware, adsp->dtb_firmware_name, adsp->dev);
if (ret) {
@@ -590,37 +603,53 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
static int adsp_assign_memory_region(struct qcom_adsp *adsp)
{
- struct reserved_mem *rmem = NULL;
- struct qcom_scm_vmperm perm;
+ struct qcom_scm_vmperm perm[MAX_ASSIGN_COUNT];
struct device_node *node;
+ unsigned int perm_size;
+ int offset;
int ret;
if (!adsp->region_assign_idx)
return 0;
- node = of_parse_phandle(adsp->dev->of_node, "memory-region", adsp->region_assign_idx);
- if (node)
- rmem = of_reserved_mem_lookup(node);
- of_node_put(node);
- if (!rmem) {
- dev_err(adsp->dev, "unable to resolve shareable memory-region\n");
- return -EINVAL;
- }
+ for (offset = 0; offset < adsp->region_assign_count; ++offset) {
+ struct reserved_mem *rmem = NULL;
+
+ node = of_parse_phandle(adsp->dev->of_node, "memory-region",
+ adsp->region_assign_idx + offset);
+ if (node)
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+ if (!rmem) {
+ dev_err(adsp->dev, "unable to resolve shareable memory-region index %d\n",
+ offset);
+ return -EINVAL;
+ }
- perm.vmid = QCOM_SCM_VMID_MSS_MSA;
- perm.perm = QCOM_SCM_PERM_RW;
+ if (adsp->region_assign_shared) {
+ perm[0].vmid = QCOM_SCM_VMID_HLOS;
+ perm[0].perm = QCOM_SCM_PERM_RW;
+ perm[1].vmid = adsp->region_assign_vmid;
+ perm[1].perm = QCOM_SCM_PERM_RW;
+ perm_size = 2;
+ } else {
+ perm[0].vmid = adsp->region_assign_vmid;
+ perm[0].perm = QCOM_SCM_PERM_RW;
+ perm_size = 1;
+ }
- adsp->region_assign_phys = rmem->base;
- adsp->region_assign_size = rmem->size;
- adsp->region_assign_perms = BIT(QCOM_SCM_VMID_HLOS);
+ adsp->region_assign_phys[offset] = rmem->base;
+ adsp->region_assign_size[offset] = rmem->size;
+ adsp->region_assign_owners[offset] = BIT(QCOM_SCM_VMID_HLOS);
- ret = qcom_scm_assign_mem(adsp->region_assign_phys,
- adsp->region_assign_size,
- &adsp->region_assign_perms,
- &perm, 1);
- if (ret < 0) {
- dev_err(adsp->dev, "assign memory failed\n");
- return ret;
+ ret = qcom_scm_assign_mem(adsp->region_assign_phys[offset],
+ adsp->region_assign_size[offset],
+ &adsp->region_assign_owners[offset],
+ perm, perm_size);
+ if (ret < 0) {
+ dev_err(adsp->dev, "assign memory %d failed\n", offset);
+ return ret;
+ }
}
return 0;
@@ -629,20 +658,23 @@ static int adsp_assign_memory_region(struct qcom_adsp *adsp)
static void adsp_unassign_memory_region(struct qcom_adsp *adsp)
{
struct qcom_scm_vmperm perm;
+ int offset;
int ret;
- if (!adsp->region_assign_idx)
+ if (!adsp->region_assign_idx || adsp->region_assign_shared)
return;
- perm.vmid = QCOM_SCM_VMID_HLOS;
- perm.perm = QCOM_SCM_PERM_RW;
+ for (offset = 0; offset < adsp->region_assign_count; ++offset) {
+ perm.vmid = QCOM_SCM_VMID_HLOS;
+ perm.perm = QCOM_SCM_PERM_RW;
- ret = qcom_scm_assign_mem(adsp->region_assign_phys,
- adsp->region_assign_size,
- &adsp->region_assign_perms,
- &perm, 1);
- if (ret < 0)
- dev_err(adsp->dev, "unassign memory failed\n");
+ ret = qcom_scm_assign_mem(adsp->region_assign_phys[offset],
+ adsp->region_assign_size[offset],
+ &adsp->region_assign_owners[offset],
+ &perm, 1);
+ if (ret < 0)
+ dev_err(adsp->dev, "unassign memory %d failed\n", offset);
+ }
}
static int adsp_probe(struct platform_device *pdev)
@@ -678,7 +710,7 @@ static int adsp_probe(struct platform_device *pdev)
if (desc->minidump_id)
ops = &adsp_minidump_ops;
- rproc = rproc_alloc(&pdev->dev, pdev->name, ops, fw_name, sizeof(*adsp));
+ rproc = devm_rproc_alloc(&pdev->dev, pdev->name, ops, fw_name, sizeof(*adsp));
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
@@ -693,9 +725,13 @@ static int adsp_probe(struct platform_device *pdev)
adsp->rproc = rproc;
adsp->minidump_id = desc->minidump_id;
adsp->pas_id = desc->pas_id;
+ adsp->lite_pas_id = desc->lite_pas_id;
adsp->info_name = desc->sysmon_name;
adsp->decrypt_shutdown = desc->decrypt_shutdown;
adsp->region_assign_idx = desc->region_assign_idx;
+ adsp->region_assign_count = min_t(int, MAX_ASSIGN_COUNT, desc->region_assign_count);
+ adsp->region_assign_vmid = desc->region_assign_vmid;
+ adsp->region_assign_shared = desc->region_assign_shared;
if (dtb_fw_name) {
adsp->dtb_firmware_name = dtb_fw_name;
adsp->dtb_pas_id = desc->dtb_pas_id;
@@ -754,7 +790,6 @@ detach_proxy_pds:
adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
free_rproc:
device_init_wakeup(adsp->dev, false);
- rproc_free(rproc);
return ret;
}
@@ -773,28 +808,27 @@ static void adsp_remove(struct platform_device *pdev)
qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
device_init_wakeup(adsp->dev, false);
- rproc_free(adsp->rproc);
}
static const struct adsp_data adsp_resource_init = {
- .crash_reason_smem = 423,
- .firmware_name = "adsp.mdt",
- .pas_id = 1,
- .auto_boot = true,
- .ssr_name = "lpass",
- .sysmon_name = "adsp",
- .ssctl_id = 0x14,
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .auto_boot = true,
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
};
static const struct adsp_data sdm845_adsp_resource_init = {
- .crash_reason_smem = 423,
- .firmware_name = "adsp.mdt",
- .pas_id = 1,
- .auto_boot = true,
- .load_state = "adsp",
- .ssr_name = "lpass",
- .sysmon_name = "adsp",
- .ssctl_id = 0x14,
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .auto_boot = true,
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
};
static const struct adsp_data sm6350_adsp_resource = {
@@ -829,18 +863,18 @@ static const struct adsp_data sm6375_mpss_resource = {
};
static const struct adsp_data sm8150_adsp_resource = {
- .crash_reason_smem = 423,
- .firmware_name = "adsp.mdt",
- .pas_id = 1,
- .auto_boot = true,
- .proxy_pd_names = (char*[]){
- "cx",
- NULL
- },
- .load_state = "adsp",
- .ssr_name = "lpass",
- .sysmon_name = "adsp",
- .ssctl_id = 0x14,
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
};
static const struct adsp_data sm8250_adsp_resource = {
@@ -876,17 +910,17 @@ static const struct adsp_data sm8350_adsp_resource = {
};
static const struct adsp_data msm8996_adsp_resource = {
- .crash_reason_smem = 423,
- .firmware_name = "adsp.mdt",
- .pas_id = 1,
- .auto_boot = true,
- .proxy_pd_names = (char*[]){
- "cx",
- NULL
- },
- .ssr_name = "lpass",
- .sysmon_name = "adsp",
- .ssctl_id = 0x14,
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
};
static const struct adsp_data cdsp_resource_init = {
@@ -984,6 +1018,46 @@ static const struct adsp_data sc8280xp_nsp1_resource = {
.ssctl_id = 0x20,
};
+static const struct adsp_data x1e80100_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .dtb_firmware_name = "adsp_dtb.mdt",
+ .pas_id = 1,
+ .dtb_pas_id = 0x24,
+ .lite_pas_id = 0x1f,
+ .minidump_id = 5,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
+static const struct adsp_data x1e80100_cdsp_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .dtb_firmware_name = "cdsp_dtb.mdt",
+ .pas_id = 18,
+ .dtb_pas_id = 0x25,
+ .minidump_id = 7,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mxc",
+ "nsp",
+ NULL
+ },
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
static const struct adsp_data sm8350_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
@@ -1033,33 +1107,33 @@ static const struct adsp_data sc8180x_mpss_resource = {
};
static const struct adsp_data msm8996_slpi_resource_init = {
- .crash_reason_smem = 424,
- .firmware_name = "slpi.mdt",
- .pas_id = 12,
- .auto_boot = true,
- .proxy_pd_names = (char*[]){
- "ssc_cx",
- NULL
- },
- .ssr_name = "dsps",
- .sysmon_name = "slpi",
- .ssctl_id = 0x16,
+ .crash_reason_smem = 424,
+ .firmware_name = "slpi.mdt",
+ .pas_id = 12,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "ssc_cx",
+ NULL
+ },
+ .ssr_name = "dsps",
+ .sysmon_name = "slpi",
+ .ssctl_id = 0x16,
};
static const struct adsp_data sdm845_slpi_resource_init = {
- .crash_reason_smem = 424,
- .firmware_name = "slpi.mdt",
- .pas_id = 12,
- .auto_boot = true,
- .proxy_pd_names = (char*[]){
- "lcx",
- "lmx",
- NULL
- },
- .load_state = "slpi",
- .ssr_name = "dsps",
- .sysmon_name = "slpi",
- .ssctl_id = 0x16,
+ .crash_reason_smem = 424,
+ .firmware_name = "slpi.mdt",
+ .pas_id = 12,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "slpi",
+ .ssr_name = "dsps",
+ .sysmon_name = "slpi",
+ .ssctl_id = 0x16,
};
static const struct adsp_data wcss_resource_init = {
@@ -1163,6 +1237,8 @@ static const struct adsp_data sm8550_mpss_resource = {
.sysmon_name = "modem",
.ssctl_id = 0x12,
.region_assign_idx = 2,
+ .region_assign_count = 1,
+ .region_assign_vmid = QCOM_SCM_VMID_MSS_MSA,
};
static const struct adsp_data sc7280_wpss_resource = {
@@ -1181,6 +1257,53 @@ static const struct adsp_data sc7280_wpss_resource = {
.ssctl_id = 0x19,
};
+static const struct adsp_data sm8650_cdsp_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .dtb_firmware_name = "cdsp_dtb.mdt",
+ .pas_id = 18,
+ .dtb_pas_id = 0x25,
+ .minidump_id = 7,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mxc",
+ "nsp",
+ NULL
+ },
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+ .region_assign_idx = 2,
+ .region_assign_count = 1,
+ .region_assign_shared = true,
+ .region_assign_vmid = QCOM_SCM_VMID_CDSP,
+};
+
+static const struct adsp_data sm8650_mpss_resource = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .dtb_firmware_name = "modem_dtb.mdt",
+ .pas_id = 4,
+ .dtb_pas_id = 0x26,
+ .minidump_id = 3,
+ .auto_boot = false,
+ .decrypt_shutdown = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .load_state = "modem",
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x12,
+ .region_assign_idx = 2,
+ .region_assign_count = 3,
+ .region_assign_vmid = QCOM_SCM_VMID_MSS_MSA,
+};
+
static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource},
@@ -1236,6 +1359,11 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sm8550-adsp-pas", .data = &sm8550_adsp_resource},
{ .compatible = "qcom,sm8550-cdsp-pas", .data = &sm8550_cdsp_resource},
{ .compatible = "qcom,sm8550-mpss-pas", .data = &sm8550_mpss_resource},
+ { .compatible = "qcom,sm8650-adsp-pas", .data = &sm8550_adsp_resource},
+ { .compatible = "qcom,sm8650-cdsp-pas", .data = &sm8650_cdsp_resource},
+ { .compatible = "qcom,sm8650-mpss-pas", .data = &sm8650_mpss_resource},
+ { .compatible = "qcom,x1e80100-adsp-pas", .data = &x1e80100_adsp_resource},
+ { .compatible = "qcom,x1e80100-cdsp-pas", .data = &x1e80100_cdsp_resource},
{ },
};
MODULE_DEVICE_TABLE(of, adsp_of_match);
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index cff1fa07d1def..94f68c919ee62 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -1011,8 +1011,8 @@ static int q6v5_wcss_probe(struct platform_device *pdev)
if (!desc)
return -EINVAL;
- rproc = rproc_alloc(&pdev->dev, pdev->name, desc->ops,
- desc->firmware_name, sizeof(*wcss));
+ rproc = devm_rproc_alloc(&pdev->dev, pdev->name, desc->ops,
+ desc->firmware_name, sizeof(*wcss));
if (!rproc) {
dev_err(&pdev->dev, "failed to allocate rproc\n");
return -ENOMEM;
@@ -1027,29 +1027,29 @@ static int q6v5_wcss_probe(struct platform_device *pdev)
ret = q6v5_wcss_init_mmio(wcss, pdev);
if (ret)
- goto free_rproc;
+ return ret;
ret = q6v5_alloc_memory_region(wcss);
if (ret)
- goto free_rproc;
+ return ret;
if (wcss->version == WCSS_QCS404) {
ret = q6v5_wcss_init_clock(wcss);
if (ret)
- goto free_rproc;
+ return ret;
ret = q6v5_wcss_init_regulator(wcss);
if (ret)
- goto free_rproc;
+ return ret;
}
ret = q6v5_wcss_init_reset(wcss, desc);
if (ret)
- goto free_rproc;
+ return ret;
ret = qcom_q6v5_init(&wcss->q6v5, pdev, rproc, desc->crash_reason_smem, NULL, NULL);
if (ret)
- goto free_rproc;
+ return ret;
qcom_add_glink_subdev(rproc, &wcss->glink_subdev, "q6wcss");
qcom_add_ssr_subdev(rproc, &wcss->ssr_subdev, "q6wcss");
@@ -1061,16 +1061,11 @@ static int q6v5_wcss_probe(struct platform_device *pdev)
ret = rproc_add(rproc);
if (ret)
- goto free_rproc;
+ return ret;
platform_set_drvdata(pdev, rproc);
return 0;
-
-free_rproc:
- rproc_free(rproc);
-
- return ret;
}
static void q6v5_wcss_remove(struct platform_device *pdev)
@@ -1080,7 +1075,6 @@ static void q6v5_wcss_remove(struct platform_device *pdev)
qcom_q6v5_deinit(&wcss->q6v5);
rproc_del(rproc);
- rproc_free(rproc);
}
static const struct wcss_data wcss_ipq8074_res_init = {
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 90de22c81da97..a7bb9da27029d 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -555,8 +555,8 @@ static int wcnss_probe(struct platform_device *pdev)
if (ret < 0 && ret != -EINVAL)
return ret;
- rproc = rproc_alloc(&pdev->dev, pdev->name, &wcnss_ops,
- fw_name, sizeof(*wcnss));
+ rproc = devm_rproc_alloc(&pdev->dev, pdev->name, &wcnss_ops,
+ fw_name, sizeof(*wcnss));
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
return -ENOMEM;
@@ -574,14 +574,12 @@ static int wcnss_probe(struct platform_device *pdev)
mutex_init(&wcnss->iris_lock);
mmio = devm_platform_ioremap_resource_byname(pdev, "pmu");
- if (IS_ERR(mmio)) {
- ret = PTR_ERR(mmio);
- goto free_rproc;
- }
+ if (IS_ERR(mmio))
+ return PTR_ERR(mmio);
ret = wcnss_alloc_memory_region(wcnss);
if (ret)
- goto free_rproc;
+ return ret;
wcnss->pmu_cfg = mmio + data->pmu_offset;
wcnss->spare_out = mmio + data->spare_offset;
@@ -592,7 +590,7 @@ static int wcnss_probe(struct platform_device *pdev)
*/
ret = wcnss_init_pds(wcnss, data->pd_names);
if (ret && (ret != -ENODATA || !data->num_pd_vregs))
- goto free_rproc;
+ return ret;
ret = wcnss_init_regulators(wcnss, data->vregs, data->num_vregs,
data->num_pd_vregs);
@@ -656,8 +654,6 @@ remove_iris:
qcom_iris_remove(wcnss->iris);
detach_pds:
wcnss_release_pds(wcnss);
-free_rproc:
- rproc_free(rproc);
return ret;
}
@@ -673,7 +669,6 @@ static void wcnss_remove(struct platform_device *pdev)
qcom_remove_sysmon_subdev(wcnss->sysmon);
qcom_remove_smd_subdev(wcnss->rproc, &wcnss->smd_subdev);
wcnss_release_pds(wcnss);
- rproc_free(wcnss->rproc);
}
static const struct of_device_id wcnss_of_match[] = {
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 695cce218e8c6..f276956f2c5ce 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -33,6 +33,7 @@
#include <linux/idr.h>
#include <linux/elf.h>
#include <linux/crc32.h>
+#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
@@ -2112,6 +2113,7 @@ EXPORT_SYMBOL(rproc_detach);
struct rproc *rproc_get_by_phandle(phandle phandle)
{
struct rproc *rproc = NULL, *r;
+ struct device_driver *driver;
struct device_node *np;
np = of_find_node_by_phandle(phandle);
@@ -2122,7 +2124,26 @@ struct rproc *rproc_get_by_phandle(phandle phandle)
list_for_each_entry_rcu(r, &rproc_list, node) {
if (r->dev.parent && device_match_of_node(r->dev.parent, np)) {
/* prevent underlying implementation from being removed */
- if (!try_module_get(r->dev.parent->driver->owner)) {
+
+ /*
+ * If the remoteproc's parent has a driver, the
+ * remoteproc is not part of a cluster and we can use
+ * that driver.
+ */
+ driver = r->dev.parent->driver;
+
+ /*
+ * If the remoteproc's parent does not have a driver,
+ * look for the driver associated with the cluster.
+ */
+ if (!driver) {
+ if (r->dev.parent->parent)
+ driver = r->dev.parent->parent->driver;
+ if (!driver)
+ break;
+ }
+
+ if (!try_module_get(driver->owner)) {
dev_err(&r->dev, "can't get owner\n");
break;
}
@@ -2533,7 +2554,11 @@ EXPORT_SYMBOL(rproc_free);
*/
void rproc_put(struct rproc *rproc)
{
- module_put(rproc->dev.parent->driver->owner);
+ if (rproc->dev.parent->driver)
+ module_put(rproc->dev.parent->driver->owner);
+ else
+ module_put(rproc->dev.parent->parent->driver->owner);
+
put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_put);
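A hypothetical consumer sketch of rproc_get_by_phandle(): with the change above the lookup also succeeds when the remoteproc sits in a cluster and the driver is bound to the cluster parent, since the module reference is then taken on that (grand)parent's driver.

#include <linux/of.h>
#include <linux/remoteproc.h>

static struct rproc *foo_get_rproc(struct device *dev)
{
	struct rproc *rproc;
	u32 phandle;

	if (of_property_read_u32(dev->of_node, "foo,rproc", &phandle))
		return ERR_PTR(-EINVAL);

	rproc = rproc_get_by_phandle(phandle);	/* takes a module reference */
	if (!rproc)
		return ERR_PTR(-EPROBE_DEFER);

	return rproc;	/* balance with rproc_put() when done */
}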
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 83d76915a6ad6..25b66b113b695 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -351,6 +351,9 @@ static void rproc_virtio_dev_release(struct device *dev)
kfree(vdev);
+ of_reserved_mem_device_release(&rvdev->pdev->dev);
+ dma_release_coherent_memory(&rvdev->pdev->dev);
+
put_device(&rvdev->pdev->dev);
}
@@ -584,9 +587,6 @@ static void rproc_virtio_remove(struct platform_device *pdev)
rproc_remove_subdev(rproc, &rvdev->subdev);
rproc_remove_rvdev(rvdev);
- of_reserved_mem_device_release(&pdev->dev);
- dma_release_coherent_memory(&pdev->dev);
-
put_device(&rproc->dev);
}
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index cb163766c56d5..1340be9d01101 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -347,23 +347,21 @@ static int st_rproc_probe(struct platform_device *pdev)
int enabled;
int ret, i;
- rproc = rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
+ rproc = devm_rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
if (!rproc)
return -ENOMEM;
rproc->has_iommu = false;
ddata = rproc->priv;
ddata->config = (struct st_rproc_config *)device_get_match_data(dev);
- if (!ddata->config) {
- ret = -ENODEV;
- goto free_rproc;
- }
+ if (!ddata->config)
+ return -ENODEV;
platform_set_drvdata(pdev, rproc);
ret = st_rproc_parse_dt(pdev);
if (ret)
- goto free_rproc;
+ return ret;
enabled = st_rproc_state(pdev);
if (enabled < 0) {
@@ -439,8 +437,7 @@ free_mbox:
mbox_free_channel(ddata->mbox_chan[i]);
free_clk:
clk_unprepare(ddata->clk);
-free_rproc:
- rproc_free(rproc);
+
return ret;
}
@@ -456,8 +453,6 @@ static void st_rproc_remove(struct platform_device *pdev)
for (i = 0; i < ST_RPROC_MAX_VRING * MBOX_MAX; i++)
mbox_free_channel(ddata->mbox_chan[i]);
-
- rproc_free(rproc);
}
static struct platform_driver st_rproc_driver = {
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index 4f469f0bcf8b2..88623df7d0c35 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -120,7 +120,7 @@ static int stm32_rproc_mem_alloc(struct rproc *rproc,
void *va;
dev_dbg(dev, "map memory: %pad+%zx\n", &mem->dma, mem->len);
- va = ioremap_wc(mem->dma, mem->len);
+ va = (__force void *)ioremap_wc(mem->dma, mem->len);
if (IS_ERR_OR_NULL(va)) {
dev_err(dev, "Unable to map memory region: %pad+0x%zx\n",
&mem->dma, mem->len);
@@ -137,7 +137,7 @@ static int stm32_rproc_mem_release(struct rproc *rproc,
struct rproc_mem_entry *mem)
{
dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma);
- iounmap(mem->va);
+ iounmap((__force __iomem void *)mem->va);
return 0;
}
@@ -657,7 +657,7 @@ done:
* entire area by overwriting it with the initial values stored in rproc->clean_table.
*/
*table_sz = RSC_TBL_SIZE;
- return (struct resource_table *)ddata->rsc_va;
+ return (__force struct resource_table *)ddata->rsc_va;
}
static const struct rproc_ops st_rproc_ops = {
@@ -843,7 +843,7 @@ static int stm32_rproc_probe(struct platform_device *pdev)
if (ret)
return ret;
- rproc = rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
+ rproc = devm_rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
if (!rproc)
return -ENOMEM;
@@ -897,7 +897,6 @@ free_rproc:
dev_pm_clear_wake_irq(dev);
device_init_wakeup(dev, false);
}
- rproc_free(rproc);
return ret;
}
@@ -918,7 +917,6 @@ static void stm32_rproc_remove(struct platform_device *pdev)
dev_pm_clear_wake_irq(dev);
device_init_wakeup(dev, false);
}
- rproc_free(rproc);
}
static int stm32_rproc_suspend(struct device *dev)
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index ab882e3b7130b..3555b535b1683 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -550,6 +550,13 @@ static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
return 0;
}
+static void k3_dsp_mem_release(void *data)
+{
+ struct device *dev = data;
+
+ of_reserved_mem_device_release(dev);
+}
+
static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
{
struct device *dev = kproc->dev;
@@ -579,27 +586,25 @@ static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
ERR_PTR(ret));
return ret;
}
+ ret = devm_add_action_or_reset(dev, k3_dsp_mem_release, dev);
+ if (ret)
+ return ret;
num_rmems--;
- kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
- if (!kproc->rmem) {
- ret = -ENOMEM;
- goto release_rmem;
- }
+ kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem)
+ return -ENOMEM;
/* use remaining reserved memory regions for static carveouts */
for (i = 0; i < num_rmems; i++) {
rmem_np = of_parse_phandle(np, "memory-region", i + 1);
- if (!rmem_np) {
- ret = -EINVAL;
- goto unmap_rmem;
- }
+ if (!rmem_np)
+ return -EINVAL;
rmem = of_reserved_mem_lookup(rmem_np);
if (!rmem) {
of_node_put(rmem_np);
- ret = -EINVAL;
- goto unmap_rmem;
+ return -EINVAL;
}
of_node_put(rmem_np);
@@ -607,12 +612,11 @@ static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
/* 64-bit address regions currently not supported */
kproc->rmem[i].dev_addr = (u32)rmem->base;
kproc->rmem[i].size = rmem->size;
- kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
+ kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
if (!kproc->rmem[i].cpu_addr) {
dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
i + 1, &rmem->base, &rmem->size);
- ret = -ENOMEM;
- goto unmap_rmem;
+ return -ENOMEM;
}
dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
@@ -623,25 +627,13 @@ static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
kproc->num_rmems = num_rmems;
return 0;
-
-unmap_rmem:
- for (i--; i >= 0; i--)
- iounmap(kproc->rmem[i].cpu_addr);
- kfree(kproc->rmem);
-release_rmem:
- of_reserved_mem_device_release(kproc->dev);
- return ret;
}
-static void k3_dsp_reserved_mem_exit(struct k3_dsp_rproc *kproc)
+static void k3_dsp_release_tsp(void *data)
{
- int i;
+ struct ti_sci_proc *tsp = data;
- for (i = 0; i < kproc->num_rmems; i++)
- iounmap(kproc->rmem[i].cpu_addr);
- kfree(kproc->rmem);
-
- of_reserved_mem_device_release(kproc->dev);
+ ti_sci_proc_release(tsp);
}
static
@@ -657,7 +649,7 @@ struct ti_sci_proc *k3_dsp_rproc_of_get_tsp(struct device *dev,
if (ret < 0)
return ERR_PTR(ret);
- tsp = kzalloc(sizeof(*tsp), GFP_KERNEL);
+ tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
if (!tsp)
return ERR_PTR(-ENOMEM);
@@ -680,7 +672,6 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
const char *fw_name;
bool p_state = false;
int ret = 0;
- int ret1;
data = of_device_get_match_data(dev);
if (!data)
@@ -690,8 +681,8 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "failed to parse firmware-name property\n");
- rproc = rproc_alloc(dev, dev_name(dev), &k3_dsp_rproc_ops, fw_name,
- sizeof(*kproc));
+ rproc = devm_rproc_alloc(dev, dev_name(dev), &k3_dsp_rproc_ops,
+ fw_name, sizeof(*kproc));
if (!rproc)
return -ENOMEM;
@@ -706,56 +697,46 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
kproc->dev = dev;
kproc->data = data;
- kproc->ti_sci = ti_sci_get_by_phandle(np, "ti,sci");
- if (IS_ERR(kproc->ti_sci)) {
- ret = dev_err_probe(dev, PTR_ERR(kproc->ti_sci),
- "failed to get ti-sci handle\n");
- kproc->ti_sci = NULL;
- goto free_rproc;
- }
+ kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
+ if (IS_ERR(kproc->ti_sci))
+ return dev_err_probe(dev, PTR_ERR(kproc->ti_sci),
+ "failed to get ti-sci handle\n");
ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
- if (ret) {
- dev_err_probe(dev, ret, "missing 'ti,sci-dev-id' property\n");
- goto put_sci;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "missing 'ti,sci-dev-id' property\n");
kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
- if (IS_ERR(kproc->reset)) {
- ret = dev_err_probe(dev, PTR_ERR(kproc->reset),
- "failed to get reset\n");
- goto put_sci;
- }
+ if (IS_ERR(kproc->reset))
+ return dev_err_probe(dev, PTR_ERR(kproc->reset),
+ "failed to get reset\n");
kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci);
- if (IS_ERR(kproc->tsp)) {
- ret = dev_err_probe(dev, PTR_ERR(kproc->tsp),
- "failed to construct ti-sci proc control\n");
- goto put_sci;
- }
+ if (IS_ERR(kproc->tsp))
+ return dev_err_probe(dev, PTR_ERR(kproc->tsp),
+ "failed to construct ti-sci proc control\n");
ret = ti_sci_proc_request(kproc->tsp);
if (ret < 0) {
dev_err_probe(dev, ret, "ti_sci_proc_request failed\n");
- goto free_tsp;
+ return ret;
}
+ ret = devm_add_action_or_reset(dev, k3_dsp_release_tsp, kproc->tsp);
+ if (ret)
+ return ret;
ret = k3_dsp_rproc_of_get_memories(pdev, kproc);
if (ret)
- goto release_tsp;
+ return ret;
ret = k3_dsp_reserved_mem_init(kproc);
- if (ret) {
- dev_err_probe(dev, ret, "reserved memory init failed\n");
- goto release_tsp;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "reserved memory init failed\n");
ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
NULL, &p_state);
- if (ret) {
- dev_err_probe(dev, ret, "failed to get initial state, mode cannot be determined\n");
- goto release_mem;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get initial state, mode cannot be determined\n");
/* configure J721E devices for either remoteproc or IPC-only mode */
if (p_state) {
@@ -779,8 +760,7 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
if (data->uses_lreset) {
ret = reset_control_status(kproc->reset);
if (ret < 0) {
- dev_err_probe(dev, ret, "failed to get reset status\n");
- goto release_mem;
+ return dev_err_probe(dev, ret, "failed to get reset status\n");
} else if (ret == 0) {
dev_warn(dev, "local reset is deasserted for device\n");
k3_dsp_rproc_reset(kproc);
@@ -788,31 +768,13 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
}
}
- ret = rproc_add(rproc);
- if (ret) {
- dev_err_probe(dev, ret, "failed to add register device with remoteproc core\n");
- goto release_mem;
- }
+ ret = devm_rproc_add(dev, rproc);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add register device with remoteproc core\n");
platform_set_drvdata(pdev, kproc);
return 0;
-
-release_mem:
- k3_dsp_reserved_mem_exit(kproc);
-release_tsp:
- ret1 = ti_sci_proc_release(kproc->tsp);
- if (ret1)
- dev_err(dev, "failed to release proc (%pe)\n", ERR_PTR(ret1));
-free_tsp:
- kfree(kproc->tsp);
-put_sci:
- ret1 = ti_sci_put_handle(kproc->ti_sci);
- if (ret1)
- dev_err(dev, "failed to put ti_sci handle (%pe)\n", ERR_PTR(ret1));
-free_rproc:
- rproc_free(rproc);
- return ret;
}
static void k3_dsp_rproc_remove(struct platform_device *pdev)
@@ -824,27 +786,9 @@ static void k3_dsp_rproc_remove(struct platform_device *pdev)
if (rproc->state == RPROC_ATTACHED) {
ret = rproc_detach(rproc);
- if (ret) {
- /* Note this error path leaks resources */
+ if (ret)
dev_err(dev, "failed to detach proc (%pe)\n", ERR_PTR(ret));
- return;
- }
}
-
- rproc_del(kproc->rproc);
-
- ret = ti_sci_proc_release(kproc->tsp);
- if (ret)
- dev_err(dev, "failed to release proc (%pe)\n", ERR_PTR(ret));
-
- kfree(kproc->tsp);
-
- ret = ti_sci_put_handle(kproc->ti_sci);
- if (ret)
- dev_err(dev, "failed to put ti_sci handle (%pe)\n", ERR_PTR(ret));
-
- k3_dsp_reserved_mem_exit(kproc);
- rproc_free(kproc->rproc);
}
static const struct k3_dsp_mem_data c66_mems[] = {
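A minimal sketch (hypothetical resource and helper names) of the devm_add_action_or_reset() pattern the K3 DSP probe now relies on: the release callback runs automatically on probe failure and on unbind, which is what allows the goto-based unwind labels above to be dropped.

static void foo_release(void *data)
{
	foo_resource_put(data);			/* hypothetical cleanup helper */
}

static int foo_acquire(struct device *dev, struct foo_resource *res)
{
	int ret = foo_resource_get(res);	/* hypothetical acquire helper */

	if (ret)
		return ret;

	/* If registering the action fails, foo_release(res) is called right away. */
	return devm_add_action_or_reset(dev, foo_release, res);
}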
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index ccd59ddd76100..85b27c42cf65b 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -66,6 +66,15 @@ config RESET_BRCMSTB_RESCAL
This enables the RESCAL reset controller for SATA, PCIe0, or PCIe1 on
BCM7216.
+config RESET_GPIO
+ tristate "GPIO reset controller"
+ help
+ This enables a generic reset controller for resets attached via
+ GPIOs. Typically for OF platforms this driver expects the "reset-gpios"
+ property.
+
+ If compiled as module, it will be called reset-gpio.
+
config RESET_HSDK
bool "Synopsys HSDK Reset Driver"
depends on HAS_IOMEM
@@ -213,7 +222,7 @@ config RESET_SCMI
config RESET_SIMPLE
bool "Simple Reset Controller Driver" if COMPILE_TEST || EXPERT
- default ARCH_ASPEED || ARCH_BCMBCA || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || (ARCH_INTEL_SOCFPGA && ARM64) || ARCH_SUNXI || ARC
+ default ARCH_ASPEED || ARCH_BCMBCA || ARCH_BITMAIN || ARCH_REALTEK || ARCH_SOPHGO || ARCH_STM32 || (ARCH_INTEL_SOCFPGA && ARM64) || ARCH_SUNXI || ARC
depends on HAS_IOMEM
help
This enables a simple reset controller driver for reset lines that
@@ -228,6 +237,7 @@ config RESET_SIMPLE
- RCC reset controller in STM32 MCUs
- Allwinner SoCs
- SiFive FU740 SoCs
+ - Sophgo SoCs
config RESET_SOCFPGA
bool "SoCFPGA Reset Driver" if COMPILE_TEST && (!ARM || !ARCH_INTEL_SOCFPGA)
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 8270da8a4baa6..fd8b49fa46fc8 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_RESET_BCM6345) += reset-bcm6345.o
obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
obj-$(CONFIG_RESET_BRCMSTB) += reset-brcmstb.o
obj-$(CONFIG_RESET_BRCMSTB_RESCAL) += reset-brcmstb-rescal.o
+obj-$(CONFIG_RESET_GPIO) += reset-gpio.o
obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
obj-$(CONFIG_RESET_INTEL_GW) += reset-intel-gw.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 4d5a78d3c085b..dba74e857be62 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -5,14 +5,19 @@
* Copyright 2013 Philipp Zabel, Pengutronix
*/
#include <linux/atomic.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/kref.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+#include <linux/idr.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/acpi.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
@@ -23,6 +28,11 @@ static LIST_HEAD(reset_controller_list);
static DEFINE_MUTEX(reset_lookup_mutex);
static LIST_HEAD(reset_lookup_list);
+/* Protects reset_gpio_lookup_list */
+static DEFINE_MUTEX(reset_gpio_lookup_mutex);
+static LIST_HEAD(reset_gpio_lookup_list);
+static DEFINE_IDA(reset_gpio_ida);
+
/**
* struct reset_control - a reset control
* @rcdev: a pointer to the reset controller device
@@ -63,6 +73,16 @@ struct reset_control_array {
struct reset_control *rstc[] __counted_by(num_rstcs);
};
+/**
+ * struct reset_gpio_lookup - lookup key for ad-hoc created reset-gpio devices
+ * @of_args: phandle to the reset controller with all the args like GPIO number
+ * @list: list entry for the reset_gpio_lookup_list
+ */
+struct reset_gpio_lookup {
+ struct of_phandle_args of_args;
+ struct list_head list;
+};
+
static const char *rcdev_name(struct reset_controller_dev *rcdev)
{
if (rcdev->dev)
@@ -71,6 +91,9 @@ static const char *rcdev_name(struct reset_controller_dev *rcdev)
if (rcdev->of_node)
return rcdev->of_node->full_name;
+ if (rcdev->of_args)
+ return rcdev->of_args->np->full_name;
+
return NULL;
}
@@ -99,6 +122,9 @@ static int of_reset_simple_xlate(struct reset_controller_dev *rcdev,
*/
int reset_controller_register(struct reset_controller_dev *rcdev)
{
+ if (rcdev->of_node && rcdev->of_args)
+ return -EINVAL;
+
if (!rcdev->of_xlate) {
rcdev->of_reset_n_cells = 1;
rcdev->of_xlate = of_reset_simple_xlate;
@@ -813,12 +839,171 @@ static void __reset_control_put_internal(struct reset_control *rstc)
kref_put(&rstc->refcnt, __reset_control_release);
}
+static int __reset_add_reset_gpio_lookup(int id, struct device_node *np,
+ unsigned int gpio,
+ unsigned int of_flags)
+{
+ const struct fwnode_handle *fwnode = of_fwnode_handle(np);
+ unsigned int lookup_flags;
+ const char *label_tmp;
+
+ /*
+ * Later we map GPIO flags between OF and Linux; however, not all
+ * constants from include/dt-bindings/gpio/gpio.h and
+ * include/linux/gpio/machine.h match each other.
+ */
+ if (of_flags > GPIO_ACTIVE_LOW) {
+ pr_err("reset-gpio code does not support GPIO flags %u for GPIO %u\n",
+ of_flags, gpio);
+ return -EINVAL;
+ }
+
+ struct gpio_device *gdev __free(gpio_device_put) = gpio_device_find_by_fwnode(fwnode);
+ if (!gdev)
+ return -EPROBE_DEFER;
+
+ label_tmp = gpio_device_get_label(gdev);
+ if (!label_tmp)
+ return -EINVAL;
+
+ char *label __free(kfree) = kstrdup(label_tmp, GFP_KERNEL);
+ if (!label)
+ return -ENOMEM;
+
+ /* Size: one lookup entry plus sentinel */
+ struct gpiod_lookup_table *lookup __free(kfree) = kzalloc(struct_size(lookup, table, 2),
+ GFP_KERNEL);
+ if (!lookup)
+ return -ENOMEM;
+
+ lookup->dev_id = kasprintf(GFP_KERNEL, "reset-gpio.%d", id);
+ if (!lookup->dev_id)
+ return -ENOMEM;
+
+ lookup_flags = GPIO_PERSISTENT;
+ lookup_flags |= of_flags & GPIO_ACTIVE_LOW;
+ lookup->table[0] = GPIO_LOOKUP(no_free_ptr(label), gpio, "reset",
+ lookup_flags);
+
+ /* Not freed on success, because it is persistent subsystem data. */
+ gpiod_add_lookup_table(no_free_ptr(lookup));
+
+ return 0;
+}
+
+/*
+ * @args: phandle to the GPIO provider with all the args like GPIO number
+ */
+static int __reset_add_reset_gpio_device(const struct of_phandle_args *args)
+{
+ struct reset_gpio_lookup *rgpio_dev;
+ struct platform_device *pdev;
+ int id, ret;
+
+ /*
+ * Currently only #gpio-cells=2 is supported with the meaning of:
+ * args[0]: GPIO number
+ * args[1]: GPIO flags
+ * TODO: Handle other cases.
+ */
+ if (args->args_count != 2)
+ return -ENOENT;
+
+ /*
+ * Registering reset-gpio device might cause immediate
+ * bind, resulting in its probe() registering new reset controller thus
+ * taking reset_list_mutex lock via reset_controller_register().
+ */
+ lockdep_assert_not_held(&reset_list_mutex);
+
+ mutex_lock(&reset_gpio_lookup_mutex);
+
+ list_for_each_entry(rgpio_dev, &reset_gpio_lookup_list, list) {
+ if (args->np == rgpio_dev->of_args.np) {
+ if (of_phandle_args_equal(args, &rgpio_dev->of_args))
+ goto out; /* Already on the list, done */
+ }
+ }
+
+ id = ida_alloc(&reset_gpio_ida, GFP_KERNEL);
+ if (id < 0) {
+ ret = id;
+ goto err_unlock;
+ }
+
+ /* Not freed on success, because it is persistent subsystem data. */
+ rgpio_dev = kzalloc(sizeof(*rgpio_dev), GFP_KERNEL);
+ if (!rgpio_dev) {
+ ret = -ENOMEM;
+ goto err_ida_free;
+ }
+
+ ret = __reset_add_reset_gpio_lookup(id, args->np, args->args[0],
+ args->args[1]);
+ if (ret < 0)
+ goto err_kfree;
+
+ rgpio_dev->of_args = *args;
+ /*
+ * We keep the device_node reference, but of_args.np is put at the end
+ * of __of_reset_control_get(), so get it one more time.
+ * Hold reference as long as rgpio_dev memory is valid.
+ */
+ of_node_get(rgpio_dev->of_args.np);
+ pdev = platform_device_register_data(NULL, "reset-gpio", id,
+ &rgpio_dev->of_args,
+ sizeof(rgpio_dev->of_args));
+ ret = PTR_ERR_OR_ZERO(pdev);
+ if (ret)
+ goto err_put;
+
+ list_add(&rgpio_dev->list, &reset_gpio_lookup_list);
+
+out:
+ mutex_unlock(&reset_gpio_lookup_mutex);
+
+ return 0;
+
+err_put:
+ of_node_put(rgpio_dev->of_args.np);
+err_kfree:
+ kfree(rgpio_dev);
+err_ida_free:
+ ida_free(&reset_gpio_ida, id);
+err_unlock:
+ mutex_unlock(&reset_gpio_lookup_mutex);
+
+ return ret;
+}
+
+static struct reset_controller_dev *__reset_find_rcdev(const struct of_phandle_args *args,
+ bool gpio_fallback)
+{
+ struct reset_controller_dev *rcdev;
+
+ lockdep_assert_held(&reset_list_mutex);
+
+ list_for_each_entry(rcdev, &reset_controller_list, list) {
+ if (gpio_fallback) {
+ if (rcdev->of_args && of_phandle_args_equal(args,
+ rcdev->of_args))
+ return rcdev;
+ } else {
+ if (args->np == rcdev->of_node)
+ return rcdev;
+ }
+ }
+
+ return NULL;
+}
+
struct reset_control *
__of_reset_control_get(struct device_node *node, const char *id, int index,
bool shared, bool optional, bool acquired)
{
+ bool gpio_fallback = false;
struct reset_control *rstc;
- struct reset_controller_dev *r, *rcdev;
+ struct reset_controller_dev *rcdev;
struct of_phandle_args args;
int rstc_id;
int ret;
@@ -839,39 +1024,52 @@ __of_reset_control_get(struct device_node *node, const char *id, int index,
index, &args);
if (ret == -EINVAL)
return ERR_PTR(ret);
- if (ret)
- return optional ? NULL : ERR_PTR(ret);
+ if (ret) {
+ if (!IS_ENABLED(CONFIG_RESET_GPIO))
+ return optional ? NULL : ERR_PTR(ret);
- mutex_lock(&reset_list_mutex);
- rcdev = NULL;
- list_for_each_entry(r, &reset_controller_list, list) {
- if (args.np == r->of_node) {
- rcdev = r;
- break;
+ /*
+ * There can be only one reset-gpio for regular devices, so
+ * don't bother with the "reset-gpios" phandle index.
+ */
+ ret = of_parse_phandle_with_args(node, "reset-gpios", "#gpio-cells",
+ 0, &args);
+ if (ret)
+ return optional ? NULL : ERR_PTR(ret);
+
+ gpio_fallback = true;
+
+ ret = __reset_add_reset_gpio_device(&args);
+ if (ret) {
+ rstc = ERR_PTR(ret);
+ goto out_put;
}
}
+ mutex_lock(&reset_list_mutex);
+ rcdev = __reset_find_rcdev(&args, gpio_fallback);
if (!rcdev) {
rstc = ERR_PTR(-EPROBE_DEFER);
- goto out;
+ goto out_unlock;
}
if (WARN_ON(args.args_count != rcdev->of_reset_n_cells)) {
rstc = ERR_PTR(-EINVAL);
- goto out;
+ goto out_unlock;
}
rstc_id = rcdev->of_xlate(rcdev, &args);
if (rstc_id < 0) {
rstc = ERR_PTR(rstc_id);
- goto out;
+ goto out_unlock;
}
/* reset_list_mutex also protects the rcdev's reset_control list */
rstc = __reset_control_get_internal(rcdev, rstc_id, shared, acquired);
-out:
+out_unlock:
mutex_unlock(&reset_list_mutex);
+out_put:
of_node_put(args.np);
return rstc;
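A hypothetical consumer sketch of the new fallback path: when CONFIG_RESET_GPIO is enabled and a node has no "resets" phandle but does carry "reset-gpios", the core creates an ad-hoc reset-gpio controller, so consumers keep using the regular reset API unchanged.

#include <linux/delay.h>
#include <linux/reset.h>

static int foo_hw_reset(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);
	usleep_range(1000, 2000);	/* assumed reset pulse width */
	return reset_control_deassert(rst);
}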
diff --git a/drivers/reset/reset-gpio.c b/drivers/reset/reset-gpio.c
new file mode 100644
index 0000000000000..2290b25b67035
--- /dev/null
+++ b/drivers/reset/reset-gpio.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+
+struct reset_gpio_priv {
+ struct reset_controller_dev rc;
+ struct gpio_desc *reset;
+};
+
+static inline struct reset_gpio_priv
+*rc_to_reset_gpio(struct reset_controller_dev *rc)
+{
+ return container_of(rc, struct reset_gpio_priv, rc);
+}
+
+static int reset_gpio_assert(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct reset_gpio_priv *priv = rc_to_reset_gpio(rc);
+
+ gpiod_set_value_cansleep(priv->reset, 1);
+
+ return 0;
+}
+
+static int reset_gpio_deassert(struct reset_controller_dev *rc,
+ unsigned long id)
+{
+ struct reset_gpio_priv *priv = rc_to_reset_gpio(rc);
+
+ gpiod_set_value_cansleep(priv->reset, 0);
+
+ return 0;
+}
+
+static int reset_gpio_status(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct reset_gpio_priv *priv = rc_to_reset_gpio(rc);
+
+ return gpiod_get_value_cansleep(priv->reset);
+}
+
+static const struct reset_control_ops reset_gpio_ops = {
+ .assert = reset_gpio_assert,
+ .deassert = reset_gpio_deassert,
+ .status = reset_gpio_status,
+};
+
+static int reset_gpio_of_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ return reset_spec->args[0];
+}
+
+static void reset_gpio_of_node_put(void *data)
+{
+ of_node_put(data);
+}
+
+static int reset_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct of_phandle_args *platdata = dev_get_platdata(dev);
+ struct reset_gpio_priv *priv;
+ int ret;
+
+ if (!platdata)
+ return -EINVAL;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, &priv->rc);
+
+ priv->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset))
+ return dev_err_probe(dev, PTR_ERR(priv->reset),
+ "Could not get reset gpios\n");
+
+ priv->rc.ops = &reset_gpio_ops;
+ priv->rc.owner = THIS_MODULE;
+ priv->rc.dev = dev;
+ priv->rc.of_args = platdata;
+ ret = devm_add_action_or_reset(dev, reset_gpio_of_node_put,
+ priv->rc.of_node);
+ if (ret)
+ return ret;
+
+ /* Cells to match the GPIO specifier, but it's not really used */
+ priv->rc.of_reset_n_cells = 2;
+ priv->rc.of_xlate = reset_gpio_of_xlate;
+ priv->rc.nr_resets = 1;
+
+ return devm_reset_controller_register(dev, &priv->rc);
+}
+
+static const struct platform_device_id reset_gpio_ids[] = {
+ { .name = "reset-gpio", },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, reset_gpio_ids);
+
+static struct platform_driver reset_gpio_driver = {
+ .probe = reset_gpio_probe,
+ .id_table = reset_gpio_ids,
+ .driver = {
+ .name = "reset-gpio",
+ },
+};
+module_platform_driver(reset_gpio_driver);
+
+MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>");
+MODULE_DESCRIPTION("Generic GPIO reset driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
index 818cabcc9fb75..2760678398308 100644
--- a/drivers/reset/reset-simple.c
+++ b/drivers/reset/reset-simple.c
@@ -151,6 +151,8 @@ static const struct of_device_id reset_simple_dt_ids[] = {
{ .compatible = "snps,dw-high-reset" },
{ .compatible = "snps,dw-low-reset",
.data = &reset_simple_active_low },
+ { .compatible = "sophgo,sg2042-reset",
+ .data = &reset_simple_active_low },
{ /* sentinel */ },
};
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index 09833ad05da75..1cb8d7474428a 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -399,8 +399,8 @@ static void rpmsg_eptdev_release_device(struct device *dev)
{
struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);
- ida_simple_remove(&rpmsg_ept_ida, dev->id);
- ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
+ ida_free(&rpmsg_ept_ida, dev->id);
+ ida_free(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
kfree(eptdev);
}
@@ -441,12 +441,12 @@ static int rpmsg_chrdev_eptdev_add(struct rpmsg_eptdev *eptdev, struct rpmsg_cha
eptdev->chinfo = chinfo;
- ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL);
+ ret = ida_alloc_max(&rpmsg_minor_ida, RPMSG_DEV_MAX - 1, GFP_KERNEL);
if (ret < 0)
goto free_eptdev;
dev->devt = MKDEV(MAJOR(rpmsg_major), ret);
- ret = ida_simple_get(&rpmsg_ept_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&rpmsg_ept_ida, GFP_KERNEL);
if (ret < 0)
goto free_minor_ida;
dev->id = ret;
@@ -462,9 +462,9 @@ static int rpmsg_chrdev_eptdev_add(struct rpmsg_eptdev *eptdev, struct rpmsg_cha
return ret;
free_ept_ida:
- ida_simple_remove(&rpmsg_ept_ida, dev->id);
+ ida_free(&rpmsg_ept_ida, dev->id);
free_minor_ida:
- ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
+ ida_free(&rpmsg_minor_ida, MINOR(dev->devt));
free_eptdev:
put_device(dev);
kfree(eptdev);
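Reference sketch of the ida_simple_*() to ida_*() conversion above: ida_alloc_max() takes an inclusive maximum, hence RPMSG_DEV_MAX - 1 where ida_simple_get() took an exclusive upper bound of RPMSG_DEV_MAX.

static DEFINE_IDA(foo_ida);

static int foo_get_id(void)
{
	/* old: ida_simple_get(&foo_ida, 0, 128, GFP_KERNEL); */
	return ida_alloc_max(&foo_ida, 128 - 1, GFP_KERNEL);
}

static void foo_put_id(int id)
{
	/* old: ida_simple_remove(&foo_ida, id); */
	ida_free(&foo_ida, id);
}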
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 8abc7d022ff71..4295c01a2861b 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -605,7 +605,7 @@ static void rpmsg_dev_remove(struct device *dev)
rpmsg_destroy_ept(rpdev->ept);
}
-static struct bus_type rpmsg_bus = {
+static const struct bus_type rpmsg_bus = {
.name = "rpmsg",
.match = rpmsg_dev_match,
.dev_groups = rpmsg_dev_groups,
diff --git a/drivers/rpmsg/rpmsg_ctrl.c b/drivers/rpmsg/rpmsg_ctrl.c
index 4332538356908..c312794ba4b3f 100644
--- a/drivers/rpmsg/rpmsg_ctrl.c
+++ b/drivers/rpmsg/rpmsg_ctrl.c
@@ -130,8 +130,8 @@ static void rpmsg_ctrldev_release_device(struct device *dev)
{
struct rpmsg_ctrldev *ctrldev = dev_to_ctrldev(dev);
- ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
- ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
+ ida_free(&rpmsg_ctrl_ida, dev->id);
+ ida_free(&rpmsg_minor_ida, MINOR(dev->devt));
kfree(ctrldev);
}
@@ -156,12 +156,12 @@ static int rpmsg_ctrldev_probe(struct rpmsg_device *rpdev)
cdev_init(&ctrldev->cdev, &rpmsg_ctrldev_fops);
ctrldev->cdev.owner = THIS_MODULE;
- ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL);
+ ret = ida_alloc_max(&rpmsg_minor_ida, RPMSG_DEV_MAX - 1, GFP_KERNEL);
if (ret < 0)
goto free_ctrldev;
dev->devt = MKDEV(MAJOR(rpmsg_major), ret);
- ret = ida_simple_get(&rpmsg_ctrl_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&rpmsg_ctrl_ida, GFP_KERNEL);
if (ret < 0)
goto free_minor_ida;
dev->id = ret;
@@ -179,9 +179,9 @@ static int rpmsg_ctrldev_probe(struct rpmsg_device *rpdev)
return ret;
free_ctrl_ida:
- ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
+ ida_free(&rpmsg_ctrl_ida, dev->id);
free_minor_ida:
- ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
+ ida_free(&rpmsg_minor_ida, MINOR(dev->devt));
free_ctrldev:
put_device(dev);
kfree(ctrldev);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e37a4341f442d..c63e32d012f23 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1858,7 +1858,8 @@ config RTC_DRV_MT2712
config RTC_DRV_MT6397
tristate "MediaTek PMIC based RTC"
- depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN)
+ depends on MFD_MT6397 || COMPILE_TEST
+ select IRQ_DOMAIN
help
This selects the MediaTek(R) RTC driver. RTC is part of MediaTek
MT6397 PMIC. You should enable MT6397 PMIC MFD before select
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 921ee18279743..e31fa0ad127e9 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -21,7 +21,6 @@
#include "rtc-core.h"
static DEFINE_IDA(rtc_ida);
-struct class *rtc_class;
static void rtc_device_release(struct device *dev)
{
@@ -199,6 +198,11 @@ static SIMPLE_DEV_PM_OPS(rtc_class_dev_pm_ops, rtc_suspend, rtc_resume);
#define RTC_CLASS_DEV_PM_OPS NULL
#endif
+const struct class rtc_class = {
+ .name = "rtc",
+ .pm = RTC_CLASS_DEV_PM_OPS,
+};
+
/* Ensure the caller will set the id before releasing the device */
static struct rtc_device *rtc_allocate_device(void)
{
@@ -220,7 +224,7 @@ static struct rtc_device *rtc_allocate_device(void)
rtc->irq_freq = 1;
rtc->max_user_freq = 64;
- rtc->dev.class = rtc_class;
+ rtc->dev.class = &rtc_class;
rtc->dev.groups = rtc_get_dev_attribute_groups();
rtc->dev.release = rtc_device_release;
@@ -475,13 +479,14 @@ EXPORT_SYMBOL_GPL(devm_rtc_device_register);
static int __init rtc_init(void)
{
- rtc_class = class_create("rtc");
- if (IS_ERR(rtc_class)) {
- pr_err("couldn't create class\n");
- return PTR_ERR(rtc_class);
- }
- rtc_class->pm = RTC_CLASS_DEV_PM_OPS;
+ int err;
+
+ err = class_register(&rtc_class);
+ if (err)
+ return err;
+
rtc_dev_init();
+
return 0;
}
subsys_initcall(rtc_init);
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 1b63111cdda2e..5faafb4aa55cc 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -696,7 +696,7 @@ struct rtc_device *rtc_class_open(const char *name)
struct device *dev;
struct rtc_device *rtc = NULL;
- dev = class_find_device_by_name(rtc_class, name);
+ dev = class_find_device_by_name(&rtc_class, name);
if (dev)
rtc = to_rtc_device(dev);
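
The rtc class conversion above follows the tree-wide pattern of turning a dynamically created class into a const struct class that is registered with class_register() and referenced by address (&rtc_class). A generic sketch of that pattern, with illustrative example_* names that are not part of the patch:

#include <linux/device/class.h>
#include <linux/module.h>

static const struct class example_class = {
	.name = "example",
};

static int __init example_init(void)
{
	/* class_register() returns an errno directly, so no ERR_PTR dance. */
	return class_register(&example_class);
}

static void __exit example_exit(void)
{
	class_unregister(&example_class);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
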
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 1109cad838384..8b087d9556bee 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -22,26 +22,24 @@
#include <linux/io.h>
#include <linux/module.h>
-enum ds1511reg {
- DS1511_SEC = 0x0,
- DS1511_MIN = 0x1,
- DS1511_HOUR = 0x2,
- DS1511_DOW = 0x3,
- DS1511_DOM = 0x4,
- DS1511_MONTH = 0x5,
- DS1511_YEAR = 0x6,
- DS1511_CENTURY = 0x7,
- DS1511_AM1_SEC = 0x8,
- DS1511_AM2_MIN = 0x9,
- DS1511_AM3_HOUR = 0xa,
- DS1511_AM4_DATE = 0xb,
- DS1511_WD_MSEC = 0xc,
- DS1511_WD_SEC = 0xd,
- DS1511_CONTROL_A = 0xe,
- DS1511_CONTROL_B = 0xf,
- DS1511_RAMADDR_LSB = 0x10,
- DS1511_RAMDATA = 0x13
-};
+#define DS1511_SEC 0x0
+#define DS1511_MIN 0x1
+#define DS1511_HOUR 0x2
+#define DS1511_DOW 0x3
+#define DS1511_DOM 0x4
+#define DS1511_MONTH 0x5
+#define DS1511_YEAR 0x6
+#define DS1511_CENTURY 0x7
+#define DS1511_AM1_SEC 0x8
+#define DS1511_AM2_MIN 0x9
+#define DS1511_AM3_HOUR 0xa
+#define DS1511_AM4_DATE 0xb
+#define DS1511_WD_MSEC 0xc
+#define DS1511_WD_SEC 0xd
+#define DS1511_CONTROL_A 0xe
+#define DS1511_CONTROL_B 0xf
+#define DS1511_RAMADDR_LSB 0x10
+#define DS1511_RAMDATA 0x13
#define DS1511_BLF1 0x80
#define DS1511_BLF2 0x40
@@ -61,35 +59,10 @@ enum ds1511reg {
#define DS1511_WDS 0x01
#define DS1511_RAM_MAX 0x100
-#define RTC_CMD DS1511_CONTROL_B
-#define RTC_CMD1 DS1511_CONTROL_A
-
-#define RTC_ALARM_SEC DS1511_AM1_SEC
-#define RTC_ALARM_MIN DS1511_AM2_MIN
-#define RTC_ALARM_HOUR DS1511_AM3_HOUR
-#define RTC_ALARM_DATE DS1511_AM4_DATE
-
-#define RTC_SEC DS1511_SEC
-#define RTC_MIN DS1511_MIN
-#define RTC_HOUR DS1511_HOUR
-#define RTC_DOW DS1511_DOW
-#define RTC_DOM DS1511_DOM
-#define RTC_MON DS1511_MONTH
-#define RTC_YEAR DS1511_YEAR
-#define RTC_CENTURY DS1511_CENTURY
-
-#define RTC_TIE DS1511_TIE
-#define RTC_TE DS1511_TE
-
-struct rtc_plat_data {
+struct ds1511_data {
struct rtc_device *rtc;
void __iomem *ioaddr; /* virtual base address */
int irq;
- unsigned int irqen;
- int alrm_sec;
- int alrm_min;
- int alrm_hour;
- int alrm_mday;
spinlock_t lock;
};
@@ -98,95 +71,33 @@ static DEFINE_SPINLOCK(ds1511_lock);
static __iomem char *ds1511_base;
static u32 reg_spacing = 1;
-static noinline void
-rtc_write(uint8_t val, uint32_t reg)
+static void rtc_write(uint8_t val, uint32_t reg)
{
writeb(val, ds1511_base + (reg * reg_spacing));
}
-static noinline uint8_t
-rtc_read(enum ds1511reg reg)
+static uint8_t rtc_read(uint32_t reg)
{
return readb(ds1511_base + (reg * reg_spacing));
}
-static inline void
-rtc_disable_update(void)
+static void rtc_disable_update(void)
{
- rtc_write((rtc_read(RTC_CMD) & ~RTC_TE), RTC_CMD);
+ rtc_write((rtc_read(DS1511_CONTROL_B) & ~DS1511_TE), DS1511_CONTROL_B);
}
-static void
-rtc_enable_update(void)
+static void rtc_enable_update(void)
{
- rtc_write((rtc_read(RTC_CMD) | RTC_TE), RTC_CMD);
-}
-
-/*
- * #define DS1511_WDOG_RESET_SUPPORT
- *
- * Uncomment this if you want to use these routines in
- * some platform code.
- */
-#ifdef DS1511_WDOG_RESET_SUPPORT
-/*
- * just enough code to set the watchdog timer so that it
- * will reboot the system
- */
-void
-ds1511_wdog_set(unsigned long deciseconds)
-{
- /*
- * the wdog timer can take 99.99 seconds
- */
- deciseconds %= 10000;
- /*
- * set the wdog values in the wdog registers
- */
- rtc_write(bin2bcd(deciseconds % 100), DS1511_WD_MSEC);
- rtc_write(bin2bcd(deciseconds / 100), DS1511_WD_SEC);
- /*
- * set wdog enable and wdog 'steering' bit to issue a reset
- */
- rtc_write(rtc_read(RTC_CMD) | DS1511_WDE | DS1511_WDS, RTC_CMD);
-}
-
-void
-ds1511_wdog_disable(void)
-{
- /*
- * clear wdog enable and wdog 'steering' bits
- */
- rtc_write(rtc_read(RTC_CMD) & ~(DS1511_WDE | DS1511_WDS), RTC_CMD);
- /*
- * clear the wdog counter
- */
- rtc_write(0, DS1511_WD_MSEC);
- rtc_write(0, DS1511_WD_SEC);
+ rtc_write((rtc_read(DS1511_CONTROL_B) | DS1511_TE), DS1511_CONTROL_B);
}
-#endif
-/*
- * set the rtc chip's idea of the time.
- * stupidly, some callers call with year unmolested;
- * and some call with year = year - 1900. thanks.
- */
static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
{
u8 mon, day, dow, hrs, min, sec, yrs, cen;
unsigned long flags;
- /*
- * won't have to change this for a while
- */
- if (rtc_tm->tm_year < 1900)
- rtc_tm->tm_year += 1900;
-
- if (rtc_tm->tm_year < 1970)
- return -EINVAL;
-
yrs = rtc_tm->tm_year % 100;
- cen = rtc_tm->tm_year / 100;
+ cen = 19 + rtc_tm->tm_year / 100;
mon = rtc_tm->tm_mon + 1; /* tm_mon starts at zero */
day = rtc_tm->tm_mday;
dow = rtc_tm->tm_wday & 0x7; /* automatic BCD */
@@ -194,15 +105,6 @@ static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
min = rtc_tm->tm_min;
sec = rtc_tm->tm_sec;
- if ((mon > 12) || (day == 0))
- return -EINVAL;
-
- if (day > rtc_month_days(rtc_tm->tm_mon, rtc_tm->tm_year))
- return -EINVAL;
-
- if ((hrs >= 24) || (min >= 60) || (sec >= 60))
- return -EINVAL;
-
/*
* each register is a different number of valid bits
*/
@@ -216,14 +118,14 @@ static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
spin_lock_irqsave(&ds1511_lock, flags);
rtc_disable_update();
- rtc_write(cen, RTC_CENTURY);
- rtc_write(yrs, RTC_YEAR);
- rtc_write((rtc_read(RTC_MON) & 0xe0) | mon, RTC_MON);
- rtc_write(day, RTC_DOM);
- rtc_write(hrs, RTC_HOUR);
- rtc_write(min, RTC_MIN);
- rtc_write(sec, RTC_SEC);
- rtc_write(dow, RTC_DOW);
+ rtc_write(cen, DS1511_CENTURY);
+ rtc_write(yrs, DS1511_YEAR);
+ rtc_write((rtc_read(DS1511_MONTH) & 0xe0) | mon, DS1511_MONTH);
+ rtc_write(day, DS1511_DOM);
+ rtc_write(hrs, DS1511_HOUR);
+ rtc_write(min, DS1511_MIN);
+ rtc_write(sec, DS1511_SEC);
+ rtc_write(dow, DS1511_DOW);
rtc_enable_update();
spin_unlock_irqrestore(&ds1511_lock, flags);
@@ -238,14 +140,14 @@ static int ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
spin_lock_irqsave(&ds1511_lock, flags);
rtc_disable_update();
- rtc_tm->tm_sec = rtc_read(RTC_SEC) & 0x7f;
- rtc_tm->tm_min = rtc_read(RTC_MIN) & 0x7f;
- rtc_tm->tm_hour = rtc_read(RTC_HOUR) & 0x3f;
- rtc_tm->tm_mday = rtc_read(RTC_DOM) & 0x3f;
- rtc_tm->tm_wday = rtc_read(RTC_DOW) & 0x7;
- rtc_tm->tm_mon = rtc_read(RTC_MON) & 0x1f;
- rtc_tm->tm_year = rtc_read(RTC_YEAR) & 0x7f;
- century = rtc_read(RTC_CENTURY);
+ rtc_tm->tm_sec = rtc_read(DS1511_SEC) & 0x7f;
+ rtc_tm->tm_min = rtc_read(DS1511_MIN) & 0x7f;
+ rtc_tm->tm_hour = rtc_read(DS1511_HOUR) & 0x3f;
+ rtc_tm->tm_mday = rtc_read(DS1511_DOM) & 0x3f;
+ rtc_tm->tm_wday = rtc_read(DS1511_DOW) & 0x7;
+ rtc_tm->tm_mon = rtc_read(DS1511_MONTH) & 0x1f;
+ rtc_tm->tm_year = rtc_read(DS1511_YEAR) & 0x7f;
+ century = rtc_read(DS1511_CENTURY);
rtc_enable_update();
spin_unlock_irqrestore(&ds1511_lock, flags);
@@ -271,106 +173,67 @@ static int ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
return 0;
}
-/*
- * write the alarm register settings
- *
- * we only have the use to interrupt every second, otherwise
- * known as the update interrupt, or the interrupt if the whole
- * date/hours/mins/secs matches. the ds1511 has many more
- * permutations, but the kernel doesn't.
- */
-static void
-ds1511_rtc_update_alarm(struct rtc_plat_data *pdata)
+static void ds1511_rtc_alarm_enable(unsigned int enabled)
{
- unsigned long flags;
-
- spin_lock_irqsave(&pdata->lock, flags);
- rtc_write(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
- 0x80 : bin2bcd(pdata->alrm_mday) & 0x3f,
- RTC_ALARM_DATE);
- rtc_write(pdata->alrm_hour < 0 || (pdata->irqen & RTC_UF) ?
- 0x80 : bin2bcd(pdata->alrm_hour) & 0x3f,
- RTC_ALARM_HOUR);
- rtc_write(pdata->alrm_min < 0 || (pdata->irqen & RTC_UF) ?
- 0x80 : bin2bcd(pdata->alrm_min) & 0x7f,
- RTC_ALARM_MIN);
- rtc_write(pdata->alrm_sec < 0 || (pdata->irqen & RTC_UF) ?
- 0x80 : bin2bcd(pdata->alrm_sec) & 0x7f,
- RTC_ALARM_SEC);
- rtc_write(rtc_read(RTC_CMD) | (pdata->irqen ? RTC_TIE : 0), RTC_CMD);
- rtc_read(RTC_CMD1); /* clear interrupts */
- spin_unlock_irqrestore(&pdata->lock, flags);
+ rtc_write(rtc_read(DS1511_CONTROL_B) | (enabled ? DS1511_TIE : 0), DS1511_CONTROL_B);
}
-static int
-ds1511_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+static int ds1511_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct rtc_plat_data *pdata = dev_get_drvdata(dev);
+ struct ds1511_data *ds1511 = dev_get_drvdata(dev);
+ unsigned long flags;
- if (pdata->irq <= 0)
- return -EINVAL;
+ spin_lock_irqsave(&ds1511->lock, flags);
+ rtc_write(bin2bcd(alrm->time.tm_mday) & 0x3f, DS1511_AM4_DATE);
+ rtc_write(bin2bcd(alrm->time.tm_hour) & 0x3f, DS1511_AM3_HOUR);
+ rtc_write(bin2bcd(alrm->time.tm_min) & 0x7f, DS1511_AM2_MIN);
+ rtc_write(bin2bcd(alrm->time.tm_sec) & 0x7f, DS1511_AM1_SEC);
+ ds1511_rtc_alarm_enable(alrm->enabled);
- pdata->alrm_mday = alrm->time.tm_mday;
- pdata->alrm_hour = alrm->time.tm_hour;
- pdata->alrm_min = alrm->time.tm_min;
- pdata->alrm_sec = alrm->time.tm_sec;
- if (alrm->enabled)
- pdata->irqen |= RTC_AF;
+ rtc_read(DS1511_CONTROL_A); /* clear interrupts */
+ spin_unlock_irqrestore(&ds1511->lock, flags);
- ds1511_rtc_update_alarm(pdata);
return 0;
}
-static int
-ds1511_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+static int ds1511_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct rtc_plat_data *pdata = dev_get_drvdata(dev);
-
- if (pdata->irq <= 0)
- return -EINVAL;
+ alrm->time.tm_mday = bcd2bin(rtc_read(DS1511_AM4_DATE) & 0x3f);
+ alrm->time.tm_hour = bcd2bin(rtc_read(DS1511_AM3_HOUR) & 0x3f);
+ alrm->time.tm_min = bcd2bin(rtc_read(DS1511_AM2_MIN) & 0x7f);
+ alrm->time.tm_sec = bcd2bin(rtc_read(DS1511_AM1_SEC) & 0x7f);
+ alrm->enabled = !!(rtc_read(DS1511_CONTROL_B) & DS1511_TIE);
- alrm->time.tm_mday = pdata->alrm_mday < 0 ? 0 : pdata->alrm_mday;
- alrm->time.tm_hour = pdata->alrm_hour < 0 ? 0 : pdata->alrm_hour;
- alrm->time.tm_min = pdata->alrm_min < 0 ? 0 : pdata->alrm_min;
- alrm->time.tm_sec = pdata->alrm_sec < 0 ? 0 : pdata->alrm_sec;
- alrm->enabled = (pdata->irqen & RTC_AF) ? 1 : 0;
return 0;
}
-static irqreturn_t
-ds1511_interrupt(int irq, void *dev_id)
+static irqreturn_t ds1511_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct ds1511_data *ds1511 = platform_get_drvdata(pdev);
unsigned long events = 0;
- spin_lock(&pdata->lock);
+ spin_lock(&ds1511->lock);
/*
* read and clear interrupt
*/
- if (rtc_read(RTC_CMD1) & DS1511_IRQF) {
- events = RTC_IRQF;
- if (rtc_read(RTC_ALARM_SEC) & 0x80)
- events |= RTC_UF;
- else
- events |= RTC_AF;
- rtc_update_irq(pdata->rtc, 1, events);
+ if (rtc_read(DS1511_CONTROL_A) & DS1511_IRQF) {
+ events = RTC_IRQF | RTC_AF;
+ rtc_update_irq(ds1511->rtc, 1, events);
}
- spin_unlock(&pdata->lock);
+ spin_unlock(&ds1511->lock);
return events ? IRQ_HANDLED : IRQ_NONE;
}
static int ds1511_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
- struct rtc_plat_data *pdata = dev_get_drvdata(dev);
-
- if (pdata->irq <= 0)
- return -EINVAL;
- if (enabled)
- pdata->irqen |= RTC_AF;
- else
- pdata->irqen &= ~RTC_AF;
- ds1511_rtc_update_alarm(pdata);
+ struct ds1511_data *ds1511 = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ds1511->lock, flags);
+ ds1511_rtc_alarm_enable(enabled);
+ spin_unlock_irqrestore(&ds1511->lock, flags);
+
return 0;
}
@@ -408,7 +271,7 @@ static int ds1511_nvram_write(void *priv, unsigned int pos, void *buf,
static int ds1511_rtc_probe(struct platform_device *pdev)
{
- struct rtc_plat_data *pdata;
+ struct ds1511_data *ds1511;
int ret = 0;
struct nvmem_config ds1511_nvmem_cfg = {
.name = "ds1511_nvram",
@@ -420,21 +283,21 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
.priv = &pdev->dev,
};
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
+ ds1511 = devm_kzalloc(&pdev->dev, sizeof(*ds1511), GFP_KERNEL);
+ if (!ds1511)
return -ENOMEM;
ds1511_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ds1511_base))
return PTR_ERR(ds1511_base);
- pdata->ioaddr = ds1511_base;
- pdata->irq = platform_get_irq(pdev, 0);
+ ds1511->ioaddr = ds1511_base;
+ ds1511->irq = platform_get_irq(pdev, 0);
/*
* turn on the clock and the crystal, etc.
*/
- rtc_write(DS1511_BME, RTC_CMD);
- rtc_write(0, RTC_CMD1);
+ rtc_write(DS1511_BME, DS1511_CONTROL_B);
+ rtc_write(0, DS1511_CONTROL_A);
/*
* clear the wdog counter
*/
@@ -448,38 +311,43 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
/*
* check for a dying bat-tree
*/
- if (rtc_read(RTC_CMD1) & DS1511_BLF1)
+ if (rtc_read(DS1511_CONTROL_A) & DS1511_BLF1)
dev_warn(&pdev->dev, "voltage-low detected.\n");
- spin_lock_init(&pdata->lock);
- platform_set_drvdata(pdev, pdata);
-
- pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(pdata->rtc))
- return PTR_ERR(pdata->rtc);
-
- pdata->rtc->ops = &ds1511_rtc_ops;
+ spin_lock_init(&ds1511->lock);
+ platform_set_drvdata(pdev, ds1511);
- ret = devm_rtc_register_device(pdata->rtc);
- if (ret)
- return ret;
+ ds1511->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(ds1511->rtc))
+ return PTR_ERR(ds1511->rtc);
- devm_rtc_nvmem_register(pdata->rtc, &ds1511_nvmem_cfg);
+ ds1511->rtc->ops = &ds1511_rtc_ops;
+ ds1511->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ ds1511->rtc->alarm_offset_max = 28 * 24 * 60 * 60 - 1;
/*
* if the platform has an interrupt in mind for this device,
* then by all means, set it
*/
- if (pdata->irq > 0) {
- rtc_read(RTC_CMD1);
- if (devm_request_irq(&pdev->dev, pdata->irq, ds1511_interrupt,
+ if (ds1511->irq > 0) {
+ rtc_read(DS1511_CONTROL_A);
+ if (devm_request_irq(&pdev->dev, ds1511->irq, ds1511_interrupt,
IRQF_SHARED, pdev->name, pdev) < 0) {
dev_warn(&pdev->dev, "interrupt not available.\n");
- pdata->irq = 0;
+ ds1511->irq = 0;
}
}
+ if (ds1511->irq == 0)
+ clear_bit(RTC_FEATURE_ALARM, ds1511->rtc->features);
+
+ ret = devm_rtc_register_device(ds1511->rtc);
+ if (ret)
+ return ret;
+
+ devm_rtc_nvmem_register(ds1511->rtc, &ds1511_nvmem_cfg);
+
return 0;
}
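
Two details of the rewritten ds1511 driver are worth spelling out: struct rtc_time.tm_year counts years since 1900, which is where the 19 + tm_year / 100 century value comes from, and the open-coded validation could be dropped because the core now enforces rtc->range_max and rtc->alarm_offset_max (the alarm registers hold only day-of-month down to seconds, so alarms are limited to just under 28 days ahead). A worked illustration, not code from the patch:

/*
 * For 2024, rtc_tm->tm_year == 124 (years since 1900), so
 *   yrs = 124 % 100      = 24
 *   cen = 19 + 124 / 100 = 20
 * and 20 is the century value written to DS1511_CENTURY (the values are
 * BCD-encoded before the register writes). With
 * range_max = RTC_TIMESTAMP_END_2099 the core rejects anything this
 * two-century encoding cannot hold before set_time() is even called.
 */
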
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 866489ad56d67..0013bff0447d5 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -909,10 +909,7 @@ static int m41t80_probe(struct i2c_client *client)
if (IS_ERR(m41t80_data->rtc))
return PTR_ERR(m41t80_data->rtc);
-#ifdef CONFIG_OF
- wakeup_source = of_property_read_bool(client->dev.of_node,
- "wakeup-source");
-#endif
+ wakeup_source = device_property_read_bool(&client->dev, "wakeup-source");
if (client->irq > 0) {
unsigned long irqflags = IRQF_TRIGGER_LOW;
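
The m41t80 change swaps an OF-only helper for device_property_read_bool(), which resolves the property through whatever firmware node backs the device (DT or ACPI) and simply returns false when it is absent, so the CONFIG_OF guard is no longer needed. A minimal sketch, with an illustrative helper name:

#include <linux/device.h>
#include <linux/property.h>

static bool example_wants_wakeup(struct device *dev)
{
	/* True if the firmware node carries a "wakeup-source" property,
	 * false otherwise (including when there is no node at all). */
	return device_property_read_bool(dev, "wakeup-source");
}
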
diff --git a/drivers/rtc/rtc-max31335.c b/drivers/rtc/rtc-max31335.c
index 402fda8fd5488..a2441e5c2c74d 100644
--- a/drivers/rtc/rtc-max31335.c
+++ b/drivers/rtc/rtc-max31335.c
@@ -204,7 +204,7 @@ static bool max31335_volatile_reg(struct device *dev, unsigned int reg)
return true;
/* interrupt status register */
- if (reg == MAX31335_INT_EN1_A1IE)
+ if (reg == MAX31335_STATUS1)
return true;
/* temperature registers */
diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c
index f488a189a4651..076d8b99f9131 100644
--- a/drivers/rtc/rtc-nct3018y.c
+++ b/drivers/rtc/rtc-nct3018y.c
@@ -102,6 +102,8 @@ static int nct3018y_get_alarm_mode(struct i2c_client *client, unsigned char *ala
if (flags < 0)
return flags;
*alarm_enable = flags & NCT3018Y_BIT_AIE;
+ dev_dbg(&client->dev, "%s:alarm_enable:%x\n", __func__, *alarm_enable);
+
}
if (alarm_flag) {
@@ -110,11 +112,9 @@ static int nct3018y_get_alarm_mode(struct i2c_client *client, unsigned char *ala
if (flags < 0)
return flags;
*alarm_flag = flags & NCT3018Y_BIT_AF;
+ dev_dbg(&client->dev, "%s:alarm_flag:%x\n", __func__, *alarm_flag);
}
- dev_dbg(&client->dev, "%s:alarm_enable:%x alarm_flag:%x\n",
- __func__, *alarm_enable, *alarm_flag);
-
return 0;
}
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index d1efde3e7a809..98b77f790b0c5 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -370,6 +370,30 @@ static int pcf8523_rtc_set_offset(struct device *dev, long offset)
return regmap_write(pcf8523->regmap, PCF8523_REG_OFFSET, value);
}
+#ifdef CONFIG_PM_SLEEP
+static int pcf8523_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq > 0 && device_may_wakeup(dev))
+ enable_irq_wake(client->irq);
+
+ return 0;
+}
+
+static int pcf8523_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq > 0 && device_may_wakeup(dev))
+ disable_irq_wake(client->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(pcf8523_pm, pcf8523_suspend, pcf8523_resume);
+
static const struct rtc_class_ops pcf8523_rtc_ops = {
.read_time = pcf8523_rtc_read_time,
.set_time = pcf8523_rtc_set_time,
@@ -487,6 +511,7 @@ static struct i2c_driver pcf8523_driver = {
.driver = {
.name = "rtc-pcf8523",
.of_match_table = pcf8523_of_match,
+ .pm = &pcf8523_pm,
},
.probe = pcf8523_probe,
.id_table = pcf8523_id,
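
The pcf8523 hunks add standard wakeup handling: on suspend the I2C interrupt is armed as a wake source when userspace has enabled wakeup, and the ops are wired into the driver's .pm field. The sketch below shows the same behaviour using the newer DEFINE_SIMPLE_DEV_PM_OPS()/pm_sleep_ptr() idiom, which avoids the #ifdef CONFIG_PM_SLEEP block; this is an alternative, not what the patch itself uses, and the example_* names are illustrative.

#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	if (client->irq > 0 && device_may_wakeup(dev))
		enable_irq_wake(client->irq);
	return 0;
}

static int example_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	if (client->irq > 0 && device_may_wakeup(dev))
		disable_irq_wake(client->irq);
	return 0;
}

/* The callbacks stay unconditionally compiled; pm_sleep_ptr() turns the
 * reference into NULL when CONFIG_PM_SLEEP is off so they are discarded. */
static DEFINE_SIMPLE_DEV_PM_OPS(example_pm, example_suspend, example_resume);

static struct i2c_driver example_driver = {
	.driver = {
		.name = "example-rtc",
		.pm = pm_sleep_ptr(&example_pm),
	},
};
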
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index cead018c3f06a..0a97cfedd7060 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3976,7 +3976,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
ccw = cqr->cpaddr;
ccw->cmd_code = CCW_CMD_RDC;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
ccw->flags = 0;
ccw->count = rdc_buffer_size;
cqr->startdev = device;
@@ -4020,7 +4020,7 @@ char *dasd_get_sense(struct irb *irb)
if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
if (irb->scsw.tm.tcw)
- tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw));
+ tsb = tcw_get_tsb(dma32_to_virt(irb->scsw.tm.tcw));
if (tsb && tsb->length == 64 && tsb->flags)
switch (tsb->flags & 0x07) {
case 1: /* tsa_iostat */
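
The dasd hunks here and below are part of the s390 conversion from raw (__u32)virt_to_phys() casts to typed channel-address helpers: dma32_t/dma64_t are distinct types, and virt_to_dma32()/virt_to_dma64() plus dma32_to_virt()/dma64_to_virt() do the conversions, so mixing virtual addresses with channel-program addresses becomes a compile-time complaint rather than a silent truncation. A minimal sketch of the write side, with illustrative names:

#include <asm/cio.h>		/* struct ccw1 */
#include <asm/dma-types.h>	/* dma32_t, virt_to_dma32() */

static void example_fill_ccw(struct ccw1 *ccw, u8 cmd, void *data, u16 count)
{
	ccw->cmd_code = cmd;
	ccw->flags = 0;
	ccw->count = count;
	/* cda is dma32_t, so a bare pointer or virt_to_phys() result no
	 * longer assigns without a diagnostic. */
	ccw->cda = virt_to_dma32(data);
}
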
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 459b7f8ac8837..bbbacfc386f28 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -216,7 +216,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
memset(ccw, 0, sizeof(struct ccw1));
ccw->cmd_code = CCW_CMD_DCTL;
ccw->count = 4;
- ccw->cda = (__u32)virt_to_phys(DCTL_data);
+ ccw->cda = virt_to_dma32(DCTL_data);
dctl_cqr->flags = erp->flags;
dctl_cqr->function = dasd_3990_erp_DCTL;
dctl_cqr->refers = erp;
@@ -1589,7 +1589,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
{
struct dasd_device *device = default_erp->startdev;
- __u32 cpa = 0;
+ dma32_t cpa = 0;
struct dasd_ccw_req *cqr;
struct dasd_ccw_req *erp;
struct DE_eckd_data *DE_data;
@@ -1693,7 +1693,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
ccw->flags = CCW_FLAG_CC;
ccw->count = 16;
- ccw->cda = (__u32)virt_to_phys(DE_data);
+ ccw->cda = virt_to_dma32(DE_data);
/* create LO ccw */
ccw++;
@@ -1701,7 +1701,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
ccw->flags = CCW_FLAG_CC;
ccw->count = 16;
- ccw->cda = (__u32)virt_to_phys(LO_data);
+ ccw->cda = virt_to_dma32(LO_data);
/* TIC to the failed ccw */
ccw++;
@@ -1747,7 +1747,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
{
struct dasd_device *device = previous_erp->startdev;
- __u32 cpa = 0;
+ dma32_t cpa = 0;
struct dasd_ccw_req *cqr;
struct dasd_ccw_req *erp;
char *LO_data; /* struct LO_eckd_data */
@@ -2386,7 +2386,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
tcw = erp->cpaddr;
tsb = (struct tsb *) &tcw[1];
*tcw = *((struct tcw *)cqr->cpaddr);
- tcw->tsb = virt_to_phys(tsb);
+ tcw->tsb = virt_to_dma64(tsb);
} else if (ccw->cmd_code == DASD_ECKD_CCW_PSF) {
/* PSF cannot be chained from NOOP/TIC */
erp->cpaddr = cqr->cpaddr;
@@ -2397,7 +2397,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
ccw->flags = CCW_FLAG_CC;
ccw++;
ccw->cmd_code = CCW_CMD_TIC;
- ccw->cda = (__u32)virt_to_phys(cqr->cpaddr);
+ ccw->cda = virt_to_dma32(cqr->cpaddr);
}
erp->flags = cqr->flags;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index e84cd54365563..f7e768d8ca766 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -435,7 +435,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - feature codes */
memset(lcu->uac, 0, sizeof(*(lcu->uac)));
@@ -443,7 +443,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*(lcu->uac));
- ccw->cda = (__u32)virt_to_phys(lcu->uac);
+ ccw->cda = virt_to_dma32(lcu->uac);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -739,7 +739,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
ccw->cmd_code = DASD_ECKD_CCW_RSCK;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 16;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
((char *)cqr->data)[0] = reason;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 373c1a86c33ed..180a008d38eaa 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -283,7 +283,7 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
ccw->flags = 0;
ccw->count = 16;
- ccw->cda = (__u32)virt_to_phys(data);
+ ccw->cda = virt_to_dma32(data);
}
memset(data, 0, sizeof(struct DE_eckd_data));
@@ -393,7 +393,7 @@ static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
ccw->count = 22;
else
ccw->count = 20;
- ccw->cda = (__u32)virt_to_phys(data);
+ ccw->cda = virt_to_dma32(data);
}
memset(data, 0, sizeof(*data));
@@ -539,11 +539,11 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
ccw->flags = 0;
if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
ccw->count = sizeof(*pfxdata) + 2;
- ccw->cda = (__u32)virt_to_phys(pfxdata);
+ ccw->cda = virt_to_dma32(pfxdata);
memset(pfxdata, 0, sizeof(*pfxdata) + 2);
} else {
ccw->count = sizeof(*pfxdata);
- ccw->cda = (__u32)virt_to_phys(pfxdata);
+ ccw->cda = virt_to_dma32(pfxdata);
memset(pfxdata, 0, sizeof(*pfxdata));
}
@@ -610,7 +610,7 @@ locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
ccw->flags = 0;
ccw->count = 16;
- ccw->cda = (__u32)virt_to_phys(data);
+ ccw->cda = virt_to_dma32(data);
memset(data, 0, sizeof(struct LO_eckd_data));
sector = 0;
@@ -825,7 +825,7 @@ static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RCD;
ccw->flags = 0;
- ccw->cda = (__u32)virt_to_phys(rcd_buffer);
+ ccw->cda = virt_to_dma32(rcd_buffer);
ccw->count = DASD_ECKD_RCD_DATA_SIZE;
cqr->magic = DASD_ECKD_MAGIC;
@@ -853,7 +853,7 @@ static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
if (cqr->status != DASD_CQR_DONE) {
ccw = cqr->cpaddr;
- rcd_buffer = phys_to_virt(ccw->cda);
+ rcd_buffer = dma32_to_virt(ccw->cda);
memset(rcd_buffer, 0, sizeof(*rcd_buffer));
rcd_buffer[0] = 0xE5;
@@ -1534,7 +1534,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - feature codes */
features = (struct dasd_rssd_features *) (prssdp + 1);
@@ -1543,7 +1543,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_features);
- ccw->cda = (__u32)virt_to_phys(features);
+ ccw->cda = virt_to_dma32(features);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -1603,7 +1603,7 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(*prssdp);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - Volume Storage Query */
vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
@@ -1613,7 +1613,7 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*vsq);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)virt_to_phys(vsq);
+ ccw->cda = virt_to_dma32(vsq);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -1788,7 +1788,7 @@ static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(*prssdp);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
memset(lcq, 0, sizeof(*lcq));
@@ -1797,7 +1797,7 @@ static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*lcq);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)virt_to_phys(lcq);
+ ccw->cda = virt_to_dma32(lcq);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -1894,7 +1894,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
- ccw->cda = (__u32)virt_to_phys(psf_ssc_data);
+ ccw->cda = virt_to_dma32(psf_ssc_data);
ccw->count = 66;
cqr->startdev = device;
@@ -2250,7 +2250,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = 0;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(count_data);
+ ccw->cda = virt_to_dma32(count_data);
ccw++;
count_data++;
}
@@ -2264,7 +2264,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = 0;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(count_data);
+ ccw->cda = virt_to_dma32(count_data);
cqr->block = NULL;
cqr->startdev = device;
@@ -2635,7 +2635,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(fmt_buffer);
+ ccw->cda = virt_to_dma32(fmt_buffer);
ccw++;
fmt_buffer++;
}
@@ -2845,7 +2845,7 @@ dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(ect);
+ ccw->cda = virt_to_dma32(ect);
ccw++;
}
if ((intensity & ~0x08) & 0x04) { /* erase track */
@@ -2860,7 +2860,7 @@ dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(ect);
+ ccw->cda = virt_to_dma32(ect);
} else { /* write remaining records */
for (i = 0; i < rpt; i++) {
ect = (struct eckd_count *) data;
@@ -2895,7 +2895,7 @@ dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
DASD_ECKD_CCW_WRITE_CKD_MT;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(ect);
+ ccw->cda = virt_to_dma32(ect);
ccw++;
}
}
@@ -3836,7 +3836,7 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
}
ccw = cqr->cpaddr;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
ccw->cmd_code = DASD_ECKD_CCW_DSO;
ccw->count = size;
@@ -3961,7 +3961,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
unsigned int blksize)
{
struct dasd_eckd_private *private;
- unsigned long *idaws;
+ dma64_t *idaws;
struct LO_eckd_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
@@ -4039,8 +4039,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-EAGAIN);
}
- idaws = (unsigned long *) (cqr->data +
- sizeof(struct PFX_eckd_data));
+ idaws = (dma64_t *)(cqr->data + sizeof(struct PFX_eckd_data));
} else {
if (define_extent(ccw++, cqr->data, first_trk,
last_trk, cmd, basedev, 0) == -EAGAIN) {
@@ -4050,8 +4049,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-EAGAIN);
}
- idaws = (unsigned long *) (cqr->data +
- sizeof(struct DE_eckd_data));
+ idaws = (dma64_t *)(cqr->data + sizeof(struct DE_eckd_data));
}
/* Build locate_record+read/write/ccws. */
LO_data = (struct LO_eckd_data *) (idaws + cidaw);
@@ -4105,11 +4103,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
ccw->cmd_code = rcmd;
ccw->count = count;
if (idal_is_needed(dst, blksize)) {
- ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->cda = virt_to_dma32(idaws);
ccw->flags = CCW_FLAG_IDA;
idaws = idal_create_words(idaws, dst, blksize);
} else {
- ccw->cda = (__u32)virt_to_phys(dst);
+ ccw->cda = virt_to_dma32(dst);
ccw->flags = 0;
}
ccw++;
@@ -4152,7 +4150,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
unsigned int blk_per_trk,
unsigned int blksize)
{
- unsigned long *idaws;
+ dma64_t *idaws;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct req_iterator iter;
@@ -4222,7 +4220,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
* (or 2K blocks on 31-bit)
* - the scope of a ccw and its idal ends with the track boundaries
*/
- idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
+ idaws = (dma64_t *)(cqr->data + sizeof(struct PFX_eckd_data));
recid = first_rec;
new_track = 1;
end_idaw = 0;
@@ -4243,7 +4241,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = cmd;
ccw->count = len_to_track_end;
- ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->cda = virt_to_dma32(idaws);
ccw->flags = CCW_FLAG_IDA;
ccw++;
recid += count;
@@ -4259,7 +4257,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
* idaw ends
*/
if (!idaw_dst) {
- if ((__u32)virt_to_phys(dst) & (IDA_BLOCK_SIZE - 1)) {
+ if ((unsigned long)(dst) & (IDA_BLOCK_SIZE - 1)) {
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-ERANGE);
} else
@@ -4279,7 +4277,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
* idal_create_words will handle cases where idaw_len
* is larger than IDA_BLOCK_SIZE
*/
- if (!((__u32)virt_to_phys(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1)))
+ if (!((unsigned long)(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1)))
end_idaw = 1;
/* We also need to end the idaw at track end */
if (!len_to_track_end) {
@@ -4738,11 +4736,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
struct req_iterator iter;
struct dasd_ccw_req *cqr;
unsigned int trkcount;
- unsigned long *idaws;
unsigned int size;
unsigned char cmd;
struct bio_vec bv;
struct ccw1 *ccw;
+ dma64_t *idaws;
int use_prefix;
void *data;
char *dst;
@@ -4823,7 +4821,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
trkcount, cmd, basedev, 0, 0);
}
- idaws = (unsigned long *)(cqr->data + size);
+ idaws = (dma64_t *)(cqr->data + size);
len_to_track_end = 0;
if (start_padding_sectors) {
ccw[-1].flags |= CCW_FLAG_CC;
@@ -4832,7 +4830,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
ccw->count = 57326;
/* 64k map to one track */
len_to_track_end = 65536 - start_padding_sectors * 512;
- ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->cda = virt_to_dma32(idaws);
ccw->flags |= CCW_FLAG_IDA;
ccw->flags |= CCW_FLAG_SLI;
ccw++;
@@ -4851,7 +4849,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
ccw->count = 57326;
/* 64k map to one track */
len_to_track_end = 65536;
- ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->cda = virt_to_dma32(idaws);
ccw->flags |= CCW_FLAG_IDA;
ccw->flags |= CCW_FLAG_SLI;
ccw++;
@@ -4908,9 +4906,9 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
ccw++;
if (dst) {
if (ccw->flags & CCW_FLAG_IDA)
- cda = *((char **)phys_to_virt(ccw->cda));
+ cda = *((char **)dma32_to_virt(ccw->cda));
else
- cda = phys_to_virt(ccw->cda);
+ cda = dma32_to_virt(ccw->cda);
if (dst != cda) {
if (rq_data_dir(req) == READ)
memcpy(dst, cda, bv.bv_len);
@@ -5060,7 +5058,7 @@ dasd_eckd_release(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 32;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -5115,7 +5113,7 @@ dasd_eckd_reserve(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 32;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -5169,7 +5167,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_SLCK;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 32;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -5230,7 +5228,7 @@ static int dasd_eckd_snid(struct dasd_device *device,
ccw->cmd_code = DASD_ECKD_CCW_SNID;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 12;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -5297,7 +5295,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - Performance Statistics */
stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
@@ -5306,7 +5304,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
- ccw->cda = (__u32)virt_to_phys(stats);
+ ccw->cda = virt_to_dma32(stats);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -5450,7 +5448,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = usrparm.psf_data_len;
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)virt_to_phys(psf_data);
+ ccw->cda = virt_to_dma32(psf_data);
ccw++;
@@ -5458,7 +5456,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = usrparm.rssd_result_len;
ccw->flags = CCW_FLAG_SLI ;
- ccw->cda = (__u32)virt_to_phys(rssd_result);
+ ccw->cda = virt_to_dma32(rssd_result);
rc = dasd_sleep_on(cqr);
if (rc)
@@ -5527,9 +5525,9 @@ dasd_eckd_dump_ccw_range(struct dasd_device *device, struct ccw1 *from,
/* get pointer to data (consider IDALs) */
if (from->flags & CCW_FLAG_IDA)
- datap = (char *)*((addr_t *)phys_to_virt(from->cda));
+ datap = (char *)*((addr_t *)dma32_to_virt(from->cda));
else
- datap = phys_to_virt(from->cda);
+ datap = dma32_to_virt(from->cda);
/* dump data (max 128 bytes) */
for (count = 0; count < from->count && count < 128; count++) {
@@ -5598,7 +5596,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
req ? req->intrc : 0);
len += sprintf(page + len, "Failing CCW: %px\n",
- phys_to_virt(irb->scsw.cmd.cpa));
+ dma32_to_virt(irb->scsw.cmd.cpa));
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len, "Sense(hex) %2d-%2d:",
@@ -5641,7 +5639,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
/* print failing CCW area (maximum 4) */
/* scsw->cda is either valid or zero */
from = ++to;
- fail = phys_to_virt(irb->scsw.cmd.cpa); /* failing CCW */
+ fail = dma32_to_virt(irb->scsw.cmd.cpa); /* failing CCW */
if (from < fail - 2) {
from = fail - 2; /* there is a gap - print header */
dev_err(dev, "......\n");
@@ -5691,12 +5689,12 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
(irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
req ? req->intrc : 0);
len += sprintf(page + len, "Failing TCW: %px\n",
- phys_to_virt(irb->scsw.tm.tcw));
+ dma32_to_virt(irb->scsw.tm.tcw));
tsb = NULL;
sense = NULL;
if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
- tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw));
+ tsb = tcw_get_tsb(dma32_to_virt(irb->scsw.tm.tcw));
if (tsb) {
len += sprintf(page + len, "tsb->length %d\n", tsb->length);
@@ -5906,7 +5904,7 @@ retry:
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - message buffer */
message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
@@ -5916,7 +5914,7 @@ retry:
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_messages);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)virt_to_phys(message_buf);
+ ccw->cda = virt_to_dma32(message_buf);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -5997,14 +5995,14 @@ static int dasd_eckd_query_host_access(struct dasd_device *device,
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - query host access */
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_psf_query_host_access);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)virt_to_phys(host_access);
+ ccw->cda = virt_to_dma32(host_access);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -6239,14 +6237,14 @@ static int dasd_eckd_query_pprc_status(struct dasd_device *device,
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)(addr_t)prssdp;
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - query host access */
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*pprc_data);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)(addr_t)pprc_data;
+ ccw->cda = virt_to_dma32(pprc_data);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -6340,7 +6338,7 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
psf_cuir->ssid = device->path[pos].ssid;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
- ccw->cda = (__u32)virt_to_phys(psf_cuir);
+ ccw->cda = virt_to_dma32(psf_cuir);
ccw->flags = CCW_FLAG_SLI;
ccw->count = sizeof(struct dasd_psf_cuir_response);
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 5064a616e041a..194e9e2d9cb89 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -485,7 +485,7 @@ int dasd_eer_enable(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_SNSS;
ccw->count = SNSS_DATA_SIZE;
ccw->flags = 0;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index bcbb2f8e91feb..361e9bd752570 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -78,7 +78,7 @@ define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT;
ccw->flags = 0;
ccw->count = 16;
- ccw->cda = (__u32)virt_to_phys(data);
+ ccw->cda = virt_to_dma32(data);
memset(data, 0, sizeof (struct DE_fba_data));
if (rw == WRITE)
(data->mask).perm = 0x0;
@@ -98,7 +98,7 @@ locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
ccw->cmd_code = DASD_FBA_CCW_LOCATE;
ccw->flags = 0;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(data);
+ ccw->cda = virt_to_dma32(data);
memset(data, 0, sizeof (struct LO_fba_data));
if (rw == WRITE)
data->operation.cmd = 0x5;
@@ -257,7 +257,7 @@ static void ccw_write_zero(struct ccw1 *ccw, int count)
ccw->cmd_code = DASD_FBA_CCW_WRITE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = count;
- ccw->cda = (__u32)virt_to_phys(dasd_fba_zero_page);
+ ccw->cda = virt_to_dma32(dasd_fba_zero_page);
}
/*
@@ -427,7 +427,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
struct request *req)
{
struct dasd_fba_private *private = block->base->private;
- unsigned long *idaws;
+ dma64_t *idaws;
struct LO_fba_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
@@ -487,7 +487,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
define_extent(ccw++, cqr->data, rq_data_dir(req),
block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
/* Build locate_record + read/write ccws. */
- idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
+ idaws = (dma64_t *)(cqr->data + sizeof(struct DE_fba_data));
LO_data = (struct LO_fba_data *) (idaws + cidaw);
/* Locate record for all blocks for smart devices. */
if (private->rdc_data.mode.bits.data_chain != 0) {
@@ -523,11 +523,11 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
ccw->cmd_code = cmd;
ccw->count = block->bp_block;
if (idal_is_needed(dst, blksize)) {
- ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->cda = virt_to_dma32(idaws);
ccw->flags = CCW_FLAG_IDA;
idaws = idal_create_words(idaws, dst, blksize);
} else {
- ccw->cda = (__u32)virt_to_phys(dst);
+ ccw->cda = virt_to_dma32(dst);
ccw->flags = 0;
}
ccw++;
@@ -585,9 +585,9 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
ccw++;
if (dst) {
if (ccw->flags & CCW_FLAG_IDA)
- cda = *((char **)phys_to_virt(ccw->cda));
+ cda = *((char **)dma32_to_virt(ccw->cda));
else
- cda = phys_to_virt(ccw->cda);
+ cda = dma32_to_virt(ccw->cda);
if (dst != cda) {
if (rq_data_dir(req) == READ)
memcpy(dst, cda, bv.bv_len);
@@ -672,7 +672,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
len += sprintf(page + len, "in req: %px CS: 0x%02X DS: 0x%02X\n",
req, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
len += sprintf(page + len, "Failing CCW: %px\n",
- (void *) (addr_t) irb->scsw.cmd.cpa);
+ (void *)(u64)dma32_to_u32(irb->scsw.cmd.cpa));
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len, "Sense(hex) %2d-%2d:",
@@ -701,7 +701,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
- ((int *) (addr_t) act->cda)
+ ((int *)dma32_to_virt(act->cda))
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
@@ -710,18 +710,18 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
/* print failing CCW area */
len = 0;
- if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
- act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
+ if (act < ((struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa)) - 2) {
+ act = ((struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa)) - 2;
len += sprintf(page + len, "......\n");
}
- end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
+ end = min((struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa) + 2, last);
while (act <= end) {
len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
- ((int *) (addr_t) act->cda)
+ ((int *)dma32_to_virt(act->cda))
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
@@ -738,7 +738,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
- ((int *) (addr_t) act->cda)
+ ((int *)dma32_to_virt(act->cda))
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 9c8f529b827cb..6d1689a2717e5 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -552,6 +552,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
int rc, i, j, num_of_segments;
struct dcssblk_dev_info *dev_info;
struct segment_info *seg_info, *temp;
+ struct dax_device *dax_dev;
char *local_buf;
unsigned long seg_byte_size;
@@ -679,13 +680,13 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
if (rc)
goto put_dev;
- dev_info->dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops);
- if (IS_ERR(dev_info->dax_dev)) {
- rc = PTR_ERR(dev_info->dax_dev);
- dev_info->dax_dev = NULL;
+ dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops);
+ if (IS_ERR(dax_dev)) {
+ rc = PTR_ERR(dax_dev);
goto put_dev;
}
- set_dax_synchronous(dev_info->dax_dev);
+ set_dax_synchronous(dax_dev);
+ dev_info->dax_dev = dax_dev;
rc = dax_add_host(dev_info->dax_dev, dev_info->gd);
if (rc)
goto out_dax;
@@ -919,7 +920,7 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
dev_sz = dev_info->end - dev_info->start + 1;
if (kaddr)
- *kaddr = (void *) dev_info->start + offset;
+ *kaddr = __va(dev_info->start + offset);
if (pfn)
*pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
PFN_DEV|PFN_SPECIAL);
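
The dcssblk change above keeps dev_info->dax_dev NULL until alloc_dax() has actually succeeded, so the shared put_dev error path never sees an ERR_PTR-encoded pointer; the __va() change makes the physical-to-virtual conversion in the DAX access path explicit instead of casting a physical address straight to a pointer. A sketch of the publish-after-success pattern, with illustrative example_* names and a stub ops table:

#include <linux/dax.h>
#include <linux/err.h>

struct example_dev {
	struct dax_device *dax_dev;
};

static const struct dax_operations example_dax_ops;	/* stub for illustration */

static int example_setup_dax(struct example_dev *priv)
{
	struct dax_device *dax_dev;

	dax_dev = alloc_dax(priv, &example_dax_ops);
	if (IS_ERR(dax_dev))
		return PTR_ERR(dax_dev);	/* priv->dax_dev is still NULL */

	set_dax_synchronous(dax_dev);
	priv->dax_dev = dax_dev;		/* publish only a valid pointer */
	return 0;
}
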
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 9f6fdd0daa74e..1d456a5a3bfb8 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -131,7 +131,7 @@ static void scm_request_done(struct scm_request *scmrq)
for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
msb = &scmrq->aob->msb[i];
- aidaw = (u64)phys_to_virt(msb->data_addr);
+ aidaw = (u64)dma64_to_virt(msb->data_addr);
if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
IS_ALIGNED(aidaw, PAGE_SIZE))
@@ -196,12 +196,12 @@ static int scm_request_prepare(struct scm_request *scmrq)
msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
msb->flags |= MSB_FLAG_IDA;
- msb->data_addr = (u64)virt_to_phys(aidaw);
+ msb->data_addr = virt_to_dma64(aidaw);
rq_for_each_segment(bv, req, iter) {
WARN_ON(bv.bv_offset);
msb->blk_count += bv.bv_len >> 12;
- aidaw->data_addr = virt_to_phys(page_address(bv.bv_page));
+ aidaw->data_addr = virt_to_dma64(page_address(bv.bv_page));
aidaw++;
}
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 0b0324fe4aff3..dcb3c32f027af 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -159,7 +159,7 @@ static void raw3215_mk_read_req(struct raw3215_info *raw)
ccw->cmd_code = 0x0A; /* read inquiry */
ccw->flags = 0x20; /* ignore incorrect length */
ccw->count = 160;
- ccw->cda = (__u32)__pa(raw->inbuf);
+ ccw->cda = virt_to_dma32(raw->inbuf);
}
/*
@@ -218,7 +218,7 @@ static void raw3215_mk_write_req(struct raw3215_info *raw)
ccw[-1].flags |= 0x40; /* use command chaining */
ccw->cmd_code = 0x01; /* write, auto carrier return */
ccw->flags = 0x20; /* ignore incorrect length ind. */
- ccw->cda = (__u32)__pa(raw->buffer + ix);
+ ccw->cda = virt_to_dma32(raw->buffer + ix);
count = len;
if (ix + count > RAW3215_BUFFER_SIZE)
count = RAW3215_BUFFER_SIZE - ix;
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 4f26b0a55620f..4d824f86bbbbb 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -126,7 +126,7 @@ static int fs3270_activate(struct raw3270_view *view)
raw3270_request_set_cmd(fp->init, TC_EWRITEA);
raw3270_request_set_idal(fp->init, fp->rdbuf);
fp->init->rescnt = 0;
- cp = fp->rdbuf->data[0];
+ cp = dma64_to_virt(fp->rdbuf->data[0]);
if (fp->rdbuf_size == 0) {
/* No saved buffer. Just clear the screen. */
fp->init->ccw.count = 1;
@@ -164,7 +164,7 @@ static void fs3270_save_callback(struct raw3270_request *rq, void *data)
fp = (struct fs3270 *)rq->view;
/* Correct idal buffer element 0 address. */
- fp->rdbuf->data[0] -= 5;
+ fp->rdbuf->data[0] = dma64_add(fp->rdbuf->data[0], -5);
fp->rdbuf->size += 5;
/*
@@ -202,7 +202,7 @@ static void fs3270_deactivate(struct raw3270_view *view)
* room for the TW_KR/TO_SBA/<address>/<address>/TO_IC sequence
* in the activation command.
*/
- fp->rdbuf->data[0] += 5;
+ fp->rdbuf->data[0] = dma64_add(fp->rdbuf->data[0], 5);
fp->rdbuf->size -= 5;
raw3270_request_set_idal(fp->init, fp->rdbuf);
fp->init->rescnt = 0;
@@ -521,13 +521,13 @@ static const struct file_operations fs3270_fops = {
static void fs3270_create_cb(int minor)
{
__register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops);
- device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
+ device_create(&class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
NULL, "3270/tub%d", minor);
}
static void fs3270_destroy_cb(int minor)
{
- device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor));
+ device_destroy(&class3270, MKDEV(IBM_FS3270_MAJOR, minor));
__unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub");
}
@@ -546,7 +546,7 @@ static int __init fs3270_init(void)
rc = __register_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270", &fs3270_fops);
if (rc)
return rc;
- device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0),
+ device_create(&class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0),
NULL, "3270/tub");
raw3270_register_notifier(&fs3270_notifier);
return 0;
@@ -555,7 +555,7 @@ static int __init fs3270_init(void)
static void __exit fs3270_exit(void)
{
raw3270_unregister_notifier(&fs3270_notifier);
- device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0));
+ device_destroy(&class3270, MKDEV(IBM_FS3270_MAJOR, 0));
__unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270");
}
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 7115c0f856507..37173cb0f5f5a 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -29,7 +29,9 @@
#include <linux/device.h>
#include <linux/mutex.h>
-struct class *class3270;
+const struct class class3270 = {
+ .name = "3270",
+};
EXPORT_SYMBOL(class3270);
/* The main 3270 data structure. */
@@ -160,7 +162,7 @@ struct raw3270_request *raw3270_request_alloc(size_t size)
/*
* Setup ccw.
*/
- rq->ccw.cda = __pa(rq->buffer);
+ rq->ccw.cda = virt_to_dma32(rq->buffer);
rq->ccw.flags = CCW_FLAG_SLI;
return rq;
@@ -186,7 +188,7 @@ int raw3270_request_reset(struct raw3270_request *rq)
return -EBUSY;
rq->ccw.cmd_code = 0;
rq->ccw.count = 0;
- rq->ccw.cda = __pa(rq->buffer);
+ rq->ccw.cda = virt_to_dma32(rq->buffer);
rq->ccw.flags = CCW_FLAG_SLI;
rq->rescnt = 0;
rq->rc = 0;
@@ -221,7 +223,7 @@ EXPORT_SYMBOL(raw3270_request_add_data);
*/
void raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size)
{
- rq->ccw.cda = __pa(data);
+ rq->ccw.cda = virt_to_dma32(data);
rq->ccw.count = size;
}
EXPORT_SYMBOL(raw3270_request_set_data);
@@ -231,7 +233,7 @@ EXPORT_SYMBOL(raw3270_request_set_data);
*/
void raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
{
- rq->ccw.cda = __pa(ib->data);
+ rq->ccw.cda = virt_to_dma32(ib->data);
rq->ccw.count = ib->size;
rq->ccw.flags |= CCW_FLAG_IDA;
}
@@ -577,7 +579,7 @@ static void raw3270_read_modified(struct raw3270 *rp)
rp->init_readmod.ccw.cmd_code = TC_READMOD;
rp->init_readmod.ccw.flags = CCW_FLAG_SLI;
rp->init_readmod.ccw.count = sizeof(rp->init_data);
- rp->init_readmod.ccw.cda = (__u32)__pa(rp->init_data);
+ rp->init_readmod.ccw.cda = virt_to_dma32(rp->init_data);
rp->init_readmod.callback = raw3270_read_modified_cb;
rp->init_readmod.callback_data = rp->init_data;
rp->state = RAW3270_STATE_READMOD;
@@ -597,7 +599,7 @@ static void raw3270_writesf_readpart(struct raw3270 *rp)
rp->init_readpart.ccw.cmd_code = TC_WRITESF;
rp->init_readpart.ccw.flags = CCW_FLAG_SLI;
rp->init_readpart.ccw.count = sizeof(wbuf);
- rp->init_readpart.ccw.cda = (__u32)__pa(&rp->init_data);
+ rp->init_readpart.ccw.cda = virt_to_dma32(&rp->init_data);
rp->state = RAW3270_STATE_W4ATTN;
raw3270_start_irq(&rp->init_view, &rp->init_readpart);
}
@@ -635,7 +637,7 @@ static int __raw3270_reset_device(struct raw3270 *rp)
rp->init_reset.ccw.cmd_code = TC_EWRITEA;
rp->init_reset.ccw.flags = CCW_FLAG_SLI;
rp->init_reset.ccw.count = 1;
- rp->init_reset.ccw.cda = (__u32)__pa(rp->init_data);
+ rp->init_reset.ccw.cda = virt_to_dma32(rp->init_data);
rp->init_reset.callback = raw3270_reset_device_cb;
rc = __raw3270_start(rp, &rp->init_view, &rp->init_reset);
if (rc == 0 && rp->state == RAW3270_STATE_INIT)
@@ -1316,23 +1318,25 @@ static int raw3270_init(void)
return 0;
raw3270_registered = 1;
rc = ccw_driver_register(&raw3270_ccw_driver);
- if (rc == 0) {
- /* Create attributes for early (= console) device. */
- mutex_lock(&raw3270_mutex);
- class3270 = class_create("3270");
- list_for_each_entry(rp, &raw3270_devices, list) {
- get_device(&rp->cdev->dev);
- raw3270_create_attributes(rp);
- }
- mutex_unlock(&raw3270_mutex);
+ if (rc)
+ return rc;
+ rc = class_register(&class3270);
+ if (rc)
+ return rc;
+ /* Create attributes for early (= console) device. */
+ mutex_lock(&raw3270_mutex);
+ list_for_each_entry(rp, &raw3270_devices, list) {
+ get_device(&rp->cdev->dev);
+ raw3270_create_attributes(rp);
}
- return rc;
+ mutex_unlock(&raw3270_mutex);
+ return 0;
}
static void raw3270_exit(void)
{
ccw_driver_unregister(&raw3270_ccw_driver);
- class_destroy(class3270);
+ class_unregister(&class3270);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index b1beecc7a0a94..5040c7e0e051b 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -14,7 +14,7 @@
struct raw3270;
struct raw3270_view;
-extern struct class *class3270;
+extern const struct class class3270;
/* 3270 CCW request */
struct raw3270_request {
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 11c428f4c7cf9..7815e9bea69a1 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -18,6 +18,7 @@
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
+#include <linux/memory_hotplug.h>
#include <linux/module.h>
#include <asm/ctlreg.h>
#include <asm/chpid.h>
@@ -26,6 +27,7 @@
#include <asm/sclp.h>
#include <asm/numa.h>
#include <asm/facility.h>
+#include <asm/page-states.h>
#include "sclp.h"
@@ -340,16 +342,38 @@ static int sclp_mem_notifier(struct notifier_block *nb,
if (contains_standby_increment(start, start + size))
rc = -EPERM;
break;
- case MEM_ONLINE:
- case MEM_CANCEL_OFFLINE:
- break;
- case MEM_GOING_ONLINE:
+ case MEM_PREPARE_ONLINE:
+ /*
+ * Access the altmap_start_pfn and altmap_nr_pages fields
+ * within the struct memory_notify specifically when dealing
+ * with only MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE notifiers.
+ *
+ * When altmap is in use, take the specified memory range
+ * online, which includes the altmap.
+ */
+ if (arg->altmap_nr_pages) {
+ start = PFN_PHYS(arg->altmap_start_pfn);
+ size += PFN_PHYS(arg->altmap_nr_pages);
+ }
rc = sclp_mem_change_state(start, size, 1);
+ if (rc || !arg->altmap_nr_pages)
+ break;
+ /*
+ * Set CMMA state to nodat here, since the struct page memory
+ * at the beginning of the memory block will not go through the
+ * buddy allocator later.
+ */
+ __arch_set_page_nodat((void *)__va(start), arg->altmap_nr_pages);
break;
- case MEM_CANCEL_ONLINE:
- sclp_mem_change_state(start, size, 0);
- break;
- case MEM_OFFLINE:
+ case MEM_FINISH_OFFLINE:
+ /*
+ * When altmap is in use, take the specified memory range
+ * offline, which includes the altmap.
+ */
+ if (arg->altmap_nr_pages) {
+ start = PFN_PHYS(arg->altmap_start_pfn);
+ size += PFN_PHYS(arg->altmap_nr_pages);
+ }
sclp_mem_change_state(start, size, 0);
break;
default:
@@ -400,7 +424,9 @@ static void __init add_memory_merged(u16 rn)
if (!size)
goto skip_add;
for (addr = start; addr < start + size; addr += block_size)
- add_memory(0, addr, block_size, MHP_NONE);
+ add_memory(0, addr, block_size,
+ MACHINE_HAS_EDAT1 ?
+ MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE);
skip_add:
first_rn = rn;
num = 1;
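
The sclp notifier now keys off MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE and, when the block carries its own vmemmap (altmap), widens the range it passes to sclp_mem_change_state() so the altmap pages change state together with the usable memory; add_memory() correspondingly requests MHP_MEMMAP_ON_MEMORY when EDAT1 is available. A sketch of the range widening, using the struct memory_notify field names from the patch and otherwise illustrative code:

#include <linux/memory.h>
#include <linux/pfn.h>

static void example_notifier_range(const struct memory_notify *arg,
				   unsigned long *start, unsigned long *size)
{
	*start = PFN_PHYS(arg->start_pfn);
	*size = PFN_PHYS(arg->nr_pages);

	if (arg->altmap_nr_pages) {
		/* The altmap (vmemmap) pages sit at the front of the block and
		 * are only described for the PREPARE/FINISH notifiers. */
		*start = PFN_PHYS(arg->altmap_start_pfn);
		*size += PFN_PHYS(arg->altmap_nr_pages);
	}
}
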
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 4e5d5efa978f2..0aba30efb483d 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -305,7 +305,9 @@ tape_ccw_cc(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
ccw->cmd_code = cmd_code;
ccw->flags = CCW_FLAG_CC;
ccw->count = memsize;
- ccw->cda = (__u32)(addr_t) cda;
+ ccw->cda = 0;
+ if (cda)
+ ccw->cda = virt_to_dma32(cda);
return ccw + 1;
}
@@ -315,7 +317,9 @@ tape_ccw_end(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
ccw->cmd_code = cmd_code;
ccw->flags = 0;
ccw->count = memsize;
- ccw->cda = (__u32)(addr_t) cda;
+ ccw->cda = 0;
+ if (cda)
+ ccw->cda = virt_to_dma32(cda);
return ccw + 1;
}
@@ -325,7 +329,7 @@ tape_ccw_cmd(struct ccw1 *ccw, __u8 cmd_code)
ccw->cmd_code = cmd_code;
ccw->flags = 0;
ccw->count = 0;
- ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
+ ccw->cda = virt_to_dma32(&ccw->cmd_code);
return ccw + 1;
}
@@ -336,7 +340,7 @@ tape_ccw_repeat(struct ccw1 *ccw, __u8 cmd_code, int count)
ccw->cmd_code = cmd_code;
ccw->flags = CCW_FLAG_CC;
ccw->count = 0;
- ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
+ ccw->cda = virt_to_dma32(&ccw->cmd_code);
ccw++;
}
return ccw;
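The tape helpers above follow the general conversion scheme sketched below (hypothetical helper, not part of the patch): ccw->cda is now a typed dma32_t, so it is set via virt_to_dma32() instead of a (__u32)(addr_t) cast, with 0 kept for a NULL data address:

#include <linux/types.h>
#include <asm/cio.h>
#include <asm/dma-types.h>

/* mirrors the converted tape_ccw_*() helpers */
static struct ccw1 *example_ccw_cc(struct ccw1 *ccw, u8 cmd_code, u16 count,
				   void *cda)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = CCW_FLAG_CC;
	ccw->count = count;
	ccw->cda = 0;
	if (cda)
		ccw->cda = virt_to_dma32(cda);
	return ccw + 1;
}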
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index 277a0f903d11b..eae362bbfbb55 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -22,7 +22,9 @@ MODULE_DESCRIPTION(
);
MODULE_LICENSE("GPL");
-static struct class *tape_class;
+static const struct class tape_class = {
+ .name = "tape390",
+};
/*
* Register a tape device and return a pointer to the cdev structure.
@@ -74,7 +76,7 @@ struct tape_class_device *register_tape_dev(
if (rc)
goto fail_with_cdev;
- tcd->class_device = device_create(tape_class, device,
+ tcd->class_device = device_create(&tape_class, device,
tcd->char_device->dev, NULL,
"%s", tcd->device_name);
rc = PTR_ERR_OR_ZERO(tcd->class_device);
@@ -91,7 +93,7 @@ struct tape_class_device *register_tape_dev(
return tcd;
fail_with_class_device:
- device_destroy(tape_class, tcd->char_device->dev);
+ device_destroy(&tape_class, tcd->char_device->dev);
fail_with_cdev:
cdev_del(tcd->char_device);
@@ -107,7 +109,7 @@ void unregister_tape_dev(struct device *device, struct tape_class_device *tcd)
{
if (tcd != NULL && !IS_ERR(tcd)) {
sysfs_remove_link(&device->kobj, tcd->mode_name);
- device_destroy(tape_class, tcd->char_device->dev);
+ device_destroy(&tape_class, tcd->char_device->dev);
cdev_del(tcd->char_device);
kfree(tcd);
}
@@ -117,15 +119,12 @@ EXPORT_SYMBOL(unregister_tape_dev);
static int __init tape_init(void)
{
- tape_class = class_create("tape390");
-
- return 0;
+ return class_register(&tape_class);
}
static void __exit tape_exit(void)
{
- class_destroy(tape_class);
- tape_class = NULL;
+ class_unregister(&tape_class);
}
postcore_initcall(tape_init);
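The pattern adopted by these class conversions looks roughly like the sketch below (the "example390" name is illustrative only): the class becomes a compile-time constant and is registered with class_register()/class_unregister() instead of being allocated by class_create() and torn down with class_destroy().

#include <linux/device.h>
#include <linux/module.h>

static const struct class example_class = {
	.name = "example390",
};

static int __init example_init(void)
{
	return class_register(&example_class);
}

static void __exit example_exit(void)
{
	class_unregister(&example_class);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");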
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 6946ba9a9de2c..d7e408c8d0b84 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -679,7 +679,9 @@ static const struct attribute_group *vmlogrdr_attr_groups[] = {
NULL,
};
-static struct class *vmlogrdr_class;
+static const struct class vmlogrdr_class = {
+ .name = "vmlogrdr_class",
+};
static struct device_driver vmlogrdr_driver = {
.name = "vmlogrdr",
.bus = &iucv_bus,
@@ -699,12 +701,9 @@ static int vmlogrdr_register_driver(void)
if (ret)
goto out_iucv;
- vmlogrdr_class = class_create("vmlogrdr");
- if (IS_ERR(vmlogrdr_class)) {
- ret = PTR_ERR(vmlogrdr_class);
- vmlogrdr_class = NULL;
+ ret = class_register(&vmlogrdr_class);
+ if (ret)
goto out_driver;
- }
return 0;
out_driver:
@@ -718,8 +717,7 @@ out:
static void vmlogrdr_unregister_driver(void)
{
- class_destroy(vmlogrdr_class);
- vmlogrdr_class = NULL;
+ class_unregister(&vmlogrdr_class);
driver_unregister(&vmlogrdr_driver);
iucv_unregister(&vmlogrdr_iucv_handler, 1);
}
@@ -754,7 +752,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
return ret;
}
- priv->class_device = device_create(vmlogrdr_class, dev,
+ priv->class_device = device_create(&vmlogrdr_class, dev,
MKDEV(vmlogrdr_major,
priv->minor_num),
priv, "%s", dev_name(dev));
@@ -771,7 +769,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
- device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
+ device_destroy(&vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
if (priv->device != NULL) {
device_unregister(priv->device);
priv->device=NULL;
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 1d17a83569ce4..fe94dec427b69 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -48,7 +48,9 @@ MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");
static dev_t ur_first_dev_maj_min;
-static struct class *vmur_class;
+static const struct class vmur_class = {
+ .name = "vmur",
+};
static struct debug_info *vmur_dbf;
/* We put the device's record length (for writes) in the driver_info field */
@@ -195,7 +197,7 @@ static void free_chan_prog(struct ccw1 *cpa)
struct ccw1 *ptr = cpa;
while (ptr->cda) {
- kfree(phys_to_virt(ptr->cda));
+ kfree(dma32_to_virt(ptr->cda));
ptr++;
}
kfree(cpa);
@@ -237,7 +239,7 @@ static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
free_chan_prog(cpa);
return ERR_PTR(-ENOMEM);
}
- cpa[i].cda = (u32)virt_to_phys(kbuf);
+ cpa[i].cda = virt_to_dma32(kbuf);
if (copy_from_user(kbuf, ubuf, reclen)) {
free_chan_prog(cpa);
return ERR_PTR(-EFAULT);
@@ -912,7 +914,7 @@ static int ur_set_online(struct ccw_device *cdev)
goto fail_free_cdev;
}
- urd->device = device_create(vmur_class, &cdev->dev,
+ urd->device = device_create(&vmur_class, &cdev->dev,
urd->char_device->dev, NULL, "%s", node_id);
if (IS_ERR(urd->device)) {
rc = PTR_ERR(urd->device);
@@ -958,7 +960,7 @@ static int ur_set_offline_force(struct ccw_device *cdev, int force)
/* Work not run yet - need to release reference here */
urdev_put(urd);
}
- device_destroy(vmur_class, urd->char_device->dev);
+ device_destroy(&vmur_class, urd->char_device->dev);
cdev_del(urd->char_device);
urd->char_device = NULL;
rc = 0;
@@ -1022,11 +1024,9 @@ static int __init ur_init(void)
debug_set_level(vmur_dbf, 6);
- vmur_class = class_create("vmur");
- if (IS_ERR(vmur_class)) {
- rc = PTR_ERR(vmur_class);
+ rc = class_register(&vmur_class);
+ if (rc)
goto fail_free_dbf;
- }
rc = ccw_driver_register(&ur_driver);
if (rc)
@@ -1046,7 +1046,7 @@ static int __init ur_init(void)
fail_unregister_driver:
ccw_driver_unregister(&ur_driver);
fail_class_destroy:
- class_destroy(vmur_class);
+ class_unregister(&vmur_class);
fail_free_dbf:
debug_unregister(vmur_dbf);
return rc;
@@ -1056,7 +1056,7 @@ static void __exit ur_exit(void)
{
unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
ccw_driver_unregister(&ur_driver);
- class_destroy(vmur_class);
+ class_unregister(&vmur_class);
debug_unregister(vmur_dbf);
pr_info("%s unloaded.\n", ur_banner);
}
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 6eb8bcd948dc4..b72f672a7720a 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -240,7 +240,7 @@ static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj,
&gdev->dev.kobj, "group_device");
if (rc) {
- for (--i; i >= 0; i--)
+ while (i--)
sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
"group_device");
return rc;
@@ -251,7 +251,7 @@ static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
rc = sysfs_create_link(&gdev->dev.kobj,
&gdev->cdev[i]->dev.kobj, str);
if (rc) {
- for (--i; i >= 0; i--) {
+ while (i--) {
sprintf(str, "cdev%d", i);
sysfs_remove_link(&gdev->dev.kobj, str);
}
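The loop rewrite above uses the common unwind idiom; a standalone sketch with hypothetical names: "while (i--)" undoes exactly the iterations that succeeded and, unlike "for (--i; i >= 0; i--)", also works for unsigned counters.

#include <linux/kobject.h>
#include <linux/sysfs.h>

static int example_create_links(struct kobject *parent,
				struct kobject **targets, unsigned int count)
{
	unsigned int i;
	int rc;

	for (i = 0; i < count; i++) {
		rc = sysfs_create_link(parent, targets[i],
				       kobject_name(targets[i]));
		if (rc)
			goto undo;
	}
	return 0;
undo:
	/* remove only the links that were actually created */
	while (i--)
		sysfs_remove_link(parent, kobject_name(targets[i]));
	return rc;
}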
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 3d88899dff7cf..44ea76f9e1dec 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -191,7 +191,7 @@ EXPORT_SYMBOL_GPL(chsc_ssqd);
* Returns 0 on success.
*/
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
- u64 summary_indicator_addr, u64 subchannel_indicator_addr, u8 isc)
+ dma64_t summary_indicator_addr, dma64_t subchannel_indicator_addr, u8 isc)
{
memset(scssc, 0, sizeof(*scssc));
scssc->request.length = 0x0fe0;
@@ -844,7 +844,7 @@ chsc_add_cmg_attr(struct channel_subsystem *css)
}
return ret;
cleanup:
- for (--i; i >= 0; i--) {
+ while (i--) {
if (!css->chps[i])
continue;
chp_remove_cmg_attr(css->chps[i]);
@@ -861,9 +861,9 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
u32 key : 4;
u32 : 28;
u32 zeroes1;
- u32 cub_addr1;
+ dma32_t cub_addr1;
u32 zeroes2;
- u32 cub_addr2;
+ dma32_t cub_addr2;
u32 reserved[13];
struct chsc_header response;
u32 status : 8;
@@ -881,8 +881,8 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
secm_area->request.code = 0x0016;
secm_area->key = PAGE_DEFAULT_KEY >> 4;
- secm_area->cub_addr1 = virt_to_phys(css->cub_addr1);
- secm_area->cub_addr2 = virt_to_phys(css->cub_addr2);
+ secm_area->cub_addr1 = virt_to_dma32(css->cub_addr1);
+ secm_area->cub_addr2 = virt_to_dma32(css->cub_addr2);
secm_area->operation_code = enable ? 0 : 1;
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index d1caacb08e674..03602295f3350 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -91,8 +91,8 @@ struct chsc_scssc_area {
u16:16;
u32:32;
u32:32;
- u64 summary_indicator_addr;
- u64 subchannel_indicator_addr;
+ dma64_t summary_indicator_addr;
+ dma64_t subchannel_indicator_addr;
u32 ks:4;
u32 kc:4;
u32:21;
@@ -164,7 +164,7 @@ void chsc_chp_offline(struct chp_id chpid);
int chsc_get_channel_measurement_chars(struct channel_path *chp);
int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd);
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
- u64 summary_indicator_addr, u64 subchannel_indicator_addr,
+ dma64_t summary_indicator_addr, dma64_t subchannel_indicator_addr,
u8 isc);
int chsc_sgib(u32 origin);
int chsc_error_from_response(int response);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index a5736b7357b28..7e759c21480e7 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -148,7 +148,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
orb->cmd.i2k = 0;
orb->cmd.key = key >> 4;
/* issue "Start Subchannel" */
- orb->cmd.cpa = (u32)virt_to_phys(cpa);
+ orb->cmd.cpa = virt_to_dma32(cpa);
ccode = ssch(sch->schid, orb);
/* process condition code */
@@ -717,7 +717,7 @@ int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
orb->tm.key = key >> 4;
orb->tm.b = 1;
orb->tm.lpm = lpm ? lpm : sch->lpm;
- orb->tm.tcw = (u32)virt_to_phys(tcw);
+ orb->tm.tcw = virt_to_dma32(tcw);
cc = ssch(sch->schid, orb);
switch (cc) {
case 0:
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 094431a62ad54..1d68db1a3d4e4 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1114,26 +1114,33 @@ static int cio_dma_pool_init(void)
return 0;
}
-void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
- size_t size)
+void *__cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+ size_t size, dma32_t *dma_handle)
{
dma_addr_t dma_addr;
- unsigned long addr;
size_t chunk_size;
+ void *addr;
if (!gp_dma)
return NULL;
- addr = gen_pool_alloc(gp_dma, size);
+ addr = gen_pool_dma_alloc(gp_dma, size, &dma_addr);
while (!addr) {
chunk_size = round_up(size, PAGE_SIZE);
- addr = (unsigned long) dma_alloc_coherent(dma_dev,
- chunk_size, &dma_addr, CIO_DMA_GFP);
+ addr = dma_alloc_coherent(dma_dev, chunk_size, &dma_addr, CIO_DMA_GFP);
if (!addr)
return NULL;
- gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
- addr = gen_pool_alloc(gp_dma, size);
+ gen_pool_add_virt(gp_dma, (unsigned long)addr, dma_addr, chunk_size, -1);
+ addr = gen_pool_dma_alloc(gp_dma, size, dma_handle ? &dma_addr : NULL);
}
- return (void *) addr;
+ if (dma_handle)
+ *dma_handle = (__force dma32_t)dma_addr;
+ return addr;
+}
+
+void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+ size_t size)
+{
+ return __cio_gp_dma_zalloc(gp_dma, dma_dev, size, NULL);
}
void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
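A small usage sketch (hypothetical caller, not part of the patch) of the allocator switched to above: gen_pool_dma_alloc() returns the CPU address and also reports the DMA address that gen_pool_add_virt() recorded for the chunk, which plain gen_pool_alloc() cannot do.

#include <linux/dma-mapping.h>
#include <linux/genalloc.h>

static void *example_pool_alloc(struct gen_pool *pool, size_t size,
				dma_addr_t *dma_addr)
{
	return gen_pool_dma_alloc(pool, size, dma_addr);
}

static void example_pool_free(struct gen_pool *pool, void *cpu_addr,
			      size_t size)
{
	gen_pool_free(pool, (unsigned long)cpu_addr, size);
}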
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index f95d12345d98a..920f550bc313b 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -363,10 +363,8 @@ int ccw_device_set_online(struct ccw_device *cdev)
spin_lock_irq(cdev->ccwlock);
ret = ccw_device_online(cdev);
- spin_unlock_irq(cdev->ccwlock);
- if (ret == 0)
- wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
- else {
+ if (ret) {
+ spin_unlock_irq(cdev->ccwlock);
CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
@@ -375,7 +373,12 @@ int ccw_device_set_online(struct ccw_device *cdev)
put_device(&cdev->dev);
return ret;
}
- spin_lock_irq(cdev->ccwlock);
+ /* Wait until a final state is reached */
+ while (!dev_fsm_final_state(cdev)) {
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+ spin_lock_irq(cdev->ccwlock);
+ }
/* Check if online processing was successful */
if ((cdev->private->state != DEV_STATE_ONLINE) &&
(cdev->private->state != DEV_STATE_W4SENSE)) {
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index c396ac3e3a327..42791fa0b80e2 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -64,13 +64,13 @@ static void ccw_timeout_log(struct ccw_device *cdev)
printk(KERN_WARNING "cio: orb indicates transport mode\n");
printk(KERN_WARNING "cio: last tcw:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
- phys_to_virt(orb->tm.tcw),
+ dma32_to_virt(orb->tm.tcw),
sizeof(struct tcw), 0);
} else {
printk(KERN_WARNING "cio: orb indicates command mode\n");
- if ((void *)(addr_t)orb->cmd.cpa ==
+ if (dma32_to_virt(orb->cmd.cpa) ==
&private->dma_area->sense_ccw ||
- (void *)(addr_t)orb->cmd.cpa ==
+ dma32_to_virt(orb->cmd.cpa) ==
cdev->private->dma_area->iccws)
printk(KERN_WARNING "cio: last channel program "
"(intern):\n");
@@ -78,7 +78,7 @@ static void ccw_timeout_log(struct ccw_device *cdev)
printk(KERN_WARNING "cio: last channel program:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
- phys_to_virt(orb->cmd.cpa),
+ dma32_to_virt(orb->cmd.cpa),
sizeof(struct ccw1), 0);
}
printk(KERN_WARNING "cio: ccw device state: %d\n",
@@ -504,6 +504,11 @@ callback:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
+ CIO_MSG_EVENT(2, "fakeirb: deliver device 0.%x.%04x intparm %lx type=%d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno,
+ cdev->private->intparm,
+ cdev->private->flags.fake_irb);
create_fake_irb(&cdev->private->dma_area->irb,
cdev->private->flags.fake_irb);
cdev->private->flags.fake_irb = 0;
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index ce99ee2457e6e..a512eac834852 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -210,7 +210,7 @@ void ccw_device_sense_id_start(struct ccw_device *cdev)
snsid_init(cdev);
/* Channel program setup. */
cp->cmd_code = CCW_CMD_SENSE_ID;
- cp->cda = (u32)virt_to_phys(&cdev->private->dma_area->senseid);
+ cp->cda = virt_to_dma32(&cdev->private->dma_area->senseid);
cp->count = sizeof(struct senseid);
cp->flags = CCW_FLAG_SLI;
/* Request setup. */
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index a5dba3829769c..acd6790dba4dd 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -208,6 +208,10 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = FAKE_CMD_IRB;
cdev->private->intparm = intparm;
+ CIO_MSG_EVENT(2, "fakeirb: queue device 0.%x.%04x intparm %lx type=%d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, intparm,
+ cdev->private->flags.fake_irb);
return 0;
} else
/* There's already a fake I/O around. */
@@ -551,6 +555,10 @@ int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = FAKE_TM_IRB;
cdev->private->intparm = intparm;
+ CIO_MSG_EVENT(2, "fakeirb: queue device 0.%x.%04x intparm %lx type=%d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, intparm,
+ cdev->private->flags.fake_irb);
return 0;
} else
/* There's already a fake I/O around. */
@@ -823,13 +831,14 @@ EXPORT_SYMBOL_GPL(ccw_device_get_chid);
* the subchannels dma pool. Maximal size of allocation supported
* is PAGE_SIZE.
*/
-void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size)
+void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size,
+ dma32_t *dma_handle)
{
void *addr;
if (!get_device(&cdev->dev))
return NULL;
- addr = cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size);
+ addr = __cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size, dma_handle);
if (IS_ERR_OR_NULL(addr))
put_device(&cdev->dev);
return addr;
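With the extra parameter, a driver can obtain the 31-bit DMA handle together with the CPU address; a minimal, hypothetical usage sketch:

#include <asm/ccwdev.h>
#include <asm/dma-types.h>

static void *example_dma_buf_alloc(struct ccw_device *cdev, size_t size,
				   dma32_t *cda)
{
	/* zeroed, CCW-addressable memory plus its dma32_t handle */
	return ccw_device_dma_zalloc(cdev, size, cda);
}

static void example_dma_buf_free(struct ccw_device *cdev, void *buf,
				 size_t size)
{
	ccw_device_dma_free(cdev, buf, size);
}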
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index ad90045873e25..b3afe283cc103 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -141,7 +141,7 @@ static void spid_build_cp(struct ccw_device *cdev, u8 fn)
pgid->inf.fc = fn;
cp->cmd_code = CCW_CMD_SET_PGID;
- cp->cda = (u32)virt_to_phys(pgid);
+ cp->cda = virt_to_dma32(pgid);
cp->count = sizeof(*pgid);
cp->flags = CCW_FLAG_SLI;
req->cp = cp;
@@ -442,7 +442,7 @@ static void snid_build_cp(struct ccw_device *cdev)
/* Channel program setup. */
cp->cmd_code = CCW_CMD_SENSE_PGID;
- cp->cda = (u32)virt_to_phys(&cdev->private->dma_area->pgid[i]);
+ cp->cda = virt_to_dma32(&cdev->private->dma_area->pgid[i]);
cp->count = sizeof(struct pgid);
cp->flags = CCW_FLAG_SLI;
req->cp = cp;
@@ -632,11 +632,11 @@ static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
struct ccw1 *cp = cdev->private->dma_area->iccws;
cp[0].cmd_code = CCW_CMD_STLCK;
- cp[0].cda = (u32)virt_to_phys(buf1);
+ cp[0].cda = virt_to_dma32(buf1);
cp[0].count = 32;
cp[0].flags = CCW_FLAG_CC;
cp[1].cmd_code = CCW_CMD_RELEASE;
- cp[1].cda = (u32)virt_to_phys(buf2);
+ cp[1].cda = virt_to_dma32(buf2);
cp[1].count = 32;
cp[1].flags = 0;
req->cp = cp;
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 6c2e35065fec3..0ff8482a7b155 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -332,7 +332,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
*/
sense_ccw = &to_io_private(sch)->dma_area->sense_ccw;
sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
- sense_ccw->cda = virt_to_phys(cdev->private->dma_area->irb.ecw);
+ sense_ccw->cda = virt_to_dma32(cdev->private->dma_area->irb.ecw);
sense_ccw->count = SENSE_MAX_COUNT;
sense_ccw->flags = CCW_FLAG_SLI;
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index 1caedf931a5f0..165de15523016 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -63,7 +63,7 @@ static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
int cc;
orb_init(orb);
- orb->eadm.aob = (u32)virt_to_phys(aob);
+ orb->eadm.aob = virt_to_dma32(aob);
orb->eadm.intparm = (u32)virt_to_phys(sch);
orb->eadm.key = PAGE_DEFAULT_KEY >> 4;
@@ -147,7 +147,7 @@ static void eadm_subchannel_irq(struct subchannel *sch)
css_sched_sch_todo(sch, SCH_TODO_EVAL);
return;
}
- scm_irq_handler(phys_to_virt(scsw->aob), error);
+ scm_irq_handler(dma32_to_virt(scsw->aob), error);
private->state = EADM_IDLE;
if (private->completion)
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c
index 84f24a2f46e4a..ba35b64949d3a 100644
--- a/drivers/s390/cio/fcx.c
+++ b/drivers/s390/cio/fcx.c
@@ -25,7 +25,7 @@
*/
struct tcw *tcw_get_intrg(struct tcw *tcw)
{
- return phys_to_virt(tcw->intrg);
+ return dma32_to_virt(tcw->intrg);
}
EXPORT_SYMBOL(tcw_get_intrg);
@@ -40,9 +40,9 @@ EXPORT_SYMBOL(tcw_get_intrg);
void *tcw_get_data(struct tcw *tcw)
{
if (tcw->r)
- return phys_to_virt(tcw->input);
+ return dma64_to_virt(tcw->input);
if (tcw->w)
- return phys_to_virt(tcw->output);
+ return dma64_to_virt(tcw->output);
return NULL;
}
EXPORT_SYMBOL(tcw_get_data);
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(tcw_get_data);
*/
struct tccb *tcw_get_tccb(struct tcw *tcw)
{
- return phys_to_virt(tcw->tccb);
+ return dma64_to_virt(tcw->tccb);
}
EXPORT_SYMBOL(tcw_get_tccb);
@@ -67,7 +67,7 @@ EXPORT_SYMBOL(tcw_get_tccb);
*/
struct tsb *tcw_get_tsb(struct tcw *tcw)
{
- return phys_to_virt(tcw->tsb);
+ return dma64_to_virt(tcw->tsb);
}
EXPORT_SYMBOL(tcw_get_tsb);
@@ -190,7 +190,7 @@ EXPORT_SYMBOL(tcw_finalize);
*/
void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw)
{
- tcw->intrg = (u32)virt_to_phys(intrg_tcw);
+ tcw->intrg = virt_to_dma32(intrg_tcw);
}
EXPORT_SYMBOL(tcw_set_intrg);
@@ -208,11 +208,11 @@ EXPORT_SYMBOL(tcw_set_intrg);
void tcw_set_data(struct tcw *tcw, void *data, int use_tidal)
{
if (tcw->r) {
- tcw->input = virt_to_phys(data);
+ tcw->input = virt_to_dma64(data);
if (use_tidal)
tcw->flags |= TCW_FLAGS_INPUT_TIDA;
} else if (tcw->w) {
- tcw->output = virt_to_phys(data);
+ tcw->output = virt_to_dma64(data);
if (use_tidal)
tcw->flags |= TCW_FLAGS_OUTPUT_TIDA;
}
@@ -228,7 +228,7 @@ EXPORT_SYMBOL(tcw_set_data);
*/
void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb)
{
- tcw->tccb = virt_to_phys(tccb);
+ tcw->tccb = virt_to_dma64(tccb);
}
EXPORT_SYMBOL(tcw_set_tccb);
@@ -241,7 +241,7 @@ EXPORT_SYMBOL(tcw_set_tccb);
*/
void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb)
{
- tcw->tsb = virt_to_phys(tsb);
+ tcw->tsb = virt_to_dma64(tsb);
}
EXPORT_SYMBOL(tcw_set_tsb);
@@ -346,7 +346,7 @@ struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
memset(tidaw, 0, sizeof(struct tidaw));
tidaw->flags = flags;
tidaw->count = count;
- tidaw->addr = virt_to_phys(addr);
+ tidaw->addr = virt_to_dma64(addr);
return tidaw;
}
EXPORT_SYMBOL(tcw_add_tidaw);
diff --git a/drivers/s390/cio/orb.h b/drivers/s390/cio/orb.h
index a2d3778b2c959..14d2a1822b506 100644
--- a/drivers/s390/cio/orb.h
+++ b/drivers/s390/cio/orb.h
@@ -12,6 +12,9 @@
#ifndef S390_ORB_H
#define S390_ORB_H
+#include <linux/types.h>
+#include <asm/dma-types.h>
+
/*
* Command-mode operation request block
*/
@@ -34,7 +37,7 @@ struct cmd_orb {
u32 ils:1; /* incorrect length */
u32 zero:6; /* reserved zeros */
u32 orbx:1; /* ORB extension control */
- u32 cpa; /* channel program address */
+ dma32_t cpa; /* channel program address */
} __packed __aligned(4);
/*
@@ -49,7 +52,7 @@ struct tm_orb {
u32 lpm:8;
u32:7;
u32 x:1;
- u32 tcw;
+ dma32_t tcw;
u32 prio:8;
u32:8;
u32 rsvpgm:8;
@@ -71,7 +74,7 @@ struct eadm_orb {
u32 compat2:1;
u32:21;
u32 x:1;
- u32 aob;
+ dma32_t aob;
u32 css_prio:8;
u32:8;
u32 scm_prio:8;
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 9cde55730b65a..a1cb39f4b7a27 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -82,7 +82,7 @@ static inline int do_siga_input(unsigned long schid, unsigned long mask,
*/
static inline int do_siga_output(unsigned long schid, unsigned long mask,
unsigned int *bb, unsigned long fc,
- unsigned long aob)
+ dma64_t aob)
{
int cc;
@@ -321,7 +321,7 @@ static inline int qdio_siga_sync_q(struct qdio_q *q)
}
static int qdio_siga_output(struct qdio_q *q, unsigned int count,
- unsigned int *busy_bit, unsigned long aob)
+ unsigned int *busy_bit, dma64_t aob)
{
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
unsigned int fc = QDIO_SIGA_WRITE;
@@ -628,7 +628,7 @@ int qdio_inspect_output_queue(struct ccw_device *cdev, unsigned int nr,
EXPORT_SYMBOL_GPL(qdio_inspect_output_queue);
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
- unsigned long aob)
+ dma64_t aob)
{
int retries = 0, cc;
unsigned int busy_bit;
@@ -722,8 +722,8 @@ static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
lgr_info_log();
}
-static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
- int dstat)
+static int qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
+ int dstat, int dcc)
{
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
@@ -731,15 +731,18 @@ static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
goto error;
if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
goto error;
+ if (dcc == 1)
+ return -EAGAIN;
if (!(dstat & DEV_STAT_DEV_END))
goto error;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
- return;
+ return 0;
error:
DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ return -EIO;
}
/* qdio interrupt handler */
@@ -748,7 +751,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
- int cstat, dstat;
+ int cstat, dstat, rc, dcc;
if (!intparm || !irq_ptr) {
ccw_device_get_schid(cdev, &schid);
@@ -768,10 +771,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
qdio_irq_check_sense(irq_ptr, irb);
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
+ dcc = scsw_cmd_is_valid_cc(&irb->scsw) ? irb->scsw.cmd.cc : 0;
+ rc = 0;
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_INACTIVE:
- qdio_establish_handle_irq(irq_ptr, cstat, dstat);
+ rc = qdio_establish_handle_irq(irq_ptr, cstat, dstat, dcc);
break;
case QDIO_IRQ_STATE_CLEANUP:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
@@ -785,12 +790,25 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
if (cstat || dstat)
qdio_handle_activate_check(irq_ptr, intparm, cstat,
dstat);
+ else if (dcc == 1)
+ rc = -EAGAIN;
break;
case QDIO_IRQ_STATE_STOPPED:
break;
default:
WARN_ON_ONCE(1);
}
+
+ if (rc == -EAGAIN) {
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qint retry");
+ rc = ccw_device_start(cdev, irq_ptr->ccw, intparm, 0, 0);
+ if (!rc)
+ return;
+ DBF_ERROR("%4x RETRY ERR", irq_ptr->schid.sch_no);
+ DBF_ERROR("rc:%4x", rc);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ }
+
wake_up(&cdev->private->wait_q);
}
@@ -1070,7 +1088,7 @@ int qdio_establish(struct ccw_device *cdev,
irq_ptr->ccw->cmd_code = ciw->cmd;
irq_ptr->ccw->flags = CCW_FLAG_SLI;
irq_ptr->ccw->count = ciw->count;
- irq_ptr->ccw->cda = (u32) virt_to_phys(irq_ptr->qdr);
+ irq_ptr->ccw->cda = virt_to_dma32(irq_ptr->qdr);
spin_lock_irq(get_ccwdev_lock(cdev));
ccw_device_set_options_mask(cdev, 0);
@@ -1263,9 +1281,9 @@ static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int co
qperf_inc(q, outbound_queue_full);
if (queue_type(q) == QDIO_IQDIO_QFMT) {
- unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;
+ dma64_t phys_aob = aob ? virt_to_dma64(aob) : 0;
- WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
+ WARN_ON_ONCE(!IS_ALIGNED(dma64_to_u64(phys_aob), 256));
rc = qdio_kick_outbound_q(q, count, phys_aob);
} else if (qdio_need_siga_sync(q->irq_ptr)) {
rc = qdio_sync_output_queue(q);
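The retry rule introduced above can be summarized by the following sketch (hypothetical handler fragment; assumes the ccw device lock is held, as it is when qdio_int_handler() runs): a deferred condition code of 1 means the channel program never started, so the request is simply reissued.

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/scsw.h>

static int example_handle_dcc(struct ccw_device *cdev, struct irb *irb,
			      struct ccw1 *ccw, unsigned long intparm)
{
	int dcc = scsw_cmd_is_valid_cc(&irb->scsw) ? irb->scsw.cmd.cc : 0;

	/* deferred cc 1: the channel program did not start, reissue it */
	if (dcc == 1)
		return ccw_device_start(cdev, ccw, intparm, 0, 0);
	return 0;
}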
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 714878e2acc4d..99c0fd23022de 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -179,7 +179,7 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
/* fill in sl */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
- q->sl->element[j].sbal = virt_to_phys(q->sbal[j]);
+ q->sl->element[j].sbal = virt_to_dma64(q->sbal[j]);
}
static void setup_queues(struct qdio_irq *irq_ptr,
@@ -291,9 +291,9 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
static void qdio_fill_qdr_desc(struct qdesfmt0 *desc, struct qdio_q *queue)
{
- desc->sliba = virt_to_phys(queue->slib);
- desc->sla = virt_to_phys(queue->sl);
- desc->slsba = virt_to_phys(&queue->slsb);
+ desc->sliba = virt_to_dma64(queue->slib);
+ desc->sla = virt_to_dma64(queue->sl);
+ desc->slsba = virt_to_dma64(&queue->slsb);
desc->akey = PAGE_DEFAULT_KEY >> 4;
desc->bkey = PAGE_DEFAULT_KEY >> 4;
@@ -315,7 +315,7 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
- irq_ptr->qdr->qiba = virt_to_phys(&irq_ptr->qib);
+ irq_ptr->qdr->qiba = virt_to_dma64(&irq_ptr->qib);
irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
for (i = 0; i < qdio_init->no_input_qs; i++)
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 9b9335dd06db6..ccd4ed93bd92d 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -137,15 +137,15 @@ static struct airq_struct tiqdio_airq = {
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
- u64 summary_indicator_addr, subchannel_indicator_addr;
+ dma64_t summary_indicator_addr, subchannel_indicator_addr;
int rc;
if (reset) {
summary_indicator_addr = 0;
subchannel_indicator_addr = 0;
} else {
- summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr);
- subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci);
+ summary_indicator_addr = virt_to_dma64(tiqdio_airq.lsi_ptr);
+ subchannel_indicator_addr = virt_to_dma64(irq_ptr->dsci);
}
rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index aafd66305eade..6e5c508b1e07c 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -190,7 +190,7 @@ static bool page_array_iova_pinned(struct page_array *pa, u64 iova, u64 length)
}
/* Create the list of IDAL words for a page_array. */
static inline void page_array_idal_create_words(struct page_array *pa,
- unsigned long *idaws)
+ dma64_t *idaws)
{
int i;
@@ -203,10 +203,10 @@ static inline void page_array_idal_create_words(struct page_array *pa,
*/
for (i = 0; i < pa->pa_nr; i++) {
- idaws[i] = page_to_phys(pa->pa_page[i]);
+ idaws[i] = virt_to_dma64(page_to_virt(pa->pa_page[i]));
/* Incorporate any offset from each starting address */
- idaws[i] += pa->pa_iova[i] & (PAGE_SIZE - 1);
+ idaws[i] = dma64_add(idaws[i], pa->pa_iova[i] & ~PAGE_MASK);
}
}
@@ -227,7 +227,7 @@ static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
pccw1->flags = ccw0.flags;
pccw1->count = ccw0.count;
}
- pccw1->cda = ccw0.cda;
+ pccw1->cda = u32_to_dma32(ccw0.cda);
pccw1++;
}
}
@@ -299,11 +299,12 @@ static inline int ccw_does_data_transfer(struct ccw1 *ccw)
*
* Returns 1 if yes, 0 if no.
*/
-static inline int is_cpa_within_range(u32 cpa, u32 head, int len)
+static inline int is_cpa_within_range(dma32_t cpa, u32 head, int len)
{
u32 tail = head + (len - 1) * sizeof(struct ccw1);
+ u32 gcpa = dma32_to_u32(cpa);
- return (head <= cpa && cpa <= tail);
+ return head <= gcpa && gcpa <= tail;
}
static inline int is_tic_within_range(struct ccw1 *ccw, u32 head, int len)
@@ -356,7 +357,7 @@ static void ccwchain_cda_free(struct ccwchain *chain, int idx)
if (ccw_is_tic(ccw))
return;
- kfree(phys_to_virt(ccw->cda));
+ kfree(dma32_to_virt(ccw->cda));
}
/**
@@ -417,15 +418,17 @@ static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
static int ccwchain_loop_tic(struct ccwchain *chain,
struct channel_program *cp);
-static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
+static int ccwchain_handle_ccw(dma32_t cda, struct channel_program *cp)
{
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
struct ccwchain *chain;
int len, ret;
+ u32 gcda;
+ gcda = dma32_to_u32(cda);
/* Copy 2K (the most we support today) of possible CCWs */
- ret = vfio_dma_rw(vdev, cda, cp->guest_cp, CCWCHAIN_LEN_MAX * sizeof(struct ccw1), false);
+ ret = vfio_dma_rw(vdev, gcda, cp->guest_cp, CCWCHAIN_LEN_MAX * sizeof(struct ccw1), false);
if (ret)
return ret;
@@ -434,7 +437,7 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
convert_ccw0_to_ccw1(cp->guest_cp, CCWCHAIN_LEN_MAX);
/* Count the CCWs in the current chain */
- len = ccwchain_calc_length(cda, cp);
+ len = ccwchain_calc_length(gcda, cp);
if (len < 0)
return len;
@@ -444,7 +447,7 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
return -ENOMEM;
chain->ch_len = len;
- chain->ch_iova = cda;
+ chain->ch_iova = gcda;
/* Copy the actual CCWs into the new chain */
memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1));
@@ -487,13 +490,13 @@ static int ccwchain_fetch_tic(struct ccw1 *ccw,
struct channel_program *cp)
{
struct ccwchain *iter;
- u32 ccw_head;
+ u32 cda, ccw_head;
list_for_each_entry(iter, &cp->ccwchain_list, next) {
ccw_head = iter->ch_iova;
if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) {
- ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
- (ccw->cda - ccw_head));
+ cda = (u64)iter->ch_ccw + dma32_to_u32(ccw->cda) - ccw_head;
+ ccw->cda = u32_to_dma32(cda);
return 0;
}
}
@@ -501,14 +504,12 @@ static int ccwchain_fetch_tic(struct ccw1 *ccw,
return -EFAULT;
}
-static unsigned long *get_guest_idal(struct ccw1 *ccw,
- struct channel_program *cp,
- int idaw_nr)
+static dma64_t *get_guest_idal(struct ccw1 *ccw, struct channel_program *cp, int idaw_nr)
{
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
- unsigned long *idaws;
- unsigned int *idaws_f1;
+ dma64_t *idaws;
+ dma32_t *idaws_f1;
int idal_len = idaw_nr * sizeof(*idaws);
int idaw_size = idal_is_2k(cp) ? PAGE_SIZE / 2 : PAGE_SIZE;
int idaw_mask = ~(idaw_size - 1);
@@ -520,7 +521,7 @@ static unsigned long *get_guest_idal(struct ccw1 *ccw,
if (ccw_is_idal(ccw)) {
/* Copy IDAL from guest */
- ret = vfio_dma_rw(vdev, ccw->cda, idaws, idal_len, false);
+ ret = vfio_dma_rw(vdev, dma32_to_u32(ccw->cda), idaws, idal_len, false);
if (ret) {
kfree(idaws);
return ERR_PTR(ret);
@@ -528,14 +529,18 @@ static unsigned long *get_guest_idal(struct ccw1 *ccw,
} else {
/* Fabricate an IDAL based off CCW data address */
if (cp->orb.cmd.c64) {
- idaws[0] = ccw->cda;
- for (i = 1; i < idaw_nr; i++)
- idaws[i] = (idaws[i - 1] + idaw_size) & idaw_mask;
+ idaws[0] = u64_to_dma64(dma32_to_u32(ccw->cda));
+ for (i = 1; i < idaw_nr; i++) {
+ idaws[i] = dma64_add(idaws[i - 1], idaw_size);
+ idaws[i] = dma64_and(idaws[i], idaw_mask);
+ }
} else {
- idaws_f1 = (unsigned int *)idaws;
+ idaws_f1 = (dma32_t *)idaws;
idaws_f1[0] = ccw->cda;
- for (i = 1; i < idaw_nr; i++)
- idaws_f1[i] = (idaws_f1[i - 1] + idaw_size) & idaw_mask;
+ for (i = 1; i < idaw_nr; i++) {
+ idaws_f1[i] = dma32_add(idaws_f1[i - 1], idaw_size);
+ idaws_f1[i] = dma32_and(idaws_f1[i], idaw_mask);
+ }
}
}
@@ -572,7 +577,7 @@ static int ccw_count_idaws(struct ccw1 *ccw,
if (ccw_is_idal(ccw)) {
/* Read first IDAW to check its starting address. */
/* All subsequent IDAWs will be 2K- or 4K-aligned. */
- ret = vfio_dma_rw(vdev, ccw->cda, &iova, size, false);
+ ret = vfio_dma_rw(vdev, dma32_to_u32(ccw->cda), &iova, size, false);
if (ret)
return ret;
@@ -583,7 +588,7 @@ static int ccw_count_idaws(struct ccw1 *ccw,
if (!cp->orb.cmd.c64)
iova = iova >> 32;
} else {
- iova = ccw->cda;
+ iova = dma32_to_u32(ccw->cda);
}
/* Format-1 IDAWs operate on 2K each */
@@ -604,8 +609,8 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw,
{
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
- unsigned long *idaws;
- unsigned int *idaws_f1;
+ dma64_t *idaws;
+ dma32_t *idaws_f1;
int ret;
int idaw_nr;
int i;
@@ -636,12 +641,12 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw,
* Copy guest IDAWs into page_array, in case the memory they
* occupy is not contiguous.
*/
- idaws_f1 = (unsigned int *)idaws;
+ idaws_f1 = (dma32_t *)idaws;
for (i = 0; i < idaw_nr; i++) {
if (cp->orb.cmd.c64)
- pa->pa_iova[i] = idaws[i];
+ pa->pa_iova[i] = dma64_to_u64(idaws[i]);
else
- pa->pa_iova[i] = idaws_f1[i];
+ pa->pa_iova[i] = dma32_to_u32(idaws_f1[i]);
}
if (ccw_does_data_transfer(ccw)) {
@@ -652,7 +657,7 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw,
pa->pa_nr = 0;
}
- ccw->cda = (__u32) virt_to_phys(idaws);
+ ccw->cda = virt_to_dma32(idaws);
ccw->flags |= CCW_FLAG_IDA;
/* Populate the IDAL with pinned/translated addresses from page */
@@ -874,7 +879,7 @@ union orb *cp_get_orb(struct channel_program *cp, struct subchannel *sch)
chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next);
cpa = chain->ch_ccw;
- orb->cmd.cpa = (__u32)virt_to_phys(cpa);
+ orb->cmd.cpa = virt_to_dma32(cpa);
return orb;
}
@@ -896,7 +901,7 @@ union orb *cp_get_orb(struct channel_program *cp, struct subchannel *sch)
void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
{
struct ccwchain *chain;
- u32 cpa = scsw->cmd.cpa;
+ dma32_t cpa = scsw->cmd.cpa;
u32 ccw_head;
if (!cp->initialized)
@@ -919,9 +924,10 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
* (cpa - ccw_head) is the offset value of the host
* physical ccw to its chain head.
* Adding this value to the guest physical ccw chain
- * head gets us the guest cpa.
+ * head gets us the guest cpa:
+ * cpa = chain->ch_iova + (cpa - ccw_head)
*/
- cpa = chain->ch_iova + (cpa - ccw_head);
+ cpa = dma32_add(cpa, chain->ch_iova - ccw_head);
break;
}
}
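The IDAW fabrication above relies on the typed arithmetic helpers; a standalone sketch (hypothetical helper, not part of the patch) of how one IDAW follows the previous one:

#include <linux/types.h>
#include <asm/dma-types.h>

static dma32_t example_next_idaw(dma32_t prev, u32 idaw_size)
{
	/* advance by one IDAW block, then align down to its boundary */
	dma32_t next = dma32_add(prev, idaw_size);

	return dma32_and(next, ~(idaw_size - 1));
}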
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 09877b46181d4..4d7988ea47ef0 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -378,7 +378,7 @@ static void fsm_open(struct vfio_ccw_private *private,
spin_lock_irq(&sch->lock);
sch->isc = VFIO_CCW_ISC;
- ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch));
if (ret)
goto err_unlock;
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 02c503f16bc2d..eba07f8ef3087 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -107,7 +107,11 @@ EXPORT_SYMBOL(zcrypt_msgtype);
struct zcdn_device;
-static struct class *zcrypt_class;
+static void zcdn_device_release(struct device *dev);
+static const struct class zcrypt_class = {
+ .name = ZCRYPT_NAME,
+ .dev_release = zcdn_device_release,
+};
static dev_t zcrypt_devt;
static struct cdev zcrypt_cdev;
@@ -130,7 +134,7 @@ static int zcdn_destroy(const char *name);
*/
static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
{
- struct device *dev = class_find_device_by_name(zcrypt_class, name);
+ struct device *dev = class_find_device_by_name(&zcrypt_class, name);
return dev ? to_zcdn_dev(dev) : NULL;
}
@@ -142,7 +146,7 @@ static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
*/
static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
{
- struct device *dev = class_find_device_by_devt(zcrypt_class, devt);
+ struct device *dev = class_find_device_by_devt(&zcrypt_class, devt);
return dev ? to_zcdn_dev(dev) : NULL;
}
@@ -396,7 +400,7 @@ static int zcdn_create(const char *name)
goto unlockout;
}
zcdndev->device.release = zcdn_device_release;
- zcdndev->device.class = zcrypt_class;
+ zcdndev->device.class = &zcrypt_class;
zcdndev->device.devt = devt;
zcdndev->device.groups = zcdn_dev_attr_groups;
if (name[0])
@@ -573,6 +577,7 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
{
if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
return NULL;
+ zcrypt_card_get(zc);
zcrypt_queue_get(zq);
get_device(&zq->queue->ap_dev.device);
atomic_add(weight, &zc->load);
@@ -592,6 +597,7 @@ static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
atomic_sub(weight, &zq->load);
put_device(&zq->queue->ap_dev.device);
zcrypt_queue_put(zq);
+ zcrypt_card_put(zc);
module_put(mod);
}
@@ -2075,12 +2081,9 @@ static int __init zcdn_init(void)
int rc;
/* create a new class 'zcrypt' */
- zcrypt_class = class_create(ZCRYPT_NAME);
- if (IS_ERR(zcrypt_class)) {
- rc = PTR_ERR(zcrypt_class);
- goto out_class_create_failed;
- }
- zcrypt_class->dev_release = zcdn_device_release;
+ rc = class_register(&zcrypt_class);
+ if (rc)
+ goto out_class_register_failed;
/* alloc device minor range */
rc = alloc_chrdev_region(&zcrypt_devt,
@@ -2096,35 +2099,35 @@ static int __init zcdn_init(void)
goto out_cdev_add_failed;
/* need some class specific sysfs attributes */
- rc = class_create_file(zcrypt_class, &class_attr_zcdn_create);
+ rc = class_create_file(&zcrypt_class, &class_attr_zcdn_create);
if (rc)
goto out_class_create_file_1_failed;
- rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy);
+ rc = class_create_file(&zcrypt_class, &class_attr_zcdn_destroy);
if (rc)
goto out_class_create_file_2_failed;
return 0;
out_class_create_file_2_failed:
- class_remove_file(zcrypt_class, &class_attr_zcdn_create);
+ class_remove_file(&zcrypt_class, &class_attr_zcdn_create);
out_class_create_file_1_failed:
cdev_del(&zcrypt_cdev);
out_cdev_add_failed:
unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
out_alloc_chrdev_failed:
- class_destroy(zcrypt_class);
-out_class_create_failed:
+ class_unregister(&zcrypt_class);
+out_class_register_failed:
return rc;
}
static void zcdn_exit(void)
{
- class_remove_file(zcrypt_class, &class_attr_zcdn_create);
- class_remove_file(zcrypt_class, &class_attr_zcdn_destroy);
+ class_remove_file(&zcrypt_class, &class_attr_zcdn_create);
+ class_remove_file(&zcrypt_class, &class_attr_zcdn_destroy);
zcdn_destroy_all();
cdev_del(&zcrypt_cdev);
unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
- class_destroy(zcrypt_class);
+ class_unregister(&zcrypt_class);
}
/*
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 90ec477386a86..9678c6a2cda72 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1325,7 +1325,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
clear_normalized_cda(&ch->ccw[1]);
CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
- (void *)(unsigned long)ch->ccw[1].cda,
+ (void *)(u64)dma32_to_u32(ch->ccw[1].cda),
ch->trans_skb->data);
ch->ccw[1].count = ch->max_bufsize;
@@ -1340,7 +1340,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
}
CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
- (void *)(unsigned long)ch->ccw[1].cda,
+ (void *)(u64)dma32_to_u32(ch->ccw[1].cda),
ch->trans_skb->data);
ch->ccw[1].count = ch->trans_skb->len;
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index ac15d7c2b200b..878fe3ce53ada 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1389,7 +1389,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
ch->ccw[15].cmd_code = CCW_CMD_WRITE;
ch->ccw[15].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[15].count = TH_HEADER_LENGTH;
- ch->ccw[15].cda = virt_to_phys(ch->discontact_th);
+ ch->ccw[15].cda = virt_to_dma32(ch->discontact_th);
ch->ccw[16].cmd_code = CCW_CMD_NOOP;
ch->ccw[16].flags = CCW_FLAG_SLI;
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 7a2f34a5e0e09..9e580ef69bdaa 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -1708,57 +1708,57 @@ static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side)
ch->ccw[9].cmd_code = CCW_CMD_WRITE;
ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[9].count = TH_HEADER_LENGTH;
- ch->ccw[9].cda = virt_to_phys(ch->xid_th);
+ ch->ccw[9].cda = virt_to_dma32(ch->xid_th);
if (ch->xid == NULL)
goto done;
ch->ccw[10].cmd_code = CCW_CMD_WRITE;
ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[10].count = XID2_LENGTH;
- ch->ccw[10].cda = virt_to_phys(ch->xid);
+ ch->ccw[10].cda = virt_to_dma32(ch->xid);
ch->ccw[11].cmd_code = CCW_CMD_READ;
ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[11].count = TH_HEADER_LENGTH;
- ch->ccw[11].cda = virt_to_phys(ch->rcvd_xid_th);
+ ch->ccw[11].cda = virt_to_dma32(ch->rcvd_xid_th);
ch->ccw[12].cmd_code = CCW_CMD_READ;
ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[12].count = XID2_LENGTH;
- ch->ccw[12].cda = virt_to_phys(ch->rcvd_xid);
+ ch->ccw[12].cda = virt_to_dma32(ch->rcvd_xid);
ch->ccw[13].cmd_code = CCW_CMD_READ;
- ch->ccw[13].cda = virt_to_phys(ch->rcvd_xid_id);
+ ch->ccw[13].cda = virt_to_dma32(ch->rcvd_xid_id);
} else { /* side == YSIDE : mpc_action_yside_xid */
ch->ccw[9].cmd_code = CCW_CMD_READ;
ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[9].count = TH_HEADER_LENGTH;
- ch->ccw[9].cda = virt_to_phys(ch->rcvd_xid_th);
+ ch->ccw[9].cda = virt_to_dma32(ch->rcvd_xid_th);
ch->ccw[10].cmd_code = CCW_CMD_READ;
ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[10].count = XID2_LENGTH;
- ch->ccw[10].cda = virt_to_phys(ch->rcvd_xid);
+ ch->ccw[10].cda = virt_to_dma32(ch->rcvd_xid);
if (ch->xid_th == NULL)
goto done;
ch->ccw[11].cmd_code = CCW_CMD_WRITE;
ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[11].count = TH_HEADER_LENGTH;
- ch->ccw[11].cda = virt_to_phys(ch->xid_th);
+ ch->ccw[11].cda = virt_to_dma32(ch->xid_th);
if (ch->xid == NULL)
goto done;
ch->ccw[12].cmd_code = CCW_CMD_WRITE;
ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[12].count = XID2_LENGTH;
- ch->ccw[12].cda = virt_to_phys(ch->xid);
+ ch->ccw[12].cda = virt_to_dma32(ch->xid);
if (ch->xid_id == NULL)
goto done;
ch->ccw[13].cmd_code = CCW_CMD_WRITE;
- ch->ccw[13].cda = virt_to_phys(ch->xid_id);
+ ch->ccw[13].cda = virt_to_dma32(ch->xid_id);
}
ch->ccw[13].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index 0ff61d00feb19..8672d225ba77f 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -9,7 +9,7 @@
#include <linux/slab.h>
#include <linux/timer.h>
-MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
+MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert <felfert@millenux.com>");
MODULE_DESCRIPTION("Finite state machine helper functions");
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 2c8e964425dc3..43778b088ffac 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -292,13 +292,16 @@ out:
static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
clear_bit(dmb->sba_idx, ism->sba_bitmap);
- dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
- dmb->cpu_addr, dmb->dma_addr);
+ dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
+ DMA_FROM_DEVICE);
+ folio_put(virt_to_folio(dmb->cpu_addr));
}
static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
+ struct folio *folio;
unsigned long bit;
+ int rc;
if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
return -EINVAL;
@@ -315,14 +318,30 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
return -EINVAL;
- dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
- &dmb->dma_addr,
- GFP_KERNEL | __GFP_NOWARN |
- __GFP_NOMEMALLOC | __GFP_NORETRY);
- if (!dmb->cpu_addr)
- clear_bit(dmb->sba_idx, ism->sba_bitmap);
+ folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
+ __GFP_NORETRY, get_order(dmb->dmb_len));
- return dmb->cpu_addr ? 0 : -ENOMEM;
+ if (!folio) {
+ rc = -ENOMEM;
+ goto out_bit;
+ }
+
+ dmb->cpu_addr = folio_address(folio);
+ dmb->dma_addr = dma_map_page(&ism->pdev->dev,
+ virt_to_page(dmb->cpu_addr), 0,
+ dmb->dmb_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ folio_put(folio);
+out_bit:
+ clear_bit(dmb->sba_idx, ism->sba_bitmap);
+ return rc;
}
int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
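A minimal sketch of the new allocation scheme with hypothetical names; note that memory obtained from folio_alloc() must be released with folio_put() on the error path, not kfree():

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static void *example_alloc_mapped_buf(struct device *dev, size_t len,
				      dma_addr_t *dma_addr)
{
	struct folio *folio;
	void *cpu_addr;

	folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
			    get_order(len));
	if (!folio)
		return NULL;

	cpu_addr = folio_address(folio);
	*dma_addr = dma_map_page(dev, virt_to_page(cpu_addr), 0, len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_addr)) {
		/* page-allocator memory: release the folio, do not kfree() */
		folio_put(folio);
		return NULL;
	}
	return cpu_addr;
}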
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index a1f2acd6fb8f6..25d4e6376591e 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -218,7 +218,7 @@ lcs_setup_read_ccws(struct lcs_card *card)
* we do not need to do set_normalized_cda.
*/
card->read.ccws[cnt].cda =
- (__u32)virt_to_phys(card->read.iob[cnt].data);
+ virt_to_dma32(card->read.iob[cnt].data);
((struct lcs_header *)
card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
card->read.iob[cnt].callback = lcs_get_frames_cb;
@@ -230,8 +230,7 @@ lcs_setup_read_ccws(struct lcs_card *card)
card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND;
/* Last ccw is a tic (transfer in channel). */
card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
- card->read.ccws[LCS_NUM_BUFFS].cda =
- (__u32)virt_to_phys(card->read.ccws);
+ card->read.ccws[LCS_NUM_BUFFS].cda = virt_to_dma32(card->read.ccws);
/* Setg initial state of the read channel. */
card->read.state = LCS_CH_STATE_INIT;
@@ -273,12 +272,11 @@ lcs_setup_write_ccws(struct lcs_card *card)
* we do not need to do set_normalized_cda.
*/
card->write.ccws[cnt].cda =
- (__u32)virt_to_phys(card->write.iob[cnt].data);
+ virt_to_dma32(card->write.iob[cnt].data);
}
/* Last ccw is a tic (transfer in channel). */
card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
- card->write.ccws[LCS_NUM_BUFFS].cda =
- (__u32)virt_to_phys(card->write.ccws);
+ card->write.ccws[LCS_NUM_BUFFS].cda = virt_to_dma32(card->write.ccws);
/* Set initial state of the write channel. */
card->read.state = LCS_CH_STATE_INIT;
@@ -1399,7 +1397,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
if ((channel->state != LCS_CH_STATE_INIT) &&
(irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
(irb->scsw.cmd.cpa != 0)) {
- index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
+ index = (struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa)
- channel->ccws;
if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
(irb->scsw.cmd.cstat & SCHN_STAT_PCI))
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index cf8506d0f185c..f0b8b709649f2 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -426,7 +426,7 @@ static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
ccw->cmd_code = cmd_code;
ccw->flags = flags | CCW_FLAG_SLI;
ccw->count = len;
- ccw->cda = (__u32)virt_to_phys(data);
+ ccw->cda = virt_to_dma32(data);
}
static int __qeth_issue_next_read(struct qeth_card *card)
@@ -1179,6 +1179,20 @@ static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
}
}
+/**
+ * qeth_irq() - qeth interrupt handler
+ * @cdev: ccw device
+ * @intparm: expected to be a pointer to an iob
+ * @irb: Interruption Response Block
+ *
+ * On the good path the corresponding qeth channel is locked and the
+ * last used iob is its active_cmd. Note that this function is also
+ * called for error interrupts.
+ *
+ * Context: Caller ensures that interrupts are disabled and the ccw
+ * device lock is held.
+ *
+ */
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb)
{
@@ -1220,11 +1234,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
}
- qeth_unlock_channel(card, channel);
-
rc = qeth_check_irb_error(card, cdev, irb);
if (rc) {
/* IO was terminated, free its resources. */
+ qeth_unlock_channel(card, channel);
if (iob)
qeth_cancel_cmd(iob, rc);
return;
@@ -1268,6 +1281,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
rc = qeth_get_problem(card, cdev, irb);
if (rc) {
card->read_or_write_problem = 1;
+ qeth_unlock_channel(card, channel);
if (iob)
qeth_cancel_cmd(iob, rc);
qeth_clear_ipacmd_list(card);
@@ -1276,6 +1290,26 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
}
}
+ if (scsw_cmd_is_valid_cc(&irb->scsw) && irb->scsw.cmd.cc == 1 && iob) {
+ /* The channel command has not started yet: retry it.
+ * active_cmd still points to the last used iob.
+ */
+ QETH_CARD_TEXT(card, 2, "irqcc1");
+ rc = ccw_device_start_timeout(cdev, __ccw_from_cmd(iob),
+ (addr_t)iob, 0, 0, iob->timeout);
+ if (rc) {
+ QETH_DBF_MESSAGE(2,
+ "ccw retry on %x failed, rc = %i\n",
+ CARD_DEVID(card), rc);
+ QETH_CARD_TEXT_(card, 2, " err%d", rc);
+ qeth_unlock_channel(card, channel);
+ qeth_cancel_cmd(iob, rc);
+ }
+ return;
+ }
+
+ qeth_unlock_channel(card, channel);
+
if (iob) {
/* sanity check: */
if (irb->scsw.cmd.count > iob->length) {
@@ -1359,7 +1393,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
qeth_tx_complete_buf(queue, buf, error, budget);
for (i = 0; i < queue->max_elements; ++i) {
- void *data = phys_to_virt(buf->buffer->element[i].addr);
+ void *data = dma64_to_virt(buf->buffer->element[i].addr);
if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
kmem_cache_free(qeth_core_header_cache, data);
@@ -1404,7 +1438,7 @@ static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
for (i = 0;
i < aob->sb_count && i < queue->max_elements;
i++) {
- void *data = phys_to_virt(aob->sba[i]);
+ void *data = dma64_to_virt(aob->sba[i]);
if (test_bit(i, buf->from_kmem_cache) && data)
kmem_cache_free(qeth_core_header_cache,
@@ -2918,8 +2952,8 @@ static int qeth_init_input_buffer(struct qeth_card *card,
*/
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
buf->buffer->element[i].length = PAGE_SIZE;
- buf->buffer->element[i].addr =
- page_to_phys(pool_entry->elements[i]);
+ buf->buffer->element[i].addr = u64_to_dma64(
+ page_to_phys(pool_entry->elements[i]));
if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
else
@@ -3765,9 +3799,9 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
buffer->element[e].addr) {
- unsigned long phys_aob_addr = buffer->element[e].addr;
+ dma64_t phys_aob_addr = buffer->element[e].addr;
- qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
+ qeth_qdio_handle_aob(card, dma64_to_virt(phys_aob_addr));
++e;
}
qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
@@ -4042,7 +4076,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
if (hd_len) {
is_first_elem = false;
- buffer->element[element].addr = virt_to_phys(hdr);
+ buffer->element[element].addr = virt_to_dma64(hdr);
buffer->element[element].length = hd_len;
buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
@@ -4063,7 +4097,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
elem_length = min_t(unsigned int, length,
PAGE_SIZE - offset_in_page(data));
- buffer->element[element].addr = virt_to_phys(data);
+ buffer->element[element].addr = virt_to_dma64(data);
buffer->element[element].length = elem_length;
length -= elem_length;
if (is_first_elem) {
@@ -4093,7 +4127,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
elem_length = min_t(unsigned int, length,
PAGE_SIZE - offset_in_page(data));
- buffer->element[element].addr = virt_to_phys(data);
+ buffer->element[element].addr = virt_to_dma64(data);
buffer->element[element].length = elem_length;
buffer->element[element].eflags =
SBAL_EFLAGS_MIDDLE_FRAG;
@@ -5569,7 +5603,7 @@ next_packet:
offset = 0;
}
- hdr = phys_to_virt(element->addr) + offset;
+ hdr = dma64_to_virt(element->addr) + offset;
offset += sizeof(*hdr);
skb = NULL;
@@ -5661,7 +5695,7 @@ use_skb:
walk_packet:
while (skb_len) {
int data_len = min(skb_len, (int)(element->length - offset));
- char *data = phys_to_virt(element->addr) + offset;
+ char *data = dma64_to_virt(element->addr) + offset;
skb_len -= data_len;
offset += data_len;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index ceed1b6f7cb61..22e82000334ab 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -2742,7 +2742,7 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
sbale = &sbal->element[idx];
- req_id = sbale->addr;
+ req_id = dma64_to_u64(sbale->addr);
fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
if (!fsf_req) {
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index f54f506b02d66..8cbc5e1711af0 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -125,7 +125,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
memset(pl, 0,
ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
sbale = qdio->res_q[idx]->element;
- req_id = sbale->addr;
+ req_id = dma64_to_u64(sbale->addr);
scount = min(sbale->scount + 1,
ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
/* incl. signaling SBAL */
@@ -256,7 +256,7 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
q_req->sbal_number);
return -EINVAL;
}
- sbale->addr = sg_phys(sg);
+ sbale->addr = u64_to_dma64(sg_phys(sg));
sbale->length = sg->length;
}
return 0;
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 90134d9b69a77..8f7d2ae94441a 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -129,14 +129,14 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
% QDIO_MAX_BUFFERS_PER_Q;
sbale = zfcp_qdio_sbale_req(qdio, q_req);
- sbale->addr = req_id;
+ sbale->addr = u64_to_dma64(req_id);
sbale->eflags = 0;
sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;
if (unlikely(!data))
return;
sbale++;
- sbale->addr = virt_to_phys(data);
+ sbale->addr = virt_to_dma64(data);
sbale->length = len;
}
@@ -159,7 +159,7 @@ void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1);
q_req->sbale_curr++;
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
- sbale->addr = virt_to_phys(data);
+ sbale->addr = virt_to_dma64(data);
sbale->length = len;
}
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index ac67576301bf5..d7569f3955591 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -72,6 +72,7 @@ struct virtio_ccw_device {
unsigned int config_ready;
void *airq_info;
struct vcdev_dma_area *dma_area;
+ dma32_t dma_area_addr;
};
static inline unsigned long *indicators(struct virtio_ccw_device *vcdev)
@@ -84,20 +85,50 @@ static inline unsigned long *indicators2(struct virtio_ccw_device *vcdev)
return &vcdev->dma_area->indicators2;
}
+/* Spec stipulates a 64 bit address */
+static inline dma64_t indicators_dma(struct virtio_ccw_device *vcdev)
+{
+ u64 dma_area_addr = dma32_to_u32(vcdev->dma_area_addr);
+
+ return dma64_add(u64_to_dma64(dma_area_addr),
+ offsetof(struct vcdev_dma_area, indicators));
+}
+
+/* Spec stipulates a 64 bit address */
+static inline dma64_t indicators2_dma(struct virtio_ccw_device *vcdev)
+{
+ u64 dma_area_addr = dma32_to_u32(vcdev->dma_area_addr);
+
+ return dma64_add(u64_to_dma64(dma_area_addr),
+ offsetof(struct vcdev_dma_area, indicators2));
+}
+
+static inline dma32_t config_block_dma(struct virtio_ccw_device *vcdev)
+{
+ return dma32_add(vcdev->dma_area_addr,
+ offsetof(struct vcdev_dma_area, config_block));
+}
+
+static inline dma32_t status_dma(struct virtio_ccw_device *vcdev)
+{
+ return dma32_add(vcdev->dma_area_addr,
+ offsetof(struct vcdev_dma_area, status));
+}
+
struct vq_info_block_legacy {
- __u64 queue;
+ dma64_t queue;
__u32 align;
__u16 index;
__u16 num;
} __packed;
struct vq_info_block {
- __u64 desc;
+ dma64_t desc;
__u32 res0;
__u16 index;
__u16 num;
- __u64 avail;
- __u64 used;
+ dma64_t avail;
+ dma64_t used;
} __packed;
struct virtio_feature_desc {
@@ -106,8 +137,8 @@ struct virtio_feature_desc {
} __packed;
struct virtio_thinint_area {
- unsigned long summary_indicator;
- unsigned long indicator;
+ dma64_t summary_indicator;
+ dma64_t indicator;
u64 bit_nr;
u8 isc;
} __packed;
@@ -123,6 +154,7 @@ struct virtio_rev_info {
struct virtio_ccw_vq_info {
struct virtqueue *vq;
+ dma32_t info_block_addr;
int num;
union {
struct vq_info_block s;
@@ -156,6 +188,11 @@ static inline u8 *get_summary_indicator(struct airq_info *info)
return summary_indicators + info->summary_indicator_idx;
}
+static inline dma64_t get_summary_indicator_dma(struct airq_info *info)
+{
+ return virt_to_dma64(get_summary_indicator(info));
+}
+
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
@@ -260,12 +297,12 @@ static struct airq_info *new_airq_info(int index)
return info;
}
-static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
- u64 *first, void **airq_info)
+static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
+ u64 *first, void **airq_info)
{
int i, j;
struct airq_info *info;
- unsigned long indicator_addr = 0;
+ unsigned long *indicator_addr = NULL;
unsigned long bit, flags;
for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
@@ -275,7 +312,7 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
info = airq_areas[i];
mutex_unlock(&airq_areas_lock);
if (!info)
- return 0;
+ return NULL;
write_lock_irqsave(&info->lock, flags);
bit = airq_iv_alloc(info->aiv, nvqs);
if (bit == -1UL) {
@@ -285,7 +322,7 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
}
*first = bit;
*airq_info = info;
- indicator_addr = (unsigned long)info->aiv->vector;
+ indicator_addr = info->aiv->vector;
for (j = 0; j < nvqs; j++) {
airq_iv_set_ptr(info->aiv, bit + j,
(unsigned long)vqs[j]);
@@ -348,31 +385,31 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
struct ccw1 *ccw)
{
int ret;
- unsigned long *indicatorp = NULL;
struct virtio_thinint_area *thinint_area = NULL;
struct airq_info *airq_info = vcdev->airq_info;
+ dma64_t *indicatorp = NULL;
if (vcdev->is_thinint) {
thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
- sizeof(*thinint_area));
+ sizeof(*thinint_area),
+ &ccw->cda);
if (!thinint_area)
return;
thinint_area->summary_indicator =
- (unsigned long) get_summary_indicator(airq_info);
+ get_summary_indicator_dma(airq_info);
thinint_area->isc = VIRTIO_AIRQ_ISC;
ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
ccw->count = sizeof(*thinint_area);
- ccw->cda = (__u32)virt_to_phys(thinint_area);
} else {
/* payload is the address of the indicators */
indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
- sizeof(indicators(vcdev)));
+ sizeof(*indicatorp),
+ &ccw->cda);
if (!indicatorp)
return;
*indicatorp = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
- ccw->count = sizeof(indicators(vcdev));
- ccw->cda = (__u32)virt_to_phys(indicatorp);
+ ccw->count = sizeof(*indicatorp);
}
/* Deregister indicators from host. */
*indicators(vcdev) = 0;
@@ -386,7 +423,7 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
"Failed to deregister indicators (%d)\n", ret);
else if (vcdev->is_thinint)
virtio_ccw_drop_indicators(vcdev);
- ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(indicators(vcdev)));
+ ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(*indicatorp));
ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
}
@@ -426,7 +463,7 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
ccw->flags = 0;
ccw->count = sizeof(struct vq_config_block);
- ccw->cda = (__u32)virt_to_phys(&vcdev->dma_area->config_block);
+ ccw->cda = config_block_dma(vcdev);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
if (ret)
return ret;
@@ -463,7 +500,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
}
ccw->cmd_code = CCW_CMD_SET_VQ;
ccw->flags = 0;
- ccw->cda = (__u32)virt_to_phys(info->info_block);
+ ccw->cda = info->info_block_addr;
ret = ccw_io_helper(vcdev, ccw,
VIRTIO_CCW_DOING_SET_VQ | index);
/*
@@ -486,7 +523,7 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev)
struct ccw1 *ccw;
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return;
@@ -525,7 +562,8 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
goto out_err;
}
info->info_block = ccw_device_dma_zalloc(vcdev->cdev,
- sizeof(*info->info_block));
+ sizeof(*info->info_block),
+ &info->info_block_addr);
if (!info->info_block) {
dev_warn(&vcdev->cdev->dev, "no info block\n");
err = -ENOMEM;
@@ -556,22 +594,22 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
/* Register it with the host. */
queue = virtqueue_get_desc_addr(vq);
if (vcdev->revision == 0) {
- info->info_block->l.queue = queue;
+ info->info_block->l.queue = u64_to_dma64(queue);
info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
info->info_block->l.index = i;
info->info_block->l.num = info->num;
ccw->count = sizeof(info->info_block->l);
} else {
- info->info_block->s.desc = queue;
+ info->info_block->s.desc = u64_to_dma64(queue);
info->info_block->s.index = i;
info->info_block->s.num = info->num;
- info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq);
- info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq);
+ info->info_block->s.avail = u64_to_dma64(virtqueue_get_avail_addr(vq));
+ info->info_block->s.used = u64_to_dma64(virtqueue_get_used_addr(vq));
ccw->count = sizeof(info->info_block->s);
}
ccw->cmd_code = CCW_CMD_SET_VQ;
ccw->flags = 0;
- ccw->cda = (__u32)virt_to_phys(info->info_block);
+ ccw->cda = info->info_block_addr;
err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
if (err) {
dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
@@ -605,11 +643,12 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
{
int ret;
struct virtio_thinint_area *thinint_area = NULL;
- unsigned long indicator_addr;
+ unsigned long *indicator_addr;
struct airq_info *info;
thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
- sizeof(*thinint_area));
+ sizeof(*thinint_area),
+ &ccw->cda);
if (!thinint_area) {
ret = -ENOMEM;
goto out;
@@ -622,15 +661,13 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
ret = -ENOSPC;
goto out;
}
- thinint_area->indicator = virt_to_phys((void *)indicator_addr);
+ thinint_area->indicator = virt_to_dma64(indicator_addr);
info = vcdev->airq_info;
- thinint_area->summary_indicator =
- virt_to_phys(get_summary_indicator(info));
+ thinint_area->summary_indicator = get_summary_indicator_dma(info);
thinint_area->isc = VIRTIO_AIRQ_ISC;
ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
ccw->flags = CCW_FLAG_SLI;
ccw->count = sizeof(*thinint_area);
- ccw->cda = (__u32)virt_to_phys(thinint_area);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
if (ret) {
if (ret == -EOPNOTSUPP) {
@@ -658,11 +695,11 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct irq_affinity *desc)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
- unsigned long *indicatorp = NULL;
+ dma64_t *indicatorp = NULL;
int ret, i, queue_idx = 0;
struct ccw1 *ccw;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return -ENOMEM;
@@ -687,10 +724,11 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
* the address of the indicators.
*/
indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
- sizeof(indicators(vcdev)));
+ sizeof(*indicatorp),
+ &ccw->cda);
if (!indicatorp)
goto out;
- *indicatorp = (unsigned long) indicators(vcdev);
+ *indicatorp = indicators_dma(vcdev);
if (vcdev->is_thinint) {
ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
if (ret)
@@ -702,32 +740,30 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
*indicators(vcdev) = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
ccw->flags = 0;
- ccw->count = sizeof(indicators(vcdev));
- ccw->cda = (__u32)virt_to_phys(indicatorp);
+ ccw->count = sizeof(*indicatorp);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
if (ret)
goto out;
}
/* Register indicators2 with host for config changes */
- *indicatorp = (unsigned long) indicators2(vcdev);
+ *indicatorp = indicators2_dma(vcdev);
*indicators2(vcdev) = 0;
ccw->cmd_code = CCW_CMD_SET_CONF_IND;
ccw->flags = 0;
- ccw->count = sizeof(indicators2(vcdev));
- ccw->cda = (__u32)virt_to_phys(indicatorp);
+ ccw->count = sizeof(*indicatorp);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
if (ret)
goto out;
if (indicatorp)
ccw_device_dma_free(vcdev->cdev, indicatorp,
- sizeof(indicators(vcdev)));
+ sizeof(*indicatorp));
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return 0;
out:
if (indicatorp)
ccw_device_dma_free(vcdev->cdev, indicatorp,
- sizeof(indicators(vcdev)));
+ sizeof(*indicatorp));
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
virtio_ccw_del_vqs(vdev);
return ret;
@@ -738,7 +774,7 @@ static void virtio_ccw_reset(struct virtio_device *vdev)
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
struct ccw1 *ccw;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return;
@@ -762,11 +798,12 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
u64 rc;
struct ccw1 *ccw;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return 0;
- features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
+ features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features),
+ &ccw->cda);
if (!features) {
rc = 0;
goto out_free;
@@ -776,7 +813,6 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
ccw->cmd_code = CCW_CMD_READ_FEAT;
ccw->flags = 0;
ccw->count = sizeof(*features);
- ccw->cda = (__u32)virt_to_phys(features);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
if (ret) {
rc = 0;
@@ -793,7 +829,6 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
ccw->cmd_code = CCW_CMD_READ_FEAT;
ccw->flags = 0;
ccw->count = sizeof(*features);
- ccw->cda = (__u32)virt_to_phys(features);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
if (ret == 0)
rc |= (u64)le32_to_cpu(features->features) << 32;
@@ -825,11 +860,12 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
return -EINVAL;
}
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return -ENOMEM;
- features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
+ features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features),
+ &ccw->cda);
if (!features) {
ret = -ENOMEM;
goto out_free;
@@ -846,7 +882,6 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
ccw->cmd_code = CCW_CMD_WRITE_FEAT;
ccw->flags = 0;
ccw->count = sizeof(*features);
- ccw->cda = (__u32)virt_to_phys(features);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
if (ret)
goto out_free;
@@ -860,7 +895,6 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
ccw->cmd_code = CCW_CMD_WRITE_FEAT;
ccw->flags = 0;
ccw->count = sizeof(*features);
- ccw->cda = (__u32)virt_to_phys(features);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
out_free:
@@ -879,12 +913,13 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
void *config_area;
unsigned long flags;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return;
config_area = ccw_device_dma_zalloc(vcdev->cdev,
- VIRTIO_CCW_CONFIG_SIZE);
+ VIRTIO_CCW_CONFIG_SIZE,
+ &ccw->cda);
if (!config_area)
goto out_free;
@@ -892,7 +927,6 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
ccw->cmd_code = CCW_CMD_READ_CONF;
ccw->flags = 0;
ccw->count = offset + len;
- ccw->cda = (__u32)virt_to_phys(config_area);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
if (ret)
goto out_free;
@@ -919,12 +953,13 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
void *config_area;
unsigned long flags;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return;
config_area = ccw_device_dma_zalloc(vcdev->cdev,
- VIRTIO_CCW_CONFIG_SIZE);
+ VIRTIO_CCW_CONFIG_SIZE,
+ &ccw->cda);
if (!config_area)
goto out_free;
@@ -939,7 +974,6 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
ccw->cmd_code = CCW_CMD_WRITE_CONF;
ccw->flags = 0;
ccw->count = offset + len;
- ccw->cda = (__u32)virt_to_phys(config_area);
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);
out_free:
@@ -956,14 +990,14 @@ static u8 virtio_ccw_get_status(struct virtio_device *vdev)
if (vcdev->revision < 2)
return vcdev->dma_area->status;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return old_status;
ccw->cmd_code = CCW_CMD_READ_STATUS;
ccw->flags = 0;
ccw->count = sizeof(vcdev->dma_area->status);
- ccw->cda = (__u32)virt_to_phys(&vcdev->dma_area->status);
+ ccw->cda = status_dma(vcdev);
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
/*
* If the channel program failed (should only happen if the device
@@ -983,7 +1017,7 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
struct ccw1 *ccw;
int ret;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return;
@@ -992,11 +1026,11 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
ccw->cmd_code = CCW_CMD_WRITE_STATUS;
ccw->flags = 0;
ccw->count = sizeof(status);
- ccw->cda = (__u32)virt_to_phys(&vcdev->dma_area->status);
/* We use ssch for setting the status which is a serializing
* instruction that guarantees the memory writes have
* completed before ssch.
*/
+ ccw->cda = status_dma(vcdev);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
/* Write failed? We assume status is unchanged. */
if (ret)
@@ -1278,10 +1312,10 @@ static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
struct ccw1 *ccw;
int ret;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return -ENOMEM;
- rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev));
+ rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev), &ccw->cda);
if (!rev) {
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return -ENOMEM;
@@ -1291,7 +1325,6 @@ static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
ccw->flags = 0;
ccw->count = sizeof(*rev);
- ccw->cda = (__u32)virt_to_phys(rev);
vcdev->revision = VIRTIO_CCW_REV_MAX;
do {
@@ -1333,7 +1366,8 @@ static int virtio_ccw_online(struct ccw_device *cdev)
vcdev->vdev.dev.parent = &cdev->dev;
vcdev->cdev = cdev;
vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
- sizeof(*vcdev->dma_area));
+ sizeof(*vcdev->dma_area),
+ &vcdev->dma_area_addr);
if (!vcdev->dma_area) {
ret = -ENOMEM;
goto out_free;
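The virtio_ccw conversion above leans on ccw_device_dma_zalloc() handing back the DMA address of the allocation, which removes the open-coded (__u32)virt_to_phys() casts into ccw->cda. A short sketch of the resulting call pattern, assuming that extended signature (the third argument receives the 31-bit address, or may be NULL when the caller does not need it); read_config_sketch() is a hypothetical example, not a function from the driver:

static int read_config_sketch(struct virtio_ccw_device *vcdev,
			      struct ccw1 *ccw, size_t len)
{
	void *buf;
	int ret;

	/* The helper fills ccw->cda with the DMA address of buf. */
	buf = ccw_device_dma_zalloc(vcdev->cdev, len, &ccw->cda);
	if (!buf)
		return -ENOMEM;

	ccw->cmd_code = CCW_CMD_READ_CONF;
	ccw->flags = 0;
	ccw->count = len;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);

	ccw_device_dma_free(vcdev->cdev, buf, len);
	return ret;
}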
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 1c76e27d527a7..3192dcb83b86d 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -358,9 +358,6 @@ fail:
return NULL;
}
-extern int bbc_envctrl_init(struct bbc_i2c_bus *bp);
-extern void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp);
-
static int bbc_i2c_probe(struct platform_device *op)
{
struct bbc_i2c_bus *bp;
@@ -385,7 +382,7 @@ static int bbc_i2c_probe(struct platform_device *op)
return err;
}
-static int bbc_i2c_remove(struct platform_device *op)
+static void bbc_i2c_remove(struct platform_device *op)
{
struct bbc_i2c_bus *bp = dev_get_drvdata(&op->dev);
@@ -399,8 +396,6 @@ static int bbc_i2c_remove(struct platform_device *op)
of_iounmap(&op->resource[1], bp->i2c_control_regs, 2);
kfree(bp);
-
- return 0;
}
static const struct of_device_id bbc_i2c_match[] = {
@@ -418,7 +413,7 @@ static struct platform_driver bbc_i2c_driver = {
.of_match_table = bbc_i2c_match,
},
.probe = bbc_i2c_probe,
- .remove = bbc_i2c_remove,
+ .remove_new = bbc_i2c_remove,
};
module_platform_driver(bbc_i2c_driver);
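The sbus char drivers below get the same treatment as bbc_i2c above: the remove callback stops returning int (a non-zero return could not stop the removal anyway) and is registered through .remove_new. A sketch of the converted shape, using hypothetical example_* names:

static void example_remove(struct platform_device *op)
{
	struct example_state *p = dev_get_drvdata(&op->dev);

	misc_deregister(&p->miscdev);
	kfree(p);
	/* Nothing to return: errors here cannot stop the removal. */
}

static struct platform_driver example_driver = {
	.driver = {
		.name		= "example",
		.of_match_table	= example_match,
	},
	.probe	= example_probe,
	.remove_new = example_remove,
};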
diff --git a/drivers/sbus/char/bbc_i2c.h b/drivers/sbus/char/bbc_i2c.h
index 7ffe908c62dcf..6c748836754bf 100644
--- a/drivers/sbus/char/bbc_i2c.h
+++ b/drivers/sbus/char/bbc_i2c.h
@@ -82,4 +82,7 @@ extern int bbc_i2c_readb(struct bbc_i2c_client *, unsigned char *byte, int off);
extern int bbc_i2c_write_buf(struct bbc_i2c_client *, char *buf, int len, int off);
extern int bbc_i2c_read_buf(struct bbc_i2c_client *, char *buf, int len, int off);
+extern int bbc_envctrl_init(struct bbc_i2c_bus *bp);
+extern void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp);
+
#endif /* _BBC_I2C_H */
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 18e6f84e754f2..521cf8affe65a 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -229,7 +229,7 @@ out_iounmap:
goto out;
}
-static int d7s_remove(struct platform_device *op)
+static void d7s_remove(struct platform_device *op)
{
struct d7s *p = dev_get_drvdata(&op->dev);
u8 regs = readb(p->regs);
@@ -245,8 +245,6 @@ static int d7s_remove(struct platform_device *op)
misc_deregister(&d7s_miscdev);
of_iounmap(&op->resource[0], p->regs, sizeof(u8));
-
- return 0;
}
static const struct of_device_id d7s_match[] = {
@@ -263,7 +261,7 @@ static struct platform_driver d7s_driver = {
.of_match_table = d7s_match,
},
.probe = d7s_probe,
- .remove = d7s_remove,
+ .remove_new = d7s_remove,
};
module_platform_driver(d7s_driver);
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index 3dd7274cb0a3e..491cc6c0b3f91 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -1097,7 +1097,7 @@ out_iounmap:
return err;
}
-static int envctrl_remove(struct platform_device *op)
+static void envctrl_remove(struct platform_device *op)
{
int index;
@@ -1108,8 +1108,6 @@ static int envctrl_remove(struct platform_device *op)
for (index = 0; index < ENVCTRL_MAX_CPU * 2; index++)
kfree(i2c_childlist[index].tables);
-
- return 0;
}
static const struct of_device_id envctrl_match[] = {
@@ -1127,7 +1125,7 @@ static struct platform_driver envctrl_driver = {
.of_match_table = envctrl_match,
},
.probe = envctrl_probe,
- .remove = envctrl_remove,
+ .remove_new = envctrl_remove,
};
module_platform_driver(envctrl_driver);
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index ea2d903ba673a..05d37d31c3b8f 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -187,11 +187,9 @@ static int flash_probe(struct platform_device *op)
return misc_register(&flash_dev);
}
-static int flash_remove(struct platform_device *op)
+static void flash_remove(struct platform_device *op)
{
misc_deregister(&flash_dev);
-
- return 0;
}
static const struct of_device_id flash_match[] = {
@@ -208,7 +206,7 @@ static struct platform_driver flash_driver = {
.of_match_table = flash_match,
},
.probe = flash_probe,
- .remove = flash_remove,
+ .remove_new = flash_remove,
};
module_platform_driver(flash_driver);
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index 30b9751aad302..cc178874c4a66 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -33,7 +33,7 @@
#include <linux/pci.h>
#endif
-MODULE_AUTHOR("Thomas K. Dyas (tdyas@noc.rutgers.edu) and Eddie C. Dost (ecd@skynet.be)");
+MODULE_AUTHOR("Thomas K. Dyas <tdyas@noc.rutgers.edu> and Eddie C. Dost <ecd@skynet.be>");
MODULE_DESCRIPTION("OPENPROM Configuration Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index 0660425e3a5a0..cf15a4186d037 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -399,7 +399,7 @@ out_free:
goto out;
}
-static int uctrl_remove(struct platform_device *op)
+static void uctrl_remove(struct platform_device *op)
{
struct uctrl_driver *p = dev_get_drvdata(&op->dev);
@@ -409,7 +409,6 @@ static int uctrl_remove(struct platform_device *op)
of_iounmap(&op->resource[0], p->regs, resource_size(&op->resource[0]));
kfree(p);
}
- return 0;
}
static const struct of_device_id uctrl_match[] = {
@@ -426,7 +425,7 @@ static struct platform_driver uctrl_driver = {
.of_match_table = uctrl_match,
},
.probe = uctrl_probe,
- .remove = uctrl_remove,
+ .remove_new = uctrl_remove,
};
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index f925f8664c2c1..6fb61c88ea119 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -161,28 +161,28 @@ static ssize_t twa_show_stats(struct device *dev,
ssize_t len;
spin_lock_irqsave(tw_dev->host->host_lock, flags);
- len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
- "Current commands posted: %4d\n"
- "Max commands posted: %4d\n"
- "Current pending commands: %4d\n"
- "Max pending commands: %4d\n"
- "Last sgl length: %4d\n"
- "Max sgl length: %4d\n"
- "Last sector count: %4d\n"
- "Max sector count: %4d\n"
- "SCSI Host Resets: %4d\n"
- "AEN's: %4d\n",
- TW_DRIVER_VERSION,
- tw_dev->posted_request_count,
- tw_dev->max_posted_request_count,
- tw_dev->pending_request_count,
- tw_dev->max_pending_request_count,
- tw_dev->sgl_entries,
- tw_dev->max_sgl_entries,
- tw_dev->sector_count,
- tw_dev->max_sector_count,
- tw_dev->num_resets,
- tw_dev->aen_count);
+ len = sysfs_emit(buf, "3w-9xxx Driver version: %s\n"
+ "Current commands posted: %4d\n"
+ "Max commands posted: %4d\n"
+ "Current pending commands: %4d\n"
+ "Max pending commands: %4d\n"
+ "Last sgl length: %4d\n"
+ "Max sgl length: %4d\n"
+ "Last sector count: %4d\n"
+ "Max sector count: %4d\n"
+ "SCSI Host Resets: %4d\n"
+ "AEN's: %4d\n",
+ TW_DRIVER_VERSION,
+ tw_dev->posted_request_count,
+ tw_dev->max_posted_request_count,
+ tw_dev->pending_request_count,
+ tw_dev->max_pending_request_count,
+ tw_dev->sgl_entries,
+ tw_dev->max_sgl_entries,
+ tw_dev->sector_count,
+ tw_dev->max_sector_count,
+ tw_dev->num_resets,
+ tw_dev->aen_count);
spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
return len;
} /* End twa_show_stats() */
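The 3w-sas and 3w-xxxx hunks that follow repeat this snprintf() to sysfs_emit() change. sysfs_emit() knows the show() buffer is a page-sized, page-aligned sysfs buffer, so the PAGE_SIZE argument disappears and overruns are checked for the caller. A reduced sketch; resets_show() and the example_hba pointer are hypothetical:

static ssize_t resets_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct example_hba *hba = dev_get_drvdata(dev);

	/* No buffer length to pass: sysfs_emit() assumes a sysfs page. */
	return sysfs_emit(buf, "SCSI Host Resets: %4d\n", hba->num_resets);
}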
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 9bdb75dfdcd7c..caa6713a62a44 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -166,24 +166,24 @@ static ssize_t twl_show_stats(struct device *dev,
ssize_t len;
spin_lock_irqsave(tw_dev->host->host_lock, flags);
- len = snprintf(buf, PAGE_SIZE, "3w-sas Driver version: %s\n"
- "Current commands posted: %4d\n"
- "Max commands posted: %4d\n"
- "Last sgl length: %4d\n"
- "Max sgl length: %4d\n"
- "Last sector count: %4d\n"
- "Max sector count: %4d\n"
- "SCSI Host Resets: %4d\n"
- "AEN's: %4d\n",
- TW_DRIVER_VERSION,
- tw_dev->posted_request_count,
- tw_dev->max_posted_request_count,
- tw_dev->sgl_entries,
- tw_dev->max_sgl_entries,
- tw_dev->sector_count,
- tw_dev->max_sector_count,
- tw_dev->num_resets,
- tw_dev->aen_count);
+ len = sysfs_emit(buf, "3w-sas Driver version: %s\n"
+ "Current commands posted: %4d\n"
+ "Max commands posted: %4d\n"
+ "Last sgl length: %4d\n"
+ "Max sgl length: %4d\n"
+ "Last sector count: %4d\n"
+ "Max sector count: %4d\n"
+ "SCSI Host Resets: %4d\n"
+ "AEN's: %4d\n",
+ TW_DRIVER_VERSION,
+ tw_dev->posted_request_count,
+ tw_dev->max_posted_request_count,
+ tw_dev->sgl_entries,
+ tw_dev->max_sgl_entries,
+ tw_dev->sector_count,
+ tw_dev->max_sector_count,
+ tw_dev->num_resets,
+ tw_dev->aen_count);
spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
return len;
} /* End twl_show_stats() */
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index f39c9ec2e7810..2c0fb6da0e608 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -496,28 +496,28 @@ static ssize_t tw_show_stats(struct device *dev, struct device_attribute *attr,
ssize_t len;
spin_lock_irqsave(tw_dev->host->host_lock, flags);
- len = snprintf(buf, PAGE_SIZE, "3w-xxxx Driver version: %s\n"
- "Current commands posted: %4d\n"
- "Max commands posted: %4d\n"
- "Current pending commands: %4d\n"
- "Max pending commands: %4d\n"
- "Last sgl length: %4d\n"
- "Max sgl length: %4d\n"
- "Last sector count: %4d\n"
- "Max sector count: %4d\n"
- "SCSI Host Resets: %4d\n"
- "AEN's: %4d\n",
- TW_DRIVER_VERSION,
- tw_dev->posted_request_count,
- tw_dev->max_posted_request_count,
- tw_dev->pending_request_count,
- tw_dev->max_pending_request_count,
- tw_dev->sgl_entries,
- tw_dev->max_sgl_entries,
- tw_dev->sector_count,
- tw_dev->max_sector_count,
- tw_dev->num_resets,
- tw_dev->aen_count);
+ len = sysfs_emit(buf, "3w-xxxx Driver version: %s\n"
+ "Current commands posted: %4d\n"
+ "Max commands posted: %4d\n"
+ "Current pending commands: %4d\n"
+ "Max pending commands: %4d\n"
+ "Last sgl length: %4d\n"
+ "Max sgl length: %4d\n"
+ "Last sector count: %4d\n"
+ "Max sector count: %4d\n"
+ "SCSI Host Resets: %4d\n"
+ "AEN's: %4d\n",
+ TW_DRIVER_VERSION,
+ tw_dev->posted_request_count,
+ tw_dev->max_posted_request_count,
+ tw_dev->pending_request_count,
+ tw_dev->max_pending_request_count,
+ tw_dev->sgl_entries,
+ tw_dev->max_sgl_entries,
+ tw_dev->sector_count,
+ tw_dev->max_sector_count,
+ tw_dev->num_resets,
+ tw_dev->aen_count);
spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
return len;
} /* End tw_show_stats() */
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 857be0f3ae5b9..85439e976143b 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -2071,7 +2071,7 @@ NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char
{
struct scsi_device *SDp = to_scsi_device(dev);
- return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
+ return sysfs_emit(buf, "%d\n", NCR_700_get_depth(SDp));
}
static struct device_attribute NCR_700_active_tags_attr = {
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9ce27092729c3..634f2f501c6c6 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -67,6 +67,15 @@ config SCSI_PROC_FS
If unsure say Y.
+config SCSI_LIB_KUNIT_TEST
+ tristate "KUnit tests for SCSI Mid Layer's scsi_lib" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Run SCSI Mid Layer's KUnit tests for scsi_lib.
+
+ If unsure say N.
+
comment "SCSI support type (disk, tape, CD-ROM)"
depends on SCSI
@@ -232,6 +241,11 @@ config SCSI_SCAN_ASYNC
Note that this setting also affects whether resuming from
system suspend will be performed asynchronously.
+config SCSI_PROTO_TEST
+ tristate "scsi_proto.h unit tests" if !KUNIT_ALL_TESTS
+ depends on SCSI && KUNIT
+ default KUNIT_ALL_TESTS
+
menu "SCSI Transports"
depends on SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index f055bfd54a683..1313ddf2fd1a1 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -24,6 +24,8 @@ obj-$(CONFIG_SCSI_COMMON) += scsi_common.o
obj-$(CONFIG_RAID_ATTRS) += raid_class.o
+obj-$(CONFIG_SCSI_PROTO_TEST) += scsi_proto_test.o
+
# --- NOTE ORDERING HERE ---
# For kernel non-modular link, transport attributes need to
# be initialised before drivers
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 70e1cac1975eb..b22857c6f3f4f 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1099,7 +1099,7 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
sp[0] = INQD_PDT_DA;
sp[1] = scsicmd->cmnd[2];
sp[2] = 0;
- sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
+ sp[3] = scnprintf(sp+4, sizeof(sp)-4, "%08X",
le32_to_cpu(get_serial_reply->uid));
scsi_sg_copy_from_buffer(scsicmd, sp,
sizeof(sp));
@@ -1169,8 +1169,8 @@ static int setinqserial(struct aac_dev *dev, void *data, int cid)
/*
* This breaks array migration.
*/
- return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
- le32_to_cpu(dev->adapter_info.serial[0]), cid);
+ return scnprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
+ le32_to_cpu(dev->adapter_info.serial[0]), cid);
}
static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
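The two aacraid changes above swap snprintf() for scnprintf() where the return value is used as a byte count: snprintf() reports the length the output would have had without truncation, while scnprintf() reports the bytes actually stored, which is what gets written into the inquiry field. A minimal sketch; fill_serial_field() is a hypothetical helper:

static u8 fill_serial_field(char *dst, size_t len, u32 uid)
{
	/*
	 * scnprintf() never returns more than len - 1, so the result is
	 * safe to store as the length of what really ended up in dst.
	 */
	return scnprintf(dst, len, "%08X", uid);
}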
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index 7bd2ba1ad4d11..4cb9249e583cc 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -20,7 +20,6 @@
struct bfa_s;
typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
-typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
/*
* Interrupt message handlers
@@ -216,8 +215,27 @@ struct bfa_faa_args_s {
bfa_boolean_t busy;
};
+/*
+ * IOCFC state machine definitions/declarations
+ */
+enum iocfc_event {
+ IOCFC_E_INIT = 1, /* IOCFC init request */
+ IOCFC_E_START = 2, /* IOCFC mod start request */
+ IOCFC_E_STOP = 3, /* IOCFC stop request */
+ IOCFC_E_ENABLE = 4, /* IOCFC enable request */
+ IOCFC_E_DISABLE = 5, /* IOCFC disable request */
+ IOCFC_E_IOC_ENABLED = 6, /* IOC enabled message */
+ IOCFC_E_IOC_DISABLED = 7, /* IOC disabled message */
+ IOCFC_E_IOC_FAILED = 8, /* failure notice by IOC sm */
+ IOCFC_E_DCONF_DONE = 9, /* dconf read/write done */
+ IOCFC_E_CFG_DONE = 10, /* IOCFC config complete */
+};
+
+struct bfa_iocfc_s;
+typedef void (*bfa_iocfs_fsm_t)(struct bfa_iocfc_s *, enum iocfc_event);
+
struct bfa_iocfc_s {
- bfa_fsm_t fsm;
+ bfa_iocfs_fsm_t fsm;
struct bfa_s *bfa;
struct bfa_iocfc_cfg_s cfg;
u32 req_cq_pi[BFI_IOC_MAX_CQS];
@@ -437,4 +455,12 @@ struct bfa_cb_pending_q_s {
(__qe)->data = (__data); \
} while (0)
+#define bfa_pending_q_init_status(__qe, __cbfn, __cbarg, __data) do { \
+ bfa_q_qe_init(&((__qe)->hcb_qe.qe)); \
+ (__qe)->hcb_qe.cbfn_status = (__cbfn); \
+ (__qe)->hcb_qe.cbarg = (__cbarg); \
+ (__qe)->hcb_qe.pre_rmv = BFA_TRUE; \
+ (__qe)->data = (__data); \
+} while (0)
+
#endif /* __BFA_H__ */
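The bfa changes in this and the following files replace the generic bfa_sm_t/bfa_fsm_t pointers with per-object handler typedefs, so state transitions no longer need casts and a handler with the wrong object or event type fails to compile. A standalone sketch of the idea, with hypothetical example_* names:

enum example_event { EXAMPLE_EV_START = 1, EXAMPLE_EV_STOP = 2 };

struct example_obj;
typedef void (*example_sm_t)(struct example_obj *obj, enum example_event ev);

struct example_obj {
	example_sm_t sm;	/* current state == current handler */
};

static void example_sm_stopped(struct example_obj *obj, enum example_event ev);

static void example_sm_running(struct example_obj *obj, enum example_event ev)
{
	if (ev == EXAMPLE_EV_STOP)
		obj->sm = example_sm_stopped;	/* no (bfa_sm_t) cast needed */
}

static void example_sm_stopped(struct example_obj *obj, enum example_event ev)
{
	if (ev == EXAMPLE_EV_START)
		obj->sm = example_sm_running;
}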
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 6846ca8f7313c..3438d0b8ba062 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -1907,15 +1907,13 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
struct list_head *qe;
struct list_head *qen;
struct bfa_cb_qe_s *hcb_qe;
- bfa_cb_cbfn_status_t cbfn;
list_for_each_safe(qe, qen, comp_q) {
hcb_qe = (struct bfa_cb_qe_s *) qe;
if (hcb_qe->pre_rmv) {
/* qe is invalid after return, dequeue before cbfn() */
list_del(qe);
- cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
- cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
+ hcb_qe->cbfn_status(hcb_qe->cbarg, hcb_qe->fw_status);
} else
hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
}
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
index 6b606bf589b42..6650b1dbb1ed0 100644
--- a/drivers/scsi/bfa/bfa_cs.h
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -187,10 +187,10 @@ typedef void (*bfa_sm_t)(void *sm, int event);
#define bfa_sm_state_decl(oc, st, otype, etype) \
static void oc ## _sm_ ## st(otype * fsm, etype event)
-#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
+#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (_state))
#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
#define bfa_sm_get_state(_sm) ((_sm)->sm)
-#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
+#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (_state))
/*
* For converting from state machine function to state encoding.
@@ -200,7 +200,7 @@ struct bfa_sm_table_s {
int state; /* state machine encoding */
char *name; /* state name for display */
};
-#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
+#define BFA_SM(_sm) (_sm)
/*
* State machine with entry actions.
@@ -218,24 +218,13 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
static void oc ## _sm_ ## st ## _entry(otype * fsm)
#define bfa_fsm_set_state(_fsm, _state) do { \
- (_fsm)->fsm = (bfa_fsm_t)(_state); \
+ (_fsm)->fsm = (_state); \
_state ## _entry(_fsm); \
} while (0)
#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
-#define bfa_fsm_cmp_state(_fsm, _state) \
- ((_fsm)->fsm == (bfa_fsm_t)(_state))
-
-static inline int
-bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
-{
- int i = 0;
-
- while (smt[i].sm && smt[i].sm != sm)
- i++;
- return smt[i].state;
-}
+#define bfa_fsm_cmp_state(_fsm, _state) ((_fsm)->fsm == (_state))
/*
* @ Generic wait counter.
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 7ad22288071b1..28ae4dc14dc9c 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -65,21 +65,6 @@ enum bfa_ioim_lm_ua_status {
};
/*
- * itnim state machine event
- */
-enum bfa_itnim_event {
- BFA_ITNIM_SM_CREATE = 1, /* itnim is created */
- BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */
- BFA_ITNIM_SM_OFFLINE = 3, /* itnim is offline */
- BFA_ITNIM_SM_FWRSP = 4, /* firmware response */
- BFA_ITNIM_SM_DELETE = 5, /* deleting an existing itnim */
- BFA_ITNIM_SM_CLEANUP = 6, /* IO cleanup completion */
- BFA_ITNIM_SM_SLER = 7, /* second level error recovery */
- BFA_ITNIM_SM_HWFAIL = 8, /* IOC h/w failure event */
- BFA_ITNIM_SM_QRESUME = 9, /* queue space available */
-};
-
-/*
* BFA IOIM related definitions
*/
#define bfa_ioim_move_to_comp_q(__ioim) do { \
@@ -98,30 +83,6 @@ enum bfa_itnim_event {
(__fcpim)->profile_start(__ioim); \
} while (0)
-/*
- * IO state machine events
- */
-enum bfa_ioim_event {
- BFA_IOIM_SM_START = 1, /* io start request from host */
- BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */
- BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */
- BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */
- BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */
- BFA_IOIM_SM_FREE = 6, /* io resource is freed */
- BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */
- BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */
- BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */
- BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */
- BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */
- BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */
- BFA_IOIM_SM_HCB = 13, /* bfa callback complete */
- BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */
- BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */
- BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
- BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
- BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
-};
-
/*
* BFA TSKIM related definitions
@@ -141,18 +102,6 @@ enum bfa_ioim_event {
} while (0)
-enum bfa_tskim_event {
- BFA_TSKIM_SM_START = 1, /* TM command start */
- BFA_TSKIM_SM_DONE = 2, /* TM completion */
- BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */
- BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */
- BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */
- BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */
- BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */
- BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
- BFA_TSKIM_SM_UTAG = 10, /* TM completion unknown tag */
-};
-
/*
* forward declaration for BFA ITNIM functions
*/
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 8bf09433549b9..4499f84c2d812 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -155,11 +155,38 @@ struct bfa_fcp_mod_s {
};
/*
+ * IO state machine events
+ */
+enum bfa_ioim_event {
+ BFA_IOIM_SM_START = 1, /* io start request from host */
+ BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */
+ BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */
+ BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */
+ BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */
+ BFA_IOIM_SM_FREE = 6, /* io resource is freed */
+ BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */
+ BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */
+ BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */
+ BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */
+ BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */
+ BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */
+ BFA_IOIM_SM_HCB = 13, /* bfa callback complete */
+ BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */
+ BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */
+ BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
+ BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
+ BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
+};
+
+struct bfa_ioim_s;
+typedef void (*bfa_ioim_sm_t)(struct bfa_ioim_s *, enum bfa_ioim_event);
+
+/*
* BFA IO (initiator mode)
*/
struct bfa_ioim_s {
struct list_head qe; /* queue elememt */
- bfa_sm_t sm; /* BFA ioim state machine */
+ bfa_ioim_sm_t sm; /* BFA ioim state machine */
struct bfa_s *bfa; /* BFA module */
struct bfa_fcpim_s *fcpim; /* parent fcpim module */
struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
@@ -186,12 +213,27 @@ struct bfa_ioim_sp_s {
struct bfa_tskim_s *tskim; /* Relevant TM cmd */
};
+enum bfa_tskim_event {
+ BFA_TSKIM_SM_START = 1, /* TM command start */
+ BFA_TSKIM_SM_DONE = 2, /* TM completion */
+ BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */
+ BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */
+ BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */
+ BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */
+ BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */
+ BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
+ BFA_TSKIM_SM_UTAG = 10, /* TM completion unknown tag */
+};
+
+struct bfa_tskim_s;
+typedef void (*bfa_tskim_sm_t)(struct bfa_tskim_s *, enum bfa_tskim_event);
+
/*
* BFA Task management command (initiator mode)
*/
struct bfa_tskim_s {
struct list_head qe;
- bfa_sm_t sm;
+ bfa_tskim_sm_t sm;
struct bfa_s *bfa; /* BFA module */
struct bfa_fcpim_s *fcpim; /* parent fcpim module */
struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
@@ -209,11 +251,29 @@ struct bfa_tskim_s {
};
/*
+ * itnim state machine event
+ */
+enum bfa_itnim_event {
+ BFA_ITNIM_SM_CREATE = 1, /* itnim is created */
+ BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */
+ BFA_ITNIM_SM_OFFLINE = 3, /* itnim is offline */
+ BFA_ITNIM_SM_FWRSP = 4, /* firmware response */
+ BFA_ITNIM_SM_DELETE = 5, /* deleting an existing itnim */
+ BFA_ITNIM_SM_CLEANUP = 6, /* IO cleanup completion */
+ BFA_ITNIM_SM_SLER = 7, /* second level error recovery */
+ BFA_ITNIM_SM_HWFAIL = 8, /* IOC h/w failure event */
+ BFA_ITNIM_SM_QRESUME = 9, /* queue space available */
+};
+
+struct bfa_itnim_s;
+typedef void (*bfa_itnim_sm_t)(struct bfa_itnim_s *, enum bfa_itnim_event);
+
+/*
* BFA i-t-n (initiator mode)
*/
struct bfa_itnim_s {
struct list_head qe; /* queue element */
- bfa_sm_t sm; /* i-t-n im BFA state machine */
+ bfa_itnim_sm_t sm; /* i-t-n im BFA state machine */
struct bfa_s *bfa; /* bfa instance */
struct bfa_rport_s *rport; /* bfa rport */
void *ditn; /* driver i-t-n structure */
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index c1baf5cd0d3e8..9788354b90da7 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -20,22 +20,6 @@
#define BFA_FCS_OS_STR_LEN 64
/*
- * lps_pvt BFA LPS private functions
- */
-
-enum bfa_lps_event {
- BFA_LPS_SM_LOGIN = 1, /* login request from user */
- BFA_LPS_SM_LOGOUT = 2, /* logout request from user */
- BFA_LPS_SM_FWRSP = 3, /* f/w response to login/logout */
- BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */
- BFA_LPS_SM_DELETE = 5, /* lps delete from user */
- BFA_LPS_SM_OFFLINE = 6, /* Link is offline */
- BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
- BFA_LPS_SM_SET_N2N_PID = 8, /* Set assigned PID for n2n */
-};
-
-
-/*
* !!! Only append to the enums defined here to avoid any versioning
* !!! needed between trace utility and driver version
*/
@@ -59,8 +43,30 @@ struct bfa_fcs_s;
#define BFA_FCS_PID_IS_WKA(pid) ((bfa_ntoh3b(pid) > 0xFFF000) ? 1 : 0)
#define BFA_FCS_MAX_RPORT_LOGINS 1024
+/*
+ * VPort NS State Machine events
+ */
+enum vport_ns_event {
+ NSSM_EVENT_PORT_ONLINE = 1,
+ NSSM_EVENT_PORT_OFFLINE = 2,
+ NSSM_EVENT_PLOGI_SENT = 3,
+ NSSM_EVENT_RSP_OK = 4,
+ NSSM_EVENT_RSP_ERROR = 5,
+ NSSM_EVENT_TIMEOUT = 6,
+ NSSM_EVENT_NS_QUERY = 7,
+ NSSM_EVENT_RSPNID_SENT = 8,
+ NSSM_EVENT_RFTID_SENT = 9,
+ NSSM_EVENT_RFFID_SENT = 10,
+ NSSM_EVENT_GIDFT_SENT = 11,
+ NSSM_EVENT_RNNID_SENT = 12,
+ NSSM_EVENT_RSNN_NN_SENT = 13,
+};
+
+struct bfa_fcs_lport_ns_s;
+typedef void (*bfa_fcs_lport_ns_sm_t)(struct bfa_fcs_lport_ns_s *fsm, enum vport_ns_event);
+
struct bfa_fcs_lport_ns_s {
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_lport_ns_sm_t sm; /* state machine */
struct bfa_timer_s timer;
struct bfa_fcs_lport_s *port; /* parent port */
struct bfa_fcxp_s *fcxp;
@@ -69,9 +75,23 @@ struct bfa_fcs_lport_ns_s {
u8 num_rsnn_nn_retries;
};
+/*
+ * VPort SCN State Machine events
+ */
+enum port_scn_event {
+ SCNSM_EVENT_PORT_ONLINE = 1,
+ SCNSM_EVENT_PORT_OFFLINE = 2,
+ SCNSM_EVENT_RSP_OK = 3,
+ SCNSM_EVENT_RSP_ERROR = 4,
+ SCNSM_EVENT_TIMEOUT = 5,
+ SCNSM_EVENT_SCR_SENT = 6,
+};
+
+struct bfa_fcs_lport_scn_s;
+typedef void (*bfa_fcs_lport_scn_sm_t)(struct bfa_fcs_lport_scn_s *fsm, enum port_scn_event);
struct bfa_fcs_lport_scn_s {
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_lport_scn_sm_t sm; /* state machine */
struct bfa_timer_s timer;
struct bfa_fcs_lport_s *port; /* parent port */
struct bfa_fcxp_s *fcxp;
@@ -79,8 +99,25 @@ struct bfa_fcs_lport_scn_s {
};
+/*
+ * FDMI State Machine events
+ */
+enum port_fdmi_event {
+ FDMISM_EVENT_PORT_ONLINE = 1,
+ FDMISM_EVENT_PORT_OFFLINE = 2,
+ FDMISM_EVENT_RSP_OK = 4,
+ FDMISM_EVENT_RSP_ERROR = 5,
+ FDMISM_EVENT_TIMEOUT = 6,
+ FDMISM_EVENT_RHBA_SENT = 7,
+ FDMISM_EVENT_RPRT_SENT = 8,
+ FDMISM_EVENT_RPA_SENT = 9,
+};
+
+struct bfa_fcs_lport_fdmi_s;
+typedef void (*bfa_fcs_lport_fdmi_sm_t)(struct bfa_fcs_lport_fdmi_s *fsm, enum port_fdmi_event);
+
struct bfa_fcs_lport_fdmi_s {
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_lport_fdmi_sm_t sm; /* state machine */
struct bfa_timer_s timer;
struct bfa_fcs_lport_ms_s *ms; /* parent ms */
struct bfa_fcxp_s *fcxp;
@@ -88,10 +125,24 @@ struct bfa_fcs_lport_fdmi_s {
u8 retry_cnt; /* retry count */
u8 rsvd[3];
};
+/*
+ * MS State Machine events
+ */
+enum port_ms_event {
+ MSSM_EVENT_PORT_ONLINE = 1,
+ MSSM_EVENT_PORT_OFFLINE = 2,
+ MSSM_EVENT_RSP_OK = 3,
+ MSSM_EVENT_RSP_ERROR = 4,
+ MSSM_EVENT_TIMEOUT = 5,
+ MSSM_EVENT_FCXP_SENT = 6,
+ MSSM_EVENT_PORT_FABRIC_RSCN = 7
+};
+struct bfa_fcs_lport_ms_s;
+typedef void (*bfa_fcs_lport_ms_sm_t)(struct bfa_fcs_lport_ms_s *fsm, enum port_ms_event);
struct bfa_fcs_lport_ms_s {
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_lport_ms_sm_t sm; /* state machine */
struct bfa_timer_s timer;
struct bfa_fcs_lport_s *port; /* parent port */
struct bfa_fcxp_s *fcxp;
@@ -131,10 +182,25 @@ union bfa_fcs_lport_topo_u {
struct bfa_fcs_lport_n2n_s pn2n;
};
+/*
+ * fcs_port_sm FCS logical port state machine
+ */
+
+enum bfa_fcs_lport_event {
+ BFA_FCS_PORT_SM_CREATE = 1,
+ BFA_FCS_PORT_SM_ONLINE = 2,
+ BFA_FCS_PORT_SM_OFFLINE = 3,
+ BFA_FCS_PORT_SM_DELETE = 4,
+ BFA_FCS_PORT_SM_DELRPORT = 5,
+ BFA_FCS_PORT_SM_STOP = 6,
+};
+
+struct bfa_fcs_lport_s;
+typedef void (*bfa_fcs_lport_sm_t)(struct bfa_fcs_lport_s *fsm, enum bfa_fcs_lport_event);
struct bfa_fcs_lport_s {
struct list_head qe; /* used by port/vport */
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_lport_sm_t sm; /* state machine */
struct bfa_fcs_fabric_s *fabric; /* parent fabric */
struct bfa_lport_cfg_s port_cfg; /* port configuration */
struct bfa_timer_s link_timer; /* timer for link offline */
@@ -171,10 +237,37 @@ enum bfa_fcs_fabric_type {
BFA_FCS_FABRIC_LOOP = 3,
};
+/*
+ * Fabric state machine events
+ */
+enum bfa_fcs_fabric_event {
+ BFA_FCS_FABRIC_SM_CREATE = 1, /* create from driver */
+ BFA_FCS_FABRIC_SM_DELETE = 2, /* delete from driver */
+ BFA_FCS_FABRIC_SM_LINK_DOWN = 3, /* link down from port */
+ BFA_FCS_FABRIC_SM_LINK_UP = 4, /* link up from port */
+ BFA_FCS_FABRIC_SM_CONT_OP = 5, /* flogi/auth continue op */
+ BFA_FCS_FABRIC_SM_RETRY_OP = 6, /* flogi/auth retry op */
+ BFA_FCS_FABRIC_SM_NO_FABRIC = 7, /* from flogi/auth */
+ BFA_FCS_FABRIC_SM_PERF_EVFP = 8, /* from flogi/auth */
+ BFA_FCS_FABRIC_SM_ISOLATE = 9, /* from EVFP processing */
+ BFA_FCS_FABRIC_SM_NO_TAGGING = 10, /* no VFT tagging from EVFP */
+ BFA_FCS_FABRIC_SM_DELAYED = 11, /* timeout delay event */
+ BFA_FCS_FABRIC_SM_AUTH_FAILED = 12, /* auth failed */
+ BFA_FCS_FABRIC_SM_AUTH_SUCCESS = 13, /* auth successful */
+ BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */
+ BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */
+ BFA_FCS_FABRIC_SM_START = 16, /* from driver */
+ BFA_FCS_FABRIC_SM_STOP = 17, /* Stop from driver */
+ BFA_FCS_FABRIC_SM_STOPCOMP = 18, /* Stop completion */
+ BFA_FCS_FABRIC_SM_LOGOCOMP = 19, /* FLOGO completion */
+};
+
+struct bfa_fcs_fabric_s;
+typedef void (*bfa_fcs_fabric_sm_t)(struct bfa_fcs_fabric_s *fsm, enum bfa_fcs_fabric_event);
struct bfa_fcs_fabric_s {
struct list_head qe; /* queue element */
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_fabric_sm_t sm; /* state machine */
struct bfa_fcs_s *fcs; /* FCS instance */
struct bfa_fcs_lport_s bport; /* base logical port */
enum bfa_fcs_fabric_type fab_type; /* fabric type */
@@ -344,9 +437,33 @@ void bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
struct fchs_s *rx_frame, u32 len);
void bfa_fcs_lport_lip_scn_online(bfa_fcs_lport_t *port);
+/*
+ * VPort State Machine events
+ */
+enum bfa_fcs_vport_event {
+ BFA_FCS_VPORT_SM_CREATE = 1, /* vport create event */
+ BFA_FCS_VPORT_SM_DELETE = 2, /* vport delete event */
+ BFA_FCS_VPORT_SM_START = 3, /* vport start request */
+ BFA_FCS_VPORT_SM_STOP = 4, /* stop: unsupported */
+ BFA_FCS_VPORT_SM_ONLINE = 5, /* fabric online */
+ BFA_FCS_VPORT_SM_OFFLINE = 6, /* fabric offline event */
+ BFA_FCS_VPORT_SM_FRMSENT = 7, /* fdisc/logo sent events */
+ BFA_FCS_VPORT_SM_RSP_OK = 8, /* good response */
+ BFA_FCS_VPORT_SM_RSP_ERROR = 9, /* error/bad response */
+ BFA_FCS_VPORT_SM_TIMEOUT = 10, /* delay timer event */
+ BFA_FCS_VPORT_SM_DELCOMP = 11, /* lport delete completion */
+ BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12, /* Dup wnn error*/
+ BFA_FCS_VPORT_SM_RSP_FAILED = 13, /* non-retryable failure */
+ BFA_FCS_VPORT_SM_STOPCOMP = 14, /* vport delete completion */
+ BFA_FCS_VPORT_SM_FABRIC_MAX = 15, /* max vports on fabric */
+};
+
+struct bfa_fcs_vport_s;
+typedef void (*bfa_fcs_vport_sm_t)(struct bfa_fcs_vport_s *fsm, enum bfa_fcs_vport_event);
+
struct bfa_fcs_vport_s {
struct list_head qe; /* queue elem */
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_vport_sm_t sm; /* state machine */
bfa_fcs_lport_t lport; /* logical port */
struct bfa_timer_s timer;
struct bfad_vport_s *vport_drv; /* Driver private */
@@ -397,9 +514,26 @@ struct bfa_fcs_itnim_s;
struct bfa_fcs_tin_s;
struct bfa_fcs_iprp_s;
+/*
+ * fcs_rport_ftrs_sm FCS rport state machine events
+ */
+
+enum rpf_event {
+ RPFSM_EVENT_RPORT_OFFLINE = 1, /* Rport offline */
+ RPFSM_EVENT_RPORT_ONLINE = 2, /* Rport online */
+ RPFSM_EVENT_FCXP_SENT = 3, /* Frame from has been sent */
+ RPFSM_EVENT_TIMEOUT = 4, /* Rport SM timeout event */
+ RPFSM_EVENT_RPSC_COMP = 5,
+ RPFSM_EVENT_RPSC_FAIL = 6,
+ RPFSM_EVENT_RPSC_ERROR = 7,
+};
+
+struct bfa_fcs_rpf_s;
+typedef void (*bfa_fcs_rpf_sm_t)(struct bfa_fcs_rpf_s *, enum rpf_event);
+
/* Rport Features (RPF) */
struct bfa_fcs_rpf_s {
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_rpf_sm_t sm; /* state machine */
struct bfa_fcs_rport_s *rport; /* parent rport */
struct bfa_timer_s timer; /* general purpose timer */
struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */
@@ -414,6 +548,36 @@ struct bfa_fcs_rpf_s {
*/
};
+/*
+ * fcs_rport_sm FCS rport state machine events
+ */
+enum rport_event {
+ RPSM_EVENT_PLOGI_SEND = 1, /* new rport; start with PLOGI */
+ RPSM_EVENT_PLOGI_RCVD = 2, /* Inbound PLOGI from remote port */
+ RPSM_EVENT_PLOGI_COMP = 3, /* PLOGI completed to rport */
+ RPSM_EVENT_LOGO_RCVD = 4, /* LOGO from remote device */
+ RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */
+ RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */
+ RPSM_EVENT_DELETE = 7, /* RPORT delete request */
+ RPSM_EVENT_FAB_SCN = 8, /* state change notification */
+ RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */
+ RPSM_EVENT_FAILED = 10, /* Request to rport failed. */
+ RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */
+ RPSM_EVENT_HCB_ONLINE = 12, /* BFA rport online callback */
+ RPSM_EVENT_HCB_OFFLINE = 13, /* BFA rport offline callback */
+ RPSM_EVENT_FC4_OFFLINE = 14, /* FC-4 offline complete */
+ RPSM_EVENT_ADDRESS_CHANGE = 15, /* Rport's PID has changed */
+ RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */
+ RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */
+ RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */
+ RPSM_EVENT_SCN_OFFLINE = 19, /* loop scn offline */
+ RPSM_EVENT_SCN_ONLINE = 20, /* loop scn online */
+ RPSM_EVENT_FC4_FCS_ONLINE = 21, /* FC-4 FCS online complete */
+};
+
+struct bfa_fcs_rport_s;
+typedef void (*bfa_fcs_rport_sm_t)(struct bfa_fcs_rport_s *, enum rport_event);
+
struct bfa_fcs_rport_s {
struct list_head qe; /* used by port/vport */
struct bfa_fcs_lport_s *port; /* parent FCS port */
@@ -430,7 +594,7 @@ struct bfa_fcs_rport_s {
wwn_t pwwn; /* port wwn of rport */
wwn_t nwwn; /* node wwn of rport */
struct bfa_rport_symname_s psym_name; /* port symbolic name */
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_rport_sm_t sm; /* state machine */
struct bfa_timer_s timer; /* general purpose timer */
struct bfa_fcs_itnim_s *itnim; /* ITN initiator mode role */
struct bfa_fcs_tin_s *tin; /* ITN initiator mode role */
@@ -488,12 +652,34 @@ void bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport);
void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport);
/*
+ * fcs_itnim_sm FCS itnim state machine events
+ */
+enum bfa_fcs_itnim_event {
+ BFA_FCS_ITNIM_SM_FCS_ONLINE = 1, /* rport online event */
+ BFA_FCS_ITNIM_SM_OFFLINE = 2, /* rport offline */
+ BFA_FCS_ITNIM_SM_FRMSENT = 3, /* prli frame is sent */
+ BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */
+ BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */
+ BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */
+ BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA online callback */
+ BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA offline callback */
+ BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */
+ BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */
+ BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */
+ BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
+ BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /* bfa rport online event */
+};
+
+struct bfa_fcs_itnim_s;
+typedef void (*bfa_fcs_itnim_sm_t)(struct bfa_fcs_itnim_s *, enum bfa_fcs_itnim_event);
+
+/*
* forward declarations
*/
struct bfad_itnim_s;
struct bfa_fcs_itnim_s {
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_itnim_sm_t sm; /* state machine */
struct bfa_fcs_rport_s *rport; /* parent remote rport */
struct bfad_itnim_s *itnim_drv; /* driver peer instance */
struct bfa_fcs_s *fcs; /* fcs instance */
@@ -703,78 +889,6 @@ struct bfa_fcs_s {
*/
/*
- * Fabric state machine events
- */
-enum bfa_fcs_fabric_event {
- BFA_FCS_FABRIC_SM_CREATE = 1, /* create from driver */
- BFA_FCS_FABRIC_SM_DELETE = 2, /* delete from driver */
- BFA_FCS_FABRIC_SM_LINK_DOWN = 3, /* link down from port */
- BFA_FCS_FABRIC_SM_LINK_UP = 4, /* link up from port */
- BFA_FCS_FABRIC_SM_CONT_OP = 5, /* flogi/auth continue op */
- BFA_FCS_FABRIC_SM_RETRY_OP = 6, /* flogi/auth retry op */
- BFA_FCS_FABRIC_SM_NO_FABRIC = 7, /* from flogi/auth */
- BFA_FCS_FABRIC_SM_PERF_EVFP = 8, /* from flogi/auth */
- BFA_FCS_FABRIC_SM_ISOLATE = 9, /* from EVFP processing */
- BFA_FCS_FABRIC_SM_NO_TAGGING = 10, /* no VFT tagging from EVFP */
- BFA_FCS_FABRIC_SM_DELAYED = 11, /* timeout delay event */
- BFA_FCS_FABRIC_SM_AUTH_FAILED = 12, /* auth failed */
- BFA_FCS_FABRIC_SM_AUTH_SUCCESS = 13, /* auth successful */
- BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */
- BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */
- BFA_FCS_FABRIC_SM_START = 16, /* from driver */
- BFA_FCS_FABRIC_SM_STOP = 17, /* Stop from driver */
- BFA_FCS_FABRIC_SM_STOPCOMP = 18, /* Stop completion */
- BFA_FCS_FABRIC_SM_LOGOCOMP = 19, /* FLOGO completion */
-};
-
-/*
- * fcs_rport_sm FCS rport state machine events
- */
-
-enum rport_event {
- RPSM_EVENT_PLOGI_SEND = 1, /* new rport; start with PLOGI */
- RPSM_EVENT_PLOGI_RCVD = 2, /* Inbound PLOGI from remote port */
- RPSM_EVENT_PLOGI_COMP = 3, /* PLOGI completed to rport */
- RPSM_EVENT_LOGO_RCVD = 4, /* LOGO from remote device */
- RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */
- RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */
- RPSM_EVENT_DELETE = 7, /* RPORT delete request */
- RPSM_EVENT_FAB_SCN = 8, /* state change notification */
- RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */
- RPSM_EVENT_FAILED = 10, /* Request to rport failed. */
- RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */
- RPSM_EVENT_HCB_ONLINE = 12, /* BFA rport online callback */
- RPSM_EVENT_HCB_OFFLINE = 13, /* BFA rport offline callback */
- RPSM_EVENT_FC4_OFFLINE = 14, /* FC-4 offline complete */
- RPSM_EVENT_ADDRESS_CHANGE = 15, /* Rport's PID has changed */
- RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */
- RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */
- RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */
- RPSM_EVENT_SCN_OFFLINE = 19, /* loop scn offline */
- RPSM_EVENT_SCN_ONLINE = 20, /* loop scn online */
- RPSM_EVENT_FC4_FCS_ONLINE = 21, /* FC-4 FCS online complete */
-};
-
-/*
- * fcs_itnim_sm FCS itnim state machine events
- */
-enum bfa_fcs_itnim_event {
- BFA_FCS_ITNIM_SM_FCS_ONLINE = 1, /* rport online event */
- BFA_FCS_ITNIM_SM_OFFLINE = 2, /* rport offline */
- BFA_FCS_ITNIM_SM_FRMSENT = 3, /* prli frame is sent */
- BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */
- BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */
- BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */
- BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA online callback */
- BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA offline callback */
- BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */
- BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */
- BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */
- BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
- BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /* bfa rport online event */
-};
-
-/*
* bfa fcs API functions
*/
void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
@@ -831,9 +945,7 @@ void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
*/
struct bfad_port_s;
-struct bfad_vf_s;
struct bfad_vport_s;
-struct bfad_rport_s;
/*
* lport callbacks
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index c7de62baeec99..40e65ab285040 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -16,6 +16,7 @@
#include "bfa_fcs.h"
#include "bfa_fcbuild.h"
#include "bfad_im.h"
+#include "bfa_fcpim.h"
BFA_TRC_FILE(FCS, FCPIM);
@@ -52,7 +53,23 @@ static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
static void bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
-static struct bfa_sm_table_s itnim_sm_table[] = {
+struct bfa_fcs_itnim_sm_table_s {
+ bfa_fcs_itnim_sm_t sm; /* state machine function */
+ enum bfa_itnim_state state; /* state machine encoding */
+ char *name; /* state name for display */
+};
+
+static inline enum bfa_itnim_state
+bfa_fcs_itnim_sm_to_state(struct bfa_fcs_itnim_sm_table_s *smt, bfa_fcs_itnim_sm_t sm)
+{
+ int i = 0;
+
+ while (smt[i].sm && smt[i].sm != sm)
+ i++;
+ return smt[i].state;
+}
+
+static struct bfa_fcs_itnim_sm_table_s itnim_sm_table[] = {
{BFA_SM(bfa_fcs_itnim_sm_offline), BFA_ITNIM_OFFLINE},
{BFA_SM(bfa_fcs_itnim_sm_prli_send), BFA_ITNIM_PRLI_SEND},
{BFA_SM(bfa_fcs_itnim_sm_prli), BFA_ITNIM_PRLI_SENT},
@@ -665,7 +682,7 @@ bfa_status_t
bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
{
bfa_trc(itnim->fcs, itnim->rport->pid);
- switch (bfa_sm_to_state(itnim_sm_table, itnim->sm)) {
+ switch (bfa_fcs_itnim_sm_to_state(itnim_sm_table, itnim->sm)) {
case BFA_ITNIM_ONLINE:
case BFA_ITNIM_INITIATIOR:
return BFA_STATUS_OK;
@@ -765,7 +782,7 @@ bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
if (itnim == NULL)
return BFA_STATUS_NO_FCPIM_NEXUS;
- attr->state = bfa_sm_to_state(itnim_sm_table, itnim->sm);
+ attr->state = bfa_fcs_itnim_sm_to_state(itnim_sm_table, itnim->sm);
attr->retry = itnim->seq_rec;
attr->rec_support = itnim->rec_support;
attr->conf_comp = itnim->conf_comp;
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 008afd8170871..966bf6cc6dd90 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -103,19 +103,6 @@ static struct {
},
};
-/*
- * fcs_port_sm FCS logical port state machine
- */
-
-enum bfa_fcs_lport_event {
- BFA_FCS_PORT_SM_CREATE = 1,
- BFA_FCS_PORT_SM_ONLINE = 2,
- BFA_FCS_PORT_SM_OFFLINE = 3,
- BFA_FCS_PORT_SM_DELETE = 4,
- BFA_FCS_PORT_SM_DELRPORT = 5,
- BFA_FCS_PORT_SM_STOP = 6,
-};
-
static void bfa_fcs_lport_sm_uninit(struct bfa_fcs_lport_s *port,
enum bfa_fcs_lport_event event);
static void bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
@@ -1426,20 +1413,6 @@ u32 bfa_fcs_fdmi_convert_speed(enum bfa_port_speed pport_speed);
* fcs_fdmi_sm FCS FDMI state machine
*/
-/*
- * FDMI State Machine events
- */
-enum port_fdmi_event {
- FDMISM_EVENT_PORT_ONLINE = 1,
- FDMISM_EVENT_PORT_OFFLINE = 2,
- FDMISM_EVENT_RSP_OK = 4,
- FDMISM_EVENT_RSP_ERROR = 5,
- FDMISM_EVENT_TIMEOUT = 6,
- FDMISM_EVENT_RHBA_SENT = 7,
- FDMISM_EVENT_RPRT_SENT = 8,
- FDMISM_EVENT_RPA_SENT = 9,
-};
-
static void bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event);
static void bfa_fcs_lport_fdmi_sm_sending_rhba(
@@ -2863,19 +2836,6 @@ static void bfa_fcs_lport_ms_gfn_response(void *fcsarg,
* fcs_ms_sm FCS MS state machine
*/
-/*
- * MS State Machine events
- */
-enum port_ms_event {
- MSSM_EVENT_PORT_ONLINE = 1,
- MSSM_EVENT_PORT_OFFLINE = 2,
- MSSM_EVENT_RSP_OK = 3,
- MSSM_EVENT_RSP_ERROR = 4,
- MSSM_EVENT_TIMEOUT = 5,
- MSSM_EVENT_FCXP_SENT = 6,
- MSSM_EVENT_PORT_FABRIC_RSCN = 7
-};
-
static void bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event);
static void bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms,
@@ -3644,25 +3604,6 @@ static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port);
* fcs_ns_sm FCS nameserver interface state machine
*/
-/*
- * VPort NS State Machine events
- */
-enum vport_ns_event {
- NSSM_EVENT_PORT_ONLINE = 1,
- NSSM_EVENT_PORT_OFFLINE = 2,
- NSSM_EVENT_PLOGI_SENT = 3,
- NSSM_EVENT_RSP_OK = 4,
- NSSM_EVENT_RSP_ERROR = 5,
- NSSM_EVENT_TIMEOUT = 6,
- NSSM_EVENT_NS_QUERY = 7,
- NSSM_EVENT_RSPNID_SENT = 8,
- NSSM_EVENT_RFTID_SENT = 9,
- NSSM_EVENT_RFFID_SENT = 10,
- NSSM_EVENT_GIDFT_SENT = 11,
- NSSM_EVENT_RNNID_SENT = 12,
- NSSM_EVENT_RSNN_NN_SENT = 13,
-};
-
static void bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns,
@@ -5239,18 +5180,6 @@ static void bfa_fcs_lport_scn_timeout(void *arg);
* fcs_scm_sm FCS SCN state machine
*/
-/*
- * VPort SCN State Machine events
- */
-enum port_scn_event {
- SCNSM_EVENT_PORT_ONLINE = 1,
- SCNSM_EVENT_PORT_OFFLINE = 2,
- SCNSM_EVENT_RSP_OK = 3,
- SCNSM_EVENT_RSP_ERROR = 4,
- SCNSM_EVENT_TIMEOUT = 5,
- SCNSM_EVENT_SCR_SENT = 6,
-};
-
static void bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn,
enum port_scn_event event);
static void bfa_fcs_lport_scn_sm_sending_scr(
@@ -5989,27 +5918,6 @@ static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport);
* fcs_vport_sm FCS virtual port state machine
*/
-/*
- * VPort State Machine events
- */
-enum bfa_fcs_vport_event {
- BFA_FCS_VPORT_SM_CREATE = 1, /* vport create event */
- BFA_FCS_VPORT_SM_DELETE = 2, /* vport delete event */
- BFA_FCS_VPORT_SM_START = 3, /* vport start request */
- BFA_FCS_VPORT_SM_STOP = 4, /* stop: unsupported */
- BFA_FCS_VPORT_SM_ONLINE = 5, /* fabric online */
- BFA_FCS_VPORT_SM_OFFLINE = 6, /* fabric offline event */
- BFA_FCS_VPORT_SM_FRMSENT = 7, /* fdisc/logo sent events */
- BFA_FCS_VPORT_SM_RSP_OK = 8, /* good response */
- BFA_FCS_VPORT_SM_RSP_ERROR = 9, /* error/bad response */
- BFA_FCS_VPORT_SM_TIMEOUT = 10, /* delay timer event */
- BFA_FCS_VPORT_SM_DELCOMP = 11, /* lport delete completion */
- BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12, /* Dup wnn error*/
- BFA_FCS_VPORT_SM_RSP_FAILED = 13, /* non-retryable failure */
- BFA_FCS_VPORT_SM_STOPCOMP = 14, /* vport delete completion */
- BFA_FCS_VPORT_SM_FABRIC_MAX = 15, /* max vports on fabric */
-};
-
static void bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
static void bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
@@ -6037,7 +5945,23 @@ static void bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport,
static void bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
-static struct bfa_sm_table_s vport_sm_table[] = {
+struct bfa_fcs_vport_sm_table_s {
+ bfa_fcs_vport_sm_t sm; /* state machine function */
+ enum bfa_vport_state state; /* state machine encoding */
+ char *name; /* state name for display */
+};
+
+static inline enum bfa_vport_state
+bfa_vport_sm_to_state(struct bfa_fcs_vport_sm_table_s *smt, bfa_fcs_vport_sm_t sm)
+{
+ int i = 0;
+
+ while (smt[i].sm && smt[i].sm != sm)
+ i++;
+ return smt[i].state;
+}
+
+static struct bfa_fcs_vport_sm_table_s vport_sm_table[] = {
{BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT},
{BFA_SM(bfa_fcs_vport_sm_created), BFA_FCS_VPORT_CREATED},
{BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE},
@@ -6864,7 +6788,7 @@ bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
memset(attr, 0, sizeof(struct bfa_vport_attr_s));
bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr);
- attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
+ attr->vport_state = bfa_vport_sm_to_state(vport_sm_table, vport->sm);
}
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index c21aa37b8adbe..ce52a9c88ae63 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -136,7 +136,23 @@ static void bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport,
static void bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport,
enum rport_event event);
-static struct bfa_sm_table_s rport_sm_table[] = {
+struct bfa_fcs_rport_sm_table_s {
+ bfa_fcs_rport_sm_t sm; /* state machine function */
+ enum bfa_rport_state state; /* state machine encoding */
+ char *name; /* state name for display */
+};
+
+static inline enum bfa_rport_state
+bfa_rport_sm_to_state(struct bfa_fcs_rport_sm_table_s *smt, bfa_fcs_rport_sm_t sm)
+{
+ int i = 0;
+
+ while (smt[i].sm && smt[i].sm != sm)
+ i++;
+ return smt[i].state;
+}
+
+static struct bfa_fcs_rport_sm_table_s rport_sm_table[] = {
{BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT},
{BFA_SM(bfa_fcs_rport_sm_plogi_sending), BFA_RPORT_PLOGI},
{BFA_SM(bfa_fcs_rport_sm_plogiacc_sending), BFA_RPORT_ONLINE},
@@ -2964,7 +2980,7 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
int
bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport)
{
- return bfa_sm_to_state(rport_sm_table, rport->sm);
+ return bfa_rport_sm_to_state(rport_sm_table, rport->sm);
}
@@ -3107,20 +3123,6 @@ static void bfa_fcs_rpf_rpsc2_response(void *fcsarg,
static void bfa_fcs_rpf_timeout(void *arg);
-/*
- * fcs_rport_ftrs_sm FCS rport state machine events
- */
-
-enum rpf_event {
- RPFSM_EVENT_RPORT_OFFLINE = 1, /* Rport offline */
- RPFSM_EVENT_RPORT_ONLINE = 2, /* Rport online */
- RPFSM_EVENT_FCXP_SENT = 3, /* Frame from has been sent */
- RPFSM_EVENT_TIMEOUT = 4, /* Rport SM timeout event */
- RPFSM_EVENT_RPSC_COMP = 5,
- RPFSM_EVENT_RPSC_FAIL = 6,
- RPFSM_EVENT_RPSC_ERROR = 7,
-};
-
static void bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf,
enum rpf_event event);
static void bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf,
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index e1ed1424fddb2..ea2f107f564cd 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -114,21 +114,6 @@ static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp(
/*
* IOC state machine definitions/declarations
*/
-enum ioc_event {
- IOC_E_RESET = 1, /* IOC reset request */
- IOC_E_ENABLE = 2, /* IOC enable request */
- IOC_E_DISABLE = 3, /* IOC disable request */
- IOC_E_DETACH = 4, /* driver detach cleanup */
- IOC_E_ENABLED = 5, /* f/w enabled */
- IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */
- IOC_E_DISABLED = 7, /* f/w disabled */
- IOC_E_PFFAILED = 8, /* failure notice by iocpf sm */
- IOC_E_HBFAIL = 9, /* heartbeat failure */
- IOC_E_HWERROR = 10, /* hardware error interrupt */
- IOC_E_TIMEOUT = 11, /* timeout */
- IOC_E_HWFAILED = 12, /* PCI mapping failure notice */
-};
-
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
@@ -140,7 +125,13 @@ bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
-static struct bfa_sm_table_s ioc_sm_table[] = {
+struct bfa_ioc_sm_table {
+ bfa_ioc_sm_t sm; /* state machine function */
+ enum bfa_ioc_state state; /* state machine encoding */
+ char *name; /* state name for display */
+};
+
+static struct bfa_ioc_sm_table ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
@@ -153,6 +144,16 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
+static inline enum bfa_ioc_state
+bfa_ioc_sm_to_state(struct bfa_ioc_sm_table *smt, bfa_ioc_sm_t sm)
+{
+ int i = 0;
+
+ while (smt[i].sm && smt[i].sm != sm)
+ i++;
+ return smt[i].state;
+}
+
/*
* IOCPF state machine definitions/declarations
*/
@@ -179,24 +180,6 @@ static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);
/*
- * IOCPF state machine events
- */
-enum iocpf_event {
- IOCPF_E_ENABLE = 1, /* IOCPF enable request */
- IOCPF_E_DISABLE = 2, /* IOCPF disable request */
- IOCPF_E_STOP = 3, /* stop on driver detach */
- IOCPF_E_FWREADY = 4, /* f/w initialization done */
- IOCPF_E_FWRSP_ENABLE = 5, /* enable f/w response */
- IOCPF_E_FWRSP_DISABLE = 6, /* disable f/w response */
- IOCPF_E_FAIL = 7, /* failure notice by ioc sm */
- IOCPF_E_INITFAIL = 8, /* init fail notice by ioc sm */
- IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */
- IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */
- IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
- IOCPF_E_SEM_ERROR = 12, /* h/w sem mapping error */
-};
-
-/*
* IOCPF states
*/
enum bfa_iocpf_state {
@@ -228,7 +211,23 @@ bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
-static struct bfa_sm_table_s iocpf_sm_table[] = {
+struct bfa_iocpf_sm_table {
+ bfa_iocpf_sm_t sm; /* state machine function */
+ enum bfa_iocpf_state state; /* state machine encoding */
+ char *name; /* state name for display */
+};
+
+static inline enum bfa_iocpf_state
+bfa_iocpf_sm_to_state(struct bfa_iocpf_sm_table *smt, bfa_iocpf_sm_t sm)
+{
+ int i = 0;
+
+ while (smt[i].sm && smt[i].sm != sm)
+ i++;
+ return smt[i].state;
+}
+
+static struct bfa_iocpf_sm_table iocpf_sm_table[] = {
{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
@@ -2815,12 +2814,12 @@ enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
enum bfa_iocpf_state iocpf_st;
- enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+ enum bfa_ioc_state ioc_st = bfa_ioc_sm_to_state(ioc_sm_table, ioc->fsm);
if (ioc_st == BFA_IOC_ENABLING ||
ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
- iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
+ iocpf_st = bfa_iocpf_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
switch (iocpf_st) {
case BFA_IOCPF_SEMWAIT:
@@ -5805,18 +5804,6 @@ bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
}
}
-/*
- * DCONF state machine events
- */
-enum bfa_dconf_event {
- BFA_DCONF_SM_INIT = 1, /* dconf Init */
- BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */
- BFA_DCONF_SM_WR = 3, /* binding change, map */
- BFA_DCONF_SM_TIMEOUT = 4, /* Start timer */
- BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
- BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
-};
-
/* forward declaration of DCONF state machine */
static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 933a1c3890ff5..3ec10503caff9 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -260,6 +260,24 @@ struct bfa_ioc_cbfn_s {
/*
* IOC event notification mechanism.
*/
+enum ioc_event {
+ IOC_E_RESET = 1, /* IOC reset request */
+ IOC_E_ENABLE = 2, /* IOC enable request */
+ IOC_E_DISABLE = 3, /* IOC disable request */
+ IOC_E_DETACH = 4, /* driver detach cleanup */
+ IOC_E_ENABLED = 5, /* f/w enabled */
+ IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */
+ IOC_E_DISABLED = 7, /* f/w disabled */
+ IOC_E_PFFAILED = 8, /* failure notice by iocpf sm */
+ IOC_E_HBFAIL = 9, /* heartbeat failure */
+ IOC_E_HWERROR = 10, /* hardware error interrupt */
+ IOC_E_TIMEOUT = 11, /* timeout */
+ IOC_E_HWFAILED = 12, /* PCI mapping failure notice */
+};
+
+struct bfa_ioc_s;
+typedef void (*bfa_ioc_sm_t)(struct bfa_ioc_s *fsm, enum ioc_event);
+
enum bfa_ioc_event_e {
BFA_IOC_E_ENABLED = 1,
BFA_IOC_E_DISABLED = 2,
@@ -282,8 +300,29 @@ struct bfa_ioc_notify_s {
(__notify)->cbarg = (__cbarg); \
} while (0)
+/*
+ * IOCPF state machine events
+ */
+enum iocpf_event {
+ IOCPF_E_ENABLE = 1, /* IOCPF enable request */
+ IOCPF_E_DISABLE = 2, /* IOCPF disable request */
+ IOCPF_E_STOP = 3, /* stop on driver detach */
+ IOCPF_E_FWREADY = 4, /* f/w initialization done */
+ IOCPF_E_FWRSP_ENABLE = 5, /* enable f/w response */
+ IOCPF_E_FWRSP_DISABLE = 6, /* disable f/w response */
+ IOCPF_E_FAIL = 7, /* failure notice by ioc sm */
+ IOCPF_E_INITFAIL = 8, /* init fail notice by ioc sm */
+ IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */
+ IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */
+ IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
+ IOCPF_E_SEM_ERROR = 12, /* h/w sem mapping error */
+};
+
+struct bfa_iocpf_s;
+typedef void (*bfa_iocpf_sm_t)(struct bfa_iocpf_s *fsm, enum iocpf_event);
+
struct bfa_iocpf_s {
- bfa_fsm_t fsm;
+ bfa_iocpf_sm_t fsm;
struct bfa_ioc_s *ioc;
bfa_boolean_t fw_mismatch_notified;
bfa_boolean_t auto_recover;
@@ -291,7 +330,7 @@ struct bfa_iocpf_s {
};
struct bfa_ioc_s {
- bfa_fsm_t fsm;
+ bfa_ioc_sm_t fsm;
struct bfa_s *bfa;
struct bfa_pcidev_s pcidev;
struct bfa_timer_mod_s *timer_mod;
@@ -361,14 +400,18 @@ struct bfa_reqq_wait_s {
void *cbarg;
};
-typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
+typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
+typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
/*
* Generic BFA callback element.
*/
struct bfa_cb_qe_s {
struct list_head qe;
- bfa_cb_cbfn_t cbfn;
+ union {
+ bfa_cb_cbfn_status_t cbfn_status;
+ bfa_cb_cbfn_t cbfn;
+ };
bfa_boolean_t once;
bfa_boolean_t pre_rmv; /* set for stack based qe(s) */
bfa_status_t fw_status; /* to access fw status in comp proc */
@@ -376,22 +419,6 @@ struct bfa_cb_qe_s {
};
/*
- * IOCFC state machine definitions/declarations
- */
-enum iocfc_event {
- IOCFC_E_INIT = 1, /* IOCFC init request */
- IOCFC_E_START = 2, /* IOCFC mod start request */
- IOCFC_E_STOP = 3, /* IOCFC stop request */
- IOCFC_E_ENABLE = 4, /* IOCFC enable request */
- IOCFC_E_DISABLE = 5, /* IOCFC disable request */
- IOCFC_E_IOC_ENABLED = 6, /* IOC enabled message */
- IOCFC_E_IOC_DISABLED = 7, /* IOC disabled message */
- IOCFC_E_IOC_FAILED = 8, /* failure notice by IOC sm */
- IOCFC_E_DCONF_DONE = 9, /* dconf read/write done */
- IOCFC_E_CFG_DONE = 10, /* IOCFC config complete */
-};
-
-/*
* ASIC block configurtion related
*/
@@ -775,8 +802,23 @@ struct bfa_dconf_s {
};
#pragma pack()
+/*
+ * DCONF state machine events
+ */
+enum bfa_dconf_event {
+ BFA_DCONF_SM_INIT = 1, /* dconf Init */
+ BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */
+ BFA_DCONF_SM_WR = 3, /* binding change, map */
+ BFA_DCONF_SM_TIMEOUT = 4, /* Start timer */
+ BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
+ BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
+};
+
+struct bfa_dconf_mod_s;
+typedef void (*bfa_dconf_sm_t)(struct bfa_dconf_mod_s *fsm, enum bfa_dconf_event);
+
struct bfa_dconf_mod_s {
- bfa_sm_t sm;
+ bfa_dconf_sm_t sm;
u8 instance;
bfa_boolean_t read_data_valid;
bfa_boolean_t min_cfg;
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index c9745c0b4eee3..9f33aa303b189 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -41,36 +41,6 @@ BFA_TRC_FILE(HAL, FCXP);
(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
/*
- * BFA port state machine events
- */
-enum bfa_fcport_sm_event {
- BFA_FCPORT_SM_START = 1, /* start port state machine */
- BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
- BFA_FCPORT_SM_ENABLE = 3, /* enable port */
- BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
- BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
- BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
- BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */
- BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
- BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
- BFA_FCPORT_SM_DPORTENABLE = 10, /* enable dport */
- BFA_FCPORT_SM_DPORTDISABLE = 11,/* disable dport */
- BFA_FCPORT_SM_FAA_MISCONFIG = 12, /* FAA misconfiguratin */
- BFA_FCPORT_SM_DDPORTENABLE = 13, /* enable ddport */
- BFA_FCPORT_SM_DDPORTDISABLE = 14, /* disable ddport */
-};
-
-/*
- * BFA port link notification state machine events
- */
-
-enum bfa_fcport_ln_sm_event {
- BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
- BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
- BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
-};
-
-/*
* RPORT related definitions
*/
#define bfa_rport_offline_cb(__rp) do { \
@@ -201,7 +171,23 @@ static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
enum bfa_fcport_ln_sm_event event);
-static struct bfa_sm_table_s hal_port_sm_table[] = {
+struct bfa_fcport_sm_table_s {
+ bfa_fcport_sm_t sm; /* state machine function */
+ enum bfa_port_states state; /* state machine encoding */
+ char *name; /* state name for display */
+};
+
+static inline enum bfa_port_states
+bfa_fcport_sm_to_state(struct bfa_fcport_sm_table_s *smt, bfa_fcport_sm_t sm)
+{
+ int i = 0;
+
+ while (smt[i].sm && smt[i].sm != sm)
+ i++;
+ return smt[i].state;
+}
+
+static struct bfa_fcport_sm_table_s hal_port_sm_table[] = {
{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
@@ -3545,7 +3531,7 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
fcport->event_arg.i2hmsg = i2hmsg;
bfa_trc(bfa, msg->mhdr.msg_id);
- bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
+ bfa_trc(bfa, bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm));
switch (msg->mhdr.msg_id) {
case BFI_FCPORT_I2H_ENABLE_RSP:
@@ -3980,7 +3966,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
- attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
+ attr->port_state = bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm);
attr->fec_state = fcport->fec_state;
@@ -4062,7 +4048,7 @@ bfa_fcport_is_disabled(struct bfa_s *bfa)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
+ return bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm) ==
BFA_PORT_ST_DISABLED;
}
@@ -4072,7 +4058,7 @@ bfa_fcport_is_dport(struct bfa_s *bfa)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
+ return (bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm) ==
BFA_PORT_ST_DPORT);
}
@@ -4081,7 +4067,7 @@ bfa_fcport_is_ddport(struct bfa_s *bfa)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
+ return (bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm) ==
BFA_PORT_ST_DDPORT);
}
@@ -5641,20 +5627,6 @@ enum bfa_dport_test_state_e {
BFA_DPORT_ST_NOTSTART = 4, /*!< test not start dport is enabled */
};
-/*
- * BFA DPORT state machine events
- */
-enum bfa_dport_sm_event {
- BFA_DPORT_SM_ENABLE = 1, /* dport enable event */
- BFA_DPORT_SM_DISABLE = 2, /* dport disable event */
- BFA_DPORT_SM_FWRSP = 3, /* fw enable/disable rsp */
- BFA_DPORT_SM_QRESUME = 4, /* CQ space available */
- BFA_DPORT_SM_HWFAIL = 5, /* IOC h/w failure */
- BFA_DPORT_SM_START = 6, /* re-start dport test */
- BFA_DPORT_SM_REQFAIL = 7, /* request failure */
- BFA_DPORT_SM_SCN = 8, /* state change notify frm fw */
-};
-
static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
enum bfa_dport_sm_event event);
static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index 9c83109574e91..26eeee82bedc6 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -226,22 +226,6 @@ struct bfa_fcxp_wqe_s {
void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
-
-/*
- * RPORT related defines
- */
-enum bfa_rport_event {
- BFA_RPORT_SM_CREATE = 1, /* rport create event */
- BFA_RPORT_SM_DELETE = 2, /* deleting an existing rport */
- BFA_RPORT_SM_ONLINE = 3, /* rport is online */
- BFA_RPORT_SM_OFFLINE = 4, /* rport is offline */
- BFA_RPORT_SM_FWRSP = 5, /* firmware response */
- BFA_RPORT_SM_HWFAIL = 6, /* IOC h/w failure */
- BFA_RPORT_SM_QOS_SCN = 7, /* QoS SCN from firmware */
- BFA_RPORT_SM_SET_SPEED = 8, /* Set Rport Speed */
- BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */
-};
-
#define BFA_RPORT_MIN 4
struct bfa_rport_mod_s {
@@ -285,11 +269,29 @@ struct bfa_rport_info_s {
};
/*
+ * RPORT related defines
+ */
+enum bfa_rport_event {
+ BFA_RPORT_SM_CREATE = 1, /* rport create event */
+ BFA_RPORT_SM_DELETE = 2, /* deleting an existing rport */
+ BFA_RPORT_SM_ONLINE = 3, /* rport is online */
+ BFA_RPORT_SM_OFFLINE = 4, /* rport is offline */
+ BFA_RPORT_SM_FWRSP = 5, /* firmware response */
+ BFA_RPORT_SM_HWFAIL = 6, /* IOC h/w failure */
+ BFA_RPORT_SM_QOS_SCN = 7, /* QoS SCN from firmware */
+ BFA_RPORT_SM_SET_SPEED = 8, /* Set Rport Speed */
+ BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */
+};
+
+struct bfa_rport_s;
+typedef void (*bfa_rport_sm_t)(struct bfa_rport_s *, enum bfa_rport_event);
+
+/*
* BFA rport data structure
*/
struct bfa_rport_s {
struct list_head qe; /* queue element */
- bfa_sm_t sm; /* state machine */
+ bfa_rport_sm_t sm; /* state machine */
struct bfa_s *bfa; /* backpointer to BFA */
void *rport_drv; /* fcs/driver rport object */
u16 fw_handle; /* firmware rport handle */
@@ -378,12 +380,30 @@ void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
void bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw);
/*
+ * lps_pvt BFA LPS private functions
+ */
+
+enum bfa_lps_event {
+ BFA_LPS_SM_LOGIN = 1, /* login request from user */
+ BFA_LPS_SM_LOGOUT = 2, /* logout request from user */
+ BFA_LPS_SM_FWRSP = 3, /* f/w response to login/logout */
+ BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */
+ BFA_LPS_SM_DELETE = 5, /* lps delete from user */
+ BFA_LPS_SM_OFFLINE = 6, /* Link is offline */
+ BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
+ BFA_LPS_SM_SET_N2N_PID = 8, /* Set assigned PID for n2n */
+};
+
+struct bfa_lps_s;
+typedef void (*bfa_lps_sm_t)(struct bfa_lps_s *, enum bfa_lps_event);
+
+/*
* LPS - bfa lport login/logout service interface
*/
struct bfa_lps_s {
struct list_head qe; /* queue element */
struct bfa_s *bfa; /* parent bfa instance */
- bfa_sm_t sm; /* finite state machine */
+ bfa_lps_sm_t sm; /* finite state machine */
u8 bfa_tag; /* lport tag */
u8 fw_tag; /* lport fw tag */
u8 reqq; /* lport request queue */
@@ -440,11 +460,24 @@ void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
/*
+ * BFA port link notification state machine events
+ */
+
+enum bfa_fcport_ln_sm_event {
+ BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
+ BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
+ BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
+};
+
+struct bfa_fcport_ln_s;
+typedef void (*bfa_fcport_ln_sm_t)(struct bfa_fcport_ln_s *, enum bfa_fcport_ln_sm_event);
+
+/*
* Link notification data structure
*/
struct bfa_fcport_ln_s {
struct bfa_fcport_s *fcport;
- bfa_sm_t sm;
+ bfa_fcport_ln_sm_t sm;
struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */
enum bfa_port_linkstate ln_event; /* ln event for callback */
};
@@ -454,11 +487,34 @@ struct bfa_fcport_trunk_s {
};
/*
+ * BFA port state machine events
+ */
+enum bfa_fcport_sm_event {
+ BFA_FCPORT_SM_START = 1, /* start port state machine */
+ BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
+ BFA_FCPORT_SM_ENABLE = 3, /* enable port */
+ BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
+ BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
+ BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
+ BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkdown event */
+ BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
+ BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
+ BFA_FCPORT_SM_DPORTENABLE = 10, /* enable dport */
+ BFA_FCPORT_SM_DPORTDISABLE = 11,/* disable dport */
+ BFA_FCPORT_SM_FAA_MISCONFIG = 12, /* FAA misconfiguration */
+ BFA_FCPORT_SM_DDPORTENABLE = 13, /* enable ddport */
+ BFA_FCPORT_SM_DDPORTDISABLE = 14, /* disable ddport */
+};
+
+struct bfa_fcport_s;
+typedef void (*bfa_fcport_sm_t)(struct bfa_fcport_s *, enum bfa_fcport_sm_event);
+
+/*
* BFA FC port data structure
*/
struct bfa_fcport_s {
struct bfa_s *bfa; /* parent BFA instance */
- bfa_sm_t sm; /* port state machine */
+ bfa_fcport_sm_t sm; /* port state machine */
wwn_t nwwn; /* node wwn of physical port */
wwn_t pwwn; /* port wwn of physical oprt */
enum bfa_port_speed speed_sup;
@@ -706,9 +762,26 @@ struct bfa_fcdiag_lb_s {
u32 status;
};
+/*
+ * BFA DPORT state machine events
+ */
+enum bfa_dport_sm_event {
+ BFA_DPORT_SM_ENABLE = 1, /* dport enable event */
+ BFA_DPORT_SM_DISABLE = 2, /* dport disable event */
+ BFA_DPORT_SM_FWRSP = 3, /* fw enable/disable rsp */
+ BFA_DPORT_SM_QRESUME = 4, /* CQ space available */
+ BFA_DPORT_SM_HWFAIL = 5, /* IOC h/w failure */
+ BFA_DPORT_SM_START = 6, /* re-start dport test */
+ BFA_DPORT_SM_REQFAIL = 7, /* request failure */
+ BFA_DPORT_SM_SCN = 8, /* state change notify frm fw */
+};
+
+struct bfa_dport_s;
+typedef void (*bfa_dport_sm_t)(struct bfa_dport_s *, enum bfa_dport_sm_event);
+
struct bfa_dport_s {
struct bfa_s *bfa; /* Back pointer to BFA */
- bfa_sm_t sm; /* finite state machine */
+ bfa_dport_sm_t sm; /* finite state machine */
struct bfa_reqq_wait_s reqq_wait;
bfa_cb_diag_t cbfn;
void *cbarg;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index d4ceca2d435ee..54bd11e6d5933 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -2135,8 +2135,7 @@ bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
struct bfa_cb_pending_q_s cb_qe;
init_completion(&fcomp.comp);
- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
- &fcomp, &iocmd->stats);
+ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, &iocmd->stats);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -2159,7 +2158,7 @@ bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
struct bfa_cb_pending_q_s cb_qe;
init_completion(&fcomp.comp);
- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
+ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, NULL);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
@@ -2443,8 +2442,7 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
init_completion(&fcomp.comp);
- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
- &fcomp, &iocmd->stats);
+ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, &iocmd->stats);
spin_lock_irqsave(&bfad->bfad_lock, flags);
WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
@@ -2474,8 +2472,7 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
init_completion(&fcomp.comp);
- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
- &fcomp, NULL);
+ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, NULL);
spin_lock_irqsave(&bfad->bfad_lock, flags);
WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 7682cfa34265d..da42e3261237e 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -175,11 +175,27 @@ union bfad_tmp_buf {
wwn_t wwn[BFA_FCS_MAX_LPORTS];
};
+/* BFAD state machine events */
+enum bfad_sm_event {
+ BFAD_E_CREATE = 1,
+ BFAD_E_KTHREAD_CREATE_FAILED = 2,
+ BFAD_E_INIT = 3,
+ BFAD_E_INIT_SUCCESS = 4,
+ BFAD_E_HAL_INIT_FAILED = 5,
+ BFAD_E_INIT_FAILED = 6,
+ BFAD_E_FCS_EXIT_COMP = 7,
+ BFAD_E_EXIT_COMP = 8,
+ BFAD_E_STOP = 9
+};
+
+struct bfad_s;
+typedef void (*bfad_sm_t)(struct bfad_s *, enum bfad_sm_event);
+
/*
* BFAD (PCI function) data structure
*/
struct bfad_s {
- bfa_sm_t sm; /* state machine */
+ bfad_sm_t sm; /* state machine */
struct list_head list_entry;
struct bfa_s bfa;
struct bfa_fcs_s bfa_fcs;
@@ -226,19 +242,6 @@ struct bfad_s {
struct list_head vport_list;
};
-/* BFAD state machine events */
-enum bfad_sm_event {
- BFAD_E_CREATE = 1,
- BFAD_E_KTHREAD_CREATE_FAILED = 2,
- BFAD_E_INIT = 3,
- BFAD_E_INIT_SUCCESS = 4,
- BFAD_E_HAL_INIT_FAILED = 5,
- BFAD_E_INIT_FAILED = 6,
- BFAD_E_FCS_EXIT_COMP = 7,
- BFAD_E_EXIT_COMP = 8,
- BFAD_E_STOP = 9
-};
-
/*
* RPORT data structure
*/
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 2c246e80c1c4d..d91659811eb3c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -833,7 +833,6 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");
- spin_lock_bh(&tgt->cq_lock);
ctx_base_ptr = tgt->ctx_base;
tgt->ctx_base = NULL;
@@ -889,7 +888,6 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
tgt->sq, tgt->sq_dma);
tgt->sq = NULL;
}
- spin_unlock_bh(&tgt->cq_lock);
if (ctx_base_ptr)
iounmap(ctx_base_ptr);
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 2b864061e0730..fa07a6f54003e 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -102,7 +102,9 @@ do { \
#define MAX_RETRIES 1
-static struct class * ch_sysfs_class;
+static const struct class ch_sysfs_class = {
+ .name = "scsi_changer",
+};
typedef struct {
struct kref ref;
@@ -113,7 +115,6 @@ typedef struct {
struct scsi_device **dt; /* ptrs to data transfer elements */
u_int firsts[CH_TYPES];
u_int counts[CH_TYPES];
- u_int unit_attention;
u_int voltags;
struct mutex lock;
} scsi_changer;
@@ -186,17 +187,29 @@ static int
ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
void *buffer, unsigned int buflength, enum req_op op)
{
- int errno, retries = 0, timeout, result;
+ int errno = 0, timeout, result;
struct scsi_sense_hdr sshdr;
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = 3,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
+ .failures = &failures,
};
timeout = (cmd[0] == INITIALIZE_ELEMENT_STATUS)
? timeout_init : timeout_move;
- retry:
- errno = 0;
result = scsi_execute_cmd(ch->device, cmd, op, buffer, buflength,
timeout * HZ, MAX_RETRIES, &exec_args);
if (result < 0)
@@ -205,14 +218,6 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
if (debug)
scsi_print_sense_hdr(ch->device, ch->name, &sshdr);
errno = ch_find_errno(&sshdr);
-
- switch(sshdr.sense_key) {
- case UNIT_ATTENTION:
- ch->unit_attention = 1;
- if (retries++ < 3)
- goto retry;
- break;
- }
}
return errno;
}
@@ -927,7 +932,7 @@ static int ch_probe(struct device *dev)
mutex_init(&ch->lock);
kref_init(&ch->ref);
ch->device = sd;
- class_dev = device_create(ch_sysfs_class, dev,
+ class_dev = device_create(&ch_sysfs_class, dev,
MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch,
"s%s", ch->name);
if (IS_ERR(class_dev)) {
@@ -952,7 +957,7 @@ static int ch_probe(struct device *dev)
return 0;
destroy_dev:
- device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
+ device_destroy(&ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
put_device:
scsi_device_put(sd);
remove_idr:
@@ -971,7 +976,7 @@ static int ch_remove(struct device *dev)
dev_set_drvdata(dev, NULL);
spin_unlock(&ch_index_lock);
- device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR,ch->minor));
+ device_destroy(&ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
scsi_device_put(ch->device);
kref_put(&ch->ref, ch_destroy);
return 0;
@@ -1000,11 +1005,9 @@ static int __init init_ch_module(void)
int rc;
printk(KERN_INFO "SCSI Media Changer driver v" VERSION " \n");
- ch_sysfs_class = class_create("scsi_changer");
- if (IS_ERR(ch_sysfs_class)) {
- rc = PTR_ERR(ch_sysfs_class);
+ rc = class_register(&ch_sysfs_class);
+ if (rc)
return rc;
- }
rc = register_chrdev(SCSI_CHANGER_MAJOR,"ch",&changer_fops);
if (rc < 0) {
printk("Unable to get major %d for SCSI-Changer\n",
@@ -1019,7 +1022,7 @@ static int __init init_ch_module(void)
fail2:
unregister_chrdev(SCSI_CHANGER_MAJOR, "ch");
fail1:
- class_destroy(ch_sysfs_class);
+ class_unregister(&ch_sysfs_class);
return rc;
}
@@ -1027,7 +1030,7 @@ static void __exit exit_ch_module(void)
{
scsi_unregister_driver(&ch_template.gendrv);
unregister_chrdev(SCSI_CHANGER_MAJOR, "ch");
- class_destroy(ch_sysfs_class);
+ class_unregister(&ch_sysfs_class);
idr_destroy(&ch_index_idr);
}
diff --git a/drivers/scsi/csiostor/csio_defs.h b/drivers/scsi/csiostor/csio_defs.h
index c38017b4af982..e50e93e7fe5a1 100644
--- a/drivers/scsi/csiostor/csio_defs.h
+++ b/drivers/scsi/csiostor/csio_defs.h
@@ -73,7 +73,21 @@ csio_list_deleted(struct list_head *list)
#define csio_list_prev(elem) (((struct list_head *)(elem))->prev)
/* State machine */
-typedef void (*csio_sm_state_t)(void *, uint32_t);
+struct csio_lnode;
+
+/* State machine evets */
+enum csio_ln_ev {
+ CSIO_LNE_NONE = (uint32_t)0,
+ CSIO_LNE_LINKUP,
+ CSIO_LNE_FAB_INIT_DONE,
+ CSIO_LNE_LINK_DOWN,
+ CSIO_LNE_DOWN_LINK,
+ CSIO_LNE_LOGO,
+ CSIO_LNE_CLOSE,
+ CSIO_LNE_MAX_EVENT,
+};
+
+typedef void (*csio_sm_state_t)(struct csio_lnode *ln, enum csio_ln_ev evt);
struct csio_sm {
struct list_head sm_list;
@@ -83,7 +97,7 @@ struct csio_sm {
static inline void
csio_set_state(void *smp, void *state)
{
- ((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state;
+ ((struct csio_sm *)smp)->sm_state = state;
}
static inline void
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index d5ac938970232..5b3ffefae476d 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -1095,7 +1095,7 @@ csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
int
csio_is_lnode_ready(struct csio_lnode *ln)
{
- return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
+ return (csio_get_state(ln) == csio_lns_ready);
}
/*****************************************************************************/
@@ -1366,15 +1366,15 @@ csio_free_fcfinfo(struct kref *kref)
void
csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
{
- if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {
+ if (csio_get_state(ln) == csio_lns_uninit) {
strcpy(str, "UNINIT");
return;
}
- if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {
+ if (csio_get_state(ln) == csio_lns_ready) {
strcpy(str, "READY");
return;
}
- if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {
+ if (csio_get_state(ln) == csio_lns_offline) {
strcpy(str, "OFFLINE");
return;
}
diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h
index 372a67d122d38..607698a0f0631 100644
--- a/drivers/scsi/csiostor/csio_lnode.h
+++ b/drivers/scsi/csiostor/csio_lnode.h
@@ -53,19 +53,6 @@
extern int csio_fcoe_rnodes;
extern int csio_fdmi_enable;
-/* State machine evets */
-enum csio_ln_ev {
- CSIO_LNE_NONE = (uint32_t)0,
- CSIO_LNE_LINKUP,
- CSIO_LNE_FAB_INIT_DONE,
- CSIO_LNE_LINK_DOWN,
- CSIO_LNE_DOWN_LINK,
- CSIO_LNE_LOGO,
- CSIO_LNE_CLOSE,
- CSIO_LNE_MAX_EVENT,
-};
-
-
struct csio_fcf_info {
struct list_head list;
uint8_t priority;
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index debd369741197..e8382cc5cf23c 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -28,7 +28,12 @@ MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
-static struct class *cxlflash_class;
+static char *cxlflash_devnode(const struct device *dev, umode_t *mode);
+static const struct class cxlflash_class = {
+ .name = "cxlflash",
+ .devnode = cxlflash_devnode,
+};
+
static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
@@ -3602,7 +3607,7 @@ static int init_chrdev(struct cxlflash_cfg *cfg)
goto err1;
}
- char_dev = device_create(cxlflash_class, NULL, devno,
+ char_dev = device_create(&cxlflash_class, NULL, devno,
NULL, "cxlflash%d", minor);
if (IS_ERR(char_dev)) {
rc = PTR_ERR(char_dev);
@@ -3880,14 +3885,12 @@ static int cxlflash_class_init(void)
cxlflash_major = MAJOR(devno);
- cxlflash_class = class_create("cxlflash");
- if (IS_ERR(cxlflash_class)) {
- rc = PTR_ERR(cxlflash_class);
+ rc = class_register(&cxlflash_class);
+ if (rc) {
pr_err("%s: class_create failed rc=%d\n", __func__, rc);
goto err;
}
- cxlflash_class->devnode = cxlflash_devnode;
out:
pr_debug("%s: returning rc=%d\n", __func__, rc);
return rc;
@@ -3903,7 +3906,7 @@ static void cxlflash_class_exit(void)
{
dev_t devno = MKDEV(cxlflash_major, 0);
- class_destroy(cxlflash_class);
+ class_unregister(&cxlflash_class);
unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
}
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 944ea4e0cc455..b6eaf49dfb004 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -46,9 +46,6 @@ static int tur_done(struct scsi_device *sdev, struct hp_sw_dh_data *h,
int ret = SCSI_DH_IO;
switch (sshdr->sense_key) {
- case UNIT_ATTENTION:
- ret = SCSI_DH_IMM_RETRY;
- break;
case NOT_READY:
if (sshdr->asc == 0x04 && sshdr->ascq == 2) {
/*
@@ -85,11 +82,24 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
int ret, res;
blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = SCMD_FAILURE_NO_LIMIT,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
+ .failures = &failures,
};
-retry:
res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT,
HP_SW_RETRIES, &exec_args);
if (res > 0 && scsi_sense_valid(&sshdr)) {
@@ -104,9 +114,6 @@ retry:
ret = SCSI_DH_IO;
}
- if (ret == SCSI_DH_IMM_RETRY)
- goto retry;
-
return ret;
}
@@ -122,14 +129,31 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
struct scsi_sense_hdr sshdr;
struct scsi_device *sdev = h->sdev;
int res, rc;
- int retry_cnt = HP_SW_RETRIES;
blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
+ struct scsi_failure failure_defs[] = {
+ {
+ /*
+ * LUN not ready - manual intervention required
+ *
+ * Switch-over in progress, retry.
+ */
+ .sense = NOT_READY,
+ .asc = 0x04,
+ .ascq = 0x03,
+ .allowed = HP_SW_RETRIES,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
+ .failures = &failures,
};
-retry:
res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT,
HP_SW_RETRIES, &exec_args);
if (!res) {
@@ -144,13 +168,6 @@ retry:
switch (sshdr.sense_key) {
case NOT_READY:
if (sshdr.asc == 0x04 && sshdr.ascq == 3) {
- /*
- * LUN not ready - manual intervention required
- *
- * Switch-over in progress, retry.
- */
- if (--retry_cnt)
- goto retry;
rc = SCSI_DH_RETRY;
break;
}
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 1ac2ae17e8be3..f8a09e3eba582 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -485,43 +485,17 @@ static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
static int mode_select_handle_sense(struct scsi_device *sdev,
struct scsi_sense_hdr *sense_hdr)
{
- int err = SCSI_DH_IO;
struct rdac_dh_data *h = sdev->handler_data;
if (!scsi_sense_valid(sense_hdr))
- goto done;
-
- switch (sense_hdr->sense_key) {
- case NO_SENSE:
- case ABORTED_COMMAND:
- case UNIT_ATTENTION:
- err = SCSI_DH_RETRY;
- break;
- case NOT_READY:
- if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
- /* LUN Not Ready and is in the Process of Becoming
- * Ready
- */
- err = SCSI_DH_RETRY;
- break;
- case ILLEGAL_REQUEST:
- if (sense_hdr->asc == 0x91 && sense_hdr->ascq == 0x36)
- /*
- * Command Lock contention
- */
- err = SCSI_DH_IMM_RETRY;
- break;
- default:
- break;
- }
+ return SCSI_DH_IO;
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
"MODE_SELECT returned with sense %02x/%02x/%02x",
(char *) h->ctlr->array_name, h->ctlr->index,
sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);
-done:
- return err;
+ return SCSI_DH_IO;
}
static void send_mode_select(struct work_struct *work)
@@ -530,7 +504,7 @@ static void send_mode_select(struct work_struct *work)
container_of(work, struct rdac_controller, ms_work);
struct scsi_device *sdev = ctlr->ms_sdev;
struct rdac_dh_data *h = sdev->handler_data;
- int rc, err, retry_cnt = RDAC_RETRY_COUNT;
+ int rc, err;
struct rdac_queue_data *tmp, *qdata;
LIST_HEAD(list);
unsigned char cdb[MAX_COMMAND_SIZE];
@@ -538,8 +512,49 @@ static void send_mode_select(struct work_struct *work)
unsigned int data_size;
blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = NO_SENSE,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = ABORTED_COMMAND,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* LUN Not Ready and is in the Process of Becoming Ready */
+ {
+ .sense = NOT_READY,
+ .asc = 0x04,
+ .ascq = 0x01,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* Command Lock contention */
+ {
+ .sense = ILLEGAL_REQUEST,
+ .asc = 0x91,
+ .ascq = 0x36,
+ .allowed = SCMD_FAILURE_NO_LIMIT,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .total_allowed = RDAC_RETRY_COUNT,
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
+ .failures = &failures,
};
spin_lock(&ctlr->ms_lock);
@@ -548,15 +563,12 @@ static void send_mode_select(struct work_struct *work)
ctlr->ms_sdev = NULL;
spin_unlock(&ctlr->ms_lock);
- retry:
memset(cdb, 0, sizeof(cdb));
data_size = rdac_failover_get(ctlr, &list, cdb);
- RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
- "%s MODE_SELECT command",
- (char *) h->ctlr->array_name, h->ctlr->index,
- (retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
+ RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, queueing MODE_SELECT command",
+ (char *)h->ctlr->array_name, h->ctlr->index);
rc = scsi_execute_cmd(sdev, cdb, opf, &h->ctlr->mode_select, data_size,
RDAC_TIMEOUT * HZ, RDAC_RETRIES, &exec_args);
@@ -570,10 +582,6 @@ static void send_mode_select(struct work_struct *work)
err = SCSI_DH_IO;
} else {
err = mode_select_handle_sense(sdev, &sshdr);
- if (err == SCSI_DH_RETRY && retry_cnt--)
- goto retry;
- if (err == SCSI_DH_IMM_RETRY)
- goto retry;
}
list_for_each_entry_safe(qdata, tmp, &list, entry) {
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 97816a0e6240a..0175d2282b458 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -2753,7 +2753,7 @@ static void __exit esp_exit(void)
}
MODULE_DESCRIPTION("ESP SCSI driver core");
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index c64a085a7ee2f..453665ac6020b 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -597,7 +597,7 @@ static const struct attribute_group *fcoe_fcf_attr_groups[] = {
NULL,
};
-static struct bus_type fcoe_bus_type;
+static const struct bus_type fcoe_bus_type;
static int fcoe_bus_match(struct device *dev,
struct device_driver *drv)
@@ -664,7 +664,7 @@ static struct attribute *fcoe_bus_attrs[] = {
};
ATTRIBUTE_GROUPS(fcoe_bus);
-static struct bus_type fcoe_bus_type = {
+static const struct bus_type fcoe_bus_type = {
.name = "fcoe",
.match = &fcoe_bus_match,
.bus_groups = fcoe_bus_groups,
diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c
index a61e0c5e65066..0c5e57c7e3229 100644
--- a/drivers/scsi/fnic/fnic_attrs.c
+++ b/drivers/scsi/fnic/fnic_attrs.c
@@ -14,13 +14,13 @@ static ssize_t fnic_show_state(struct device *dev,
struct fc_lport *lp = shost_priv(class_to_shost(dev));
struct fnic *fnic = lport_priv(lp);
- return snprintf(buf, PAGE_SIZE, "%s\n", fnic_state_str[fnic->state]);
+ return sysfs_emit(buf, "%s\n", fnic_state_str[fnic->state]);
}
static ssize_t fnic_show_drv_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
+ return sysfs_emit(buf, "%s\n", DRV_VERSION);
}
static ssize_t fnic_show_link_state(struct device *dev,
@@ -28,8 +28,7 @@ static ssize_t fnic_show_link_state(struct device *dev,
{
struct fc_lport *lp = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%s\n", (lp->link_up)
- ? "Link Up" : "Link Down");
+ return sysfs_emit(buf, "%s\n", (lp->link_up) ? "Link Up" : "Link Down");
}
static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index fc4cee91b175c..2ba61dba4569b 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1961,8 +1961,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
- "Issuing host reset due to out of order IO\n");
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "Issuing host reset due to out of order IO\n");
ret = FAILED;
goto fnic_abort_cmd_end;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index bbb7b2d9ffcfb..35f8e00850d6c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -1507,7 +1507,12 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
scsi_block_requests(shost);
hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
- del_timer_sync(&hisi_hba->timer);
+ /*
+ * hisi_hba->timer is only used by v1/v2 hw; check hw->sht, which is
+ * also only set for v1/v2 hw, so that this is skipped for v3 hw
+ */
+ if (hisi_hba->hw->sht)
+ del_timer_sync(&hisi_hba->timer);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
@@ -1573,7 +1578,7 @@ static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
return -EPERM;
}
- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
+ if (hisi_sas_debugfs_enable)
hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
return 0;
@@ -1792,7 +1797,7 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
if (dev_is_sata(device)) {
struct ata_link *link = &device->sata_dev.ap->link;
- rc = ata_wait_after_reset(link, HISI_SAS_WAIT_PHYUP_TIMEOUT,
+ rc = ata_wait_after_reset(link, jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT,
smp_ata_check_ready_type);
} else {
msleep(2000);
@@ -1961,10 +1966,18 @@ static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct hisi_sas_internal_abort_data *timeout = data;
- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) {
- down(&hisi_hba->sem);
+ if (hisi_sas_debugfs_enable) {
+ /*
+ * If the timeout occurs in the device-gone scenario, skip taking
+ * the semaphore to avoid a circular dependency like:
+ * hisi_sas_dev_gone() -> down() -> ... ->
+ * hisi_sas_internal_abort_timeout() -> down().
+ */
+ if (!timeout->rst_ha_timeout)
+ down(&hisi_hba->sem);
hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
- up(&hisi_hba->sem);
+ if (!timeout->rst_ha_timeout)
+ up(&hisi_hba->sem);
}
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
@@ -2617,7 +2630,8 @@ static __exit void hisi_sas_exit(void)
{
sas_release_transport(hisi_sas_stt);
- debugfs_remove(hisi_sas_debugfs_dir);
+ if (hisi_sas_debugfs_enable)
+ debugfs_remove(hisi_sas_debugfs_dir);
}
module_init(hisi_sas_init);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index b56fbc61a15ae..34f96cc35342b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2244,7 +2244,15 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
if ((dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) &&
(sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) {
- ts->stat = SAS_PROTO_RESPONSE;
+ if (task->ata_task.use_ncq) {
+ struct domain_device *device = task->dev;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+
+ sas_dev->dev_status = HISI_SAS_DEV_NCQ_ERR;
+ slot->abort = 1;
+ } else {
+ ts->stat = SAS_PROTO_RESPONSE;
+ }
} else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
ts->residual = trans_tx_fail_type;
ts->stat = SAS_DATA_UNDERRUN;
@@ -4902,7 +4910,8 @@ err_out_unregister_ha:
err_out_remove_host:
scsi_remove_host(shost);
err_out_undo_debugfs:
- debugfs_exit_v3_hw(hisi_hba);
+ if (hisi_sas_debugfs_enable)
+ debugfs_exit_v3_hw(hisi_hba);
err_out_free_host:
hisi_sas_free(hisi_hba);
scsi_host_put(shost);
@@ -4934,7 +4943,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
struct Scsi_Host *shost = sha->shost;
pm_runtime_get_noresume(dev);
- del_timer_sync(&hisi_hba->timer);
sas_unregister_ha(sha);
flush_workqueue(hisi_hba->wq);
@@ -4942,7 +4950,9 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
hisi_sas_free(hisi_hba);
- debugfs_exit_v3_hw(hisi_hba);
+ if (hisi_sas_debugfs_enable)
+ debugfs_exit_v3_hw(hisi_hba);
+
scsi_host_put(shost);
}
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index d7f51b84f3c78..2d92549e52431 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -353,12 +353,13 @@ static void scsi_host_dev_release(struct device *dev)
if (shost->shost_state == SHOST_CREATED) {
/*
- * Free the shost_dev device name here if scsi_host_alloc()
- * and scsi_host_put() have been called but neither
+ * Free the shost_dev device name and remove the proc host dir
+ * here if scsi_host_{alloc,put}() have been called but neither
* scsi_host_add() nor scsi_remove_host() has been called.
* This avoids that the memory allocated for the shost_dev
- * name is leaked.
+ * name as well as the proc dir structure are leaked.
*/
+ scsi_proc_hostdir_rm(shost->hostt);
kfree(dev_name(&shost->shost_dev));
}
@@ -371,7 +372,7 @@ static void scsi_host_dev_release(struct device *dev)
kfree(shost);
}
-static struct device_type scsi_host_type = {
+static const struct device_type scsi_host_type = {
.name = "scsi_host",
.release = scsi_host_dev_release,
};
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 46d0b3a0e12fb..05b126bfd18b5 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -3482,8 +3482,7 @@ static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- vhost->login_buf->resp.partition_name);
+ return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.partition_name);
}
static ssize_t ibmvfc_show_host_device_name(struct device *dev,
@@ -3492,8 +3491,7 @@ static ssize_t ibmvfc_show_host_device_name(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- vhost->login_buf->resp.device_name);
+ return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.device_name);
}
static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
@@ -3502,8 +3500,7 @@ static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- vhost->login_buf->resp.port_loc_code);
+ return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.port_loc_code);
}
static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
@@ -3512,8 +3509,7 @@ static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- vhost->login_buf->resp.drc_name);
+ return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.drc_name);
}
static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
@@ -3521,7 +3517,8 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(vhost->login_buf->resp.version));
+ return sysfs_emit(buf, "%d\n",
+ be32_to_cpu(vhost->login_buf->resp.version));
}
static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
@@ -3529,7 +3526,8 @@ static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities));
+ return sysfs_emit(buf, "%llx\n",
+ be64_to_cpu(vhost->login_buf->resp.capabilities));
}
/**
@@ -3550,7 +3548,7 @@ static ssize_t ibmvfc_show_log_level(struct device *dev,
int len;
spin_lock_irqsave(shost->host_lock, flags);
- len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
+ len = sysfs_emit(buf, "%d\n", vhost->log_level);
spin_unlock_irqrestore(shost->host_lock, flags);
return len;
}
@@ -3589,7 +3587,7 @@ static ssize_t ibmvfc_show_scsi_channels(struct device *dev,
int len;
spin_lock_irqsave(shost->host_lock, flags);
- len = snprintf(buf, PAGE_SIZE, "%d\n", scsi->desired_queues);
+ len = sysfs_emit(buf, "%d\n", scsi->desired_queues);
spin_unlock_irqrestore(shost->host_lock, flags);
return len;
}
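
The ibmvfc sysfs show handlers above move from snprintf(buf, PAGE_SIZE, ...) to sysfs_emit(), which bakes in the one-PAGE_SIZE sysfs contract so the size argument no longer has to be passed by hand. A hedged sketch of the pattern with a made-up attribute:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	/* sysfs_emit() never writes past PAGE_SIZE and returns the number
	 * of bytes emitted, so the show routine stays a one-liner. */
	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%d\n", 42);
	}
	static DEVICE_ATTR_RO(example);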
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 4dc411a581070..68b99924ee4ff 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1551,18 +1551,18 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
if (vscsi->client_data.partition_number == 0)
vscsi->client_data.partition_number =
be32_to_cpu(info->partition_number);
- strncpy(vscsi->client_data.srp_version, info->srp_version,
+ strscpy(vscsi->client_data.srp_version, info->srp_version,
sizeof(vscsi->client_data.srp_version));
- strncpy(vscsi->client_data.partition_name, info->partition_name,
+ strscpy(vscsi->client_data.partition_name, info->partition_name,
sizeof(vscsi->client_data.partition_name));
vscsi->client_data.mad_version = be32_to_cpu(info->mad_version);
vscsi->client_data.os_type = be32_to_cpu(info->os_type);
/* Copy our info */
- strncpy(info->srp_version, SRP_VERSION,
- sizeof(info->srp_version));
- strncpy(info->partition_name, vscsi->dds.partition_name,
- sizeof(info->partition_name));
+ strscpy_pad(info->srp_version, SRP_VERSION,
+ sizeof(info->srp_version));
+ strscpy_pad(info->partition_name, vscsi->dds.partition_name,
+ sizeof(info->partition_name));
info->partition_number = cpu_to_be32(vscsi->dds.partition_num);
info->mad_version = cpu_to_be32(MAD_VERSION_1);
info->os_type = cpu_to_be32(LINUX);
@@ -1645,8 +1645,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
be64_to_cpu(mad->buffer),
vscsi->dds.window[LOCAL].liobn, token);
if (rc == H_SUCCESS) {
- strncpy(cap->name, dev_name(&vscsi->dma_dev->dev),
- SRP_MAX_LOC_LEN);
+ strscpy_pad(cap->name, dev_name(&vscsi->dma_dev->dev),
+ sizeof(cap->name));
len = olen - min_len;
status = VIOSRP_MAD_SUCCESS;
@@ -3616,13 +3616,13 @@ static void ibmvscsis_remove(struct vio_dev *vdev)
static ssize_t system_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
+ return sysfs_emit(buf, "%s\n", system_id);
}
static ssize_t partition_number_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
+ return sysfs_emit(buf, "%x\n", partition_number);
}
static ssize_t unit_address_show(struct device *dev,
@@ -3630,7 +3630,7 @@ static ssize_t unit_address_show(struct device *dev,
{
struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev);
- return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address);
+ return sysfs_emit(buf, "%x\n", vscsi->dma_dev->unit_address);
}
static int ibmvscsis_get_system_info(void)
@@ -3650,7 +3650,7 @@ static int ibmvscsis_get_system_info(void)
name = of_get_property(rootdn, "ibm,partition-name", NULL);
if (name)
- strncpy(partition_name, name, sizeof(partition_name));
+ strscpy(partition_name, name, sizeof(partition_name));
num = of_get_property(rootdn, "ibm,partition-no", NULL);
if (num)
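
The ibmvscsis hunks above swap strncpy() for strscpy() and strscpy_pad(): strscpy() always NUL-terminates, and the _pad variant additionally zero-fills the rest of the destination, which matters for fields such as srp_version and partition_name that are copied into a buffer shared with the client. A small sketch with a hypothetical struct:

	#include <linux/string.h>

	struct example_info {
		char name[16];
	};

	static void example_fill(struct example_info *info, const char *src)
	{
		/* Truncates safely, always NUL-terminates, and zeroes the
		 * tail of name[] so no stale bytes leak to the peer. */
		strscpy_pad(info->name, src, sizeof(info->name));
	}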
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 6277162a028bb..c582a3932ceab 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -137,7 +137,7 @@ static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, c
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
- return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
+ return sysfs_emit(buf, "%d\n", ihost->id);
}
static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 494a671fb5564..fb04b0b515ab1 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -204,6 +204,6 @@ static struct platform_driver esp_jazz_driver = {
module_platform_driver(esp_jazz_driver);
MODULE_DESCRIPTION("JAZZ ESP SCSI driver");
-MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
+MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/scsi/libfc/fc_encode.h b/drivers/scsi/libfc/fc_encode.h
index 7dcac3b6baa7e..6b7e4ca6b7b5e 100644
--- a/drivers/scsi/libfc/fc_encode.h
+++ b/drivers/scsi/libfc/fc_encode.h
@@ -136,22 +136,24 @@ static inline int fc_ct_ns_fill(struct fc_lport *lport,
break;
case FC_NS_RSPN_ID:
- len = strnlen(fc_host_symbolic_name(lport->host), 255);
+ len = strnlen(fc_host_symbolic_name(lport->host),
+ FC_SYMBOLIC_NAME_SIZE);
ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rspn) + len,
FC_FST_DIR, FC_NS_SUBTYPE);
hton24(ct->payload.spn.fr_fid.fp_fid, lport->port_id);
- strncpy(ct->payload.spn.fr_name,
- fc_host_symbolic_name(lport->host), len);
+ memcpy(ct->payload.spn.fr_name,
+ fc_host_symbolic_name(lport->host), len);
ct->payload.spn.fr_name_len = len;
break;
case FC_NS_RSNN_NN:
- len = strnlen(fc_host_symbolic_name(lport->host), 255);
+ len = strnlen(fc_host_symbolic_name(lport->host),
+ FC_SYMBOLIC_NAME_SIZE);
ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rsnn) + len,
FC_FST_DIR, FC_NS_SUBTYPE);
put_unaligned_be64(lport->wwnn, &ct->payload.snn.fr_wwn);
- strncpy(ct->payload.snn.fr_name,
- fc_host_symbolic_name(lport->host), len);
+ memcpy(ct->payload.snn.fr_name,
+ fc_host_symbolic_name(lport->host), len);
ct->payload.snn.fr_name_len = len;
break;
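
In fc_ct_ns_fill() above, the symbolic-name length is now bounded by FC_SYMBOLIC_NAME_SIZE and copied with memcpy() over exactly that many bytes; strncpy() into a fixed-size wire field is a poor fit because the field is not NUL-terminated and carries its length in a separate member. A sketch of the pattern (the 255-byte limit below is an assumption standing in for FC_SYMBOLIC_NAME_SIZE):

	#include <linux/string.h>

	#define EXAMPLE_NAME_SIZE 255

	static size_t example_copy_name(char *dst, const char *symbolic_name)
	{
		size_t len = strnlen(symbolic_name, EXAMPLE_NAME_SIZE);

		/* Copy only the measured bytes; the on-wire record stores
		 * the length in a separate field, so no NUL is needed. */
		memcpy(dst, symbolic_name, len);
		return len;
	}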
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index a2204674b6808..f6e6db8b8aba9 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -135,7 +135,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
static inline void *alloc_smp_req(int size)
{
- u8 *p = kzalloc(size, GFP_KERNEL);
+ u8 *p = kzalloc(ALIGN(size, ARCH_DMA_MINALIGN), GFP_KERNEL);
if (p)
p[0] = SMP_REQUEST;
return p;
@@ -1621,6 +1621,16 @@ out_err:
/* ---------- Domain revalidation ---------- */
+static void sas_get_sas_addr_and_dev_type(struct smp_disc_resp *disc_resp,
+ u8 *sas_addr,
+ enum sas_device_type *type)
+{
+ memcpy(sas_addr, disc_resp->disc.attached_sas_addr, SAS_ADDR_SIZE);
+ *type = to_dev_type(&disc_resp->disc);
+ if (*type == SAS_PHY_UNUSED)
+ memset(sas_addr, 0, SAS_ADDR_SIZE);
+}
+
static int sas_get_phy_discover(struct domain_device *dev,
int phy_id, struct smp_disc_resp *disc_resp)
{
@@ -1674,13 +1684,8 @@ int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
return -ENOMEM;
res = sas_get_phy_discover(dev, phy_id, disc_resp);
- if (res == 0) {
- memcpy(sas_addr, disc_resp->disc.attached_sas_addr,
- SAS_ADDR_SIZE);
- *type = to_dev_type(&disc_resp->disc);
- if (*type == 0)
- memset(sas_addr, 0, SAS_ADDR_SIZE);
- }
+ if (res == 0)
+ sas_get_sas_addr_and_dev_type(disc_resp, sas_addr, type);
kfree(disc_resp);
return res;
}
@@ -1940,6 +1945,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
enum sas_device_type type = SAS_PHY_UNUSED;
+ struct smp_disc_resp *disc_resp;
u8 sas_addr[SAS_ADDR_SIZE];
char msg[80] = "";
int res;
@@ -1951,33 +1957,41 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
SAS_ADDR(dev->sas_addr), phy_id, msg);
memset(sas_addr, 0, SAS_ADDR_SIZE);
- res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type);
+ disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
+ if (!disc_resp)
+ return -ENOMEM;
+
+ res = sas_get_phy_discover(dev, phy_id, disc_resp);
switch (res) {
case SMP_RESP_NO_PHY:
phy->phy_state = PHY_NOT_PRESENT;
sas_unregister_devs_sas_addr(dev, phy_id, last);
- return res;
+ goto out_free_resp;
case SMP_RESP_PHY_VACANT:
phy->phy_state = PHY_VACANT;
sas_unregister_devs_sas_addr(dev, phy_id, last);
- return res;
+ goto out_free_resp;
case SMP_RESP_FUNC_ACC:
break;
case -ECOMM:
break;
default:
- return res;
+ goto out_free_resp;
}
+ if (res == 0)
+ sas_get_sas_addr_and_dev_type(disc_resp, sas_addr, &type);
+
if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {
phy->phy_state = PHY_EMPTY;
sas_unregister_devs_sas_addr(dev, phy_id, last);
/*
- * Even though the PHY is empty, for convenience we discover
- * the PHY to update the PHY info, like negotiated linkrate.
+ * Even though the PHY is empty, for convenience we update
+ * the PHY info, like negotiated linkrate.
*/
- sas_ex_phy_discover(dev, phy_id);
- return res;
+ if (res == 0)
+ sas_set_ex_phy(dev, phy_id, disc_resp);
+ goto out_free_resp;
} else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
dev_type_flutter(type, phy->attached_dev_type)) {
struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id);
@@ -1989,7 +2003,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
action = ", needs recovery";
pr_debug("ex %016llx phy%02d broadcast flutter%s\n",
SAS_ADDR(dev->sas_addr), phy_id, action);
- return res;
+ goto out_free_resp;
}
/* we always have to delete the old device when we went here */
@@ -1998,7 +2012,10 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
SAS_ADDR(phy->attached_sas_addr));
sas_unregister_devs_sas_addr(dev, phy_id, last);
- return sas_discover_new(dev, phy_id);
+ res = sas_discover_new(dev, phy_id);
+out_free_resp:
+ kfree(disc_resp);
+ return res;
}
/**
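
Two things change in sas_expander.c above: alloc_smp_req() rounds the request size up to ARCH_DMA_MINALIGN so the SMP frame cannot share a DMA-unsafe cacheline with neighbouring allocations, and sas_rediscover_dev() allocates the DISCOVER response once and parses it with the new sas_get_sas_addr_and_dev_type() helper instead of re-issuing the discover through sas_ex_phy_discover(). A minimal sketch of the alignment idea, with an invented allocator name:

	#include <linux/cache.h>
	#include <linux/slab.h>

	static void *example_alloc_dma_buf(size_t size)
	{
		/* Round the allocation up so the buffer owns whole
		 * ARCH_DMA_MINALIGN-sized chunks; the CPU then never
		 * dirties a cacheline the device is DMA-ing into. */
		return kzalloc(ALIGN(size, ARCH_DMA_MINALIGN), GFP_KERNEL);
	}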
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 04d608ea91060..98ca7df003efb 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -535,6 +535,44 @@ struct lpfc_cgn_acqe_stat {
atomic64_t warn;
};
+enum lpfc_fc_flag {
+ /* Several of these flags are HBA centric and should be moved to
+ * phba->link_flag (e.g. FC_PTP, FC_PUBLIC_LOOP)
+ */
+ FC_PT2PT, /* pt2pt with no fabric */
+ FC_PT2PT_PLOGI, /* pt2pt initiate PLOGI */
+ FC_DISC_TMO, /* Discovery timer running */
+ FC_PUBLIC_LOOP, /* Public loop */
+ FC_LBIT, /* LOGIN bit in loopinit set */
+ FC_RSCN_MODE, /* RSCN cmd rcv'ed */
+ FC_NLP_MORE, /* More node to process in node tbl */
+ FC_OFFLINE_MODE, /* Interface is offline for diag */
+ FC_FABRIC, /* We are fabric attached */
+ FC_VPORT_LOGO_RCVD, /* LOGO received on vport */
+ FC_RSCN_DISCOVERY, /* Auth all devices after RSCN */
+ FC_LOGO_RCVD_DID_CHNG, /* FDISC on phys port detect DID chng */
+ FC_PT2PT_NO_NVME, /* Don't send NVME PRLI */
+ FC_SCSI_SCAN_TMO, /* scsi scan timer running */
+ FC_ABORT_DISCOVERY, /* we want to abort discovery */
+ FC_NDISC_ACTIVE, /* NPort discovery active */
+ FC_BYPASSED_MODE, /* NPort is in bypassed mode */
+ FC_VPORT_NEEDS_REG_VPI, /* Needs to have its vpi registered */
+ FC_RSCN_DEFERRED, /* A deferred RSCN being processed */
+ FC_VPORT_NEEDS_INIT_VPI, /* Need to INIT_VPI before FDISC */
+ FC_VPORT_CVL_RCVD, /* VLink failed due to CVL */
+ FC_VFI_REGISTERED, /* VFI is registered */
+ FC_FDISC_COMPLETED, /* FDISC completed */
+ FC_DISC_DELAYED, /* Delay NPort discovery */
+};
+
+enum lpfc_load_flag {
+ FC_LOADING, /* HBA in process of loading drvr */
+ FC_UNLOADING, /* HBA in process of unloading drvr */
+ FC_ALLOW_FDMI, /* port is ready for FDMI requests */
+ FC_ALLOW_VMID, /* Allow VMID I/Os */
+ FC_DEREGISTER_ALL_APP_ID /* Deregister all VMIDs */
+};
+
struct lpfc_vport {
struct lpfc_hba *phba;
struct list_head listentry;
@@ -549,34 +587,7 @@ struct lpfc_vport {
uint8_t vpi_state;
#define LPFC_VPI_REGISTERED 0x1
- uint32_t fc_flag; /* FC flags */
-/* Several of these flags are HBA centric and should be moved to
- * phba->link_flag (e.g. FC_PTP, FC_PUBLIC_LOOP)
- */
-#define FC_PT2PT 0x1 /* pt2pt with no fabric */
-#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */
-#define FC_DISC_TMO 0x4 /* Discovery timer running */
-#define FC_PUBLIC_LOOP 0x8 /* Public loop */
-#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */
-#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */
-#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
-#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
-#define FC_FABRIC 0x100 /* We are fabric attached */
-#define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */
-#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
-#define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/
-#define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */
-#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
-#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
-#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
-#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
-#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
-#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
-#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */
-#define FC_VPORT_CVL_RCVD 0x400000 /* VLink failed due to CVL */
-#define FC_VFI_REGISTERED 0x800000 /* VFI is registered */
-#define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */
-#define FC_DISC_DELAYED 0x2000000/* Delay NPort discovery */
+ unsigned long fc_flag; /* FC flags */
uint32_t ct_flags;
#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
@@ -587,16 +598,18 @@ struct lpfc_vport {
#define FC_CT_RPRT_DEFER 0x20 /* Defer issuing FDMI RPRT */
struct list_head fc_nodes;
+ spinlock_t fc_nodes_list_lock; /* spinlock for fc_nodes list */
/* Keep counters for the number of entries in each list. */
- uint16_t fc_plogi_cnt;
- uint16_t fc_adisc_cnt;
- uint16_t fc_reglogin_cnt;
- uint16_t fc_prli_cnt;
- uint16_t fc_unmap_cnt;
- uint16_t fc_map_cnt;
- uint16_t fc_npr_cnt;
- uint16_t fc_unused_cnt;
+ atomic_t fc_plogi_cnt;
+ atomic_t fc_adisc_cnt;
+ atomic_t fc_reglogin_cnt;
+ atomic_t fc_prli_cnt;
+ atomic_t fc_unmap_cnt;
+ atomic_t fc_map_cnt;
+ atomic_t fc_npr_cnt;
+ atomic_t fc_unused_cnt;
+
struct serv_parm fc_sparam; /* buffer for our service parameters */
uint32_t fc_myDID; /* fibre channel S_ID */
@@ -642,12 +655,7 @@ struct lpfc_vport {
struct timer_list els_tmofunc;
struct timer_list delayed_disc_tmo;
- uint8_t load_flag;
-#define FC_LOADING 0x1 /* HBA in process of loading drvr */
-#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
-#define FC_ALLOW_FDMI 0x4 /* port is ready for FDMI requests */
-#define FC_ALLOW_VMID 0x8 /* Allow VMID I/Os */
-#define FC_DEREGISTER_ALL_APP_ID 0x10 /* Deregister all VMIDs */
+ unsigned long load_flag;
/* Vport Config Parameters */
uint32_t cfg_scan_down;
uint32_t cfg_lun_queue_depth;
@@ -1325,7 +1333,6 @@ struct lpfc_hba {
struct timer_list fabric_block_timer;
unsigned long bit_flags;
atomic_t num_rsrc_err;
- atomic_t num_cmd_success;
unsigned long last_rsrc_error_time;
unsigned long last_ramp_down_time;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1430,6 +1437,7 @@ struct lpfc_hba {
struct timer_list inactive_vmid_poll;
/* RAS Support */
+ spinlock_t ras_fwlog_lock; /* do not take while holding another lock */
struct lpfc_ras_fwlog ras_fwlog;
uint32_t iocb_cnt;
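
The lpfc.h hunks above turn fc_flag and load_flag into unsigned long bitmaps indexed by enum bit numbers (FC_PT2PT, FC_UNLOADING, ...), so callers can use the atomic set_bit()/clear_bit()/test_bit() family instead of taking shost->host_lock around a read-modify-write of a u32; the per-state node counters become atomic_t for the same reason. A small sketch of the bitmap pattern with invented names:

	#include <linux/bitops.h>

	enum example_flags {
		EXAMPLE_FABRIC,		/* bit numbers, not masks */
		EXAMPLE_OFFLINE,
	};

	struct example_port {
		unsigned long flags;
	};

	static void example_go_offline(struct example_port *p)
	{
		/* Atomic read-modify-write of one bit; no lock required. */
		set_bit(EXAMPLE_OFFLINE, &p->flags);
	}

	static bool example_is_fabric(struct example_port *p)
	{
		return test_bit(EXAMPLE_FABRIC, &p->flags);
	}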
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index d3a5d6ecdf7d2..3c534b3cfe918 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -344,6 +344,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_fc4_ctrl_stat *cstat;
uint64_t data1, data2, data3;
uint64_t totin, totout, tot;
+ unsigned long iflags;
char *statep;
int i;
int len = 0;
@@ -543,7 +544,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
nrport = NULL;
@@ -617,7 +618,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto unlock_buf_done;
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
if (!lport)
goto buffer_done;
@@ -681,7 +682,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
goto buffer_done;
unlock_buf_done:
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
buffer_done:
len = strnlen(buf, PAGE_SIZE);
@@ -1091,14 +1092,14 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
break;
}
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
- if (vport->fc_flag & FC_PUBLIC_LOOP)
+ if (test_bit(FC_PUBLIC_LOOP, &vport->fc_flag))
len += scnprintf(buf + len, PAGE_SIZE-len,
" Public Loop\n");
else
len += scnprintf(buf + len, PAGE_SIZE-len,
" Private Loop\n");
} else {
- if (vport->fc_flag & FC_FABRIC) {
+ if (test_bit(FC_FABRIC, &vport->fc_flag)) {
if (phba->sli_rev == LPFC_SLI_REV4 &&
vport->port_type == LPFC_PHYSICAL_PORT &&
phba->sli4_hba.fawwpn_flag &
@@ -1260,7 +1261,8 @@ lpfc_num_discovered_ports_show(struct device *dev,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
return scnprintf(buf, PAGE_SIZE, "%d\n",
- vport->fc_map_cnt + vport->fc_unmap_cnt);
+ atomic_read(&vport->fc_map_cnt) +
+ atomic_read(&vport->fc_unmap_cnt));
}
/**
@@ -1289,7 +1291,7 @@ lpfc_issue_lip(struct Scsi_Host *shost)
* If the link is offline, disabled or BLOCK_MGMT_IO
* it doesn't make any sense to allow issue_lip
*/
- if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag) ||
(phba->hba_flag & LINK_DISABLED) ||
(phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
return -EPERM;
@@ -1303,8 +1305,8 @@ lpfc_issue_lip(struct Scsi_Host *shost)
pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->u.mb.mbxOwner = OWN_HOST;
- if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
- vport->fc_flag &= ~FC_PT2PT_NO_NVME;
+ if (test_bit(FC_PT2PT, &vport->fc_flag))
+ clear_bit(FC_PT2PT_NO_NVME, &vport->fc_flag);
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
@@ -1494,7 +1496,8 @@ lpfc_reset_pci_bus(struct lpfc_hba *phba)
if (shost) {
phba_other =
((struct lpfc_vport *)shost->hostdata)->phba;
- if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) {
+ if (!test_bit(FC_OFFLINE_MODE,
+ &phba_other->pport->fc_flag)) {
lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT,
"8349 WWPN = 0x%02x%02x%02x%02x"
"%02x%02x%02x%02x is not "
@@ -1549,7 +1552,7 @@ lpfc_selective_reset(struct lpfc_hba *phba)
if (!phba->cfg_enable_hba_reset)
return -EACCES;
- if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
+ if (!test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag)) {
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
if (status != 0)
@@ -1688,7 +1691,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
{
struct completion online_compl;
struct pci_dev *pdev = phba->pcidev;
- uint32_t before_fc_flag;
+ unsigned long before_fc_flag;
uint32_t sriov_nr_virtfn;
uint32_t reg_val;
int status = 0, rc = 0;
@@ -1759,7 +1762,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
}
/* keep the original port state */
- if (before_fc_flag & FC_OFFLINE_MODE) {
+ if (test_bit(FC_OFFLINE_MODE, &before_fc_flag)) {
if (phba->fw_dump_cmpl)
phba->fw_dump_cmpl = NULL;
goto out;
@@ -2097,7 +2100,7 @@ board_mode_out:
*board_mode_str = '\0';
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"3097 Failed \"%s\", status(%d), "
- "fc_flag(x%x)\n",
+ "fc_flag(x%lx)\n",
buf, status, phba->pport->fc_flag);
return status;
}
@@ -2156,7 +2159,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
pmb->mbxOwner = OWN_HOST;
pmboxq->ctx_buf = NULL;
- if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ if (test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
rc = MBX_NOT_FINISHED;
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3764,15 +3767,14 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
static void
lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost;
struct lpfc_nodelist *ndlp;
+ unsigned long iflags;
#if (IS_ENABLED(CONFIG_NVME_FC))
struct lpfc_nvme_rport *rport;
struct nvme_fc_remote_port *remoteport = NULL;
#endif
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
@@ -3787,7 +3789,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
vport->cfg_devloss_tmo);
#endif
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
}
/**
@@ -3973,8 +3975,8 @@ lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
static int
lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
+ unsigned long iflags;
if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
return -EINVAL;
@@ -3982,14 +3984,13 @@ lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
if (val == vport->cfg_tgt_queue_depth)
return 0;
- spin_lock_irq(shost->host_lock);
vport->cfg_tgt_queue_depth = val;
/* Next loop thru nodelist and change cmd_qdepth */
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
-
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
return 0;
}
@@ -5235,8 +5236,8 @@ lpfc_vport_param_show(max_scsicmpl_time);
static int
lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp, *next_ndlp;
+ unsigned long iflags;
if (val == vport->cfg_max_scsicmpl_time)
return 0;
@@ -5244,13 +5245,13 @@ lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
return -EINVAL;
vport->cfg_max_scsicmpl_time = val;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
return 0;
}
lpfc_vport_param_store(max_scsicmpl_time);
@@ -5864,9 +5865,9 @@ lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val)
if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
return -EINVAL;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
state = phba->ras_fwlog.state;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
if (state == REG_INPROGRESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
@@ -6200,7 +6201,7 @@ sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
return -EINVAL;
- if (!(vport->fc_flag & FC_OFFLINE_MODE))
+ if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
return -EPERM;
spin_lock_irq(&phba->hbalock);
@@ -6429,26 +6430,22 @@ lpfc_get_host_port_type(struct Scsi_Host *shost)
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- spin_lock_irq(shost->host_lock);
-
if (vport->port_type == LPFC_NPIV_PORT) {
fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
} else if (lpfc_is_link_up(phba)) {
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
- if (vport->fc_flag & FC_PUBLIC_LOOP)
+ if (test_bit(FC_PUBLIC_LOOP, &vport->fc_flag))
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
else
fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
} else {
- if (vport->fc_flag & FC_FABRIC)
+ if (test_bit(FC_FABRIC, &vport->fc_flag))
fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
else
fc_host_port_type(shost) = FC_PORTTYPE_PTP;
}
} else
fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
-
- spin_unlock_irq(shost->host_lock);
}
/**
@@ -6461,9 +6458,7 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- spin_lock_irq(shost->host_lock);
-
- if (vport->fc_flag & FC_OFFLINE_MODE)
+ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
else {
switch (phba->link_state) {
@@ -6490,8 +6485,6 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
break;
}
}
-
- spin_unlock_irq(shost->host_lock);
}
/**
@@ -6504,8 +6497,6 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- spin_lock_irq(shost->host_lock);
-
if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) {
switch(phba->fc_linkspeed) {
case LPFC_LINK_SPEED_1GHZ:
@@ -6568,8 +6559,6 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
}
} else
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
-
- spin_unlock_irq(shost->host_lock);
}
/**
@@ -6583,19 +6572,15 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
struct lpfc_hba *phba = vport->phba;
u64 node_name;
- spin_lock_irq(shost->host_lock);
-
- if ((vport->port_state > LPFC_FLOGI) &&
- ((vport->fc_flag & FC_FABRIC) ||
- ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
- (vport->fc_flag & FC_PUBLIC_LOOP))))
+ if (vport->port_state > LPFC_FLOGI &&
+ (test_bit(FC_FABRIC, &vport->fc_flag) ||
+ (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
+ test_bit(FC_PUBLIC_LOOP, &vport->fc_flag))))
node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
else
/* fabric is local port if there is no F/FL_Port */
node_name = 0;
- spin_unlock_irq(shost->host_lock);
-
fc_host_fabric_name(shost) = node_name;
}
@@ -6646,7 +6631,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
pmboxq->ctx_buf = NULL;
pmboxq->vport = vport;
- if (vport->fc_flag & FC_OFFLINE_MODE) {
+ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -6699,7 +6684,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
pmboxq->ctx_buf = NULL;
pmboxq->vport = vport;
- if (vport->fc_flag & FC_OFFLINE_MODE) {
+ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -6786,8 +6771,8 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmboxq->ctx_buf = NULL;
pmboxq->vport = vport;
- if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
+ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag) ||
+ !(psli->sli_flag & LPFC_SLI_ACTIVE)) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -6808,8 +6793,8 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmboxq->ctx_buf = NULL;
pmboxq->vport = vport;
- if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
+ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag) ||
+ !(psli->sli_flag & LPFC_SLI_ACTIVE)) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -6868,17 +6853,19 @@ lpfc_get_node_by_target(struct scsi_target *starget)
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_nodelist *ndlp;
+ unsigned long iflags;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
/* Search for this, mapped, target ID */
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
starget->id == ndlp->nlp_sid) {
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock,
+ iflags);
return ndlp;
}
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
return NULL;
}
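
The lpfc_attr.c changes above walk vport->fc_nodes under the new per-vport fc_nodes_list_lock with spin_lock_irqsave() instead of spin_lock_irq(shost->host_lock), which narrows the critical section to the node list and stays correct when the caller may already have interrupts disabled. A sketch of the list-walk pattern with hypothetical types:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct example_node {
		struct list_head entry;
		int state;
	};

	struct example_vport {
		spinlock_t nodes_lock;		/* protects nodes */
		struct list_head nodes;
	};

	static int example_count_nodes(struct example_vport *vp, int state)
	{
		struct example_node *n;
		unsigned long flags;
		int cnt = 0;

		/* irqsave/irqrestore preserves the caller's IRQ state. */
		spin_lock_irqsave(&vp->nodes_lock, flags);
		list_for_each_entry(n, &vp->nodes, entry)
			if (n->state == state)
				cnt++;
		spin_unlock_irqrestore(&vp->nodes_lock, flags);

		return cnt;
	}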
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 595dca92e8db5..529df1768fa89 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1977,7 +1977,7 @@ lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
static int
lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
{
- if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
+ if (test_bit(FC_VFI_REGISTERED, &phba->pport->fc_flag)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"3136 Port still had vfi registered: "
"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
@@ -2513,7 +2513,7 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
return -ENOMEM;
}
- dmabuff = (struct lpfc_dmabuf *)mbox->ctx_buf;
+ dmabuff = mbox->ctx_buf;
mbox->ctx_buf = NULL;
mbox->ctx_ndlp = NULL;
status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
@@ -3169,10 +3169,10 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
}
cmdwqe = &cmdiocbq->wqe;
- memset(cmdwqe, 0, sizeof(union lpfc_wqe));
+ memset(cmdwqe, 0, sizeof(*cmdwqe));
if (phba->sli_rev < LPFC_SLI_REV4) {
rspwqe = &rspiocbq->wqe;
- memset(rspwqe, 0, sizeof(union lpfc_wqe));
+ memset(rspwqe, 0, sizeof(*rspwqe));
}
INIT_LIST_HEAD(&head);
@@ -3376,7 +3376,7 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
unsigned long flags;
uint8_t *pmb, *pmb_buf;
- dd_data = pmboxq->ctx_ndlp;
+ dd_data = pmboxq->ctx_u.dd_data;
/*
* The outgoing buffer is readily referred from the dma buffer,
@@ -3448,7 +3448,7 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
case MBX_RUN_DIAGS:
case MBX_RESTART:
case MBX_SET_MASK:
- if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
+ if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2743 Command 0x%x is illegal in on-line "
"state\n",
@@ -3553,7 +3553,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
struct lpfc_sli_config_mbox *sli_cfg_mbx;
uint8_t *pmbx;
- dd_data = pmboxq->ctx_buf;
+ dd_data = pmboxq->ctx_u.dd_data;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -3940,7 +3940,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
/* context fields to callback function */
- pmboxq->ctx_buf = dd_data;
+ pmboxq->ctx_u.dd_data = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4112,7 +4112,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
/* context fields to callback function */
- pmboxq->ctx_buf = dd_data;
+ pmboxq->ctx_u.dd_data = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4460,7 +4460,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
/* context fields to callback function */
- pmboxq->ctx_buf = dd_data;
+ pmboxq->ctx_u.dd_data = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4747,7 +4747,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
from = pmbx;
ext = from + sizeof(MAILBOX_t);
- pmboxq->ctx_buf = ext;
+ pmboxq->ext_buf = ext;
pmboxq->in_ext_byte_len =
mbox_req->inExtWLen * sizeof(uint32_t);
pmboxq->out_ext_byte_len =
@@ -4875,7 +4875,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
/* setup context field to pass wait_queue pointer to wake function */
- pmboxq->ctx_ndlp = dd_data;
+ pmboxq->ctx_u.dd_data = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4886,7 +4886,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
job->dd_data = dd_data;
- if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag) ||
(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
@@ -5070,12 +5070,12 @@ lpfc_bsg_get_ras_config(struct bsg_job *job)
bsg_reply->reply_data.vendor_reply.vendor_rsp;
/* Current logging state */
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
if (ras_fwlog->state == ACTIVE)
ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
else
ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
@@ -5132,13 +5132,13 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
if (action == LPFC_RASACTION_STOP_LOGGING) {
/* Check if already disabled */
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
if (ras_fwlog->state != ACTIVE) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
rc = -ESRCH;
goto ras_job_error;
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
/* Disable logging */
lpfc_ras_stop_fwlog(phba);
@@ -5149,10 +5149,10 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
* FW-logging with new log-level. Return status
* "Logging already Running" to caller.
**/
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
if (ras_fwlog->state != INACTIVE)
action_status = -EINPROGRESS;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
/* Enable logging */
rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
@@ -5268,13 +5268,13 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
goto ras_job_error;
/* Logging to be stopped before reading */
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
if (ras_fwlog->state == ACTIVE) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
rc = -EINPROGRESS;
goto ras_job_error;
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
if (job->request_len <
sizeof(struct fc_bsg_request) +
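
The RAS bsg handlers above read and gate ras_fwlog.state under the dedicated phba->ras_fwlog_lock rather than the heavily shared hbalock (the lpfc.h hunk notes it must not be taken while another lock is held), and the mailbox context pointer moves into the ctx_u.dd_data union member. A sketch of the narrow-lock state check, with illustrative names and states:

	#include <linux/errno.h>
	#include <linux/spinlock.h>

	enum example_log_state { EXAMPLE_LOG_INACTIVE, EXAMPLE_LOG_ACTIVE };

	struct example_hba {
		spinlock_t log_lock;		/* protects log_state only */
		enum example_log_state log_state;
	};

	static int example_log_stop(struct example_hba *hba)
	{
		spin_lock_irq(&hba->log_lock);
		if (hba->log_state != EXAMPLE_LOG_ACTIVE) {
			spin_unlock_irq(&hba->log_lock);
			return -ESRCH;	/* nothing to stop */
		}
		spin_unlock_irq(&hba->log_lock);

		/* ... actually stop logging outside the lock ... */
		return 0;
	}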
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index baae1f8279e0c..8cc08e58dc05e 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -265,7 +265,7 @@ ct_free_mp:
kfree(mp);
ct_exit:
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "6440 Unsol CT: Rsp err %d Data: x%x\n",
+ "6440 Unsol CT: Rsp err %d Data: x%lx\n",
rc, vport->fc_flag);
}
@@ -298,7 +298,7 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq)
}
/* Ignore traffic received during vport shutdown */
- if (vport->fc_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &vport->load_flag))
return;
ndlp = lpfc_findnode_did(vport, did);
@@ -723,7 +723,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
if (ndlp) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
- "Parse GID_FTrsp: did:x%x flg:x%x x%x",
+ "Parse GID_FTrsp: did:x%x flg:x%lx x%x",
Did, ndlp->nlp_flag, vport->fc_flag);
/* By default, the driver expects to support FCP FC4 */
@@ -735,7 +735,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0238 Process x%06x NameServer Rsp "
- "Data: x%x x%x x%x x%x x%x\n", Did,
+ "Data: x%x x%x x%x x%lx x%x\n", Did,
ndlp->nlp_flag, ndlp->nlp_fc4_type,
ndlp->nlp_state, vport->fc_flag,
vport->fc_rscn_id_cnt);
@@ -751,20 +751,20 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
}
} else {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
- "Skip1 GID_FTrsp: did:x%x flg:x%x cnt:%d",
+ "Skip1 GID_FTrsp: did:x%x flg:x%lx cnt:%d",
Did, vport->fc_flag, vport->fc_rscn_id_cnt);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0239 Skip x%06x NameServer Rsp "
- "Data: x%x x%x x%px\n",
+ "Data: x%lx x%x x%px\n",
Did, vport->fc_flag,
vport->fc_rscn_id_cnt, ndlp);
}
} else {
- if (!(vport->fc_flag & FC_RSCN_MODE) ||
+ if (!test_bit(FC_RSCN_MODE, &vport->fc_flag) ||
lpfc_rscn_payload_check(vport, Did)) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
- "Query GID_FTrsp: did:x%x flg:x%x cnt:%d",
+ "Query GID_FTrsp: did:x%x flg:x%lx cnt:%d",
Did, vport->fc_flag, vport->fc_rscn_id_cnt);
/*
@@ -787,12 +787,12 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
lpfc_setup_disc_node(vport, Did);
} else {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
- "Skip2 GID_FTrsp: did:x%x flg:x%x cnt:%d",
+ "Skip2 GID_FTrsp: did:x%x flg:x%lx cnt:%d",
Did, vport->fc_flag, vport->fc_rscn_id_cnt);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0245 Skip x%06x NameServer Rsp "
- "Data: x%x x%x\n", Did,
+ "Data: x%lx x%x\n", Did,
vport->fc_flag,
vport->fc_rscn_id_cnt);
}
@@ -914,7 +914,6 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_dmabuf *outp;
struct lpfc_dmabuf *inp;
struct lpfc_sli_ct_request *CTrsp;
@@ -943,9 +942,9 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
- /* Don't bother processing response if vport is being torn down. */
- if (vport->load_flag & FC_UNLOADING) {
- if (vport->fc_flag & FC_RSCN_MODE)
+ /* Skip processing response on pport if unloading */
+ if (vport == phba->pport && test_bit(FC_UNLOADING, &vport->load_flag)) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
lpfc_els_flush_rscn(vport);
goto out;
}
@@ -953,7 +952,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (lpfc_els_chk_latt(vport)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0216 Link event during NS query\n");
- if (vport->fc_flag & FC_RSCN_MODE)
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
lpfc_els_flush_rscn(vport);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out;
@@ -961,22 +960,18 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0226 NS query failed due to link event: "
- "ulp_status x%x ulp_word4 x%x fc_flag x%x "
+ "ulp_status x%x ulp_word4 x%x fc_flag x%lx "
"port_state x%x gidft_inp x%x\n",
ulp_status, ulp_word4, vport->fc_flag,
vport->port_state, vport->gidft_inp);
- if (vport->fc_flag & FC_RSCN_MODE)
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
lpfc_els_flush_rscn(vport);
if (vport->gidft_inp)
vport->gidft_inp--;
goto out;
}
- spin_lock_irq(shost->host_lock);
- if (vport->fc_flag & FC_RSCN_DEFERRED) {
- vport->fc_flag &= ~FC_RSCN_DEFERRED;
- spin_unlock_irq(shost->host_lock);
-
+ if (test_and_clear_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) {
/* This is a GID_FT completing so the gidft_inp counter was
* incremented before the GID_FT was issued to the wire.
*/
@@ -988,13 +983,12 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* Re-issue the NS cmd
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0151 Process Deferred RSCN Data: x%x x%x\n",
+ "0151 Process Deferred RSCN Data: x%lx x%x\n",
vport->fc_flag, vport->fc_rscn_id_cnt);
lpfc_els_handle_rscn(vport);
goto out;
}
- spin_unlock_irq(shost->host_lock);
if (ulp_status) {
/* Check for retry */
@@ -1018,7 +1012,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
vport->gidft_inp--;
}
}
- if (vport->fc_flag & FC_RSCN_MODE)
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
lpfc_els_flush_rscn(vport);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -1031,7 +1025,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (CTrsp->CommandResponse.bits.CmdRsp ==
cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0208 NameServer Rsp Data: x%x x%x "
+ "0208 NameServer Rsp Data: x%lx x%x "
"x%x x%x sz x%x\n",
vport->fc_flag,
CTreq->un.gid.Fc4Type,
@@ -1051,7 +1045,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_INFO,
LOG_DISCOVERY,
"0269 No NameServer Entries "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%lx\n",
be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation,
@@ -1066,7 +1060,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_INFO,
LOG_DISCOVERY,
"0240 NameServer Rsp Error "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%lx\n",
be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation,
@@ -1084,7 +1078,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* NameServer Rsp Error */
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0241 NameServer Rsp Error "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%lx\n",
be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation,
@@ -1113,14 +1107,13 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* current driver state.
*/
if (vport->port_state >= LPFC_DISC_AUTH) {
- if (vport->fc_flag & FC_RSCN_MODE) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
lpfc_els_flush_rscn(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
- spin_unlock_irq(shost->host_lock);
- }
- else
+ /* RSCN still */
+ set_bit(FC_RSCN_MODE, &vport->fc_flag);
+ } else {
lpfc_els_flush_rscn(vport);
+ }
}
lpfc_disc_start(vport);
@@ -1136,7 +1129,6 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_dmabuf *outp;
struct lpfc_dmabuf *inp;
struct lpfc_sli_ct_request *CTrsp;
@@ -1166,9 +1158,9 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
- /* Don't bother processing response if vport is being torn down. */
- if (vport->load_flag & FC_UNLOADING) {
- if (vport->fc_flag & FC_RSCN_MODE)
+ /* Skip processing response on pport if unloading */
+ if (vport == phba->pport && test_bit(FC_UNLOADING, &vport->load_flag)) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
lpfc_els_flush_rscn(vport);
goto out;
}
@@ -1176,7 +1168,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (lpfc_els_chk_latt(vport)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"4108 Link event during NS query\n");
- if (vport->fc_flag & FC_RSCN_MODE)
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
lpfc_els_flush_rscn(vport);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out;
@@ -1184,22 +1176,18 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"4166 NS query failed due to link event: "
- "ulp_status x%x ulp_word4 x%x fc_flag x%x "
+ "ulp_status x%x ulp_word4 x%x fc_flag x%lx "
"port_state x%x gidft_inp x%x\n",
ulp_status, ulp_word4, vport->fc_flag,
vport->port_state, vport->gidft_inp);
- if (vport->fc_flag & FC_RSCN_MODE)
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
lpfc_els_flush_rscn(vport);
if (vport->gidft_inp)
vport->gidft_inp--;
goto out;
}
- spin_lock_irq(shost->host_lock);
- if (vport->fc_flag & FC_RSCN_DEFERRED) {
- vport->fc_flag &= ~FC_RSCN_DEFERRED;
- spin_unlock_irq(shost->host_lock);
-
+ if (test_and_clear_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) {
/* This is a GID_PT completing so the gidft_inp counter was
* incremented before the GID_PT was issued to the wire.
*/
@@ -1211,13 +1199,12 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* Re-issue the NS cmd
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "4167 Process Deferred RSCN Data: x%x x%x\n",
+ "4167 Process Deferred RSCN Data: x%lx x%x\n",
vport->fc_flag, vport->fc_rscn_id_cnt);
lpfc_els_handle_rscn(vport);
goto out;
}
- spin_unlock_irq(shost->host_lock);
if (ulp_status) {
/* Check for retry */
@@ -1237,7 +1224,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
vport->gidft_inp--;
}
}
- if (vport->fc_flag & FC_RSCN_MODE)
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
lpfc_els_flush_rscn(vport);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -1250,7 +1237,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
SLI_CT_RESPONSE_FS_ACC) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "4105 NameServer Rsp Data: x%x x%x "
+ "4105 NameServer Rsp Data: x%lx x%x "
"x%x x%x sz x%x\n",
vport->fc_flag,
CTreq->un.gid.Fc4Type,
@@ -1270,7 +1257,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(
vport, KERN_INFO, LOG_DISCOVERY,
"4106 No NameServer Entries "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%lx\n",
be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
(uint32_t)CTrsp->ReasonCode,
(uint32_t)CTrsp->Explanation,
@@ -1286,7 +1273,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(
vport, KERN_INFO, LOG_DISCOVERY,
"4107 NameServer Rsp Error "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%lx\n",
be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
(uint32_t)CTrsp->ReasonCode,
(uint32_t)CTrsp->Explanation,
@@ -1303,7 +1290,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* NameServer Rsp Error */
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"4109 NameServer Rsp Error "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%lx\n",
be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
(uint32_t)CTrsp->ReasonCode,
(uint32_t)CTrsp->Explanation,
@@ -1333,11 +1320,10 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* current driver state.
*/
if (vport->port_state >= LPFC_DISC_AUTH) {
- if (vport->fc_flag & FC_RSCN_MODE) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
lpfc_els_flush_rscn(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
- spin_unlock_irq(shost->host_lock);
+ /* RSCN still */
+ set_bit(FC_RSCN_MODE, &vport->fc_flag);
} else {
lpfc_els_flush_rscn(vport);
}
@@ -1355,7 +1341,6 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
struct lpfc_sli_ct_request *CTrsp;
@@ -1445,7 +1430,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0267 NameServer GFF Rsp "
- "x%x Error (%d %d) Data: x%x x%x\n",
+ "x%x Error (%d %d) Data: x%lx x%x\n",
did, ulp_status, ulp_word4,
vport->fc_flag, vport->fc_rscn_id_cnt);
}
@@ -1455,13 +1440,13 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (ndlp) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0242 Process x%x GFF "
- "NameServer Rsp Data: x%x x%x x%x\n",
+ "NameServer Rsp Data: x%x x%lx x%x\n",
did, ndlp->nlp_flag, vport->fc_flag,
vport->fc_rscn_id_cnt);
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0243 Skip x%x GFF "
- "NameServer Rsp Data: x%x x%x\n", did,
+ "NameServer Rsp Data: x%lx x%x\n", did,
vport->fc_flag, vport->fc_rscn_id_cnt);
}
out:
@@ -1480,14 +1465,13 @@ out:
* current driver state.
*/
if (vport->port_state >= LPFC_DISC_AUTH) {
- if (vport->fc_flag & FC_RSCN_MODE) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
lpfc_els_flush_rscn(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
- spin_unlock_irq(shost->host_lock);
- }
- else
+ /* RSCN still */
+ set_bit(FC_RSCN_MODE, &vport->fc_flag);
+ } else {
lpfc_els_flush_rscn(vport);
+ }
}
lpfc_disc_start(vport);
}
@@ -1853,11 +1837,10 @@ static uint32_t
lpfc_find_map_node(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp, *next_ndlp;
- struct Scsi_Host *shost;
+ unsigned long iflags;
uint32_t cnt = 0;
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_type & NLP_FABRIC)
continue;
@@ -1865,7 +1848,7 @@ lpfc_find_map_node(struct lpfc_vport *vport)
(ndlp->nlp_state == NLP_STE_UNMAPPED_NODE))
cnt++;
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
return cnt;
}
@@ -1950,7 +1933,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
/* NameServer Req */
lpfc_printf_vlog(vport, KERN_INFO ,LOG_DISCOVERY,
- "0236 NameServer Req Data: x%x x%x x%x x%x\n",
+ "0236 NameServer Req Data: x%x x%lx x%x x%x\n",
cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt,
context);
@@ -2167,7 +2150,8 @@ ns_cmd_free_mp:
kfree(mp);
ns_cmd_exit:
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0266 Issue NameServer Req x%x err %d Data: x%x x%x\n",
+ "0266 Issue NameServer Req x%x err %d Data: x%lx "
+ "x%x\n",
cmdcode, rc, vport->fc_flag, vport->fc_rscn_id_cnt);
return 1;
}
@@ -2453,7 +2437,7 @@ lpfc_fdmi_change_check(struct lpfc_vport *vport)
return;
/* Must be connected to a Fabric */
- if (!(vport->fc_flag & FC_FABRIC))
+ if (!test_bit(FC_FABRIC, &vport->fc_flag))
return;
ndlp = lpfc_findnode_did(vport, FDMI_DID);
@@ -2569,9 +2553,9 @@ lpfc_fdmi_set_attr_string(void *attr, uint16_t attrtype, char *attrstring)
* 64 bytes or less.
*/
- strncpy(ae->value_string, attrstring, sizeof(ae->value_string));
+ strscpy(ae->value_string, attrstring, sizeof(ae->value_string));
len = strnlen(ae->value_string, sizeof(ae->value_string));
- /* round string length to a 32bit boundary. Ensure there's a NULL */
+ /* round string length to a 32bit boundary */
len += (len & 3) ? (4 - (len & 3)) : 4;
/* size is Type/Len (4 bytes) plus string length */
size = FOURBYTES + len;
@@ -3233,7 +3217,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* FDMI request */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0218 FDMI Request x%x mask x%x Data: x%x x%x x%x\n",
+ "0218 FDMI Request x%x mask x%x Data: x%x x%lx x%x\n",
cmdcode, new_mask, vport->fdmi_port_mask,
vport->fc_flag, vport->port_state);
@@ -3470,15 +3454,8 @@ lpfc_delayed_disc_tmo(struct timer_list *t)
void
lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
- spin_lock_irq(shost->host_lock);
- if (!(vport->fc_flag & FC_DISC_DELAYED)) {
- spin_unlock_irq(shost->host_lock);
+ if (!test_and_clear_bit(FC_DISC_DELAYED, &vport->fc_flag))
return;
- }
- vport->fc_flag &= ~FC_DISC_DELAYED;
- spin_unlock_irq(shost->host_lock);
lpfc_do_scr_ns_plogi(vport->phba, vport);
}
@@ -3606,7 +3583,8 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(ctrsp->Explanation != SLI_CT_APP_ID_NOT_AVAILABLE)) {
/* If DALLAPP_ID failed retry later */
if (cmd == SLI_CTAS_DALLAPP_ID)
- vport->load_flag |= FC_DEREGISTER_ALL_APP_ID;
+ set_bit(FC_DEREGISTER_ALL_APP_ID,
+ &vport->load_flag);
goto free_res;
}
}
@@ -3662,7 +3640,7 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (!hash_empty(vport->hash_table))
hash_for_each(vport->hash_table, bucket, cur, hnode)
hash_del(&cur->hnode);
- vport->load_flag |= FC_ALLOW_VMID;
+ set_bit(FC_ALLOW_VMID, &vport->load_flag);
break;
default:
lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
@@ -3729,7 +3707,7 @@ lpfc_vmid_cmd(struct lpfc_vport *vport,
INIT_LIST_HEAD(&bmp->list);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "3275 VMID Request Data: x%x x%x x%x\n",
+ "3275 VMID Request Data: x%lx x%x x%x\n",
vport->fc_flag, vport->port_state, cmdcode);
ctreq = (struct lpfc_sli_ct_request *)mp->virt;
data = mp->virt;
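
Because vport->fc_flag is now unsigned long, every lpfc_ct.c log format above that prints it switches from x%x to x%lx; passing an unsigned long to a %x conversion is a type mismatch on 64-bit builds and trips -Wformat. A one-function sketch:

	#include <linux/printk.h>
	#include <linux/types.h>

	static void example_log_flags(unsigned long flags, u32 rscn_cnt)
	{
		/* %lx matches unsigned long, %x matches u32. */
		pr_info("example: fc_flag x%lx rscn cnt x%x\n",
			flags, rscn_cnt);
	}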
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index ea9b42225e629..a2d2b02b34187 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -806,10 +806,10 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
{
int len = 0;
int i, iocnt, outio, cnt;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
unsigned char *statep;
+ unsigned long iflags;
struct nvme_fc_local_port *localport;
struct nvme_fc_remote_port *nrport = NULL;
struct lpfc_nvme_rport *rport;
@@ -818,7 +818,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
outio = 0;
len += scnprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
iocnt = 0;
if (!cnt) {
@@ -908,7 +908,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
ndlp->nlp_defer_did);
len += scnprintf(buf+len, size-len, "\n");
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
len += scnprintf(buf + len, size - len,
"\nOutstanding IO x%x\n", outio);
@@ -940,8 +940,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
if (!localport)
goto out_exit;
- spin_lock_irq(shost->host_lock);
-
/* Port state is only one of two values for now. */
if (localport->port_id)
statep = "ONLINE";
@@ -953,6 +951,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
localport->port_id, statep);
len += scnprintf(buf + len, size - len, "\tRport List:\n");
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
/* local short-hand pointer. */
spin_lock(&ndlp->lock);
@@ -1006,8 +1005,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
/* Terminate the string. */
len += scnprintf(buf + len, size - len, "\n");
}
-
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
out_exit:
return len;
}
@@ -2196,12 +2194,12 @@ static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba,
memset(buffer, 0, size);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
if (phba->ras_fwlog.state != ACTIVE) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
return -EINVAL;
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
list_for_each_entry_safe(dmabuf, next,
&phba->ras_fwlog.fwlog_buff_list, list) {
@@ -2252,13 +2250,13 @@ lpfc_debugfs_ras_log_open(struct inode *inode, struct file *file)
int size;
int rc = -ENOMEM;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
if (phba->ras_fwlog.state != ACTIVE) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
rc = -EINVAL;
goto out;
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
if (check_mul_overflow(LPFC_RAS_MIN_BUFF_POST_SIZE,
phba->cfg_ras_fwlog_buffsize, &size))
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4d723200690a4..f7c28dc73bf67 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -93,7 +93,6 @@ static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid);
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
uint32_t ha_copy;
@@ -121,9 +120,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
* will cleanup any left over in-progress discovery
* events.
*/
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_ABORT_DISCOVERY;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
if (phba->link_state != LPFC_CLEAR_LA)
lpfc_issue_clear_la(phba, vport);
@@ -301,7 +298,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0116 Xmit ELS command x%x to remote "
"NPORT x%x I/O tag: x%x, port state:x%x "
- "rpi x%x fc_flag:x%x\n",
+ "rpi x%x fc_flag:x%lx\n",
elscmd, did, elsiocb->iotag,
vport->port_state, ndlp->nlp_rpi,
vport->fc_flag);
@@ -310,7 +307,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0117 Xmit ELS response x%x to remote "
"NPORT x%x I/O tag: x%x, size: x%x "
- "port_state x%x rpi x%x fc_flag x%x\n",
+ "port_state x%x rpi x%x fc_flag x%lx\n",
elscmd, ndlp->nlp_DID, elsiocb->iotag,
cmd_size, vport->port_state,
ndlp->nlp_rpi, vport->fc_flag);
@@ -452,7 +449,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
if ((phba->sli_rev == LPFC_SLI_REV4) &&
!(phba->link_flag & LS_LOOPBACK_MODE) &&
- !(vport->fc_flag & FC_PT2PT)) {
+ !test_bit(FC_PT2PT, &vport->fc_flag)) {
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp) {
rc = -ENODEV;
@@ -467,7 +464,8 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
}
/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
- if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
+ if (test_bit(FC_FABRIC, &vport->fc_flag) ||
+ test_bit(FC_PT2PT, &vport->fc_flag)) {
rc = lpfc_mbox_rsrc_prep(phba, mboxq);
if (rc) {
rc = -ENOMEM;
@@ -520,7 +518,6 @@ int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
- struct Scsi_Host *shost;
LPFC_MBOXQ_t *mboxq;
int rc;
@@ -546,10 +543,7 @@ lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
return -EIO;
}
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VFI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VFI_REGISTERED, &vport->fc_flag);
return 0;
}
@@ -577,7 +571,6 @@ lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
{
struct lpfc_hba *phba = vport->phba;
uint8_t fabric_param_changed = 0;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if ((vport->fc_prevDID != vport->fc_myDID) ||
memcmp(&vport->fabric_portname, &sp->portName,
@@ -599,11 +592,8 @@ lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
* - lpfc_delay_discovery module parameter is set.
*/
if (fabric_param_changed && !sp->cmn.clean_address_bit &&
- (vport->fc_prevDID || phba->cfg_delay_discovery)) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_DISC_DELAYED;
- spin_unlock_irq(shost->host_lock);
- }
+ (vport->fc_prevDID || phba->cfg_delay_discovery))
+ set_bit(FC_DISC_DELAYED, &vport->fc_flag);
return fabric_param_changed;
}
@@ -633,15 +623,12 @@ static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct serv_parm *sp, uint32_t ulp_word4)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *np;
struct lpfc_nodelist *next_np;
uint8_t fabric_param_changed;
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_FABRIC;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_FABRIC, &vport->fc_flag);
phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
@@ -650,11 +637,8 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba->fc_edtovResol = sp->cmn.edtovResolution;
phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
- if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_PUBLIC_LOOP;
- spin_unlock_irq(shost->host_lock);
- }
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
+ set_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
vport->fc_myDID = ulp_word4 & Mask_DID;
memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
@@ -728,12 +712,12 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_unregister_fcf_prep(phba);
/* This should just update the VFI CSPs*/
- if (vport->fc_flag & FC_VFI_REGISTERED)
+ if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag))
lpfc_issue_reg_vfi(vport);
}
if (fabric_param_changed &&
- !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+ !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) {
/* If our NportID changed, we need to ensure all
* remaining NPORTs get unreg_login'ed.
@@ -753,20 +737,16 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->sli_rev == LPFC_SLI_REV4) {
lpfc_sli4_unreg_all_rpis(vport);
lpfc_mbx_unreg_vpi(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
}
/*
* For SLI3 and SLI4, the VPI needs to be reregistered in
* response to this fabric parameter change event.
*/
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
- !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+ !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) {
/*
* Driver needs to re-reg VPI in order for f/w
* to update the MAC address.
@@ -779,18 +759,18 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->sli_rev < LPFC_SLI_REV4) {
lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
- vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+ test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag))
lpfc_register_new_vport(phba, vport, ndlp);
else
lpfc_issue_fabric_reglogin(vport);
} else {
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
- (vport->vpi_state & LPFC_VPI_REGISTERED)) {
+ if ((!test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) &&
+ (vport->vpi_state & LPFC_VPI_REGISTERED)) {
lpfc_start_fdiscs(phba);
lpfc_do_scr_ns_plogi(phba, vport);
- } else if (vport->fc_flag & FC_VFI_REGISTERED)
+ } else if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag))
lpfc_issue_init_vpi(vport);
else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -826,15 +806,13 @@ static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct serv_parm *sp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
int rc;
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
- vport->fc_flag |= FC_PT2PT;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_FABRIC, &vport->fc_flag);
+ clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
+ set_bit(FC_PT2PT, &vport->fc_flag);
/* If we are pt2pt with another NPort, force NPIV off! */
phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
@@ -842,10 +820,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
lpfc_unregister_fcf_prep(phba);
-
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VFI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VFI_REGISTERED, &vport->fc_flag);
phba->fc_topology_changed = 0;
}
@@ -854,9 +829,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (rc >= 0) {
/* This side will initiate the PLOGI */
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_PT2PT_PLOGI;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_PT2PT_PLOGI, &vport->fc_flag);
/*
* N_Port ID cannot be 0, set our Id to LocalID
@@ -953,7 +926,6 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
IOCB_t *irsp;
struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
@@ -1069,10 +1041,9 @@ stop_rr_fcf_flogi:
}
/* FLOGI failed, so there is no fabric */
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
- FC_PT2PT_NO_NVME);
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_FABRIC, &vport->fc_flag);
+ clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
+ clear_bit(FC_PT2PT_NO_NVME, &vport->fc_flag);
/* If private loop, then allow max outstanding els to be
* LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
@@ -1081,15 +1052,14 @@ stop_rr_fcf_flogi:
if (phba->alpa_map[0] == 0)
vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (!(vport->fc_flag & FC_VFI_REGISTERED) ||
+ (!test_bit(FC_VFI_REGISTERED, &vport->fc_flag) ||
(vport->fc_prevDID != vport->fc_myDID) ||
phba->fc_topology_changed)) {
- if (vport->fc_flag & FC_VFI_REGISTERED) {
+ if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag)) {
if (phba->fc_topology_changed) {
lpfc_unregister_fcf_prep(phba);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VFI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VFI_REGISTERED,
+ &vport->fc_flag);
phba->fc_topology_changed = 0;
} else {
lpfc_sli4_unreg_all_rpis(vport);
@@ -1104,10 +1074,8 @@ stop_rr_fcf_flogi:
}
goto flogifail;
}
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
- vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag);
+ clear_bit(FC_VPORT_LOGO_RCVD, &vport->fc_flag);
/*
* The FLOGI succeeded. Sync the data for the CPU before
@@ -1123,7 +1091,7 @@ stop_rr_fcf_flogi:
/* FLOGI completes successfully */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0101 FLOGI completes successfully, I/O tag:x%x "
- "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n",
+ "xri x%x Data: x%x x%x x%x x%x x%x x%lx x%x %d\n",
cmdiocb->iotag, cmdiocb->sli4_xritag,
ulp_word4, sp->cmn.e_d_tov,
sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
@@ -1202,7 +1170,7 @@ stop_rr_fcf_flogi:
goto out;
}
} else if (vport->port_state > LPFC_FLOGI &&
- vport->fc_flag & FC_PT2PT) {
+ test_bit(FC_PT2PT, &vport->fc_flag)) {
/*
* In a p2p topology, it is possible that discovery has
* already progressed, and this completion can be ignored.
@@ -1506,8 +1474,9 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
if (ulp_command == CMD_ELS_REQUEST64_CR) {
ndlp = iocb->ndlp;
if (ndlp && ndlp->nlp_DID == Fabric_DID) {
- if ((phba->pport->fc_flag & FC_PT2PT) &&
- !(phba->pport->fc_flag & FC_PT2PT_PLOGI))
+ if (test_bit(FC_PT2PT, &phba->pport->fc_flag) &&
+ !test_bit(FC_PT2PT_PLOGI,
+ &phba->pport->fc_flag))
iocb->fabric_cmd_cmpl =
lpfc_ignore_els_cmpl;
lpfc_sli_issue_abort_iotag(phba, pring, iocb,
@@ -1562,7 +1531,7 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
}
/* Reset the Fabric flag, topology change may have happened */
- vport->fc_flag &= ~FC_FABRIC;
+ clear_bit(FC_FABRIC, &vport->fc_flag);
if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
/* A node reference should be retained while registered with a
* transport or dev-loss-evt work is pending.
@@ -1645,11 +1614,12 @@ lpfc_more_plogi(struct lpfc_vport *vport)
/* Continue discovery with <num_disc_nodes> PLOGIs to go */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0232 Continue discovery with %d PLOGIs to go "
- "Data: x%x x%x x%x\n",
- vport->num_disc_nodes, vport->fc_plogi_cnt,
+ "Data: x%x x%lx x%x\n",
+ vport->num_disc_nodes,
+ atomic_read(&vport->fc_plogi_cnt),
vport->fc_flag, vport->port_state);
/* Check to see if there are more PLOGIs to be sent */
- if (vport->fc_flag & FC_NLP_MORE)
+ if (test_bit(FC_NLP_MORE, &vport->fc_flag))
/* go thru NPR nodes and issue any remaining ELS PLOGIs */
lpfc_els_disc_plogi(vport);
@@ -1696,18 +1666,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct serv_parm *sp;
uint8_t name[sizeof(struct lpfc_name)];
uint32_t keepDID = 0, keep_nlp_flag = 0;
+ int rc;
uint32_t keep_new_nlp_flag = 0;
uint16_t keep_nlp_state;
u32 keep_nlp_fc4_type = 0;
struct lpfc_nvme_rport *keep_nrport = NULL;
unsigned long *active_rrqs_xri_bitmap = NULL;
- /* Fabric nodes can have the same WWPN so we don't bother searching
- * by WWPN. Just return the ndlp that was given to us.
- */
- if (ndlp->nlp_type & NLP_FABRIC)
- return ndlp;
-
sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
memset(name, 0, sizeof(struct lpfc_name));
@@ -1717,15 +1682,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
/* return immediately if the WWPN matches ndlp */
- if (!new_ndlp || (new_ndlp == ndlp))
+ if (new_ndlp == ndlp)
return ndlp;
- /*
- * Unregister from backend if not done yet. Could have been skipped
- * due to ADISC
- */
- lpfc_nlp_unreg_node(vport, new_ndlp);
-
if (phba->sli_rev == LPFC_SLI_REV4) {
active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
GFP_KERNEL);
@@ -1742,18 +1701,44 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
(new_ndlp ? new_ndlp->nlp_flag : 0),
(new_ndlp ? new_ndlp->nlp_fc4_type : 0));
- keepDID = new_ndlp->nlp_DID;
+ if (!new_ndlp) {
+ rc = memcmp(&ndlp->nlp_portname, name,
+ sizeof(struct lpfc_name));
+ if (!rc) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
+ return ndlp;
+ }
+ new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
+ if (!new_ndlp) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
+ return ndlp;
+ }
+ } else {
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ active_rrqs_xri_bitmap)
+ memcpy(active_rrqs_xri_bitmap,
+ new_ndlp->active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
- if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
- memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
- phba->cfg_rrq_xri_bitmap_sz);
+ /*
+ * Unregister from backend if not done yet. Could have been
+ * skipped due to ADISC
+ */
+ lpfc_nlp_unreg_node(vport, new_ndlp);
+ }
+
+ keepDID = new_ndlp->nlp_DID;
/* At this point in this routine, we know new_ndlp will be
 * returned. However, any previous GID_FTs that were done
* would have updated nlp_fc4_type in ndlp, so we must ensure
* new_ndlp has the right value.
*/
- if (vport->fc_flag & FC_FABRIC) {
+ if (test_bit(FC_FABRIC, &vport->fc_flag)) {
keep_nlp_fc4_type = new_ndlp->nlp_fc4_type;
new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
}
@@ -1914,21 +1899,17 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
void
lpfc_end_rscn(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- if (vport->fc_flag & FC_RSCN_MODE) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
/*
* Check to see if more RSCNs came in while we were
* processing this one.
*/
if (vport->fc_rscn_id_cnt ||
- (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
+ test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag))
lpfc_els_handle_rscn(vport);
- else {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_RSCN_MODE;
- spin_unlock_irq(shost->host_lock);
- }
+ else
+ clear_bit(FC_RSCN_MODE, &vport->fc_flag);
}
}
@@ -2015,7 +1996,6 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_nodelist *ndlp, *free_ndlp;
struct lpfc_dmabuf *prsp;
@@ -2162,9 +2142,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_more_plogi(vport);
if (vport->num_disc_nodes == 0) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
lpfc_can_disctmo(vport);
lpfc_end_rscn(vport);
@@ -2226,7 +2204,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
*/
if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) &&
((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
- !(vport->fc_flag & FC_OFFLINE_MODE)) {
+ !test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"4110 Issue PLOGI x%x deferred "
"on NPort x%x rpi x%x flg x%x Data:"
@@ -2258,7 +2236,8 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 * If we are an N-port connected to a Fabric, fix up params so logins
* to device on remote loops work.
*/
- if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
+ if (test_bit(FC_FABRIC, &vport->fc_flag) &&
+ !test_bit(FC_PUBLIC_LOOP, &vport->fc_flag))
sp->cmn.altBbCredit = 1;
if (sp->cmn.fcphLow < FC_PH_4_3)
@@ -2382,8 +2361,8 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* If we don't send GFT_ID to Fabric, a PRLI error
* could be expected.
*/
- if ((vport->fc_flag & FC_FABRIC) ||
- (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) {
+ if (test_bit(FC_FABRIC, &vport->fc_flag) ||
+ vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH) {
mode = KERN_ERR;
loglevel = LOG_TRACE_EVENT;
} else {
@@ -2424,7 +2403,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* For P2P topology, retain the node so that PLOGI can be
* attempted on it again.
*/
- if (vport->fc_flag & FC_PT2PT)
+ if (test_bit(FC_PT2PT, &vport->fc_flag))
goto out;
/* As long as this node is not registered with the SCSI
@@ -2500,7 +2479,7 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 * the remote NPort being an NVME Target.
*/
if (phba->sli_rev == LPFC_SLI_REV4 &&
- vport->fc_flag & FC_RSCN_MODE &&
+ test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
vport->nvmei_support)
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
local_nlp_type = ndlp->nlp_fc4_type;
@@ -2677,7 +2656,7 @@ lpfc_rscn_disc(struct lpfc_vport *vport)
/* RSCN discovery */
/* go thru NPR nodes and issue ELS PLOGIs */
- if (vport->fc_npr_cnt)
+ if (atomic_read(&vport->fc_npr_cnt))
if (lpfc_els_disc_plogi(vport))
return;
@@ -2697,7 +2676,6 @@ lpfc_rscn_disc(struct lpfc_vport *vport)
static void
lpfc_adisc_done(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
/*
@@ -2705,7 +2683,7 @@ lpfc_adisc_done(struct lpfc_vport *vport)
* and continue discovery.
*/
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
- !(vport->fc_flag & FC_RSCN_MODE) &&
+ !test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
(phba->sli_rev < LPFC_SLI_REV4)) {
/*
@@ -2734,15 +2712,13 @@ lpfc_adisc_done(struct lpfc_vport *vport)
if (vport->port_state < LPFC_VPORT_READY) {
/* If we get here, there is nothing to ADISC */
lpfc_issue_clear_la(phba, vport);
- if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+ if (!test_bit(FC_ABORT_DISCOVERY, &vport->fc_flag)) {
vport->num_disc_nodes = 0;
/* go thru NPR list, issue ELS PLOGIs */
- if (vport->fc_npr_cnt)
+ if (atomic_read(&vport->fc_npr_cnt))
lpfc_els_disc_plogi(vport);
if (!vport->num_disc_nodes) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
lpfc_can_disctmo(vport);
lpfc_end_rscn(vport);
}
@@ -2769,11 +2745,12 @@ lpfc_more_adisc(struct lpfc_vport *vport)
/* Continue discovery with <num_disc_nodes> ADISCs to go */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0210 Continue discovery with %d ADISCs to go "
- "Data: x%x x%x x%x\n",
- vport->num_disc_nodes, vport->fc_adisc_cnt,
+ "Data: x%x x%lx x%x\n",
+ vport->num_disc_nodes,
+ atomic_read(&vport->fc_adisc_cnt),
vport->fc_flag, vport->port_state);
/* Check to see if there are more ADISCs to be sent */
- if (vport->fc_flag & FC_NLP_MORE) {
+ if (test_bit(FC_NLP_MORE, &vport->fc_flag)) {
lpfc_set_disctmo(vport);
/* go thru NPR nodes and issue any remaining ELS ADISCs */
lpfc_els_disc_adisc(vport);
@@ -3618,10 +3595,10 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
/* Not supported for private loop */
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
- !(vport->fc_flag & FC_PUBLIC_LOOP))
+ !test_bit(FC_PUBLIC_LOOP, &vport->fc_flag))
return 1;
- if (vport->fc_flag & FC_PT2PT) {
+ if (test_bit(FC_PT2PT, &vport->fc_flag)) {
/* find any mapped nport - that would be the other nport */
ndlp = lpfc_findnode_mapped(vport);
if (!ndlp)
@@ -4399,7 +4376,6 @@ try_rdf:
void
lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_work_evt *evtp;
if (!(nlp->nlp_flag & NLP_DELAY_TMO))
@@ -4427,9 +4403,8 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
/* Check if there are more PLOGIs to be sent */
lpfc_more_plogi(vport);
if (vport->num_disc_nodes == 0) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_NDISC_ACTIVE,
+ &vport->fc_flag);
lpfc_can_disctmo(vport);
lpfc_end_rscn(vport);
}
@@ -4462,23 +4437,23 @@ lpfc_els_retry_delay(struct timer_list *t)
unsigned long flags;
struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
+ /* Hold a node reference for outstanding queued work */
+ if (!lpfc_nlp_get(ndlp))
+ return;
+
spin_lock_irqsave(&phba->hbalock, flags);
if (!list_empty(&evtp->evt_listp)) {
spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_nlp_put(ndlp);
return;
}
- /* We need to hold the node by incrementing the reference
- * count until the queued work is done
- */
- evtp->evt_arg1 = lpfc_nlp_get(ndlp);
- if (evtp->evt_arg1) {
- evtp->evt = LPFC_EVT_ELS_RETRY;
- list_add_tail(&evtp->evt_listp, &phba->work_list);
- lpfc_worker_wake_up(phba);
- }
+ evtp->evt_arg1 = ndlp;
+ evtp->evt = LPFC_EVT_ELS_RETRY;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
spin_unlock_irqrestore(&phba->hbalock, flags);
- return;
+
+ lpfc_worker_wake_up(phba);
}
/**
@@ -4546,7 +4521,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
}
break;
case ELS_CMD_FDISC:
- if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
+ if (!test_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag))
lpfc_issue_els_fdisc(vport, ndlp, retry);
break;
}
@@ -4784,7 +4759,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 /* Added for Vendor specific support
* Just keep retrying for these Rsn / Exp codes
*/
- if ((vport->fc_flag & FC_PT2PT) &&
+ if (test_bit(FC_PT2PT, &vport->fc_flag) &&
cmd == ELS_CMD_NVMEPRLI) {
switch (stat.un.b.lsRjtRsnCode) {
case LSRJT_UNABLE_TPC:
@@ -4797,7 +4772,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"support NVME, disabling NVME\n",
stat.un.b.lsRjtRsnCode);
retry = 0;
- vport->fc_flag |= FC_PT2PT_NO_NVME;
+ set_bit(FC_PT2PT_NO_NVME, &vport->fc_flag);
goto out_retry;
}
}
@@ -4989,7 +4964,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
retry = 0;
}
- if ((vport->load_flag & FC_UNLOADING) != 0)
+ if (test_bit(FC_UNLOADING, &vport->load_flag))
retry = 0;
out_retry:
@@ -5020,7 +4995,7 @@ out_retry:
/* If discovery / RSCN timer is running, reset it */
if (timer_pending(&vport->fc_disctmo) ||
- (vport->fc_flag & FC_RSCN_MODE))
+ test_bit(FC_RSCN_MODE, &vport->fc_flag))
lpfc_set_disctmo(vport);
}
@@ -5406,7 +5381,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (ulp_status == 0
&& (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
if (!lpfc_unreg_rpi(vport, ndlp) &&
- (!(vport->fc_flag & FC_PT2PT))) {
+ !test_bit(FC_PT2PT, &vport->fc_flag)) {
if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
ndlp->nlp_state ==
NLP_STE_REG_LOGIN_ISSUE) {
@@ -5778,7 +5753,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
"XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
- "RPI: x%x, fc_flag x%x refcnt %d\n",
+ "RPI: x%x, fc_flag x%lx refcnt %d\n",
rc, elsiocb->iotag, elsiocb->sli4_xritag,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref));
@@ -5984,7 +5959,7 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, "
"XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
- "RPI: x%x, fc_flag x%x\n",
+ "RPI: x%x, fc_flag x%lx\n",
rc, elsiocb->iotag, elsiocb->sli4_xritag,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi, vport->fc_flag);
@@ -6551,7 +6526,6 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
int
lpfc_els_disc_adisc(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp, *next_ndlp;
int sentadisc = 0;
@@ -6586,18 +6560,13 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport)
vport->num_disc_nodes++;
if (vport->num_disc_nodes >=
vport->cfg_discovery_threads) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_NLP_MORE;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_NLP_MORE, &vport->fc_flag);
break;
}
}
- if (sentadisc == 0) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NLP_MORE;
- spin_unlock_irq(shost->host_lock);
- }
+ if (sentadisc == 0)
+ clear_bit(FC_NLP_MORE, &vport->fc_flag);
return sentadisc;
}
@@ -6623,7 +6592,6 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport)
int
lpfc_els_disc_plogi(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp, *next_ndlp;
int sentplogi = 0;
@@ -6640,26 +6608,20 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
vport->num_disc_nodes++;
if (vport->num_disc_nodes >=
vport->cfg_discovery_threads) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_NLP_MORE;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_NLP_MORE, &vport->fc_flag);
break;
}
}
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "6452 Discover PLOGI %d flag x%x\n",
+ "6452 Discover PLOGI %d flag x%lx\n",
sentplogi, vport->fc_flag);
- if (sentplogi) {
+ if (sentplogi)
lpfc_set_disctmo(vport);
- }
- else {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NLP_MORE;
- spin_unlock_irq(shost->host_lock);
- }
+ else
+ clear_bit(FC_NLP_MORE, &vport->fc_flag);
return sentplogi;
}
@@ -7070,7 +7032,7 @@ lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
{
desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
- if (vport->fc_flag & FC_FABRIC) {
+ if (test_bit(FC_FABRIC, &vport->fc_flag)) {
memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
sizeof(desc->port_names.wwnn));
@@ -7276,7 +7238,7 @@ lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
goto rdp_fail;
mbox->vport = rdp_context->ndlp->vport;
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
- mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
+ mbox->ctx_u.rdp = rdp_context;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
@@ -7328,7 +7290,7 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
mbox->in_ext_byte_len = DMP_SFF_PAGE_A0_SIZE;
mbox->out_ext_byte_len = DMP_SFF_PAGE_A0_SIZE;
mbox->mbox_offset_word = 5;
- mbox->ctx_buf = virt;
+ mbox->ext_buf = virt;
} else {
bf_set(lpfc_mbx_memory_dump_type3_length,
&mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE);
@@ -7336,7 +7298,6 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
}
mbox->vport = phba->pport;
- mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30);
if (rc == MBX_NOT_FINISHED) {
@@ -7345,7 +7306,7 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
}
if (phba->sli_rev == LPFC_SLI_REV4)
- mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
+ mp = mbox->ctx_buf;
else
mp = mpsave;
@@ -7388,7 +7349,7 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
mbox->in_ext_byte_len = DMP_SFF_PAGE_A2_SIZE;
mbox->out_ext_byte_len = DMP_SFF_PAGE_A2_SIZE;
mbox->mbox_offset_word = 5;
- mbox->ctx_buf = virt;
+ mbox->ext_buf = virt;
} else {
bf_set(lpfc_mbx_memory_dump_type3_length,
&mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE);
@@ -7396,7 +7357,6 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
}
- mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30);
if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) {
rc = 1;
@@ -7538,9 +7498,9 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
int rc;
mb = &pmb->u.mb;
- lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp;
+ lcb_context = pmb->ctx_u.lcb;
ndlp = lcb_context->ndlp;
- pmb->ctx_ndlp = NULL;
+ memset(&pmb->ctx_u, 0, sizeof(pmb->ctx_u));
pmb->ctx_buf = NULL;
shdr = (union lpfc_sli4_cfg_shdr *)
@@ -7680,7 +7640,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport,
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
LPFC_SLI4_MBX_EMBED);
- mbox->ctx_ndlp = (void *)lcb_context;
+ mbox->ctx_u.lcb = lcb_context;
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_els_lcb_rsp;
bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
@@ -7854,9 +7814,10 @@ lpfc_els_flush_rscn(struct lpfc_vport *vport)
lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
vport->fc_rscn_id_list[i] = NULL;
}
+ clear_bit(FC_RSCN_MODE, &vport->fc_flag);
+ clear_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
spin_lock_irq(shost->host_lock);
vport->fc_rscn_id_cnt = 0;
- vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
spin_unlock_irq(shost->host_lock);
lpfc_can_disctmo(vport);
/* Indicate we are done walking this fc_rscn_id_list */
@@ -7891,7 +7852,7 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
return 0;
/* If we are doing a FULL RSCN rediscovery, match everything */
- if (vport->fc_flag & FC_RSCN_DISCOVERY)
+ if (test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag))
return did;
spin_lock_irq(shost->host_lock);
@@ -8070,7 +8031,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
payload_len -= sizeof(uint32_t); /* take off word 0 */
/* RSCN received */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0214 RSCN received Data: x%x x%x x%x x%x\n",
+ "0214 RSCN received Data: x%lx x%x x%x x%x\n",
vport->fc_flag, payload_len, *lp,
vport->fc_rscn_id_cnt);
@@ -8082,10 +8043,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
FCH_EVT_RSCN, lp[i]);
/* Check if RSCN is coming from a direct-connected remote NPort */
- if (vport->fc_flag & FC_PT2PT) {
+ if (test_bit(FC_PT2PT, &vport->fc_flag)) {
/* If so, just ACC it, no other action needed for now */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "2024 pt2pt RSCN %08x Data: x%x x%x\n",
+ "2024 pt2pt RSCN %08x Data: x%lx x%x\n",
*lp, vport->fc_flag, payload_len);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
@@ -8129,7 +8090,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* ALL NPortIDs in RSCN are on HBA */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0219 Ignore RSCN "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%lx x%x x%x x%x\n",
vport->fc_flag, payload_len,
*lp, vport->fc_rscn_id_cnt);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8140,7 +8101,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
ndlp, NULL);
/* Restart disctmo if its already running */
- if (vport->fc_flag & FC_DISC_TMO) {
+ if (test_bit(FC_DISC_TMO, &vport->fc_flag)) {
tmo = ((phba->fc_ratov * 3) + 3);
mod_timer(&vport->fc_disctmo,
jiffies +
@@ -8153,8 +8114,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(shost->host_lock);
if (vport->fc_rscn_flush) {
/* Another thread is walking fc_rscn_id_list on this vport */
- vport->fc_flag |= FC_RSCN_DISCOVERY;
spin_unlock_irq(shost->host_lock);
+ set_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
/* Send back ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
return 0;
@@ -8167,24 +8128,23 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* If we are already processing an RSCN, save the received
* RSCN payload buffer, cmdiocb->cmd_dmabuf to process later.
*/
- if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag) ||
+ test_bit(FC_NDISC_ACTIVE, &vport->fc_flag)) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_RSCN_DEFERRED;
+ set_bit(FC_RSCN_DEFERRED, &vport->fc_flag);
/* Restart disctmo if its already running */
- if (vport->fc_flag & FC_DISC_TMO) {
+ if (test_bit(FC_DISC_TMO, &vport->fc_flag)) {
tmo = ((phba->fc_ratov * 3) + 3);
mod_timer(&vport->fc_disctmo,
jiffies + msecs_to_jiffies(1000 * tmo));
}
if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
- !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
- vport->fc_flag |= FC_RSCN_MODE;
- spin_unlock_irq(shost->host_lock);
+ !test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) {
+ set_bit(FC_RSCN_MODE, &vport->fc_flag);
if (rscn_cnt) {
cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
@@ -8206,16 +8166,15 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* Deferred RSCN */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0235 Deferred RSCN "
- "Data: x%x x%x x%x\n",
+ "Data: x%x x%lx x%x\n",
vport->fc_rscn_id_cnt, vport->fc_flag,
vport->port_state);
} else {
- vport->fc_flag |= FC_RSCN_DISCOVERY;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
/* ReDiscovery RSCN */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0234 ReDiscovery RSCN "
- "Data: x%x x%x x%x\n",
+ "Data: x%x x%lx x%x\n",
vport->fc_rscn_id_cnt, vport->fc_flag,
vport->port_state);
}
@@ -8231,9 +8190,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
"RCV RSCN: did:x%x/ste:x%x flg:x%x",
ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_RSCN_MODE;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_RSCN_MODE, &vport->fc_flag);
vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
/* Indicate we are done walking fc_rscn_id_list on this vport */
vport->fc_rscn_flush = 0;
@@ -8273,7 +8230,7 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
struct lpfc_hba *phba = vport->phba;
/* Ignore RSCN if the port is being torn down. */
- if (vport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &vport->load_flag)) {
lpfc_els_flush_rscn(vport);
return 0;
}
@@ -8283,7 +8240,7 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
/* RSCN processed */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n",
+ "0215 RSCN processed Data: x%lx x%x x%x x%x x%x x%x\n",
vport->fc_flag, 0, vport->fc_rscn_id_cnt,
vport->port_state, vport->num_disc_nodes,
vport->gidft_inp);
@@ -8372,7 +8329,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
LPFC_MBOXQ_t *mbox;
uint32_t cmd, did;
int rc;
- uint32_t fc_flag = 0;
+ unsigned long fc_flag = 0;
uint32_t port_state = 0;
/* Clear external loopback plug detected flag */
@@ -8442,9 +8399,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
} else if (rc > 0) { /* greater than */
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_PT2PT_PLOGI;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_PT2PT_PLOGI, &vport->fc_flag);
/* If we have the high WWPN we can assign our own
* myDID; otherwise, we have to WAIT for a PLOGI
@@ -8463,17 +8418,17 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(shost->host_lock);
fc_flag = vport->fc_flag;
port_state = vport->port_state;
- vport->fc_flag |= FC_PT2PT;
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
-
/* Acking an unsol FLOGI. Count 1 for link bounce
* work-around.
*/
vport->rcv_flogi_cnt++;
spin_unlock_irq(shost->host_lock);
+ set_bit(FC_PT2PT, &vport->fc_flag);
+ clear_bit(FC_FABRIC, &vport->fc_flag);
+ clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3311 Rcv Flogi PS x%x new PS x%x "
- "fc_flag x%x new fc_flag x%x\n",
+ "fc_flag x%lx new fc_flag x%lx\n",
port_state, vport->port_state,
fc_flag, vport->fc_flag);
@@ -8682,9 +8637,9 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb = &pmb->u.mb;
ndlp = pmb->ctx_ndlp;
- rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
- oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
- pmb->ctx_buf = NULL;
+ rxid = (uint16_t)(pmb->ctx_u.ox_rx_id & 0xffff);
+ oxid = (uint16_t)((pmb->ctx_u.ox_rx_id >> 16) & 0xffff);
+ memset(&pmb->ctx_u, 0, sizeof(pmb->ctx_u));
pmb->ctx_ndlp = NULL;
if (mb->mbxStatus) {
@@ -8788,8 +8743,7 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
if (mbox) {
lpfc_read_lnk_stat(phba, mbox);
- mbox->ctx_buf = (void *)((unsigned long)
- (ox_id << 16 | ctx));
+ mbox->ctx_u.ox_rx_id = ox_id << 16 | ctx;
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
if (!mbox->ctx_ndlp)
goto node_err;
@@ -9492,11 +9446,11 @@ lpfc_els_timeout(struct timer_list *t)
spin_lock_irqsave(&vport->work_port_lock, iflag);
tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
- if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
+ if (!tmo_posted && !test_bit(FC_UNLOADING, &vport->load_flag))
vport->work_port_events |= WORKER_ELS_TMO;
spin_unlock_irqrestore(&vport->work_port_lock, iflag);
- if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
+ if (!tmo_posted && !test_bit(FC_UNLOADING, &vport->load_flag))
lpfc_worker_wake_up(phba);
return;
}
@@ -9532,7 +9486,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
if (unlikely(!pring))
return;
- if (phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
return;
spin_lock_irq(&phba->hbalock);
@@ -9608,7 +9562,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
lpfc_issue_hb_tmo(phba);
if (!list_empty(&pring->txcmplq))
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
mod_timer(&vport->els_tmofunc,
jiffies + msecs_to_jiffies(1000 * timeout));
}
@@ -10116,6 +10070,9 @@ lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt);
cnt = be32_to_cpu(pc->pname_count);
+ /* Capture FPIN frequency */
+ phba->cgn_fpin_frequency = be32_to_cpu(pc->event_period);
+
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS,
"4684 FPIN Peer Congestion %s (x%x) "
"Duration %d mSecs "
@@ -10404,12 +10361,12 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
goto dropit;
/* Ignore traffic received during vport shutdown. */
- if (vport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &vport->load_flag))
goto dropit;
/* If NPort discovery is delayed drop incoming ELS */
- if ((vport->fc_flag & FC_DISC_DELAYED) &&
- (cmd != ELS_CMD_PLOGI))
+ if (test_bit(FC_DISC_DELAYED, &vport->fc_flag) &&
+ cmd != ELS_CMD_PLOGI)
goto dropit;
ndlp = lpfc_findnode_did(vport, did);
@@ -10453,14 +10410,14 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* ELS command <elsCmd> received from NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0112 ELS command x%x received from NPORT x%x "
- "refcnt %d Data: x%x x%x x%x x%x\n",
+ "refcnt %d Data: x%x x%lx x%x x%x\n",
cmd, did, kref_read(&ndlp->kref), vport->port_state,
vport->fc_flag, vport->fc_myDID, vport->fc_prevDID);
/* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */
if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
(cmd != ELS_CMD_FLOGI) &&
- !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) {
+ !((cmd == ELS_CMD_PLOGI) && test_bit(FC_PT2PT, &vport->fc_flag))) {
rjt_err = LSRJT_LOGICAL_BSY;
rjt_exp = LSEXP_NOTHING_MORE;
goto lsrjt;
@@ -10475,7 +10432,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvPLOGI++;
ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
if (phba->sli_rev == LPFC_SLI_REV4 &&
- (phba->pport->fc_flag & FC_PT2PT)) {
+ test_bit(FC_PT2PT, &phba->pport->fc_flag)) {
vport->fc_prevDID = vport->fc_myDID;
/* Our DID needs to be updated before registering
* the vfi. This is done in lpfc_rcv_plogi but
@@ -10493,15 +10450,15 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_send_els_event(vport, ndlp, payload);
/* If Nport discovery is delayed, reject PLOGIs */
- if (vport->fc_flag & FC_DISC_DELAYED) {
+ if (test_bit(FC_DISC_DELAYED, &vport->fc_flag)) {
rjt_err = LSRJT_UNABLE_TPC;
rjt_exp = LSEXP_NOTHING_MORE;
break;
}
if (vport->port_state < LPFC_DISC_AUTH) {
- if (!(phba->pport->fc_flag & FC_PT2PT) ||
- (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
+ if (!test_bit(FC_PT2PT, &phba->pport->fc_flag) ||
+ test_bit(FC_PT2PT_PLOGI, &phba->pport->fc_flag)) {
rjt_err = LSRJT_UNABLE_TPC;
rjt_exp = LSEXP_NOTHING_MORE;
break;
@@ -10527,7 +10484,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 * bounce the link. There is some discrepancy.
*/
if (vport->port_state >= LPFC_LOCAL_CFG_LINK &&
- vport->fc_flag & FC_PT2PT &&
+ test_bit(FC_PT2PT, &vport->fc_flag) &&
vport->rcv_flogi_cnt >= 1) {
rjt_err = LSRJT_LOGICAL_BSY;
rjt_exp = LSEXP_NOTHING_MORE;
@@ -10650,7 +10607,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvPRLI++;
if ((vport->port_state < LPFC_DISC_AUTH) &&
- (vport->fc_flag & FC_FABRIC)) {
+ test_bit(FC_FABRIC, &vport->fc_flag)) {
rjt_err = LSRJT_UNABLE_TPC;
rjt_exp = LSEXP_NOTHING_MORE;
break;
@@ -10825,7 +10782,7 @@ lsrjt:
return;
dropit:
- if (vport && !(vport->load_flag & FC_UNLOADING))
+ if (vport && !test_bit(FC_UNLOADING, &vport->load_flag))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0111 Dropping received ELS cmd "
"Data: x%x x%x x%x x%x\n",
@@ -10979,16 +10936,13 @@ void
lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
/*
* If lpfc_delay_discovery parameter is set and the clean address
 * bit is cleared and fc fabric parameters changed, delay FC NPort
* discovery.
*/
- spin_lock_irq(shost->host_lock);
- if (vport->fc_flag & FC_DISC_DELAYED) {
- spin_unlock_irq(shost->host_lock);
+ if (test_bit(FC_DISC_DELAYED, &vport->fc_flag)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"3334 Delay fc port discovery for %d secs\n",
phba->fc_ratov);
@@ -10996,7 +10950,6 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
return;
}
- spin_unlock_irq(shost->host_lock);
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (!ndlp) {
@@ -11025,8 +10978,8 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
}
if ((phba->cfg_enable_SmartSAN ||
- (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) &&
- (vport->load_flag & FC_ALLOW_FDMI))
+ phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) &&
+ test_bit(FC_ALLOW_FDMI, &vport->load_flag))
lpfc_start_fdmi(vport);
}
@@ -11046,14 +10999,12 @@ static void
lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
MAILBOX_t *mb = &pmb->u.mb;
int rc;
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
if (mb->mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -11070,16 +11021,13 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
case 0x9602: /* Link event since CLEAR_LA */
/* giving up on vport registration */
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_FABRIC, &vport->fc_flag);
+ clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
lpfc_can_disctmo(vport);
break;
/* If reg_vpi fail with invalid VPI status, re-init VPI */
case 0x20:
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
lpfc_init_vpi(phba, pmb, vport->vpi);
pmb->vport = vport;
pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
@@ -11100,13 +11048,11 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_sli4_unreg_all_rpis(vport);
lpfc_mbx_unreg_vpi(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
if (mb->mbxStatus == MBX_NOT_FINISHED)
break;
if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
- !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
+ !test_bit(FC_LOGO_RCVD_DID_CHNG, &vport->fc_flag)) {
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_issue_init_vfi(vport);
else
@@ -11167,7 +11113,6 @@ void
lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
LPFC_MBOXQ_t *mbox;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -11202,9 +11147,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
mbox_err_exit:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
return;
}
@@ -11319,7 +11262,6 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
struct lpfc_nodelist *np;
struct lpfc_nodelist *next_np;
@@ -11367,13 +11309,11 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_check_nlp_post_devloss(vport, ndlp);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
- vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
- vport->fc_flag |= FC_FABRIC;
+ clear_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag);
+ clear_bit(FC_VPORT_LOGO_RCVD, &vport->fc_flag);
+ set_bit(FC_FABRIC, &vport->fc_flag);
if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
- vport->fc_flag |= FC_PUBLIC_LOOP;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
vport->fc_myDID = ulp_word4 & Mask_DID;
lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
@@ -11390,7 +11330,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
memcpy(&vport->fabric_nodename, &sp->nodeName,
sizeof(struct lpfc_name));
if (fabric_param_changed &&
- !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+ !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) {
/* If our NportID changed, we need to ensure all
* remaining NPORTs get unreg_login'ed so we can
* issue unreg_vpi.
@@ -11411,15 +11351,13 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_sli4_unreg_all_rpis(vport);
lpfc_mbx_unreg_vpi(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
if (phba->sli_rev == LPFC_SLI_REV4)
- vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ set_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
else
- vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_LOGO_RCVD_DID_CHNG, &vport->fc_flag);
} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
- !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+ !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) {
/*
* Driver needs to re-reg VPI in order for f/w
* to update the MAC address.
@@ -11429,9 +11367,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
- if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
+ if (test_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag))
lpfc_issue_init_vpi(vport);
- else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+ else if (test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag))
lpfc_register_new_vport(phba, vport, ndlp);
else
lpfc_do_scr_ns_plogi(phba, vport);
@@ -11584,7 +11522,6 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_vport *vport = cmdiocb->vport;
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
u32 ulp_status, ulp_word4, did, tmo;
ndlp = cmdiocb->ndlp;
@@ -11615,10 +11552,8 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->fc4_xpt_flags);
if (ulp_status == IOSTAT_SUCCESS) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- vport->fc_flag &= ~FC_FABRIC;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
+ clear_bit(FC_FABRIC, &vport->fc_flag);
lpfc_can_disctmo(vport);
}
@@ -12076,7 +12011,7 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
* node and the vport is unloading, the xri aborted wcqe
* likely isn't coming back. Just release the sgl.
*/
- if ((vport->load_flag & FC_UNLOADING) &&
+ if (test_bit(FC_UNLOADING, &vport->load_flag) &&
ndlp->nlp_DID == Fabric_DID) {
list_del(&sglq_entry->list);
sglq_entry->state = SGL_FREED;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index f80bbc315f4ca..e42fa9c822b50 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -169,13 +169,13 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"3181 dev_loss_callbk x%06x, rport x%px flg x%x "
- "load_flag x%x refcnt %u state %d xpt x%x\n",
+ "load_flag x%lx refcnt %u state %d xpt x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
vport->load_flag, kref_read(&ndlp->kref),
ndlp->nlp_state, ndlp->fc4_xpt_flags);
/* Don't schedule a worker thread event if the vport is going down. */
- if (vport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &vport->load_flag)) {
spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->rport = NULL;
@@ -257,13 +257,15 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
if (evtp->evt_arg1) {
evtp->evt = LPFC_EVT_DEV_LOSS;
list_add_tail(&evtp->evt_listp, &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_worker_wake_up(phba);
+ return;
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
} else {
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"3188 worker thread is stopped %s x%06x, "
- " rport x%px flg x%x load_flag x%x refcnt "
+ " rport x%px flg x%x load_flag x%lx refcnt "
"%d\n", __func__, ndlp->nlp_DID,
ndlp->rport, ndlp->nlp_flag,
vport->load_flag, kref_read(&ndlp->kref));
@@ -275,10 +277,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RM);
}
-
}
-
- return;
}
/**
@@ -911,7 +910,7 @@ lpfc_work_list_done(struct lpfc_hba *phba)
free_evt = 0;
break;
case LPFC_EVT_RESET_HBA:
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
lpfc_reset_hba(phba);
break;
}
@@ -1149,7 +1148,6 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp, *next_ndlp;
@@ -1180,9 +1178,7 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_sli4_unreg_all_rpis(vport);
lpfc_mbx_unreg_vpi(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
}
}
@@ -1210,7 +1206,7 @@ void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
fc_host_post_event(shost, fc_get_event_number(),
@@ -1223,9 +1219,7 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
lpfc_port_link_failure(vport);
/* Stop delayed Nport discovery */
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_DISC_DELAYED;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_DISC_DELAYED, &vport->fc_flag);
del_timer_sync(&vport->delayed_disc_tmo);
if (phba->sli_rev == LPFC_SLI_REV4 &&
@@ -1240,7 +1234,7 @@ int
lpfc_linkdown(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_vport **vports;
LPFC_MBOXQ_t *mb;
int i;
@@ -1273,9 +1267,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
phba->sli4_hba.link_state.logical_speed =
LPFC_LINK_SPEED_UNKNOWN;
}
- spin_lock_irq(shost->host_lock);
- phba->pport->fc_flag &= ~FC_LBIT;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_LBIT, &phba->pport->fc_flag);
}
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL) {
@@ -1313,7 +1305,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
skip_unreg_did:
/* Setup myDID for link up if we are in pt2pt mode */
- if (phba->pport->fc_flag & FC_PT2PT) {
+ if (test_bit(FC_PT2PT, &phba->pport->fc_flag)) {
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mb) {
lpfc_config_link(phba, mb);
@@ -1324,8 +1316,9 @@ lpfc_linkdown(struct lpfc_hba *phba)
mempool_free(mb, phba->mbox_mem_pool);
}
}
+ clear_bit(FC_PT2PT, &phba->pport->fc_flag);
+ clear_bit(FC_PT2PT_PLOGI, &phba->pport->fc_flag);
spin_lock_irq(shost->host_lock);
- phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
phba->pport->rcv_flogi_cnt = 0;
spin_unlock_irq(shost->host_lock);
}
@@ -1364,7 +1357,7 @@ lpfc_linkup_port(struct lpfc_vport *vport)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
- if ((vport->load_flag & FC_UNLOADING) != 0)
+ if (test_bit(FC_UNLOADING, &vport->load_flag))
return;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -1376,19 +1369,22 @@ lpfc_linkup_port(struct lpfc_vport *vport)
(vport != phba->pport))
return;
- if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
- fc_host_post_event(shost, fc_get_event_number(),
- FCH_EVT_LINKUP, 0);
+ if (phba->defer_flogi_acc_flag) {
+ clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
+ clear_bit(FC_RSCN_MODE, &vport->fc_flag);
+ clear_bit(FC_NLP_MORE, &vport->fc_flag);
+ clear_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
+ } else {
+ clear_bit(FC_PT2PT, &vport->fc_flag);
+ clear_bit(FC_PT2PT_PLOGI, &vport->fc_flag);
+ clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
+ clear_bit(FC_RSCN_MODE, &vport->fc_flag);
+ clear_bit(FC_NLP_MORE, &vport->fc_flag);
+ clear_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
+ }
+ set_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
spin_lock_irq(shost->host_lock);
- if (phba->defer_flogi_acc_flag)
- vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_RSCN_MODE |
- FC_NLP_MORE | FC_RSCN_DISCOVERY);
- else
- vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI |
- FC_ABORT_DISCOVERY | FC_RSCN_MODE |
- FC_NLP_MORE | FC_RSCN_DISCOVERY);
- vport->fc_flag |= FC_NDISC_ACTIVE;
vport->fc_ns_retry = 0;
spin_unlock_irq(shost->host_lock);
lpfc_setup_fdmi_mask(vport);
@@ -1439,7 +1435,6 @@ static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_sli *psli = &phba->sli;
MAILBOX_t *mb = &pmb->u.mb;
uint32_t control;
@@ -1478,9 +1473,7 @@ out:
"0225 Device Discovery completes\n");
mempool_free(pmb, phba->mbox_mem_pool);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_ABORT_DISCOVERY;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
lpfc_can_disctmo(vport);
@@ -1517,8 +1510,8 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
- vport->fc_flag & FC_PUBLIC_LOOP &&
- !(vport->fc_flag & FC_LBIT)) {
+ test_bit(FC_PUBLIC_LOOP, &vport->fc_flag) &&
+ !test_bit(FC_LBIT, &vport->fc_flag)) {
/* Need to wait for FAN - use discovery timer
* for timeout. port_state is identically
* LPFC_LOCAL_CFG_LINK while waiting for FAN
@@ -1560,7 +1553,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_initial_flogi(vport);
}
} else {
- if (vport->fc_flag & FC_PT2PT)
+ if (test_bit(FC_PT2PT, &vport->fc_flag))
lpfc_disc_start(vport);
}
return;
@@ -1884,7 +1877,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
phba->hba_flag &= ~FCF_TS_INPROG;
if (phba->pport->port_state != LPFC_FLOGI &&
- phba->pport->fc_flag & FC_FABRIC) {
+ test_bit(FC_FABRIC, &phba->pport->fc_flag)) {
phba->hba_flag |= FCF_RR_INPROG;
spin_unlock_irq(&phba->hbalock);
lpfc_initial_flogi(phba->pport);
@@ -2742,7 +2735,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2836 New FCF matches in-use "
"FCF (x%x), port_state:x%x, "
- "fc_flag:x%x\n",
+ "fc_flag:x%lx\n",
phba->fcf.current_rec.fcf_indx,
phba->pport->port_state,
phba->pport->fc_flag);
@@ -3218,7 +3211,6 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (mboxq->u.mb.mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -3228,9 +3220,7 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
}
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
/* If this port is physical port or FDISC is done, do reg_vpi */
if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
@@ -3328,7 +3318,8 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
FC_VPORT_LINKDOWN);
continue;
}
- if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
+ if (test_bit(FC_VPORT_NEEDS_INIT_VPI,
+ &vports[i]->fc_flag)) {
lpfc_issue_init_vpi(vports[i]);
continue;
}
@@ -3380,17 +3371,17 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* Unless this was a VFI update and we are in PT2PT mode, then
* we should drop through to set the port state to ready.
*/
- if (vport->fc_flag & FC_VFI_REGISTERED)
+ if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag))
if (!(phba->sli_rev == LPFC_SLI_REV4 &&
- vport->fc_flag & FC_PT2PT))
+ test_bit(FC_PT2PT, &vport->fc_flag)))
goto out_free_mem;
/* The VPI is implicitly registered when the VFI is registered */
+ set_bit(FC_VFI_REGISTERED, &vport->fc_flag);
+ clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
+ clear_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
spin_lock_irq(shost->host_lock);
vport->vpi_state |= LPFC_VPI_REGISTERED;
- vport->fc_flag |= FC_VFI_REGISTERED;
- vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
- vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
/* In case SLI4 FC loopback test, we are ready */
@@ -3401,8 +3392,8 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
- "alpacnt:%d LinkState:%x topology:%x\n",
+ "3313 cmpl reg vfi port_state:%x fc_flag:%lx "
+ "myDid:%x alpacnt:%d LinkState:%x topology:%x\n",
vport->port_state, vport->fc_flag, vport->fc_myDID,
vport->phba->alpa_map[0],
phba->link_state, phba->fc_topology);
@@ -3412,14 +3403,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* For private loop or for NPort pt2pt,
* just start discovery and we are done.
*/
- if ((vport->fc_flag & FC_PT2PT) ||
- ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
- !(vport->fc_flag & FC_PUBLIC_LOOP))) {
+ if (test_bit(FC_PT2PT, &vport->fc_flag) ||
+ (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
+ !test_bit(FC_PUBLIC_LOOP, &vport->fc_flag))) {
/* Use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
/* Start discovery */
- if (vport->fc_flag & FC_PT2PT)
+ if (test_bit(FC_PT2PT, &vport->fc_flag))
vport->port_state = LPFC_VPORT_READY;
else
lpfc_disc_start(vport);
@@ -3437,7 +3428,7 @@ static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ struct lpfc_dmabuf *mp = pmb->ctx_buf;
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct serv_parm *sp = &vport->fc_sparam;
@@ -3496,11 +3487,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
{
struct lpfc_vport *vport = phba->pport;
LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
- struct Scsi_Host *shost;
int i;
int rc;
struct fcf_record *fcf_record;
- uint32_t fc_flags = 0;
unsigned long iflags;
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -3537,7 +3526,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
phba->link_flag &= ~(LS_NPIV_FAB_SUPPORTED | LS_CT_VEN_RPA);
- shost = lpfc_shost_from_vport(vport);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
@@ -3550,7 +3538,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
"topology\n");
/* Get Loop Map information */
if (bf_get(lpfc_mbx_read_top_il, la))
- fc_flags |= FC_LBIT;
+ set_bit(FC_LBIT, &vport->fc_flag);
vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
i = la->lilpBde64.tus.f.bdeSize;
@@ -3599,16 +3587,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
}
vport->fc_myDID = phba->fc_pref_DID;
- fc_flags |= FC_LBIT;
+ set_bit(FC_LBIT, &vport->fc_flag);
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
- if (fc_flags) {
- spin_lock_irqsave(shost->host_lock, iflags);
- vport->fc_flag |= fc_flags;
- spin_unlock_irqrestore(shost->host_lock, iflags);
- }
-
lpfc_linkup(phba);
sparam_mbox = NULL;
@@ -3751,13 +3733,11 @@ void
lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_mbx_read_top *la;
struct lpfc_sli_ring *pring;
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+ struct lpfc_dmabuf *mp = pmb->ctx_buf;
uint8_t attn_type;
- unsigned long iflags;
/* Unblock ELS traffic */
pring = lpfc_phba_elsring(phba);
@@ -3779,12 +3759,10 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memcpy(&phba->alpa_map[0], mp->virt, 128);
- spin_lock_irqsave(shost->host_lock, iflags);
if (bf_get(lpfc_mbx_read_top_pb, la))
- vport->fc_flag |= FC_BYPASSED_MODE;
+ set_bit(FC_BYPASSED_MODE, &vport->fc_flag);
else
- vport->fc_flag &= ~FC_BYPASSED_MODE;
- spin_unlock_irqrestore(shost->host_lock, iflags);
+ clear_bit(FC_BYPASSED_MODE, &vport->fc_flag);
if (phba->fc_eventTag <= la->eventTag) {
phba->fc_stat.LinkMultiEvent++;
@@ -3832,20 +3810,20 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1308 Link Down Event in loop back mode "
"x%x received "
- "Data: x%x x%x x%x\n",
+ "Data: x%x x%x x%lx\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag);
else if (attn_type == LPFC_ATT_UNEXP_WWPN)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1313 Link Down Unexpected FA WWPN Event x%x "
- "received Data: x%x x%x x%x x%x\n",
+ "received Data: x%x x%x x%lx x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
bf_get(lpfc_mbx_read_top_fa, la));
else
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1305 Link Down Event x%x received "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%lx x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
bf_get(lpfc_mbx_read_top_fa, la));
@@ -3872,8 +3850,8 @@ void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ struct lpfc_dmabuf *mp = pmb->ctx_buf;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
/* The driver calls the state machine with the pmb pointer
* but wants to make sure a stale ctx_buf isn't acted on.
@@ -3945,13 +3923,14 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
vport->vpi, mb->mbxStatus);
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
lpfc_workq_post_event(phba, NULL, NULL,
LPFC_EVT_RESET_HBA);
}
+
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
spin_lock_irq(shost->host_lock);
vport->vpi_state &= ~LPFC_VPI_REGISTERED;
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_cleanup_vports_rrqs(vport, NULL);
@@ -3959,7 +3938,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* This shost reference might have been taken at the beginning of
* lpfc_vport_delete()
*/
- if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
+ if (test_bit(FC_UNLOADING, &vport->load_flag) && vport != phba->pport)
scsi_host_put(shost);
}
@@ -4002,9 +3981,8 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
"0912 cmpl_reg_vpi, mb status = 0x%x\n",
mb->mbxStatus);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_FABRIC, &vport->fc_flag);
+ clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
vport->fc_myDID = 0;
if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
@@ -4017,19 +3995,17 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
goto out;
}
+ clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
spin_lock_irq(shost->host_lock);
vport->vpi_state |= LPFC_VPI_REGISTERED;
- vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
vport->num_disc_nodes = 0;
/* go thru NPR list and issue ELS PLOGIs */
- if (vport->fc_npr_cnt)
+ if (atomic_read(&vport->fc_npr_cnt))
lpfc_els_disc_plogi(vport);
if (!vport->num_disc_nodes) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
lpfc_can_disctmo(vport);
}
vport->port_state = LPFC_VPORT_READY;
@@ -4089,7 +4065,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
* the dump routine is a single-use construct.
*/
if (pmb->ctx_buf) {
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ mp = pmb->ctx_buf;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
pmb->ctx_buf = NULL;
@@ -4112,7 +4088,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
if (phba->sli_rev == LPFC_SLI_REV4) {
byte_count = pmb->u.mqe.un.mb_words[5];
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ mp = pmb->ctx_buf;
if (byte_count > sizeof(struct static_vport_info) -
offset)
byte_count = sizeof(struct static_vport_info)
@@ -4192,8 +4168,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
- struct Scsi_Host *shost;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
pmb->ctx_ndlp = NULL;
@@ -4232,14 +4207,8 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
/* when physical port receives logo, do not start
* vport discovery */
- if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
+ if (!test_and_clear_bit(FC_LOGO_RCVD_DID_CHNG, &vport->fc_flag))
lpfc_start_fdiscs(phba);
- else {
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ;
- spin_unlock_irq(shost->host_lock);
- }
lpfc_do_scr_ns_plogi(phba, vport);
}
@@ -4337,7 +4306,7 @@ void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
struct lpfc_vport *vport = pmb->vport;
int rc;
@@ -4461,7 +4430,7 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
pmb->ctx_ndlp = NULL;
if (mb->mbxStatus) {
@@ -4520,7 +4489,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
/* Don't add the remote port if unloading. */
- if (vport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &vport->load_flag))
return;
ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
@@ -4600,40 +4569,35 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- unsigned long iflags;
-
- spin_lock_irqsave(shost->host_lock, iflags);
switch (state) {
case NLP_STE_UNUSED_NODE:
- vport->fc_unused_cnt += count;
+ atomic_add(count, &vport->fc_unused_cnt);
break;
case NLP_STE_PLOGI_ISSUE:
- vport->fc_plogi_cnt += count;
+ atomic_add(count, &vport->fc_plogi_cnt);
break;
case NLP_STE_ADISC_ISSUE:
- vport->fc_adisc_cnt += count;
+ atomic_add(count, &vport->fc_adisc_cnt);
break;
case NLP_STE_REG_LOGIN_ISSUE:
- vport->fc_reglogin_cnt += count;
+ atomic_add(count, &vport->fc_reglogin_cnt);
break;
case NLP_STE_PRLI_ISSUE:
- vport->fc_prli_cnt += count;
+ atomic_add(count, &vport->fc_prli_cnt);
break;
case NLP_STE_UNMAPPED_NODE:
- vport->fc_unmap_cnt += count;
+ atomic_add(count, &vport->fc_unmap_cnt);
break;
case NLP_STE_MAPPED_NODE:
- vport->fc_map_cnt += count;
+ atomic_add(count, &vport->fc_map_cnt);
break;
case NLP_STE_NPR_NODE:
- if (vport->fc_npr_cnt == 0 && count == -1)
- vport->fc_npr_cnt = 0;
+ if (!atomic_read(&vport->fc_npr_cnt) && count == -1)
+ atomic_set(&vport->fc_npr_cnt, 0);
else
- vport->fc_npr_cnt += count;
+ atomic_add(count, &vport->fc_npr_cnt);
break;
}
- spin_unlock_irqrestore(shost->host_lock, iflags);
}
/* Register a node with backend if not already done */
@@ -4865,10 +4829,10 @@ void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int state)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
int old_state = ndlp->nlp_state;
int node_dropped = ndlp->nlp_flag & NLP_DROPPED;
char name1[16], name2[16];
+ unsigned long iflags;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0904 NPort state transition x%06x, %s -> %s\n",
@@ -4895,9 +4859,9 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
if (list_empty(&ndlp->nlp_listp)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
} else if (old_state)
lpfc_nlp_counters(vport, old_state, -1);
@@ -4909,26 +4873,26 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ unsigned long iflags;
if (list_empty(&ndlp->nlp_listp)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
}
}
void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ unsigned long iflags;
lpfc_cancel_retry_delay_tmo(vport, ndlp);
if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_del_init(&ndlp->nlp_listp);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
NLP_STE_UNUSED_NODE);
}
@@ -5003,7 +4967,6 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
uint32_t tmo;
@@ -5025,17 +4988,16 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
}
mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_DISC_TMO;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_DISC_TMO, &vport->fc_flag);
/* Start Discovery Timer state <hba_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0247 Start Discovery Timer state x%x "
"Data: x%x x%lx x%x x%x\n",
vport->port_state, tmo,
- (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
- vport->fc_adisc_cnt);
+ (unsigned long)&vport->fc_disctmo,
+ atomic_read(&vport->fc_plogi_cnt),
+ atomic_read(&vport->fc_adisc_cnt));
return;
}
@@ -5046,7 +5008,6 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
unsigned long iflags;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -5054,11 +5015,9 @@ lpfc_can_disctmo(struct lpfc_vport *vport)
vport->port_state, vport->fc_ns_retry, vport->fc_flag);
/* Turn off discovery timer if its running */
- if (vport->fc_flag & FC_DISC_TMO ||
+ if (test_bit(FC_DISC_TMO, &vport->fc_flag) ||
timer_pending(&vport->fc_disctmo)) {
- spin_lock_irqsave(shost->host_lock, iflags);
- vport->fc_flag &= ~FC_DISC_TMO;
- spin_unlock_irqrestore(shost->host_lock, iflags);
+ clear_bit(FC_DISC_TMO, &vport->fc_flag);
del_timer_sync(&vport->fc_disctmo);
spin_lock_irqsave(&vport->work_port_lock, iflags);
vport->work_port_events &= ~WORKER_DISC_TMO;
@@ -5068,9 +5027,10 @@ lpfc_can_disctmo(struct lpfc_vport *vport)
/* Cancel Discovery Timer state <hba_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0248 Cancel Discovery Timer state x%x "
- "Data: x%x x%x x%x\n",
+ "Data: x%lx x%x x%x\n",
vport->port_state, vport->fc_flag,
- vport->fc_plogi_cnt, vport->fc_adisc_cnt);
+ atomic_read(&vport->fc_plogi_cnt),
+ atomic_read(&vport->fc_adisc_cnt));
return 0;
}
@@ -5213,7 +5173,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_vport *vport = pmb->vport;
struct lpfc_nodelist *ndlp;
- ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp);
+ ndlp = pmb->ctx_ndlp;
if (!ndlp)
return;
lpfc_issue_els_logo(vport, ndlp, 0);
@@ -5274,13 +5234,13 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
} else if (phba->sli_rev == LPFC_SLI_REV4 &&
- (!(vport->load_flag & FC_UNLOADING)) &&
+ !test_bit(FC_UNLOADING, &vport->load_flag) &&
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
LPFC_SLI_INTF_IF_TYPE_2) &&
(kref_read(&ndlp->kref) > 0)) {
mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
} else {
- if (vport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &vport->load_flag)) {
if (phba->sli_rev == LPFC_SLI_REV4) {
spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->nlp_flag |= NLP_RELEASE_RPI;
@@ -5356,7 +5316,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
acc_plogi = 0;
if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
Fabric_DID_MASK) &&
- (!(vport->fc_flag & FC_OFFLINE_MODE)))
+ (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)))
ndlp->nlp_flag |= NLP_UNREG_INP;
lpfc_printf_vlog(vport, KERN_INFO,
@@ -5388,7 +5348,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* will issue a LOGO here and keep the rpi alive if
* not unloading.
*/
- if (!(vport->load_flag & FC_UNLOADING)) {
+ if (!test_bit(FC_UNLOADING, &vport->load_flag)) {
ndlp->nlp_flag &= ~NLP_UNREG_INP;
lpfc_issue_els_logo(vport, ndlp, 0);
ndlp->nlp_prev_state = ndlp->nlp_state;
@@ -5424,8 +5384,8 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
struct lpfc_vport **vports;
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost;
int i;
+ unsigned long iflags;
vports = lpfc_create_vport_work_array(phba);
if (!vports) {
@@ -5434,17 +5394,18 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
return;
}
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
/* The mempool_alloc might sleep */
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock,
+ iflags);
lpfc_unreg_rpi(vports[i], ndlp);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vports[i]->fc_nodes_list_lock,
+ iflags);
}
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags);
}
lpfc_destroy_vport_work_array(phba, vports);
}
@@ -5534,7 +5495,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if ((mb = phba->sli.mbox_active)) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
!(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
- (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+ (ndlp == mb->ctx_ndlp)) {
mb->ctx_ndlp = NULL;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
@@ -5545,7 +5506,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
(mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
- (ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp))
+ (ndlp != mb->ctx_ndlp))
continue;
mb->ctx_ndlp = NULL;
@@ -5555,7 +5516,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
!(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
- (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+ (ndlp == mb->ctx_ndlp)) {
list_del(&mb->list);
lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
@@ -5686,12 +5647,11 @@ lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
struct lpfc_nodelist *
lpfc_findnode_mapped(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
uint32_t data1;
unsigned long iflags;
- spin_lock_irqsave(shost->host_lock, iflags);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
@@ -5700,7 +5660,8 @@ lpfc_findnode_mapped(struct lpfc_vport *vport)
((uint32_t)ndlp->nlp_xri << 16) |
((uint32_t)ndlp->nlp_type << 8) |
((uint32_t)ndlp->nlp_rpi & 0xff));
- spin_unlock_irqrestore(shost->host_lock, iflags);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock,
+ iflags);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE,
"2025 FIND node DID MAPPED "
"Data: x%px x%x x%x x%x x%px\n",
@@ -5710,7 +5671,7 @@ lpfc_findnode_mapped(struct lpfc_vport *vport)
return ndlp;
}
}
- spin_unlock_irqrestore(shost->host_lock, iflags);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
/* FIND node did <did> NOT FOUND */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -5727,7 +5688,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
if (!ndlp) {
if (vport->phba->nvmet_support)
return NULL;
- if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
lpfc_rscn_payload_check(vport, did) == 0)
return NULL;
ndlp = lpfc_nlp_init(vport, did);
@@ -5737,7 +5698,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"6453 Setup New Node 2B_DISC x%x "
- "Data:x%x x%x x%x\n",
+ "Data:x%x x%x x%lx\n",
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, vport->fc_flag);
@@ -5751,8 +5712,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
* The goal is to allow the target to reset its state and clear
* pending IO in preparation for the initiator to recover.
*/
- if ((vport->fc_flag & FC_RSCN_MODE) &&
- !(vport->fc_flag & FC_NDISC_ACTIVE)) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
+ !test_bit(FC_NDISC_ACTIVE, &vport->fc_flag)) {
if (lpfc_rscn_payload_check(vport, did)) {
/* Since this node is marked for discovery,
@@ -5762,7 +5723,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"6455 Setup RSCN Node 2B_DISC x%x "
- "Data:x%x x%x x%x\n",
+ "Data:x%x x%x x%lx\n",
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, vport->fc_flag);
@@ -5774,14 +5735,6 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
if (vport->phba->nvmet_support)
return ndlp;
- /* If we've already received a PLOGI from this NPort
- * we don't need to try to discover it again.
- */
- if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
- !(ndlp->nlp_type &
- (NLP_FCP_TARGET | NLP_NVME_TARGET)))
- return NULL;
-
if (ndlp->nlp_state > NLP_STE_UNUSED_NODE &&
ndlp->nlp_state < NLP_STE_PRLI_ISSUE) {
lpfc_disc_state_machine(vport, ndlp, NULL,
@@ -5794,7 +5747,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"6456 Skip Setup RSCN Node x%x "
- "Data:x%x x%x x%x\n",
+ "Data:x%x x%x x%lx\n",
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, vport->fc_flag);
ndlp = NULL;
@@ -5802,7 +5755,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"6457 Setup Active Node 2B_DISC x%x "
- "Data:x%x x%x x%x\n",
+ "Data:x%x x%x x%lx\n",
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, vport->fc_flag);
@@ -5930,7 +5883,6 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
void
lpfc_disc_start(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
uint32_t num_sent;
uint32_t clear_la_pending;
@@ -5958,9 +5910,11 @@ lpfc_disc_start(struct lpfc_vport *vport)
/* Start Discovery state <hba_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0202 Start Discovery port state x%x "
- "flg x%x Data: x%x x%x x%x\n",
- vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
- vport->fc_adisc_cnt, vport->fc_npr_cnt);
+ "flg x%lx Data: x%x x%x x%x\n",
+ vport->port_state, vport->fc_flag,
+ atomic_read(&vport->fc_plogi_cnt),
+ atomic_read(&vport->fc_adisc_cnt),
+ atomic_read(&vport->fc_npr_cnt));
/* First do ADISCs - if any */
num_sent = lpfc_els_disc_adisc(vport);
@@ -5970,8 +5924,8 @@ lpfc_disc_start(struct lpfc_vport *vport)
/* Register the VPI for SLI3, NPIV only. */
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
- !(vport->fc_flag & FC_PT2PT) &&
- !(vport->fc_flag & FC_RSCN_MODE) &&
+ !test_bit(FC_PT2PT, &vport->fc_flag) &&
+ !test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
(phba->sli_rev < LPFC_SLI_REV4)) {
lpfc_issue_clear_la(phba, vport);
lpfc_issue_reg_vpi(phba, vport);
@@ -5986,16 +5940,14 @@ lpfc_disc_start(struct lpfc_vport *vport)
/* If we get here, there is nothing to ADISC */
lpfc_issue_clear_la(phba, vport);
- if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+ if (!test_bit(FC_ABORT_DISCOVERY, &vport->fc_flag)) {
vport->num_disc_nodes = 0;
/* go thru NPR nodes and issue ELS PLOGIs */
- if (vport->fc_npr_cnt)
+ if (atomic_read(&vport->fc_npr_cnt))
lpfc_els_disc_plogi(vport);
if (!vport->num_disc_nodes) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
lpfc_can_disctmo(vport);
}
}
@@ -6007,18 +5959,17 @@ lpfc_disc_start(struct lpfc_vport *vport)
if (num_sent)
return;
- if (vport->fc_flag & FC_RSCN_MODE) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
/* Check to see if more RSCNs came in while we
* were processing this one.
*/
- if ((vport->fc_rscn_id_cnt == 0) &&
- (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_RSCN_MODE;
- spin_unlock_irq(shost->host_lock);
+ if (vport->fc_rscn_id_cnt == 0 &&
+ !test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) {
+ clear_bit(FC_RSCN_MODE, &vport->fc_flag);
lpfc_can_disctmo(vport);
- } else
+ } else {
lpfc_els_handle_rscn(vport);
+ }
}
}
return;
@@ -6085,7 +6036,8 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
struct lpfc_nodelist *ndlp, *next_ndlp;
struct lpfc_hba *phba = vport->phba;
- if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
+ if (atomic_read(&vport->fc_plogi_cnt) ||
+ atomic_read(&vport->fc_adisc_cnt)) {
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
@@ -6166,20 +6118,15 @@ lpfc_disc_timeout(struct timer_list *t)
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_nodelist *ndlp, *next_ndlp;
LPFC_MBOXQ_t *initlinkmbox;
int rc, clrlaerr = 0;
- if (!(vport->fc_flag & FC_DISC_TMO))
+ if (!test_and_clear_bit(FC_DISC_TMO, &vport->fc_flag))
return;
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_DISC_TMO;
- spin_unlock_irq(shost->host_lock);
-
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"disc timeout: state:x%x rtry:x%x flg:x%x",
vport->port_state, vport->fc_ns_retry, vport->fc_flag);
@@ -6333,7 +6280,7 @@ restart_disc:
break;
case LPFC_VPORT_READY:
- if (vport->fc_flag & FC_RSCN_MODE) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0231 RSCN timeout Data: x%x "
@@ -6409,7 +6356,7 @@ void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
struct lpfc_vport *vport = pmb->vport;
pmb->ctx_ndlp = NULL;
@@ -6750,7 +6697,7 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
struct lpfc_vport **vports;
int i, ret = 0;
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost;
+ unsigned long iflags;
vports = lpfc_create_vport_work_array(phba);
@@ -6759,24 +6706,23 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
return 1;
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- spin_lock_irq(shost->host_lock);
/*
* IF the CVL_RCVD bit is not set then we have sent the
* flogi.
* If dev_loss fires while we are waiting we do not want to
* unreg the fcf.
*/
- if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
- spin_unlock_irq(shost->host_lock);
+ if (!test_bit(FC_VPORT_CVL_RCVD, &vports[i]->fc_flag)) {
ret = 1;
goto out;
}
+ spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
if (ndlp->rport &&
(ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
ret = 1;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock,
+ iflags);
goto out;
} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
ret = 1;
@@ -6788,7 +6734,7 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
ndlp->nlp_flag);
}
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags);
}
out:
lpfc_destroy_vport_work_array(phba, vports);
@@ -6806,7 +6752,6 @@ void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (mboxq->u.mb.mbxStatus) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -6814,9 +6759,7 @@ lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
"HBA state x%x\n",
mboxq->u.mb.mbxStatus, vport->port_state);
}
- spin_lock_irq(shost->host_lock);
- phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VFI_REGISTERED, &phba->pport->fc_flag);
mempool_free(mboxq, phba->mbox_mem_pool);
return;
}
@@ -6880,9 +6823,9 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
lpfc_mbx_unreg_vpi(vports[i]);
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
- vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_INIT_VPI, &vports[i]->fc_flag);
}
lpfc_destroy_vport_work_array(phba, vports);
if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
@@ -6895,9 +6838,9 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
lpfc_mbx_unreg_vpi(phba->pport);
shost = lpfc_shost_from_vport(phba->pport);
spin_lock_irq(shost->host_lock);
- phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_INIT_VPI, &phba->pport->fc_flag);
}
/* Cleanup any outstanding ELS commands */
@@ -6981,8 +6924,8 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
* If driver is not unloading, check if there is any other
* FCF record that can be used for discovery.
*/
- if ((phba->pport->load_flag & FC_UNLOADING) ||
- (phba->link_state < LPFC_LINK_UP))
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag) ||
+ phba->link_state < LPFC_LINK_UP)
return;
/* This is considered as the initial FCF discovery scan */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 5d4f9f27084d6..367e6b066d42f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -4069,7 +4069,6 @@ struct lpfc_mcqe {
#define LPFC_TRAILER_CODE_GRP5 0x5
#define LPFC_TRAILER_CODE_FC 0x10
#define LPFC_TRAILER_CODE_SLI 0x11
-#define LPFC_TRAILER_CODE_CMSTAT 0x13
};
struct lpfc_acqe_link {
@@ -4339,6 +4338,7 @@ struct lpfc_acqe_sli {
#define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE 0x10
#define LPFC_SLI_EVENT_TYPE_CGN_SIGNAL 0x11
#define LPFC_SLI_EVENT_TYPE_RD_SIGNAL 0x12
+#define LPFC_SLI_EVENT_TYPE_RESET_CM_STATS 0x13
};
/*
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 70bcee64bc8c6..f7a0aa3625f4e 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -94,6 +94,7 @@ static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
+static void lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba);
static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);
static struct scsi_transport_template *lpfc_transport_template = NULL;
@@ -459,7 +460,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
return -EIO;
}
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ mp = pmb->ctx_buf;
/* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
* longer needed. Prevent unintended ctx_buf access as the mbox is
@@ -891,7 +892,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
readl(phba->HCregaddr); /* flush */
}
- if (phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
lpfc_cleanup_discovery_resources(phba->pport);
else {
vports = lpfc_create_vport_work_array(phba);
@@ -1231,13 +1232,13 @@ lpfc_rrq_timeout(struct timer_list *t)
phba = from_timer(phba, t, rrq_tmr);
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
phba->hba_flag |= HBA_RRQ_ACTIVE;
else
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
lpfc_worker_wake_up(phba);
}
@@ -1268,9 +1269,9 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
/* Check and reset heart-beat timer if necessary */
mempool_free(pmboxq, phba->mbox_mem_pool);
- if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
- !(phba->link_state == LPFC_HBA_ERROR) &&
- !(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag) &&
+ !(phba->link_state == LPFC_HBA_ERROR) &&
+ !test_bit(FC_UNLOADING, &phba->pport->load_flag))
mod_timer(&phba->hb_tmofunc,
jiffies +
msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
@@ -1297,11 +1298,11 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
u32 i, idle_percent;
u64 wall, wall_idle, diff_wall, diff_idle, busy_time;
- if (phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
return;
if (phba->link_state == LPFC_HBA_ERROR ||
- phba->pport->fc_flag & FC_OFFLINE_MODE ||
+ test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag) ||
phba->cmf_active_mode != LPFC_CFG_OFF)
goto requeue;
@@ -1358,11 +1359,12 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
uint32_t usdelay;
int i;
- if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
+ if (!phba->cfg_auto_imax ||
+ test_bit(FC_UNLOADING, &phba->pport->load_flag))
return;
if (phba->link_state == LPFC_HBA_ERROR ||
- phba->pport->fc_flag & FC_OFFLINE_MODE)
+ test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
goto requeue;
ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
@@ -1533,9 +1535,9 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
}
lpfc_destroy_vport_work_array(phba, vports);
- if ((phba->link_state == LPFC_HBA_ERROR) ||
- (phba->pport->load_flag & FC_UNLOADING) ||
- (phba->pport->fc_flag & FC_OFFLINE_MODE))
+ if (phba->link_state == LPFC_HBA_ERROR ||
+ test_bit(FC_UNLOADING, &phba->pport->load_flag) ||
+ test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
return;
if (phba->elsbuf_cnt &&
@@ -1736,7 +1738,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
break;
}
/* If driver is unloading let the worker thread continue */
- if (phba->pport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
phba->work_hs = 0;
break;
}
@@ -1747,7 +1749,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
* first write to the host attention register clear the
* host status register.
*/
- if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
+ if (!phba->work_hs && !test_bit(FC_UNLOADING, &phba->pport->load_flag))
phba->work_hs = old_host_status & ~HS_FFER1;
spin_lock_irq(&phba->hbalock);
@@ -2215,7 +2217,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_all_cmd(phba);
psli->slistat.link_event++;
- lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
+ lpfc_read_topology(phba, pmb, pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = vport;
/* Block ELS IOCBs until we have processed this mbox command */
@@ -3085,7 +3087,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
* The flush here is only when the pci slot
* is offline.
*/
- if (vport->load_flag & FC_UNLOADING &&
+ if (test_bit(FC_UNLOADING, &vport->load_flag) &&
pci_channel_offline(phba->pcidev))
lpfc_sli_flush_io_rings(vport->phba);
@@ -3411,7 +3413,7 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
return;
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- if (vports[i]->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &vports[i]->load_flag))
continue;
list_for_each_entry_safe(ndlp, next_ndlp,
@@ -3611,7 +3613,7 @@ static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
lpfc_destroy_expedite_pool(phba);
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
lpfc_sli_flush_io_rings(phba);
hwq_count = phba->cfg_hdw_queue;
@@ -3697,7 +3699,7 @@ lpfc_online(struct lpfc_hba *phba)
return 0;
vport = phba->pport;
- if (!(vport->fc_flag & FC_OFFLINE_MODE))
+ if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
return 0;
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -3737,20 +3739,18 @@ lpfc_online(struct lpfc_hba *phba)
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- struct Scsi_Host *shost;
- shost = lpfc_shost_from_vport(vports[i]);
- spin_lock_irq(shost->host_lock);
- vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
+ clear_bit(FC_OFFLINE_MODE, &vports[i]->fc_flag);
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
- vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ set_bit(FC_VPORT_NEEDS_REG_VPI,
+ &vports[i]->fc_flag);
if (phba->sli_rev == LPFC_SLI_REV4) {
- vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ set_bit(FC_VPORT_NEEDS_INIT_VPI,
+ &vports[i]->fc_flag);
if ((vpis_cleared) &&
(vports[i]->port_type !=
LPFC_PHYSICAL_PORT))
vports[i]->vpi = 0;
}
- spin_unlock_irq(shost->host_lock);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -3805,7 +3805,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
int offline;
bool hba_pci_err;
- if (vport->fc_flag & FC_OFFLINE_MODE)
+ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
return;
lpfc_block_mgmt_io(phba, mbx_action);
@@ -3819,16 +3819,15 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- if (vports[i]->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &vports[i]->load_flag))
continue;
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
- vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vports[i]->fc_flag);
+ clear_bit(FC_VFI_REGISTERED, &vports[i]->fc_flag);
- shost = lpfc_shost_from_vport(vports[i]);
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
@@ -3910,7 +3909,7 @@ lpfc_offline(struct lpfc_hba *phba)
struct lpfc_vport **vports;
int i;
- if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ if (test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
return;
/* stop port and all timers associated with this hba */
@@ -3941,14 +3940,14 @@ lpfc_offline(struct lpfc_hba *phba)
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
vports[i]->work_port_events = 0;
- vports[i]->fc_flag |= FC_OFFLINE_MODE;
spin_unlock_irq(shost->host_lock);
+ set_bit(FC_OFFLINE_MODE, &vports[i]->fc_flag);
}
lpfc_destroy_vport_work_array(phba, vports);
/* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
* in hba_unset
*/
- if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ if (test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
__lpfc_cpuhp_remove(phba);
if (phba->cfg_xri_rebalancing)
@@ -4766,9 +4765,17 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
vport = (struct lpfc_vport *) shost->hostdata;
vport->phba = phba;
- vport->load_flag |= FC_LOADING;
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ set_bit(FC_LOADING, &vport->load_flag);
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
vport->fc_rscn_flush = 0;
+ atomic_set(&vport->fc_plogi_cnt, 0);
+ atomic_set(&vport->fc_adisc_cnt, 0);
+ atomic_set(&vport->fc_reglogin_cnt, 0);
+ atomic_set(&vport->fc_prli_cnt, 0);
+ atomic_set(&vport->fc_unmap_cnt, 0);
+ atomic_set(&vport->fc_map_cnt, 0);
+ atomic_set(&vport->fc_npr_cnt, 0);
+ atomic_set(&vport->fc_unused_cnt, 0);
lpfc_get_vport_cfgparam(vport);
/* Adjust value in vport */
@@ -4824,6 +4831,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
/* Initialize all internally managed lists. */
INIT_LIST_HEAD(&vport->fc_nodes);
+ spin_lock_init(&vport->fc_nodes_list_lock);
INIT_LIST_HEAD(&vport->rcv_buffer_list);
spin_lock_init(&vport->work_port_lock);
@@ -4921,7 +4929,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
spin_lock_irq(shost->host_lock);
- if (vport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &vport->load_flag)) {
stat = 1;
goto finished;
}
@@ -4945,7 +4953,8 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
goto finished;
if (vport->num_disc_nodes || vport->fc_prli_sent)
goto finished;
- if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
+ if (!atomic_read(&vport->fc_map_cnt) &&
+ time < msecs_to_jiffies(2 * 1000))
goto finished;
if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
goto finished;
@@ -5034,9 +5043,7 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
fc_host_active_fc4s(shost)[7] = 1;
fc_host_max_npiv_vports(shost) = phba->max_vpi;
- spin_lock_irq(shost->host_lock);
- vport->load_flag &= ~FC_LOADING;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_LOADING, &vport->load_flag);
}
/**
@@ -5172,7 +5179,7 @@ lpfc_vmid_poll(struct timer_list *t)
/* Is the vmid inactivity timer enabled */
if (phba->pport->vmid_inactivity_timeout ||
- phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
+ test_bit(FC_DEREGISTER_ALL_APP_ID, &phba->pport->load_flag)) {
wake_up = 1;
phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
}
@@ -5447,7 +5454,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
phba->sli.slistat.link_event++;
/* Create lpfc_handle_latt mailbox command from link ACQE */
- lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
+ lpfc_read_topology(phba, pmb, pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = phba->pport;
@@ -6340,7 +6347,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
phba->sli.slistat.link_event++;
/* Create lpfc_handle_latt mailbox command from link ACQE */
- lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
+ lpfc_read_topology(phba, pmb, pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = phba->pport;
@@ -6636,6 +6643,11 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
acqe_sli->event_data1, acqe_sli->event_data2,
acqe_sli->event_data3);
break;
+ case LPFC_SLI_EVENT_TYPE_RESET_CM_STATS:
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "2905 Reset CM statistics\n");
+ lpfc_sli4_async_cmstat_evt(phba);
+ break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3193 Unrecognized SLI event, type: 0x%x",
@@ -6689,9 +6701,7 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
return NULL;
lpfc_linkdown_port(vport);
lpfc_cleanup_pending_mbox(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_CVL_RCVD;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag);
return ndlp;
}
@@ -6888,9 +6898,9 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
if (vports) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL;
i++) {
- if ((!(vports[i]->fc_flag &
- FC_VPORT_CVL_RCVD)) &&
- (vports[i]->port_state > LPFC_FDISC)) {
+ if (!test_bit(FC_VPORT_CVL_RCVD,
+ &vports[i]->fc_flag) &&
+ vports[i]->port_state > LPFC_FDISC) {
active_vlink_present = 1;
break;
}
@@ -6903,8 +6913,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
* If we are here first then vport_delete is going to wait
* for discovery to complete.
*/
- if (!(vport->load_flag & FC_UNLOADING) &&
- active_vlink_present) {
+ if (!test_bit(FC_UNLOADING, &vport->load_flag) &&
+ active_vlink_present) {
/*
* If there are other active VLinks present,
* re-instantiate the Vlink using FDISC.
@@ -7346,9 +7356,6 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
case LPFC_TRAILER_CODE_SLI:
lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
break;
- case LPFC_TRAILER_CODE_CMSTAT:
- lpfc_sli4_async_cmstat_evt(phba);
- break;
default:
lpfc_printf_log(phba, KERN_ERR,
LOG_TRACE_EVENT,
@@ -7698,6 +7705,9 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
"NVME" : " "),
(phba->nvmet_support ? "NVMET" : " "));
+ /* ras_fwlog state */
+ spin_lock_init(&phba->ras_fwlog_lock);
+
/* Initialize the IO buffer list used by driver for SLI3 SCSI */
spin_lock_init(&phba->scsi_buf_list_get_lock);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
@@ -9085,7 +9095,7 @@ lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
- vport->load_flag |= FC_ALLOW_FDMI;
+ set_bit(FC_ALLOW_FDMI, &vport->load_flag);
if (phba->cfg_enable_SmartSAN ||
phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
/* Setup appropriate attribute masks */
@@ -12771,7 +12781,8 @@ static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
{
- if (phba->pport && (phba->pport->fc_flag & FC_OFFLINE_MODE))
+ if (phba->pport &&
+ test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
return;
__lpfc_cpuhp_remove(phba);
@@ -12796,7 +12807,7 @@ static void lpfc_cpuhp_add(struct lpfc_hba *phba)
static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
{
- if (phba->pport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
*retval = -EAGAIN;
return true;
}
@@ -13047,7 +13058,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
rc = request_threaded_irq(eqhdl->irq,
&lpfc_sli4_hba_intr_handler,
&lpfc_sli4_hba_intr_handler_th,
- IRQF_ONESHOT, name, eqhdl);
+ 0, name, eqhdl);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
@@ -13316,12 +13327,7 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
- struct lpfc_vport *vport = phba->pport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
- spin_lock_irq(shost->host_lock);
- vport->load_flag |= FC_UNLOADING;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_UNLOADING, &phba->pport->load_flag);
kfree(phba->vpi_bmask);
kfree(phba->vpi_ids);
@@ -14113,9 +14119,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
struct lpfc_hba *phba = vport->phba;
int i;
- spin_lock_irq(&phba->hbalock);
- vport->load_flag |= FC_UNLOADING;
- spin_unlock_irq(&phba->hbalock);
+ set_bit(FC_UNLOADING, &vport->load_flag);
lpfc_free_sysfs_attr(vport);
@@ -14958,9 +14962,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
int i;
/* Mark the device unloading flag */
- spin_lock_irq(&phba->hbalock);
- vport->load_flag |= FC_UNLOADING;
- spin_unlock_irq(&phba->hbalock);
+ set_bit(FC_UNLOADING, &vport->load_flag);
if (phba->cgn_i)
lpfc_unreg_congestion_buf(phba);
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index cadcd16494e19..e98f1c2b22202 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -102,7 +102,7 @@ lpfc_mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
{
struct lpfc_dmabuf *mp;
- mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
+ mp = mbox->ctx_buf;
mbox->ctx_buf = NULL;
/* Release the generic BPL buffer memory. */
@@ -204,10 +204,8 @@ lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
uint16_t region_id)
{
MAILBOX_t *mb;
- void *ctx;
mb = &pmb->u.mb;
- ctx = pmb->ctx_buf;
/* Setup to dump VPD region */
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -219,7 +217,6 @@ lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
mb->un.varDmp.co = 0;
mb->un.varDmp.resp_offset = 0;
- pmb->ctx_buf = ctx;
mb->mbxOwner = OWN_HOST;
return;
}
@@ -236,11 +233,8 @@ void
lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb;
- void *ctx;
mb = &pmb->u.mb;
- /* Save context so that we can restore after memset */
- ctx = pmb->ctx_buf;
/* Setup to dump VPD region */
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
@@ -254,7 +248,6 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
mb->un.varDmp.co = 0;
mb->un.varDmp.resp_offset = 0;
- pmb->ctx_buf = ctx;
return;
}
@@ -372,7 +365,7 @@ lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
/* Save address for later completion and set the owner to host so that
* the FW knows this mailbox is available for processing.
*/
- pmb->ctx_buf = (uint8_t *)mp;
+ pmb->ctx_buf = mp;
mb->mbxOwner = OWN_HOST;
return (0);
}
@@ -949,7 +942,7 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
* Set the re-reg VPI bit for f/w to update the MAC address.
*/
if ((phba->sli_rev == LPFC_SLI_REV4) &&
- !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
+ !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag))
mb->un.varRegVpi.upd = 1;
mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
@@ -1816,7 +1809,7 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
}
/* Reinitialize the context pointers to avoid stale usage. */
mbox->ctx_buf = NULL;
- mbox->context3 = NULL;
+ memset(&mbox->ctx_u, 0, sizeof(mbox->ctx_u));
kfree(mbox->sge_array);
/* Finally, free the mailbox command itself */
mempool_free(mbox, phba->mbox_mem_pool);
@@ -2244,7 +2237,7 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
/* Only FC supports upd bit */
if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
- (vport->fc_flag & FC_VFI_REGISTERED) &&
+ test_bit(FC_VFI_REGISTERED, &vport->fc_flag) &&
(!phba->fc_topology_changed))
bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
@@ -2271,8 +2264,8 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
"3134 Register VFI, mydid:x%x, fcfi:%d, "
- " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
- " port_state:x%x topology chg:%d bbscn_fabric :%d\n",
+ "vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%lx "
+ "port_state:x%x topology chg:%d bbscn_fabric :%d\n",
vport->fc_myDID,
phba->fcf.fcfi,
phba->sli4_hba.vfi_ids[vport->vfi],
@@ -2366,8 +2359,7 @@ lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
MAILBOX_t *mb;
int rc = FAILURE;
- struct lpfc_rdp_context *rdp_context =
- (struct lpfc_rdp_context *)(mboxq->ctx_ndlp);
+ struct lpfc_rdp_context *rdp_context = mboxq->ctx_u.rdp;
mb = &mboxq->u.mb;
if (mb->mbxStatus)
@@ -2385,9 +2377,8 @@ mbx_failed:
static void
lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
- struct lpfc_rdp_context *rdp_context =
- (struct lpfc_rdp_context *)(mbox->ctx_ndlp);
+ struct lpfc_dmabuf *mp = mbox->ctx_buf;
+ struct lpfc_rdp_context *rdp_context = mbox->ctx_u.rdp;
if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
goto error_mbox_free;
@@ -2401,7 +2392,7 @@ lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
/* Save the dma buffer for cleanup in the final completion. */
mbox->ctx_buf = mp;
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
- mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
+ mbox->ctx_u.rdp = rdp_context;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
goto error_mbox_free;
@@ -2416,9 +2407,8 @@ void
lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
int rc;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
- struct lpfc_rdp_context *rdp_context =
- (struct lpfc_rdp_context *)(mbox->ctx_ndlp);
+ struct lpfc_dmabuf *mp = mbox->ctx_buf;
+ struct lpfc_rdp_context *rdp_context = mbox->ctx_u.rdp;
if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
goto error;
@@ -2448,7 +2438,7 @@ lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a2;
- mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
+ mbox->ctx_u.rdp = rdp_context;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
goto error;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d9074929fbab8..c4172791c2675 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -300,7 +300,7 @@ lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
int rc;
ndlp = login_mbox->ctx_ndlp;
- save_iocb = login_mbox->context3;
+ save_iocb = login_mbox->ctx_u.save_iocb;
if (mb->mbxStatus == MBX_SUCCESS) {
/* Now that REG_RPI completed successfully,
@@ -382,7 +382,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* PLOGI chkparm OK */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0114 PLOGI chkparm OK Data: x%x x%x x%x "
- "x%x x%x x%x\n",
+ "x%x x%x x%lx\n",
ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
ndlp->nlp_rpi, vport->port_state,
vport->fc_flag);
@@ -434,7 +434,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
if (nlp_portwwn != 0 &&
nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0143 PLOGI recv'd from DID: x%x "
"WWPN changed: old %llx new %llx\n",
ndlp->nlp_DID,
@@ -464,8 +464,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
save_iocb = NULL;
/* Check for Nport to NPort pt2pt protocol */
- if ((vport->fc_flag & FC_PT2PT) &&
- !(vport->fc_flag & FC_PT2PT_PLOGI)) {
+ if (test_bit(FC_PT2PT, &vport->fc_flag) &&
+ !test_bit(FC_PT2PT_PLOGI, &vport->fc_flag)) {
/* rcv'ed PLOGI decides what our NPortId will be */
if (phba->sli_rev == LPFC_SLI_REV4) {
vport->fc_myDID = bf_get(els_rsp64_sid,
@@ -580,7 +580,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* This only applies to a fabric environment.
*/
if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
- (vport->fc_flag & FC_FABRIC)) {
+ test_bit(FC_FABRIC, &vport->fc_flag)) {
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp);
}
@@ -640,7 +640,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!login_mbox->ctx_ndlp)
goto out;
- login_mbox->context3 = save_iocb; /* For PLOGI ACC */
+ login_mbox->ctx_u.save_iocb = save_iocb; /* For PLOGI ACC */
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
@@ -682,8 +682,8 @@ lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct lpfc_nodelist *ndlp;
uint32_t cmd;
- elsiocb = (struct lpfc_iocbq *)mboxq->ctx_buf;
- ndlp = (struct lpfc_nodelist *)mboxq->ctx_ndlp;
+ elsiocb = mboxq->ctx_u.save_iocb;
+ ndlp = mboxq->ctx_ndlp;
vport = mboxq->vport;
cmd = elsiocb->drvrTimeout;
@@ -748,8 +748,10 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Save the ELS cmd */
elsiocb->drvrTimeout = cmd;
- lpfc_sli4_resume_rpi(ndlp,
- lpfc_mbx_cmpl_resume_rpi, elsiocb);
+ if (lpfc_sli4_resume_rpi(ndlp,
+ lpfc_mbx_cmpl_resume_rpi,
+ elsiocb))
+ kfree(elsiocb);
goto out;
}
}
@@ -802,7 +804,6 @@ static int
lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_vport **vports;
int i, active_vlink_present = 0 ;
@@ -835,19 +836,17 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (ndlp->nlp_DID == Fabric_DID) {
if (vport->port_state <= LPFC_FDISC ||
- vport->fc_flag & FC_PT2PT)
+ test_bit(FC_PT2PT, &vport->fc_flag))
goto out;
lpfc_linkdown_port(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_LOGO_RCVD;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_LOGO_RCVD, &vport->fc_flag);
vports = lpfc_create_vport_work_array(phba);
if (vports) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL;
i++) {
- if ((!(vports[i]->fc_flag &
- FC_VPORT_LOGO_RCVD)) &&
- (vports[i]->port_state > LPFC_FDISC)) {
+ if (!test_bit(FC_VPORT_LOGO_RCVD,
+ &vports[i]->fc_flag) &&
+ vports[i]->port_state > LPFC_FDISC) {
active_vlink_present = 1;
break;
}
@@ -860,8 +859,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* If we are here first then vport_delete is going to wait
* for discovery to complete.
*/
- if (!(vport->load_flag & FC_UNLOADING) &&
- active_vlink_present) {
+ if (!test_bit(FC_UNLOADING, &vport->load_flag) &&
+ active_vlink_present) {
/*
* If there are other active VLinks present,
* re-instantiate the Vlink using FDISC.
@@ -874,23 +873,21 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
vport->port_state = LPFC_FDISC;
} else {
- spin_lock_irq(shost->host_lock);
- phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_LOGO_RCVD_DID_CHNG, &phba->pport->fc_flag);
lpfc_retry_pport_discovery(phba);
}
} else {
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NODE | LOG_ELS | LOG_DISCOVERY,
"3203 LOGO recover nport x%06x state x%x "
- "ntype x%x fc_flag x%x\n",
+ "ntype x%x fc_flag x%lx\n",
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_type, vport->fc_flag);
/* Special cases for rports that recover post LOGO. */
if ((!(ndlp->nlp_type == NLP_FABRIC) &&
(ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) ||
- vport->fc_flag & FC_PT2PT)) ||
+ test_bit(FC_PT2PT, &vport->fc_flag))) ||
(ndlp->nlp_state >= NLP_STE_ADISC_ISSUE ||
ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) {
mod_timer(&ndlp->nlp_delayfunc,
@@ -1055,9 +1052,10 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
return 0;
}
- if (!(vport->fc_flag & FC_PT2PT)) {
+ if (!test_bit(FC_PT2PT, &vport->fc_flag)) {
/* Check config parameter use-adisc or FCP-2 */
- if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
+ if (vport->cfg_use_adisc &&
+ (test_bit(FC_RSCN_MODE, &vport->fc_flag) ||
((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
(ndlp->nlp_type & NLP_FCP_TARGET)))) {
spin_lock_irq(&ndlp->lock);
@@ -1121,7 +1119,7 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
}
if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
- (!(vport->fc_flag & FC_OFFLINE_MODE)))
+ (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)))
ndlp->nlp_flag |= NLP_UNREG_INP;
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -1147,9 +1145,8 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba = vport->phba;
/* Release the RPI if reglogin completing */
- if (!(phba->pport->load_flag & FC_UNLOADING) &&
- (evt == NLP_EVT_CMPL_REG_LOGIN) &&
- (!pmb->u.mb.mbxStatus)) {
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
+ evt == NLP_EVT_CMPL_REG_LOGIN && !pmb->u.mb.mbxStatus) {
rpi = pmb->u.mb.un.varWords[0];
lpfc_release_rpi(phba, vport, ndlp, rpi);
}
@@ -1244,7 +1241,6 @@ static uint32_t
lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb = arg;
struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
@@ -1279,9 +1275,7 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Check if there are more PLOGIs to be sent */
lpfc_more_plogi(vport);
if (vport->num_disc_nodes == 0) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
lpfc_can_disctmo(vport);
lpfc_end_rscn(vport);
}
@@ -1421,8 +1415,8 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
- if ((vport->fc_flag & FC_PT2PT) &&
- (vport->fc_flag & FC_PT2PT_PLOGI)) {
+ if (test_bit(FC_PT2PT, &vport->fc_flag) &&
+ test_bit(FC_PT2PT_PLOGI, &vport->fc_flag)) {
ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
if (sp->cmn.edtovResolution) {
/* E_D_TOV ticks are in nanoseconds */
@@ -1576,8 +1570,8 @@ lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
phba = vport->phba;
/* Release the RPI */
- if (!(phba->pport->load_flag & FC_UNLOADING) &&
- !mb->mbxStatus) {
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
+ !mb->mbxStatus) {
rpi = pmb->u.mb.un.varWords[0];
lpfc_release_rpi(phba, vport, ndlp, rpi);
}
@@ -1613,7 +1607,7 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
- if (vport->fc_flag & FC_RSCN_DEFERRED)
+ if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
return ndlp->nlp_state;
/* software abort outstanding PLOGI */
@@ -1799,7 +1793,7 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
- if (vport->fc_flag & FC_RSCN_DEFERRED)
+ if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
return ndlp->nlp_state;
/* software abort outstanding ADISC */
@@ -1881,7 +1875,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
if ((mb = phba->sli.mbox_active)) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
- (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+ (ndlp == mb->ctx_ndlp)) {
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
lpfc_nlp_put(ndlp);
mb->ctx_ndlp = NULL;
@@ -1892,7 +1886,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
- (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+ (ndlp == mb->ctx_ndlp)) {
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
lpfc_nlp_put(ndlp);
list_del(&mb->list);
@@ -1989,13 +1983,13 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
* know what PRLI to send yet. Figure that out now and
* call PRLI depending on the outcome.
*/
- if (vport->fc_flag & FC_PT2PT) {
+ if (test_bit(FC_PT2PT, &vport->fc_flag)) {
/* If we are pt2pt, there is no Fabric to determine
* the FC4 type of the remote nport. So if NVME
* is configured try it.
*/
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
- if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) &&
+ if ((!test_bit(FC_PT2PT_NO_NVME, &vport->fc_flag)) &&
(vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
@@ -2027,7 +2021,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
}
} else {
- if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support)
+ if (test_bit(FC_PT2PT, &vport->fc_flag) && phba->nvmet_support)
phba->targetport->port_id = vport->fc_myDID;
/* Only Fabric ports should transition. NVME target
@@ -2068,7 +2062,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
- if (vport->fc_flag & FC_RSCN_DEFERRED)
+ if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
return ndlp->nlp_state;
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
@@ -2384,7 +2378,7 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
- if (vport->fc_flag & FC_RSCN_DEFERRED)
+ if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
return ndlp->nlp_state;
/* software abort outstanding PRLI */
@@ -2828,13 +2822,10 @@ static uint32_t
lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
/* For the fabric port just clear the fc flags. */
if (ndlp->nlp_DID == Fabric_DID) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_FABRIC, &vport->fc_flag);
+ clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
}
lpfc_unreg_rpi(vport, ndlp);
return ndlp->nlp_state;
@@ -2906,7 +2897,7 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
- if (vport->fc_flag & FC_RSCN_DEFERRED)
+ if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
return ndlp->nlp_state;
lpfc_cancel_retry_delay_tmo(vport, ndlp);
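
The lpfc_rcv_padisc() hunk above now frees elsiocb when lpfc_sli4_resume_rpi() fails, i.e. ownership of the buffer transfers only on a successful submit. A toy, self-contained sketch of that ownership rule follows; submit_work(), demo_ctx and the failure condition are invented for illustration.

#include <stdlib.h>

struct demo_ctx { int payload; };

/* Pretend queueing fails when payload is negative. */
static int submit_work(struct demo_ctx *ctx)
{
        if (ctx->payload < 0)
                return -1;      /* not queued: caller still owns ctx */
        free(ctx);              /* queued: demo "completion" consumes ctx */
        return 0;
}

int main(void)
{
        struct demo_ctx *ctx;

        ctx = malloc(sizeof(*ctx));
        if (!ctx)
                return 1;
        ctx->payload = 7;
        if (submit_work(ctx))           /* succeeds: ctx owned by the demo completion */
                free(ctx);

        ctx = malloc(sizeof(*ctx));
        if (!ctx)
                return 1;
        ctx->payload = -1;
        if (submit_work(ctx))           /* fails: mirrors kfree(elsiocb) on error */
                free(ctx);
        return 0;
}
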
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 128fc1bab5865..c5792eaf3f64c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -94,7 +94,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
- if (!vport || vport->load_flag & FC_UNLOADING ||
+ if (!vport || test_bit(FC_UNLOADING, &vport->load_flag) ||
vport->phba->hba_flag & HBA_IOQ_FLUSH)
return -ENODEV;
@@ -674,7 +674,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
return -EINVAL;
vport = lport->vport;
- if (vport->load_flag & FC_UNLOADING ||
+ if (test_bit(FC_UNLOADING, &vport->load_flag) ||
vport->phba->hba_flag & HBA_IOQ_FLUSH)
return -ENODEV;
@@ -765,7 +765,7 @@ lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport,
struct lpfc_nvme_lport *lport;
int rc;
- if (axchg->phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &axchg->phba->pport->load_flag))
return -ENODEV;
lport = (struct lpfc_nvme_lport *)localport->private;
@@ -810,7 +810,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
return;
vport = lport->vport;
- if (vport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &vport->load_flag))
return;
ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
@@ -1567,7 +1567,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
phba = vport->phba;
- if ((unlikely(vport->load_flag & FC_UNLOADING)) ||
+ if ((unlikely(test_bit(FC_UNLOADING, &vport->load_flag))) ||
phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6124 Fail IO, Driver unload\n");
@@ -1886,7 +1886,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
if (unlikely(!freqpriv))
return;
- if (vport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &vport->load_flag))
return;
/* Announce entry to new IO submit field. */
@@ -2263,7 +2263,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
if (!vport->localport ||
test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
phba->link_state == LPFC_HBA_ERROR ||
- vport->load_flag & FC_UNLOADING)
+ test_bit(FC_UNLOADING, &vport->load_flag))
return;
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -2616,16 +2616,16 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* No concern about the role change on the nvme remoteport.
* The transport will update it.
*/
- spin_lock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT;
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_unlock_irq(&ndlp->lock);
/* Don't let the host nvme transport keep sending keep-alives
* on this remoteport. Vport is unloading, no recovery. The
* return values is ignored. The upcall is a courtesy to the
* transport.
*/
- if (vport->load_flag & FC_UNLOADING ||
+ if (test_bit(FC_UNLOADING, &vport->load_flag) ||
unlikely(vport->phba->link_state == LPFC_HBA_ERROR))
(void)nvme_fc_set_remoteport_devloss(remoteport, 0);
@@ -2644,7 +2644,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
"port_state x%x\n",
ret, remoteport->port_state);
- if (vport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &vport->load_flag)) {
/* Only 1 thread can drop the initial node
* reference. Check if another thread has set
* NLP_DROPPED.
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 425328d9c2d80..561ced5503c63 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -872,7 +872,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
struct ulp_bde64 bpl;
int rc;
- if (phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
return -ENODEV;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
@@ -984,7 +984,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
int rc;
- if (axchg->phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &axchg->phba->pport->load_flag))
return -ENODEV;
rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);
@@ -1022,7 +1022,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
int id;
#endif
- if (phba->pport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
rc = -ENODEV;
goto aerr;
}
@@ -1145,7 +1145,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
struct lpfc_queue *wq;
unsigned long flags;
- if (phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
return;
if (!ctxp->hdwq)
@@ -1317,7 +1317,7 @@ lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
return -EINVAL;
phba = lpfc_nvmet->phba;
- if (phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
return -EINVAL;
hstate = atomic_read(&lpfc_nvmet->state);
@@ -1353,7 +1353,7 @@ lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
int ret;
phba = lpfc_nvmet->phba;
- if (phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
return;
ndlp = (struct lpfc_nodelist *)hosthandle;
@@ -1586,7 +1586,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
wqe = &nvmewqe->wqe;
/* Initialize WQE */
- memset(wqe, 0, sizeof(union lpfc_wqe));
+ memset(wqe, 0, sizeof(*wqe));
ctx_buf->iocbq->cmd_dmabuf = NULL;
spin_lock(&phba->sli4_hba.sgl_list_lock);
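
The memset(wqe, 0, sizeof(*wqe)) change above sizes the clear from the pointed-to object rather than from a spelled-out type. A tiny standalone sketch of the idiom, using a made-up demo_wqe type:

#include <stdio.h>
#include <string.h>

struct demo_wqe { unsigned int words[16]; };    /* invented stand-in type */

int main(void)
{
        struct demo_wqe w = { .words = { 0xff } };
        struct demo_wqe *wqe = &w;

        /* Sized from the object, so it stays correct if the type changes. */
        memset(wqe, 0, sizeof(*wqe));
        printf("word0=%u cleared=%zu bytes\n", wqe->words[0], sizeof(*wqe));
        return 0;
}
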
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index bf879d81846b6..4a6e5223a2241 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -167,11 +167,10 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
struct Scsi_Host *shost;
struct scsi_device *sdev;
unsigned long new_queue_depth;
- unsigned long num_rsrc_err, num_cmd_success;
+ unsigned long num_rsrc_err;
int i;
num_rsrc_err = atomic_read(&phba->num_rsrc_err);
- num_cmd_success = atomic_read(&phba->num_cmd_success);
/*
* The error and success command counters are global per
@@ -186,20 +185,16 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
shost_for_each_device(sdev, shost) {
- new_queue_depth =
- sdev->queue_depth * num_rsrc_err /
- (num_rsrc_err + num_cmd_success);
- if (!new_queue_depth)
- new_queue_depth = sdev->queue_depth - 1;
+ if (num_rsrc_err >= sdev->queue_depth)
+ new_queue_depth = 1;
else
new_queue_depth = sdev->queue_depth -
- new_queue_depth;
+ num_rsrc_err;
scsi_change_queue_depth(sdev, new_queue_depth);
}
}
lpfc_destroy_vport_work_array(phba, vports);
atomic_set(&phba->num_rsrc_err, 0);
- atomic_set(&phba->num_cmd_success, 0);
}
/**
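
For the ramp-down hunk above: the old code scaled the cut by num_rsrc_err / (num_rsrc_err + num_cmd_success), while the new code simply subtracts num_rsrc_err from the current queue depth and floors the result at 1 (which is why num_cmd_success can be dropped). A runnable comparison of the two formulas, with invented sample numbers:

#include <stdio.h>

/* Old scheme: scale the cut by the error/success ratio. */
static unsigned long old_ramp(unsigned long qd, unsigned long err,
                              unsigned long success)
{
        unsigned long cut = qd * err / (err + success);

        if (!cut)
                return qd - 1;
        return qd - cut;
}

/* New scheme: subtract the resource-error count, floor at 1. */
static unsigned long new_ramp(unsigned long qd, unsigned long err)
{
        return err >= qd ? 1 : qd - err;
}

int main(void)
{
        unsigned long qd = 32, err = 5, success = 100;  /* invented sample */

        printf("old=%lu new=%lu\n",
               old_ramp(qd, err, success), new_ramp(qd, err));
        return 0;
}
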
@@ -2728,14 +2723,14 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
sgde = scsi_sglist(cmd);
blksize = scsi_prot_interval(cmd);
data_src = (uint8_t *)sg_virt(sgde);
- data_len = sgde->length;
+ data_len = sg_dma_len(sgde);
if ((data_len & (blksize - 1)) == 0)
chk_guard = 1;
src = (struct scsi_dif_tuple *)sg_virt(sgpe);
start_ref_tag = scsi_prot_ref_tag(cmd);
start_app_tag = src->app_tag;
- len = sgpe->length;
+ len = sg_dma_len(sgpe);
while (src && protsegcnt) {
while (len) {
@@ -2800,7 +2795,7 @@ skipit:
goto out;
data_src = (uint8_t *)sg_virt(sgde);
- data_len = sgde->length;
+ data_len = sg_dma_len(sgde);
if ((data_len & (blksize - 1)) == 0)
chk_guard = 1;
}
@@ -2810,7 +2805,7 @@ skipit:
sgpe = sg_next(sgpe);
if (sgpe) {
src = (struct scsi_dif_tuple *)sg_virt(sgpe);
- len = sgpe->length;
+ len = sg_dma_len(sgpe);
} else {
src = NULL;
}
@@ -5336,16 +5331,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
}
err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
} else {
- if (vport->phba->cfg_enable_bg) {
- lpfc_printf_vlog(vport,
- KERN_INFO, LOG_SCSI_CMD,
- "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
- "x%x reftag x%x cnt %u pt %x\n",
- cmnd->cmnd[0],
- scsi_prot_ref_tag(cmnd),
- scsi_logical_block_count(cmnd),
- (cmnd->cmnd[1]>>5));
- }
err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 706985358c6a0..a028e008dd1ee 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1036,7 +1036,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
if ((!list_empty(&phba->active_rrq_list)) &&
- (!(phba->pport->load_flag & FC_UNLOADING)))
+ (!test_bit(FC_UNLOADING, &phba->pport->load_flag)))
mod_timer(&phba->rrq_tmr, next_time);
list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
list_del(&rrq->list);
@@ -1180,12 +1180,12 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
return -EINVAL;
spin_lock_irqsave(&phba->hbalock, iflags);
- if (phba->pport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
goto out;
}
- if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
+ if (ndlp->vport && test_bit(FC_UNLOADING, &ndlp->vport->load_flag))
goto out;
if (!ndlp->active_rrqs_xri_bitmap)
@@ -1217,9 +1217,9 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
empty = list_empty(&phba->active_rrq_list);
list_add_tail(&rrq->list, &phba->active_rrq_list);
phba->hba_flag |= HBA_RRQ_ACTIVE;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
if (empty)
lpfc_worker_wake_up(phba);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
return 0;
out:
spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -1732,7 +1732,7 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
(ulp_command != CMD_ABORT_XRI_CN) &&
(ulp_command != CMD_CLOSE_XRI_CN)) {
BUG_ON(!piocb->vport);
- if (!(piocb->vport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &piocb->vport->load_flag))
mod_timer(&piocb->vport->els_tmofunc,
jiffies +
msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
@@ -2830,7 +2830,7 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
*/
pmboxq->mbox_flag |= LPFC_MBX_WAKE;
spin_lock_irqsave(&phba->hbalock, drvr_flag);
- pmbox_done = (struct completion *)pmboxq->context3;
+ pmbox_done = pmboxq->ctx_u.mbox_wait;
if (pmbox_done)
complete(pmbox_done);
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -2882,10 +2882,10 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* If a REG_LOGIN succeeded after node is destroyed or node
* is in re-discovery driver need to cleanup the RPI.
*/
- if (!(phba->pport->load_flag & FC_UNLOADING) &&
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
!pmb->u.mb.mbxStatus) {
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ mp = pmb->ctx_buf;
if (mp) {
pmb->ctx_buf = NULL;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -2904,22 +2904,22 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
- !(phba->pport->load_flag & FC_UNLOADING) &&
+ !test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
!pmb->u.mb.mbxStatus) {
shost = lpfc_shost_from_vport(vport);
spin_lock_irq(shost->host_lock);
vport->vpi_state |= LPFC_VPI_REGISTERED;
- vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
}
if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ ndlp = pmb->ctx_ndlp;
lpfc_nlp_put(ndlp);
}
if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ ndlp = pmb->ctx_ndlp;
/* Check to see if there are any deferred events to process */
if (ndlp) {
@@ -2927,7 +2927,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport,
KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
"1438 UNREG cmpl deferred mbox x%x "
- "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
+ "on NPort x%x Data: x%x x%x x%px x%lx x%x\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_defer_did,
ndlp, vport->load_flag, kref_read(&ndlp->kref));
@@ -2952,7 +2952,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* This nlp_put pairs with lpfc_sli4_resume_rpi */
if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ ndlp = pmb->ctx_ndlp;
lpfc_nlp_put(ndlp);
}
@@ -3235,7 +3235,7 @@ lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
oxid, size, sid);
- if (phba->pport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
failwhy = "Driver Unloading";
} else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
failwhy = "NVME FC4 Disabled";
@@ -3940,7 +3940,7 @@ void lpfc_poll_eratt(struct timer_list *t)
if (!(phba->hba_flag & HBA_SETUP))
return;
- if (phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
return;
/* Here we will also keep track of interrupts per sec of the hba */
@@ -5819,7 +5819,7 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
goto out_free_mboxq;
}
- mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
+ mp = mboxq->ctx_buf;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
@@ -6849,9 +6849,9 @@ lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
{
struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
ras_fwlog->state = INACTIVE;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
/* Disable FW logging to host memory */
writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
@@ -6894,9 +6894,9 @@ lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
ras_fwlog->lwpd.virt = NULL;
}
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
ras_fwlog->state = INACTIVE;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
}
/**
@@ -6998,9 +6998,9 @@ lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
goto disable_ras;
}
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
ras_fwlog->state = ACTIVE;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
mempool_free(pmb, phba->mbox_mem_pool);
return;
@@ -7032,9 +7032,9 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
int rc = 0;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
ras_fwlog->state = INACTIVE;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
phba->cfg_ras_fwlog_buffsize);
@@ -7095,9 +7095,9 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->ras_fwlog_lock);
ras_fwlog->state = REG_INPROGRESS;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->ras_fwlog_lock);
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
@@ -7582,7 +7582,7 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
struct lpfc_sglq *sglq_entry = NULL;
struct lpfc_sglq *sglq_entry_next = NULL;
struct lpfc_sglq *sglq_entry_first = NULL;
- int status, total_cnt;
+ int status = 0, total_cnt;
int post_cnt = 0, num_posted = 0, block_cnt = 0;
int last_xritag = NO_XRI;
LIST_HEAD(prep_sgl_list);
@@ -8766,7 +8766,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
mboxq->vport = vport;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
+ mp = mboxq->ctx_buf;
if (rc == MBX_SUCCESS) {
memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
rc = 0;
@@ -9548,8 +9548,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
}
/* Copy the mailbox extension data */
- if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
- lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
+ if (pmbox->in_ext_byte_len && pmbox->ext_buf) {
+ lpfc_sli_pcimem_bcopy(pmbox->ext_buf,
(uint8_t *)phba->mbox_ext,
pmbox->in_ext_byte_len);
}
@@ -9562,10 +9562,10 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
= MAILBOX_HBA_EXT_OFFSET;
/* Copy the mailbox extension data */
- if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
+ if (pmbox->in_ext_byte_len && pmbox->ext_buf)
lpfc_memcpy_to_slim(phba->MBslimaddr +
MAILBOX_HBA_EXT_OFFSET,
- pmbox->ctx_buf, pmbox->in_ext_byte_len);
+ pmbox->ext_buf, pmbox->in_ext_byte_len);
if (mbx->mbxCommand == MBX_CONFIG_PORT)
/* copy command data into host mbox for cmpl */
@@ -9688,9 +9688,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
- if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
+ if (pmbox->out_ext_byte_len && pmbox->ext_buf) {
lpfc_sli_pcimem_bcopy(phba->mbox_ext,
- pmbox->ctx_buf,
+ pmbox->ext_buf,
pmbox->out_ext_byte_len);
}
} else {
@@ -9698,9 +9698,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
- if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
+ if (pmbox->out_ext_byte_len && pmbox->ext_buf) {
lpfc_memcpy_from_slim(
- pmbox->ctx_buf,
+ pmbox->ext_buf,
phba->MBslimaddr +
MAILBOX_HBA_EXT_OFFSET,
pmbox->out_ext_byte_len);
@@ -10888,7 +10888,7 @@ __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
* all ELS pt2pt protocol traffic as well.
*/
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
- (vport->fc_flag & FC_PT2PT)) {
+ test_bit(FC_PT2PT, &vport->fc_flag)) {
if (expect_rsp) {
bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
@@ -11373,18 +11373,18 @@ lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
unsigned long iflags;
struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
+ /* Hold a node reference for outstanding queued work */
+ if (!lpfc_nlp_get(ndlp))
+ return;
+
spin_lock_irqsave(&phba->hbalock, iflags);
if (!list_empty(&evtp->evt_listp)) {
spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_nlp_put(ndlp);
return;
}
- /* Incrementing the reference count until the queued work is done. */
- evtp->evt_arg1 = lpfc_nlp_get(ndlp);
- if (!evtp->evt_arg1) {
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- return;
- }
+ evtp->evt_arg1 = ndlp;
evtp->evt = LPFC_EVT_RECOVER_PORT;
list_add_tail(&evtp->evt_listp, &phba->work_list);
spin_unlock_irqrestore(&phba->hbalock, iflags);
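
The lpfc_sli_post_recovery_event() hunk above reorders the reference counting so the node reference is taken before the queued-work check, and dropped again if an event is already pending. A standalone model of that ordering, with invented names and a plain integer standing in for the kref:

#include <stdio.h>

struct demo_node {
        int refcount;
        int queued;
};

static int demo_get(struct demo_node *n)
{
        if (n->refcount <= 0)
                return 0;               /* node already going away */
        n->refcount++;
        return 1;
}

static void demo_put(struct demo_node *n)
{
        n->refcount--;
}

static void demo_post_event(struct demo_node *n)
{
        if (!demo_get(n))
                return;                 /* could not pin the node */
        if (n->queued) {                /* work already outstanding */
                demo_put(n);            /* give back the extra reference */
                return;
        }
        n->queued = 1;                  /* reference now owned by the work item */
}

int main(void)
{
        struct demo_node n = { .refcount = 1, .queued = 0 };

        demo_post_event(&n);
        demo_post_event(&n);            /* second call sees it already queued */
        printf("refcount=%d queued=%d\n", n.refcount, n.queued);
        return 0;
}
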
@@ -12428,7 +12428,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* If we're unloading, don't abort iocb on the ELS ring, but change
* the callback so that nothing happens when it finishes.
*/
- if ((vport->load_flag & FC_UNLOADING) &&
+ if (test_bit(FC_UNLOADING, &vport->load_flag) &&
pring->ringno == LPFC_ELS_RING) {
if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
@@ -13262,9 +13262,9 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
/* setup wake call as IOCB callback */
pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
- /* setup context3 field to pass wait_queue pointer to wake function */
+ /* setup ctx_u field to pass wait_queue pointer to wake function */
init_completion(&mbox_done);
- pmboxq->context3 = &mbox_done;
+ pmboxq->ctx_u.mbox_wait = &mbox_done;
/* now issue the command */
retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
@@ -13272,7 +13272,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
msecs_to_jiffies(timeout * 1000));
spin_lock_irqsave(&phba->hbalock, flag);
- pmboxq->context3 = NULL;
+ pmboxq->ctx_u.mbox_wait = NULL;
/*
* if LPFC_MBX_WAKE flag is set the mailbox is completed
* else do not free the resources.
@@ -13813,10 +13813,10 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
lpfc_sli_pcimem_bcopy(mbox, pmbox,
MAILBOX_CMD_SIZE);
if (pmb->out_ext_byte_len &&
- pmb->ctx_buf)
+ pmb->ext_buf)
lpfc_sli_pcimem_bcopy(
phba->mbox_ext,
- pmb->ctx_buf,
+ pmb->ext_buf,
pmb->out_ext_byte_len);
}
if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
@@ -13830,10 +13830,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
pmbox->un.varWords[0], 0);
if (!pmbox->mbxStatus) {
- mp = (struct lpfc_dmabuf *)
- (pmb->ctx_buf);
- ndlp = (struct lpfc_nodelist *)
- pmb->ctx_ndlp;
+ mp = pmb->ctx_buf;
+ ndlp = pmb->ctx_ndlp;
/* Reg_LOGIN of dflt RPI was
* successful. new lets get
@@ -14340,8 +14338,8 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
mcqe_status,
pmbox->un.varWords[0], 0);
if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
- mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ mp = pmb->ctx_buf;
+ ndlp = pmb->ctx_ndlp;
/* Reg_LOGIN of dflt RPI was successful. Mark the
* node as having an UNREG_LOGIN in progress to stop
@@ -14658,7 +14656,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Handle MDS Loopback frames */
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
lpfc_sli4_handle_mds_loopback(phba->pport,
dma_buf);
else
@@ -18552,8 +18550,8 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
if (did == Fabric_DID)
return phba->pport;
- if ((phba->pport->fc_flag & FC_PT2PT) &&
- !(phba->link_state == LPFC_HBA_READY))
+ if (test_bit(FC_PT2PT, &phba->pport->fc_flag) &&
+ phba->link_state != LPFC_HBA_READY)
return phba->pport;
vports = lpfc_create_vport_work_array(phba);
@@ -18933,7 +18931,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
"oxid:x%x SID:x%x\n", oxid, sid);
return;
}
- /* Put ndlp onto pport node list */
+ /* Put ndlp onto vport node list */
lpfc_enqueue_node(vport, ndlp);
}
@@ -18953,7 +18951,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
return;
}
- ctiocb->vport = phba->pport;
+ ctiocb->vport = vport;
ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
ctiocb->sli4_lxritag = NO_XRI;
ctiocb->sli4_xritag = NO_XRI;
@@ -19040,6 +19038,16 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
ctiocb->ndlp = NULL;
lpfc_sli_release_iocbq(phba, ctiocb);
}
+
+ /* if only usage of this nodelist is BLS response, release initial ref
+ * to free ndlp when transmit completes
+ */
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE &&
+ !(ndlp->nlp_flag & NLP_DROPPED) &&
+ !(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) {
+ ndlp->nlp_flag |= NLP_DROPPED;
+ lpfc_nlp_put(ndlp);
+ }
}
/**
@@ -19447,7 +19455,7 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
vport = phba->pport;
/* Handle MDS Loopback frames */
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
lpfc_sli4_handle_mds_loopback(vport, dmabuf);
else
lpfc_in_buf_free(phba, &dmabuf->dbuf);
@@ -19497,8 +19505,8 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
* The pt2pt protocol allows for discovery frames
* to be received without a registered VPI.
*/
- if (!(vport->fc_flag & FC_PT2PT) ||
- (phba->link_state == LPFC_HBA_READY)) {
+ if (!test_bit(FC_PT2PT, &vport->fc_flag) ||
+ phba->link_state == LPFC_HBA_READY) {
lpfc_in_buf_free(phba, &dmabuf->dbuf);
return;
}
@@ -19813,14 +19821,15 @@ lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
* lpfc_sli4_resume_rpi - Remove the rpi bitmask region
* @ndlp: pointer to lpfc nodelist data structure.
* @cmpl: completion call-back.
- * @arg: data to load as MBox 'caller buffer information'
+ * @iocbq: data to load as mbox ctx_u information
*
* This routine is invoked to remove the memory region that
* provided rpi via a bitmask.
**/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
- void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
+ void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *),
+ struct lpfc_iocbq *iocbq)
{
LPFC_MBOXQ_t *mboxq;
struct lpfc_hba *phba = ndlp->phba;
@@ -19849,7 +19858,7 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
lpfc_resume_rpi(mboxq, ndlp);
if (cmpl) {
mboxq->mbox_cmpl = cmpl;
- mboxq->ctx_buf = arg;
+ mboxq->ctx_u.save_iocb = iocbq;
} else
mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mboxq->ctx_ndlp = ndlp;
@@ -20666,7 +20675,7 @@ lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
goto out;
mqe = &mboxq->u.mqe;
- mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
+ mp = mboxq->ctx_buf;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
if (rc)
goto out;
@@ -21025,7 +21034,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
(mb->u.mb.mbxCommand == MBX_REG_VPI))
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
+ act_mbx_ndlp = mb->ctx_ndlp;
/* This reference is local to this routine. The
* reference is removed at routine exit.
@@ -21054,7 +21063,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
+ ndlp = mb->ctx_ndlp;
/* Unregister the RPI when mailbox complete */
mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
restart_loop = 1;
@@ -21074,7 +21083,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
while (!list_empty(&mbox_cmd_list)) {
list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
+ ndlp = mb->ctx_ndlp;
mb->ctx_ndlp = NULL;
if (ndlp) {
spin_lock(&ndlp->lock);
@@ -22656,7 +22665,7 @@ lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
if_type = bf_get(lpfc_sli_intf_if_type,
&phba->sli4_hba.sli_intf);
if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
- if (job->vport->fc_flag & FC_PT2PT) {
+ if (test_bit(FC_PT2PT, &job->vport->fc_flag)) {
bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
job->vport->fc_myDID);
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index c911a39cb46b8..cf7c42ec03067 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -182,11 +182,29 @@ typedef struct lpfcMboxq {
struct lpfc_mqe mqe;
} u;
struct lpfc_vport *vport; /* virtual port pointer */
- void *ctx_ndlp; /* an lpfc_nodelist pointer */
- void *ctx_buf; /* an lpfc_dmabuf pointer */
- void *context3; /* a generic pointer. Code must
- * accommodate the actual datatype.
- */
+ struct lpfc_nodelist *ctx_ndlp; /* caller ndlp pointer */
+ struct lpfc_dmabuf *ctx_buf; /* caller buffer information */
+ void *ext_buf; /* extended buffer for extended mbox
+ * cmds. Not a generic pointer.
+ * Use for storing virtual address.
+ */
+
+ /* Pointers that are seldom used during mbox execution, but require
+ * a saved context.
+ */
+ union {
+ unsigned long ox_rx_id; /* Used in els_rsp_rls_acc */
+ struct lpfc_rdp_context *rdp; /* Used in get_rdp_info */
+ struct lpfc_lcb_context *lcb; /* Used in set_beacon */
+ struct completion *mbox_wait; /* Used in issue_mbox_wait */
+ struct bsg_job_data *dd_data; /* Used in bsg_issue_mbox_cmpl
+ * and
+ * bsg_issue_mbox_ext_handle_job
+ */
+ struct lpfc_iocbq *save_iocb; /* Used in defer_plogi_acc and
+ * lpfc_mbx_cmpl_resume_rpi
+ */
+ } ctx_u;
void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
uint8_t mbox_flag;
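
The lpfcMboxq hunk above replaces the untyped context3 pointer with the named ctx_u union, which is what lets the earlier .c hunks drop their casts: each caller stores and reads back its context through a member of the right type, and the compiler can catch mismatches. A toy illustration of the same idea, with all type names invented:

#include <stdio.h>

struct demo_completion { int done; };
struct demo_rdp_ctx    { int page; };

struct demo_mbox {
        union {
                struct demo_completion *mbox_wait;
                struct demo_rdp_ctx    *rdp;
        } ctx_u;
};

int main(void)
{
        struct demo_rdp_ctx rdp = { .page = 0xa0 };
        struct demo_mbox mbox;

        mbox.ctx_u.rdp = &rdp;                  /* no cast needed */
        printf("page %#x\n", mbox.ctx_u.rdp->page);
        return 0;
}
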
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 2541a8fba093f..c1e9ec0243bac 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1118,8 +1118,9 @@ void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
void lpfc_sli4_remove_rpis(struct lpfc_hba *);
void lpfc_sli4_async_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
-int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
- void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
+int lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
+ void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *),
+ struct lpfc_iocbq *iocbq);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba);
void lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba,
struct lpfc_io_buf *lpfc_ncmd);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index aba1c1cee8c4d..915f2f11fb558 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.17"
+#define LPFC_DRIVER_VERSION "14.4.0.1"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2023 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2024 Broadcom. All Rights " \
"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 6c7559cf1a4b6..4439167a51882 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -166,7 +166,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
}
}
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ mp = pmb->ctx_buf;
memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
sizeof (struct lpfc_name));
@@ -238,13 +238,9 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
static void lpfc_discovery_wait(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
- uint32_t wait_flags = 0;
unsigned long wait_time_max;
unsigned long start_time;
- wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
- FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;
-
/*
* The time constraint on this loop is a balance between the
* fabric RA_TOV value and dev_loss tmo. The driver's
@@ -255,14 +251,19 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
start_time = jiffies;
while (time_before(jiffies, wait_time_max)) {
if ((vport->num_disc_nodes > 0) ||
- (vport->fc_flag & wait_flags) ||
+ test_bit(FC_RSCN_MODE, &vport->fc_flag) ||
+ test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag) ||
+ test_bit(FC_NLP_MORE, &vport->fc_flag) ||
+ test_bit(FC_RSCN_DEFERRED, &vport->fc_flag) ||
+ test_bit(FC_NDISC_ACTIVE, &vport->fc_flag) ||
+ test_bit(FC_DISC_TMO, &vport->fc_flag) ||
((vport->port_state > LPFC_VPORT_FAILED) &&
(vport->port_state < LPFC_VPORT_READY))) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
- "1833 Vport discovery quiesce Wait:"
- " state x%x fc_flags x%x"
- " num_nodes x%x, waiting 1000 msecs"
- " total wait msecs x%x\n",
+ "1833 Vport discovery quiesce Wait: "
+ "state x%x fc_flags x%lx "
+ "num_nodes x%x, waiting 1000 msecs "
+ "total wait msecs x%x\n",
vport->port_state, vport->fc_flag,
vport->num_disc_nodes,
jiffies_to_msecs(jiffies - start_time));
@@ -270,9 +271,9 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
} else {
/* Base case. Wait variants satisfied. Break out */
lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
- "1834 Vport discovery quiesced:"
- " state x%x fc_flags x%x"
- " wait msecs x%x\n",
+ "1834 Vport discovery quiesced: "
+ "state x%x fc_flags x%lx "
+ "wait msecs x%x\n",
vport->port_state, vport->fc_flag,
jiffies_to_msecs(jiffies
- start_time));
@@ -283,7 +284,7 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
if (time_after(jiffies, wait_time_max))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1835 Vport discovery quiesce failed:"
- " state x%x fc_flags x%x wait msecs x%x\n",
+ " state x%x fc_flags x%lx wait msecs x%x\n",
vport->port_state, vport->fc_flag,
jiffies_to_msecs(jiffies - start_time));
}
@@ -407,7 +408,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
vport->fc_vport = fc_vport;
/* At this point we are fully registered with SCSI Layer. */
- vport->load_flag |= FC_ALLOW_FDMI;
+ set_bit(FC_ALLOW_FDMI, &vport->load_flag);
if (phba->cfg_enable_SmartSAN ||
(phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
/* Setup appropriate attribute masks */
@@ -420,7 +421,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
* by the port.
*/
if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (pport->fc_flag & FC_VFI_REGISTERED)) {
+ test_bit(FC_VFI_REGISTERED, &pport->fc_flag)) {
rc = lpfc_sli4_init_vpi(vport);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -435,7 +436,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
* Driver cannot INIT_VPI now. Set the flags to
* init_vpi when reg_vfi complete.
*/
- vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ set_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
rc = VPORT_OK;
goto out;
@@ -535,10 +536,9 @@ disable_vport(struct fc_vport *fc_vport)
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
/* Can't disable during an outstanding delete. */
- if (vport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &vport->load_flag))
return 0;
ndlp = lpfc_findnode_did(vport, Fabric_DID);
@@ -556,11 +556,8 @@ disable_vport(struct fc_vport *fc_vport)
* scsi_host_put() to release the vport.
*/
lpfc_mbx_unreg_vpi(vport);
- if (phba->sli_rev == LPFC_SLI_REV4) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
- spin_unlock_irq(shost->host_lock);
- }
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ set_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
@@ -574,7 +571,6 @@ enable_vport(struct fc_vport *fc_vport)
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if ((phba->link_state < LPFC_LINK_UP) ||
(phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
@@ -582,16 +578,13 @@ enable_vport(struct fc_vport *fc_vport)
return VPORT_OK;
}
- spin_lock_irq(shost->host_lock);
- vport->load_flag |= FC_LOADING;
- if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_LOADING, &vport->load_flag);
+ if (test_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag)) {
lpfc_issue_init_vpi(vport);
goto out;
}
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
/* Use the Physical nodes Fabric NDLP to determine if the link is
* up and ready to FDISC.
@@ -643,22 +636,20 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
/* If the vport is a static vport fail the deletion. */
if ((vport->vport_flag & STATIC_VPORT) &&
- !(phba->pport->load_flag & FC_UNLOADING)) {
+ !test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1837 vport_delete failed: Cannot delete "
"static vport.\n");
return VPORT_ERROR;
}
- spin_lock_irq(&phba->hbalock);
- vport->load_flag |= FC_UNLOADING;
- spin_unlock_irq(&phba->hbalock);
+ set_bit(FC_UNLOADING, &vport->load_flag);
/*
* If we are not unloading the driver then prevent the vport_delete
* from happening until after this vport's discovery is finished.
*/
- if (!(phba->pport->load_flag & FC_UNLOADING)) {
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
int check_count = 0;
while (check_count < ((phba->fc_ratov * 3) + 3) &&
vport->port_state > LPFC_VPORT_FAILED &&
@@ -683,10 +674,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
lpfc_free_sysfs_attr(vport);
lpfc_debugfs_terminate(vport);
- /* Remove FC host to break driver binding. */
- fc_remove_host(shost);
- scsi_remove_host(shost);
-
/* Send the DA_ID and Fabric LOGO to cleanup Nameserver entries. */
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp)
@@ -725,18 +712,22 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
goto skip_logo;
}
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
lpfc_discovery_wait(vport);
skip_logo:
+ /* Remove FC host to break driver binding. */
+ fc_remove_host(shost);
+ scsi_remove_host(shost);
+
lpfc_cleanup(vport);
/* Remove scsi host now. The nodes are cleaned up. */
lpfc_sli_host_down(vport);
lpfc_stop_vport_timers(vport);
- if (!(phba->pport->load_flag & FC_UNLOADING)) {
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
lpfc_unreg_all_rpis(vport);
lpfc_unreg_default_rpis(vport);
/*
@@ -773,7 +764,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
return NULL;
spin_lock_irq(&phba->port_list_lock);
list_for_each_entry(port_iterator, &phba->port_list, listentry) {
- if (port_iterator->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &port_iterator->load_flag))
continue;
if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
lpfc_printf_vlog(port_iterator, KERN_ERR,
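The lpfc hunks above replace read-modify-write updates of vport->load_flag and vport->fc_flag under shost->host_lock / phba->hbalock with atomic bitops, so each flag word must be an unsigned long and each flag becomes a bit number. A minimal sketch of that pattern, using illustrative names rather than the real lpfc flags:

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustrative stand-ins for the lpfc vport and its flag bits. */
enum { DEMO_UNLOADING, DEMO_NEEDS_INIT_VPI };

struct demo_vport {
	unsigned long load_flag;	/* one bit per flag, updated atomically */
};

static int demo_disable(struct demo_vport *vp)
{
	/* Atomic test replaces "if (flag & FC_UNLOADING)" under the host lock. */
	if (test_bit(DEMO_UNLOADING, &vp->load_flag))
		return 0;

	/* Atomic set replaces "flag |= ..." plus the spin_lock_irq()/unlock pair. */
	set_bit(DEMO_NEEDS_INIT_VPI, &vp->load_flag);
	return 1;
}
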
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 6a019132109c1..377dcab32cd8f 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -508,7 +508,7 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
return rc;
}
-static int mac53c94_remove(struct macio_dev *mdev)
+static void mac53c94_remove(struct macio_dev *mdev)
{
struct fsc_state *fp = (struct fsc_state *)macio_get_drvdata(mdev);
struct Scsi_Host *host = fp->host;
@@ -526,11 +526,8 @@ static int mac53c94_remove(struct macio_dev *mdev)
scsi_host_put(host);
macio_release_resources(mdev);
-
- return 0;
}
-
static struct of_device_id mac53c94_match[] =
{
{
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 66a30a3e6cd53..38976f94453e0 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -219,7 +219,7 @@ mega_query_adapter(adapter_t *adapter)
raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; /* i.e. 0x02 */
/* Issue a blocking command to the card */
- if ((retval = issue_scb_block(adapter, raw_mbox))) {
+ if (issue_scb_block(adapter, raw_mbox)) {
/* the adapter does not support 40ld */
mraid_ext_inquiry *ext_inq;
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index e276583c590c3..1c15cac41d805 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -54,7 +54,7 @@
#define KERN_DEBUG KERN_WARNING
#endif
-MODULE_AUTHOR("Paul Mackerras (paulus@samba.org)");
+MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>");
MODULE_DESCRIPTION("PowerMac MESH SCSI driver");
MODULE_LICENSE("GPL");
@@ -1986,7 +1986,7 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
return -ENODEV;
}
-static int mesh_remove(struct macio_dev *mdev)
+static void mesh_remove(struct macio_dev *mdev)
{
struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
struct Scsi_Host *mesh_host = ms->host;
@@ -2013,11 +2013,8 @@ static int mesh_remove(struct macio_dev *mdev)
macio_release_resources(mdev);
scsi_host_put(mesh_host);
-
- return 0;
}
-
static struct of_device_id mesh_match[] =
{
{
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 0380996b5ad27..55d590b919476 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -1644,7 +1644,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
if ((mpirep_offset != 0xFF) &&
drv_bufs[mpirep_offset].bsg_buf_len) {
drv_buf_iter = &drv_bufs[mpirep_offset];
- drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) - 1 +
+ drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) +
mrioc->reply_sz);
bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 1bffd629c1244..73c831a97d276 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -8,11 +8,12 @@
*/
#include "mpi3mr.h"
+#include <linux/idr.h>
/* global driver scop variables */
LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
-static int mrioc_ids;
+static DEFINE_IDA(mrioc_ida);
static int warn_non_secure_ctlr;
atomic64_t event_counter;
@@ -5072,7 +5073,10 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
mrioc = shost_priv(shost);
- mrioc->id = mrioc_ids++;
+ retval = ida_alloc_range(&mrioc_ida, 1, U8_MAX, GFP_KERNEL);
+ if (retval < 0)
+ goto id_alloc_failed;
+ mrioc->id = (u8)retval;
sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
INIT_LIST_HEAD(&mrioc->list);
@@ -5222,9 +5226,11 @@ init_ioc_failed:
resource_alloc_failed:
destroy_workqueue(mrioc->fwevt_worker_thread);
fwevtthread_failed:
+ ida_free(&mrioc_ida, mrioc->id);
spin_lock(&mrioc_list_lock);
list_del(&mrioc->list);
spin_unlock(&mrioc_list_lock);
+id_alloc_failed:
scsi_host_put(shost);
shost_failed:
return retval;
@@ -5310,6 +5316,7 @@ static void mpi3mr_remove(struct pci_dev *pdev)
mrioc->sas_hba.num_phys = 0;
}
+ ida_free(&mrioc_ida, mrioc->id);
spin_lock(&mrioc_list_lock);
list_del(&mrioc->list);
spin_unlock(&mrioc_list_lock);
@@ -5525,6 +5532,7 @@ static void __exit mpi3mr_exit(void)
&driver_attr_event_counter);
pci_unregister_driver(&mpi3mr_pci_driver);
sas_release_transport(mpi3mr_transport_template);
+ ida_destroy(&mrioc_ida);
}
module_init(mpi3mr_init);
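The mpi3mr_os.c change above swaps a bare incrementing counter for an IDA so controller ids are bounded and recycled when an adapter is removed. A minimal sketch of the IDA lifecycle, with demo_* names standing in for the driver's probe/remove/exit paths:

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/limits.h>

static DEFINE_IDA(demo_ida);

static int demo_probe(void)
{
	/* Allocate the smallest free id in [1, U8_MAX]. */
	int id = ida_alloc_range(&demo_ida, 1, U8_MAX, GFP_KERNEL);

	if (id < 0)
		return id;		/* -ENOMEM or -ENOSPC */
	/* ... store id in the per-adapter structure ... */
	return 0;
}

static void demo_remove(unsigned int id)
{
	ida_free(&demo_ida, id);	/* id becomes available again */
}

static void demo_exit(void)
{
	ida_destroy(&demo_ida);		/* module unload */
}
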
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index b8120ca93c797..1b492e9a3e55e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5481,7 +5481,7 @@ mpt3sas_atto_validate_nvram(struct MPT3SAS_ADAPTER *ioc,
* mpt3sas_atto_get_sas_addr - get the ATTO SAS address from mfg page 1
*
* @ioc : per adapter object
- * @*sas_addr : return sas address
+ * @sas_addr : return sas address
* Return: 0 for success, non-zero for failure.
*/
static int
@@ -7916,26 +7916,22 @@ mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
}
/**
- * _base_diag_reset - the "big hammer" start of day reset
- * @ioc: per adapter object
- *
- * Return: 0 for success, non-zero for failure.
- */
-static int
-_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
-{
- u32 host_diagnostic;
- u32 ioc_state;
- u32 count;
- u32 hcb_size;
-
- ioc_info(ioc, "sending diag reset !!\n");
-
- pci_cfg_access_lock(ioc->pdev);
+ * mpt3sas_base_unlock_and_get_host_diagnostic - enable Host Diagnostic Register writes
+ * @ioc: per adapter object
+ * @host_diagnostic: host diagnostic register content
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
- drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
+int
+mpt3sas_base_unlock_and_get_host_diagnostic(struct MPT3SAS_ADAPTER *ioc,
+ u32 *host_diagnostic)
+{
+ u32 count;
+ *host_diagnostic = 0;
count = 0;
+
do {
/* Write magic sequence to WriteSequence register
* Loop until in diagnostic mode
@@ -7954,30 +7950,67 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
if (count++ > 20) {
ioc_info(ioc,
- "Stop writing magic sequence after 20 retries\n");
+ "Stop writing magic sequence after 20 retries\n");
_base_dump_reg_set(ioc);
- goto out;
+ return -EFAULT;
}
- host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
+ *host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
drsprintk(ioc,
- ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
- count, host_diagnostic));
+ ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
+ count, *host_diagnostic));
- } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
+ } while ((*host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
+ return 0;
+}
- hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
+/**
+ * mpt3sas_base_lock_host_diagnostic: Disable Host Diagnostic Register writes
+ * @ioc: per adapter object
+ */
+void
+mpt3sas_base_lock_host_diagnostic(struct MPT3SAS_ADAPTER *ioc)
+{
+ drsprintk(ioc, ioc_info(ioc, "disable writes to the diagnostic register\n"));
+ writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
+}
+
+/**
+ * _base_diag_reset - the "big hammer" start of day reset
+ * @ioc: per adapter object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 host_diagnostic;
+ u32 ioc_state;
+ u32 count;
+ u32 hcb_size;
+
+ ioc_info(ioc, "sending diag reset !!\n");
+
+ pci_cfg_access_lock(ioc->pdev);
+
+ drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
+
+ mutex_lock(&ioc->hostdiag_unlock_mutex);
+ if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diagnostic))
+ goto out;
+
+ hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
&ioc->chip->HostDiagnostic);
- /*This delay allows the chip PCIe hardware time to finish reset tasks*/
+ /* This delay allows the chip PCIe hardware time to finish reset tasks */
msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
/* Approximately 300 second max wait */
for (count = 0; count < (300000000 /
- MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
+ MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
@@ -7990,13 +8023,15 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
break;
- msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
+ /* Wait to pass the second read delay window */
+ msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC/1000);
}
if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
drsprintk(ioc,
- ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
+ ioc_info(ioc, "restart the adapter assuming the\n"
+ "HCB Address points to good F/W\n"));
host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
writel(host_diagnostic, &ioc->chip->HostDiagnostic);
@@ -8010,9 +8045,8 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
&ioc->chip->HostDiagnostic);
- drsprintk(ioc,
- ioc_info(ioc, "disable writes to the diagnostic register\n"));
- writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
+ mpt3sas_base_lock_host_diagnostic(ioc);
+ mutex_unlock(&ioc->hostdiag_unlock_mutex);
drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
@@ -8030,6 +8064,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
out:
pci_cfg_access_unlock(ioc->pdev);
ioc_err(ioc, "diag reset: FAILED\n");
+ mutex_unlock(&ioc->hostdiag_unlock_mutex);
return -EFAULT;
}
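The mpt3sas_base.c rework above factors the write-enable "magic sequence" out of _base_diag_reset() into mpt3sas_base_unlock_and_get_host_diagnostic() / mpt3sas_base_lock_host_diagnostic() so other paths (the new SBR-reload ioctl) can reuse it, and serializes every unlock/relock window behind hostdiag_unlock_mutex. A compressed sketch of that shape, with the register access stubbed out and demo_* names used purely for illustration:

#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_ioc {
	struct mutex hostdiag_unlock_mutex;	/* guards unlock..relock windows */
};

static int demo_unlock_and_get_diag(struct demo_ioc *ioc, u32 *diag)
{
	*diag = 0;
	/* write the magic sequence, poll for the write-enable bit ... */
	return 0;
}

static void demo_lock_diag(struct demo_ioc *ioc)
{
	/* write the flush key so the register is read-only again */
}

static int demo_diag_reset(struct demo_ioc *ioc)
{
	u32 diag;
	int rc;

	mutex_lock(&ioc->hostdiag_unlock_mutex);
	rc = demo_unlock_and_get_diag(ioc, &diag);
	if (!rc) {
		/* ... issue the reset and wait for the adapter ... */
		demo_lock_diag(ioc);
	}
	mutex_unlock(&ioc->hostdiag_unlock_mutex);
	return rc;
}
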
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 6d0bc8c667002..bf100a4ebfc36 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -77,8 +77,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "43.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 43
+#define MPT3SAS_DRIVER_VERSION "48.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 48
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -1366,6 +1366,7 @@ struct MPT3SAS_ADAPTER {
u8 got_task_abort_from_ioctl;
struct mutex reset_in_progress_mutex;
+ struct mutex hostdiag_unlock_mutex;
spinlock_t ioc_reset_in_progress_lock;
u8 ioc_link_reset_in_progress;
@@ -1790,6 +1791,9 @@ void mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc);
int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
void mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc);
+int mpt3sas_base_unlock_and_get_host_diagnostic(struct MPT3SAS_ADAPTER *ioc,
+ u32 *host_diagnostic);
+void mpt3sas_base_lock_host_diagnostic(struct MPT3SAS_ADAPTER *ioc);
/* scsih shared API */
struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 147cb7088d55f..1c9fd26195b81 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -2543,6 +2543,56 @@ out:
return 0;
}
+/**
+ * _ctl_enable_diag_sbr_reload - enable sbr reload bit
+ * @ioc: per adapter object
+ * @arg: user space buffer containing ioctl content
+ *
+ * Enable the SBR reload bit
+ */
+static int
+_ctl_enable_diag_sbr_reload(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ u32 ioc_state, host_diagnostic;
+
+ if (ioc->shost_recovery ||
+ ioc->pci_error_recovery || ioc->is_driver_loading ||
+ ioc->remove_host)
+ return -EAGAIN;
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL)
+ return -EFAULT;
+
+ host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
+
+ if (host_diagnostic & MPI2_DIAG_SBR_RELOAD)
+ return 0;
+
+ if (mutex_trylock(&ioc->hostdiag_unlock_mutex)) {
+ if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diagnostic)) {
+ mutex_unlock(&ioc->hostdiag_unlock_mutex);
+ return -EFAULT;
+ }
+ } else
+ return -EAGAIN;
+
+ host_diagnostic |= MPI2_DIAG_SBR_RELOAD;
+ writel(host_diagnostic, &ioc->chip->HostDiagnostic);
+ host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
+ mpt3sas_base_lock_host_diagnostic(ioc);
+ mutex_unlock(&ioc->hostdiag_unlock_mutex);
+
+ if (!(host_diagnostic & MPI2_DIAG_SBR_RELOAD)) {
+ ioc_err(ioc, "%s: Failed to set Diag SBR Reload Bit\n", __func__);
+ return -EFAULT;
+ }
+
+ ioc_info(ioc, "%s: Successfully set the Diag SBR Reload Bit\n", __func__);
+ return 0;
+}
+
#ifdef CONFIG_COMPAT
/**
* _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
@@ -2719,6 +2769,10 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
if (_IOC_SIZE(cmd) == sizeof(struct mpt3_addnl_diag_query))
ret = _ctl_addnl_diag_query(ioc, arg);
break;
+ case MPT3ENABLEDIAGSBRRELOAD:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_enable_diag_sbr_reload))
+ ret = _ctl_enable_diag_sbr_reload(ioc, arg);
+ break;
default:
dctlprintk(ioc,
ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 8f6ffb40261c9..171709e910066 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -98,6 +98,8 @@
struct mpt3_diag_read_buffer)
#define MPT3ADDNLDIAGQUERY _IOWR(MPT3_MAGIC_NUMBER, 32, \
struct mpt3_addnl_diag_query)
+#define MPT3ENABLEDIAGSBRRELOAD _IOWR(MPT3_MAGIC_NUMBER, 33, \
+ struct mpt3_enable_diag_sbr_reload)
/* Trace Buffer default UniqueId */
#define MPT2DIAGBUFFUNIQUEID (0x07075900)
@@ -448,4 +450,12 @@ struct mpt3_addnl_diag_query {
uint32_t reserved2[2];
};
+/**
+ * struct mpt3_enable_diag_sbr_reload - enable sbr reload
+ * @hdr - generic header
+ */
+struct mpt3_enable_diag_sbr_reload {
+ struct mpt3_ioctl_header hdr;
+};
+
#endif /* MPT3SAS_CTL_H_INCLUDED */
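The mpt3sas_ctl.h/.c additions above follow the driver's existing ioctl pattern: a request structure carrying only the generic header, an _IOWR() number, and a dispatch case that validates _IOC_SIZE() before acting. A stripped-down sketch of that wiring; the DEMO_* names and magic number are illustrative, not part of the mpt3sas ABI:

#include <linux/ioctl.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/compiler.h>

#define DEMO_MAGIC 'D'

struct demo_enable_sbr_reload {
	__u32 hdr;			/* stands in for the generic ioctl header */
};

#define DEMO_ENABLE_SBR_RELOAD _IOWR(DEMO_MAGIC, 33, struct demo_enable_sbr_reload)

static long demo_ioctl_main(unsigned int cmd, void __user *arg)
{
	switch (cmd) {
	case DEMO_ENABLE_SBR_RELOAD:
		/* Only act if userspace passed a structure of the expected size. */
		if (_IOC_SIZE(cmd) != sizeof(struct demo_enable_sbr_reload))
			return -EINVAL;
		/* ... unlock the diagnostic register and set the SBR reload bit ... */
		return 0;
	default:
		return -ENOTTY;
	}
}
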
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 51b5788da040a..ef8ee93005eae 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -12240,6 +12240,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* misc semaphores and spin locks */
mutex_init(&ioc->reset_in_progress_mutex);
+ mutex_init(&ioc->hostdiag_unlock_mutex);
/* initializing pci_access_mutex lock */
mutex_init(&ioc->pci_access_mutex);
spin_lock_init(&ioc->ioc_reset_in_progress_lock);
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index ca2e932dd9b70..f684eb5e04898 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1775,9 +1775,9 @@ static ssize_t raid_state_show(struct device *dev,
name = myrb_devstate_name(ldev_info->state);
if (name)
- ret = snprintf(buf, 32, "%s\n", name);
+ ret = snprintf(buf, 64, "%s\n", name);
else
- ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ret = snprintf(buf, 64, "Invalid (%02X)\n",
ldev_info->state);
} else {
struct myrb_pdev_state *pdev_info = sdev->hostdata;
@@ -1796,9 +1796,9 @@ static ssize_t raid_state_show(struct device *dev,
else
name = myrb_devstate_name(pdev_info->state);
if (name)
- ret = snprintf(buf, 32, "%s\n", name);
+ ret = snprintf(buf, 64, "%s\n", name);
else
- ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ret = snprintf(buf, 64, "Invalid (%02X)\n",
pdev_info->state);
}
return ret;
@@ -1886,11 +1886,11 @@ static ssize_t raid_level_show(struct device *dev,
name = myrb_raidlevel_name(ldev_info->raid_level);
if (!name)
- return snprintf(buf, 32, "Invalid (%02X)\n",
+ return snprintf(buf, 64, "Invalid (%02X)\n",
ldev_info->state);
- return snprintf(buf, 32, "%s\n", name);
+ return snprintf(buf, 64, "%s\n", name);
}
- return snprintf(buf, 32, "Physical Drive\n");
+ return snprintf(buf, 64, "Physical Drive\n");
}
static DEVICE_ATTR_RO(raid_level);
@@ -1903,15 +1903,15 @@ static ssize_t rebuild_show(struct device *dev,
unsigned char status;
if (sdev->channel < myrb_logical_channel(sdev->host))
- return snprintf(buf, 32, "physical device - not rebuilding\n");
+ return snprintf(buf, 64, "physical device - not rebuilding\n");
status = myrb_get_rbld_progress(cb, &rbld_buf);
if (rbld_buf.ldev_num != sdev->id ||
status != MYRB_STATUS_SUCCESS)
- return snprintf(buf, 32, "not rebuilding\n");
+ return snprintf(buf, 64, "not rebuilding\n");
- return snprintf(buf, 32, "rebuilding block %u of %u\n",
+ return snprintf(buf, 64, "rebuilding block %u of %u\n",
rbld_buf.ldev_size - rbld_buf.blocks_left,
rbld_buf.ldev_size);
}
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index a1eec65a9713f..e824be9d9bbb9 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -947,9 +947,9 @@ static ssize_t raid_state_show(struct device *dev,
name = myrs_devstate_name(ldev_info->dev_state);
if (name)
- ret = snprintf(buf, 32, "%s\n", name);
+ ret = snprintf(buf, 64, "%s\n", name);
else
- ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ret = snprintf(buf, 64, "Invalid (%02X)\n",
ldev_info->dev_state);
} else {
struct myrs_pdev_info *pdev_info;
@@ -958,9 +958,9 @@ static ssize_t raid_state_show(struct device *dev,
pdev_info = sdev->hostdata;
name = myrs_devstate_name(pdev_info->dev_state);
if (name)
- ret = snprintf(buf, 32, "%s\n", name);
+ ret = snprintf(buf, 64, "%s\n", name);
else
- ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ret = snprintf(buf, 64, "Invalid (%02X)\n",
pdev_info->dev_state);
}
return ret;
@@ -1066,13 +1066,13 @@ static ssize_t raid_level_show(struct device *dev,
ldev_info = sdev->hostdata;
name = myrs_raid_level_name(ldev_info->raid_level);
if (!name)
- return snprintf(buf, 32, "Invalid (%02X)\n",
+ return snprintf(buf, 64, "Invalid (%02X)\n",
ldev_info->dev_state);
} else
name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);
- return snprintf(buf, 32, "%s\n", name);
+ return snprintf(buf, 64, "%s\n", name);
}
static DEVICE_ATTR_RO(raid_level);
@@ -1086,7 +1086,7 @@ static ssize_t rebuild_show(struct device *dev,
unsigned char status;
if (sdev->channel < cs->ctlr_info->physchan_present)
- return snprintf(buf, 32, "physical device - not rebuilding\n");
+ return snprintf(buf, 64, "physical device - not rebuilding\n");
ldev_info = sdev->hostdata;
ldev_num = ldev_info->ldev_num;
@@ -1098,11 +1098,11 @@ static ssize_t rebuild_show(struct device *dev,
return -EIO;
}
if (ldev_info->rbld_active) {
- return snprintf(buf, 32, "rebuilding block %zu of %zu\n",
+ return snprintf(buf, 64, "rebuilding block %zu of %zu\n",
(size_t)ldev_info->rbld_lba,
(size_t)ldev_info->cfg_devsize);
} else
- return snprintf(buf, 32, "not rebuilding\n");
+ return snprintf(buf, 64, "not rebuilding\n");
}
static ssize_t rebuild_store(struct device *dev,
@@ -1190,7 +1190,7 @@ static ssize_t consistency_check_show(struct device *dev,
unsigned short ldev_num;
if (sdev->channel < cs->ctlr_info->physchan_present)
- return snprintf(buf, 32, "physical device - not checking\n");
+ return snprintf(buf, 64, "physical device - not checking\n");
ldev_info = sdev->hostdata;
if (!ldev_info)
@@ -1198,11 +1198,11 @@ static ssize_t consistency_check_show(struct device *dev,
ldev_num = ldev_info->ldev_num;
myrs_get_ldev_info(cs, ldev_num, ldev_info);
if (ldev_info->cc_active)
- return snprintf(buf, 32, "checking block %zu of %zu\n",
+ return snprintf(buf, 64, "checking block %zu of %zu\n",
(size_t)ldev_info->cc_lba,
(size_t)ldev_info->cfg_devsize);
else
- return snprintf(buf, 32, "not checking\n");
+ return snprintf(buf, 64, "not checking\n");
}
static ssize_t consistency_check_store(struct device *dev,
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 5c26a13ffbd26..7b27618fd7b2e 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -880,9 +880,9 @@ static ssize_t pm8001_show_update_fw(struct device *cdev,
if (pm8001_ha->fw_status != FLASH_IN_PROGRESS)
pm8001_ha->fw_status = FLASH_OK;
- return snprintf(buf, PAGE_SIZE, "status=%x %s\n",
- flash_error_table[i].err_code,
- flash_error_table[i].reason);
+ return sysfs_emit(buf, "status=%x %s\n",
+ flash_error_table[i].err_code,
+ flash_error_table[i].reason);
}
static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP,
pm8001_show_update_fw, pm8001_store_update_fw);
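The myrb/myrs hunks above widen snprintf() bounds that were silently truncating sysfs output, and the pm8001 hunk goes a step further by switching to sysfs_emit(), which already knows the buffer is a full sysfs page. A minimal sketch of a show() routine in that preferred form; the attribute name and value are made up:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_status_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int status = 0;			/* illustrative value */

	/* sysfs_emit() bounds the write to PAGE_SIZE and returns the length. */
	return sysfs_emit(buf, "status=%x\n", status);
}
static DEVICE_ATTR_RO(demo_status);
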
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index e8bcc3a88732a..0614b7e366b77 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -61,7 +61,9 @@ static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0);
* pmcraid_minor - minor number(s) to use
*/
static unsigned int pmcraid_major;
-static struct class *pmcraid_class;
+static const struct class pmcraid_class = {
+ .name = PMCRAID_DEVFILE,
+};
static DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
/*
@@ -4723,7 +4725,7 @@ static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance)
if (error)
pmcraid_release_minor(minor);
else
- device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor),
+ device_create(&pmcraid_class, NULL, MKDEV(pmcraid_major, minor),
NULL, "%s%u", PMCRAID_DEVFILE, minor);
return error;
}
@@ -4739,7 +4741,7 @@ static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance)
static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance)
{
pmcraid_release_minor(MINOR(pinstance->cdev.dev));
- device_destroy(pmcraid_class,
+ device_destroy(&pmcraid_class,
MKDEV(pmcraid_major, MINOR(pinstance->cdev.dev)));
cdev_del(&pinstance->cdev);
}
@@ -5390,10 +5392,10 @@ static int __init pmcraid_init(void)
}
pmcraid_major = MAJOR(dev);
- pmcraid_class = class_create(PMCRAID_DEVFILE);
- if (IS_ERR(pmcraid_class)) {
- error = PTR_ERR(pmcraid_class);
+ error = class_register(&pmcraid_class);
+
+ if (error) {
pmcraid_err("failed to register with sysfs, error = %x\n",
error);
goto out_unreg_chrdev;
@@ -5402,7 +5404,7 @@ static int __init pmcraid_init(void)
error = pmcraid_netlink_init();
if (error) {
- class_destroy(pmcraid_class);
+ class_unregister(&pmcraid_class);
goto out_unreg_chrdev;
}
@@ -5413,7 +5415,7 @@ static int __init pmcraid_init(void)
pmcraid_err("failed to register pmcraid driver, error = %x\n",
error);
- class_destroy(pmcraid_class);
+ class_unregister(&pmcraid_class);
pmcraid_netlink_release();
out_unreg_chrdev:
@@ -5432,7 +5434,7 @@ static void __exit pmcraid_exit(void)
unregister_chrdev_region(MKDEV(pmcraid_major, 0),
PMCRAID_MAX_ADAPTERS);
pci_unregister_driver(&pmcraid_driver);
- class_destroy(pmcraid_class);
+ class_unregister(&pmcraid_class);
}
module_init(pmcraid_init);
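The pmcraid change above is the standard conversion from a class_create() pointer to a statically defined const struct class registered with class_register(); device_create()/device_destroy() then take the address of that static object. A minimal module-shaped sketch of the same conversion with illustrative names:

#include <linux/device/class.h>
#include <linux/module.h>

static const struct class demo_class = {
	.name = "demo",
};

static int __init demo_init(void)
{
	int error = class_register(&demo_class);

	if (error)
		return error;
	/* device_create(&demo_class, NULL, devt, NULL, "demo%u", 0); */
	return 0;
}

static void __exit demo_exit(void)
{
	class_unregister(&demo_class);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("const struct class registration sketch");
MODULE_LICENSE("GPL");
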
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 27bce80262c20..8958547ac111a 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2478,7 +2478,6 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
/* Load return mailbox registers. */
optr = mb;
iptr = (uint16_t *) &ha->mailbox_out[0];
- mr = MAILBOX_REGISTER_COUNT;
memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
if (ha->flags.reset_marker)
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 44449c70a375f..76eeba435fd04 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2741,7 +2741,13 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
return;
if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
- qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
+ /* Will wait for wind down of adapter */
+ ql_dbg(ql_dbg_aer, fcport->vha, 0x900c,
+ "%s pci offline detected (id %06x)\n", __func__,
+ fcport->d_id.b24);
+ qla_pci_set_eeh_busy(fcport->vha);
+ qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
+ 0, WAIT_TARGET);
return;
}
}
@@ -2763,7 +2769,11 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
vha = fcport->vha;
if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
- qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
+ /* Will wait for wind down of adapter */
+ ql_dbg(ql_dbg_aer, fcport->vha, 0x900b,
+ "%s pci offline detected (id %06x)\n", __func__,
+ fcport->d_id.b24);
+ qla_pci_set_eeh_busy(vha);
qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
0, WAIT_TARGET);
return;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index deb642607deb6..2f49baf131e26 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -82,7 +82,7 @@ typedef union {
#include "qla_nvme.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
-#define QLA2XXX_MANUFACTURER "Marvell Semiconductor, Inc."
+#define QLA2XXX_MANUFACTURER "Marvell"
/*
* We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index 26e6b3e3af431..dcde55c8ee5de 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -1100,7 +1100,7 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
if (fcport->edif.enable) {
- if (pcnt > app_req.num_ports)
+ if (pcnt >= app_req.num_ports)
break;
app_reply->elem[pcnt].rekey_count =
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 09cb9413670a5..7309310d2ab94 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -44,7 +44,7 @@ extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
-extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool);
+extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *);
extern void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha,
struct els_plogi *els_plogi);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index a314cfc5b263f..8377624d76c98 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1193,8 +1193,12 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ /*
+ * Use qla24xx_async_gnl_sp_done to purge all pending gnl requests.
+ * kref_put is called behind the scenes.
+ */
+ sp->u.iocb_cmd.u.mbx.in_mb[0] = MBS_COMMAND_ERROR;
+ qla24xx_async_gnl_sp_done(sp, QLA_COMMAND_ERROR);
fcport->flags &= ~(FCF_ASYNC_SENT);
done:
fcport->flags &= ~(FCF_ASYNC_ACTIVE);
@@ -2665,6 +2669,40 @@ exit:
return rval;
}
+static void qla_enable_fce_trace(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->fce) {
+ ha->flags.fce_enabled = 1;
+ memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
+ rval = qla2x00_enable_fce_trace(vha,
+ ha->fce_dma, ha->fce_bufs, ha->fce_mb, &ha->fce_bufs);
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x8033,
+ "Unable to reinitialize FCE (%d).\n", rval);
+ ha->flags.fce_enabled = 0;
+ }
+ }
+}
+
+static void qla_enable_eft_trace(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->eft) {
+ memset(ha->eft, 0, EFT_SIZE);
+ rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, EFT_NUM_BUFFERS);
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x8034,
+ "Unable to reinitialize EFT (%d).\n", rval);
+ }
+ }
+}
/*
* qla2x00_initialize_adapter
* Initialize board.
@@ -3668,9 +3706,8 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
}
static void
-qla2x00_init_fce_trace(scsi_qla_host_t *vha)
+qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
{
- int rval;
dma_addr_t tc_dma;
void *tc;
struct qla_hw_data *ha = vha->hw;
@@ -3699,27 +3736,17 @@ qla2x00_init_fce_trace(scsi_qla_host_t *vha)
return;
}
- rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
- ha->fce_mb, &ha->fce_bufs);
- if (rval) {
- ql_log(ql_log_warn, vha, 0x00bf,
- "Unable to initialize FCE (%d).\n", rval);
- dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
- return;
- }
-
ql_dbg(ql_dbg_init, vha, 0x00c0,
"Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
- ha->flags.fce_enabled = 1;
ha->fce_dma = tc_dma;
ha->fce = tc;
+ ha->fce_bufs = FCE_NUM_BUFFERS;
}
static void
-qla2x00_init_eft_trace(scsi_qla_host_t *vha)
+qla2x00_alloc_eft_trace(scsi_qla_host_t *vha)
{
- int rval;
dma_addr_t tc_dma;
void *tc;
struct qla_hw_data *ha = vha->hw;
@@ -3744,14 +3771,6 @@ qla2x00_init_eft_trace(scsi_qla_host_t *vha)
return;
}
- rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
- if (rval) {
- ql_log(ql_log_warn, vha, 0x00c2,
- "Unable to initialize EFT (%d).\n", rval);
- dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
- return;
- }
-
ql_dbg(ql_dbg_init, vha, 0x00c3,
"Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
@@ -3759,13 +3778,6 @@ qla2x00_init_eft_trace(scsi_qla_host_t *vha)
ha->eft = tc;
}
-static void
-qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
-{
- qla2x00_init_fce_trace(vha);
- qla2x00_init_eft_trace(vha);
-}
-
void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
@@ -3820,10 +3832,10 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
if (ha->tgt.atio_ring)
mq_size += ha->tgt.atio_q_length * sizeof(request_t);
- qla2x00_init_fce_trace(vha);
+ qla2x00_alloc_fce_trace(vha);
if (ha->fce)
fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
- qla2x00_init_eft_trace(vha);
+ qla2x00_alloc_eft_trace(vha);
if (ha->eft)
eft_size = EFT_SIZE;
}
@@ -4253,7 +4265,6 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
unsigned long flags;
- uint16_t fw_major_version;
int done_once = 0;
if (IS_P3P_TYPE(ha)) {
@@ -4320,7 +4331,6 @@ execute_fw_with_lr:
goto failed;
enable_82xx_npiv:
- fw_major_version = ha->fw_major_version;
if (IS_P3P_TYPE(ha))
qla82xx_check_md_needed(vha);
else
@@ -4349,12 +4359,11 @@ enable_82xx_npiv:
if (rval != QLA_SUCCESS)
goto failed;
- if (!fw_major_version && !(IS_P3P_TYPE(ha)))
- qla2x00_alloc_offload_mem(vha);
-
if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
qla2x00_alloc_fw_dump(vha);
+ qla_enable_fce_trace(vha);
+ qla_enable_eft_trace(vha);
} else {
goto failed;
}
@@ -7487,12 +7496,12 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
int
qla2x00_abort_isp(scsi_qla_host_t *vha)
{
- int rval;
uint8_t status = 0;
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp, *tvp;
struct req_que *req = ha->req_q_map[0];
unsigned long flags;
+ fc_port_t *fcport;
if (vha->flags.online) {
qla2x00_abort_isp_cleanup(vha);
@@ -7561,6 +7570,15 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
"ISP Abort - ISP reg disconnect post nvmram config, exiting.\n");
return status;
}
+
+ /* User may have updated [fcp|nvme] prefer in flash */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (NVME_PRIORITY(ha, fcport))
+ fcport->do_prli_nvme = 1;
+ else
+ fcport->do_prli_nvme = 0;
+ }
+
if (!qla2x00_restart_isp(vha)) {
clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
@@ -7581,31 +7599,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
if (IS_QLA81XX(ha) || IS_QLA8031(ha))
qla2x00_get_fw_version(vha);
- if (ha->fce) {
- ha->flags.fce_enabled = 1;
- memset(ha->fce, 0,
- fce_calc_size(ha->fce_bufs));
- rval = qla2x00_enable_fce_trace(vha,
- ha->fce_dma, ha->fce_bufs, ha->fce_mb,
- &ha->fce_bufs);
- if (rval) {
- ql_log(ql_log_warn, vha, 0x8033,
- "Unable to reinitialize FCE "
- "(%d).\n", rval);
- ha->flags.fce_enabled = 0;
- }
- }
- if (ha->eft) {
- memset(ha->eft, 0, EFT_SIZE);
- rval = qla2x00_enable_eft_trace(vha,
- ha->eft_dma, EFT_NUM_BUFFERS);
- if (rval) {
- ql_log(ql_log_warn, vha, 0x8034,
- "Unable to reinitialize EFT "
- "(%d).\n", rval);
- }
- }
} else { /* failed the ISP abort */
vha->flags.online = 1;
if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
@@ -7655,6 +7649,14 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
+ /* User may have updated [fcp|nvme] prefer in flash */
+ list_for_each_entry(fcport, &vp->vp_fcports, list) {
+ if (NVME_PRIORITY(ha, fcport))
+ fcport->do_prli_nvme = 1;
+ else
+ fcport->do_prli_nvme = 0;
+ }
+
qla2x00_vp_abort_isp(vp);
spin_lock_irqsave(&ha->vport_slock, flags);
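The qla_init.c hunks above add a walk over the fcport lists after an ISP abort so a flash-updated FCP/NVMe preference is re-applied to every port. A generic sketch of that list walk, with illustrative structures and predicate standing in for NVME_PRIORITY():

#include <linux/list.h>
#include <linux/types.h>

struct demo_port {
	struct list_head list;		/* linked into the host's port list */
	bool prefer_nvme;		/* stands in for NVME_PRIORITY() */
	u8 do_prli_nvme;
};

static void demo_refresh_prli_mode(struct list_head *ports)
{
	struct demo_port *p;

	list_for_each_entry(p, ports, list)
		p->do_prli_nvme = p->prefer_nvme ? 1 : 0;
}
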
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index df90169f82440..0b41e8a066026 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2587,6 +2587,33 @@ void
qla2x00_sp_release(struct kref *kref)
{
struct srb *sp = container_of(kref, struct srb, cmd_kref);
+ struct scsi_qla_host *vha = sp->vha;
+
+ switch (sp->type) {
+ case SRB_CT_PTHRU_CMD:
+ /* GPSC & GFPNID use fcport->ct_desc.ct_sns for both req & rsp */
+ if (sp->u.iocb_cmd.u.ctarg.req &&
+ (!sp->fcport ||
+ sp->u.iocb_cmd.u.ctarg.req != sp->fcport->ct_desc.ct_sns)) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp &&
+ (!sp->fcport ||
+ sp->u.iocb_cmd.u.ctarg.rsp != sp->fcport->ct_desc.ct_sns)) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+ break;
+ default:
+ break;
+ }
sp->free(sp);
}
@@ -2610,7 +2637,8 @@ static void qla2x00_els_dcmd_sp_free(srb_t *sp)
{
struct srb_iocb *elsio = &sp->u.iocb_cmd;
- kfree(sp->fcport);
+ if (sp->fcport)
+ qla2x00_free_fcport(sp->fcport);
if (elsio->u.els_logo.els_logo_pyld)
dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
@@ -2692,7 +2720,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
*/
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
- kfree(fcport);
+ qla2x00_free_fcport(fcport);
ql_log(ql_log_info, vha, 0x70e6,
"SRB allocation failed\n");
return -ENOMEM;
@@ -2723,6 +2751,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
if (!elsio->u.els_logo.els_logo_pyld) {
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ qla2x00_free_fcport(fcport);
return QLA_FUNCTION_FAILED;
}
@@ -2747,6 +2776,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
if (rval != QLA_SUCCESS) {
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ qla2x00_free_fcport(fcport);
return QLA_FUNCTION_FAILED;
}
@@ -3012,7 +3042,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
int
qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
- fc_port_t *fcport, bool wait)
+ fc_port_t *fcport)
{
srb_t *sp;
struct srb_iocb *elsio = NULL;
@@ -3027,8 +3057,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
if (!sp) {
ql_log(ql_log_info, vha, 0x70e6,
"SRB allocation failed\n");
- fcport->flags &= ~FCF_ASYNC_ACTIVE;
- return -ENOMEM;
+ goto done;
}
fcport->flags |= FCF_ASYNC_SENT;
@@ -3037,9 +3066,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
ql_dbg(ql_dbg_io, vha, 0x3073,
"%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24);
- if (wait)
- sp->flags = SRB_WAKEUP_ON_COMP;
-
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
@@ -3055,7 +3081,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
if (!elsio->u.els_plogi.els_plogi_pyld) {
rval = QLA_FUNCTION_FAILED;
- goto out;
+ goto done_free_sp;
}
resp_ptr = elsio->u.els_plogi.els_resp_pyld =
@@ -3064,7 +3090,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
if (!elsio->u.els_plogi.els_resp_pyld) {
rval = QLA_FUNCTION_FAILED;
- goto out;
+ goto done_free_sp;
}
ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
@@ -3080,7 +3106,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) {
struct fc_els_flogi *p = ptr;
-
p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC);
}
@@ -3089,10 +3114,11 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
(uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
sizeof(*elsio->u.els_plogi.els_plogi_pyld));
- init_completion(&elsio->u.els_plogi.comp);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
+ fcport->flags |= FCF_LOGIN_NEEDED;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ goto done_free_sp;
} else {
ql_dbg(ql_dbg_disc, vha, 0x3074,
"%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
@@ -3100,21 +3126,15 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
fcport->d_id.b24, vha->d_id.b24);
}
- if (wait) {
- wait_for_completion(&elsio->u.els_plogi.comp);
-
- if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
- rval = QLA_FUNCTION_FAILED;
- } else {
- goto done;
- }
+ return rval;
-out:
- fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+done_free_sp:
qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
return rval;
}
@@ -3918,7 +3938,7 @@ qla2x00_start_sp(srb_t *sp)
return -EAGAIN;
}
- pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
+ pkt = qla2x00_alloc_iocbs_ready(sp->qpair, sp);
if (!pkt) {
rval = -EAGAIN;
ql_log(ql_log_warn, vha, 0x700c,
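The qla_iocb.c hunk above teaches qla2x00_sp_release() to free CT passthrough buffers the SRB may still own before calling sp->free(). The underlying idiom is the kref release callback: the final kref_put() runs a function that tears down everything the object owns. A small sketch with illustrative names and a plain kmalloc'd payload in place of DMA memory:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/container_of.h>

struct demo_srb {
	struct kref cmd_kref;
	void *payload;			/* owned buffer, may be NULL */
};

static void demo_sp_release(struct kref *kref)
{
	struct demo_srb *sp = container_of(kref, struct demo_srb, cmd_kref);

	kfree(sp->payload);		/* release owned resources first */
	kfree(sp);
}

static void demo_drop_ref(struct demo_srb *sp)
{
	kref_put(&sp->cmd_kref, demo_sp_release);	/* frees sp on last ref */
}
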
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 21ec32b4fb280..0cd6f3e148824 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -194,7 +194,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
ha->flags.eeh_busy) {
ql_log(ql_log_warn, vha, 0xd035,
- "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
+ "Purge mbox: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
rval = QLA_ABORTED;
goto premature_exit;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index dd674378f2f39..1e2f52210f605 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4602,6 +4602,7 @@ fail_free_init_cb:
ha->init_cb_dma = 0;
fail_free_vp_map:
kfree(ha->vp_map);
+ ha->vp_map = NULL;
fail:
ql_log(ql_log_fatal, NULL, 0x0030,
"Memory allocation failure.\n");
@@ -5583,7 +5584,7 @@ qla2x00_do_work(struct scsi_qla_host *vha)
break;
case QLA_EVT_ELS_PLOGI:
qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
- e->u.fcport.fcport, false);
+ e->u.fcport.fcport);
break;
case QLA_EVT_SA_REPLACE:
rc = qla24xx_issue_sa_replace_iocb(vha, e);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 2ef2dbac0db27..d7551b1443e4a 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1062,6 +1062,16 @@ void qlt_free_session_done(struct work_struct *work)
"%s: sess %p logout completed\n", __func__, sess);
}
+ /* Check for any straggling I/O left behind */
+ if (!(sess->flags & FCF_FCP2_DEVICE) &&
+ qla2x00_eh_wait_for_pending_commands(sess->vha, sess->d_id.b24, 0, WAIT_TARGET)) {
+ ql_log(ql_log_warn, vha, 0x3027,
+ "IO not return. Resetting.\n");
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_chip_reset(vha);
+ }
+
if (sess->logo_ack_needed) {
sess->logo_ack_needed = 0;
qla24xx_async_notify_ack(vha, sess,
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index d903563e969eb..7627fd807bc3e 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.09.100-k"
+#define QLA2XXX_VERSION "10.02.09.200-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 9
-#define QLA_DRIVER_BETA_VER 100
+#define QLA_DRIVER_BETA_VER 200
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 5d560d9b89440..6177f4798f3ac 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1468,7 +1468,7 @@ static struct platform_driver qpti_sbus_driver = {
module_platform_driver(qpti_sbus_driver);
MODULE_DESCRIPTION("QlogicISP SBUS driver");
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_LICENSE("GPL");
MODULE_VERSION("2.1");
MODULE_FIRMWARE("qlogic/isp1000.bin");
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 8cad9792a5627..3e0c0381277ac 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -517,6 +517,8 @@ void scsi_attach_vpd(struct scsi_device *sdev)
scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1);
if (vpd_buf->data[i] == 0xb2)
scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2);
+ if (vpd_buf->data[i] == 0xb7)
+ scsi_update_vpd_page(sdev, 0xb7, &sdev->vpd_pgb7);
}
kfree(vpd_buf);
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index d03d66f114930..acf0592d63dae 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -43,6 +43,7 @@
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/async.h>
+#include <linux/cleanup.h>
#include <net/checksum.h>
@@ -532,6 +533,8 @@ static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_get_stream_status(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
@@ -606,6 +609,9 @@ static const struct opcode_info_t sa_in_16_iarr[] = {
{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
{16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
+ {0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
+ {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
+ 0, 0} }, /* GET STREAM STATUS */
};
static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
@@ -896,10 +902,12 @@ static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
static int poll_queues; /* iouring iopoll interface.*/
+static atomic_long_t writes_by_group_number[64];
+
static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;
-static struct bus_type pseudo_lld_bus;
+static const struct bus_type pseudo_lld_bus;
static struct device_driver sdebug_driverfs_driver = {
.name = sdebug_proc_name,
@@ -1867,6 +1875,19 @@ static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
return 0x3c;
}
+#define SDEBUG_BLE_LEN_AFTER_B4 28 /* thus vpage 32 bytes long */
+
+enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };
+
+/* Block limits extension VPD page (SBC-4) */
+static int inquiry_vpd_b7(unsigned char *arrb4)
+{
+ memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
+ arrb4[1] = 1; /* Reduced stream control support (RSCS) */
+ put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
+ return SDEBUG_BLE_LEN_AFTER_B4;
+}
+
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584
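The scsi_debug additions above build the new Block Limits Extension (B7h) VPD page and, later, the GET STREAM STATUS response by writing big-endian multi-byte fields with put_unaligned_be16/32(). A minimal sketch of filling such a page; the field offsets and values are illustrative:

#include <linux/string.h>
#include <asm/unaligned.h>

#define DEMO_VPD_B7_LEN 28		/* payload after byte 4, as in the hunk above */

static int demo_fill_vpd_b7(unsigned char *arr)
{
	memset(arr, 0, DEMO_VPD_B7_LEN);
	arr[1] = 1;				/* RSCS: reduced stream control supported */
	put_unaligned_be16(6, &arr[2]);		/* maximum number of streams (illustrative) */
	return DEMO_VPD_B7_LEN;
}
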
@@ -1903,7 +1924,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u32 len;
char lu_id_str[6];
int host_no = devip->sdbg_host->shost->host_no;
-
+
+ arr[1] = cmd[2];
port_group_id = (((host_no + 1) & 0x7f) << 8) +
(devip->channel & 0x7f);
if (sdebug_vpd_use_hostno == 0)
@@ -1914,7 +1936,6 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
(devip->target * 1000) - 3;
len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
if (0 == cmd[2]) { /* supported vital product data pages */
- arr[1] = cmd[2]; /*sanity */
n = 4;
arr[n++] = 0x0; /* this page */
arr[n++] = 0x80; /* unit serial number */
@@ -1932,26 +1953,22 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
arr[n++] = 0xb2; /* LB Provisioning */
if (is_zbc)
arr[n++] = 0xb6; /* ZB dev. char. */
+ arr[n++] = 0xb7; /* Block limits extension */
}
arr[3] = n - 4; /* number of supported VPD pages */
} else if (0x80 == cmd[2]) { /* unit serial number */
- arr[1] = cmd[2]; /*sanity */
arr[3] = len;
memcpy(&arr[4], lu_id_str, len);
} else if (0x83 == cmd[2]) { /* device identification */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
target_dev_id, lu_id_num,
lu_id_str, len,
&devip->lu_name);
} else if (0x84 == cmd[2]) { /* Software interface ident. */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_84(&arr[4]);
} else if (0x85 == cmd[2]) { /* Management network addresses */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_85(&arr[4]);
} else if (0x86 == cmd[2]) { /* extended inquiry */
- arr[1] = cmd[2]; /*sanity */
arr[3] = 0x3c; /* number of following entries */
if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
arr[4] = 0x4; /* SPT: GRD_CHK:1 */
@@ -1959,33 +1976,32 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
else
arr[4] = 0x0; /* no protection stuff */
- arr[5] = 0x7; /* head of q, ordered + simple q's */
+ /*
+ * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
+ * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
+ */
+ arr[5] = 0x17;
} else if (0x87 == cmd[2]) { /* mode page policy */
- arr[1] = cmd[2]; /*sanity */
arr[3] = 0x8; /* number of following entries */
arr[4] = 0x2; /* disconnect-reconnect mp */
arr[6] = 0x80; /* mlus, shared */
arr[8] = 0x18; /* protocol specific lu */
arr[10] = 0x82; /* mlus, per initiator port */
} else if (0x88 == cmd[2]) { /* SCSI Ports */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
- arr[1] = cmd[2]; /*sanity */
n = inquiry_vpd_89(&arr[4]);
put_unaligned_be16(n, arr + 2);
} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b0(&arr[4]);
} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b1(devip, &arr[4]);
} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b2(&arr[4]);
} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b6(devip, &arr[4]);
+ } else if (cmd[2] == 0xb7) { /* block limits extension page */
+ arr[3] = inquiry_vpd_b7(&arr[4]);
} else {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
kfree(arr);
@@ -2554,6 +2570,40 @@ static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
return sizeof(ctrl_m_pg);
}
+/* IO Advice Hints Grouping mode page */
+static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
+{
+ /* IO Advice Hints Grouping mode page */
+ struct grouping_m_pg {
+ u8 page_code; /* OR 0x40 when subpage_code > 0 */
+ u8 subpage_code;
+ __be16 page_length;
+ u8 reserved[12];
+ struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
+ };
+ static const struct grouping_m_pg gr_m_pg = {
+ .page_code = 0xa | 0x40,
+ .subpage_code = 5,
+ .page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
+ .descr = {
+ { .st_enble = 1 },
+ { .st_enble = 1 },
+ { .st_enble = 1 },
+ { .st_enble = 1 },
+ { .st_enble = 1 },
+ { .st_enble = 0 },
+ }
+ };
+
+ BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
+ 16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
+ memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
+ if (1 == pcontrol) {
+ /* There are no changeable values so clear from byte 4 on. */
+ memset(p + 4, 0, sizeof(gr_m_pg) - 4);
+ }
+ return sizeof(gr_m_pg);
+}
static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{ /* Informational Exceptions control mode page for mode_sense */
@@ -2627,7 +2677,8 @@ static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
return sizeof(sas_sha_m_pg);
}
-#define SDEBUG_MAX_MSENSE_SZ 256
+/* PAGE_SIZE is more than necessary but provides room for future expansion. */
+#define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
static int resp_mode_sense(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
@@ -2638,10 +2689,13 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
int target_dev_id;
int target = scp->device->id;
unsigned char *ap;
- unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
+ unsigned char *arr __free(kfree);
unsigned char *cmd = scp->cmnd;
- bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
+ bool dbd, llbaa, msense_6, is_disk, is_zbc;
+ arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
+ if (!arr)
+ return -ENOMEM;
dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
pcontrol = (cmd[2] & 0xc0) >> 6;
pcode = cmd[2] & 0x3f;
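resp_mode_sense() above now kzalloc()s its response buffer with the __free(kfree) scoped-cleanup attribute from <linux/cleanup.h>, so every return path, including the new bad_pcode/bad_subpcode exits, frees the buffer automatically. A minimal sketch of that pattern; the buffer size and contents are illustrative:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int demo_fill_page(void)
{
	/* Freed automatically when arr goes out of scope, on every return. */
	unsigned char *arr __free(kfree) = kzalloc(256, GFP_ATOMIC);

	if (!arr)
		return -ENOMEM;

	arr[0] = 0x5a;		/* ... build the response in arr ... */
	return 0;
}
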
@@ -2699,45 +2753,63 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
ap = arr + offset;
}
- if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
- /* TODO: Control Extension page */
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
- return check_condition_result;
- }
- bad_pcode = false;
-
+ /*
+ * N.B. If len > 0 before a resp_*_pg() call, that call should take the form:
+ * len += resp_*_pg(ap + len, pcontrol, target);
+ */
switch (pcode) {
case 0x1: /* Read-Write error recovery page, direct access */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
len = resp_err_recov_pg(ap, pcontrol, target);
offset += len;
break;
case 0x2: /* Disconnect-Reconnect page, all devices */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
len = resp_disconnect_pg(ap, pcontrol, target);
offset += len;
break;
case 0x3: /* Format device page, direct access */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
if (is_disk) {
len = resp_format_pg(ap, pcontrol, target);
offset += len;
- } else
- bad_pcode = true;
+ } else {
+ goto bad_pcode;
+ }
break;
case 0x8: /* Caching page, direct access */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
if (is_disk || is_zbc) {
len = resp_caching_pg(ap, pcontrol, target);
offset += len;
- } else
- bad_pcode = true;
+ } else {
+ goto bad_pcode;
+ }
break;
case 0xa: /* Control Mode page, all devices */
- len = resp_ctrl_m_pg(ap, pcontrol, target);
+ switch (subpcode) {
+ case 0:
+ len = resp_ctrl_m_pg(ap, pcontrol, target);
+ break;
+ case 0x05:
+ len = resp_grouping_m_pg(ap, pcontrol, target);
+ break;
+ case 0xff:
+ len = resp_ctrl_m_pg(ap, pcontrol, target);
+ len += resp_grouping_m_pg(ap + len, pcontrol, target);
+ break;
+ default:
+ goto bad_subpcode;
+ }
offset += len;
break;
case 0x19: /* if spc==1 then sas phy, control+discover */
- if ((subpcode > 0x2) && (subpcode < 0xff)) {
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
- return check_condition_result;
- }
+ if (subpcode > 0x2 && subpcode < 0xff)
+ goto bad_subpcode;
len = 0;
if ((0x0 == subpcode) || (0xff == subpcode))
len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
@@ -2749,49 +2821,50 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
offset += len;
break;
case 0x1c: /* Informational Exceptions Mode page, all devices */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
len = resp_iec_m_pg(ap, pcontrol, target);
offset += len;
break;
case 0x3f: /* Read all Mode pages */
- if ((0 == subpcode) || (0xff == subpcode)) {
- len = resp_err_recov_pg(ap, pcontrol, target);
- len += resp_disconnect_pg(ap + len, pcontrol, target);
- if (is_disk) {
- len += resp_format_pg(ap + len, pcontrol,
- target);
- len += resp_caching_pg(ap + len, pcontrol,
- target);
- } else if (is_zbc) {
- len += resp_caching_pg(ap + len, pcontrol,
- target);
- }
- len += resp_ctrl_m_pg(ap + len, pcontrol, target);
- len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
- if (0xff == subpcode) {
- len += resp_sas_pcd_m_spg(ap + len, pcontrol,
- target, target_dev_id);
- len += resp_sas_sha_m_spg(ap + len, pcontrol);
- }
- len += resp_iec_m_pg(ap + len, pcontrol, target);
- offset += len;
- } else {
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
- return check_condition_result;
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
+ len = resp_err_recov_pg(ap, pcontrol, target);
+ len += resp_disconnect_pg(ap + len, pcontrol, target);
+ if (is_disk) {
+ len += resp_format_pg(ap + len, pcontrol, target);
+ len += resp_caching_pg(ap + len, pcontrol, target);
+ } else if (is_zbc) {
+ len += resp_caching_pg(ap + len, pcontrol, target);
}
+ len += resp_ctrl_m_pg(ap + len, pcontrol, target);
+ if (0xff == subpcode)
+ len += resp_grouping_m_pg(ap + len, pcontrol, target);
+ len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
+ if (0xff == subpcode) {
+ len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
+ target_dev_id);
+ len += resp_sas_sha_m_spg(ap + len, pcontrol);
+ }
+ len += resp_iec_m_pg(ap + len, pcontrol, target);
+ offset += len;
break;
default:
- bad_pcode = true;
- break;
- }
- if (bad_pcode) {
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
- return check_condition_result;
+ goto bad_pcode;
}
if (msense_6)
arr[0] = offset - 1;
else
put_unaligned_be16((offset - 2), arr + 0);
return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
+
+bad_pcode:
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
+ return check_condition_result;
+
+bad_subpcode:
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
+ return check_condition_result;
}
#define SDEBUG_MAX_MSELECT_SZ 512
@@ -3306,7 +3379,8 @@ static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
- u32 sg_skip, u64 lba, u32 num, bool do_write)
+ u32 sg_skip, u64 lba, u32 num, bool do_write,
+ u8 group_number)
{
int ret;
u64 block, rest = 0;
@@ -3325,6 +3399,10 @@ static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
return 0;
if (scp->sc_data_direction != dir)
return -1;
+
+ if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
+ atomic_long_inc(&writes_by_group_number[group_number]);
+
fsp = sip->storep;
block = do_div(lba, sdebug_store_sectors);
@@ -3698,7 +3776,7 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(sip, scp, 0, lba, num, false);
+ ret = do_device_access(sip, scp, 0, lba, num, false, 0);
sdeb_read_unlock(sip);
if (unlikely(ret == -1))
return DID_ERROR << 16;
@@ -3883,6 +3961,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
bool check_prot;
u32 num;
+ u8 group = 0;
u32 ei_lba;
int ret;
u64 lba;
@@ -3894,11 +3973,13 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
ei_lba = 0;
lba = get_unaligned_be64(cmd + 2);
num = get_unaligned_be32(cmd + 10);
+ group = cmd[14] & 0x3f;
check_prot = true;
break;
case WRITE_10:
ei_lba = 0;
lba = get_unaligned_be32(cmd + 2);
+ group = cmd[6] & 0x3f;
num = get_unaligned_be16(cmd + 7);
check_prot = true;
break;
@@ -3913,15 +3994,18 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
ei_lba = 0;
lba = get_unaligned_be32(cmd + 2);
num = get_unaligned_be32(cmd + 6);
+ group = cmd[6] & 0x3f;
check_prot = true;
break;
case 0x53: /* XDWRITEREAD(10) */
ei_lba = 0;
lba = get_unaligned_be32(cmd + 2);
+ group = cmd[6] & 0x1f;
num = get_unaligned_be16(cmd + 7);
check_prot = false;
break;
default: /* assume WRITE(32) */
+ group = cmd[6] & 0x3f;
lba = get_unaligned_be64(cmd + 12);
ei_lba = get_unaligned_be32(cmd + 20);
num = get_unaligned_be32(cmd + 28);
@@ -3976,7 +4060,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(sip, scp, 0, lba, num, true);
+ ret = do_device_access(sip, scp, 0, lba, num, true, group);
if (unlikely(scsi_debug_lbp()))
map_region(sip, lba, num);
/* If ZBC zone then bump its write pointer */
@@ -4028,12 +4112,14 @@ static int resp_write_scat(struct scsi_cmnd *scp,
u32 lb_size = sdebug_sector_size;
u32 ei_lba;
u64 lba;
+ u8 group;
int ret, res;
bool is_16;
static const u32 lrd_size = 32; /* + parameter list header size */
if (cmd[0] == VARIABLE_LENGTH_CMD) {
is_16 = false;
+ group = cmd[6] & 0x3f;
wrprotect = (cmd[10] >> 5) & 0x7;
lbdof = get_unaligned_be16(cmd + 12);
num_lrd = get_unaligned_be16(cmd + 16);
@@ -4044,6 +4130,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
lbdof = get_unaligned_be16(cmd + 4);
num_lrd = get_unaligned_be16(cmd + 8);
bt_len = get_unaligned_be32(cmd + 10);
+ group = cmd[14] & 0x3f;
if (unlikely(have_dif_prot)) {
if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
wrprotect) {
@@ -4132,7 +4219,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
}
}
- ret = do_device_access(sip, scp, sg_off, lba, num, true);
+ ret = do_device_access(sip, scp, sg_off, lba, num, true, group);
/* If ZBC zone then bump its write pointer */
if (sdebug_dev_is_zoned(devip))
zbc_inc_wp(devip, lba, num);
@@ -4507,6 +4594,51 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
}
+static int resp_get_stream_status(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ u16 starting_stream_id, stream_id;
+ const u8 *cmd = scp->cmnd;
+ u32 alloc_len, offset;
+ u8 arr[256] = {};
+ struct scsi_stream_status_header *h = (void *)arr;
+
+ starting_stream_id = get_unaligned_be16(cmd + 4);
+ alloc_len = get_unaligned_be32(cmd + 10);
+
+ if (alloc_len < 8) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
+ return check_condition_result;
+ }
+
+ if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
+ return check_condition_result;
+ }
+
+ /*
+ * The GET STREAM STATUS command only reports status information
+ * about open streams. Treat the non-permanent streams as open.
+ */
+ put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
+ &h->number_of_open_streams);
+
+ for (offset = 8, stream_id = starting_stream_id;
+ offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
+ stream_id < MAXIMUM_NUMBER_OF_STREAMS;
+ offset += 8, stream_id++) {
+ struct scsi_stream_status *stream_status = (void *)arr + offset;
+
+ stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
+ put_unaligned_be16(stream_id,
+ &stream_status->stream_identifier);
+ stream_status->rel_lifetime = stream_id + 1;
+ }
+ put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */
+
+ return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
+}
+
static int resp_sync_cache(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
@@ -7182,6 +7314,30 @@ static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR_RO(tur_ms_to_ready);
+static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
+{
+ char *p = buf, *end = buf + PAGE_SIZE;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
+ p += scnprintf(p, end - p, "%d %ld\n", i,
+ atomic_long_read(&writes_by_group_number[i]));
+
+ return p - buf;
+}
+
+static ssize_t group_number_stats_store(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
+ atomic_long_set(&writes_by_group_number[i], 0);
+
+ return count;
+}
+static DRIVER_ATTR_RW(group_number_stats);
+
/* Note: The following array creates attribute files in the
/sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
files (over those found in the /sys/module/scsi_debug/parameters
@@ -7228,6 +7384,7 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_cdb_len.attr,
&driver_attr_tur_ms_to_ready.attr,
&driver_attr_zbc.attr,
+ &driver_attr_group_number_stats.attr,
NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
@@ -8405,7 +8562,7 @@ static void sdebug_driver_remove(struct device *dev)
scsi_host_put(sdbg_host->shost);
}
-static struct bus_type pseudo_lld_bus = {
+static const struct bus_type pseudo_lld_bus = {
.name = "pseudo",
.probe = sdebug_driver_probe,
.remove = sdebug_driver_remove,
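
A hedged userspace sketch (not part of the patch) of how the group_number_stats attribute added above could be consumed; the program name and minimal error handling are illustrative, and the sysfs path is the scsi_debug pseudo-bus driver directory referenced later in the driver comments.

	/* Illustrative only: dump and then reset the per-group write counters. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Attribute created by DRIVER_ATTR_RW(group_number_stats) above. */
		const char *path =
			"/sys/bus/pseudo/drivers/scsi_debug/group_number_stats";
		char buf[4096];
		ssize_t n;
		int fd = open(path, O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* One "<group number> <write count>" pair per line. */
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			fputs(buf, stdout);
		}

		/* Writing anything resets all counters to zero. */
		lseek(fd, 0, SEEK_SET);
		if (write(fd, "0\n", 2) < 0)
			perror("write");

		close(fd);
		return 0;
	}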
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 3fcaf10a9dfe7..ba7237e838633 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -551,9 +551,9 @@ static int scsi_dev_info_list_add_str(char *dev_list)
if (model)
strflags = strsep(&next, next_check);
if (!model || !strflags) {
- printk(KERN_ERR "%s: bad dev info string '%s' '%s'"
- " '%s'\n", __func__, vendor, model,
- strflags);
+ pr_err("%s: bad dev info string '%s' '%s' '%s'\n",
+ __func__, vendor, model ? model : "",
+ strflags ? strflags : "");
res = -EINVAL;
} else
res = scsi_dev_info_list_add(0 /* compatible */, vendor,
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index df5ac03d5d6c2..5b3230ef51fe6 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -184,6 +184,92 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
__scsi_queue_insert(cmd, reason, true);
}
+void scsi_failures_reset_retries(struct scsi_failures *failures)
+{
+ struct scsi_failure *failure;
+
+ failures->total_retries = 0;
+
+ for (failure = failures->failure_definitions; failure->result;
+ failure++)
+ failure->retries = 0;
+}
+EXPORT_SYMBOL_GPL(scsi_failures_reset_retries);
+
+/**
+ * scsi_check_passthrough - Determine if passthrough scsi_cmnd needs a retry.
+ * @scmd: scsi_cmnd to check.
+ * @failures: scsi_failures struct that lists failures to check for.
+ *
+ * Returns -EAGAIN if the caller should retry else 0.
+ */
+static int scsi_check_passthrough(struct scsi_cmnd *scmd,
+ struct scsi_failures *failures)
+{
+ struct scsi_failure *failure;
+ struct scsi_sense_hdr sshdr;
+ enum sam_status status;
+
+ if (!failures)
+ return 0;
+
+ for (failure = failures->failure_definitions; failure->result;
+ failure++) {
+ if (failure->result == SCMD_FAILURE_RESULT_ANY)
+ goto maybe_retry;
+
+ if (host_byte(scmd->result) &&
+ host_byte(scmd->result) == host_byte(failure->result))
+ goto maybe_retry;
+
+ status = status_byte(scmd->result);
+ if (!status)
+ continue;
+
+ if (failure->result == SCMD_FAILURE_STAT_ANY &&
+ !scsi_status_is_good(scmd->result))
+ goto maybe_retry;
+
+ if (status != status_byte(failure->result))
+ continue;
+
+ if (status_byte(failure->result) != SAM_STAT_CHECK_CONDITION ||
+ failure->sense == SCMD_FAILURE_SENSE_ANY)
+ goto maybe_retry;
+
+ if (!scsi_command_normalize_sense(scmd, &sshdr))
+ return 0;
+
+ if (failure->sense != sshdr.sense_key)
+ continue;
+
+ if (failure->asc == SCMD_FAILURE_ASC_ANY)
+ goto maybe_retry;
+
+ if (failure->asc != sshdr.asc)
+ continue;
+
+ if (failure->ascq == SCMD_FAILURE_ASCQ_ANY ||
+ failure->ascq == sshdr.ascq)
+ goto maybe_retry;
+ }
+
+ return 0;
+
+maybe_retry:
+ if (failure->allowed) {
+ if (failure->allowed == SCMD_FAILURE_NO_LIMIT ||
+ ++failure->retries <= failure->allowed)
+ return -EAGAIN;
+ } else {
+ if (failures->total_allowed == SCMD_FAILURE_NO_LIMIT ||
+ ++failures->total_retries <= failures->total_allowed)
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
/**
* scsi_execute_cmd - insert request and wait for the result
* @sdev: scsi_device
@@ -192,7 +278,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
* @buffer: data buffer
* @bufflen: len of buffer
* @timeout: request timeout in HZ
- * @retries: number of times to retry request
+ * @ml_retries: number of times SCSI midlayer will retry request
* @args: Optional args. See struct definition for field descriptions
*
* Returns the scsi_cmnd result field if a command was executed, or a negative
@@ -200,7 +286,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
*/
int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
blk_opf_t opf, void *buffer, unsigned int bufflen,
- int timeout, int retries,
+ int timeout, int ml_retries,
const struct scsi_exec_args *args)
{
static const struct scsi_exec_args default_args;
@@ -214,6 +300,7 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
args->sense_len != SCSI_SENSE_BUFFERSIZE))
return -EINVAL;
+retry:
req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -227,7 +314,7 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
scmd = blk_mq_rq_to_pdu(req);
scmd->cmd_len = COMMAND_SIZE(cmd[0]);
memcpy(scmd->cmnd, cmd, scmd->cmd_len);
- scmd->allowed = retries;
+ scmd->allowed = ml_retries;
scmd->flags |= args->scmd_flags;
req->timeout = timeout;
req->rq_flags |= RQF_QUIET;
@@ -237,6 +324,11 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
*/
blk_execute_rq(req, true);
+ if (scsi_check_passthrough(scmd, args->failures) == -EAGAIN) {
+ blk_mq_free_request(req);
+ goto retry;
+ }
+
/*
* Some devices (USB mass-storage in particular) may transfer
* garbage data together with a residue indicating that the data
@@ -543,10 +635,9 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
if (blk_queue_add_random(q))
add_disk_randomness(req->q->disk);
- if (!blk_rq_is_passthrough(req)) {
- WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
- cmd->flags &= ~SCMD_INITIALIZED;
- }
+ WARN_ON_ONCE(!blk_rq_is_passthrough(req) &&
+ !(cmd->flags & SCMD_INITIALIZED));
+ cmd->flags = 0;
/*
* Calling rcu_barrier() is not necessary here because the
@@ -2172,11 +2263,25 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage,
unsigned char cmd[12];
int use_10_for_ms;
int header_length;
- int result, retry_count = retries;
+ int result;
struct scsi_sense_hdr my_sshdr;
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = retries,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
/* caller might not be interested in sense, but we need it */
.sshdr = sshdr ? : &my_sshdr,
+ .failures = &failures,
};
memset(data, 0, sizeof(*data));
@@ -2238,12 +2343,6 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage,
goto retry;
}
}
- if (scsi_status_is_check_condition(result) &&
- sshdr->sense_key == UNIT_ATTENTION &&
- retry_count) {
- retry_count--;
- goto retry;
- }
}
return -EIO;
}
@@ -3336,3 +3435,7 @@ void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq)
scmd->result = SAM_STAT_CHECK_CONDITION;
}
EXPORT_SYMBOL_GPL(scsi_build_sense);
+
+#ifdef CONFIG_SCSI_LIB_KUNIT_TEST
+#include "scsi_lib_test.c"
+#endif
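
A minimal sketch (not part of the patch) of how a caller might use the scsi_failures plumbing introduced above together with scsi_execute_cmd(); the function name, CDB, retry limits and timeout are illustrative only, and the usual <scsi/scsi_cmnd.h>/<scsi/scsi_device.h> includes are assumed.

	/* Illustrative only: retry UNIT ATTENTION up to 5 times, anything else twice. */
	static int example_test_unit_ready(struct scsi_device *sdev)
	{
		const u8 cmd[6] = { TEST_UNIT_READY };
		struct scsi_failure failure_defs[] = {
			{
				.sense = UNIT_ATTENTION,
				.asc = SCMD_FAILURE_ASC_ANY,
				.ascq = SCMD_FAILURE_ASCQ_ANY,
				.allowed = 5,
				.result = SAM_STAT_CHECK_CONDITION,
			},
			{
				.result = SCMD_FAILURE_RESULT_ANY,
				.allowed = 2,
			},
			{}
		};
		struct scsi_failures failures = {
			.failure_definitions = failure_defs,
		};
		const struct scsi_exec_args exec_args = {
			.failures = &failures,
		};

		/* 30 second timeout, 3 SCSI midlayer retries per submission. */
		return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0, 30 * HZ,
					3, &exec_args);
	}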
diff --git a/drivers/scsi/scsi_lib_test.c b/drivers/scsi/scsi_lib_test.c
new file mode 100644
index 0000000000000..99834426a100a
--- /dev/null
+++ b/drivers/scsi/scsi_lib_test.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit tests for scsi_lib.c.
+ *
+ * Copyright (C) 2023, Oracle Corporation
+ */
+#include <kunit/test.h>
+
+#include <scsi/scsi_proto.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+
+#define SCSI_LIB_TEST_MAX_ALLOWED 3
+#define SCSI_LIB_TEST_TOTAL_MAX_ALLOWED 5
+
+static void scsi_lib_test_multiple_sense(struct kunit *test)
+{
+ struct scsi_failure multiple_sense_failure_defs[] = {
+ {
+ .sense = DATA_PROTECT,
+ .asc = 0x1,
+ .ascq = 0x1,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x11,
+ .ascq = 0x0,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = NOT_READY,
+ .asc = 0x11,
+ .ascq = 0x22,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = ABORTED_COMMAND,
+ .asc = 0x11,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = HARDWARE_ERROR,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = ILLEGAL_REQUEST,
+ .asc = 0x91,
+ .ascq = 0x36,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = multiple_sense_failure_defs,
+ };
+ u8 sense[SCSI_SENSE_BUFFERSIZE] = {};
+ struct scsi_cmnd sc = {
+ .sense_buffer = sense,
+ };
+ int i;
+
+ /* Match end of array */
+ scsi_build_sense(&sc, 0, ILLEGAL_REQUEST, 0x91, 0x36);
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
+ /* Basic match in array */
+ scsi_build_sense(&sc, 0, UNIT_ATTENTION, 0x11, 0x0);
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
+ /* No matching sense entry */
+ scsi_build_sense(&sc, 0, MISCOMPARE, 0x11, 0x11);
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+ /* Match using SCMD_FAILURE_ASCQ_ANY */
+ scsi_build_sense(&sc, 0, ABORTED_COMMAND, 0x11, 0x22);
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
+ /* Fail to match */
+ scsi_build_sense(&sc, 0, ABORTED_COMMAND, 0x22, 0x22);
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+ /* Match using SCMD_FAILURE_ASC_ANY */
+ scsi_build_sense(&sc, 0, HARDWARE_ERROR, 0x11, 0x22);
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
+ /* No matching status entry */
+ sc.result = SAM_STAT_RESERVATION_CONFLICT;
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+
+ /* Test hitting allowed limit */
+ scsi_build_sense(&sc, 0, NOT_READY, 0x11, 0x22);
+ for (i = 0; i < SCSI_LIB_TEST_MAX_ALLOWED; i++)
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc,
+ &failures));
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+
+ /* reset retries so we can retest */
+ failures.failure_definitions = multiple_sense_failure_defs;
+ scsi_failures_reset_retries(&failures);
+
+ /* Test no retries allowed */
+ scsi_build_sense(&sc, 0, DATA_PROTECT, 0x1, 0x1);
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+}
+
+static void scsi_lib_test_any_sense(struct kunit *test)
+{
+ struct scsi_failure any_sense_failure_defs[] = {
+ {
+ .result = SCMD_FAILURE_SENSE_ANY,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = any_sense_failure_defs,
+ };
+ u8 sense[SCSI_SENSE_BUFFERSIZE] = {};
+ struct scsi_cmnd sc = {
+ .sense_buffer = sense,
+ };
+
+ /* Match using SCMD_FAILURE_SENSE_ANY */
+ failures.failure_definitions = any_sense_failure_defs;
+ scsi_build_sense(&sc, 0, MEDIUM_ERROR, 0x11, 0x22);
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
+}
+
+static void scsi_lib_test_host(struct kunit *test)
+{
+ struct scsi_failure retryable_host_failure_defs[] = {
+ {
+ .result = DID_TRANSPORT_DISRUPTED << 16,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ },
+ {
+ .result = DID_TIME_OUT << 16,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = retryable_host_failure_defs,
+ };
+ u8 sense[SCSI_SENSE_BUFFERSIZE] = {};
+ struct scsi_cmnd sc = {
+ .sense_buffer = sense,
+ };
+
+ /* No matching host byte entry */
+ failures.failure_definitions = retryable_host_failure_defs;
+ sc.result = DID_NO_CONNECT << 16;
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+ /* Matching host byte entry */
+ sc.result = DID_TIME_OUT << 16;
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
+}
+
+static void scsi_lib_test_any_failure(struct kunit *test)
+{
+ struct scsi_failure any_failure_defs[] = {
+ {
+ .result = SCMD_FAILURE_RESULT_ANY,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = any_failure_defs,
+ };
+ u8 sense[SCSI_SENSE_BUFFERSIZE] = {};
+ struct scsi_cmnd sc = {
+ .sense_buffer = sense,
+ };
+
+ /* Match SCMD_FAILURE_RESULT_ANY */
+ failures.failure_definitions = any_failure_defs;
+ sc.result = DID_TRANSPORT_FAILFAST << 16;
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
+}
+
+static void scsi_lib_test_any_status(struct kunit *test)
+{
+ struct scsi_failure any_status_failure_defs[] = {
+ {
+ .result = SCMD_FAILURE_STAT_ANY,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = any_status_failure_defs,
+ };
+ u8 sense[SCSI_SENSE_BUFFERSIZE] = {};
+ struct scsi_cmnd sc = {
+ .sense_buffer = sense,
+ };
+
+ /* Test any status handling */
+ failures.failure_definitions = any_status_failure_defs;
+ sc.result = SAM_STAT_RESERVATION_CONFLICT;
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
+}
+
+static void scsi_lib_test_total_allowed(struct kunit *test)
+{
+ struct scsi_failure total_allowed_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* Fail all CCs except the UA above */
+ {
+ .sense = SCMD_FAILURE_SENSE_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* Retry any other errors not listed above */
+ {
+ .result = SCMD_FAILURE_RESULT_ANY,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = total_allowed_defs,
+ };
+ u8 sense[SCSI_SENSE_BUFFERSIZE] = {};
+ struct scsi_cmnd sc = {
+ .sense_buffer = sense,
+ };
+ int i;
+
+ /* Test total_allowed */
+ failures.failure_definitions = total_allowed_defs;
+ scsi_failures_reset_retries(&failures);
+ failures.total_allowed = SCSI_LIB_TEST_TOTAL_MAX_ALLOWED;
+
+ scsi_build_sense(&sc, 0, UNIT_ATTENTION, 0x28, 0x0);
+ for (i = 0; i < SCSI_LIB_TEST_TOTAL_MAX_ALLOWED; i++)
+ /* Retry since we are under the total_allowed limit */
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc,
+ &failures));
+ sc.result = DID_TIME_OUT << 16;
+ /* We have now hit the total_allowed limit so no more retries */
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+}
+
+static void scsi_lib_test_mixed_total(struct kunit *test)
+{
+ struct scsi_failure mixed_total_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x28,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x29,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .allowed = 1,
+ .result = DID_TIME_OUT << 16,
+ },
+ {}
+ };
+ u8 sense[SCSI_SENSE_BUFFERSIZE] = {};
+ struct scsi_failures failures = {
+ .failure_definitions = mixed_total_defs,
+ };
+ struct scsi_cmnd sc = {
+ .sense_buffer = sense,
+ };
+ int i;
+
+ /*
+ * Test total_allowed when there is a mix of per failure allowed
+ * and total_allowed limits.
+ */
+ failures.failure_definitions = mixed_total_defs;
+ scsi_failures_reset_retries(&failures);
+ failures.total_allowed = SCSI_LIB_TEST_TOTAL_MAX_ALLOWED;
+
+ scsi_build_sense(&sc, 0, UNIT_ATTENTION, 0x28, 0x0);
+ for (i = 0; i < SCSI_LIB_TEST_TOTAL_MAX_ALLOWED; i++)
+ /* Retry since we are under the total_allowed limit */
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc,
+ &failures));
+ /* Do not retry since we are now over the total_allowed limit */
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+
+ scsi_failures_reset_retries(&failures);
+ scsi_build_sense(&sc, 0, UNIT_ATTENTION, 0x28, 0x0);
+ for (i = 0; i < SCSI_LIB_TEST_TOTAL_MAX_ALLOWED; i++)
+ /* Retry since we are under the total_allowed limit */
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc,
+ &failures));
+ sc.result = DID_TIME_OUT << 16;
+ /* Retry because this failure has a per failure limit */
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
+ scsi_build_sense(&sc, 0, UNIT_ATTENTION, 0x29, 0x0);
+ /* total_allowed is now hit so no more retries */
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+}
+
+static void scsi_lib_test_check_passthough(struct kunit *test)
+{
+ scsi_lib_test_multiple_sense(test);
+ scsi_lib_test_any_sense(test);
+ scsi_lib_test_host(test);
+ scsi_lib_test_any_failure(test);
+ scsi_lib_test_any_status(test);
+ scsi_lib_test_total_allowed(test);
+ scsi_lib_test_mixed_total(test);
+}
+
+static struct kunit_case scsi_lib_test_cases[] = {
+ KUNIT_CASE(scsi_lib_test_check_passthough),
+ {}
+};
+
+static struct kunit_suite scsi_lib_test_suite = {
+ .name = "scsi_lib",
+ .test_cases = scsi_lib_test_cases,
+};
+
+kunit_test_suite(scsi_lib_test_suite);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 1fbfe1b52c9f1..9fc397a9ce7a4 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -156,7 +156,7 @@ extern void scsi_sysfs_device_initialize(struct scsi_device *);
extern struct scsi_transport_template blank_transport_template;
extern void __scsi_remove_device(struct scsi_device *);
-extern struct bus_type scsi_bus_type;
+extern const struct bus_type scsi_bus_type;
extern const struct attribute_group *scsi_shost_groups[];
/* scsi_netlink.c */
diff --git a/drivers/scsi/scsi_proto_test.c b/drivers/scsi/scsi_proto_test.c
new file mode 100644
index 0000000000000..7fa0a78a2ad16
--- /dev/null
+++ b/drivers/scsi/scsi_proto_test.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Google LLC
+ */
+#include <kunit/test.h>
+#include <asm-generic/unaligned.h>
+#include <scsi/scsi_proto.h>
+
+static void test_scsi_proto(struct kunit *test)
+{
+ static const union {
+ struct scsi_io_group_descriptor desc;
+ u8 arr[sizeof(struct scsi_io_group_descriptor)];
+ } d = { .arr = { 0x45, 0, 0, 0, 0xb0, 0xe4, 0xe3 } };
+ KUNIT_EXPECT_EQ(test, d.desc.io_advice_hints_mode + 0, 1);
+ KUNIT_EXPECT_EQ(test, d.desc.st_enble + 0, 1);
+ KUNIT_EXPECT_EQ(test, d.desc.cs_enble + 0, 0);
+ KUNIT_EXPECT_EQ(test, d.desc.ic_enable + 0, 1);
+ KUNIT_EXPECT_EQ(test, d.desc.acdlu + 0, 1);
+ KUNIT_EXPECT_EQ(test, d.desc.rlbsr + 0, 3);
+ KUNIT_EXPECT_EQ(test, d.desc.lbm_descriptor_type + 0, 0);
+ KUNIT_EXPECT_EQ(test, d.desc.params[0] + 0, 0xe4);
+ KUNIT_EXPECT_EQ(test, d.desc.params[1] + 0, 0xe3);
+
+ static const union {
+ struct scsi_stream_status s;
+ u8 arr[sizeof(struct scsi_stream_status)];
+ } ss = { .arr = { 0x80, 0, 0x12, 0x34, 0x3f } };
+ KUNIT_EXPECT_EQ(test, ss.s.perm + 0, 1);
+ KUNIT_EXPECT_EQ(test, get_unaligned_be16(&ss.s.stream_identifier),
+ 0x1234);
+ KUNIT_EXPECT_EQ(test, ss.s.rel_lifetime + 0, 0x3f);
+
+ static const union {
+ struct scsi_stream_status_header h;
+ u8 arr[sizeof(struct scsi_stream_status_header)];
+ } sh = { .arr = { 1, 2, 3, 4, 0, 0, 5, 6 } };
+ KUNIT_EXPECT_EQ(test, get_unaligned_be32(&sh.h.len), 0x1020304);
+ KUNIT_EXPECT_EQ(test, get_unaligned_be16(&sh.h.number_of_open_streams),
+ 0x506);
+}
+
+static struct kunit_case scsi_proto_test_cases[] = {
+ KUNIT_CASE(test_scsi_proto),
+ {}
+};
+
+static struct kunit_suite scsi_proto_test_suite = {
+ .name = "scsi_proto",
+ .test_cases = scsi_proto_test_cases,
+};
+kunit_test_suite(scsi_proto_test_suite);
+
+MODULE_DESCRIPTION("<scsi/scsi_proto.h> unit tests");
+MODULE_AUTHOR("Bart Van Assche");
+MODULE_LICENSE("GPL");
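
A hedged sketch (not part of the patch) of walking GET STREAM STATUS parameter data with the scsi_stream_status_header / scsi_stream_status layouts exercised by the test above; the helper name is hypothetical and the buffer is assumed to hold the result of a successful SAI_GET_STREAM_STATUS command.

	/* Illustrative only: print each returned stream status descriptor. */
	#include <asm-generic/unaligned.h>
	#include <scsi/scsi_proto.h>

	static void example_parse_stream_status(const u8 *buf, u32 buf_len)
	{
		const struct scsi_stream_status_header *h = (const void *)buf;
		u32 param_len, avail, i;

		if (buf_len < sizeof(*h))
			return;

		/*
		 * PARAMETER DATA LENGTH counts the descriptor bytes following the
		 * eight-byte header, as in the scsi_debug emulation above.
		 */
		param_len = get_unaligned_be32(&h->len);
		avail = min(param_len, buf_len - (u32)sizeof(*h));

		for (i = 0; (i + 1) * sizeof(struct scsi_stream_status) <= avail; i++) {
			const struct scsi_stream_status *s = &h->stream_status[i];

			pr_info("stream %u: perm=%u rel_lifetime=%u\n",
				get_unaligned_be16(&s->stream_identifier),
				s->perm, s->rel_lifetime);
		}
	}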
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 9969f4e2f1c3d..ffd7e7e72933c 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -412,7 +412,7 @@ static void scsi_target_dev_release(struct device *dev)
put_device(parent);
}
-static struct device_type scsi_target_type = {
+static const struct device_type scsi_target_type = {
.name = "scsi_target",
.release = scsi_target_dev_release,
};
@@ -626,6 +626,7 @@ void scsi_sanitize_inquiry_string(unsigned char *s, int len)
}
EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
+
/**
* scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
* @sdev: scsi_device to probe
@@ -647,10 +648,36 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
int first_inquiry_len, try_inquiry_len, next_inquiry_len;
int response_len = 0;
int pass, count, result, resid;
- struct scsi_sense_hdr sshdr;
+ struct scsi_failure failure_defs[] = {
+ /*
+ * not-ready to ready transition [asc/ascq=0x28/0x0] or
+ * power-on, reset [asc/ascq=0x29/0x0], continue. INQUIRY
+ * should not yield UNIT_ATTENTION but many buggy devices do
+ * so anyway.
+ */
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x28,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x29,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .allowed = 1,
+ .result = DID_TIME_OUT << 16,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .total_allowed = 3,
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
- .sshdr = &sshdr,
.resid = &resid,
+ .failures = &failures,
};
*bflags = 0;
@@ -668,6 +695,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
pass, try_inquiry_len));
/* Each pass gets up to three chances to ignore Unit Attention */
+ scsi_failures_reset_retries(&failures);
+
for (count = 0; count < 3; ++count) {
memset(scsi_cmd, 0, 6);
scsi_cmd[0] = INQUIRY;
@@ -684,22 +713,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
"scsi scan: INQUIRY %s with code 0x%x\n",
result ? "failed" : "successful", result));
- if (result > 0) {
- /*
- * not-ready to ready transition [asc/ascq=0x28/0x0]
- * or power-on, reset [asc/ascq=0x29/0x0], continue.
- * INQUIRY should not yield UNIT_ATTENTION
- * but many buggy devices do so anyway.
- */
- if (scsi_status_is_check_condition(result) &&
- scsi_sense_valid(&sshdr)) {
- if ((sshdr.sense_key == UNIT_ATTENTION) &&
- ((sshdr.asc == 0x28) ||
- (sshdr.asc == 0x29)) &&
- (sshdr.ascq == 0))
- continue;
- }
- } else if (result == 0) {
+ if (result == 0) {
/*
* if nothing was transferred, we try
* again. It's a workaround for some USB
@@ -1402,14 +1416,34 @@ static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflag
unsigned int length;
u64 lun;
unsigned int num_luns;
- unsigned int retries;
int result;
struct scsi_lun *lunp, *lun_data;
- struct scsi_sense_hdr sshdr;
struct scsi_device *sdev;
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* Fail all CCs except the UA above */
+ {
+ .sense = SCMD_FAILURE_SENSE_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* Retry any other errors not listed above */
+ {
+ .result = SCMD_FAILURE_RESULT_ANY,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .total_allowed = 3,
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
- .sshdr = &sshdr,
+ .failures = &failures,
};
int ret = 0;
@@ -1480,29 +1514,18 @@ retry:
* should come through as a check condition, and will not generate
* a retry.
*/
- for (retries = 0; retries < 3; retries++) {
- SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
- "scsi scan: Sending REPORT LUNS to (try %d)\n",
- retries));
-
- result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
- lun_data, length,
- SCSI_REPORT_LUNS_TIMEOUT, 3,
- &exec_args);
+ scsi_failures_reset_retries(&failures);
- SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
- "scsi scan: REPORT LUNS"
- " %s (try %d) result 0x%x\n",
- result ? "failed" : "successful",
- retries, result));
- if (result == 0)
- break;
- else if (scsi_sense_valid(&sshdr)) {
- if (sshdr.sense_key != UNIT_ATTENTION)
- break;
- }
- }
+ SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
+ "scsi scan: Sending REPORT LUNS\n"));
+
+ result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, lun_data,
+ length, SCSI_REPORT_LUNS_TIMEOUT, 3,
+ &exec_args);
+ SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
+ "scsi scan: REPORT LUNS %s result 0x%x\n",
+ result ? "failed" : "successful", result));
if (result) {
/*
* The device probably does not support a REPORT LUN command
@@ -1619,6 +1642,40 @@ int scsi_add_device(struct Scsi_Host *host, uint channel,
}
EXPORT_SYMBOL(scsi_add_device);
+int scsi_resume_device(struct scsi_device *sdev)
+{
+ struct device *dev = &sdev->sdev_gendev;
+ int ret = 0;
+
+ device_lock(dev);
+
+ /*
+ * Bail out if the device or its queue is not running. Otherwise,
+ * the rescan may block waiting for commands to be executed, with us
+ * holding the device lock. This can result in a potential deadlock
+ * in the power management core code when system resume is on-going.
+ */
+ if (sdev->sdev_state != SDEV_RUNNING ||
+ blk_queue_pm_only(sdev->request_queue)) {
+ ret = -EWOULDBLOCK;
+ goto unlock;
+ }
+
+ if (dev->driver && try_module_get(dev->driver->owner)) {
+ struct scsi_driver *drv = to_scsi_driver(dev->driver);
+
+ if (drv->resume)
+ ret = drv->resume(dev);
+ module_put(dev->driver->owner);
+ }
+
+unlock:
+ device_unlock(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(scsi_resume_device);
+
int scsi_rescan_device(struct scsi_device *sdev)
{
struct device *dev = &sdev->sdev_gendev;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 24f6eefb68030..775df00021e4d 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -27,7 +27,7 @@
#include "scsi_priv.h"
#include "scsi_logging.h"
-static struct device_type scsi_dev_type;
+static const struct device_type scsi_dev_type;
static const struct {
enum scsi_device_state value;
@@ -449,6 +449,7 @@ static void scsi_device_dev_release(struct device *dev)
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
+ struct scsi_vpd *vpd_pgb7 = NULL;
unsigned long flags;
might_sleep();
@@ -494,6 +495,8 @@ static void scsi_device_dev_release(struct device *dev)
lockdep_is_held(&sdev->inquiry_mutex));
vpd_pgb2 = rcu_replace_pointer(sdev->vpd_pgb2, vpd_pgb2,
lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_pgb7 = rcu_replace_pointer(sdev->vpd_pgb7, vpd_pgb7,
+ lockdep_is_held(&sdev->inquiry_mutex));
mutex_unlock(&sdev->inquiry_mutex);
if (vpd_pg0)
@@ -510,6 +513,8 @@ static void scsi_device_dev_release(struct device *dev)
kfree_rcu(vpd_pgb1, rcu);
if (vpd_pgb2)
kfree_rcu(vpd_pgb2, rcu);
+ if (vpd_pgb7)
+ kfree_rcu(vpd_pgb7, rcu);
kfree(sdev->inquiry);
kfree(sdev);
@@ -549,7 +554,7 @@ static int scsi_bus_uevent(const struct device *dev, struct kobj_uevent_env *env
return 0;
}
-struct bus_type scsi_bus_type = {
+const struct bus_type scsi_bus_type = {
.name = "scsi",
.match = scsi_bus_match,
.uevent = scsi_bus_uevent,
@@ -921,6 +926,7 @@ sdev_vpd_pg_attr(pg89);
sdev_vpd_pg_attr(pgb0);
sdev_vpd_pg_attr(pgb1);
sdev_vpd_pg_attr(pgb2);
+sdev_vpd_pg_attr(pgb7);
sdev_vpd_pg_attr(pg0);
static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
@@ -1295,6 +1301,9 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
if (attr == &dev_attr_vpd_pgb2 && !sdev->vpd_pgb2)
return 0;
+ if (attr == &dev_attr_vpd_pgb7 && !sdev->vpd_pgb7)
+ return 0;
+
return S_IRUGO;
}
@@ -1347,6 +1356,7 @@ static struct bin_attribute *scsi_sdev_bin_attrs[] = {
&dev_attr_vpd_pgb0,
&dev_attr_vpd_pgb1,
&dev_attr_vpd_pgb2,
+ &dev_attr_vpd_pgb7,
&dev_attr_inquiry,
NULL
};
@@ -1626,7 +1636,7 @@ int scsi_sysfs_add_host(struct Scsi_Host *shost)
return 0;
}
-static struct device_type scsi_dev_type = {
+static const struct device_type scsi_dev_type = {
.name = "scsi_device",
.release = scsi_device_dev_release,
.groups = scsi_sdev_attr_groups,
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 3075b2ddf7a69..af3ac6346796b 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1201,7 +1201,7 @@ static const struct device_type iscsi_flashnode_conn_dev_type = {
.release = iscsi_flashnode_conn_release,
};
-static struct bus_type iscsi_flashnode_bus;
+static const struct bus_type iscsi_flashnode_bus;
int iscsi_flashnode_bus_match(struct device *dev,
struct device_driver *drv)
@@ -1212,7 +1212,7 @@ int iscsi_flashnode_bus_match(struct device *dev,
}
EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match);
-static struct bus_type iscsi_flashnode_bus = {
+static const struct bus_type iscsi_flashnode_bus = {
.name = "iscsi_flashnode",
.match = &iscsi_flashnode_bus_match,
};
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index f668c1c0a98f2..64852e6df3e32 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -108,29 +108,30 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
enum req_op op, void *buffer, unsigned int bufflen,
struct scsi_sense_hdr *sshdr)
{
- int i, result;
- struct scsi_sense_hdr sshdr_tmp;
blk_opf_t opf = op | REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = DV_RETRIES,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
+ /* bypass the SDEV_QUIESCE state with BLK_MQ_REQ_PM */
.req_flags = BLK_MQ_REQ_PM,
- .sshdr = sshdr ? : &sshdr_tmp,
+ .sshdr = sshdr,
+ .failures = &failures,
};
- sshdr = exec_args.sshdr;
-
- for(i = 0; i < DV_RETRIES; i++) {
- /*
- * The purpose of the RQF_PM flag below is to bypass the
- * SDEV_QUIESCE state.
- */
- result = scsi_execute_cmd(sdev, cmd, opf, buffer, bufflen,
- DV_TIMEOUT, 1, &exec_args);
- if (result < 0 || !scsi_sense_valid(sshdr) ||
- sshdr->sense_key != UNIT_ATTENTION)
- break;
- }
- return result;
+ return scsi_execute_cmd(sdev, cmd, opf, buffer, bufflen, DV_TIMEOUT, 1,
+ &exec_args);
}
static struct {
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bdd0acf7fa3cb..58fdf679341dc 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -47,6 +47,7 @@
#include <linux/blkpg.h>
#include <linux/blk-pm.h>
#include <linux/delay.h>
+#include <linux/rw_hint.h>
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
@@ -1080,12 +1081,38 @@ static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
return BLK_STS_OK;
}
+/**
+ * sd_group_number() - Compute the GROUP NUMBER field
+ * @cmd: SCSI command for which to compute the value of the six-bit GROUP NUMBER
+ * field.
+ *
+ * From SBC-5 r05 (https://www.t10.org/cgi-bin/ac.pl?t=f&f=sbc5r05.pdf):
+ * 0: no relative lifetime.
+ * 1: shortest relative lifetime.
+ * 2: second shortest relative lifetime.
+ * 3 - 0x3d: intermediate relative lifetimes.
+ * 0x3e: second longest relative lifetime.
+ * 0x3f: longest relative lifetime.
+ */
+static u8 sd_group_number(struct scsi_cmnd *cmd)
+{
+ const struct request *rq = scsi_cmd_to_rq(cmd);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
+
+ if (!sdkp->rscs)
+ return 0;
+
+ return min3((u32)rq->write_hint, (u32)sdkp->permanent_stream_count,
+ 0x3fu);
+}
+
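
A short worked example (values assumed, not taken from the patch) of how sd_group_number() maps block layer write hints once RSCS is reported:

	/*
	 * Illustrative only: assume sdkp->rscs == 1 and
	 * sdkp->permanent_stream_count == 4. sd_group_number() then clamps the
	 * request's write hint to the permanent stream range:
	 *
	 *   rq->write_hint == WRITE_LIFE_NOT_SET (0)  ->  GROUP NUMBER 0 (no lifetime)
	 *   rq->write_hint == WRITE_LIFE_SHORT   (2)  ->  GROUP NUMBER 2
	 *   rq->write_hint == WRITE_LIFE_EXTREME (5)  ->  GROUP NUMBER 4 (clamped)
	 *
	 * With sdkp->rscs == 0 the function always returns 0.
	 */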
static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
sector_t lba, unsigned int nr_blocks,
unsigned char flags, unsigned int dld)
{
cmd->cmd_len = SD_EXT_CDB_SIZE;
cmd->cmnd[0] = VARIABLE_LENGTH_CMD;
+ cmd->cmnd[6] = sd_group_number(cmd);
cmd->cmnd[7] = 0x18; /* Additional CDB len */
cmd->cmnd[9] = write ? WRITE_32 : READ_32;
cmd->cmnd[10] = flags;
@@ -1104,7 +1131,7 @@ static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
cmd->cmd_len = 16;
cmd->cmnd[0] = write ? WRITE_16 : READ_16;
cmd->cmnd[1] = flags | ((dld >> 2) & 0x01);
- cmd->cmnd[14] = (dld & 0x03) << 6;
+ cmd->cmnd[14] = ((dld & 0x03) << 6) | sd_group_number(cmd);
cmd->cmnd[15] = 0;
put_unaligned_be64(lba, &cmd->cmnd[2]);
put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
@@ -1119,7 +1146,7 @@ static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
cmd->cmd_len = 10;
cmd->cmnd[0] = write ? WRITE_10 : READ_10;
cmd->cmnd[1] = flags;
- cmd->cmnd[6] = 0;
+ cmd->cmnd[6] = sd_group_number(cmd);
cmd->cmnd[9] = 0;
put_unaligned_be32(lba, &cmd->cmnd[2]);
put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
@@ -1256,7 +1283,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
protect | fua, dld);
} else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
- sdp->use_10_for_rw || protect) {
+ sdp->use_10_for_rw || protect || rq->write_hint) {
ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
protect | fua);
} else {
@@ -1645,36 +1672,35 @@ out:
static int sd_sync_cache(struct scsi_disk *sdkp)
{
- int retries, res;
+ int res;
struct scsi_device *sdp = sdkp->device;
const int timeout = sdp->request_queue->rq_timeout
* SD_FLUSH_TIMEOUT_MULTIPLIER;
+ /* Leave the rest of the command zero to indicate flush everything. */
+ const unsigned char cmd[16] = { sdp->use_16_for_sync ?
+ SYNCHRONIZE_CACHE_16 : SYNCHRONIZE_CACHE };
struct scsi_sense_hdr sshdr;
+ struct scsi_failure failure_defs[] = {
+ {
+ .allowed = 3,
+ .result = SCMD_FAILURE_RESULT_ANY,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.req_flags = BLK_MQ_REQ_PM,
.sshdr = &sshdr,
+ .failures = &failures,
};
if (!scsi_device_online(sdp))
return -ENODEV;
- for (retries = 3; retries > 0; --retries) {
- unsigned char cmd[16] = { 0 };
-
- if (sdp->use_16_for_sync)
- cmd[0] = SYNCHRONIZE_CACHE_16;
- else
- cmd[0] = SYNCHRONIZE_CACHE;
- /*
- * Leave the rest of the command zero to indicate
- * flush everything.
- */
- res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0,
- timeout, sdkp->max_retries, &exec_args);
- if (res == 0)
- break;
- }
-
+ res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, timeout,
+ sdkp->max_retries, &exec_args);
if (res) {
sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
@@ -1801,8 +1827,22 @@ static int sd_pr_in_command(struct block_device *bdev, u8 sa,
struct scsi_device *sdev = sdkp->device;
struct scsi_sense_hdr sshdr;
u8 cmd[10] = { PERSISTENT_RESERVE_IN, sa };
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = 5,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
+ .failures = &failures,
};
int result;
@@ -1889,8 +1929,22 @@ static int sd_pr_out_command(struct block_device *bdev, u8 sa, u64 key,
struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
struct scsi_device *sdev = sdkp->device;
struct scsi_sense_hdr sshdr;
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = 5,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
+ .failures = &failures,
};
int result;
u8 cmd[16] = { 0, };
@@ -2235,55 +2289,68 @@ static int sd_done(struct scsi_cmnd *SCpnt)
static void
sd_spinup_disk(struct scsi_disk *sdkp)
{
- unsigned char cmd[10];
+ static const u8 cmd[10] = { TEST_UNIT_READY };
unsigned long spintime_expire = 0;
- int retries, spintime;
+ int spintime, sense_valid = 0;
unsigned int the_result;
struct scsi_sense_hdr sshdr;
+ struct scsi_failure failure_defs[] = {
+ /* Do not retry Medium Not Present */
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x3A,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = NOT_READY,
+ .asc = 0x3A,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* Retry up to 3 times when scsi_status_is_good() would return false */
+ {
+ .result = SCMD_FAILURE_STAT_ANY,
+ .allowed = 3,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
+ .failures = &failures,
};
- int sense_valid = 0;
spintime = 0;
/* Spin up drives, as required. Only do this at boot time */
/* Spinup needs to be done for module loads too. */
do {
- retries = 0;
+ bool media_was_present = sdkp->media_present;
- do {
- bool media_was_present = sdkp->media_present;
+ scsi_failures_reset_retries(&failures);
- cmd[0] = TEST_UNIT_READY;
- memset((void *) &cmd[1], 0, 9);
-
- the_result = scsi_execute_cmd(sdkp->device, cmd,
- REQ_OP_DRV_IN, NULL, 0,
- SD_TIMEOUT,
- sdkp->max_retries,
- &exec_args);
+ the_result = scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN,
+ NULL, 0, SD_TIMEOUT,
+ sdkp->max_retries, &exec_args);
- if (the_result > 0) {
- /*
- * If the drive has indicated to us that it
- * doesn't have any media in it, don't bother
- * with any more polling.
- */
- if (media_not_present(sdkp, &sshdr)) {
- if (media_was_present)
- sd_printk(KERN_NOTICE, sdkp,
- "Media removed, stopped polling\n");
- return;
- }
- sense_valid = scsi_sense_valid(&sshdr);
+ if (the_result > 0) {
+ /*
+ * If the drive has indicated to us that it doesn't
+ * have any media in it, don't bother with any more
+ * polling.
+ */
+ if (media_not_present(sdkp, &sshdr)) {
+ if (media_was_present)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Media removed, stopped polling\n");
+ return;
}
- retries++;
- } while (retries < 3 &&
- (!scsi_status_is_good(the_result) ||
- (scsi_status_is_check_condition(the_result) &&
- sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
+ sense_valid = scsi_sense_valid(&sshdr);
+ }
if (!scsi_status_is_check_condition(the_result)) {
/* no sense, TUR either succeeded or failed
@@ -2318,14 +2385,16 @@ sd_spinup_disk(struct scsi_disk *sdkp)
* Issue command to spin up drive when not ready
*/
if (!spintime) {
+ /* Return immediately and start spin cycle */
+ const u8 start_cmd[10] = {
+ [0] = START_STOP,
+ [1] = 1,
+ [4] = sdkp->device->start_stop_pwr_cond ?
+ 0x11 : 1,
+ };
+
sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
- cmd[0] = START_STOP;
- cmd[1] = 1; /* Return immediately */
- memset((void *) &cmd[2], 0, 8);
- cmd[4] = 1; /* Start spin cycle */
- if (sdkp->device->start_stop_pwr_cond)
- cmd[4] |= 1 << 4;
- scsi_execute_cmd(sdkp->device, cmd,
+ scsi_execute_cmd(sdkp->device, start_cmd,
REQ_OP_DRV_IN, NULL, 0,
SD_TIMEOUT, sdkp->max_retries,
&exec_args);
@@ -2546,42 +2615,58 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
- unsigned char cmd[16];
+ static const u8 cmd[10] = { READ_CAPACITY };
struct scsi_sense_hdr sshdr;
+ struct scsi_failure failure_defs[] = {
+ /* Do not retry Medium Not Present */
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x3A,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = NOT_READY,
+ .asc = 0x3A,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* Device reset might occur several times so retry a lot */
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x29,
+ .allowed = READ_CAPACITY_RETRIES_ON_RESET,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* Retry any other error not listed above up to 3 times */
+ {
+ .result = SCMD_FAILURE_RESULT_ANY,
+ .allowed = 3,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
+ .failures = &failures,
};
int sense_valid = 0;
int the_result;
- int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
sector_t lba;
unsigned sector_size;
- do {
- cmd[0] = READ_CAPACITY;
- memset(&cmd[1], 0, 9);
- memset(buffer, 0, 8);
+ memset(buffer, 0, 8);
+
+ the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buffer,
+ 8, SD_TIMEOUT, sdkp->max_retries,
+ &exec_args);
- the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buffer,
- 8, SD_TIMEOUT, sdkp->max_retries,
- &exec_args);
+ if (the_result > 0) {
+ sense_valid = scsi_sense_valid(&sshdr);
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
-
- if (the_result > 0) {
- sense_valid = scsi_sense_valid(&sshdr);
- if (sense_valid &&
- sshdr.sense_key == UNIT_ATTENTION &&
- sshdr.asc == 0x29 && sshdr.ascq == 0x00)
- /* Device reset might occur several times,
- * give it one more chance */
- if (--reset_retries > 0)
- continue;
- }
- retries--;
-
- } while (the_result && retries);
+ }
if (the_result) {
sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
@@ -3001,6 +3086,70 @@ defaults:
sdkp->DPOFUA = 0;
}
+static bool sd_is_perm_stream(struct scsi_disk *sdkp, unsigned int stream_id)
+{
+ u8 cdb[16] = { SERVICE_ACTION_IN_16, SAI_GET_STREAM_STATUS };
+ struct {
+ struct scsi_stream_status_header h;
+ struct scsi_stream_status s;
+ } buf;
+ struct scsi_device *sdev = sdkp->device;
+ struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
+ int res;
+
+ put_unaligned_be16(stream_id, &cdb[4]);
+ put_unaligned_be32(sizeof(buf), &cdb[10]);
+
+ res = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, &buf, sizeof(buf),
+ SD_TIMEOUT, sdkp->max_retries, &exec_args);
+ if (res < 0)
+ return false;
+ if (scsi_status_is_check_condition(res) && scsi_sense_valid(&sshdr))
+ sd_print_sense_hdr(sdkp, &sshdr);
+ if (res)
+ return false;
+ if (get_unaligned_be32(&buf.h.len) < sizeof(struct scsi_stream_status))
+ return false;
+ return buf.h.stream_status[0].perm;
+}
+
+static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+ struct scsi_device *sdp = sdkp->device;
+ const struct scsi_io_group_descriptor *desc, *start, *end;
+ struct scsi_sense_hdr sshdr;
+ struct scsi_mode_data data;
+ int res;
+
+ res = scsi_mode_sense(sdp, /*dbd=*/0x8, /*modepage=*/0x0a,
+ /*subpage=*/0x05, buffer, SD_BUF_SIZE, SD_TIMEOUT,
+ sdkp->max_retries, &data, &sshdr);
+ if (res < 0)
+ return;
+ start = (void *)buffer + data.header_length + 16;
+ end = (void *)buffer + ALIGN_DOWN(data.header_length + data.length,
+ sizeof(*end));
+ /*
+ * From "SBC-5 Constrained Streams with Data Lifetimes": Device severs
+ * should assign the lowest numbered stream identifiers to permanent
+ * streams.
+ */
+ for (desc = start; desc < end; desc++)
+ if (!desc->st_enble || !sd_is_perm_stream(sdkp, desc - start))
+ break;
+ sdkp->permanent_stream_count = desc - start;
+ if (sdkp->rscs && sdkp->permanent_stream_count < 2)
+ sd_printk(KERN_INFO, sdkp,
+ "Unexpected: RSCS has been set and the permanent stream count is %u\n",
+ sdkp->permanent_stream_count);
+ else if (sdkp->permanent_stream_count)
+ sd_printk(KERN_INFO, sdkp, "permanent stream count = %d\n",
+ sdkp->permanent_stream_count);
+}
+
/*
* The ATO bit indicates whether the DIF application tag is available
* for use by the operating system.
@@ -3108,6 +3257,18 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
rcu_read_unlock();
}
+/* Parse the Block Limits Extension VPD page (0xb7) */
+static void sd_read_block_limits_ext(struct scsi_disk *sdkp)
+{
+ struct scsi_vpd *vpd;
+
+ rcu_read_lock();
+ vpd = rcu_dereference(sdkp->device->vpd_pgb7);
+ if (vpd && vpd->len >= 2)
+ sdkp->rscs = vpd->data[5] & 1;
+ rcu_read_unlock();
+}
+
/**
* sd_read_block_characteristics - Query block dev. characteristics
* @sdkp: disk to query
@@ -3483,6 +3644,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (scsi_device_supports_vpd(sdp)) {
sd_read_block_provisioning(sdkp);
sd_read_block_limits(sdkp);
+ sd_read_block_limits_ext(sdkp);
sd_read_block_characteristics(sdkp);
sd_zbc_read_zones(sdkp, buffer);
sd_read_cpr(sdkp);
@@ -3492,6 +3654,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_write_protect_flag(sdkp, buffer);
sd_read_cache_type(sdkp, buffer);
+ sd_read_io_hints(sdkp, buffer);
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
sd_read_security(sdkp, buffer);
@@ -3752,12 +3915,12 @@ static int sd_probe(struct device *dev)
blk_pm_runtime_init(sdp->request_queue, dev);
if (sdp->rpm_autosuspend) {
pm_runtime_set_autosuspend_delay(dev,
- sdp->host->hostt->rpm_autosuspend_delay);
+ sdp->host->rpm_autosuspend_delay);
}
error = device_add_disk(dev, gd, NULL);
if (error) {
- put_device(&sdkp->disk_dev);
+ device_unregister(&sdkp->disk_dev);
put_disk(gd);
goto out;
}
@@ -3945,7 +4108,21 @@ static int sd_suspend_runtime(struct device *dev)
return sd_suspend_common(dev, true);
}
-static int sd_resume(struct device *dev, bool runtime)
+static int sd_resume(struct device *dev)
+{
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
+
+ sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
+
+ if (opal_unlock_from_suspend(sdkp->opal_dev)) {
+ sd_printk(KERN_NOTICE, sdkp, "OPAL unlock failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int sd_resume_common(struct device *dev, bool runtime)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
int ret;
@@ -3961,7 +4138,7 @@ static int sd_resume(struct device *dev, bool runtime)
sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
ret = sd_start_stop_device(sdkp, 1);
if (!ret) {
- opal_unlock_from_suspend(sdkp->opal_dev);
+ sd_resume(dev);
sdkp->suspended = false;
}
@@ -3980,7 +4157,7 @@ static int sd_resume_system(struct device *dev)
return 0;
}
- return sd_resume(dev, false);
+ return sd_resume_common(dev, false);
}
static int sd_resume_runtime(struct device *dev)
@@ -4007,7 +4184,7 @@ static int sd_resume_runtime(struct device *dev)
"Failed to clear sense data\n");
}
- return sd_resume(dev, true);
+ return sd_resume_common(dev, true);
}
static const struct dev_pm_ops sd_pm_ops = {
@@ -4030,6 +4207,7 @@ static struct scsi_driver sd_template = {
.pm = &sd_pm_ops,
},
.rescan = sd_rescan,
+ .resume = sd_resume,
.init_command = sd_init_command,
.uninit_command = sd_uninit_command,
.done = sd_done,
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 409dda5350d10..5c4285a582b22 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -125,6 +125,8 @@ struct scsi_disk {
unsigned int physical_block_size;
unsigned int max_medium_access_timeouts;
unsigned int medium_access_timed_out;
+ /* number of permanent streams */
+ u16 permanent_stream_count;
u8 media_present;
u8 write_prot;
u8 protection_type;/* Data Integrity Field */
@@ -151,6 +153,7 @@ struct scsi_disk {
unsigned urswrz : 1;
unsigned security : 1;
unsigned ignore_medium_access_errors : 1;
+ unsigned rscs : 1; /* reduced stream control support */
};
#define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev)
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index d7d0c35c58b80..0f2c87cc95e62 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -87,19 +87,32 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
0
};
unsigned char recv_page_code;
- unsigned int retries = SES_RETRIES;
- struct scsi_sense_hdr sshdr;
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x29,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = SES_RETRIES,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = NOT_READY,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = SES_RETRIES,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
- .sshdr = &sshdr,
+ .failures = &failures,
};
- do {
- ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, bufflen,
- SES_TIMEOUT, 1, &exec_args);
- } while (ret > 0 && --retries && scsi_sense_valid(&sshdr) &&
- (sshdr.sense_key == NOT_READY ||
- (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
-
+ ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, bufflen,
+ SES_TIMEOUT, 1, &exec_args);
if (unlikely(ret))
return ret;
@@ -131,19 +144,32 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
bufflen & 0xff,
0
};
- struct scsi_sense_hdr sshdr;
- unsigned int retries = SES_RETRIES;
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x29,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = SES_RETRIES,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = NOT_READY,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = SES_RETRIES,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
- .sshdr = &sshdr,
+ .failures = &failures,
};
- do {
- result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, buf,
- bufflen, SES_TIMEOUT, 1, &exec_args);
- } while (result > 0 && --retries && scsi_sense_valid(&sshdr) &&
- (sshdr.sense_key == NOT_READY ||
- (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
-
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, buf, bufflen,
+ SES_TIMEOUT, 1, &exec_args);
if (result)
sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
result);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 86210e4dd0d35..baf870a03ecf6 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -285,6 +285,7 @@ sg_open(struct inode *inode, struct file *filp)
int dev = iminor(inode);
int flags = filp->f_flags;
struct request_queue *q;
+ struct scsi_device *device;
Sg_device *sdp;
Sg_fd *sfp;
int retval;
@@ -301,11 +302,12 @@ sg_open(struct inode *inode, struct file *filp)
/* This driver's module count bumped by fops_get in <linux/fs.h> */
/* Prevent the device driver from vanishing while we sleep */
- retval = scsi_device_get(sdp->device);
+ device = sdp->device;
+ retval = scsi_device_get(device);
if (retval)
goto sg_put;
- retval = scsi_autopm_get_device(sdp->device);
+ retval = scsi_autopm_get_device(device);
if (retval)
goto sdp_put;
@@ -313,7 +315,7 @@ sg_open(struct inode *inode, struct file *filp)
* check if O_NONBLOCK. Permits SCSI commands to be issued
* during error recovery. Tread carefully. */
if (!((flags & O_NONBLOCK) ||
- scsi_block_when_processing_errors(sdp->device))) {
+ scsi_block_when_processing_errors(device))) {
retval = -ENXIO;
/* we are in error recovery for this device */
goto error_out;
@@ -344,7 +346,7 @@ sg_open(struct inode *inode, struct file *filp)
if (sdp->open_cnt < 1) { /* no existing opens */
sdp->sgdebug = 0;
- q = sdp->device->request_queue;
+ q = device->request_queue;
sdp->sg_tablesize = queue_max_segments(q);
}
sfp = sg_add_sfp(sdp);
@@ -370,10 +372,11 @@ out_undo:
error_mutex_locked:
mutex_unlock(&sdp->open_rel_lock);
error_out:
- scsi_autopm_put_device(sdp->device);
+ scsi_autopm_put_device(device);
sdp_put:
- scsi_device_put(sdp->device);
- goto sg_put;
+ kref_put(&sdp->d_ref, sg_device_destroy);
+ scsi_device_put(device);
+ return retval;
}
/* Release resources associated with a successful sg_open()
@@ -1424,7 +1427,9 @@ static const struct file_operations sg_fops = {
.llseek = no_llseek,
};
-static struct class *sg_sysfs_class;
+static const struct class sg_sysfs_class = {
+ .name = "scsi_generic"
+};
static int sg_sysfs_valid = 0;
@@ -1526,7 +1531,7 @@ sg_add_device(struct device *cl_dev)
if (sg_sysfs_valid) {
struct device *sg_class_member;
- sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
+ sg_class_member = device_create(&sg_sysfs_class, cl_dev->parent,
MKDEV(SCSI_GENERIC_MAJOR,
sdp->index),
sdp, "%s", sdp->name);
@@ -1616,7 +1621,7 @@ sg_remove_device(struct device *cl_dev)
read_unlock_irqrestore(&sdp->sfd_lock, iflags);
sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
- device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
+ device_destroy(&sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
cdev_del(sdp->cdev);
sdp->cdev = NULL;
@@ -1687,11 +1692,9 @@ init_sg(void)
SG_MAX_DEVS, "sg");
if (rc)
return rc;
- sg_sysfs_class = class_create("scsi_generic");
- if ( IS_ERR(sg_sysfs_class) ) {
- rc = PTR_ERR(sg_sysfs_class);
+ rc = class_register(&sg_sysfs_class);
+ if (rc)
goto err_out;
- }
sg_sysfs_valid = 1;
rc = scsi_register_interface(&sg_interface);
if (0 == rc) {
@@ -1700,7 +1703,7 @@ init_sg(void)
#endif /* CONFIG_SCSI_PROC_FS */
return 0;
}
- class_destroy(sg_sysfs_class);
+ class_unregister(&sg_sysfs_class);
register_sg_sysctls();
err_out:
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
@@ -1715,7 +1718,7 @@ exit_sg(void)
remove_proc_subtree("scsi/sg", NULL);
#endif /* CONFIG_SCSI_PROC_FS */
scsi_unregister_interface(&sg_interface);
- class_destroy(sg_sysfs_class);
+ class_unregister(&sg_sysfs_class);
sg_sysfs_valid = 0;
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
SG_MAX_DEVS);
@@ -2207,6 +2210,7 @@ sg_remove_sfp_usercontext(struct work_struct *work)
{
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
+ struct scsi_device *device = sdp->device;
Sg_request *srp;
unsigned long iflags;
@@ -2232,8 +2236,8 @@ sg_remove_sfp_usercontext(struct work_struct *work)
"sg_remove_sfp: sfp=0x%p\n", sfp));
kfree(sfp);
- scsi_device_put(sdp->device);
kref_put(&sdp->d_ref, sg_device_destroy);
+ scsi_device_put(device);
module_put(THIS_MODULE);
}
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index d093dd187b2f9..268b3a40891ed 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -717,27 +717,29 @@ fail:
static void get_sectorsize(struct scsi_cd *cd)
{
- unsigned char cmd[10];
- unsigned char buffer[8];
- int the_result, retries = 3;
+ static const u8 cmd[10] = { READ_CAPACITY };
+ unsigned char buffer[8] = { };
+ int the_result;
int sector_size;
struct request_queue *queue;
+ struct scsi_failure failure_defs[] = {
+ {
+ .result = SCMD_FAILURE_RESULT_ANY,
+ .allowed = 3,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
+ const struct scsi_exec_args exec_args = {
+ .failures = &failures,
+ };
- do {
- cmd[0] = READ_CAPACITY;
- memset((void *) &cmd[1], 0, 9);
- memset(buffer, 0, sizeof(buffer));
-
- /* Do the command and wait.. */
- the_result = scsi_execute_cmd(cd->device, cmd, REQ_OP_DRV_IN,
- buffer, sizeof(buffer),
- SR_TIMEOUT, MAX_RETRIES, NULL);
-
- retries--;
-
- } while (the_result && retries);
-
-
+ /* Do the command and wait.. */
+ the_result = scsi_execute_cmd(cd->device, cmd, REQ_OP_DRV_IN, buffer,
+ sizeof(buffer), SR_TIMEOUT, MAX_RETRIES,
+ &exec_args);
if (the_result) {
cd->capacity = 0x1fffff;
sector_size = 2048; /* A guess, just in case */
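
The get_sectorsize() rework above drops the open-coded retry loop in favour of the SCSI core's declarative failure handling: the command is issued once through scsi_execute_cmd(), and which results get retried, and how often, is described in a struct scsi_failures table passed via scsi_exec_args. A minimal sketch of the same mechanism for a different caller, with hypothetical names and a TEST UNIT READY chosen purely for illustration:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_proto.h>

static int foo_test_unit_ready(struct scsi_device *sdev)
{
	static const u8 cmd[6] = { TEST_UNIT_READY };
	struct scsi_failure failure_defs[] = {
		{
			/* retry any failed result, at most three times */
			.result = SCMD_FAILURE_RESULT_ANY,
			.allowed = 3,
		},
		{}
	};
	struct scsi_failures failures = {
		.failure_definitions = failure_defs,
	};
	const struct scsi_exec_args exec_args = {
		.failures = &failures,
	};

	return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
				30 * HZ, 0, &exec_args);
}
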
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 338aa8c429682..5a9bcf8e0792e 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -87,7 +87,7 @@ static int try_rdio = 1;
static int try_wdio = 1;
static int debug_flag;
-static struct class st_sysfs_class;
+static const struct class st_sysfs_class;
static const struct attribute_group *st_dev_groups[];
static const struct attribute_group *st_drv_groups[];
@@ -4438,7 +4438,7 @@ static void scsi_tape_release(struct kref *kref)
return;
}
-static struct class st_sysfs_class = {
+static const struct class st_sysfs_class = {
.name = "scsi_tape",
.dev_groups = st_dev_groups,
};
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 09219c362accc..e20f314cf3e7d 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -273,7 +273,7 @@ static struct platform_driver esp_sun3x_driver = {
module_platform_driver(esp_sun3x_driver);
MODULE_DESCRIPTION("Sun3x ESP SCSI driver");
-MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
+MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:sun3x_esp");
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 64a7c2c6c5ff4..5ce6c9d19d1e6 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -608,6 +608,6 @@ static struct platform_driver esp_sbus_driver = {
module_platform_driver(esp_sbus_driver);
MODULE_DESCRIPTION("Sun ESP SCSI driver");
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index ca4f4ca413f11..74350b5871dc8 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -455,7 +455,7 @@ struct syscore_ops intc_syscore_ops = {
.resume = intc_resume,
};
-struct bus_type intc_subsys = {
+const struct bus_type intc_subsys = {
.name = "intc",
.dev_name = "intc",
};
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
index fa73c173b56a6..9b6cd1bebb4e3 100644
--- a/drivers/sh/intc/internals.h
+++ b/drivers/sh/intc/internals.h
@@ -160,7 +160,7 @@ void _intc_enable(struct irq_data *data, unsigned long handle);
/* core.c */
extern struct list_head intc_list;
extern raw_spinlock_t intc_big_lock;
-extern struct bus_type intc_subsys;
+extern const struct bus_type intc_subsys;
unsigned int intc_get_dfl_prio_level(void);
unsigned int intc_get_prio_level(unsigned int irq);
diff --git a/drivers/siox/siox-bus-gpio.c b/drivers/siox/siox-bus-gpio.c
index aeefeb7255247..9e01642e72de3 100644
--- a/drivers/siox/siox-bus-gpio.c
+++ b/drivers/siox/siox-bus-gpio.c
@@ -91,63 +91,42 @@ static int siox_gpio_probe(struct platform_device *pdev)
int ret;
struct siox_master *smaster;
- smaster = siox_master_alloc(&pdev->dev, sizeof(*ddata));
- if (!smaster) {
- dev_err(dev, "failed to allocate siox master\n");
- return -ENOMEM;
- }
+ smaster = devm_siox_master_alloc(dev, sizeof(*ddata));
+ if (!smaster)
+ return dev_err_probe(dev, -ENOMEM,
+ "failed to allocate siox master\n");
platform_set_drvdata(pdev, smaster);
ddata = siox_master_get_devdata(smaster);
ddata->din = devm_gpiod_get(dev, "din", GPIOD_IN);
- if (IS_ERR(ddata->din)) {
- ret = dev_err_probe(dev, PTR_ERR(ddata->din),
- "Failed to get din GPIO\n");
- goto err;
- }
+ if (IS_ERR(ddata->din))
+ return dev_err_probe(dev, PTR_ERR(ddata->din),
+ "Failed to get din GPIO\n");
ddata->dout = devm_gpiod_get(dev, "dout", GPIOD_OUT_LOW);
- if (IS_ERR(ddata->dout)) {
- ret = dev_err_probe(dev, PTR_ERR(ddata->dout),
- "Failed to get dout GPIO\n");
- goto err;
- }
+ if (IS_ERR(ddata->dout))
+ return dev_err_probe(dev, PTR_ERR(ddata->dout),
+ "Failed to get dout GPIO\n");
ddata->dclk = devm_gpiod_get(dev, "dclk", GPIOD_OUT_LOW);
- if (IS_ERR(ddata->dclk)) {
- ret = dev_err_probe(dev, PTR_ERR(ddata->dclk),
- "Failed to get dclk GPIO\n");
- goto err;
- }
+ if (IS_ERR(ddata->dclk))
+ return dev_err_probe(dev, PTR_ERR(ddata->dclk),
+ "Failed to get dclk GPIO\n");
ddata->dld = devm_gpiod_get(dev, "dld", GPIOD_OUT_LOW);
- if (IS_ERR(ddata->dld)) {
- ret = dev_err_probe(dev, PTR_ERR(ddata->dld),
- "Failed to get dld GPIO\n");
- goto err;
- }
+ if (IS_ERR(ddata->dld))
+ return dev_err_probe(dev, PTR_ERR(ddata->dld),
+ "Failed to get dld GPIO\n");
smaster->pushpull = siox_gpio_pushpull;
/* XXX: determine automatically like spi does */
smaster->busno = 0;
- ret = siox_master_register(smaster);
- if (ret) {
- dev_err_probe(dev, ret,
- "Failed to register siox master\n");
-err:
- siox_master_put(smaster);
- }
-
- return ret;
-}
-
-static int siox_gpio_remove(struct platform_device *pdev)
-{
- struct siox_master *master = platform_get_drvdata(pdev);
-
- siox_master_unregister(master);
+ ret = devm_siox_master_register(dev, smaster);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register siox master\n");
return 0;
}
@@ -160,7 +139,6 @@ MODULE_DEVICE_TABLE(of, siox_gpio_dt_ids);
static struct platform_driver siox_gpio_driver = {
.probe = siox_gpio_probe,
- .remove = siox_gpio_remove,
.driver = {
.name = DRIVER_NAME,
diff --git a/drivers/siox/siox-core.c b/drivers/siox/siox-core.c
index 561408583b2bf..24a45920a240d 100644
--- a/drivers/siox/siox-core.c
+++ b/drivers/siox/siox-core.c
@@ -498,7 +498,7 @@ static void siox_device_release(struct device *dev)
kfree(sdevice);
}
-static struct device_type siox_device_type = {
+static const struct device_type siox_device_type = {
.groups = siox_device_groups,
.release = siox_device_release,
};
@@ -543,7 +543,7 @@ static void siox_shutdown(struct device *dev)
sdriver->shutdown(sdevice);
}
-static struct bus_type siox_bus_type = {
+static const struct bus_type siox_bus_type = {
.name = "siox",
.match = siox_match,
.probe = siox_probe,
@@ -676,7 +676,7 @@ static void siox_master_release(struct device *dev)
kfree(smaster);
}
-static struct device_type siox_master_type = {
+static const struct device_type siox_master_type = {
.groups = siox_master_groups,
.release = siox_master_release,
};
@@ -707,6 +707,31 @@ struct siox_master *siox_master_alloc(struct device *dev,
}
EXPORT_SYMBOL_GPL(siox_master_alloc);
+static void devm_siox_master_put(void *data)
+{
+ struct siox_master *smaster = data;
+
+ siox_master_put(smaster);
+}
+
+struct siox_master *devm_siox_master_alloc(struct device *dev,
+ size_t size)
+{
+ struct siox_master *smaster;
+ int ret;
+
+ smaster = siox_master_alloc(dev, size);
+ if (!smaster)
+ return NULL;
+
+ ret = devm_add_action_or_reset(dev, devm_siox_master_put, smaster);
+ if (ret)
+ return NULL;
+
+ return smaster;
+}
+EXPORT_SYMBOL_GPL(devm_siox_master_alloc);
+
int siox_master_register(struct siox_master *smaster)
{
int ret;
@@ -717,6 +742,8 @@ int siox_master_register(struct siox_master *smaster)
if (!smaster->pushpull)
return -EINVAL;
+ get_device(&smaster->dev);
+
dev_set_name(&smaster->dev, "siox-%d", smaster->busno);
mutex_init(&smaster->lock);
@@ -768,6 +795,25 @@ void siox_master_unregister(struct siox_master *smaster)
}
EXPORT_SYMBOL_GPL(siox_master_unregister);
+static void devm_siox_master_unregister(void *data)
+{
+ struct siox_master *smaster = data;
+
+ siox_master_unregister(smaster);
+}
+
+int devm_siox_master_register(struct device *dev, struct siox_master *smaster)
+{
+ int ret;
+
+ ret = siox_master_register(smaster);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_siox_master_unregister, smaster);
+}
+EXPORT_SYMBOL_GPL(devm_siox_master_register);
+
static struct siox_device *siox_device_add(struct siox_master *smaster,
const char *type, size_t inbytes,
size_t outbytes, u8 statustype)
diff --git a/drivers/siox/siox.h b/drivers/siox/siox.h
index f08b43b713c5c..513f2c8312f7d 100644
--- a/drivers/siox/siox.h
+++ b/drivers/siox/siox.h
@@ -45,5 +45,9 @@ static inline void siox_master_put(struct siox_master *smaster)
put_device(&smaster->dev);
}
+struct siox_master *devm_siox_master_alloc(struct device *dev, size_t size);
+
int siox_master_register(struct siox_master *smaster);
void siox_master_unregister(struct siox_master *smaster);
+
+int devm_siox_master_register(struct device *dev, struct siox_master *smaster);
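
The devm_siox_master_alloc()/devm_siox_master_register() helpers introduced above are instances of the common devm_add_action_or_reset() pattern: an ordinary constructor or registration call is paired with a cleanup action tied to the struct device's lifetime, so probe error paths and driver removal need no explicit undo. A generic sketch with hypothetical foo_*() helpers (not from this patch):

#include <linux/device.h>
#include <linux/err.h>

struct foo;
struct foo *foo_create(struct device *dev);	/* hypothetical constructor */
void foo_release(struct foo *foo);		/* hypothetical destructor */

static void devm_foo_release(void *data)
{
	foo_release(data);	/* the manual cleanup being wrapped */
}

static struct foo *devm_foo_create(struct device *dev)
{
	struct foo *foo;
	int ret;

	foo = foo_create(dev);	/* plain, non-devm constructor */
	if (IS_ERR(foo))
		return foo;

	/* if adding the action fails, it runs immediately and frees foo */
	ret = devm_add_action_or_reset(dev, devm_foo_release, foo);
	if (ret)
		return ERR_PTR(ret);

	return foo;
}
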
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index d43873bb5fe6d..41e62de1f91f3 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -100,7 +100,7 @@ static int slim_device_uevent(const struct device *dev, struct kobj_uevent_env *
return add_uevent_var(env, "MODALIAS=slim:%s", dev_name(&sbdev->dev));
}
-struct bus_type slimbus_bus = {
+const struct bus_type slimbus_bus = {
.name = "slimbus",
.match = slim_device_match,
.probe = slim_device_probe,
@@ -436,8 +436,8 @@ static int slim_device_alloc_laddr(struct slim_device *sbdev,
if (ret < 0)
goto err;
} else if (report_present) {
- ret = ida_simple_get(&ctrl->laddr_ida,
- 0, SLIM_LA_MANAGER - 1, GFP_KERNEL);
+ ret = ida_alloc_max(&ctrl->laddr_ida,
+ SLIM_LA_MANAGER - 1, GFP_KERNEL);
if (ret < 0)
goto err;
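
One detail worth keeping in mind with the slimbus hunk: the deprecated ida_simple_get() treats its end argument as exclusive, while ida_alloc_max() treats max as inclusive, so conversions like the one above need the intended upper bound double-checked. A minimal, hypothetical usage sketch of the replacement API:

#include <linux/idr.h>

static DEFINE_IDA(foo_ida);

/* allocate the lowest free ID in [0, 31]; the bound is inclusive and a
 * negative errno (e.g. -ENOSPC) is returned when the range is full */
static int foo_get_id(void)
{
	return ida_alloc_max(&foo_ida, 31, GFP_KERNEL);
}

static void foo_put_id(int id)
{
	ida_free(&foo_ida, id);
}
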
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 77aa6d26476cd..efeba8275a669 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -220,7 +220,7 @@ struct slimbus_power_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
-static struct qmi_elem_info slimbus_select_inst_req_msg_v01_ei[] = {
+static const struct qmi_elem_info slimbus_select_inst_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -262,7 +262,7 @@ static struct qmi_elem_info slimbus_select_inst_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -284,7 +284,7 @@ static struct qmi_elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info slimbus_power_req_msg_v01_ei[] = {
+static const struct qmi_elem_info slimbus_power_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -324,7 +324,7 @@ static struct qmi_elem_info slimbus_power_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info slimbus_power_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info slimbus_power_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 1d2b27e3ea63f..b811446e0fa55 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -523,7 +523,7 @@ int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
struct qbman_eq_desc *ed;
int i, ret;
- ed = kcalloc(sizeof(struct qbman_eq_desc), 32, GFP_KERNEL);
+ ed = kcalloc(32, sizeof(struct qbman_eq_desc), GFP_KERNEL);
if (!ed)
return -ENOMEM;
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index cb24a08be084d..b0f26f6f731e7 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -144,17 +144,6 @@ static int bm_set_memory(u64 ba, u32 size)
static dma_addr_t fbpr_a;
static size_t fbpr_sz;
-static int bman_fbpr(struct reserved_mem *rmem)
-{
- fbpr_a = rmem->base;
- fbpr_sz = rmem->size;
-
- WARN_ON(!(fbpr_a && fbpr_sz));
-
- return 0;
-}
-RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
-
static irqreturn_t bman_isr(int irq, void *ptr)
{
u32 isr_val, ier_val, ecsr_val, isr_mask, i;
@@ -242,17 +231,11 @@ static int fsl_bman_probe(struct platform_device *pdev)
return -ENODEV;
}
- /*
- * If FBPR memory wasn't defined using the qbman compatible string
- * try using the of_reserved_mem_device method
- */
- if (!fbpr_a) {
- ret = qbman_init_private_mem(dev, 0, &fbpr_a, &fbpr_sz);
- if (ret) {
- dev_err(dev, "qbman_init_private_mem() failed 0x%x\n",
- ret);
- return -ENODEV;
- }
+ ret = qbman_init_private_mem(dev, 0, "fsl,bman-fbpr", &fbpr_a, &fbpr_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() failed 0x%x\n",
+ ret);
+ return -ENODEV;
}
dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
index 33751450047ed..e1d7b79cc4503 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.c
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -34,8 +34,8 @@
/*
* Initialize a device's private memory region
*/
-int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
- size_t *size)
+int qbman_init_private_mem(struct device *dev, int idx, const char *compat,
+ dma_addr_t *addr, size_t *size)
{
struct device_node *mem_node;
struct reserved_mem *rmem;
@@ -44,8 +44,12 @@ int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
mem_node = of_parse_phandle(dev->of_node, "memory-region", idx);
if (!mem_node) {
- dev_err(dev, "No memory-region found for index %d\n", idx);
- return -ENODEV;
+ mem_node = of_find_compatible_node(NULL, NULL, compat);
+ if (!mem_node) {
+ dev_err(dev, "No memory-region found for index %d or compatible '%s'\n",
+ idx, compat);
+ return -ENODEV;
+ }
}
rmem = of_reserved_mem_lookup(mem_node);
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
index ae8afa552b1ef..16485bde96366 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.h
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -101,8 +101,8 @@ static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
#define DPAA_GENALLOC_OFF 0x80000000
/* Initialize the device's private memory region */
-int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
- size_t *size);
+int qbman_init_private_mem(struct device *dev, int idx, const char *compat,
+ dma_addr_t *addr, size_t *size);
/* memremap() attributes for different platforms */
#ifdef CONFIG_PPC
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 739e4eee6b75c..7e9074519ad22 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -991,7 +991,7 @@ struct qman_portal {
/* linked-list of CSCN handlers. */
struct list_head cgr_cbs;
/* list lock */
- spinlock_t cgr_lock;
+ raw_spinlock_t cgr_lock;
struct work_struct congestion_work;
struct work_struct mr_work;
char irqname[MAX_IRQNAME];
@@ -1281,7 +1281,7 @@ static int qman_create_portal(struct qman_portal *portal,
/* if the given mask is NULL, assume all CGRs can be seen */
qman_cgrs_fill(&portal->cgrs[0]);
INIT_LIST_HEAD(&portal->cgr_cbs);
- spin_lock_init(&portal->cgr_lock);
+ raw_spin_lock_init(&portal->cgr_lock);
INIT_WORK(&portal->congestion_work, qm_congestion_task);
INIT_WORK(&portal->mr_work, qm_mr_process_task);
portal->bits = 0;
@@ -1456,11 +1456,14 @@ static void qm_congestion_task(struct work_struct *work)
union qm_mc_result *mcr;
struct qman_cgr *cgr;
- spin_lock(&p->cgr_lock);
+ /*
+ * FIXME: QM_MCR_TIMEOUT is 10ms, which is too long for a raw spinlock!
+ */
+ raw_spin_lock_irq(&p->cgr_lock);
qm_mc_start(&p->p);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
- spin_unlock(&p->cgr_lock);
+ raw_spin_unlock_irq(&p->cgr_lock);
dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
qman_p_irqsource_add(p, QM_PIRQ_CSCI);
return;
@@ -1476,7 +1479,7 @@ static void qm_congestion_task(struct work_struct *work)
list_for_each_entry(cgr, &p->cgr_cbs, node)
if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
- spin_unlock(&p->cgr_lock);
+ raw_spin_unlock_irq(&p->cgr_lock);
qman_p_irqsource_add(p, QM_PIRQ_CSCI);
}
@@ -2440,7 +2443,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
preempt_enable();
cgr->chan = p->config->channel;
- spin_lock(&p->cgr_lock);
+ raw_spin_lock_irq(&p->cgr_lock);
if (opts) {
struct qm_mcc_initcgr local_opts = *opts;
@@ -2477,7 +2480,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
cgr->cb(p, cgr, 1);
out:
- spin_unlock(&p->cgr_lock);
+ raw_spin_unlock_irq(&p->cgr_lock);
put_affine_portal();
return ret;
}
@@ -2512,7 +2515,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
return -EINVAL;
memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
- spin_lock_irqsave(&p->cgr_lock, irqflags);
+ raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
list_del(&cgr->node);
/*
* If there are no other CGR objects for this CGRID in the list,
@@ -2537,7 +2540,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
/* add back to the list */
list_add(&cgr->node, &p->cgr_cbs);
release_lock:
- spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_affine_portal();
return ret;
}
@@ -2577,9 +2580,9 @@ static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
if (!p)
return -EINVAL;
- spin_lock_irqsave(&p->cgr_lock, irqflags);
+ raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
ret = qm_modify_cgr(cgr, 0, opts);
- spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_affine_portal();
return ret;
}
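
The qman changes above convert cgr_lock from spinlock_t to raw_spinlock_t so that the congestion and CGR management paths keep spinning with interrupts masked even on PREEMPT_RT, where an ordinary spinlock_t turns into a sleeping lock; the FIXME added in qm_congestion_task() notes the cost of that choice. A generic sketch of the raw-spinlock pattern, hypothetical names only:

#include <linux/spinlock.h>

struct foo_state {
	unsigned long counter;
};

static DEFINE_RAW_SPINLOCK(foo_lock);

/* callable from hard-IRQ or task context; the critical section must be
 * short and must never sleep, even on PREEMPT_RT */
static void foo_update(struct foo_state *s)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&foo_lock, flags);
	s->counter++;
	raw_spin_unlock_irqrestore(&foo_lock, flags);
}
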
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 157659fd033a3..392e54f14dbe9 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -468,28 +468,6 @@ static int zero_priv_mem(phys_addr_t addr, size_t sz)
return 0;
}
-
-static int qman_fqd(struct reserved_mem *rmem)
-{
- fqd_a = rmem->base;
- fqd_sz = rmem->size;
-
- WARN_ON(!(fqd_a && fqd_sz));
- return 0;
-}
-RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
-
-static int qman_pfdr(struct reserved_mem *rmem)
-{
- pfdr_a = rmem->base;
- pfdr_sz = rmem->size;
-
- WARN_ON(!(pfdr_a && pfdr_sz));
-
- return 0;
-}
-RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
-
#endif
unsigned int qm_get_fqid_maxcnt(void)
@@ -796,39 +774,34 @@ static int fsl_qman_probe(struct platform_device *pdev)
qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
}
- if (fqd_a) {
+ /*
+	 * The order of memory regions is assumed to be FQD followed by PFDR.
+	 * To ensure allocations come from the correct regions, the driver
+	 * initializes and then allocates each piece in order.
+ */
+ ret = qbman_init_private_mem(dev, 0, "fsl,qman-fqd", &fqd_a, &fqd_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
#ifdef CONFIG_PPC
- /*
- * For PPC backward DT compatibility
- * FQD memory MUST be zero'd by software
- */
- zero_priv_mem(fqd_a, fqd_sz);
+ /*
+ * For PPC backward DT compatibility
+ * FQD memory MUST be zero'd by software
+ */
+ zero_priv_mem(fqd_a, fqd_sz);
#else
- WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
+ WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
#endif
- } else {
- /*
- * Order of memory regions is assumed as FQD followed by PFDR
- * in order to ensure allocations from the correct regions the
- * driver initializes then allocates each piece in order
- */
- ret = qbman_init_private_mem(dev, 0, &fqd_a, &fqd_sz);
- if (ret) {
- dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n",
- ret);
- return -ENODEV;
- }
- }
dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
- if (!pfdr_a) {
- /* Setup PFDR memory */
- ret = qbman_init_private_mem(dev, 1, &pfdr_a, &pfdr_sz);
- if (ret) {
- dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n",
- ret);
- return -ENODEV;
- }
+ /* Setup PFDR memory */
+ ret = qbman_init_private_mem(dev, 1, "fsl,qman-pfdr", &pfdr_a, &pfdr_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n",
+ ret);
+ return -ENODEV;
}
dev_dbg(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index 4458b2e0562b0..6eb6cf06278e6 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -287,6 +287,7 @@ EXPORT_SYMBOL(sunxi_sram_release);
struct sunxi_sramc_variant {
int num_emac_clocks;
bool has_ldo_ctrl;
+ bool has_ths_offset;
};
static const struct sunxi_sramc_variant sun4i_a10_sramc_variant = {
@@ -308,8 +309,10 @@ static const struct sunxi_sramc_variant sun50i_a64_sramc_variant = {
static const struct sunxi_sramc_variant sun50i_h616_sramc_variant = {
.num_emac_clocks = 2,
+ .has_ths_offset = true,
};
+#define SUNXI_SRAM_THS_OFFSET_REG 0x0
#define SUNXI_SRAM_EMAC_CLOCK_REG 0x30
#define SUNXI_SYS_LDO_CTRL_REG 0x150
@@ -318,6 +321,8 @@ static bool sunxi_sram_regmap_accessible_reg(struct device *dev,
{
const struct sunxi_sramc_variant *variant = dev_get_drvdata(dev);
+ if (reg == SUNXI_SRAM_THS_OFFSET_REG && variant->has_ths_offset)
+ return true;
if (reg >= SUNXI_SRAM_EMAC_CLOCK_REG &&
reg < SUNXI_SRAM_EMAC_CLOCK_REG + variant->num_emac_clocks * 4)
return true;
@@ -327,6 +332,20 @@ static bool sunxi_sram_regmap_accessible_reg(struct device *dev,
return false;
}
+static void sunxi_sram_lock(void *_lock)
+{
+ spinlock_t *lock = _lock;
+
+ spin_lock(lock);
+}
+
+static void sunxi_sram_unlock(void *_lock)
+{
+ spinlock_t *lock = _lock;
+
+ spin_unlock(lock);
+}
+
static struct regmap_config sunxi_sram_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
@@ -336,6 +355,9 @@ static struct regmap_config sunxi_sram_regmap_config = {
/* other devices have no business accessing other registers */
.readable_reg = sunxi_sram_regmap_accessible_reg,
.writeable_reg = sunxi_sram_regmap_accessible_reg,
+ .lock = sunxi_sram_lock,
+ .unlock = sunxi_sram_unlock,
+ .lock_arg = &sram_lock,
};
static int __init sunxi_sram_probe(struct platform_device *pdev)
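
The sunxi_sram hunk routes regmap's internal locking through driver-supplied callbacks so that one spinlock (the driver's existing sram_lock) serializes both regmap consumers and the driver's own direct register accesses. A stand-alone sketch of that callback-locking pattern, with hypothetical names:

#include <linux/regmap.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_lock);

static void foo_regmap_lock(void *arg)
{
	spin_lock((spinlock_t *)arg);
}

static void foo_regmap_unlock(void *arg)
{
	spin_unlock((spinlock_t *)arg);
}

static const struct regmap_config foo_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.lock = foo_regmap_lock,
	.unlock = foo_regmap_unlock,
	.lock_arg = &foo_lock,
};
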
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index 657f5888a77b0..e80a2c2cf3e71 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -20,7 +20,7 @@ soundwire-bus-y += irq.o
endif
#AMD driver
-soundwire-amd-y := amd_manager.o
+soundwire-amd-y := amd_init.o amd_manager.o
obj-$(CONFIG_SOUNDWIRE_AMD) += soundwire-amd.o
#Cadence Objs
diff --git a/drivers/soundwire/amd_init.c b/drivers/soundwire/amd_init.c
new file mode 100644
index 0000000000000..e45dc8261ab13
--- /dev/null
+++ b/drivers/soundwire/amd_init.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * SoundWire AMD Manager Initialize routines
+ *
+ * Initializes and creates SDW devices based on ACPI and Hardware values
+ *
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/acpi.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "amd_init.h"
+
+#define ACP_PAD_PULLDOWN_CTRL 0x0001448
+#define ACP_SW_PAD_KEEPER_EN 0x0001454
+#define AMD_SDW_PAD_PULLDOWN_CTRL_ENABLE_MASK 0x7f9a
+#define AMD_SDW0_PAD_PULLDOWN_CTRL_ENABLE_MASK 0x7f9f
+#define AMD_SDW1_PAD_PULLDOWN_CTRL_ENABLE_MASK 0x7ffa
+#define AMD_SDW0_PAD_EN_MASK 1
+#define AMD_SDW1_PAD_EN_MASK 0x10
+#define AMD_SDW_PAD_EN_MASK (AMD_SDW0_PAD_EN_MASK | AMD_SDW1_PAD_EN_MASK)
+
+static int amd_enable_sdw_pads(void __iomem *mmio, u32 link_mask, struct device *dev)
+{
+ u32 val;
+ u32 pad_keeper_en_mask, pad_pulldown_ctrl_mask;
+
+ switch (link_mask) {
+ case 1:
+ pad_keeper_en_mask = AMD_SDW0_PAD_EN_MASK;
+ pad_pulldown_ctrl_mask = AMD_SDW0_PAD_PULLDOWN_CTRL_ENABLE_MASK;
+ break;
+ case 2:
+ pad_keeper_en_mask = AMD_SDW1_PAD_EN_MASK;
+ pad_pulldown_ctrl_mask = AMD_SDW1_PAD_PULLDOWN_CTRL_ENABLE_MASK;
+ break;
+ case 3:
+ pad_keeper_en_mask = AMD_SDW_PAD_EN_MASK;
+ pad_pulldown_ctrl_mask = AMD_SDW_PAD_PULLDOWN_CTRL_ENABLE_MASK;
+ break;
+ default:
+ dev_err(dev, "No SDW Links are enabled\n");
+ return -ENODEV;
+ }
+
+ val = readl(mmio + ACP_SW_PAD_KEEPER_EN);
+ val |= pad_keeper_en_mask;
+ writel(val, mmio + ACP_SW_PAD_KEEPER_EN);
+ val = readl(mmio + ACP_PAD_PULLDOWN_CTRL);
+ val &= pad_pulldown_ctrl_mask;
+ writel(val, mmio + ACP_PAD_PULLDOWN_CTRL);
+ return 0;
+}
+
+static int sdw_amd_cleanup(struct sdw_amd_ctx *ctx)
+{
+ int i;
+
+ for (i = 0; i < ctx->count; i++) {
+ if (!(ctx->link_mask & BIT(i)))
+ continue;
+ platform_device_unregister(ctx->pdev[i]);
+ }
+
+ return 0;
+}
+
+static struct sdw_amd_ctx *sdw_amd_probe_controller(struct sdw_amd_res *res)
+{
+ struct sdw_amd_ctx *ctx;
+ struct acpi_device *adev;
+ struct resource *sdw_res;
+ struct acp_sdw_pdata sdw_pdata[2];
+ struct platform_device_info pdevinfo[2];
+ u32 link_mask;
+ int count, index;
+ int ret;
+
+ if (!res)
+ return NULL;
+
+ adev = acpi_fetch_acpi_dev(res->handle);
+ if (!adev)
+ return NULL;
+
+ if (!res->count)
+ return NULL;
+
+ count = res->count;
+ dev_dbg(&adev->dev, "Creating %d SDW Link devices\n", count);
+ ret = amd_enable_sdw_pads(res->mmio_base, res->link_mask, res->parent);
+ if (ret)
+ return NULL;
+
+ /*
+ * we need to alloc/free memory manually and can't use devm:
+ * this routine may be called from a workqueue, and not from
+ * the parent .probe.
+ * If devm_ was used, the memory might never be freed on errors.
+ */
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ ctx->count = count;
+ ctx->link_mask = res->link_mask;
+ sdw_res = kzalloc(sizeof(*sdw_res), GFP_KERNEL);
+ if (!sdw_res) {
+ kfree(ctx);
+ return NULL;
+ }
+ sdw_res->flags = IORESOURCE_MEM;
+ sdw_res->start = res->addr;
+ sdw_res->end = res->addr + res->reg_range;
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ link_mask = ctx->link_mask;
+ for (index = 0; index < count; index++) {
+ if (!(link_mask & BIT(index)))
+ continue;
+
+ sdw_pdata[index].instance = index;
+ sdw_pdata[index].acp_sdw_lock = res->acp_lock;
+ pdevinfo[index].name = "amd_sdw_manager";
+ pdevinfo[index].id = index;
+ pdevinfo[index].parent = res->parent;
+ pdevinfo[index].num_res = 1;
+ pdevinfo[index].res = sdw_res;
+ pdevinfo[index].data = &sdw_pdata[index];
+ pdevinfo[index].size_data = sizeof(struct acp_sdw_pdata);
+ pdevinfo[index].fwnode = acpi_fwnode_handle(adev);
+ ctx->pdev[index] = platform_device_register_full(&pdevinfo[index]);
+ if (IS_ERR(ctx->pdev[index]))
+ goto err;
+ }
+ kfree(sdw_res);
+ return ctx;
+err:
+ while (index--) {
+ if (!(link_mask & BIT(index)))
+ continue;
+
+ platform_device_unregister(ctx->pdev[index]);
+ }
+
+ kfree(sdw_res);
+ kfree(ctx);
+ return NULL;
+}
+
+static int sdw_amd_startup(struct sdw_amd_ctx *ctx)
+{
+ struct amd_sdw_manager *amd_manager;
+ int i, ret;
+
+ /* Startup SDW Manager devices */
+ for (i = 0; i < ctx->count; i++) {
+ if (!(ctx->link_mask & BIT(i)))
+ continue;
+ amd_manager = dev_get_drvdata(&ctx->pdev[i]->dev);
+ ret = amd_sdw_manager_start(amd_manager);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int sdw_amd_probe(struct sdw_amd_res *res, struct sdw_amd_ctx **sdw_ctx)
+{
+ *sdw_ctx = sdw_amd_probe_controller(res);
+ if (!*sdw_ctx)
+ return -ENODEV;
+
+ return sdw_amd_startup(*sdw_ctx);
+}
+EXPORT_SYMBOL_NS(sdw_amd_probe, SOUNDWIRE_AMD_INIT);
+
+void sdw_amd_exit(struct sdw_amd_ctx *ctx)
+{
+ sdw_amd_cleanup(ctx);
+ kfree(ctx->ids);
+ kfree(ctx);
+}
+EXPORT_SYMBOL_NS(sdw_amd_exit, SOUNDWIRE_AMD_INIT);
+
+int sdw_amd_get_slave_info(struct sdw_amd_ctx *ctx)
+{
+ struct amd_sdw_manager *amd_manager;
+ struct sdw_bus *bus;
+ struct sdw_slave *slave;
+ struct list_head *node;
+ int index;
+ int i = 0;
+ int num_slaves = 0;
+
+ for (index = 0; index < ctx->count; index++) {
+ if (!(ctx->link_mask & BIT(index)))
+ continue;
+ amd_manager = dev_get_drvdata(&ctx->pdev[index]->dev);
+ if (!amd_manager)
+ return -ENODEV;
+ bus = &amd_manager->bus;
+ /* Calculate number of slaves */
+ list_for_each(node, &bus->slaves)
+ num_slaves++;
+ }
+
+ ctx->ids = kcalloc(num_slaves, sizeof(*ctx->ids), GFP_KERNEL);
+ if (!ctx->ids)
+ return -ENOMEM;
+ ctx->num_slaves = num_slaves;
+ for (index = 0; index < ctx->count; index++) {
+ if (!(ctx->link_mask & BIT(index)))
+ continue;
+ amd_manager = dev_get_drvdata(&ctx->pdev[index]->dev);
+ if (amd_manager) {
+ bus = &amd_manager->bus;
+ list_for_each_entry(slave, &bus->slaves, node) {
+ ctx->ids[i].id = slave->id;
+ ctx->ids[i].link_id = bus->link_id;
+ i++;
+ }
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_NS(sdw_amd_get_slave_info, SOUNDWIRE_AMD_INIT);
+
+MODULE_AUTHOR("Vijendar.Mukunda@amd.com");
+MODULE_DESCRIPTION("AMD SoundWire Init Library");
+MODULE_LICENSE("Dual BSD/GPL");
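
Note that the new init library exports its entry points with EXPORT_SYMBOL_NS() into the SOUNDWIRE_AMD_INIT namespace; purely for illustration (not part of this patch), a consumer module would have to import that namespace before it can link against them:

#include <linux/module.h>
#include <linux/soundwire/sdw_amd.h>

MODULE_IMPORT_NS(SOUNDWIRE_AMD_INIT);

/* the module may now call the sdw_amd_probe(), sdw_amd_get_slave_info()
 * and sdw_amd_exit() helpers exported above */
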
diff --git a/drivers/soundwire/amd_init.h b/drivers/soundwire/amd_init.h
new file mode 100644
index 0000000000000..928b0c707162e
--- /dev/null
+++ b/drivers/soundwire/amd_init.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef __AMD_INIT_H
+#define __AMD_INIT_H
+
+#include <linux/soundwire/sdw_amd.h>
+
+int amd_sdw_manager_start(struct amd_sdw_manager *amd_manager);
+
+#endif
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index f54bb4dd2d101..7cd24bd8e2248 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -1,8 +1,8 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* SoundWire AMD Manager driver
*
- * Copyright 2023 Advanced Micro Devices, Inc.
+ * Copyright 2023-24 Advanced Micro Devices, Inc.
*/
#include <linux/completion.h>
@@ -19,29 +19,13 @@
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "bus.h"
+#include "amd_init.h"
#include "amd_manager.h"
#define DRV_NAME "amd_sdw_manager"
#define to_amd_sdw(b) container_of(b, struct amd_sdw_manager, bus)
-static void amd_enable_sdw_pads(struct amd_sdw_manager *amd_manager)
-{
- u32 sw_pad_pulldown_val;
- u32 val;
-
- mutex_lock(amd_manager->acp_sdw_lock);
- val = readl(amd_manager->acp_mmio + ACP_SW_PAD_KEEPER_EN);
- val |= amd_manager->reg_mask->sw_pad_enable_mask;
- writel(val, amd_manager->acp_mmio + ACP_SW_PAD_KEEPER_EN);
- usleep_range(1000, 1500);
-
- sw_pad_pulldown_val = readl(amd_manager->acp_mmio + ACP_PAD_PULLDOWN_CTRL);
- sw_pad_pulldown_val &= amd_manager->reg_mask->sw_pad_pulldown_mask;
- writel(sw_pad_pulldown_val, amd_manager->acp_mmio + ACP_PAD_PULLDOWN_CTRL);
- mutex_unlock(amd_manager->acp_sdw_lock);
-}
-
static int amd_init_sdw_manager(struct amd_sdw_manager *amd_manager)
{
u32 val;
@@ -102,12 +86,11 @@ static int amd_disable_sdw_manager(struct amd_sdw_manager *amd_manager)
static void amd_enable_sdw_interrupts(struct amd_sdw_manager *amd_manager)
{
- struct sdw_manager_reg_mask *reg_mask = amd_manager->reg_mask;
u32 val;
mutex_lock(amd_manager->acp_sdw_lock);
val = readl(amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
- val |= reg_mask->acp_sdw_intr_mask;
+ val |= sdw_manager_reg_mask_array[amd_manager->instance];
writel(val, amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
mutex_unlock(amd_manager->acp_sdw_lock);
@@ -120,12 +103,11 @@ static void amd_enable_sdw_interrupts(struct amd_sdw_manager *amd_manager)
static void amd_disable_sdw_interrupts(struct amd_sdw_manager *amd_manager)
{
- struct sdw_manager_reg_mask *reg_mask = amd_manager->reg_mask;
u32 val;
mutex_lock(amd_manager->acp_sdw_lock);
val = readl(amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
- val &= ~reg_mask->acp_sdw_intr_mask;
+ val &= ~sdw_manager_reg_mask_array[amd_manager->instance];
writel(val, amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
mutex_unlock(amd_manager->acp_sdw_lock);
@@ -864,23 +846,20 @@ static void amd_sdw_irq_thread(struct work_struct *work)
writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
}
-static void amd_sdw_probe_work(struct work_struct *work)
+int amd_sdw_manager_start(struct amd_sdw_manager *amd_manager)
{
- struct amd_sdw_manager *amd_manager = container_of(work, struct amd_sdw_manager,
- probe_work);
struct sdw_master_prop *prop;
int ret;
prop = &amd_manager->bus.prop;
if (!prop->hw_disabled) {
- amd_enable_sdw_pads(amd_manager);
ret = amd_init_sdw_manager(amd_manager);
if (ret)
- return;
+ return ret;
amd_enable_sdw_interrupts(amd_manager);
ret = amd_enable_sdw_manager(amd_manager);
if (ret)
- return;
+ return ret;
amd_sdw_set_frameshape(amd_manager);
}
/* Enable runtime PM */
@@ -889,6 +868,7 @@ static void amd_sdw_probe_work(struct work_struct *work)
pm_runtime_mark_last_busy(amd_manager->dev);
pm_runtime_set_active(amd_manager->dev);
pm_runtime_enable(amd_manager->dev);
+ return 0;
}
static int amd_sdw_manager_probe(struct platform_device *pdev)
@@ -948,7 +928,6 @@ static int amd_sdw_manager_probe(struct platform_device *pdev)
return -EINVAL;
}
- amd_manager->reg_mask = &sdw_manager_reg_mask_array[amd_manager->instance];
params = &amd_manager->bus.params;
params->col = AMD_SDW_DEFAULT_COLUMNS;
@@ -972,11 +951,6 @@ static int amd_sdw_manager_probe(struct platform_device *pdev)
dev_set_drvdata(dev, amd_manager);
INIT_WORK(&amd_manager->amd_sdw_irq_thread, amd_sdw_irq_thread);
INIT_WORK(&amd_manager->amd_sdw_work, amd_sdw_update_slave_status_work);
- INIT_WORK(&amd_manager->probe_work, amd_sdw_probe_work);
- /*
- * Instead of having lengthy probe sequence, use deferred probe.
- */
- schedule_work(&amd_manager->probe_work);
return 0;
}
@@ -986,7 +960,6 @@ static void amd_sdw_manager_remove(struct platform_device *pdev)
int ret;
pm_runtime_disable(&pdev->dev);
- cancel_work_sync(&amd_manager->probe_work);
amd_disable_sdw_interrupts(amd_manager);
sdw_bus_master_delete(&amd_manager->bus);
ret = amd_disable_sdw_manager(amd_manager);
@@ -1215,5 +1188,5 @@ module_platform_driver(amd_sdw_driver);
MODULE_AUTHOR("Vijendar.Mukunda@amd.com");
MODULE_DESCRIPTION("AMD SoundWire driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/soundwire/amd_manager.h b/drivers/soundwire/amd_manager.h
index 5f040151a259b..418b679e0b1a6 100644
--- a/drivers/soundwire/amd_manager.h
+++ b/drivers/soundwire/amd_manager.h
@@ -1,6 +1,6 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
- * Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright (C) 2023-24 Advanced Micro Devices, Inc. All rights reserved.
*/
#ifndef __AMD_MANAGER_H
@@ -243,16 +243,8 @@ static struct sdw_manager_dp_reg sdw1_manager_dp_reg[AMD_SDW1_MAX_DAI] = {
ACP_SW_AUDIO1_RX_OFFSET, ACP_SW_AUDIO1_RX_CHANNEL_ENABLE_DP0}
};
-static struct sdw_manager_reg_mask sdw_manager_reg_mask_array[2] = {
- {
- AMD_SDW0_PAD_KEEPER_EN_MASK,
- AMD_SDW0_PAD_PULLDOWN_CTRL_ENABLE_MASK,
- AMD_SDW0_EXT_INTR_MASK
- },
- {
- AMD_SDW1_PAD_KEEPER_EN_MASK,
- AMD_SDW1_PAD_PULLDOWN_CTRL_ENABLE_MASK,
+static u32 sdw_manager_reg_mask_array[AMD_SDW_MAX_MANAGER_COUNT] = {
+ AMD_SDW0_EXT_INTR_MASK,
AMD_SDW1_EXT_INTR_MASK
- }
};
#endif
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index 9fa93bb923d70..fd65b2360fc1e 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -72,7 +72,7 @@ int sdw_slave_uevent(const struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-struct bus_type sdw_bus_type = {
+const struct bus_type sdw_bus_type = {
.name = "soundwire",
.match = sdw_bus_match,
};
diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
index 9ebdd0cd0b1cf..91ab97a456fa9 100644
--- a/drivers/soundwire/dmi-quirks.c
+++ b/drivers/soundwire/dmi-quirks.c
@@ -131,6 +131,14 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
.driver_data = (void *)intel_rooks_county,
},
{
+ /* quirk used for NUC15 LAPRC710 skew */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "LAPRC710"),
+ },
+ .driver_data = (void *)intel_rooks_county,
+ },
+ {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A3E")
diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c
index 93698532deac4..95125cc2fc599 100644
--- a/drivers/soundwire/intel_auxdevice.c
+++ b/drivers/soundwire/intel_auxdevice.c
@@ -621,8 +621,6 @@ static int __maybe_unused intel_resume(struct device *dev)
return 0;
}
- link_flags = md_flags >> (bus->link_id * 8);
-
if (pm_runtime_suspended(dev)) {
dev_dbg(dev, "pm_runtime status was suspended, forcing active\n");
diff --git a/drivers/soundwire/master.c b/drivers/soundwire/master.c
index 51abedbbaa663..b2c64512739d6 100644
--- a/drivers/soundwire/master.c
+++ b/drivers/soundwire/master.c
@@ -112,7 +112,7 @@ static const struct dev_pm_ops master_dev_pm = {
pm_generic_runtime_resume, NULL)
};
-struct device_type sdw_master_type = {
+const struct device_type sdw_master_type = {
.name = "soundwire_master",
.release = sdw_master_device_release,
.pm = &master_dev_pm,
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index 060c2982e26b0..9963b92eb5053 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -16,7 +16,7 @@ static void sdw_slave_release(struct device *dev)
kfree(slave);
}
-struct device_type sdw_slave_type = {
+const struct device_type sdw_slave_type = {
.name = "sdw_slave",
.release = sdw_slave_release,
.uevent = sdw_slave_uevent,
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index f9c0adc0738db..4e9e7d2a942d8 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -1718,7 +1718,7 @@ EXPORT_SYMBOL(sdw_deprepare_stream);
static int set_stream(struct snd_pcm_substream *substream,
struct sdw_stream_runtime *sdw_stream)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
struct snd_soc_dai *dai;
int ret = 0;
int i;
@@ -1771,7 +1771,7 @@ EXPORT_SYMBOL(sdw_alloc_stream);
int sdw_startup_stream(void *sdw_substream)
{
struct snd_pcm_substream *substream = sdw_substream;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
struct sdw_stream_runtime *sdw_stream;
char *name;
int ret;
@@ -1815,7 +1815,7 @@ EXPORT_SYMBOL(sdw_startup_stream);
void sdw_shutdown_stream(void *sdw_substream)
{
struct snd_pcm_substream *substream = sdw_substream;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
struct sdw_stream_runtime *sdw_stream;
struct snd_soc_dai *dai;
diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
index 27c995b657f2c..aabef9fc84bdf 100644
--- a/drivers/spi/spi-cs42l43.c
+++ b/drivers/spi/spi-cs42l43.c
@@ -11,7 +11,9 @@
#include <linux/errno.h>
#include <linux/mfd/cs42l43.h>
#include <linux/mfd/cs42l43-regs.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 11991eb126364..92a662d1b55cf 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -830,11 +830,11 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
if (is_target)
- controller = spi_alloc_target(&pdev->dev,
- sizeof(struct fsl_lpspi_data));
+ controller = devm_spi_alloc_target(&pdev->dev,
+ sizeof(struct fsl_lpspi_data));
else
- controller = spi_alloc_host(&pdev->dev,
- sizeof(struct fsl_lpspi_data));
+ controller = devm_spi_alloc_host(&pdev->dev,
+ sizeof(struct fsl_lpspi_data));
if (!controller)
return -ENOMEM;
@@ -852,39 +852,39 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
fsl_lpspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(fsl_lpspi->base)) {
ret = PTR_ERR(fsl_lpspi->base);
- goto out_controller_put;
+ return ret;
}
fsl_lpspi->base_phys = res->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
- goto out_controller_put;
+ return ret;
}
ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, 0,
dev_name(&pdev->dev), fsl_lpspi);
if (ret) {
dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
- goto out_controller_put;
+ return ret;
}
fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(fsl_lpspi->clk_per)) {
ret = PTR_ERR(fsl_lpspi->clk_per);
- goto out_controller_put;
+ return ret;
}
fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(fsl_lpspi->clk_ipg)) {
ret = PTR_ERR(fsl_lpspi->clk_ipg);
- goto out_controller_put;
+ return ret;
}
/* enable the clock */
ret = fsl_lpspi_init_rpm(fsl_lpspi);
if (ret)
- goto out_controller_put;
+ return ret;
ret = pm_runtime_get_sync(fsl_lpspi->dev);
if (ret < 0) {
@@ -945,8 +945,6 @@ out_pm_get:
pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
pm_runtime_put_sync(fsl_lpspi->dev);
pm_runtime_disable(fsl_lpspi->dev);
-out_controller_put:
- spi_controller_put(controller);
return ret;
}
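
The fsl-lpspi conversion above works because devm_spi_alloc_host()/devm_spi_alloc_target() tie the controller's last reference to the underlying struct device, so the out_controller_put label and its spi_controller_put() call become unnecessary and every early error path can simply return. A condensed sketch of the resulting probe shape, hypothetical names only:

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct foo_spi {
	void __iomem *base;
};

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct foo_spi *priv;

	/* released automatically on probe failure or device removal */
	ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*priv));
	if (!ctlr)
		return -ENOMEM;

	priv = spi_controller_get_devdata(ctlr);

	/* ...resource setup; a plain 'return ret;' suffices on any error... */

	return devm_spi_register_controller(&pdev->dev, ctlr);
}
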
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 833a1bb7a9143..c3e5cee18bea7 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -668,8 +668,8 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
else
- ctrl |= spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word,
- BITS_PER_BYTE) * spi_imx->bits_per_word
+ ctrl |= (spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word,
+ BITS_PER_BYTE) * spi_imx->bits_per_word - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
}
}
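
The spi-imx fix adds the "- 1" that the else branch was missing: the ECSPI BURST_LENGTH field is programmed as the number of bits in the burst minus one, exactly as the branch above it already does. As a worked example (values chosen only for illustration), transferring 4 bytes at 16 bits per word gives 4 / DIV_ROUND_UP(16, 8) * 16 = 32 bits on the wire, so the field must be written as 31; the old code wrote 32, i.e. a 33-bit burst.
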
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index f982bdebd0283..3c0c24ed1f3db 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -29,10 +29,10 @@
*
* Datasheet and Schematic:
* The LM70 is a temperature sensor chip from National Semiconductor; its
- * datasheet is available at http://www.national.com/pf/LM/LM70.html
+ * datasheet is available at https://www.ti.com/lit/gpn/lm70
* The schematic for this particular board (the LM70EVAL-LLP) is
* available (on page 4) here:
- * http://www.national.com/appinfo/tempsensors/files/LM70LLPEVALmanual.pdf
+ * https://download.datasheets.com/pdfs/documentation/nat/kit&board/lm70llpevalmanual.pdf
*
* Also see Documentation/spi/spi-lm70llp.rst. The SPI<->parport code here is
* (heavily) based on spi-butterfly by David Brownell.
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index c9d6d42a88f55..17b8baf749e6a 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -382,7 +382,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
* read path) and expect the core to use the regular SPI
* interface in other cases.
*/
- if (!ret || ret != -ENOTSUPP || ret != -EOPNOTSUPP) {
+ if (!ret || (ret != -ENOTSUPP && ret != -EOPNOTSUPP)) {
spi_mem_add_op_stats(ctlr->pcpu_statistics, op, ret);
spi_mem_add_op_stats(mem->spi->pcpu_statistics, op, ret);
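
The spi-mem change is a plain boolean fix: by De Morgan's laws, ret != -ENOTSUPP || ret != -EOPNOTSUPP is true for every possible ret, so the statistics were updated even when the controller had rejected the operation as unsupported. The corrected intent, spelled out as a hypothetical helper:

/* true when the controller actually attempted the operation, i.e. the
 * result is anything other than "operation not supported" */
static inline bool foo_op_was_attempted(int ret)
{
	return ret != -ENOTSUPP && ret != -EOPNOTSUPP;
}
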
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 8d4633b353eef..e4cb22fe00752 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -788,17 +788,19 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
mtk_spi_setup_packet(host);
- cnt = mdata->xfer_len / 4;
- iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
- trans->tx_buf + mdata->num_xfered, cnt);
+ if (trans->tx_buf) {
+ cnt = mdata->xfer_len / 4;
+ iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
+ trans->tx_buf + mdata->num_xfered, cnt);
- remainder = mdata->xfer_len % 4;
- if (remainder > 0) {
- reg_val = 0;
- memcpy(&reg_val,
- trans->tx_buf + (cnt * 4) + mdata->num_xfered,
- remainder);
- writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+ remainder = mdata->xfer_len % 4;
+ if (remainder > 0) {
+ reg_val = 0;
+ memcpy(&reg_val,
+ trans->tx_buf + (cnt * 4) + mdata->num_xfered,
+ remainder);
+ writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+ }
}
mtk_spi_enable_transfer(host);
diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c
index 969965d7bc98b..cc18d320370f9 100644
--- a/drivers/spi/spi-pci1xxxx.c
+++ b/drivers/spi/spi-pci1xxxx.c
@@ -725,6 +725,8 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
spi_bus->spi_int[iter] = devm_kzalloc(&pdev->dev,
sizeof(struct pci1xxxx_spi_internal),
GFP_KERNEL);
+ if (!spi_bus->spi_int[iter])
+ return -ENOMEM;
spi_sub_ptr = spi_bus->spi_int[iter];
spi_sub_ptr->spi_host = devm_spi_alloc_host(dev, sizeof(struct spi_controller));
if (!spi_sub_ptr->spi_host)
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 9fcbe040cb2f2..f726d86704287 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -430,7 +430,7 @@ static bool s3c64xx_spi_can_dma(struct spi_controller *host,
struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
if (sdd->rx_dma.ch && sdd->tx_dma.ch)
- return xfer->len > sdd->fifo_depth;
+ return xfer->len >= sdd->fifo_depth;
return false;
}
@@ -826,10 +826,9 @@ static int s3c64xx_spi_transfer_one(struct spi_controller *host,
return status;
}
- if (!is_polling(sdd) && (xfer->len > fifo_len) &&
+ if (!is_polling(sdd) && xfer->len >= fifo_len &&
sdd->rx_dma.ch && sdd->tx_dma.ch) {
use_dma = 1;
-
} else if (xfer->len >= fifo_len) {
tx_buf = xfer->tx_buf;
rx_buf = xfer->rx_buf;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index f18738ae95f8f..ff75838c1b5df 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1063,10 +1063,14 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
- if (spi_is_csgpiod(spi)) {
- if (!spi->controller->set_cs_timing && !activate)
- spi_delay_exec(&spi->cs_hold, NULL);
+ /*
+ * Handle chip select delays for GPIO based CS or controllers without
+ * programmable chip select timing.
+ */
+ if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
+ spi_delay_exec(&spi->cs_hold, NULL);
+ if (spi_is_csgpiod(spi)) {
if (!(spi->mode & SPI_NO_CS)) {
/*
* Historically ACPI has no means of the GPIO polarity and
@@ -1099,16 +1103,16 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
spi->controller->set_cs)
spi->controller->set_cs(spi, !enable);
-
- if (!spi->controller->set_cs_timing) {
- if (activate)
- spi_delay_exec(&spi->cs_setup, NULL);
- else
- spi_delay_exec(&spi->cs_inactive, NULL);
- }
} else if (spi->controller->set_cs) {
spi->controller->set_cs(spi, !enable);
}
+
+ if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
+ if (activate)
+ spi_delay_exec(&spi->cs_setup, NULL);
+ else
+ spi_delay_exec(&spi->cs_inactive, NULL);
+ }
}
#ifdef CONFIG_HAS_DMA
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 784b9f673ead0..5175b1c4f1619 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -46,14 +46,10 @@ source "drivers/staging/iio/Kconfig"
source "drivers/staging/sm750fb/Kconfig"
-source "drivers/staging/emxx_udc/Kconfig"
-
source "drivers/staging/nvec/Kconfig"
source "drivers/staging/media/Kconfig"
-source "drivers/staging/board/Kconfig"
-
source "drivers/staging/gdm724x/Kconfig"
source "drivers/staging/fbtft/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 2ea99c7b05d97..67399c0ad8719 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -14,9 +14,7 @@ obj-$(CONFIG_VT6656) += vt6656/
obj-$(CONFIG_VME_BUS) += vme_user/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_FB_SM750) += sm750fb/
-obj-$(CONFIG_USB_EMXX) += emxx_udc/
obj-$(CONFIG_MFD_NVEC) += nvec/
-obj-$(CONFIG_STAGING_BOARD) += board/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
obj-$(CONFIG_FB_TFT) += fbtft/
obj-$(CONFIG_MOST) += most/
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
index 727b956aa2317..c51818c56dd21 100644
--- a/drivers/staging/axis-fifo/axis-fifo.c
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -165,14 +165,9 @@ static ssize_t sysfs_read(struct device *dev, char *buf,
{
struct axis_fifo *fifo = dev_get_drvdata(dev);
unsigned int read_val;
- unsigned int len;
- char tmp[32];
read_val = ioread32(fifo->base_addr + addr_offset);
- len = snprintf(tmp, sizeof(tmp), "0x%x\n", read_val);
- memcpy(buf, tmp, len);
-
- return len;
+ return sysfs_emit(buf, "0x%x\n", read_val);
}
static ssize_t isr_store(struct device *dev, struct device_attribute *attr,
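
The axis-fifo cleanup replaces a snprintf() into a stack buffer plus memcpy() with sysfs_emit(), which formats directly into the page-sized sysfs buffer and enforces the usual sysfs bounds. A minimal show() callback built on the same helper, hypothetical names only:

#include <linux/device.h>
#include <linux/sysfs.h>

struct foo {
	u32 value;
};

static ssize_t foo_value_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct foo *foo = dev_get_drvdata(dev);

	return sysfs_emit(buf, "0x%x\n", foo->value);
}
static DEVICE_ATTR_RO(foo_value);
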
diff --git a/drivers/staging/board/Kconfig b/drivers/staging/board/Kconfig
deleted file mode 100644
index b49216768ef69..0000000000000
--- a/drivers/staging/board/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config STAGING_BOARD
- bool "Staging Board Support"
- depends on OF_ADDRESS && OF_IRQ && HAVE_CLK
- help
- Staging board base is to support continuous upstream
- in-tree development and integration of platform devices.
-
- Helps developers integrate devices as platform devices for
- device drivers that only provide platform device bindings.
- This in turn allows for incremental development of both
- hardware feature support and DT binding work in parallel.
diff --git a/drivers/staging/board/Makefile b/drivers/staging/board/Makefile
deleted file mode 100644
index ed7839752e129..0000000000000
--- a/drivers/staging/board/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-y := board.o
-obj-$(CONFIG_ARCH_EMEV2) += kzm9d.o
-obj-$(CONFIG_ARCH_R8A7740) += armadillo800eva.o
diff --git a/drivers/staging/board/TODO b/drivers/staging/board/TODO
deleted file mode 100644
index 8db70e10aa674..0000000000000
--- a/drivers/staging/board/TODO
+++ /dev/null
@@ -1,2 +0,0 @@
-* replace platform device code with DT nodes once the driver supports DT
-* remove staging board code when no more platform devices are needed
diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c
deleted file mode 100644
index 0225234dd7aa6..0000000000000
--- a/drivers/staging/board/armadillo800eva.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Staging board support for Armadillo 800 eva.
- * Enable not-yet-DT-capable devices here.
- *
- * Based on board-armadillo800eva.c
- *
- * Copyright (C) 2012 Renesas Solutions Corp.
- * Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/fb.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/videodev2.h>
-
-#include <video/sh_mobile_lcdc.h>
-
-#include "board.h"
-
-static struct fb_videomode lcdc0_mode = {
- .name = "AMPIER/AM-800480",
- .xres = 800,
- .yres = 480,
- .left_margin = 88,
- .right_margin = 40,
- .hsync_len = 128,
- .upper_margin = 20,
- .lower_margin = 5,
- .vsync_len = 5,
- .sync = 0,
-};
-
-static struct sh_mobile_lcdc_info lcdc0_info = {
- .clock_source = LCDC_CLK_BUS,
- .ch[0] = {
- .chan = LCDC_CHAN_MAINLCD,
- .fourcc = V4L2_PIX_FMT_RGB565,
- .interface_type = RGB24,
- .clock_divider = 5,
- .flags = 0,
- .lcd_modes = &lcdc0_mode,
- .num_modes = 1,
- .panel_cfg = {
- .width = 111,
- .height = 68,
- },
- },
-};
-
-static struct resource lcdc0_resources[] = {
- DEFINE_RES_MEM_NAMED(0xfe940000, 0x4000, "LCD0"),
- DEFINE_RES_IRQ(177 + 32),
-};
-
-static struct platform_device lcdc0_device = {
- .name = "sh_mobile_lcdc_fb",
- .num_resources = ARRAY_SIZE(lcdc0_resources),
- .resource = lcdc0_resources,
- .id = 0,
- .dev = {
- .platform_data = &lcdc0_info,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
-static const struct board_staging_clk lcdc0_clocks[] __initconst = {
- { "lcdc0", NULL, "sh_mobile_lcdc_fb.0" },
-};
-
-static const struct board_staging_dev armadillo800eva_devices[] __initconst = {
- {
- .pdev = &lcdc0_device,
- .clocks = lcdc0_clocks,
- .nclocks = ARRAY_SIZE(lcdc0_clocks),
- .domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1"
- },
-};
-
-static void __init armadillo800eva_init(void)
-{
- board_staging_gic_setup_xlate("arm,pl390", 32);
- board_staging_register_devices(armadillo800eva_devices,
- ARRAY_SIZE(armadillo800eva_devices));
-}
-
-board_staging("renesas,armadillo800eva", armadillo800eva_init);
diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c
deleted file mode 100644
index f980af0373452..0000000000000
--- a/drivers/staging/board/board.c
+++ /dev/null
@@ -1,204 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2014 Magnus Damm
- * Copyright (C) 2015 Glider bvba
- */
-
-#define pr_fmt(fmt) "board_staging: " fmt
-
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/platform_device.h>
-#include <linux/pm_domain.h>
-
-#include "board.h"
-
-static struct device_node *irqc_node __initdata;
-static unsigned int irqc_base __initdata;
-
-static bool find_by_address(u64 base_address)
-{
- struct device_node *dn = of_find_all_nodes(NULL);
- struct resource res;
-
- while (dn) {
- if (!of_address_to_resource(dn, 0, &res)) {
- if (res.start == base_address) {
- of_node_put(dn);
- return true;
- }
- }
- dn = of_find_all_nodes(dn);
- }
-
- return false;
-}
-
-bool __init board_staging_dt_node_available(const struct resource *resource,
- unsigned int num_resources)
-{
- unsigned int i;
-
- for (i = 0; i < num_resources; i++) {
- const struct resource *r = resource + i;
-
- if (resource_type(r) == IORESOURCE_MEM)
- if (find_by_address(r->start))
- return true; /* DT node available */
- }
-
- return false; /* Nothing found */
-}
-
-int __init board_staging_gic_setup_xlate(const char *gic_match,
- unsigned int base)
-{
- WARN_ON(irqc_node);
-
- irqc_node = of_find_compatible_node(NULL, NULL, gic_match);
-
- WARN_ON(!irqc_node);
- if (!irqc_node)
- return -ENOENT;
-
- irqc_base = base;
- return 0;
-}
-
-static void __init gic_fixup_resource(struct resource *res)
-{
- struct of_phandle_args irq_data;
- unsigned int hwirq = res->start;
- unsigned int virq;
-
- if (resource_type(res) != IORESOURCE_IRQ || !irqc_node)
- return;
-
- irq_data.np = irqc_node;
- irq_data.args_count = 3;
- irq_data.args[0] = 0;
- irq_data.args[1] = hwirq - irqc_base;
- switch (res->flags &
- (IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE |
- IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_HIGHLEVEL)) {
- case IORESOURCE_IRQ_LOWEDGE:
- irq_data.args[2] = IRQ_TYPE_EDGE_FALLING;
- break;
- case IORESOURCE_IRQ_HIGHEDGE:
- irq_data.args[2] = IRQ_TYPE_EDGE_RISING;
- break;
- case IORESOURCE_IRQ_LOWLEVEL:
- irq_data.args[2] = IRQ_TYPE_LEVEL_LOW;
- break;
- case IORESOURCE_IRQ_HIGHLEVEL:
- default:
- irq_data.args[2] = IRQ_TYPE_LEVEL_HIGH;
- break;
- }
-
- virq = irq_create_of_mapping(&irq_data);
- if (WARN_ON(!virq))
- return;
-
- pr_debug("hwirq %u -> virq %u\n", hwirq, virq);
- res->start = virq;
-}
-
-void __init board_staging_gic_fixup_resources(struct resource *res,
- unsigned int nres)
-{
- unsigned int i;
-
- for (i = 0; i < nres; i++)
- gic_fixup_resource(&res[i]);
-}
-
-int __init board_staging_register_clock(const struct board_staging_clk *bsc)
-{
- int error;
-
- pr_debug("Aliasing clock %s for con_id %s dev_id %s\n", bsc->clk,
- bsc->con_id, bsc->dev_id);
- error = clk_add_alias(bsc->con_id, bsc->dev_id, bsc->clk, NULL);
- if (error)
- pr_err("Failed to alias clock %s (%d)\n", bsc->clk, error);
-
- return error;
-}
-
-#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
-static int board_staging_add_dev_domain(struct platform_device *pdev,
- const char *domain)
-{
- struct device *dev = &pdev->dev;
- struct of_phandle_args pd_args;
- struct device_node *np;
-
- np = of_find_node_by_path(domain);
- if (!np) {
- pr_err("Cannot find domain node %s\n", domain);
- return -ENOENT;
- }
-
- pd_args.np = np;
- pd_args.args_count = 0;
-
- /* Initialization similar to device_pm_init_common() */
- spin_lock_init(&dev->power.lock);
- dev->power.early_init = true;
-
- return of_genpd_add_device(&pd_args, dev);
-}
-#else
-static inline int board_staging_add_dev_domain(struct platform_device *pdev,
- const char *domain)
-{
- return 0;
-}
-#endif
-
-int __init board_staging_register_device(const struct board_staging_dev *dev)
-{
- struct platform_device *pdev = dev->pdev;
- unsigned int i;
- int error;
-
- pr_debug("Trying to register device %s\n", pdev->name);
- if (board_staging_dt_node_available(pdev->resource,
- pdev->num_resources)) {
- pr_warn("Skipping %s, already in DT\n", pdev->name);
- return -EEXIST;
- }
-
- board_staging_gic_fixup_resources(pdev->resource, pdev->num_resources);
-
- for (i = 0; i < dev->nclocks; i++)
- board_staging_register_clock(&dev->clocks[i]);
-
- if (dev->domain)
- board_staging_add_dev_domain(pdev, dev->domain);
-
- error = platform_device_register(pdev);
- if (error) {
- pr_err("Failed to register device %s (%d)\n", pdev->name,
- error);
- return error;
- }
-
- return error;
-}
-
-void __init board_staging_register_devices(const struct board_staging_dev *devs,
- unsigned int ndevs)
-{
- unsigned int i;
-
- for (i = 0; i < ndevs; i++)
- board_staging_register_device(&devs[i]);
-}
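
For context on the API being removed above, here is a minimal sketch of how a board file was expected to use these helpers. Everything in it is a hypothetical placeholder (device name, addresses, IRQ number, clock strings, power-domain path, GIC compatible); it is not taken from this diff and assumes <linux/platform_device.h>, <linux/ioport.h> and "board.h" are included:

/* Hypothetical example only -- all names and numbers are placeholders. */
static struct resource foo_resources[] = {
	DEFINE_RES_MEM(0xe0000000, 0x1000),	/* placeholder MMIO window */
	DEFINE_RES_IRQ(100),			/* placeholder legacy hwirq */
};

static struct platform_device foo_device = {
	.name		= "foo-dev",
	.id		= -1,
	.resource	= foo_resources,
	.num_resources	= ARRAY_SIZE(foo_resources),
};

static const struct board_staging_clk foo_clocks[] __initconst = {
	{ "foo_clk", NULL, "foo-dev" },		/* clk, con_id, dev_id */
};

static const struct board_staging_dev foo_devices[] __initconst = {
	{
		.pdev		= &foo_device,
		.clocks		= foo_clocks,
		.nclocks	= ARRAY_SIZE(foo_clocks),
		.domain		= "/pm-domains/a3sp",	/* placeholder path */
	},
};

static void __init foo_board_init(void)
{
	/* Translate legacy hwirq numbers (base 32) via the GIC node. */
	board_staging_gic_setup_xlate("arm,gic-400", 32);
	board_staging_register_devices(foo_devices, ARRAY_SIZE(foo_devices));
}

/* board_staging() comes from board.h, removed below. */
board_staging("vendor,foo-board", foo_board_init);

Registration is skipped for any resource block that already has a matching DT node, and the init hook only runs on machines whose compatible string matches, via the board_staging() initcall macro in board.h below.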
diff --git a/drivers/staging/board/board.h b/drivers/staging/board/board.h
deleted file mode 100644
index 5609daf4d8695..0000000000000
--- a/drivers/staging/board/board.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __BOARD_H__
-#define __BOARD_H__
-
-#include <linux/init.h>
-#include <linux/of.h>
-
-struct board_staging_clk {
- const char *clk;
- const char *con_id;
- const char *dev_id;
-};
-
-struct board_staging_dev {
- /* Platform Device */
- struct platform_device *pdev;
- /* Clocks (optional) */
- const struct board_staging_clk *clocks;
- unsigned int nclocks;
- /* Generic PM Domain (optional) */
- const char *domain;
-};
-
-struct resource;
-
-bool board_staging_dt_node_available(const struct resource *resource,
- unsigned int num_resources);
-int board_staging_gic_setup_xlate(const char *gic_match, unsigned int base);
-void board_staging_gic_fixup_resources(struct resource *res, unsigned int nres);
-int board_staging_register_clock(const struct board_staging_clk *bsc);
-int board_staging_register_device(const struct board_staging_dev *dev);
-void board_staging_register_devices(const struct board_staging_dev *devs,
- unsigned int ndevs);
-
-#define board_staging(str, fn) \
-static int __init runtime_board_check(void) \
-{ \
- if (of_machine_is_compatible(str)) \
- fn(); \
- \
- return 0; \
-} \
- \
-device_initcall(runtime_board_check)
-
-#endif /* __BOARD_H__ */
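
Purely as a reading aid (not part of this diff): for the KZM9D entry in the next file, board_staging("renesas,kzm9d", kzm9d_init) expands to roughly the following, so the board hook runs as a device_initcall only on matching machines:

static int __init runtime_board_check(void)
{
	if (of_machine_is_compatible("renesas,kzm9d"))
		kzm9d_init();

	return 0;
}

device_initcall(runtime_board_check);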
diff --git a/drivers/staging/board/kzm9d.c b/drivers/staging/board/kzm9d.c
deleted file mode 100644
index d449a837414e6..0000000000000
--- a/drivers/staging/board/kzm9d.c
+++ /dev/null
@@ -1,26 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Staging board support for KZM9D. Enable not-yet-DT-capable devices here. */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include "board.h"
-
-static struct resource usbs1_res[] __initdata = {
- DEFINE_RES_MEM(0xe2800000, 0x2000),
- DEFINE_RES_IRQ(159),
-};
-
-static void __init kzm9d_init(void)
-{
- board_staging_gic_setup_xlate("arm,pl390", 32);
-
- if (!board_staging_dt_node_available(usbs1_res,
- ARRAY_SIZE(usbs1_res))) {
- board_staging_gic_fixup_resources(usbs1_res,
- ARRAY_SIZE(usbs1_res));
- platform_device_register_simple("emxx_udc", -1, usbs1_res,
- ARRAY_SIZE(usbs1_res));
- }
-}
-
-board_staging("renesas,kzm9d", kzm9d_init);
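
A small sketch of what the board_staging_gic_fixup_resources() path computes for the DEFINE_RES_IRQ(159) entry above; this is an illustration, not code from the diff. With the "arm,pl390" node and base 32 set by kzm9d_init(), and no trigger flags in the resource, the default IRQ_TYPE_LEVEL_HIGH case applies, giving a GIC specifier of SPI 127, and res->start is replaced by the resulting virq:

/* Illustrative only: mirrors gic_fixup_resource() for hwirq 159, base 32. */
static unsigned int __init kzm9d_usbs1_irq_example(struct device_node *gic)
{
	struct of_phandle_args irq_data = {
		.np		= gic,
		.args_count	= 3,
		/* { 0 = SPI in the GIC binding, 159 - 32 = 127, level high } */
		.args		= { 0, 159 - 32, IRQ_TYPE_LEVEL_HIGH },
	};

	return irq_create_of_mapping(&irq_data);
}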
diff --git a/drivers/staging/emxx_udc/Kconfig b/drivers/staging/emxx_udc/Kconfig
deleted file mode 100644
index e7a95b3b6a2f7..0000000000000
--- a/drivers/staging/emxx_udc/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config USB_EMXX
- tristate "EMXX USB Function Device Controller"
- depends on USB_GADGET && (ARCH_RENESAS || COMPILE_TEST)
- help
- The Emma Mobile series of SoCs from Renesas Electronics and
-	  former NEC Electronics includes USB Function hardware.
-
- Say "y" to link the driver statically, or "m" to build a
- dynamically linked module called "emxx_udc" and force all
- gadget drivers to also be dynamically linked.
diff --git a/drivers/staging/emxx_udc/Makefile b/drivers/staging/emxx_udc/Makefile
deleted file mode 100644
index 569c5e9a9baef..0000000000000
--- a/drivers/staging/emxx_udc/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_USB_EMXX) := emxx_udc.o
diff --git a/drivers/staging/emxx_udc/TODO b/drivers/staging/emxx_udc/TODO
deleted file mode 100644
index 471529a470c70..0000000000000
--- a/drivers/staging/emxx_udc/TODO
+++ /dev/null
@@ -1,6 +0,0 @@
-* add clock framework support (platform device with CCF needs special care)
-* break out board-specific VBUS GPIO to work with multiplatform
-* convert VBUS GPIO to use GPIO descriptors from <linux/gpio/consumer.h>
- and stop using the old GPIO API
-* DT bindings
-* move driver into drivers/usb/gadget/
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
deleted file mode 100644
index eb63daaca702e..0000000000000
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ /dev/null
@@ -1,3223 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * drivers/usb/gadget/emxx_udc.c
- * EMXX FCD (Function Controller Driver) for USB.
- *
- * Copyright (C) 2010 Renesas Electronics Corporation
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/proc_fs.h>
-#include <linux/clk.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/dma-mapping.h>
-#include <linux/workqueue.h>
-#include <linux/device.h>
-
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-
-#include <linux/irq.h>
-#include <linux/gpio/consumer.h>
-
-#include "emxx_udc.h"
-
-#define DRIVER_DESC "EMXX UDC driver"
-#define DMA_ADDR_INVALID (~(dma_addr_t)0)
-
-static struct gpio_desc *vbus_gpio;
-static int vbus_irq;
-
-static const char driver_name[] = "emxx_udc";
-
-/*===========================================================================*/
-/* Prototype */
-static void _nbu2ss_ep_dma_abort(struct nbu2ss_udc *, struct nbu2ss_ep *);
-static void _nbu2ss_ep0_enable(struct nbu2ss_udc *);
-/*static void _nbu2ss_ep0_disable(struct nbu2ss_udc *);*/
-static void _nbu2ss_ep_done(struct nbu2ss_ep *, struct nbu2ss_req *, int);
-static void _nbu2ss_set_test_mode(struct nbu2ss_udc *, u32 mode);
-static void _nbu2ss_endpoint_toggle_reset(struct nbu2ss_udc *udc, u8 ep_adrs);
-
-static int _nbu2ss_pullup(struct nbu2ss_udc *, int);
-static void _nbu2ss_fifo_flush(struct nbu2ss_udc *, struct nbu2ss_ep *);
-
-/*===========================================================================*/
-/* Macro */
-#define _nbu2ss_zero_len_pkt(udc, epnum) \
- _nbu2ss_ep_in_end(udc, epnum, 0, 0)
-
-/*===========================================================================*/
-/* Global */
-static struct nbu2ss_udc udc_controller;
-
-/*-------------------------------------------------------------------------*/
-/* Read */
-static inline u32 _nbu2ss_readl(void __iomem *address)
-{
- return __raw_readl(address);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Write */
-static inline void _nbu2ss_writel(void __iomem *address, u32 udata)
-{
- __raw_writel(udata, address);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Set Bit */
-static inline void _nbu2ss_bitset(void __iomem *address, u32 udata)
-{
- u32 reg_dt = __raw_readl(address) | (udata);
-
- __raw_writel(reg_dt, address);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Clear Bit */
-static inline void _nbu2ss_bitclr(void __iomem *address, u32 udata)
-{
- u32 reg_dt = __raw_readl(address) & ~(udata);
-
- __raw_writel(reg_dt, address);
-}
-
-#ifdef UDC_DEBUG_DUMP
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_dump_register(struct nbu2ss_udc *udc)
-{
- int i;
- u32 reg_data;
-
- pr_info("=== %s()\n", __func__);
-
- if (!udc) {
- pr_err("%s udc == NULL\n", __func__);
- return;
- }
-
- spin_unlock(&udc->lock);
-
- dev_dbg(&udc->dev, "\n-USB REG-\n");
- for (i = 0x0 ; i < USB_BASE_SIZE ; i += 16) {
- reg_data = _nbu2ss_readl(IO_ADDRESS(USB_BASE_ADDRESS + i));
- dev_dbg(&udc->dev, "USB%04x =%08x", i, (int)reg_data);
-
- reg_data = _nbu2ss_readl(IO_ADDRESS(USB_BASE_ADDRESS + i + 4));
- dev_dbg(&udc->dev, " %08x", (int)reg_data);
-
- reg_data = _nbu2ss_readl(IO_ADDRESS(USB_BASE_ADDRESS + i + 8));
- dev_dbg(&udc->dev, " %08x", (int)reg_data);
-
- reg_data = _nbu2ss_readl(IO_ADDRESS(USB_BASE_ADDRESS + i + 12));
- dev_dbg(&udc->dev, " %08x\n", (int)reg_data);
- }
-
- spin_lock(&udc->lock);
-}
-#endif /* UDC_DEBUG_DUMP */
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint 0 Callback (Complete) */
-static void _nbu2ss_ep0_complete(struct usb_ep *_ep, struct usb_request *_req)
-{
- u8 recipient;
- u16 selector;
- u16 wIndex;
- u32 test_mode;
- struct usb_ctrlrequest *p_ctrl;
- struct nbu2ss_udc *udc;
-
- if (!_ep || !_req)
- return;
-
- udc = (struct nbu2ss_udc *)_req->context;
- p_ctrl = &udc->ctrl;
- if ((p_ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
- if (p_ctrl->bRequest == USB_REQ_SET_FEATURE) {
- /*-------------------------------------------------*/
- /* SET_FEATURE */
- recipient = (u8)(p_ctrl->bRequestType & USB_RECIP_MASK);
- selector = le16_to_cpu(p_ctrl->wValue);
- if ((recipient == USB_RECIP_DEVICE) &&
- (selector == USB_DEVICE_TEST_MODE)) {
- wIndex = le16_to_cpu(p_ctrl->wIndex);
- test_mode = (u32)(wIndex >> 8);
- _nbu2ss_set_test_mode(udc, test_mode);
- }
- }
- }
-}
-
-/*-------------------------------------------------------------------------*/
-/* Initialize the usb_request */
-static void _nbu2ss_create_ep0_packet(struct nbu2ss_udc *udc,
- void *p_buf, unsigned int length)
-{
- udc->ep0_req.req.buf = p_buf;
- udc->ep0_req.req.length = length;
- udc->ep0_req.req.dma = 0;
- udc->ep0_req.req.zero = true;
- udc->ep0_req.req.complete = _nbu2ss_ep0_complete;
- udc->ep0_req.req.status = -EINPROGRESS;
- udc->ep0_req.req.context = udc;
- udc->ep0_req.req.actual = 0;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Get the first available RAM (FIFO) address */
-static u32 _nbu2ss_get_begin_ram_address(struct nbu2ss_udc *udc)
-{
- u32 num, buf_type;
- u32 data, last_ram_adr, use_ram_size;
-
- struct ep_regs __iomem *p_ep_regs;
-
- last_ram_adr = (D_RAM_SIZE_CTRL / sizeof(u32)) * 2;
- use_ram_size = 0;
-
- for (num = 0; num < NUM_ENDPOINTS - 1; num++) {
- p_ep_regs = &udc->p_regs->EP_REGS[num];
- data = _nbu2ss_readl(&p_ep_regs->EP_PCKT_ADRS);
- buf_type = _nbu2ss_readl(&p_ep_regs->EP_CONTROL) & EPN_BUF_TYPE;
- if (buf_type == 0) {
- /* Single Buffer */
- use_ram_size += (data & EPN_MPKT) / sizeof(u32);
- } else {
- /* Double Buffer */
- use_ram_size += ((data & EPN_MPKT) / sizeof(u32)) * 2;
- }
-
- if ((data >> 16) > last_ram_adr)
- last_ram_adr = data >> 16;
- }
-
- return last_ram_adr + use_ram_size;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Construction of Endpoint */
-static int _nbu2ss_ep_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
-{
- u32 num;
- u32 data;
- u32 begin_adrs;
-
- if (ep->epnum == 0)
- return -EINVAL;
-
- num = ep->epnum - 1;
-
- /*-------------------------------------------------------------*/
- /* RAM Transfer Address */
- begin_adrs = _nbu2ss_get_begin_ram_address(udc);
- data = (begin_adrs << 16) | ep->ep.maxpacket;
- _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_PCKT_ADRS, data);
-
- /*-------------------------------------------------------------*/
- /* Interrupt Enable */
- data = 1 << (ep->epnum + 8);
- _nbu2ss_bitset(&udc->p_regs->USB_INT_ENA, data);
-
- /*-------------------------------------------------------------*/
- /* Endpoint Type(Mode) */
- /* Bulk, Interrupt, ISO */
- switch (ep->ep_type) {
- case USB_ENDPOINT_XFER_BULK:
- data = EPN_BULK;
- break;
-
- case USB_ENDPOINT_XFER_INT:
- data = EPN_BUF_SINGLE | EPN_INTERRUPT;
- break;
-
- case USB_ENDPOINT_XFER_ISOC:
- data = EPN_ISO;
- break;
-
- default:
- data = 0;
- break;
- }
-
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
- _nbu2ss_endpoint_toggle_reset(udc, (ep->epnum | ep->direct));
-
- if (ep->direct == USB_DIR_OUT) {
- /*---------------------------------------------------------*/
- /* OUT */
- data = EPN_EN | EPN_BCLR | EPN_DIR0;
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPN_ONAK | EPN_OSTL_EN | EPN_OSTL;
- _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPN_OUT_EN | EPN_OUT_END_EN;
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
- } else {
- /*---------------------------------------------------------*/
- /* IN */
- data = EPN_EN | EPN_BCLR | EPN_AUTO;
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPN_ISTL;
- _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPN_IN_EN | EPN_IN_END_EN;
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Release of Endpoint */
-static int _nbu2ss_epn_exit(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
-{
- u32 num;
- u32 data;
-
- if ((ep->epnum == 0) || (udc->vbus_active == 0))
- return -EINVAL;
-
- num = ep->epnum - 1;
-
- /*-------------------------------------------------------------*/
- /* RAM Transfer Address */
- _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_PCKT_ADRS, 0);
-
- /*-------------------------------------------------------------*/
- /* Interrupt Disable */
- data = 1 << (ep->epnum + 8);
- _nbu2ss_bitclr(&udc->p_regs->USB_INT_ENA, data);
-
- if (ep->direct == USB_DIR_OUT) {
- /*---------------------------------------------------------*/
- /* OUT */
- data = EPN_ONAK | EPN_BCLR;
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPN_EN | EPN_DIR0;
- _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPN_OUT_EN | EPN_OUT_END_EN;
- _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
- } else {
- /*---------------------------------------------------------*/
- /* IN */
- data = EPN_BCLR;
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPN_EN | EPN_AUTO;
- _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPN_IN_EN | EPN_IN_END_EN;
- _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-/* DMA setting (endpoints other than 0) */
-static void _nbu2ss_ep_dma_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
-{
- u32 num;
- u32 data;
-
- data = _nbu2ss_readl(&udc->p_regs->USBSSCONF);
- if (((ep->epnum == 0) || (data & (1 << ep->epnum)) == 0))
- return; /* Not Support DMA */
-
- num = ep->epnum - 1;
-
- if (ep->direct == USB_DIR_OUT) {
- /*---------------------------------------------------------*/
- /* OUT */
- data = ep->ep.maxpacket;
- _nbu2ss_writel(&udc->p_regs->EP_DCR[num].EP_DCR2, data);
-
- /*---------------------------------------------------------*/
- /* Transfer Direct */
- data = DCR1_EPN_DIR0;
- _nbu2ss_bitset(&udc->p_regs->EP_DCR[num].EP_DCR1, data);
-
- /*---------------------------------------------------------*/
- /* DMA Mode etc. */
- data = EPN_STOP_MODE | EPN_STOP_SET | EPN_DMAMODE0;
- _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_DMA_CTRL, data);
- } else {
- /*---------------------------------------------------------*/
- /* IN */
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, EPN_AUTO);
-
- /*---------------------------------------------------------*/
- /* DMA Mode etc. */
- data = EPN_BURST_SET | EPN_DMAMODE0;
- _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_DMA_CTRL, data);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-/* DMA setting release */
-static void _nbu2ss_ep_dma_exit(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
-{
- u32 num;
- u32 data;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (udc->vbus_active == 0)
- return; /* VBUS OFF */
-
- data = _nbu2ss_readl(&preg->USBSSCONF);
- if ((ep->epnum == 0) || ((data & (1 << ep->epnum)) == 0))
- return; /* Not Support DMA */
-
- num = ep->epnum - 1;
-
- _nbu2ss_ep_dma_abort(udc, ep);
-
- if (ep->direct == USB_DIR_OUT) {
- /*---------------------------------------------------------*/
- /* OUT */
- _nbu2ss_writel(&preg->EP_DCR[num].EP_DCR2, 0);
- _nbu2ss_bitclr(&preg->EP_DCR[num].EP_DCR1, DCR1_EPN_DIR0);
- _nbu2ss_writel(&preg->EP_REGS[num].EP_DMA_CTRL, 0);
- } else {
- /*---------------------------------------------------------*/
- /* IN */
- _nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL, EPN_AUTO);
- _nbu2ss_writel(&preg->EP_REGS[num].EP_DMA_CTRL, 0);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-/* Abort DMA */
-static void _nbu2ss_ep_dma_abort(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
-{
- struct fc_regs __iomem *preg = udc->p_regs;
-
- _nbu2ss_bitclr(&preg->EP_DCR[ep->epnum - 1].EP_DCR1, DCR1_EPN_REQEN);
- mdelay(DMA_DISABLE_TIME); /* DCR1_EPN_REQEN Clear */
- _nbu2ss_bitclr(&preg->EP_REGS[ep->epnum - 1].EP_DMA_CTRL, EPN_DMA_EN);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Start IN Transfer */
-static void _nbu2ss_ep_in_end(struct nbu2ss_udc *udc,
- u32 epnum, u32 data32, u32 length)
-{
- u32 data;
- u32 num;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (length >= sizeof(u32))
- return;
-
- if (epnum == 0) {
- _nbu2ss_bitclr(&preg->EP0_CONTROL, EP0_AUTO);
-
- /* Writing of 1-4 bytes */
- if (length)
- _nbu2ss_writel(&preg->EP0_WRITE, data32);
-
- data = ((length << 5) & EP0_DW) | EP0_DEND;
- _nbu2ss_writel(&preg->EP0_CONTROL, data);
-
- _nbu2ss_bitset(&preg->EP0_CONTROL, EP0_AUTO);
- } else {
- num = epnum - 1;
-
- _nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL, EPN_AUTO);
-
- /* Writing of 1-4 bytes */
- if (length)
- _nbu2ss_writel(&preg->EP_REGS[num].EP_WRITE, data32);
-
- data = (((length) << 5) & EPN_DW) | EPN_DEND;
- _nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data);
-
- _nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, EPN_AUTO);
- }
-}
-
-#ifdef USE_DMA
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_dma_map_single(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req, u8 direct)
-{
- if (req->req.dma == DMA_ADDR_INVALID) {
- if (req->unaligned) {
- req->req.dma = ep->phys_buf;
- } else {
- req->req.dma = dma_map_single(udc->gadget.dev.parent,
- req->req.buf,
- req->req.length,
- (direct == USB_DIR_IN)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
- }
- req->mapped = 1;
- } else {
- if (!req->unaligned)
- dma_sync_single_for_device(udc->gadget.dev.parent,
- req->req.dma,
- req->req.length,
- (direct == USB_DIR_IN)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
-
- req->mapped = 0;
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_dma_unmap_single(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req, u8 direct)
-{
- u8 data[4];
- u8 *p;
- u32 count = 0;
-
- if (direct == USB_DIR_OUT) {
- count = req->req.actual % 4;
- if (count) {
- p = req->req.buf;
- p += (req->req.actual - count);
- memcpy(data, p, count);
- }
- }
-
- if (req->mapped) {
- if (req->unaligned) {
- if (direct == USB_DIR_OUT)
- memcpy(req->req.buf, ep->virt_buf,
- req->req.actual & 0xfffffffc);
- } else {
- dma_unmap_single(udc->gadget.dev.parent,
- req->req.dma, req->req.length,
- (direct == USB_DIR_IN)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
- }
- req->req.dma = DMA_ADDR_INVALID;
- req->mapped = 0;
- } else {
- if (!req->unaligned)
- dma_sync_single_for_cpu(udc->gadget.dev.parent,
- req->req.dma, req->req.length,
- (direct == USB_DIR_IN)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
- }
-
- if (count) {
- p = req->req.buf;
- p += (req->req.actual - count);
- memcpy(p, data, count);
- }
-}
-#endif
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint 0 OUT Transfer (PIO) */
-static int ep0_out_pio(struct nbu2ss_udc *udc, u8 *buf, u32 length)
-{
- u32 i;
- u32 numreads = length / sizeof(u32);
- union usb_reg_access *buf32 = (union usb_reg_access *)buf;
-
- if (!numreads)
- return 0;
-
- /* PIO Read */
- for (i = 0; i < numreads; i++) {
- buf32->dw = _nbu2ss_readl(&udc->p_regs->EP0_READ);
- buf32++;
- }
-
- return numreads * sizeof(u32);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint 0 OUT Transfer (PIO, OverBytes) */
-static int ep0_out_overbytes(struct nbu2ss_udc *udc, u8 *p_buf, u32 length)
-{
- u32 i;
- u32 i_read_size = 0;
- union usb_reg_access temp_32;
- union usb_reg_access *p_buf_32 = (union usb_reg_access *)p_buf;
-
- if ((length > 0) && (length < sizeof(u32))) {
- temp_32.dw = _nbu2ss_readl(&udc->p_regs->EP0_READ);
- for (i = 0 ; i < length ; i++)
- p_buf_32->byte.DATA[i] = temp_32.byte.DATA[i];
- i_read_size += length;
- }
-
- return i_read_size;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint 0 IN Transfer (PIO) */
-static int EP0_in_PIO(struct nbu2ss_udc *udc, u8 *p_buf, u32 length)
-{
- u32 i;
- u32 i_max_length = EP0_PACKETSIZE;
- u32 i_word_length = 0;
- u32 i_write_length = 0;
- union usb_reg_access *p_buf_32 = (union usb_reg_access *)p_buf;
-
- /*------------------------------------------------------------*/
- /* Transfer Length */
- if (i_max_length < length)
- i_word_length = i_max_length / sizeof(u32);
- else
- i_word_length = length / sizeof(u32);
-
- /*------------------------------------------------------------*/
- /* PIO */
- for (i = 0; i < i_word_length; i++) {
- _nbu2ss_writel(&udc->p_regs->EP0_WRITE, p_buf_32->dw);
- p_buf_32++;
- i_write_length += sizeof(u32);
- }
-
- return i_write_length;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint 0 IN Transfer (PIO, OverBytes) */
-static int ep0_in_overbytes(struct nbu2ss_udc *udc,
- u8 *p_buf,
- u32 i_remain_size)
-{
- u32 i;
- union usb_reg_access temp_32;
- union usb_reg_access *p_buf_32 = (union usb_reg_access *)p_buf;
-
- if ((i_remain_size > 0) && (i_remain_size < sizeof(u32))) {
- for (i = 0 ; i < i_remain_size ; i++)
- temp_32.byte.DATA[i] = p_buf_32->byte.DATA[i];
- _nbu2ss_ep_in_end(udc, 0, temp_32.dw, i_remain_size);
-
- return i_remain_size;
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Transfer NULL Packet (Endpoint 0) */
-static int EP0_send_NULL(struct nbu2ss_udc *udc, bool pid_flag)
-{
- u32 data;
-
- data = _nbu2ss_readl(&udc->p_regs->EP0_CONTROL);
- data &= ~(u32)EP0_INAK;
-
- if (pid_flag)
- data |= (EP0_INAK_EN | EP0_PIDCLR | EP0_DEND);
- else
- data |= (EP0_INAK_EN | EP0_DEND);
-
- _nbu2ss_writel(&udc->p_regs->EP0_CONTROL, data);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Receive NULL Packet (Endpoint 0) */
-static int EP0_receive_NULL(struct nbu2ss_udc *udc, bool pid_flag)
-{
- u32 data;
-
- data = _nbu2ss_readl(&udc->p_regs->EP0_CONTROL);
- data &= ~(u32)EP0_ONAK;
-
- if (pid_flag)
- data |= EP0_PIDCLR;
-
- _nbu2ss_writel(&udc->p_regs->EP0_CONTROL, data);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_ep0_in_transfer(struct nbu2ss_udc *udc,
- struct nbu2ss_req *req)
-{
- u8 *p_buffer; /* IN Data Buffer */
- u32 data;
- u32 i_remain_size = 0;
- int result = 0;
-
- /*-------------------------------------------------------------*/
- /* End confirmation */
- if (req->req.actual == req->req.length) {
- if ((req->req.actual % EP0_PACKETSIZE) == 0) {
- if (req->zero) {
- req->zero = false;
- EP0_send_NULL(udc, false);
- return 1;
- }
- }
-
- return 0; /* Transfer End */
- }
-
- /*-------------------------------------------------------------*/
- /* NAK release */
- data = _nbu2ss_readl(&udc->p_regs->EP0_CONTROL);
- data |= EP0_INAK_EN;
- data &= ~(u32)EP0_INAK;
- _nbu2ss_writel(&udc->p_regs->EP0_CONTROL, data);
-
- i_remain_size = req->req.length - req->req.actual;
- p_buffer = (u8 *)req->req.buf;
- p_buffer += req->req.actual;
-
- /*-------------------------------------------------------------*/
- /* Data transfer */
- result = EP0_in_PIO(udc, p_buffer, i_remain_size);
-
- req->div_len = result;
- i_remain_size -= result;
-
- if (i_remain_size == 0) {
- EP0_send_NULL(udc, false);
- return result;
- }
-
- if ((i_remain_size < sizeof(u32)) && (result != EP0_PACKETSIZE)) {
- p_buffer += result;
- result += ep0_in_overbytes(udc, p_buffer, i_remain_size);
- req->div_len = result;
- }
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_ep0_out_transfer(struct nbu2ss_udc *udc,
- struct nbu2ss_req *req)
-{
- u8 *p_buffer;
- u32 i_remain_size;
- u32 i_recv_length;
- int result = 0;
- int f_rcv_zero;
-
- /*-------------------------------------------------------------*/
- /* Receive data confirmation */
- i_recv_length = _nbu2ss_readl(&udc->p_regs->EP0_LENGTH) & EP0_LDATA;
- if (i_recv_length != 0) {
- f_rcv_zero = 0;
-
- i_remain_size = req->req.length - req->req.actual;
- p_buffer = (u8 *)req->req.buf;
- p_buffer += req->req.actual;
-
- result = ep0_out_pio(udc, p_buffer
- , min(i_remain_size, i_recv_length));
- if (result < 0)
- return result;
-
- req->req.actual += result;
- i_recv_length -= result;
-
- if ((i_recv_length > 0) && (i_recv_length < sizeof(u32))) {
- p_buffer += result;
- i_remain_size -= result;
-
- result = ep0_out_overbytes(udc, p_buffer
- , min(i_remain_size, i_recv_length));
- req->req.actual += result;
- }
- } else {
- f_rcv_zero = 1;
- }
-
- /*-------------------------------------------------------------*/
- /* End confirmation */
- if (req->req.actual == req->req.length) {
- if ((req->req.actual % EP0_PACKETSIZE) == 0) {
- if (req->zero) {
- req->zero = false;
- EP0_receive_NULL(udc, false);
- return 1;
- }
- }
-
- return 0; /* Transfer End */
- }
-
- if ((req->req.actual % EP0_PACKETSIZE) != 0)
- return 0; /* Short Packet Transfer End */
-
- if (req->req.actual > req->req.length) {
- dev_err(udc->dev, " *** Overrun Error\n");
- return -EOVERFLOW;
- }
-
- if (f_rcv_zero != 0) {
- i_remain_size = _nbu2ss_readl(&udc->p_regs->EP0_CONTROL);
- if (i_remain_size & EP0_ONAK) {
- /*---------------------------------------------------*/
-			/* NAK release */
- _nbu2ss_bitclr(&udc->p_regs->EP0_CONTROL, EP0_ONAK);
- }
- result = 1;
- }
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_out_dma(struct nbu2ss_udc *udc, struct nbu2ss_req *req,
- u32 num, u32 length)
-{
- dma_addr_t p_buffer;
- u32 mpkt;
- u32 lmpkt;
- u32 dmacnt;
- u32 burst = 1;
- u32 data;
- int result;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (req->dma_flag)
- return 1; /* DMA is forwarded */
-
- req->dma_flag = true;
- p_buffer = req->req.dma;
- p_buffer += req->req.actual;
-
- /* DMA Address */
- _nbu2ss_writel(&preg->EP_DCR[num].EP_TADR, (u32)p_buffer);
-
- /* Number of transfer packets */
- mpkt = _nbu2ss_readl(&preg->EP_REGS[num].EP_PCKT_ADRS) & EPN_MPKT;
- dmacnt = length / mpkt;
- lmpkt = (length % mpkt) & ~(u32)0x03;
-
- if (dmacnt > DMA_MAX_COUNT) {
- dmacnt = DMA_MAX_COUNT;
- lmpkt = 0;
- } else if (lmpkt != 0) {
- if (dmacnt == 0)
- burst = 0; /* Burst OFF */
- dmacnt++;
- }
-
- data = mpkt | (lmpkt << 16);
- _nbu2ss_writel(&preg->EP_DCR[num].EP_DCR2, data);
-
- data = ((dmacnt & 0xff) << 16) | DCR1_EPN_DIR0 | DCR1_EPN_REQEN;
- _nbu2ss_writel(&preg->EP_DCR[num].EP_DCR1, data);
-
- if (burst == 0) {
- _nbu2ss_writel(&preg->EP_REGS[num].EP_LEN_DCNT, 0);
- _nbu2ss_bitclr(&preg->EP_REGS[num].EP_DMA_CTRL, EPN_BURST_SET);
- } else {
- _nbu2ss_writel(&preg->EP_REGS[num].EP_LEN_DCNT
- , (dmacnt << 16));
- _nbu2ss_bitset(&preg->EP_REGS[num].EP_DMA_CTRL, EPN_BURST_SET);
- }
- _nbu2ss_bitset(&preg->EP_REGS[num].EP_DMA_CTRL, EPN_DMA_EN);
-
- result = length & ~(u32)0x03;
- req->div_len = result;
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_epn_out_pio(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
- struct nbu2ss_req *req, u32 length)
-{
- u8 *p_buffer;
- u32 i;
- u32 data;
- u32 i_word_length;
- union usb_reg_access temp_32;
- union usb_reg_access *p_buf_32;
- int result = 0;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (req->dma_flag)
- return 1; /* DMA is forwarded */
-
- if (length == 0)
- return 0;
-
- p_buffer = (u8 *)req->req.buf;
- p_buf_32 = (union usb_reg_access *)(p_buffer + req->req.actual);
-
- i_word_length = length / sizeof(u32);
- if (i_word_length > 0) {
- /*---------------------------------------------------------*/
-		/* Copy four bytes at a time */
- for (i = 0; i < i_word_length; i++) {
- p_buf_32->dw =
- _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_READ);
- p_buf_32++;
- }
- result = i_word_length * sizeof(u32);
- }
-
- data = length - result;
- if (data > 0) {
- /*---------------------------------------------------------*/
-		/* Copy the remaining bytes */
- temp_32.dw =
- _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_READ);
- for (i = 0 ; i < data ; i++)
- p_buf_32->byte.DATA[i] = temp_32.byte.DATA[i];
- result += data;
- }
-
- req->req.actual += result;
-
- if ((req->req.actual == req->req.length) ||
- ((req->req.actual % ep->ep.maxpacket) != 0)) {
- result = 0;
- }
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_epn_out_data(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
- struct nbu2ss_req *req, u32 data_size)
-{
- u32 num;
- u32 i_buf_size;
- int nret = 1;
-
- if (ep->epnum == 0)
- return -EINVAL;
-
- num = ep->epnum - 1;
-
- i_buf_size = min((req->req.length - req->req.actual), data_size);
-
- if ((ep->ep_type != USB_ENDPOINT_XFER_INT) && (req->req.dma != 0) &&
- (i_buf_size >= sizeof(u32))) {
- nret = _nbu2ss_out_dma(udc, req, num, i_buf_size);
- } else {
- i_buf_size = min_t(u32, i_buf_size, ep->ep.maxpacket);
- nret = _nbu2ss_epn_out_pio(udc, ep, req, i_buf_size);
- }
-
- return nret;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_epn_out_transfer(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req)
-{
- u32 num;
- u32 i_recv_length;
- int result = 1;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (ep->epnum == 0)
- return -EINVAL;
-
- num = ep->epnum - 1;
-
- /*-------------------------------------------------------------*/
- /* Receive Length */
- i_recv_length =
- _nbu2ss_readl(&preg->EP_REGS[num].EP_LEN_DCNT) & EPN_LDATA;
-
- if (i_recv_length != 0) {
- result = _nbu2ss_epn_out_data(udc, ep, req, i_recv_length);
- if (i_recv_length < ep->ep.maxpacket) {
- if (i_recv_length == result) {
- req->req.actual += result;
- result = 0;
- }
- }
- } else {
- if ((req->req.actual == req->req.length) ||
- ((req->req.actual % ep->ep.maxpacket) != 0)) {
- result = 0;
- }
- }
-
- if (result == 0) {
- if ((req->req.actual % ep->ep.maxpacket) == 0) {
- if (req->zero) {
- req->zero = false;
- return 1;
- }
- }
- }
-
- if (req->req.actual > req->req.length) {
- dev_err(udc->dev, " Overrun Error\n");
- dev_err(udc->dev, " actual = %d, length = %d\n",
- req->req.actual, req->req.length);
- result = -EOVERFLOW;
- }
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_in_dma(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
- struct nbu2ss_req *req, u32 num, u32 length)
-{
- dma_addr_t p_buffer;
- u32 mpkt; /* MaxPacketSize */
- u32 lmpkt; /* Last Packet Data Size */
- u32 dmacnt; /* IN Data Size */
- u32 i_write_length;
- u32 data;
- int result = -EINVAL;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (req->dma_flag)
- return 1; /* DMA is forwarded */
-
-#ifdef USE_DMA
- if (req->req.actual == 0)
- _nbu2ss_dma_map_single(udc, ep, req, USB_DIR_IN);
-#endif
- req->dma_flag = true;
-
- /* MAX Packet Size */
- mpkt = _nbu2ss_readl(&preg->EP_REGS[num].EP_PCKT_ADRS) & EPN_MPKT;
-
- i_write_length = min(DMA_MAX_COUNT * mpkt, length);
-
- /*------------------------------------------------------------*/
- /* Number of transmission packets */
- if (mpkt < i_write_length) {
- dmacnt = i_write_length / mpkt;
- lmpkt = (i_write_length % mpkt) & ~(u32)0x3;
- if (lmpkt != 0)
- dmacnt++;
- else
- lmpkt = mpkt & ~(u32)0x3;
-
- } else {
- dmacnt = 1;
- lmpkt = i_write_length & ~(u32)0x3;
- }
-
- /* Packet setting */
- data = mpkt | (lmpkt << 16);
- _nbu2ss_writel(&preg->EP_DCR[num].EP_DCR2, data);
-
- /* Address setting */
- p_buffer = req->req.dma;
- p_buffer += req->req.actual;
- _nbu2ss_writel(&preg->EP_DCR[num].EP_TADR, (u32)p_buffer);
-
- /* Packet and DMA setting */
- data = ((dmacnt & 0xff) << 16) | DCR1_EPN_REQEN;
- _nbu2ss_writel(&preg->EP_DCR[num].EP_DCR1, data);
-
- /* Packet setting of EPC */
- data = dmacnt << 16;
- _nbu2ss_writel(&preg->EP_REGS[num].EP_LEN_DCNT, data);
-
-	/* DMA setting of EPC */
- _nbu2ss_bitset(&preg->EP_REGS[num].EP_DMA_CTRL, EPN_DMA_EN);
-
- result = i_write_length & ~(u32)0x3;
- req->div_len = result;
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_epn_in_pio(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
- struct nbu2ss_req *req, u32 length)
-{
- u8 *p_buffer;
- u32 i;
- u32 data;
- u32 i_word_length;
- union usb_reg_access temp_32;
- union usb_reg_access *p_buf_32 = NULL;
- int result = 0;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (req->dma_flag)
- return 1; /* DMA is forwarded */
-
- if (length > 0) {
- p_buffer = (u8 *)req->req.buf;
- p_buf_32 = (union usb_reg_access *)(p_buffer + req->req.actual);
-
- i_word_length = length / sizeof(u32);
- if (i_word_length > 0) {
- for (i = 0; i < i_word_length; i++) {
- _nbu2ss_writel(&preg->EP_REGS[ep->epnum - 1].EP_WRITE,
- p_buf_32->dw);
-
- p_buf_32++;
- }
- result = i_word_length * sizeof(u32);
- }
- }
-
- if (result != ep->ep.maxpacket) {
- data = length - result;
- temp_32.dw = 0;
- for (i = 0 ; i < data ; i++)
- temp_32.byte.DATA[i] = p_buf_32->byte.DATA[i];
-
- _nbu2ss_ep_in_end(udc, ep->epnum, temp_32.dw, data);
- result += data;
- }
-
- req->div_len = result;
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_epn_in_data(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
- struct nbu2ss_req *req, u32 data_size)
-{
- u32 num;
- int nret = 1;
-
- if (ep->epnum == 0)
- return -EINVAL;
-
- num = ep->epnum - 1;
-
- if ((ep->ep_type != USB_ENDPOINT_XFER_INT) && (req->req.dma != 0) &&
- (data_size >= sizeof(u32))) {
- nret = _nbu2ss_in_dma(udc, ep, req, num, data_size);
- } else {
- data_size = min_t(u32, data_size, ep->ep.maxpacket);
- nret = _nbu2ss_epn_in_pio(udc, ep, req, data_size);
- }
-
- return nret;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_epn_in_transfer(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep, struct nbu2ss_req *req)
-{
- u32 num;
- u32 i_buf_size;
- int result = 0;
- u32 status;
-
- if (ep->epnum == 0)
- return -EINVAL;
-
- num = ep->epnum - 1;
-
- status = _nbu2ss_readl(&udc->p_regs->EP_REGS[num].EP_STATUS);
-
- /*-------------------------------------------------------------*/
-	/* Check the FIFO state */
- if (req->req.actual == 0) {
- if ((status & EPN_IN_EMPTY) == 0)
- return 1; /* Not Empty */
-
- } else {
- if ((status & EPN_IN_FULL) != 0)
- return 1; /* Not Empty */
- }
-
- /*-------------------------------------------------------------*/
- /* Start transfer */
- i_buf_size = req->req.length - req->req.actual;
- if (i_buf_size > 0)
- result = _nbu2ss_epn_in_data(udc, ep, req, i_buf_size);
- else if (req->req.length == 0)
- _nbu2ss_zero_len_pkt(udc, ep->epnum);
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_start_transfer(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req,
- bool bflag)
-{
- int nret = -EINVAL;
-
- req->dma_flag = false;
- req->div_len = 0;
-
- if (req->req.length == 0) {
- req->zero = false;
- } else {
- if ((req->req.length % ep->ep.maxpacket) == 0)
- req->zero = req->req.zero;
- else
- req->zero = false;
- }
-
- if (ep->epnum == 0) {
- /* EP0 */
- switch (udc->ep0state) {
- case EP0_IN_DATA_PHASE:
- nret = _nbu2ss_ep0_in_transfer(udc, req);
- break;
-
- case EP0_OUT_DATA_PHASE:
- nret = _nbu2ss_ep0_out_transfer(udc, req);
- break;
-
- case EP0_IN_STATUS_PHASE:
- nret = EP0_send_NULL(udc, true);
- break;
-
- default:
- break;
- }
-
- } else {
- /* EPN */
- if (ep->direct == USB_DIR_OUT) {
- /* OUT */
- if (!bflag)
- nret = _nbu2ss_epn_out_transfer(udc, ep, req);
- } else {
- /* IN */
- nret = _nbu2ss_epn_in_transfer(udc, ep, req);
- }
- }
-
- return nret;
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_restert_transfer(struct nbu2ss_ep *ep)
-{
- u32 length;
- bool bflag = false;
- struct nbu2ss_req *req;
-
- req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
- if (!req)
- return;
-
- if (ep->epnum > 0) {
- length = _nbu2ss_readl(&ep->udc->p_regs->EP_REGS[ep->epnum - 1].EP_LEN_DCNT);
-
- length &= EPN_LDATA;
- if (length < ep->ep.maxpacket)
- bflag = true;
- }
-
- _nbu2ss_start_transfer(ep->udc, ep, req, bflag);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint Toggle Reset */
-static void _nbu2ss_endpoint_toggle_reset(struct nbu2ss_udc *udc, u8 ep_adrs)
-{
- u8 num;
- u32 data;
-
- if ((ep_adrs == 0) || (ep_adrs == 0x80))
- return;
-
- num = (ep_adrs & 0x7F) - 1;
-
- if (ep_adrs & USB_DIR_IN)
- data = EPN_IPIDCLR;
- else
- data = EPN_BCLR | EPN_OPIDCLR;
-
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint STALL set */
-static void _nbu2ss_set_endpoint_stall(struct nbu2ss_udc *udc,
- u8 ep_adrs, bool bstall)
-{
- u8 num, epnum;
- u32 data;
- struct nbu2ss_ep *ep;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if ((ep_adrs == 0) || (ep_adrs == 0x80)) {
- if (bstall) {
- /* Set STALL */
- _nbu2ss_bitset(&preg->EP0_CONTROL, EP0_STL);
- } else {
- /* Clear STALL */
- _nbu2ss_bitclr(&preg->EP0_CONTROL, EP0_STL);
- }
- } else {
- epnum = ep_adrs & USB_ENDPOINT_NUMBER_MASK;
- num = epnum - 1;
- ep = &udc->ep[epnum];
-
- if (bstall) {
- /* Set STALL */
- ep->halted = true;
-
- if (ep_adrs & USB_DIR_IN)
- data = EPN_BCLR | EPN_ISTL;
- else
- data = EPN_OSTL_EN | EPN_OSTL;
-
- _nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data);
- } else {
- if (ep_adrs & USB_DIR_IN) {
- _nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL
- , EPN_ISTL);
- } else {
- data =
- _nbu2ss_readl(&preg->EP_REGS[num].EP_CONTROL);
-
- data &= ~EPN_OSTL;
- data |= EPN_OSTL_EN;
-
- _nbu2ss_writel(&preg->EP_REGS[num].EP_CONTROL
- , data);
- }
-
- /* Clear STALL */
- ep->stalled = false;
- if (ep->halted) {
- ep->halted = false;
- _nbu2ss_restert_transfer(ep);
- }
- }
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_set_test_mode(struct nbu2ss_udc *udc, u32 mode)
-{
- u32 data;
-
- if (mode > MAX_TEST_MODE_NUM)
- return;
-
- dev_info(udc->dev, "SET FEATURE : test mode = %d\n", mode);
-
- data = _nbu2ss_readl(&udc->p_regs->USB_CONTROL);
- data &= ~TEST_FORCE_ENABLE;
- data |= mode << TEST_MODE_SHIFT;
-
- _nbu2ss_writel(&udc->p_regs->USB_CONTROL, data);
- _nbu2ss_bitset(&udc->p_regs->TEST_CONTROL, CS_TESTMODEEN);
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_set_feature_device(struct nbu2ss_udc *udc,
- u16 selector, u16 wIndex)
-{
- int result = -EOPNOTSUPP;
-
- switch (selector) {
- case USB_DEVICE_REMOTE_WAKEUP:
- if (wIndex == 0x0000) {
- udc->remote_wakeup = U2F_ENABLE;
- result = 0;
- }
- break;
-
- case USB_DEVICE_TEST_MODE:
- wIndex >>= 8;
- if (wIndex <= MAX_TEST_MODE_NUM)
- result = 0;
- break;
-
- default:
- break;
- }
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_get_ep_stall(struct nbu2ss_udc *udc, u8 ep_adrs)
-{
- u8 epnum;
- u32 data = 0, bit_data;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- epnum = ep_adrs & ~USB_ENDPOINT_DIR_MASK;
- if (epnum == 0) {
- data = _nbu2ss_readl(&preg->EP0_CONTROL);
- bit_data = EP0_STL;
-
- } else {
- data = _nbu2ss_readl(&preg->EP_REGS[epnum - 1].EP_CONTROL);
- if ((data & EPN_EN) == 0)
- return -1;
-
- if (ep_adrs & USB_ENDPOINT_DIR_MASK)
- bit_data = EPN_ISTL;
- else
- bit_data = EPN_OSTL;
- }
-
- if ((data & bit_data) == 0)
- return 0;
- return 1;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline int _nbu2ss_req_feature(struct nbu2ss_udc *udc, bool bset)
-{
- u8 recipient = (u8)(udc->ctrl.bRequestType & USB_RECIP_MASK);
- u8 direction = (u8)(udc->ctrl.bRequestType & USB_DIR_IN);
- u16 selector = le16_to_cpu(udc->ctrl.wValue);
- u16 wIndex = le16_to_cpu(udc->ctrl.wIndex);
- u8 ep_adrs;
- int result = -EOPNOTSUPP;
-
- if ((udc->ctrl.wLength != 0x0000) ||
- (direction != USB_DIR_OUT)) {
- return -EINVAL;
- }
-
- switch (recipient) {
- case USB_RECIP_DEVICE:
- if (bset)
- result =
- _nbu2ss_set_feature_device(udc, selector, wIndex);
- break;
-
- case USB_RECIP_ENDPOINT:
- if (0x0000 == (wIndex & 0xFF70)) {
- if (selector == USB_ENDPOINT_HALT) {
- ep_adrs = wIndex & 0xFF;
- if (!bset) {
- _nbu2ss_endpoint_toggle_reset(udc,
- ep_adrs);
- }
-
- _nbu2ss_set_endpoint_stall(udc, ep_adrs, bset);
-
- result = 0;
- }
- }
- break;
-
- default:
- break;
- }
-
- if (result >= 0)
- _nbu2ss_create_ep0_packet(udc, udc->ep0_buf, 0);
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline enum usb_device_speed _nbu2ss_get_speed(struct nbu2ss_udc *udc)
-{
- u32 data;
- enum usb_device_speed speed = USB_SPEED_FULL;
-
- data = _nbu2ss_readl(&udc->p_regs->USB_STATUS);
- if (data & HIGH_SPEED)
- speed = USB_SPEED_HIGH;
-
- return speed;
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_epn_set_stall(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep)
-{
- u8 ep_adrs;
- u32 regdata;
- int limit_cnt = 0;
-
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (ep->direct == USB_DIR_IN) {
- for (limit_cnt = 0
- ; limit_cnt < IN_DATA_EMPTY_COUNT
- ; limit_cnt++) {
- regdata = _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
-
- if ((regdata & EPN_IN_DATA) == 0)
- break;
-
- mdelay(1);
- }
- }
-
- ep_adrs = ep->epnum | ep->direct;
- _nbu2ss_set_endpoint_stall(udc, ep_adrs, 1);
-}
-
-/*-------------------------------------------------------------------------*/
-static int std_req_get_status(struct nbu2ss_udc *udc)
-{
- u32 length;
- u16 status_data = 0;
- u8 recipient = (u8)(udc->ctrl.bRequestType & USB_RECIP_MASK);
- u8 direction = (u8)(udc->ctrl.bRequestType & USB_DIR_IN);
- u8 ep_adrs;
- int result = -EINVAL;
-
- if ((udc->ctrl.wValue != 0x0000) || (direction != USB_DIR_IN))
- return result;
-
- length =
- min_t(u16, le16_to_cpu(udc->ctrl.wLength), sizeof(status_data));
- switch (recipient) {
- case USB_RECIP_DEVICE:
- if (udc->ctrl.wIndex == 0x0000) {
- if (udc->gadget.is_selfpowered)
- status_data |= BIT(USB_DEVICE_SELF_POWERED);
-
- if (udc->remote_wakeup)
- status_data |= BIT(USB_DEVICE_REMOTE_WAKEUP);
-
- result = 0;
- }
- break;
-
- case USB_RECIP_ENDPOINT:
- if (0x0000 == (le16_to_cpu(udc->ctrl.wIndex) & 0xFF70)) {
- ep_adrs = (u8)(le16_to_cpu(udc->ctrl.wIndex) & 0xFF);
- result = _nbu2ss_get_ep_stall(udc, ep_adrs);
-
- if (result > 0)
- status_data |= BIT(USB_ENDPOINT_HALT);
- }
- break;
-
- default:
- break;
- }
-
- if (result >= 0) {
- memcpy(udc->ep0_buf, &status_data, length);
- _nbu2ss_create_ep0_packet(udc, udc->ep0_buf, length);
- _nbu2ss_ep0_in_transfer(udc, &udc->ep0_req);
-
- } else {
- dev_err(udc->dev, " Error GET_STATUS\n");
- }
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int std_req_clear_feature(struct nbu2ss_udc *udc)
-{
- return _nbu2ss_req_feature(udc, false);
-}
-
-/*-------------------------------------------------------------------------*/
-static int std_req_set_feature(struct nbu2ss_udc *udc)
-{
- return _nbu2ss_req_feature(udc, true);
-}
-
-/*-------------------------------------------------------------------------*/
-static int std_req_set_address(struct nbu2ss_udc *udc)
-{
- int result = 0;
- u32 wValue = le16_to_cpu(udc->ctrl.wValue);
-
- if ((udc->ctrl.bRequestType != 0x00) ||
- (udc->ctrl.wIndex != 0x0000) ||
- (udc->ctrl.wLength != 0x0000)) {
- return -EINVAL;
- }
-
- if (wValue != (wValue & 0x007F))
- return -EINVAL;
-
- wValue <<= USB_ADRS_SHIFT;
-
- _nbu2ss_writel(&udc->p_regs->USB_ADDRESS, wValue);
- _nbu2ss_create_ep0_packet(udc, udc->ep0_buf, 0);
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int std_req_set_configuration(struct nbu2ss_udc *udc)
-{
- u32 config_value = (u32)(le16_to_cpu(udc->ctrl.wValue) & 0x00ff);
-
- if ((udc->ctrl.wIndex != 0x0000) ||
- (udc->ctrl.wLength != 0x0000) ||
- (udc->ctrl.bRequestType != 0x00)) {
- return -EINVAL;
- }
-
- udc->curr_config = config_value;
-
- if (config_value > 0) {
- _nbu2ss_bitset(&udc->p_regs->USB_CONTROL, CONF);
- udc->devstate = USB_STATE_CONFIGURED;
-
- } else {
- _nbu2ss_bitclr(&udc->p_regs->USB_CONTROL, CONF);
- udc->devstate = USB_STATE_ADDRESS;
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_read_request_data(struct nbu2ss_udc *udc, u32 *pdata)
-{
- *pdata = _nbu2ss_readl(&udc->p_regs->SETUP_DATA0);
- pdata++;
- *pdata = _nbu2ss_readl(&udc->p_regs->SETUP_DATA1);
-}
-
-/*-------------------------------------------------------------------------*/
-static inline int _nbu2ss_decode_request(struct nbu2ss_udc *udc)
-{
- bool bcall_back = true;
- int nret = -EINVAL;
- struct usb_ctrlrequest *p_ctrl;
-
- p_ctrl = &udc->ctrl;
- _nbu2ss_read_request_data(udc, (u32 *)p_ctrl);
-
- /* ep0 state control */
- if (p_ctrl->wLength == 0) {
- udc->ep0state = EP0_IN_STATUS_PHASE;
-
- } else {
- if (p_ctrl->bRequestType & USB_DIR_IN)
- udc->ep0state = EP0_IN_DATA_PHASE;
- else
- udc->ep0state = EP0_OUT_DATA_PHASE;
- }
-
- if ((p_ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
- switch (p_ctrl->bRequest) {
- case USB_REQ_GET_STATUS:
- nret = std_req_get_status(udc);
- bcall_back = false;
- break;
-
- case USB_REQ_CLEAR_FEATURE:
- nret = std_req_clear_feature(udc);
- bcall_back = false;
- break;
-
- case USB_REQ_SET_FEATURE:
- nret = std_req_set_feature(udc);
- bcall_back = false;
- break;
-
- case USB_REQ_SET_ADDRESS:
- nret = std_req_set_address(udc);
- bcall_back = false;
- break;
-
- case USB_REQ_SET_CONFIGURATION:
- nret = std_req_set_configuration(udc);
- break;
-
- default:
- break;
- }
- }
-
- if (!bcall_back) {
- if (udc->ep0state == EP0_IN_STATUS_PHASE) {
- if (nret >= 0) {
- /*--------------------------------------*/
- /* Status Stage */
- nret = EP0_send_NULL(udc, true);
- }
- }
-
- } else {
- spin_unlock(&udc->lock);
- nret = udc->driver->setup(&udc->gadget, &udc->ctrl);
- spin_lock(&udc->lock);
- }
-
- if (nret < 0)
- udc->ep0state = EP0_IDLE;
-
- return nret;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline int _nbu2ss_ep0_in_data_stage(struct nbu2ss_udc *udc)
-{
- int nret;
- struct nbu2ss_req *req;
- struct nbu2ss_ep *ep = &udc->ep[0];
-
- req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
- if (!req)
- req = &udc->ep0_req;
-
- req->req.actual += req->div_len;
- req->div_len = 0;
-
- nret = _nbu2ss_ep0_in_transfer(udc, req);
- if (nret == 0) {
- udc->ep0state = EP0_OUT_STATUS_PAHSE;
- EP0_receive_NULL(udc, true);
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline int _nbu2ss_ep0_out_data_stage(struct nbu2ss_udc *udc)
-{
- int nret;
- struct nbu2ss_req *req;
- struct nbu2ss_ep *ep = &udc->ep[0];
-
- req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
- if (!req)
- req = &udc->ep0_req;
-
- nret = _nbu2ss_ep0_out_transfer(udc, req);
- if (nret == 0) {
- udc->ep0state = EP0_IN_STATUS_PHASE;
- EP0_send_NULL(udc, true);
-
- } else if (nret < 0) {
- _nbu2ss_bitset(&udc->p_regs->EP0_CONTROL, EP0_BCLR);
- req->req.status = nret;
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline int _nbu2ss_ep0_status_stage(struct nbu2ss_udc *udc)
-{
- struct nbu2ss_req *req;
- struct nbu2ss_ep *ep = &udc->ep[0];
-
- req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
- if (!req) {
- req = &udc->ep0_req;
- if (req->req.complete)
- req->req.complete(&ep->ep, &req->req);
-
- } else {
- if (req->req.complete)
- _nbu2ss_ep_done(ep, req, 0);
- }
-
- udc->ep0state = EP0_IDLE;
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_ep0_int(struct nbu2ss_udc *udc)
-{
- int i;
- u32 status;
- u32 intr;
- int nret = -1;
-
- status = _nbu2ss_readl(&udc->p_regs->EP0_STATUS);
- intr = status & EP0_STATUS_RW_BIT;
- _nbu2ss_writel(&udc->p_regs->EP0_STATUS, ~intr);
-
- status &= (SETUP_INT | EP0_IN_INT | EP0_OUT_INT
- | STG_END_INT | EP0_OUT_NULL_INT);
-
- if (status == 0) {
- dev_info(udc->dev, "%s Not Decode Interrupt\n", __func__);
- dev_info(udc->dev, "EP0_STATUS = 0x%08x\n", intr);
- return;
- }
-
- if (udc->gadget.speed == USB_SPEED_UNKNOWN)
- udc->gadget.speed = _nbu2ss_get_speed(udc);
-
- for (i = 0; i < EP0_END_XFER; i++) {
- switch (udc->ep0state) {
- case EP0_IDLE:
- if (status & SETUP_INT) {
- status = 0;
- nret = _nbu2ss_decode_request(udc);
- }
- break;
-
- case EP0_IN_DATA_PHASE:
- if (status & EP0_IN_INT) {
- status &= ~EP0_IN_INT;
- nret = _nbu2ss_ep0_in_data_stage(udc);
- }
- break;
-
- case EP0_OUT_DATA_PHASE:
- if (status & EP0_OUT_INT) {
- status &= ~EP0_OUT_INT;
- nret = _nbu2ss_ep0_out_data_stage(udc);
- }
- break;
-
- case EP0_IN_STATUS_PHASE:
- if ((status & STG_END_INT) || (status & SETUP_INT)) {
- status &= ~(STG_END_INT | EP0_IN_INT);
- nret = _nbu2ss_ep0_status_stage(udc);
- }
- break;
-
- case EP0_OUT_STATUS_PAHSE:
- if ((status & STG_END_INT) || (status & SETUP_INT) ||
- (status & EP0_OUT_NULL_INT)) {
- status &= ~(STG_END_INT
- | EP0_OUT_INT
- | EP0_OUT_NULL_INT);
-
- nret = _nbu2ss_ep0_status_stage(udc);
- }
-
- break;
-
- default:
- status = 0;
- break;
- }
-
- if (status == 0)
- break;
- }
-
- if (nret < 0) {
- /* Send Stall */
- _nbu2ss_set_endpoint_stall(udc, 0, true);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_ep_done(struct nbu2ss_ep *ep,
- struct nbu2ss_req *req,
- int status)
-{
- struct nbu2ss_udc *udc = ep->udc;
-
- list_del_init(&req->queue);
-
- if (status == -ECONNRESET)
- _nbu2ss_fifo_flush(udc, ep);
-
- if (likely(req->req.status == -EINPROGRESS))
- req->req.status = status;
-
- if (ep->stalled) {
- _nbu2ss_epn_set_stall(udc, ep);
- } else {
- if (!list_empty(&ep->queue))
- _nbu2ss_restert_transfer(ep);
- }
-
-#ifdef USE_DMA
- if ((ep->direct == USB_DIR_OUT) && (ep->epnum > 0) &&
- (req->req.dma != 0))
- _nbu2ss_dma_unmap_single(udc, ep, req, USB_DIR_OUT);
-#endif
-
- spin_unlock(&udc->lock);
- req->req.complete(&ep->ep, &req->req);
- spin_lock(&udc->lock);
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_epn_in_int(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req)
-{
- int result = 0;
- u32 status;
-
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (req->dma_flag)
- return; /* DMA is forwarded */
-
- req->req.actual += req->div_len;
- req->div_len = 0;
-
- if (req->req.actual != req->req.length) {
- /*---------------------------------------------------------*/
- /* remainder of data */
- result = _nbu2ss_epn_in_transfer(udc, ep, req);
-
- } else {
- if (req->zero && ((req->req.actual % ep->ep.maxpacket) == 0)) {
- status =
- _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
-
- if ((status & EPN_IN_FULL) == 0) {
- /*-----------------------------------------*/
- /* 0 Length Packet */
- req->zero = false;
- _nbu2ss_zero_len_pkt(udc, ep->epnum);
- }
- return;
- }
- }
-
- if (result <= 0) {
- /*---------------------------------------------------------*/
- /* Complete */
- _nbu2ss_ep_done(ep, req, result);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_epn_out_int(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req)
-{
- int result;
-
- result = _nbu2ss_epn_out_transfer(udc, ep, req);
- if (result <= 0)
- _nbu2ss_ep_done(ep, req, result);
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_epn_in_dma_int(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req)
-{
- u32 mpkt;
- u32 size;
- struct usb_request *preq;
-
- preq = &req->req;
-
- if (!req->dma_flag)
- return;
-
- preq->actual += req->div_len;
- req->div_len = 0;
- req->dma_flag = false;
-
-#ifdef USE_DMA
- _nbu2ss_dma_unmap_single(udc, ep, req, USB_DIR_IN);
-#endif
-
- if (preq->actual != preq->length) {
- _nbu2ss_epn_in_transfer(udc, ep, req);
- } else {
- mpkt = ep->ep.maxpacket;
- size = preq->actual % mpkt;
- if (size > 0) {
- if (((preq->actual & 0x03) == 0) && (size < mpkt))
- _nbu2ss_ep_in_end(udc, ep->epnum, 0, 0);
- } else {
- _nbu2ss_epn_in_int(udc, ep, req);
- }
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_epn_out_dma_int(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req)
-{
- int i;
- u32 num;
- u32 dmacnt, ep_dmacnt;
- u32 mpkt;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- num = ep->epnum - 1;
-
- if (req->req.actual == req->req.length) {
- if ((req->req.length % ep->ep.maxpacket) && !req->zero) {
- req->div_len = 0;
- req->dma_flag = false;
- _nbu2ss_ep_done(ep, req, 0);
- return;
- }
- }
-
- ep_dmacnt = _nbu2ss_readl(&preg->EP_REGS[num].EP_LEN_DCNT)
- & EPN_DMACNT;
- ep_dmacnt >>= 16;
-
- for (i = 0; i < EPC_PLL_LOCK_COUNT; i++) {
- dmacnt = _nbu2ss_readl(&preg->EP_DCR[num].EP_DCR1)
- & DCR1_EPN_DMACNT;
- dmacnt >>= 16;
- if (ep_dmacnt == dmacnt)
- break;
- }
-
- _nbu2ss_bitclr(&preg->EP_DCR[num].EP_DCR1, DCR1_EPN_REQEN);
-
- if (dmacnt != 0) {
- mpkt = ep->ep.maxpacket;
- if ((req->div_len % mpkt) == 0)
- req->div_len -= mpkt * dmacnt;
- }
-
- if ((req->req.actual % ep->ep.maxpacket) > 0) {
- if (req->req.actual == req->div_len) {
- req->div_len = 0;
- req->dma_flag = false;
- _nbu2ss_ep_done(ep, req, 0);
- return;
- }
- }
-
- req->req.actual += req->div_len;
- req->div_len = 0;
- req->dma_flag = false;
-
- _nbu2ss_epn_out_int(udc, ep, req);
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_epn_int(struct nbu2ss_udc *udc, u32 epnum)
-{
- u32 num;
- u32 status;
-
- struct nbu2ss_req *req;
- struct nbu2ss_ep *ep = &udc->ep[epnum];
-
- num = epnum - 1;
-
- /* Interrupt Status */
- status = _nbu2ss_readl(&udc->p_regs->EP_REGS[num].EP_STATUS);
-
- /* Interrupt Clear */
- _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_STATUS, ~status);
-
- req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
- if (!req) {
- /* pr_warn("=== %s(%d) req == NULL\n", __func__, epnum); */
- return;
- }
-
- if (status & EPN_OUT_END_INT) {
- status &= ~EPN_OUT_INT;
- _nbu2ss_epn_out_dma_int(udc, ep, req);
- }
-
- if (status & EPN_OUT_INT)
- _nbu2ss_epn_out_int(udc, ep, req);
-
- if (status & EPN_IN_END_INT) {
- status &= ~EPN_IN_INT;
- _nbu2ss_epn_in_dma_int(udc, ep, req);
- }
-
- if (status & EPN_IN_INT)
- _nbu2ss_epn_in_int(udc, ep, req);
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_ep_int(struct nbu2ss_udc *udc, u32 epnum)
-{
- if (epnum == 0)
- _nbu2ss_ep0_int(udc);
- else
- _nbu2ss_epn_int(udc, epnum);
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_ep0_enable(struct nbu2ss_udc *udc)
-{
- _nbu2ss_bitset(&udc->p_regs->EP0_CONTROL, (EP0_AUTO | EP0_BCLR));
- _nbu2ss_writel(&udc->p_regs->EP0_INT_ENA, EP0_INT_EN_BIT);
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_nuke(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- int status)
-{
- struct nbu2ss_req *req, *n;
-
- /* Endpoint Disable */
- _nbu2ss_epn_exit(udc, ep);
-
- /* DMA Disable */
- _nbu2ss_ep_dma_exit(udc, ep);
-
- if (list_empty(&ep->queue))
- return 0;
-
- /* called with irqs blocked */
- list_for_each_entry_safe(req, n, &ep->queue, queue) {
- _nbu2ss_ep_done(ep, req, status);
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_quiesce(struct nbu2ss_udc *udc)
-{
- struct nbu2ss_ep *ep;
-
- udc->gadget.speed = USB_SPEED_UNKNOWN;
-
- _nbu2ss_nuke(udc, &udc->ep[0], -ESHUTDOWN);
-
- /* Endpoint n */
- list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
- _nbu2ss_nuke(udc, ep, -ESHUTDOWN);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_pullup(struct nbu2ss_udc *udc, int is_on)
-{
- u32 reg_dt;
-
- if (udc->vbus_active == 0)
- return -ESHUTDOWN;
-
- if (is_on) {
- /* D+ Pullup */
- if (udc->driver) {
- reg_dt = (_nbu2ss_readl(&udc->p_regs->USB_CONTROL)
- | PUE2) & ~(u32)CONNECTB;
-
- _nbu2ss_writel(&udc->p_regs->USB_CONTROL, reg_dt);
- }
-
- } else {
- /* D+ Pulldown */
- reg_dt = (_nbu2ss_readl(&udc->p_regs->USB_CONTROL) | CONNECTB)
- & ~(u32)PUE2;
-
- _nbu2ss_writel(&udc->p_regs->USB_CONTROL, reg_dt);
- udc->gadget.speed = USB_SPEED_UNKNOWN;
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_fifo_flush(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
-{
- struct fc_regs __iomem *p = udc->p_regs;
-
- if (udc->vbus_active == 0)
- return;
-
- if (ep->epnum == 0) {
- /* EP0 */
- _nbu2ss_bitset(&p->EP0_CONTROL, EP0_BCLR);
-
- } else {
- /* EPN */
- _nbu2ss_ep_dma_abort(udc, ep);
- _nbu2ss_bitset(&p->EP_REGS[ep->epnum - 1].EP_CONTROL, EPN_BCLR);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_enable_controller(struct nbu2ss_udc *udc)
-{
- int waitcnt = 0;
-
- if (udc->udc_enabled)
- return 0;
-
- /* Reset */
- _nbu2ss_bitset(&udc->p_regs->EPCTR, (DIRPD | EPC_RST));
- udelay(EPC_RST_DISABLE_TIME); /* 1us wait */
-
- _nbu2ss_bitclr(&udc->p_regs->EPCTR, DIRPD);
- mdelay(EPC_DIRPD_DISABLE_TIME); /* 1ms wait */
-
- _nbu2ss_bitclr(&udc->p_regs->EPCTR, EPC_RST);
-
- _nbu2ss_writel(&udc->p_regs->AHBSCTR, WAIT_MODE);
-
- _nbu2ss_writel(&udc->p_regs->AHBMCTR,
- HBUSREQ_MODE | HTRANS_MODE | WBURST_TYPE);
-
- while (!(_nbu2ss_readl(&udc->p_regs->EPCTR) & PLL_LOCK)) {
- waitcnt++;
- udelay(1); /* 1us wait */
- if (waitcnt == EPC_PLL_LOCK_COUNT) {
- dev_err(udc->dev, "*** Reset Cancel failed\n");
- return -EINVAL;
- }
- }
-
- _nbu2ss_bitset(&udc->p_regs->UTMI_CHARACTER_1, USB_SQUSET);
-
- _nbu2ss_bitset(&udc->p_regs->USB_CONTROL, (INT_SEL | SOF_RCV));
-
- /* EP0 */
- _nbu2ss_ep0_enable(udc);
-
- /* USB Interrupt Enable */
- _nbu2ss_bitset(&udc->p_regs->USB_INT_ENA, USB_INT_EN_BIT);
-
- udc->udc_enabled = true;
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_reset_controller(struct nbu2ss_udc *udc)
-{
- _nbu2ss_bitset(&udc->p_regs->EPCTR, EPC_RST);
- _nbu2ss_bitclr(&udc->p_regs->EPCTR, EPC_RST);
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_disable_controller(struct nbu2ss_udc *udc)
-{
- if (udc->udc_enabled) {
- udc->udc_enabled = false;
- _nbu2ss_reset_controller(udc);
- _nbu2ss_bitset(&udc->p_regs->EPCTR, (DIRPD | EPC_RST));
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_check_vbus(struct nbu2ss_udc *udc)
-{
- int nret;
- u32 reg_dt;
-
- /* chattering */
- mdelay(VBUS_CHATTERING_MDELAY); /* wait (ms) */
-
- /* VBUS ON Check*/
- reg_dt = gpiod_get_value(vbus_gpio);
- if (reg_dt == 0) {
- udc->linux_suspended = 0;
-
- _nbu2ss_reset_controller(udc);
- dev_info(udc->dev, " ----- VBUS OFF\n");
-
- if (udc->vbus_active == 1) {
- /* VBUS OFF */
- udc->vbus_active = 0;
- if (udc->usb_suspended) {
- udc->usb_suspended = 0;
- /* _nbu2ss_reset_controller(udc); */
- }
- udc->devstate = USB_STATE_NOTATTACHED;
-
- _nbu2ss_quiesce(udc);
- if (udc->driver) {
- spin_unlock(&udc->lock);
- udc->driver->disconnect(&udc->gadget);
- spin_lock(&udc->lock);
- }
-
- _nbu2ss_disable_controller(udc);
- }
- } else {
- mdelay(5); /* wait (5ms) */
- reg_dt = gpiod_get_value(vbus_gpio);
- if (reg_dt == 0)
- return;
-
- dev_info(udc->dev, " ----- VBUS ON\n");
-
- if (udc->linux_suspended)
- return;
-
- if (udc->vbus_active == 0) {
- /* VBUS ON */
- udc->vbus_active = 1;
- udc->devstate = USB_STATE_POWERED;
-
- nret = _nbu2ss_enable_controller(udc);
- if (nret < 0) {
- _nbu2ss_disable_controller(udc);
- udc->vbus_active = 0;
- return;
- }
-
- _nbu2ss_pullup(udc, 1);
-
-#ifdef UDC_DEBUG_DUMP
- _nbu2ss_dump_register(udc);
-#endif /* UDC_DEBUG_DUMP */
-
- } else {
- if (udc->devstate == USB_STATE_POWERED)
- _nbu2ss_pullup(udc, 1);
- }
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_int_bus_reset(struct nbu2ss_udc *udc)
-{
- udc->devstate = USB_STATE_DEFAULT;
- udc->remote_wakeup = 0;
-
- _nbu2ss_quiesce(udc);
-
- udc->ep0state = EP0_IDLE;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_int_usb_resume(struct nbu2ss_udc *udc)
-{
- if (udc->usb_suspended == 1) {
- udc->usb_suspended = 0;
- if (udc->driver && udc->driver->resume) {
- spin_unlock(&udc->lock);
- udc->driver->resume(&udc->gadget);
- spin_lock(&udc->lock);
- }
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_int_usb_suspend(struct nbu2ss_udc *udc)
-{
- u32 reg_dt;
-
- if (udc->usb_suspended == 0) {
- reg_dt = gpiod_get_value(vbus_gpio);
-
- if (reg_dt == 0)
- return;
-
- udc->usb_suspended = 1;
- if (udc->driver && udc->driver->suspend) {
- spin_unlock(&udc->lock);
- udc->driver->suspend(&udc->gadget);
- spin_lock(&udc->lock);
- }
-
- _nbu2ss_bitset(&udc->p_regs->USB_CONTROL, SUSPEND);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-/* VBUS (GPIO153) Interrupt */
-static irqreturn_t _nbu2ss_vbus_irq(int irq, void *_udc)
-{
- struct nbu2ss_udc *udc = (struct nbu2ss_udc *)_udc;
-
- spin_lock(&udc->lock);
- _nbu2ss_check_vbus(udc);
- spin_unlock(&udc->lock);
-
- return IRQ_HANDLED;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Interrupt (udc) */
-static irqreturn_t _nbu2ss_udc_irq(int irq, void *_udc)
-{
- u8 suspend_flag = 0;
- u32 status;
- u32 epnum, int_bit;
-
- struct nbu2ss_udc *udc = (struct nbu2ss_udc *)_udc;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (gpiod_get_value(vbus_gpio) == 0) {
- _nbu2ss_writel(&preg->USB_INT_STA, ~USB_INT_STA_RW);
- _nbu2ss_writel(&preg->USB_INT_ENA, 0);
- return IRQ_HANDLED;
- }
-
- spin_lock(&udc->lock);
-
- for (;;) {
- if (gpiod_get_value(vbus_gpio) == 0) {
- _nbu2ss_writel(&preg->USB_INT_STA, ~USB_INT_STA_RW);
- _nbu2ss_writel(&preg->USB_INT_ENA, 0);
- status = 0;
- } else {
- status = _nbu2ss_readl(&preg->USB_INT_STA);
- }
-
- if (status == 0)
- break;
-
- _nbu2ss_writel(&preg->USB_INT_STA, ~(status & USB_INT_STA_RW));
-
- if (status & USB_RST_INT) {
- /* USB Reset */
- _nbu2ss_int_bus_reset(udc);
- }
-
- if (status & RSUM_INT) {
- /* Resume */
- _nbu2ss_int_usb_resume(udc);
- }
-
- if (status & SPND_INT) {
- /* Suspend */
- suspend_flag = 1;
- }
-
- if (status & EPN_INT) {
- /* EP INT */
- int_bit = status >> 8;
-
- for (epnum = 0; epnum < NUM_ENDPOINTS; epnum++) {
- if (0x01 & int_bit)
- _nbu2ss_ep_int(udc, epnum);
-
- int_bit >>= 1;
-
- if (int_bit == 0)
- break;
- }
- }
- }
-
- if (suspend_flag)
- _nbu2ss_int_usb_suspend(udc);
-
- spin_unlock(&udc->lock);
-
- return IRQ_HANDLED;
-}
-
-/*-------------------------------------------------------------------------*/
-/* usb_ep_ops */
-static int nbu2ss_ep_enable(struct usb_ep *_ep,
- const struct usb_endpoint_descriptor *desc)
-{
- u8 ep_type;
- unsigned long flags;
-
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
-
- if (!_ep || !desc) {
- pr_err(" *** %s, bad param\n", __func__);
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
- if (!ep->udc) {
- pr_err(" *** %s, ep == NULL !!\n", __func__);
- return -EINVAL;
- }
-
- ep_type = usb_endpoint_type(desc);
- if ((ep_type == USB_ENDPOINT_XFER_CONTROL) ||
- (ep_type == USB_ENDPOINT_XFER_ISOC)) {
- pr_err(" *** %s, bat bmAttributes\n", __func__);
- return -EINVAL;
- }
-
- udc = ep->udc;
- if (udc->vbus_active == 0)
- return -ESHUTDOWN;
-
- if ((!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
- dev_err(ep->udc->dev, " *** %s, udc !!\n", __func__);
- return -ESHUTDOWN;
- }
-
- spin_lock_irqsave(&udc->lock, flags);
-
- ep->desc = desc;
- ep->epnum = usb_endpoint_num(desc);
- ep->direct = desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
- ep->ep_type = ep_type;
- ep->wedged = 0;
- ep->halted = false;
- ep->stalled = false;
-
- ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
-
- /* DMA setting */
- _nbu2ss_ep_dma_init(udc, ep);
-
- /* Endpoint setting */
- _nbu2ss_ep_init(udc, ep);
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_ep_disable(struct usb_ep *_ep)
-{
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
- unsigned long flags;
-
- if (!_ep) {
- pr_err(" *** %s, bad param\n", __func__);
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
- if (!ep->udc) {
- pr_err("udc: *** %s, ep == NULL !!\n", __func__);
- return -EINVAL;
- }
-
- udc = ep->udc;
- if (udc->vbus_active == 0)
- return -ESHUTDOWN;
-
- spin_lock_irqsave(&udc->lock, flags);
- _nbu2ss_nuke(udc, ep, -EINPROGRESS); /* dequeue request */
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static struct usb_request *nbu2ss_ep_alloc_request(struct usb_ep *ep,
- gfp_t gfp_flags)
-{
- struct nbu2ss_req *req;
-
- req = kzalloc(sizeof(*req), gfp_flags);
- if (!req)
- return NULL;
-
-#ifdef USE_DMA
- req->req.dma = DMA_ADDR_INVALID;
-#endif
- INIT_LIST_HEAD(&req->queue);
-
- return &req->req;
-}
-
-/*-------------------------------------------------------------------------*/
-static void nbu2ss_ep_free_request(struct usb_ep *_ep,
- struct usb_request *_req)
-{
- struct nbu2ss_req *req;
-
- if (_req) {
- req = container_of(_req, struct nbu2ss_req, req);
-
- kfree(req);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_ep_queue(struct usb_ep *_ep,
- struct usb_request *_req, gfp_t gfp_flags)
-{
- struct nbu2ss_req *req;
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
- unsigned long flags;
- bool bflag;
- int result = -EINVAL;
-
- /* catch various bogus parameters */
- if (!_ep || !_req) {
- if (!_ep)
- pr_err("udc: %s --- _ep == NULL\n", __func__);
-
- if (!_req)
- pr_err("udc: %s --- _req == NULL\n", __func__);
-
- return -EINVAL;
- }
-
- req = container_of(_req, struct nbu2ss_req, req);
- if (unlikely(!_req->complete ||
- !_req->buf ||
- !list_empty(&req->queue))) {
- if (!_req->complete)
- pr_err("udc: %s --- !_req->complete\n", __func__);
-
- if (!_req->buf)
- pr_err("udc:%s --- !_req->buf\n", __func__);
-
- if (!list_empty(&req->queue))
- pr_err("%s --- !list_empty(&req->queue)\n", __func__);
-
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
- udc = ep->udc;
-
- if (udc->vbus_active == 0) {
- dev_info(udc->dev, "Can't ep_queue (VBUS OFF)\n");
- return -ESHUTDOWN;
- }
-
- if (unlikely(!udc->driver)) {
- dev_err(udc->dev, "%s, bogus device state %p\n", __func__,
- udc->driver);
- return -ESHUTDOWN;
- }
-
- spin_lock_irqsave(&udc->lock, flags);
-
-#ifdef USE_DMA
- if ((uintptr_t)req->req.buf & 0x3)
- req->unaligned = true;
- else
- req->unaligned = false;
-
- if (req->unaligned) {
- if (!ep->virt_buf) {
- ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE,
- &ep->phys_buf,
- GFP_ATOMIC | GFP_DMA);
- if (!ep->virt_buf) {
- spin_unlock_irqrestore(&udc->lock, flags);
- return -ENOMEM;
- }
- }
- if (ep->epnum > 0) {
- if (ep->direct == USB_DIR_IN)
- memcpy(ep->virt_buf, req->req.buf,
- req->req.length);
- }
- }
-
- if ((ep->epnum > 0) && (ep->direct == USB_DIR_OUT) &&
- (req->req.dma != 0))
- _nbu2ss_dma_map_single(udc, ep, req, USB_DIR_OUT);
-#endif
-
- _req->status = -EINPROGRESS;
- _req->actual = 0;
-
- bflag = list_empty(&ep->queue);
- list_add_tail(&req->queue, &ep->queue);
-
- if (bflag && !ep->stalled) {
- result = _nbu2ss_start_transfer(udc, ep, req, false);
- if (result < 0) {
- dev_err(udc->dev, " *** %s, result = %d\n", __func__,
- result);
- list_del(&req->queue);
- } else if ((ep->epnum > 0) && (ep->direct == USB_DIR_OUT)) {
-#ifdef USE_DMA
- if (req->req.length < 4 &&
- req->req.length == req->req.actual)
-#else
- if (req->req.length == req->req.actual)
-#endif
- _nbu2ss_ep_done(ep, req, result);
- }
- }
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
-{
- struct nbu2ss_req *req;
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
- unsigned long flags;
-
- /* catch various bogus parameters */
- if (!_ep || !_req) {
- /* pr_err("%s, bad param(1)\n", __func__); */
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
-
- udc = ep->udc;
- if (!udc)
- return -EINVAL;
-
- spin_lock_irqsave(&udc->lock, flags);
-
- /* make sure it's actually queued on this endpoint */
- list_for_each_entry(req, &ep->queue, queue) {
- if (&req->req == _req) {
- _nbu2ss_ep_done(ep, req, -ECONNRESET);
- spin_unlock_irqrestore(&udc->lock, flags);
- return 0;
- }
- }
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- pr_debug("%s no queue(EINVAL)\n", __func__);
-
- return -EINVAL;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_ep_set_halt(struct usb_ep *_ep, int value)
-{
- u8 ep_adrs;
- unsigned long flags;
-
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
-
- if (!_ep) {
- pr_err("%s, bad param\n", __func__);
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
-
- udc = ep->udc;
- if (!udc) {
- dev_err(ep->udc->dev, " *** %s, bad udc\n", __func__);
- return -EINVAL;
- }
-
- spin_lock_irqsave(&udc->lock, flags);
-
- ep_adrs = ep->epnum | ep->direct;
- if (value == 0) {
- _nbu2ss_set_endpoint_stall(udc, ep_adrs, value);
- ep->stalled = false;
- } else {
- if (list_empty(&ep->queue))
- _nbu2ss_epn_set_stall(udc, ep);
- else
- ep->stalled = true;
- }
-
- if (value == 0)
- ep->wedged = 0;
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-static int nbu2ss_ep_set_wedge(struct usb_ep *_ep)
-{
- return nbu2ss_ep_set_halt(_ep, 1);
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_ep_fifo_status(struct usb_ep *_ep)
-{
- u32 data;
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
- unsigned long flags;
- struct fc_regs __iomem *preg;
-
- if (!_ep) {
- pr_err("%s, bad param\n", __func__);
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
-
- udc = ep->udc;
- if (!udc) {
- dev_err(ep->udc->dev, "%s, bad udc\n", __func__);
- return -EINVAL;
- }
-
- preg = udc->p_regs;
-
- data = gpiod_get_value(vbus_gpio);
- if (data == 0)
- return -EINVAL;
-
- spin_lock_irqsave(&udc->lock, flags);
-
- if (ep->epnum == 0) {
- data = _nbu2ss_readl(&preg->EP0_LENGTH) & EP0_LDATA;
-
- } else {
- data = _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_LEN_DCNT)
- & EPN_LDATA;
- }
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static void nbu2ss_ep_fifo_flush(struct usb_ep *_ep)
-{
- u32 data;
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
- unsigned long flags;
-
- if (!_ep) {
- pr_err("udc: %s, bad param\n", __func__);
- return;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
-
- udc = ep->udc;
- if (!udc) {
- dev_err(ep->udc->dev, "%s, bad udc\n", __func__);
- return;
- }
-
- data = gpiod_get_value(vbus_gpio);
- if (data == 0)
- return;
-
- spin_lock_irqsave(&udc->lock, flags);
- _nbu2ss_fifo_flush(udc, ep);
- spin_unlock_irqrestore(&udc->lock, flags);
-}
-
-/*-------------------------------------------------------------------------*/
-static const struct usb_ep_ops nbu2ss_ep_ops = {
- .enable = nbu2ss_ep_enable,
- .disable = nbu2ss_ep_disable,
-
- .alloc_request = nbu2ss_ep_alloc_request,
- .free_request = nbu2ss_ep_free_request,
-
- .queue = nbu2ss_ep_queue,
- .dequeue = nbu2ss_ep_dequeue,
-
- .set_halt = nbu2ss_ep_set_halt,
- .set_wedge = nbu2ss_ep_set_wedge,
-
- .fifo_status = nbu2ss_ep_fifo_status,
- .fifo_flush = nbu2ss_ep_fifo_flush,
-};
-
-/*-------------------------------------------------------------------------*/
-/* usb_gadget_ops */
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_get_frame(struct usb_gadget *pgadget)
-{
- u32 data;
- struct nbu2ss_udc *udc;
-
- if (!pgadget) {
- pr_err("udc: %s, bad param\n", __func__);
- return -EINVAL;
- }
-
- udc = container_of(pgadget, struct nbu2ss_udc, gadget);
- data = gpiod_get_value(vbus_gpio);
- if (data == 0)
- return -EINVAL;
-
- return _nbu2ss_readl(&udc->p_regs->USB_ADDRESS) & FRAME;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_wakeup(struct usb_gadget *pgadget)
-{
- int i;
- u32 data;
-
- struct nbu2ss_udc *udc;
-
- if (!pgadget) {
- pr_err("%s, bad param\n", __func__);
- return -EINVAL;
- }
-
- udc = container_of(pgadget, struct nbu2ss_udc, gadget);
-
- data = gpiod_get_value(vbus_gpio);
- if (data == 0) {
- dev_warn(&pgadget->dev, "VBUS LEVEL = %d\n", data);
- return -EINVAL;
- }
-
- _nbu2ss_bitset(&udc->p_regs->EPCTR, PLL_RESUME);
-
- for (i = 0; i < EPC_PLL_LOCK_COUNT; i++) {
- data = _nbu2ss_readl(&udc->p_regs->EPCTR);
-
- if (data & PLL_LOCK)
- break;
- }
-
- _nbu2ss_bitclr(&udc->p_regs->EPCTR, PLL_RESUME);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_set_selfpowered(struct usb_gadget *pgadget,
- int is_selfpowered)
-{
- struct nbu2ss_udc *udc;
- unsigned long flags;
-
- if (!pgadget) {
- pr_err("%s, bad param\n", __func__);
- return -EINVAL;
- }
-
- udc = container_of(pgadget, struct nbu2ss_udc, gadget);
-
- spin_lock_irqsave(&udc->lock, flags);
- pgadget->is_selfpowered = (is_selfpowered != 0);
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_vbus_session(struct usb_gadget *pgadget, int is_active)
-{
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_vbus_draw(struct usb_gadget *pgadget, unsigned int mA)
-{
- struct nbu2ss_udc *udc;
- unsigned long flags;
-
- if (!pgadget) {
- pr_err("%s, bad param\n", __func__);
- return -EINVAL;
- }
-
- udc = container_of(pgadget, struct nbu2ss_udc, gadget);
-
- spin_lock_irqsave(&udc->lock, flags);
- udc->mA = mA;
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_pullup(struct usb_gadget *pgadget, int is_on)
-{
- struct nbu2ss_udc *udc;
- unsigned long flags;
-
- if (!pgadget) {
- pr_err("%s, bad param\n", __func__);
- return -EINVAL;
- }
-
- udc = container_of(pgadget, struct nbu2ss_udc, gadget);
-
- if (!udc->driver) {
- pr_warn("%s, Not Regist Driver\n", __func__);
- return -EINVAL;
- }
-
- if (udc->vbus_active == 0)
- return -ESHUTDOWN;
-
- spin_lock_irqsave(&udc->lock, flags);
- _nbu2ss_pullup(udc, is_on);
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_ioctl(struct usb_gadget *pgadget,
- unsigned int code, unsigned long param)
-{
- return 0;
-}
-
-static const struct usb_gadget_ops nbu2ss_gadget_ops = {
- .get_frame = nbu2ss_gad_get_frame,
- .wakeup = nbu2ss_gad_wakeup,
- .set_selfpowered = nbu2ss_gad_set_selfpowered,
- .vbus_session = nbu2ss_gad_vbus_session,
- .vbus_draw = nbu2ss_gad_vbus_draw,
- .pullup = nbu2ss_gad_pullup,
- .ioctl = nbu2ss_gad_ioctl,
-};
-
-static const struct {
- const char *name;
- const struct usb_ep_caps caps;
-} ep_info[NUM_ENDPOINTS] = {
-#define EP_INFO(_name, _caps) \
- { \
- .name = _name, \
- .caps = _caps, \
- }
-
- EP_INFO("ep0",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep1-bulk",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep2-bulk",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep3in-int",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
- EP_INFO("ep4-iso",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep5-iso",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep6-bulk",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep7-bulk",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep8in-int",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
- EP_INFO("ep9-iso",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("epa-iso",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("epb-bulk",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("epc-bulk",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("epdin-int",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
-
-#undef EP_INFO
-};
-
-/*-------------------------------------------------------------------------*/
-static void nbu2ss_drv_ep_init(struct nbu2ss_udc *udc)
-{
- int i;
-
- INIT_LIST_HEAD(&udc->gadget.ep_list);
- udc->gadget.ep0 = &udc->ep[0].ep;
-
- for (i = 0; i < NUM_ENDPOINTS; i++) {
- struct nbu2ss_ep *ep = &udc->ep[i];
-
- ep->udc = udc;
- ep->desc = NULL;
-
- ep->ep.driver_data = NULL;
- ep->ep.name = ep_info[i].name;
- ep->ep.caps = ep_info[i].caps;
- ep->ep.ops = &nbu2ss_ep_ops;
-
- usb_ep_set_maxpacket_limit(&ep->ep,
- i == 0 ? EP0_PACKETSIZE
- : EP_PACKETSIZE);
-
- list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
- INIT_LIST_HEAD(&ep->queue);
- }
-
- list_del_init(&udc->ep[0].ep.ep_list);
-}
-
-/*-------------------------------------------------------------------------*/
-/* platform_driver */
-static int nbu2ss_drv_contest_init(struct platform_device *pdev,
- struct nbu2ss_udc *udc)
-{
- spin_lock_init(&udc->lock);
- udc->dev = &pdev->dev;
-
- udc->gadget.is_selfpowered = 1;
- udc->devstate = USB_STATE_NOTATTACHED;
- udc->pdev = pdev;
- udc->mA = 0;
-
- udc->pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-
- /* init Endpoint */
- nbu2ss_drv_ep_init(udc);
-
- /* init Gadget */
- udc->gadget.ops = &nbu2ss_gadget_ops;
- udc->gadget.ep0 = &udc->ep[0].ep;
- udc->gadget.speed = USB_SPEED_UNKNOWN;
- udc->gadget.name = driver_name;
- /* udc->gadget.is_dualspeed = 1; */
-
- device_initialize(&udc->gadget.dev);
-
- dev_set_name(&udc->gadget.dev, "gadget");
- udc->gadget.dev.parent = &pdev->dev;
- udc->gadget.dev.dma_mask = pdev->dev.dma_mask;
-
- return 0;
-}
-
-/*
- * probe - binds to the platform device
- */
-static int nbu2ss_drv_probe(struct platform_device *pdev)
-{
- int status;
- struct nbu2ss_udc *udc;
- int irq;
- void __iomem *mmio_base;
-
- udc = &udc_controller;
- memset(udc, 0, sizeof(struct nbu2ss_udc));
-
- platform_set_drvdata(pdev, udc);
-
- /* require I/O memory and IRQ to be provided as resources */
- mmio_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mmio_base))
- return PTR_ERR(mmio_base);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
- status = devm_request_irq(&pdev->dev, irq, _nbu2ss_udc_irq,
- 0, driver_name, udc);
-
- /* IO Memory */
- udc->p_regs = (struct fc_regs __iomem *)mmio_base;
-
- /* USB Function Controller Interrupt */
- if (status != 0) {
- dev_err(udc->dev, "request_irq(USB_UDC_IRQ_1) failed\n");
- return status;
- }
-
- /* Driver Initialization */
- status = nbu2ss_drv_contest_init(pdev, udc);
- if (status < 0) {
- /* Error */
- return status;
- }
-
- /* VBUS Interrupt */
- vbus_irq = gpiod_to_irq(vbus_gpio);
- irq_set_irq_type(vbus_irq, IRQ_TYPE_EDGE_BOTH);
- status = request_irq(vbus_irq,
- _nbu2ss_vbus_irq, IRQF_SHARED, driver_name, udc);
-
- if (status != 0) {
- dev_err(udc->dev, "request_irq(vbus_irq) failed\n");
- return status;
- }
-
- return status;
-}
-
-/*-------------------------------------------------------------------------*/
-static void nbu2ss_drv_shutdown(struct platform_device *pdev)
-{
- struct nbu2ss_udc *udc;
-
- udc = platform_get_drvdata(pdev);
- if (!udc)
- return;
-
- _nbu2ss_disable_controller(udc);
-}
-
-/*-------------------------------------------------------------------------*/
-static void nbu2ss_drv_remove(struct platform_device *pdev)
-{
- struct nbu2ss_udc *udc;
- struct nbu2ss_ep *ep;
- int i;
-
- udc = &udc_controller;
-
- for (i = 0; i < NUM_ENDPOINTS; i++) {
- ep = &udc->ep[i];
- if (ep->virt_buf)
- dma_free_coherent(udc->dev, PAGE_SIZE, (void *)ep->virt_buf,
- ep->phys_buf);
- }
-
- /* Interrupt Handler - Release */
- free_irq(vbus_irq, udc);
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_drv_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct nbu2ss_udc *udc;
-
- udc = platform_get_drvdata(pdev);
- if (!udc)
- return 0;
-
- if (udc->vbus_active) {
- udc->vbus_active = 0;
- udc->devstate = USB_STATE_NOTATTACHED;
- udc->linux_suspended = 1;
-
- if (udc->usb_suspended) {
- udc->usb_suspended = 0;
- _nbu2ss_reset_controller(udc);
- }
-
- _nbu2ss_quiesce(udc);
- }
- _nbu2ss_disable_controller(udc);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_drv_resume(struct platform_device *pdev)
-{
- u32 data;
- struct nbu2ss_udc *udc;
-
- udc = platform_get_drvdata(pdev);
- if (!udc)
- return 0;
-
- data = gpiod_get_value(vbus_gpio);
- if (data) {
- udc->vbus_active = 1;
- udc->devstate = USB_STATE_POWERED;
- _nbu2ss_enable_controller(udc);
- _nbu2ss_pullup(udc, 1);
- }
-
- udc->linux_suspended = 0;
-
- return 0;
-}
-
-static struct platform_driver udc_driver = {
- .probe = nbu2ss_drv_probe,
- .shutdown = nbu2ss_drv_shutdown,
- .remove_new = nbu2ss_drv_remove,
- .suspend = nbu2ss_drv_suspend,
- .resume = nbu2ss_drv_resume,
- .driver = {
- .name = driver_name,
- },
-};
-
-module_platform_driver(udc_driver);
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_AUTHOR("Renesas Electronics Corporation");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/emxx_udc/emxx_udc.h b/drivers/staging/emxx_udc/emxx_udc.h
deleted file mode 100644
index c9e37a1b8139c..0000000000000
--- a/drivers/staging/emxx_udc/emxx_udc.h
+++ /dev/null
@@ -1,554 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * EMXX FCD (Function Controller Driver) for USB.
- *
- * Copyright (C) 2010 Renesas Electronics Corporation
- */
-
-#ifndef _LINUX_EMXX_H
-#define _LINUX_EMXX_H
-
-/*---------------------------------------------------------------------------*/
-
-/*----------------- Default define */
-#define USE_DMA 1
-#define USE_SUSPEND_WAIT 1
-
-/*------------ Board dependence(Resource) */
-#define VBUS_VALUE GPIO_VBUS
-
-/* below hacked up for staging integration */
-#define GPIO_VBUS 0 /* GPIO_P153 on KZM9D */
-#define INT_VBUS 0 /* IRQ for GPIO_P153 */
-
-/*------------ Board dependence(Wait) */
-
-/* CHATTERING wait time ms */
-#define VBUS_CHATTERING_MDELAY 1
-/* DMA Abort wait time ms */
-#define DMA_DISABLE_TIME 10
-
-/*------------ Controller dependence */
-#define NUM_ENDPOINTS 14 /* Endpoint */
-#define REG_EP_NUM 15 /* Endpoint Register */
-#define DMA_MAX_COUNT 256 /* DMA Block */
-
-#define EPC_RST_DISABLE_TIME 1 /* 1 usec */
-#define EPC_DIRPD_DISABLE_TIME 1 /* 1 msec */
-#define EPC_PLL_LOCK_COUNT 1000 /* 1000 */
-#define IN_DATA_EMPTY_COUNT 1000 /* 1000 */
-
-#define CHATGER_TIME 700 /* 700msec */
-#define USB_SUSPEND_TIME 2000 /* 2 sec */
-
-/* U2F FLAG */
-#define U2F_ENABLE 1
-#define U2F_DISABLE 0
-
-#define TEST_FORCE_ENABLE (BIT(18) | BIT(16))
-
-#define INT_SEL BIT(10)
-#define CONSTFS BIT(9)
-#define SOF_RCV BIT(8)
-#define RSUM_IN BIT(7)
-#define SUSPEND BIT(6)
-#define CONF BIT(5)
-#define DEFAULT BIT(4)
-#define CONNECTB BIT(3)
-#define PUE2 BIT(2)
-
-#define MAX_TEST_MODE_NUM 0x05
-#define TEST_MODE_SHIFT 16
-
-/*------- (0x0004) USB Status Register */
-#define SPEED_MODE BIT(6)
-#define HIGH_SPEED BIT(6)
-
-#define CONF BIT(5)
-#define DEFAULT BIT(4)
-#define USB_RST BIT(3)
-#define SPND_OUT BIT(2)
-#define RSUM_OUT BIT(1)
-
-/*------- (0x0008) USB Address Register */
-#define USB_ADDR 0x007F0000
-#define SOF_STATUS BIT(15)
-#define UFRAME (BIT(14) | BIT(13) | BIT(12))
-#define FRAME 0x000007FF
-
-#define USB_ADRS_SHIFT 16
-
-/*------- (0x000C) UTMI Characteristic 1 Register */
-#define SQUSET (BIT(7) | BIT(6) | BIT(5) | BIT(4))
-
-#define USB_SQUSET (BIT(6) | BIT(5) | BIT(4))
-
-/*------- (0x0010) TEST Control Register */
-#define FORCEHS BIT(2)
-#define CS_TESTMODEEN BIT(1)
-#define LOOPBACK BIT(0)
-
-/*------- (0x0018) Setup Data 0 Register */
-/*------- (0x001C) Setup Data 1 Register */
-
-/*------- (0x0020) USB Interrupt Status Register */
-#define EPN_INT 0x00FFFF00
-#define EP15_INT BIT(23)
-#define EP14_INT BIT(22)
-#define EP13_INT BIT(21)
-#define EP12_INT BIT(20)
-#define EP11_INT BIT(19)
-#define EP10_INT BIT(18)
-#define EP9_INT BIT(17)
-#define EP8_INT BIT(16)
-#define EP7_INT BIT(15)
-#define EP6_INT BIT(14)
-#define EP5_INT BIT(13)
-#define EP4_INT BIT(12)
-#define EP3_INT BIT(11)
-#define EP2_INT BIT(10)
-#define EP1_INT BIT(9)
-#define EP0_INT BIT(8)
-#define SPEED_MODE_INT BIT(6)
-#define SOF_ERROR_INT BIT(5)
-#define SOF_INT BIT(4)
-#define USB_RST_INT BIT(3)
-#define SPND_INT BIT(2)
-#define RSUM_INT BIT(1)
-
-#define USB_INT_STA_RW 0x7E
-
-/*------- (0x0024) USB Interrupt Enable Register */
-#define EP15_0_EN 0x00FFFF00
-#define EP15_EN BIT(23)
-#define EP14_EN BIT(22)
-#define EP13_EN BIT(21)
-#define EP12_EN BIT(20)
-#define EP11_EN BIT(19)
-#define EP10_EN BIT(18)
-#define EP9_EN BIT(17)
-#define EP8_EN BIT(16)
-#define EP7_EN BIT(15)
-#define EP6_EN BIT(14)
-#define EP5_EN BIT(13)
-#define EP4_EN BIT(12)
-#define EP3_EN BIT(11)
-#define EP2_EN BIT(10)
-#define EP1_EN BIT(9)
-#define EP0_EN BIT(8)
-#define SPEED_MODE_EN BIT(6)
-#define SOF_ERROR_EN BIT(5)
-#define SOF_EN BIT(4)
-#define USB_RST_EN BIT(3)
-#define SPND_EN BIT(2)
-#define RSUM_EN BIT(1)
-
-#define USB_INT_EN_BIT \
- (EP0_EN | SPEED_MODE_EN | USB_RST_EN | SPND_EN | RSUM_EN)
-
-/*------- (0x0028) EP0 Control Register */
-#define EP0_STGSEL BIT(18)
-#define EP0_OVERSEL BIT(17)
-#define EP0_AUTO BIT(16)
-#define EP0_PIDCLR BIT(9)
-#define EP0_BCLR BIT(8)
-#define EP0_DEND BIT(7)
-#define EP0_DW (BIT(6) | BIT(5))
-#define EP0_DW4 0
-#define EP0_DW3 (BIT(6) | BIT(5))
-#define EP0_DW2 BIT(6)
-#define EP0_DW1 BIT(5)
-
-#define EP0_INAK_EN BIT(4)
-#define EP0_PERR_NAK_CLR BIT(3)
-#define EP0_STL BIT(2)
-#define EP0_INAK BIT(1)
-#define EP0_ONAK BIT(0)
-
-/*------- (0x002C) EP0 Status Register */
-#define EP0_PID BIT(18)
-#define EP0_PERR_NAK BIT(17)
-#define EP0_PERR_NAK_INT BIT(16)
-#define EP0_OUT_NAK_INT BIT(15)
-#define EP0_OUT_NULL BIT(14)
-#define EP0_OUT_FULL BIT(13)
-#define EP0_OUT_EMPTY BIT(12)
-#define EP0_IN_NAK_INT BIT(11)
-#define EP0_IN_DATA BIT(10)
-#define EP0_IN_FULL BIT(9)
-#define EP0_IN_EMPTY BIT(8)
-#define EP0_OUT_NULL_INT BIT(7)
-#define EP0_OUT_OR_INT BIT(6)
-#define EP0_OUT_INT BIT(5)
-#define EP0_IN_INT BIT(4)
-#define EP0_STALL_INT BIT(3)
-#define STG_END_INT BIT(2)
-#define STG_START_INT BIT(1)
-#define SETUP_INT BIT(0)
-
-#define EP0_STATUS_RW_BIT (BIT(16) | BIT(15) | BIT(11) | 0xFF)
-
-/*------- (0x0030) EP0 Interrupt Enable Register */
-#define EP0_PERR_NAK_EN BIT(16)
-#define EP0_OUT_NAK_EN BIT(15)
-
-#define EP0_IN_NAK_EN BIT(11)
-
-#define EP0_OUT_NULL_EN BIT(7)
-#define EP0_OUT_OR_EN BIT(6)
-#define EP0_OUT_EN BIT(5)
-#define EP0_IN_EN BIT(4)
-#define EP0_STALL_EN BIT(3)
-#define STG_END_EN BIT(2)
-#define STG_START_EN BIT(1)
-#define SETUP_EN BIT(0)
-
-#define EP0_INT_EN_BIT \
- (EP0_OUT_OR_EN | EP0_OUT_EN | EP0_IN_EN | STG_END_EN | SETUP_EN)
-
-/*------- (0x0034) EP0 Length Register */
-#define EP0_LDATA 0x0000007F
-
-/*------- (0x0038) EP0 Read Register */
-/*------- (0x003C) EP0 Write Register */
-
-/*------- (0x0040:) EPN Control Register */
-#define EPN_EN BIT(31)
-#define EPN_BUF_TYPE BIT(30)
-#define EPN_BUF_SINGLE BIT(30)
-
-#define EPN_DIR0 BIT(26)
-#define EPN_MODE (BIT(25) | BIT(24))
-#define EPN_BULK 0
-#define EPN_INTERRUPT BIT(24)
-#define EPN_ISO BIT(25)
-
-#define EPN_OVERSEL BIT(17)
-#define EPN_AUTO BIT(16)
-
-#define EPN_IPIDCLR BIT(11)
-#define EPN_OPIDCLR BIT(10)
-#define EPN_BCLR BIT(9)
-#define EPN_CBCLR BIT(8)
-#define EPN_DEND BIT(7)
-#define EPN_DW (BIT(6) | BIT(5))
-#define EPN_DW4 0
-#define EPN_DW3 (BIT(6) | BIT(5))
-#define EPN_DW2 BIT(6)
-#define EPN_DW1 BIT(5)
-
-#define EPN_OSTL_EN BIT(4)
-#define EPN_ISTL BIT(3)
-#define EPN_OSTL BIT(2)
-
-#define EPN_ONAK BIT(0)
-
-/*------- (0x0044:) EPN Status Register */
-#define EPN_ISO_PIDERR BIT(29) /* R */
-#define EPN_OPID BIT(28) /* R */
-#define EPN_OUT_NOTKN BIT(27) /* R */
-#define EPN_ISO_OR BIT(26) /* R */
-
-#define EPN_ISO_CRC BIT(24) /* R */
-#define EPN_OUT_END_INT BIT(23) /* RW */
-#define EPN_OUT_OR_INT BIT(22) /* RW */
-#define EPN_OUT_NAK_ERR_INT BIT(21) /* RW */
-#define EPN_OUT_STALL_INT BIT(20) /* RW */
-#define EPN_OUT_INT BIT(19) /* RW */
-#define EPN_OUT_NULL_INT BIT(18) /* RW */
-#define EPN_OUT_FULL BIT(17) /* R */
-#define EPN_OUT_EMPTY BIT(16) /* R */
-
-#define EPN_IPID BIT(10) /* R */
-#define EPN_IN_NOTKN BIT(9) /* R */
-#define EPN_ISO_UR BIT(8) /* R */
-#define EPN_IN_END_INT BIT(7) /* RW */
-
-#define EPN_IN_NAK_ERR_INT BIT(5) /* RW */
-#define EPN_IN_STALL_INT BIT(4) /* RW */
-#define EPN_IN_INT BIT(3) /* RW */
-#define EPN_IN_DATA BIT(2) /* R */
-#define EPN_IN_FULL BIT(1) /* R */
-#define EPN_IN_EMPTY BIT(0) /* R */
-
-#define EPN_INT_EN \
- (EPN_OUT_END_INT | EPN_OUT_INT | EPN_IN_END_INT | EPN_IN_INT)
-
-/*------- (0x0048:) EPN Interrupt Enable Register */
-#define EPN_OUT_END_EN BIT(23) /* RW */
-#define EPN_OUT_OR_EN BIT(22) /* RW */
-#define EPN_OUT_NAK_ERR_EN BIT(21) /* RW */
-#define EPN_OUT_STALL_EN BIT(20) /* RW */
-#define EPN_OUT_EN BIT(19) /* RW */
-#define EPN_OUT_NULL_EN BIT(18) /* RW */
-
-#define EPN_IN_END_EN BIT(7) /* RW */
-
-#define EPN_IN_NAK_ERR_EN BIT(5) /* RW */
-#define EPN_IN_STALL_EN BIT(4) /* RW */
-#define EPN_IN_EN BIT(3) /* RW */
-
-/*------- (0x004C:) EPN Interrupt Enable Register */
-#define EPN_STOP_MODE BIT(11)
-#define EPN_DEND_SET BIT(10)
-#define EPN_BURST_SET BIT(9)
-#define EPN_STOP_SET BIT(8)
-
-#define EPN_DMA_EN BIT(4)
-
-#define EPN_DMAMODE0 BIT(0)
-
-/*------- (0x0050:) EPN MaxPacket & BaseAddress Register */
-#define EPN_BASEAD 0x1FFF0000
-#define EPN_MPKT 0x000007FF
-
-/*------- (0x0054:) EPN Length & DMA Count Register */
-#define EPN_DMACNT 0x01FF0000
-#define EPN_LDATA 0x000007FF
-
-/*------- (0x0058:) EPN Read Register */
-/*------- (0x005C:) EPN Write Register */
-
-/*------- (0x1000) AHBSCTR Register */
-#define WAIT_MODE BIT(0)
-
-/*------- (0x1004) AHBMCTR Register */
-#define ARBITER_CTR BIT(31) /* RW */
-#define MCYCLE_RST BIT(12) /* RW */
-
-#define ENDIAN_CTR (BIT(9) | BIT(8)) /* RW */
-#define ENDIAN_BYTE_SWAP BIT(9)
-#define ENDIAN_HALF_WORD_SWAP ENDIAN_CTR
-
-#define HBUSREQ_MODE BIT(5) /* RW */
-#define HTRANS_MODE BIT(4) /* RW */
-
-#define WBURST_TYPE BIT(2) /* RW */
-#define BURST_TYPE (BIT(1) | BIT(0)) /* RW */
-#define BURST_MAX_16 0
-#define BURST_MAX_8 BIT(0)
-#define BURST_MAX_4 BIT(1)
-#define BURST_SINGLE BURST_TYPE
-
-/*------- (0x1008) AHBBINT Register */
-#define DMA_ENDINT 0xFFFE0000 /* RW */
-
-#define AHB_VBUS_INT BIT(13) /* RW */
-
-#define MBUS_ERRINT BIT(6) /* RW */
-
-#define SBUS_ERRINT0 BIT(4) /* RW */
-#define ERR_MASTER 0x0000000F /* R */
-
-/*------- (0x100C) AHBBINTEN Register */
-#define DMA_ENDINTEN 0xFFFE0000 /* RW */
-
-#define VBUS_INTEN BIT(13) /* RW */
-
-#define MBUS_ERRINTEN BIT(6) /* RW */
-
-#define SBUS_ERRINT0EN BIT(4) /* RW */
-
-/*------- (0x1010) EPCTR Register */
-#define DIRPD BIT(12) /* RW */
-
-#define VBUS_LEVEL BIT(8) /* R */
-
-#define PLL_RESUME BIT(5) /* RW */
-#define PLL_LOCK BIT(4) /* R */
-
-#define EPC_RST BIT(0) /* RW */
-
-/*------- (0x1014) USBF_EPTEST Register */
-#define LINESTATE (BIT(9) | BIT(8)) /* R */
-#define DM_LEVEL BIT(9) /* R */
-#define DP_LEVEL BIT(8) /* R */
-
-#define PHY_TST BIT(1) /* RW */
-#define PHY_TSTCLK BIT(0) /* RW */
-
-/*------- (0x1020) USBSSVER Register */
-#define AHBB_VER 0x00FF0000 /* R */
-#define EPC_VER 0x0000FF00 /* R */
-#define SS_VER 0x000000FF /* R */
-
-/*------- (0x1024) USBSSCONF Register */
-#define EP_AVAILABLE 0xFFFF0000 /* R */
-#define DMA_AVAILABLE 0x0000FFFF /* R */
-
-/*------- (0x1110:) EPNDCR1 Register */
-#define DCR1_EPN_DMACNT 0x00FF0000 /* RW */
-
-#define DCR1_EPN_DIR0 BIT(1) /* RW */
-#define DCR1_EPN_REQEN BIT(0) /* RW */
-
-/*------- (0x1114:) EPNDCR2 Register */
-#define DCR2_EPN_LMPKT 0x07FF0000 /* RW */
-
-#define DCR2_EPN_MPKT 0x000007FF /* RW */
-
-/*------- (0x1118:) EPNTADR Register */
-#define EPN_TADR 0xFFFFFFFF /* RW */
-
-/*===========================================================================*/
-/* Struct */
-/*------- ep_regs */
-struct ep_regs {
- u32 EP_CONTROL; /* EP Control */
- u32 EP_STATUS; /* EP Status */
- u32 EP_INT_ENA; /* EP Interrupt Enable */
- u32 EP_DMA_CTRL; /* EP DMA Control */
- u32 EP_PCKT_ADRS; /* EP Maxpacket & BaseAddress */
- u32 EP_LEN_DCNT; /* EP Length & DMA count */
- u32 EP_READ; /* EP Read */
- u32 EP_WRITE; /* EP Write */
-};
-
-/*------- ep_dcr */
-struct ep_dcr {
- u32 EP_DCR1; /* EP_DCR1 */
- u32 EP_DCR2; /* EP_DCR2 */
- u32 EP_TADR; /* EP_TADR */
- u32 Reserved; /* Reserved */
-};
-
-/*------- Function Registers */
-struct fc_regs {
- u32 USB_CONTROL; /* (0x0000) USB Control */
- u32 USB_STATUS; /* (0x0004) USB Status */
- u32 USB_ADDRESS; /* (0x0008) USB Address */
- u32 UTMI_CHARACTER_1; /* (0x000C) UTMI Setting */
- u32 TEST_CONTROL; /* (0x0010) TEST Control */
- u32 reserved_14; /* (0x0014) Reserved */
- u32 SETUP_DATA0; /* (0x0018) Setup Data0 */
- u32 SETUP_DATA1; /* (0x001C) Setup Data1 */
- u32 USB_INT_STA; /* (0x0020) USB Interrupt Status */
- u32 USB_INT_ENA; /* (0x0024) USB Interrupt Enable */
- u32 EP0_CONTROL; /* (0x0028) EP0 Control */
- u32 EP0_STATUS; /* (0x002C) EP0 Status */
- u32 EP0_INT_ENA; /* (0x0030) EP0 Interrupt Enable */
- u32 EP0_LENGTH; /* (0x0034) EP0 Length */
- u32 EP0_READ; /* (0x0038) EP0 Read */
- u32 EP0_WRITE; /* (0x003C) EP0 Write */
-
- struct ep_regs EP_REGS[REG_EP_NUM]; /* Endpoint Register */
-
- u8 reserved_220[0x1000 - 0x220]; /* (0x0220:0x0FFF) Reserved */
-
- u32 AHBSCTR; /* (0x1000) AHBSCTR */
- u32 AHBMCTR; /* (0x1004) AHBMCTR */
- u32 AHBBINT; /* (0x1008) AHBBINT */
- u32 AHBBINTEN; /* (0x100C) AHBBINTEN */
- u32 EPCTR; /* (0x1010) EPCTR */
- u32 USBF_EPTEST; /* (0x1014) USBF_EPTEST */
-
- u8 reserved_1018[0x20 - 0x18]; /* (0x1018:0x101F) Reserved */
-
- u32 USBSSVER; /* (0x1020) USBSSVER */
- u32 USBSSCONF; /* (0x1024) USBSSCONF */
-
- u8 reserved_1028[0x110 - 0x28]; /* (0x1028:0x110F) Reserved */
-
- struct ep_dcr EP_DCR[REG_EP_NUM]; /* */
-
- u8 reserved_1200[0x1000 - 0x200]; /* Reserved */
-} __aligned(32);
-
-#define EP0_PACKETSIZE 64
-#define EP_PACKETSIZE 1024
-
-/* EPN RAM SIZE */
-#define D_RAM_SIZE_CTRL 64
-
-/* EPN Bulk Endpoint Max Packet Size */
-#define D_FS_RAM_SIZE_BULK 64
-#define D_HS_RAM_SIZE_BULK 512
-
-struct nbu2ss_udc;
-
-enum ep0_state {
- EP0_IDLE,
- EP0_IN_DATA_PHASE,
- EP0_OUT_DATA_PHASE,
- EP0_IN_STATUS_PHASE,
- EP0_OUT_STATUS_PAHSE,
- EP0_END_XFER,
- EP0_SUSPEND,
- EP0_STALL,
-};
-
-struct nbu2ss_req {
- struct usb_request req;
- struct list_head queue;
-
- u32 div_len;
- bool dma_flag;
- bool zero;
-
- bool unaligned;
-
- unsigned mapped:1;
-};
-
-struct nbu2ss_ep {
- struct usb_ep ep;
- struct list_head queue;
-
- struct nbu2ss_udc *udc;
-
- const struct usb_endpoint_descriptor *desc;
-
- u8 epnum;
- u8 direct;
- u8 ep_type;
-
- unsigned wedged:1;
- unsigned halted:1;
- unsigned stalled:1;
-
- u8 *virt_buf;
- dma_addr_t phys_buf;
-};
-
-struct nbu2ss_udc {
- struct usb_gadget gadget;
- struct usb_gadget_driver *driver;
- struct platform_device *pdev;
- struct device *dev;
- spinlock_t lock; /* Protects nbu2ss_udc structure fields */
- struct completion *pdone;
-
- enum ep0_state ep0state;
- enum usb_device_state devstate;
- struct usb_ctrlrequest ctrl;
- struct nbu2ss_req ep0_req;
- u8 ep0_buf[EP0_PACKETSIZE];
-
- struct nbu2ss_ep ep[NUM_ENDPOINTS];
-
- unsigned softconnect:1;
- unsigned vbus_active:1;
- unsigned linux_suspended:1;
- unsigned linux_resume:1;
- unsigned usb_suspended:1;
- unsigned remote_wakeup:1;
- unsigned udc_enabled:1;
-
- unsigned int mA;
-
- u32 curr_config; /* Current Configuration Number */
-
- struct fc_regs __iomem *p_regs;
-};
-
-/* USB register access structure */
-union usb_reg_access {
- struct {
- unsigned char DATA[4];
- } byte;
- unsigned int dw;
-};
-
-/*-------------------------------------------------------------------------*/
-
-#endif /* _LINUX_EMXX_H */
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 68add4d598ae8..38845f23023fe 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -327,7 +327,6 @@ static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagereflis
unsigned int dirty_lines_start, dirty_lines_end;
struct fb_deferred_io_pageref *pageref;
unsigned int y_low = 0, y_high = 0;
- int count = 0;
spin_lock(&par->dirty_lock);
dirty_lines_start = par->dirty_lines_start;
@@ -339,7 +338,6 @@ static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagereflis
/* Mark display lines as dirty */
list_for_each_entry(pageref, pagereflist, list) {
- count++;
y_low = pageref->offset / info->fix.line_length;
y_high = (pageref->offset + PAGE_SIZE - 1) / info->fix.line_length;
dev_dbg(info->device,
diff --git a/drivers/staging/fieldbus/anybuss/arcx-anybus.c b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
index 34d18b09beddc..fcd3e3722ae01 100644
--- a/drivers/staging/fieldbus/anybuss/arcx-anybus.c
+++ b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
@@ -285,7 +285,7 @@ static int controller_probe(struct platform_device *pdev)
}
}
- id = ida_simple_get(&controller_index_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&controller_index_ida, GFP_KERNEL);
if (id < 0) {
err = id;
goto out_reset;
@@ -318,7 +318,7 @@ static int controller_probe(struct platform_device *pdev)
out_dev:
put_device(cd->class_dev);
out_ida:
- ida_simple_remove(&controller_index_ida, id);
+ ida_free(&controller_index_ida, id);
out_reset:
gpiod_set_value_cansleep(cd->reset_gpiod, 1);
return err;
@@ -330,7 +330,7 @@ static void controller_remove(struct platform_device *pdev)
int id = cd->class_dev->id;
device_unregister(cd->class_dev);
- ida_simple_remove(&controller_index_ida, id);
+ ida_free(&controller_index_ida, id);
gpiod_set_value_cansleep(cd->reset_gpiod, 1);
}
diff --git a/drivers/staging/fieldbus/anybuss/host.c b/drivers/staging/fieldbus/anybuss/host.c
index cd86b9c9e3458..410e6f8073c0b 100644
--- a/drivers/staging/fieldbus/anybuss/host.c
+++ b/drivers/staging/fieldbus/anybuss/host.c
@@ -1195,7 +1195,7 @@ static void anybus_bus_remove(struct device *dev)
adrv->remove(to_anybuss_client(dev));
}
-static struct bus_type anybus_bus = {
+static const struct bus_type anybus_bus = {
.name = "anybuss",
.match = anybus_bus_match,
.probe = anybus_bus_probe,
diff --git a/drivers/staging/fieldbus/dev_core.c b/drivers/staging/fieldbus/dev_core.c
index bf1812d8924fa..0053ebd91442d 100644
--- a/drivers/staging/fieldbus/dev_core.c
+++ b/drivers/staging/fieldbus/dev_core.c
@@ -152,7 +152,7 @@ static const struct attribute_group fieldbus_group = {
};
__ATTRIBUTE_GROUPS(fieldbus);
-static struct class fieldbus_class = {
+static const struct class fieldbus_class = {
.name = "fieldbus_dev",
.dev_groups = fieldbus_groups,
};
@@ -247,7 +247,7 @@ static void __fieldbus_dev_unregister(struct fieldbus_dev *fb)
return;
device_destroy(&fieldbus_class, fb->cdev.dev);
cdev_del(&fb->cdev);
- ida_simple_remove(&fieldbus_ida, fb->id);
+ ida_free(&fieldbus_ida, fb->id);
}
void fieldbus_dev_unregister(struct fieldbus_dev *fb)
@@ -267,7 +267,7 @@ static int __fieldbus_dev_register(struct fieldbus_dev *fb)
return -EINVAL;
if (!fb->read_area || !fb->write_area || !fb->fieldbus_id_get)
return -EINVAL;
- fb->id = ida_simple_get(&fieldbus_ida, 0, MAX_FIELDBUSES, GFP_KERNEL);
+ fb->id = ida_alloc_max(&fieldbus_ida, MAX_FIELDBUSES - 1, GFP_KERNEL);
if (fb->id < 0)
return fb->id;
devno = MKDEV(MAJOR(fieldbus_devt), fb->id);
@@ -290,7 +290,7 @@ static int __fieldbus_dev_register(struct fieldbus_dev *fb)
err_dev_create:
cdev_del(&fb->cdev);
err_cdev:
- ida_simple_remove(&fieldbus_ida, fb->id);
+ ida_free(&fieldbus_ida, fb->id);
return err;
}
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index 5703a9ddb6d0d..eb754b231429b 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -43,7 +43,7 @@ static struct {
struct sock *sock;
} lte_event;
-static struct device_type wwan_type = {
+static const struct device_type wwan_type = {
.name = "wwan",
};
diff --git a/drivers/staging/greybus/Kconfig b/drivers/staging/greybus/Kconfig
index 927cfa4bc9898..1e745a8d439c8 100644
--- a/drivers/staging/greybus/Kconfig
+++ b/drivers/staging/greybus/Kconfig
@@ -64,7 +64,7 @@ config GREYBUS_HID
config GREYBUS_LIGHT
tristate "Greybus LED Class driver"
- depends on LEDS_CLASS
+ depends on LEDS_CLASS_FLASH
help
Select this option if you have a device that follows the
Greybus LED Class specification.
diff --git a/drivers/staging/greybus/audio_apbridgea.h b/drivers/staging/greybus/audio_apbridgea.h
index efec0f815efd1..ab707d310129d 100644
--- a/drivers/staging/greybus/audio_apbridgea.h
+++ b/drivers/staging/greybus/audio_apbridgea.h
@@ -65,7 +65,6 @@
struct audio_apbridgea_hdr {
__u8 type;
__le16 i2s_port;
- __u8 data[];
} __packed;
struct audio_apbridgea_set_config_request {
diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
index 9a3f7c034ab49..fa43d35bbcece 100644
--- a/drivers/staging/greybus/audio_manager.c
+++ b/drivers/staging/greybus/audio_manager.c
@@ -44,14 +44,14 @@ int gb_audio_manager_add(struct gb_audio_manager_module_descriptor *desc)
int id;
int err;
- id = ida_simple_get(&module_id, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&module_id, GFP_KERNEL);
if (id < 0)
return id;
err = gb_audio_manager_module_create(&module, manager_kset,
id, desc);
if (err) {
- ida_simple_remove(&module_id, id);
+ ida_free(&module_id, id);
return err;
}
@@ -78,7 +78,7 @@ int gb_audio_manager_remove(int id)
list_del(&module->list);
kobject_put(&module->kobj);
up_write(&modules_rwsem);
- ida_simple_remove(&module_id, id);
+ ida_free(&module_id, id);
return 0;
}
EXPORT_SYMBOL_GPL(gb_audio_manager_remove);
@@ -92,7 +92,7 @@ void gb_audio_manager_remove_all(void)
list_for_each_entry_safe(module, next, &modules_list, list) {
list_del(&module->list);
- ida_simple_remove(&module_id, module->id);
+ ida_free(&module_id, module->id);
kobject_put(&module->kobj);
}
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
index 08e6a807c1327..5dc4721105d44 100644
--- a/drivers/staging/greybus/audio_topology.c
+++ b/drivers/staging/greybus/audio_topology.c
@@ -761,7 +761,6 @@ static int gbcodec_enum_dapm_ctl_put(struct snd_kcontrol *kcontrol,
{
int ret, wi, ctl_id;
unsigned int val, mux, change;
- unsigned int mask;
struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
struct snd_soc_dapm_widget *widget = wlist->widgets[0];
struct gb_audio_ctl_elem_value gbvalue;
@@ -802,7 +801,6 @@ static int gbcodec_enum_dapm_ctl_put(struct snd_kcontrol *kcontrol,
mux = ucontrol->value.enumerated.item[0];
val = mux << e->shift_l;
- mask = e->mask << e->shift_l;
if (le32_to_cpu(gbvalue.value.enumerated_item[0]) !=
ucontrol->value.enumerated.item[0]) {
@@ -815,7 +813,6 @@ static int gbcodec_enum_dapm_ctl_put(struct snd_kcontrol *kcontrol,
if (ucontrol->value.enumerated.item[1] > e->items - 1)
return -EINVAL;
val |= ucontrol->value.enumerated.item[1] << e->shift_r;
- mask |= e->mask << e->shift_r;
if (le32_to_cpu(gbvalue.value.enumerated_item[1]) !=
ucontrol->value.enumerated.item[1]) {
change = 1;
diff --git a/drivers/staging/greybus/authentication.c b/drivers/staging/greybus/authentication.c
index b67315641d18e..d53e58f92e817 100644
--- a/drivers/staging/greybus/authentication.c
+++ b/drivers/staging/greybus/authentication.c
@@ -324,7 +324,7 @@ int gb_cap_connection_init(struct gb_connection *connection)
if (ret)
goto err_list_del;
- minor = ida_simple_get(&cap_minors_map, 0, NUM_MINORS, GFP_KERNEL);
+ minor = ida_alloc_max(&cap_minors_map, NUM_MINORS - 1, GFP_KERNEL);
if (minor < 0) {
ret = minor;
goto err_connection_disable;
@@ -351,7 +351,7 @@ int gb_cap_connection_init(struct gb_connection *connection)
err_del_cdev:
cdev_del(&cap->cdev);
err_remove_ida:
- ida_simple_remove(&cap_minors_map, minor);
+ ida_free(&cap_minors_map, minor);
err_connection_disable:
gb_connection_disable(connection);
err_list_del:
@@ -375,7 +375,7 @@ void gb_cap_connection_exit(struct gb_connection *connection)
device_destroy(&cap_class, cap->dev_num);
cdev_del(&cap->cdev);
- ida_simple_remove(&cap_minors_map, MINOR(cap->dev_num));
+ ida_free(&cap_minors_map, MINOR(cap->dev_num));
/*
* Disallow any new ioctl operations on the char device and wait for
diff --git a/drivers/staging/greybus/bootrom.c b/drivers/staging/greybus/bootrom.c
index 79581457c4afb..c0d338db6b525 100644
--- a/drivers/staging/greybus/bootrom.c
+++ b/drivers/staging/greybus/bootrom.c
@@ -243,10 +243,10 @@ static int gb_bootrom_get_firmware(struct gb_operation *op)
struct gb_bootrom *bootrom = gb_connection_get_data(op->connection);
const struct firmware *fw;
struct gb_bootrom_get_firmware_request *firmware_request;
- struct gb_bootrom_get_firmware_response *firmware_response;
struct device *dev = &op->connection->bundle->dev;
unsigned int offset, size;
enum next_request_type next_request;
+ u8 *firmware_response;
int ret = 0;
/* Disable timeouts */
@@ -280,15 +280,15 @@ static int gb_bootrom_get_firmware(struct gb_operation *op)
goto unlock;
}
- if (!gb_operation_response_alloc(op, sizeof(*firmware_response) + size,
- GFP_KERNEL)) {
+ /* gb_bootrom_get_firmware_response contains only a byte array */
+ if (!gb_operation_response_alloc(op, size, GFP_KERNEL)) {
dev_err(dev, "%s: error allocating response\n", __func__);
ret = -ENOMEM;
goto unlock;
}
firmware_response = op->response->payload;
- memcpy(firmware_response->data, fw->data + offset, size);
+ memcpy(firmware_response, fw->data + offset, size);
dev_dbg(dev, "responding with firmware (offs = %u, size = %u)\n",
offset, size);
diff --git a/drivers/staging/greybus/fw-download.c b/drivers/staging/greybus/fw-download.c
index 543692c567f92..9a09bd3af79ba 100644
--- a/drivers/staging/greybus/fw-download.c
+++ b/drivers/staging/greybus/fw-download.c
@@ -63,8 +63,7 @@ static void fw_req_release(struct kref *kref)
* just hope that it never happens.
*/
if (!fw_req->timedout)
- ida_simple_remove(&fw_req->fw_download->id_map,
- fw_req->firmware_id);
+ ida_free(&fw_req->fw_download->id_map, fw_req->firmware_id);
kfree(fw_req);
}
@@ -171,7 +170,7 @@ static struct fw_request *find_firmware(struct fw_download *fw_download,
return ERR_PTR(-ENOMEM);
/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
- ret = ida_simple_get(&fw_download->id_map, 1, 256, GFP_KERNEL);
+ ret = ida_alloc_range(&fw_download->id_map, 1, 255, GFP_KERNEL);
if (ret < 0) {
dev_err(fw_download->parent,
"failed to allocate firmware id (%d)\n", ret);
@@ -212,7 +211,7 @@ static struct fw_request *find_firmware(struct fw_download *fw_download,
return fw_req;
err_free_id:
- ida_simple_remove(&fw_download->id_map, fw_req->firmware_id);
+ ida_free(&fw_download->id_map, fw_req->firmware_id);
err_free_req:
kfree(fw_req);
@@ -271,11 +270,11 @@ static int fw_download_fetch_firmware(struct gb_operation *op)
struct gb_connection *connection = op->connection;
struct fw_download *fw_download = gb_connection_get_data(connection);
struct gb_fw_download_fetch_firmware_request *request;
- struct gb_fw_download_fetch_firmware_response *response;
struct fw_request *fw_req;
const struct firmware *fw;
unsigned int offset, size;
u8 firmware_id;
+ u8 *response;
int ret = 0;
if (op->request->payload_size != sizeof(*request)) {
@@ -325,8 +324,8 @@ static int fw_download_fetch_firmware(struct gb_operation *op)
goto put_fw;
}
- if (!gb_operation_response_alloc(op, sizeof(*response) + size,
- GFP_KERNEL)) {
+ /* gb_fw_download_fetch_firmware_response contains only a byte array */
+ if (!gb_operation_response_alloc(op, size, GFP_KERNEL)) {
dev_err(fw_download->parent,
"error allocating fetch firmware response\n");
ret = -ENOMEM;
@@ -334,7 +333,7 @@ static int fw_download_fetch_firmware(struct gb_operation *op)
}
response = op->response->payload;
- memcpy(response->data, fw->data + offset, size);
+ memcpy(response, fw->data + offset, size);
dev_dbg(fw_download->parent,
"responding with firmware (offs = %u, size = %u)\n", offset,
diff --git a/drivers/staging/greybus/fw-management.c b/drivers/staging/greybus/fw-management.c
index 93137a3c4907c..3054f084d777b 100644
--- a/drivers/staging/greybus/fw-management.c
+++ b/drivers/staging/greybus/fw-management.c
@@ -165,7 +165,7 @@ static int fw_mgmt_load_and_validate_operation(struct fw_mgmt *fw_mgmt,
}
/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
- ret = ida_simple_get(&fw_mgmt->id_map, 1, 256, GFP_KERNEL);
+ ret = ida_alloc_range(&fw_mgmt->id_map, 1, 255, GFP_KERNEL);
if (ret < 0) {
dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
ret);
@@ -180,8 +180,7 @@ static int fw_mgmt_load_and_validate_operation(struct fw_mgmt *fw_mgmt,
GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW, &request,
sizeof(request), NULL, 0);
if (ret) {
- ida_simple_remove(&fw_mgmt->id_map,
- fw_mgmt->intf_fw_request_id);
+ ida_free(&fw_mgmt->id_map, fw_mgmt->intf_fw_request_id);
fw_mgmt->intf_fw_request_id = 0;
dev_err(fw_mgmt->parent,
"load and validate firmware request failed (%d)\n",
@@ -220,7 +219,7 @@ static int fw_mgmt_interface_fw_loaded_operation(struct gb_operation *op)
return -ENODEV;
}
- ida_simple_remove(&fw_mgmt->id_map, fw_mgmt->intf_fw_request_id);
+ ida_free(&fw_mgmt->id_map, fw_mgmt->intf_fw_request_id);
fw_mgmt->intf_fw_request_id = 0;
fw_mgmt->intf_fw_status = request->status;
fw_mgmt->intf_fw_major = le16_to_cpu(request->major);
@@ -316,7 +315,7 @@ static int fw_mgmt_backend_fw_update_operation(struct fw_mgmt *fw_mgmt,
}
/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
- ret = ida_simple_get(&fw_mgmt->id_map, 1, 256, GFP_KERNEL);
+ ret = ida_alloc_range(&fw_mgmt->id_map, 1, 255, GFP_KERNEL);
if (ret < 0) {
dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
ret);
@@ -330,8 +329,7 @@ static int fw_mgmt_backend_fw_update_operation(struct fw_mgmt *fw_mgmt,
GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE, &request,
sizeof(request), NULL, 0);
if (ret) {
- ida_simple_remove(&fw_mgmt->id_map,
- fw_mgmt->backend_fw_request_id);
+ ida_free(&fw_mgmt->id_map, fw_mgmt->backend_fw_request_id);
fw_mgmt->backend_fw_request_id = 0;
dev_err(fw_mgmt->parent,
"backend %s firmware update request failed (%d)\n", tag,
@@ -369,7 +367,7 @@ static int fw_mgmt_backend_fw_updated_operation(struct gb_operation *op)
return -ENODEV;
}
- ida_simple_remove(&fw_mgmt->id_map, fw_mgmt->backend_fw_request_id);
+ ida_free(&fw_mgmt->id_map, fw_mgmt->backend_fw_request_id);
fw_mgmt->backend_fw_request_id = 0;
fw_mgmt->backend_fw_status = request->status;
@@ -617,7 +615,7 @@ int gb_fw_mgmt_connection_init(struct gb_connection *connection)
if (ret)
goto err_list_del;
- minor = ida_simple_get(&fw_mgmt_minors_map, 0, NUM_MINORS, GFP_KERNEL);
+ minor = ida_alloc_max(&fw_mgmt_minors_map, NUM_MINORS - 1, GFP_KERNEL);
if (minor < 0) {
ret = minor;
goto err_connection_disable;
@@ -645,7 +643,7 @@ int gb_fw_mgmt_connection_init(struct gb_connection *connection)
err_del_cdev:
cdev_del(&fw_mgmt->cdev);
err_remove_ida:
- ida_simple_remove(&fw_mgmt_minors_map, minor);
+ ida_free(&fw_mgmt_minors_map, minor);
err_connection_disable:
gb_connection_disable(connection);
err_list_del:
@@ -669,7 +667,7 @@ void gb_fw_mgmt_connection_exit(struct gb_connection *connection)
device_destroy(&fw_mgmt_class, fw_mgmt->dev_num);
cdev_del(&fw_mgmt->cdev);
- ida_simple_remove(&fw_mgmt_minors_map, MINOR(fw_mgmt->dev_num));
+ ida_free(&fw_mgmt_minors_map, MINOR(fw_mgmt->dev_num));
/*
* Disallow any new ioctl operations on the char device and wait for
diff --git a/drivers/staging/greybus/gbphy.c b/drivers/staging/greybus/gbphy.c
index 6a7d8cf2a1ebc..d827f03f52538 100644
--- a/drivers/staging/greybus/gbphy.c
+++ b/drivers/staging/greybus/gbphy.c
@@ -46,7 +46,7 @@ static void gbphy_dev_release(struct device *dev)
{
struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
- ida_simple_remove(&gbphy_id, gbphy_dev->id);
+ ida_free(&gbphy_id, gbphy_dev->id);
kfree(gbphy_dev);
}
@@ -182,7 +182,7 @@ static void gbphy_dev_remove(struct device *dev)
pm_runtime_dont_use_autosuspend(dev);
}
-static struct bus_type gbphy_bus_type = {
+static const struct bus_type gbphy_bus_type = {
.name = "gbphy",
.match = gbphy_dev_match,
.probe = gbphy_dev_probe,
@@ -225,13 +225,13 @@ static struct gbphy_device *gb_gbphy_create_dev(struct gb_bundle *bundle,
int retval;
int id;
- id = ida_simple_get(&gbphy_id, 1, 0, GFP_KERNEL);
+ id = ida_alloc_min(&gbphy_id, 1, GFP_KERNEL);
if (id < 0)
return ERR_PTR(id);
gbphy_dev = kzalloc(sizeof(*gbphy_dev), GFP_KERNEL);
if (!gbphy_dev) {
- ida_simple_remove(&gbphy_id, id);
+ ida_free(&gbphy_id, id);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/staging/greybus/greybus_authentication.h b/drivers/staging/greybus/greybus_authentication.h
index 48b4a9794d3c8..ee88f880cfe3b 100644
--- a/drivers/staging/greybus/greybus_authentication.h
+++ b/drivers/staging/greybus/greybus_authentication.h
@@ -44,7 +44,7 @@
/* IOCTL support */
struct cap_ioc_get_endpoint_uid {
__u8 uid[8];
-} __attribute__ ((__packed__));
+} __packed;
struct cap_ioc_get_ims_certificate {
__u32 certificate_class;
@@ -53,7 +53,7 @@ struct cap_ioc_get_ims_certificate {
__u8 result_code;
__u32 cert_size;
__u8 certificate[CAP_CERTIFICATE_MAX_SIZE];
-} __attribute__ ((__packed__));
+} __packed;
struct cap_ioc_authenticate {
__u32 auth_type;
@@ -64,7 +64,7 @@ struct cap_ioc_authenticate {
__u8 response[64];
__u32 signature_size;
__u8 signature[CAP_SIGNATURE_MAX_SIZE];
-} __attribute__ ((__packed__));
+} __packed;
#define CAP_IOCTL_BASE 'C'
#define CAP_IOC_GET_ENDPOINT_UID _IOR(CAP_IOCTL_BASE, 0, struct cap_ioc_get_endpoint_uid)
diff --git a/drivers/staging/greybus/greybus_firmware.h b/drivers/staging/greybus/greybus_firmware.h
index f68fd5e253217..b6042a82ada47 100644
--- a/drivers/staging/greybus/greybus_firmware.h
+++ b/drivers/staging/greybus/greybus_firmware.h
@@ -41,14 +41,14 @@ struct fw_mgmt_ioc_get_intf_version {
__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
__u16 major;
__u16 minor;
-} __attribute__ ((__packed__));
+} __packed;
struct fw_mgmt_ioc_get_backend_version {
__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
__u16 major;
__u16 minor;
__u8 status;
-} __attribute__ ((__packed__));
+} __packed;
struct fw_mgmt_ioc_intf_load_and_validate {
__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
@@ -56,12 +56,12 @@ struct fw_mgmt_ioc_intf_load_and_validate {
__u8 status;
__u16 major;
__u16 minor;
-} __attribute__ ((__packed__));
+} __packed;
struct fw_mgmt_ioc_backend_fw_update {
__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
__u8 status;
-} __attribute__ ((__packed__));
+} __packed;
#define FW_MGMT_IOCTL_BASE 'F'
#define FW_MGMT_IOC_GET_INTF_FW _IOR(FW_MGMT_IOCTL_BASE, 0, struct fw_mgmt_ioc_get_intf_version)
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index 87d36948c6106..a5c2fe963866d 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -29,13 +29,9 @@ struct gb_channel {
struct attribute_group *attr_group;
const struct attribute_group **attr_groups;
struct led_classdev *led;
-#if IS_REACHABLE(CONFIG_LEDS_CLASS_FLASH)
struct led_classdev_flash fled;
struct led_flash_setting intensity_uA;
struct led_flash_setting timeout_us;
-#else
- struct led_classdev cled;
-#endif
struct gb_light *light;
bool is_registered;
bool releasing;
@@ -84,7 +80,6 @@ static bool is_channel_flash(struct gb_channel *channel)
| GB_CHANNEL_MODE_INDICATOR));
}
-#if IS_REACHABLE(CONFIG_LEDS_CLASS_FLASH)
static struct gb_channel *get_channel_from_cdev(struct led_classdev *cdev)
{
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(cdev);
@@ -100,15 +95,15 @@ static struct led_classdev *get_channel_cdev(struct gb_channel *channel)
static struct gb_channel *get_channel_from_mode(struct gb_light *light,
u32 mode)
{
- struct gb_channel *channel = NULL;
+ struct gb_channel *channel;
int i;
for (i = 0; i < light->channels_count; i++) {
channel = &light->channels[i];
- if (channel && channel->mode == mode)
- break;
+ if (channel->mode == mode)
+ return channel;
}
- return channel;
+ return NULL;
}
static int __gb_lights_flash_intensity_set(struct gb_channel *channel,
@@ -153,22 +148,6 @@ static int __gb_lights_flash_brightness_set(struct gb_channel *channel)
return __gb_lights_flash_intensity_set(channel, intensity);
}
-#else
-static struct gb_channel *get_channel_from_cdev(struct led_classdev *cdev)
-{
- return container_of(cdev, struct gb_channel, cled);
-}
-
-static struct led_classdev *get_channel_cdev(struct gb_channel *channel)
-{
- return &channel->cled;
-}
-
-static int __gb_lights_flash_brightness_set(struct gb_channel *channel)
-{
- return 0;
-}
-#endif
static int gb_lights_color_set(struct gb_channel *channel, u32 color);
static int gb_lights_fade_set(struct gb_channel *channel);
diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
index d7b39f3bb6525..bb33379b5297e 100644
--- a/drivers/staging/greybus/loopback.c
+++ b/drivers/staging/greybus/loopback.c
@@ -1028,7 +1028,7 @@ static int gb_loopback_probe(struct gb_bundle *bundle,
gb->file = debugfs_create_file(name, S_IFREG | 0444, gb_dev.root, gb,
&gb_loopback_dbgfs_latency_fops);
- gb->id = ida_simple_get(&loopback_ida, 0, 0, GFP_KERNEL);
+ gb->id = ida_alloc(&loopback_ida, GFP_KERNEL);
if (gb->id < 0) {
retval = gb->id;
goto out_debugfs_remove;
@@ -1079,7 +1079,7 @@ out_conn:
out_connection_disable:
gb_connection_disable(connection);
out_ida_remove:
- ida_simple_remove(&loopback_ida, gb->id);
+ ida_free(&loopback_ida, gb->id);
out_debugfs_remove:
debugfs_remove(gb->file);
out_connection_destroy:
@@ -1121,7 +1121,7 @@ static void gb_loopback_disconnect(struct gb_bundle *bundle)
spin_unlock_irqrestore(&gb_dev.lock, flags);
device_unregister(gb->dev);
- ida_simple_remove(&loopback_ida, gb->id);
+ ida_free(&loopback_ida, gb->id);
gb_connection_destroy(gb->connection);
kfree(gb);
diff --git a/drivers/staging/greybus/raw.c b/drivers/staging/greybus/raw.c
index b9c6eff7cdc11..836d35e5fa859 100644
--- a/drivers/staging/greybus/raw.c
+++ b/drivers/staging/greybus/raw.c
@@ -181,7 +181,7 @@ static int gb_raw_probe(struct gb_bundle *bundle,
raw->connection = connection;
greybus_set_drvdata(bundle, raw);
- minor = ida_simple_get(&minors, 0, 0, GFP_KERNEL);
+ minor = ida_alloc(&minors, GFP_KERNEL);
if (minor < 0) {
retval = minor;
goto error_connection_destroy;
@@ -214,7 +214,7 @@ error_connection_disable:
gb_connection_disable(connection);
error_remove_ida:
- ida_simple_remove(&minors, minor);
+ ida_free(&minors, minor);
error_connection_destroy:
gb_connection_destroy(connection);
@@ -235,7 +235,7 @@ static void gb_raw_disconnect(struct gb_bundle *bundle)
device_destroy(&raw_class, raw->dev);
cdev_del(&raw->cdev);
gb_connection_disable(connection);
- ida_simple_remove(&minors, MINOR(raw->dev));
+ ida_free(&minors, MINOR(raw->dev));
gb_connection_destroy(connection);
mutex_lock(&raw->list_lock);
diff --git a/drivers/staging/greybus/vibrator.c b/drivers/staging/greybus/vibrator.c
index 227e18d92a958..89bef80455496 100644
--- a/drivers/staging/greybus/vibrator.c
+++ b/drivers/staging/greybus/vibrator.c
@@ -153,7 +153,7 @@ static int gb_vibrator_probe(struct gb_bundle *bundle,
* there is a "real" device somewhere in the kernel for this, but I
* can't find it at the moment...
*/
- vib->minor = ida_simple_get(&minors, 0, 0, GFP_KERNEL);
+ vib->minor = ida_alloc(&minors, GFP_KERNEL);
if (vib->minor < 0) {
retval = vib->minor;
goto err_connection_disable;
@@ -173,7 +173,7 @@ static int gb_vibrator_probe(struct gb_bundle *bundle,
return 0;
err_ida_remove:
- ida_simple_remove(&minors, vib->minor);
+ ida_free(&minors, vib->minor);
err_connection_disable:
gb_connection_disable(connection);
err_connection_destroy:
@@ -197,7 +197,7 @@ static void gb_vibrator_disconnect(struct gb_bundle *bundle)
turn_off(vib);
device_unregister(vib->dev);
- ida_simple_remove(&minors, vib->minor);
+ ida_free(&minors, vib->minor);
gb_connection_disable(vib->connection);
gb_connection_destroy(vib->connection);
kfree(vib);
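
The greybus hunks above all apply the same mechanical mapping from the deprecated ida_simple_*() helpers to the current IDA API: ida_simple_get()'s exclusive upper bound becomes ida_alloc_range()'s inclusive maximum (256 -> 255), open-ended ranges become ida_alloc()/ida_alloc_min()/ida_alloc_max(), and ida_simple_remove() becomes ida_free(). A minimal, self-contained sketch of the replacement calls (hypothetical names, not part of these patches):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);		/* hypothetical IDA, for illustration only */

static int example_use_id(void)
{
	int id;

	/* ida_simple_get(&ida, 1, 256, ...) -> inclusive max of 255 */
	id = ida_alloc_range(&example_ida, 1, 255, GFP_KERNEL);
	if (id < 0)
		return id;		/* -ENOMEM or -ENOSPC */

	/* ... use id ... */

	ida_free(&example_ida, id);	/* replaces ida_simple_remove() */
	return 0;
}
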
diff --git a/drivers/staging/media/atomisp/TODO b/drivers/staging/media/atomisp/TODO
index d99cc898cd991..bfef99997a1d2 100644
--- a/drivers/staging/media/atomisp/TODO
+++ b/drivers/staging/media/atomisp/TODO
@@ -29,16 +29,6 @@ TODO
1. Items which MUST be fixed before the driver can be moved out of staging:
-* The atomisp ov2680 and ov5693 sensor drivers bind to the same hw-ids as
- the standard ov2680 and ov5693 drivers under drivers/media/i2c, which
- conflicts. Drop the atomisp private ov2680 and ov5693 drivers:
- * Port various ov2680 improvements from atomisp_ov2680.c to regular ov2680.c
- and switch to regular ov2680 driver
- * Make atomisp work with the regular ov5693 driver and drop atomisp_ov5693
-
-* Fix atomisp causing the whole machine to hang in its probe() error-exit
- path taken in the firmware missing case
-
* Remove/disable private IOCTLs
* Remove/disable custom v4l2-ctrls
diff --git a/drivers/staging/media/atomisp/i2c/gc2235.h b/drivers/staging/media/atomisp/i2c/gc2235.h
index 55ea422291ba8..ade28950db732 100644
--- a/drivers/staging/media/atomisp/i2c/gc2235.h
+++ b/drivers/staging/media/atomisp/i2c/gc2235.h
@@ -77,9 +77,6 @@
/*
* GC2235 System control registers
*/
-/*
- * GC2235 System control registers
- */
#define GC2235_SENSOR_ID_H 0xF0
#define GC2235_SENSOR_ID_L 0xF1
#define GC2235_RESET_RELATED 0xFE
@@ -167,7 +164,7 @@ enum gc2235_tok_type {
GC2235_TOK_MASK = 0xfff0
};
-/**
+/*
* struct gc2235_reg - MI sensor register format
* @type: type of the register
* @reg: 8-bit offset to register
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
index d0db2efe00452..8593ba90605f6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -3721,22 +3721,34 @@ apply_min_padding:
*padding_h = max_t(u32, *padding_h, min_pad_h);
}
-static int atomisp_set_crop(struct atomisp_device *isp,
- const struct v4l2_mbus_framefmt *format,
- struct v4l2_subdev_state *sd_state,
- int which)
+static int atomisp_set_crop_and_fmt(struct atomisp_device *isp,
+ struct v4l2_mbus_framefmt *ffmt,
+ int which)
{
struct atomisp_input_subdev *input = &isp->inputs[isp->asd.input_curr];
struct v4l2_subdev_selection sel = {
.which = which,
.target = V4L2_SEL_TGT_CROP,
- .r.width = format->width,
- .r.height = format->height,
+ .r.width = ffmt->width,
+ .r.height = ffmt->height,
};
- int ret;
+ struct v4l2_subdev_format format = {
+ .which = which,
+ .format = *ffmt,
+ };
+ struct v4l2_subdev_state *sd_state;
+ int ret = 0;
+
+ if (!input->camera)
+ return -EINVAL;
+
+ sd_state = (which == V4L2_SUBDEV_FORMAT_TRY) ? input->try_sd_state :
+ input->camera->active_state;
+ if (sd_state)
+ v4l2_subdev_lock_state(sd_state);
if (!input->crop_support)
- return 0;
+ goto set_fmt;
/* Cropping is done before binning, when binning double the crop rect */
if (input->binning_support && sel.r.width <= (input->native_rect.width / 2) &&
@@ -3757,6 +3769,14 @@ static int atomisp_set_crop(struct atomisp_device *isp,
dev_err(isp->dev, "Error setting crop to %ux%u @%ux%u: %d\n",
sel.r.width, sel.r.height, sel.r.left, sel.r.top, ret);
+set_fmt:
+ if (ret == 0)
+ ret = v4l2_subdev_call(input->camera, pad, set_fmt, sd_state, &format);
+
+ if (sd_state)
+ v4l2_subdev_unlock_state(sd_state);
+
+ *ffmt = format.format;
return ret;
}
@@ -3767,16 +3787,10 @@ int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f,
{
const struct atomisp_format_bridge *fmt, *snr_fmt;
struct atomisp_sub_device *asd = &isp->asd;
- struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
- struct v4l2_subdev_format format = {
- .which = V4L2_SUBDEV_FORMAT_TRY,
- };
+ struct v4l2_mbus_framefmt ffmt = { };
u32 padding_w, padding_h;
int ret;
- if (!input->camera)
- return -EINVAL;
-
fmt = atomisp_get_format_bridge(f->pixelformat);
/* Currently, raw formats are broken!!! */
if (!fmt || fmt->sh_fmt == IA_CSS_FRAME_FORMAT_RAW) {
@@ -3797,38 +3811,27 @@ int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f,
* the set_fmt call, like atomisp_set_fmt_to_snr() does.
*/
atomisp_get_padding(isp, f->width, f->height, &padding_w, &padding_h);
- v4l2_fill_mbus_format(&format.format, f, fmt->mbus_code);
- format.format.width += padding_w;
- format.format.height += padding_h;
-
- dev_dbg(isp->dev, "try_mbus_fmt: asking for %ux%u\n",
- format.format.width, format.format.height);
-
- v4l2_subdev_lock_state(input->try_sd_state);
+ v4l2_fill_mbus_format(&ffmt, f, fmt->mbus_code);
+ ffmt.width += padding_w;
+ ffmt.height += padding_h;
- ret = atomisp_set_crop(isp, &format.format, input->try_sd_state,
- V4L2_SUBDEV_FORMAT_TRY);
- if (ret == 0)
- ret = v4l2_subdev_call(input->camera, pad, set_fmt,
- input->try_sd_state, &format);
-
- v4l2_subdev_unlock_state(input->try_sd_state);
+ dev_dbg(isp->dev, "try_mbus_fmt: try %ux%u\n", ffmt.width, ffmt.height);
+ ret = atomisp_set_crop_and_fmt(isp, &ffmt, V4L2_SUBDEV_FORMAT_TRY);
if (ret)
return ret;
- dev_dbg(isp->dev, "try_mbus_fmt: got %ux%u\n",
- format.format.width, format.format.height);
+ dev_dbg(isp->dev, "try_mbus_fmt: got %ux%u\n", ffmt.width, ffmt.height);
- snr_fmt = atomisp_get_format_bridge_from_mbus(format.format.code);
+ snr_fmt = atomisp_get_format_bridge_from_mbus(ffmt.code);
if (!snr_fmt) {
dev_err(isp->dev, "unknown sensor format 0x%8.8x\n",
- format.format.code);
+ ffmt.code);
return -EINVAL;
}
- f->width = format.format.width - padding_w;
- f->height = format.format.height - padding_h;
+ f->width = ffmt.width - padding_w;
+ f->height = ffmt.height - padding_h;
/*
* If the format is jpeg or custom RAW, then the width and height will
@@ -4236,28 +4239,22 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
struct atomisp_sub_device *asd = pipe->asd;
struct atomisp_device *isp = asd->isp;
- struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
const struct atomisp_format_bridge *format;
- struct v4l2_subdev_state *act_sd_state;
- struct v4l2_subdev_format vformat = {
- .which = V4L2_SUBDEV_FORMAT_TRY,
- };
- struct v4l2_mbus_framefmt *ffmt = &vformat.format;
- struct v4l2_mbus_framefmt *req_ffmt;
+ struct v4l2_mbus_framefmt req_ffmt, ffmt = { };
struct atomisp_input_stream_info *stream_info =
- (struct atomisp_input_stream_info *)ffmt->reserved;
+ (struct atomisp_input_stream_info *)&ffmt.reserved;
int ret;
format = atomisp_get_format_bridge(f->pixelformat);
if (!format)
return -EINVAL;
- v4l2_fill_mbus_format(ffmt, f, format->mbus_code);
- ffmt->height += asd->sink_pad_padding_h + dvs_env_h;
- ffmt->width += asd->sink_pad_padding_w + dvs_env_w;
+ v4l2_fill_mbus_format(&ffmt, f, format->mbus_code);
+ ffmt.height += asd->sink_pad_padding_h + dvs_env_h;
+ ffmt.width += asd->sink_pad_padding_w + dvs_env_w;
dev_dbg(isp->dev, "s_mbus_fmt: ask %ux%u (padding %ux%u, dvs %ux%u)\n",
- ffmt->width, ffmt->height, asd->sink_pad_padding_w, asd->sink_pad_padding_h,
+ ffmt.width, ffmt.height, asd->sink_pad_padding_w, asd->sink_pad_padding_h,
dvs_env_w, dvs_env_h);
__atomisp_init_stream_info(ATOMISP_INPUT_STREAM_GENERAL, stream_info);
@@ -4266,28 +4263,17 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
/* Disable dvs if resolution can't be supported by sensor */
if (asd->params.video_dis_en && asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
- v4l2_subdev_lock_state(input->try_sd_state);
-
- ret = atomisp_set_crop(isp, &vformat.format, input->try_sd_state,
- V4L2_SUBDEV_FORMAT_TRY);
- if (ret == 0) {
- vformat.which = V4L2_SUBDEV_FORMAT_TRY;
- ret = v4l2_subdev_call(input->camera, pad, set_fmt,
- input->try_sd_state, &vformat);
- }
-
- v4l2_subdev_unlock_state(input->try_sd_state);
-
+ ret = atomisp_set_crop_and_fmt(isp, &ffmt, V4L2_SUBDEV_FORMAT_TRY);
if (ret)
return ret;
dev_dbg(isp->dev, "video dis: sensor width: %d, height: %d\n",
- ffmt->width, ffmt->height);
+ ffmt.width, ffmt.height);
- if (ffmt->width < req_ffmt->width ||
- ffmt->height < req_ffmt->height) {
- req_ffmt->height -= dvs_env_h;
- req_ffmt->width -= dvs_env_w;
+ if (ffmt.width < req_ffmt.width ||
+ ffmt.height < req_ffmt.height) {
+ req_ffmt.height -= dvs_env_h;
+ req_ffmt.width -= dvs_env_w;
ffmt = req_ffmt;
dev_warn(isp->dev,
"can not enable video dis due to sensor limitation.");
@@ -4295,32 +4281,21 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
}
}
- act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
-
- ret = atomisp_set_crop(isp, &vformat.format, act_sd_state,
- V4L2_SUBDEV_FORMAT_ACTIVE);
- if (ret == 0) {
- vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(input->camera, pad, set_fmt, act_sd_state, &vformat);
- }
-
- if (act_sd_state)
- v4l2_subdev_unlock_state(act_sd_state);
-
+ ret = atomisp_set_crop_and_fmt(isp, &ffmt, V4L2_SUBDEV_FORMAT_ACTIVE);
if (ret)
return ret;
__atomisp_update_stream_env(asd, ATOMISP_INPUT_STREAM_GENERAL, stream_info);
dev_dbg(isp->dev, "sensor width: %d, height: %d\n",
- ffmt->width, ffmt->height);
+ ffmt.width, ffmt.height);
- if (ffmt->width < ATOM_ISP_STEP_WIDTH ||
- ffmt->height < ATOM_ISP_STEP_HEIGHT)
+ if (ffmt.width < ATOM_ISP_STEP_WIDTH ||
+ ffmt.height < ATOM_ISP_STEP_HEIGHT)
return -EINVAL;
if (asd->params.video_dis_en && asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
- (ffmt->width < req_ffmt->width || ffmt->height < req_ffmt->height)) {
+ (ffmt.width < req_ffmt.width || ffmt.height < req_ffmt.height)) {
dev_warn(isp->dev,
"can not enable video dis due to sensor limitation.");
asd->params.video_dis_en = false;
@@ -4328,9 +4303,9 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
atomisp_subdev_set_ffmt(&asd->subdev, NULL,
V4L2_SUBDEV_FORMAT_ACTIVE,
- ATOMISP_SUBDEV_PAD_SINK, ffmt);
+ ATOMISP_SUBDEV_PAD_SINK, &ffmt);
- return css_input_resolution_changed(asd, ffmt);
+ return css_input_resolution_changed(asd, &ffmt);
}
int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
index 02f06294bbfe0..6fe8b0b7467a7 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
@@ -757,7 +757,7 @@ int atomisp_css_init(struct atomisp_device *isp)
return ret;
/* Init ISP */
- err = ia_css_init(isp->dev, &isp->css_env.isp_css_env, NULL,
+ err = ia_css_init(isp->dev, &isp->css_env.isp_css_env,
(uint32_t)mmu_base_addr, IA_CSS_IRQ_TYPE_PULSE);
if (err) {
dev_err(isp->dev, "css init failed --- bad firmware?\n");
diff --git a/drivers/staging/media/atomisp/pci/atomisp_drvfs.c b/drivers/staging/media/atomisp/pci/atomisp_drvfs.c
index 1df534bf54d32..ba7dd569a55a1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_drvfs.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_drvfs.c
@@ -27,31 +27,17 @@
#include "hmm/hmm.h"
#include "ia_css_debug.h"
+#define OPTION_BIN_LIST BIT(0)
+#define OPTION_BIN_RUN BIT(1)
+#define OPTION_VALID (OPTION_BIN_LIST | OPTION_BIN_RUN)
+
/*
- * _iunit_debug:
- * dbglvl: iunit css driver trace level
* dbgopt: iunit debug option:
* bit 0: binary list
* bit 1: running binary
* bit 2: memory statistic
-*/
-struct _iunit_debug {
- struct device_driver *drv;
- struct atomisp_device *isp;
- unsigned int dbglvl;
- unsigned int dbgfun;
- unsigned int dbgopt;
-};
-
-#define OPTION_BIN_LIST BIT(0)
-#define OPTION_BIN_RUN BIT(1)
-#define OPTION_VALID (OPTION_BIN_LIST \
- | OPTION_BIN_RUN)
-
-static struct _iunit_debug iunit_debug = {
- .dbglvl = 0,
- .dbgopt = OPTION_BIN_LIST,
-};
+ */
+static unsigned int dbgopt = OPTION_BIN_LIST;
static inline int iunit_dump_dbgopt(struct atomisp_device *isp,
unsigned int opt)
@@ -88,34 +74,44 @@ opt_err:
return ret;
}
-static ssize_t iunit_dbglvl_show(struct device_driver *drv, char *buf)
+static ssize_t dbglvl_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- iunit_debug.dbglvl = dbg_level;
- return sysfs_emit(buf, "dtrace level:%u\n", iunit_debug.dbglvl);
+ unsigned int dbglvl = ia_css_debug_get_dtrace_level();
+
+ return sysfs_emit(buf, "dtrace level:%u\n", dbglvl);
}
-static ssize_t iunit_dbglvl_store(struct device_driver *drv, const char *buf,
- size_t size)
+static ssize_t dbglvl_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
{
- if (kstrtouint(buf, 10, &iunit_debug.dbglvl)
- || iunit_debug.dbglvl < 1
- || iunit_debug.dbglvl > 9) {
+ unsigned int dbglvl;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &dbglvl);
+ if (ret)
+ return ret;
+
+ if (dbglvl < 1 || dbglvl > 9)
return -ERANGE;
- }
- ia_css_debug_set_dtrace_level(iunit_debug.dbglvl);
+ ia_css_debug_set_dtrace_level(dbglvl);
return size;
}
+static DEVICE_ATTR_RW(dbglvl);
-static ssize_t iunit_dbgfun_show(struct device_driver *drv, char *buf)
+static ssize_t dbgfun_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- iunit_debug.dbgfun = atomisp_get_css_dbgfunc();
- return sysfs_emit(buf, "dbgfun opt:%u\n", iunit_debug.dbgfun);
+ unsigned int dbgfun = atomisp_get_css_dbgfunc();
+
+ return sysfs_emit(buf, "dbgfun opt:%u\n", dbgfun);
}
-static ssize_t iunit_dbgfun_store(struct device_driver *drv, const char *buf,
- size_t size)
+static ssize_t dbgfun_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
{
+ struct atomisp_device *isp = dev_get_drvdata(dev);
unsigned int opt;
int ret;
@@ -123,23 +119,20 @@ static ssize_t iunit_dbgfun_store(struct device_driver *drv, const char *buf,
if (ret)
return ret;
- ret = atomisp_set_css_dbgfunc(iunit_debug.isp, opt);
- if (ret)
- return ret;
-
- iunit_debug.dbgfun = opt;
-
- return size;
+ return atomisp_set_css_dbgfunc(isp, opt);
}
+static DEVICE_ATTR_RW(dbgfun);
-static ssize_t iunit_dbgopt_show(struct device_driver *drv, char *buf)
+static ssize_t dbgopt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- return sysfs_emit(buf, "option:0x%x\n", iunit_debug.dbgopt);
+ return sysfs_emit(buf, "option:0x%x\n", dbgopt);
}
-static ssize_t iunit_dbgopt_store(struct device_driver *drv, const char *buf,
- size_t size)
+static ssize_t dbgopt_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
{
+ struct atomisp_device *isp = dev_get_drvdata(dev);
unsigned int opt;
int ret;
@@ -147,56 +140,27 @@ static ssize_t iunit_dbgopt_store(struct device_driver *drv, const char *buf,
if (ret)
return ret;
- iunit_debug.dbgopt = opt;
- ret = iunit_dump_dbgopt(iunit_debug.isp, iunit_debug.dbgopt);
+ dbgopt = opt;
+ ret = iunit_dump_dbgopt(isp, dbgopt);
if (ret)
return ret;
return size;
}
+static DEVICE_ATTR_RW(dbgopt);
-static const struct driver_attribute iunit_drvfs_attrs[] = {
- __ATTR(dbglvl, 0644, iunit_dbglvl_show, iunit_dbglvl_store),
- __ATTR(dbgfun, 0644, iunit_dbgfun_show, iunit_dbgfun_store),
- __ATTR(dbgopt, 0644, iunit_dbgopt_show, iunit_dbgopt_store),
+static struct attribute *dbg_attrs[] = {
+ &dev_attr_dbglvl.attr,
+ &dev_attr_dbgfun.attr,
+ &dev_attr_dbgopt.attr,
+ NULL
};
-static int iunit_drvfs_create_files(struct device_driver *drv)
-{
- int i, ret = 0;
-
- for (i = 0; i < ARRAY_SIZE(iunit_drvfs_attrs); i++)
- ret |= driver_create_file(drv, &iunit_drvfs_attrs[i]);
-
- return ret;
-}
-
-static void iunit_drvfs_remove_files(struct device_driver *drv)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(iunit_drvfs_attrs); i++)
- driver_remove_file(drv, &iunit_drvfs_attrs[i]);
-}
-
-int atomisp_drvfs_init(struct atomisp_device *isp)
-{
- struct device_driver *drv = isp->dev->driver;
- int ret;
-
- iunit_debug.isp = isp;
- iunit_debug.drv = drv;
-
- ret = iunit_drvfs_create_files(iunit_debug.drv);
- if (ret) {
- dev_err(isp->dev, "drvfs_create_files error: %d\n", ret);
- iunit_drvfs_remove_files(iunit_debug.drv);
- }
-
- return ret;
-}
+static const struct attribute_group dbg_attr_group = {
+ .attrs = dbg_attrs,
+};
-void atomisp_drvfs_exit(void)
-{
- iunit_drvfs_remove_files(iunit_debug.drv);
-}
+const struct attribute_group *dbg_attr_groups[] = {
+ &dbg_attr_group,
+ NULL
+};
diff --git a/drivers/staging/media/atomisp/pci/atomisp_drvfs.h b/drivers/staging/media/atomisp/pci/atomisp_drvfs.h
index 8f4cc722b881e..8495cc133c060 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_drvfs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_drvfs.h
@@ -19,7 +19,8 @@
#ifndef __ATOMISP_DRVFS_H__
#define __ATOMISP_DRVFS_H__
-int atomisp_drvfs_init(struct atomisp_device *isp);
-void atomisp_drvfs_exit(void);
+#include <linux/sysfs.h>
+
+extern const struct attribute_group *dbg_attr_groups[];
#endif /* __ATOMISP_DRVFS_H__ */
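
The drvfs rework above replaces hand-rolled driver_create_file() calls with device attributes published through an attribute group; hooking such a group into a driver's .dev_groups (as done for atomisp_pci_driver further below) lets the driver core create and remove the sysfs files automatically around probe/remove. A minimal sketch of the pattern with a hypothetical attribute (not part of this patch):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);	/* placeholder value */
}
static DEVICE_ATTR_RO(example);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL
};
ATTRIBUTE_GROUPS(example);	/* provides example_groups for .dev_groups */
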
diff --git a/drivers/staging/media/atomisp/pci/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp_internal.h
index d5b077e602cae..bba9bc64d4474 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_internal.h
@@ -192,6 +192,7 @@ struct atomisp_device {
struct dev_pm_domain pm_domain;
struct pm_qos_request pm_qos;
s32 max_isr_latency;
+ bool pm_only;
struct atomisp_mipi_csi2_device csi2_port[ATOMISP_CAMERA_NR_PORTS];
struct atomisp_tpg_device tpg;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
index 5b2d88c02d36a..bb8e5e883b508 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
@@ -666,14 +666,6 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input)
return ret;
}
- /* select operating sensor */
- ret = v4l2_subdev_call(isp->inputs[input].camera, video, s_routing,
- 0, 0, 0);
- if (ret && (ret != -ENOIOCTLCMD)) {
- dev_err(isp->dev, "Failed to select sensor\n");
- return ret;
- }
-
if (!IS_ISP2401) {
motor = isp->inputs[input].motor;
} else {
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
index 547e1444ad973..f736e54c7df35 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -55,10 +55,6 @@
/* G-Min addition: pull this in from intel_mid_pm.h */
#define CSTATE_EXIT_LATENCY_C1 1
-static uint skip_fwload;
-module_param(skip_fwload, uint, 0644);
-MODULE_PARM_DESC(skip_fwload, "Skip atomisp firmware load");
-
/* cross componnet debug message flag */
int dbg_level;
module_param(dbg_level, int, 0644);
@@ -552,7 +548,7 @@ static int atomisp_mrfld_power(struct atomisp_device *isp, bool enable)
dev_dbg(isp->dev, "IUNIT power-%s.\n", enable ? "on" : "off");
/* WA for P-Unit, if DVFS enabled, ISP timeout observed */
- if (IS_CHT && enable) {
+ if (IS_CHT && enable && !isp->pm_only) {
punit_ddr_dvfs_enable(false);
msleep(20);
}
@@ -562,7 +558,7 @@ static int atomisp_mrfld_power(struct atomisp_device *isp, bool enable)
val, MRFLD_ISPSSPM0_ISPSSC_MASK);
/* WA:Enable DVFS */
- if (IS_CHT && !enable)
+ if (IS_CHT && !enable && !isp->pm_only)
punit_ddr_dvfs_enable(true);
/*
@@ -591,9 +587,6 @@ static int atomisp_mrfld_power(struct atomisp_device *isp, bool enable)
usleep_range(100, 150);
} while (1);
- if (enable)
- msleep(10);
-
dev_err(isp->dev, "IUNIT power-%s timeout.\n", enable ? "on" : "off");
return -EBUSY;
}
@@ -605,11 +598,15 @@ int atomisp_power_off(struct device *dev)
int ret;
u32 reg;
- atomisp_css_uninit(isp);
+ if (isp->pm_only) {
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, 0);
+ } else {
+ atomisp_css_uninit(isp);
- ret = atomisp_mrfld_pre_power_down(isp);
- if (ret)
- return ret;
+ ret = atomisp_mrfld_pre_power_down(isp);
+ if (ret)
+ return ret;
+ }
/*
* MRFLD IUNIT DPHY is located in an always-power-on island
@@ -638,6 +635,9 @@ int atomisp_power_on(struct device *dev)
pci_restore_state(to_pci_dev(dev));
cpu_latency_qos_update_request(&isp->pm_qos, isp->max_isr_latency);
+ if (isp->pm_only)
+ return 0;
+
/*restore register values for iUnit and iUnitPHY registers*/
if (isp->saved_regs.pcicmdsts)
atomisp_restore_iunit_reg(isp);
@@ -1161,9 +1161,6 @@ atomisp_load_firmware(struct atomisp_device *isp)
int rc;
char *fw_path = NULL;
- if (skip_fwload)
- return NULL;
-
if (firmware_name[0] != '\0') {
fw_path = firmware_name;
} else {
@@ -1199,46 +1196,39 @@ atomisp_load_firmware(struct atomisp_device *isp)
return fw;
}
-/*
- * Check for flags the driver was compiled with against the PCI
- * device. Always returns true on other than ISP 2400.
- */
-static bool is_valid_device(struct pci_dev *pdev, const struct pci_device_id *id)
+static void atomisp_pm_init(struct atomisp_device *isp)
{
- const char *name;
- const char *product;
-
- product = dmi_get_system_info(DMI_PRODUCT_NAME);
-
- switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) {
- case ATOMISP_PCI_DEVICE_SOC_MRFLD:
- name = "Merrifield";
- break;
- case ATOMISP_PCI_DEVICE_SOC_BYT:
- name = "Baytrail";
- break;
- case ATOMISP_PCI_DEVICE_SOC_ANN:
- name = "Anniedale";
- break;
- case ATOMISP_PCI_DEVICE_SOC_CHT:
- name = "Cherrytrail";
- break;
- default:
- dev_err(&pdev->dev, "%s: unknown device ID %x04:%x04\n",
- product, id->vendor, id->device);
- return false;
- }
+ /*
+ * The atomisp does not use standard PCI power-management through the
+ * PCI config space. Instead this driver directly tells the P-Unit to
+ * disable the ISP over the IOSF. The standard PCI subsystem pm_ops will
+ * try to access the config space before (resume) / after (suspend) this
+ * driver has turned the ISP on / off, resulting in the following errors:
+ *
+ * "Unable to change power state from D0 to D3hot, device inaccessible"
+ * "Unable to change power state from D3cold to D0, device inaccessible"
+ *
+ * To avoid these errors override the pm_domain so that all the PCI
+ * subsys suspend / resume handling is skipped.
+ */
+ isp->pm_domain.ops.runtime_suspend = atomisp_power_off;
+ isp->pm_domain.ops.runtime_resume = atomisp_power_on;
+ isp->pm_domain.ops.suspend = atomisp_suspend;
+ isp->pm_domain.ops.resume = atomisp_resume;
- if (pdev->revision <= ATOMISP_PCI_REV_BYT_A0_MAX) {
- dev_err(&pdev->dev, "%s revision %d is not unsupported\n",
- name, pdev->revision);
- return false;
- }
+ cpu_latency_qos_add_request(&isp->pm_qos, PM_QOS_DEFAULT_VALUE);
+ dev_pm_domain_set(isp->dev, &isp->pm_domain);
- dev_info(&pdev->dev, "Detected %s version %d (ISP240%c) on %s\n",
- name, pdev->revision, IS_ISP2401 ? '1' : '0', product);
+ pm_runtime_allow(isp->dev);
+ pm_runtime_put_sync_suspend(isp->dev);
+}
- return true;
+static void atomisp_pm_uninit(struct atomisp_device *isp)
+{
+ pm_runtime_get_sync(isp->dev);
+ pm_runtime_forbid(isp->dev);
+ dev_pm_domain_set(isp->dev, NULL);
+ cpu_latency_qos_remove_request(&isp->pm_qos);
}
#define ATOM_ISP_PCI_BAR 0
@@ -1249,10 +1239,6 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
struct atomisp_device *isp;
unsigned int start;
int err, val;
- u32 irq;
-
- if (!is_valid_device(pdev, id))
- return -ENODEV;
/* Pointer to struct device. */
atomisp_dev = &pdev->dev;
@@ -1261,32 +1247,16 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
if (!pdata)
dev_warn(&pdev->dev, "no platform data available\n");
- err = pcim_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to enable CI ISP device (%d)\n", err);
- return err;
- }
-
start = pci_resource_start(pdev, ATOM_ISP_PCI_BAR);
dev_dbg(&pdev->dev, "start: 0x%x\n", start);
- err = pcim_iomap_regions(pdev, BIT(ATOM_ISP_PCI_BAR), pci_name(pdev));
- if (err) {
- dev_err(&pdev->dev, "Failed to I/O memory remapping (%d)\n", err);
- goto ioremap_fail;
- }
-
isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
- if (!isp) {
- err = -ENOMEM;
- goto atomisp_dev_alloc_fail;
- }
+ if (!isp)
+ return -ENOMEM;
isp->dev = &pdev->dev;
- isp->base = pcim_iomap_table(pdev)[ATOM_ISP_PCI_BAR];
isp->saved_regs.ispmmadr = start;
-
- dev_dbg(&pdev->dev, "atomisp mmio base: %p\n", isp->base);
+ isp->asd.isp = isp;
mutex_init(&isp->mutex);
spin_lock_init(&isp->lock);
@@ -1389,8 +1359,12 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
break;
default:
dev_err(&pdev->dev, "un-supported IUNIT device\n");
- err = -ENODEV;
- goto atomisp_dev_alloc_fail;
+ return -ENODEV;
+ }
+
+ if (pdev->revision <= ATOMISP_PCI_REV_BYT_A0_MAX) {
+ dev_err(&pdev->dev, "revision %d is not unsupported\n", pdev->revision);
+ return -ENODEV;
}
dev_info(&pdev->dev, "ISP HPLL frequency base = %d MHz\n", isp->hpll_freq);
@@ -1400,29 +1374,43 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
/* Load isp firmware from user space */
isp->firmware = atomisp_load_firmware(isp);
if (!isp->firmware) {
- err = -ENOENT;
- dev_dbg(&pdev->dev, "Firmware load failed\n");
- goto load_fw_fail;
+ /* No firmware, continue in pm-only mode for S0i3 support */
+ dev_info(&pdev->dev, "Continuing in power-management only mode\n");
+ isp->pm_only = true;
+ atomisp_pm_init(isp);
+ return 0;
}
err = sh_css_check_firmware_version(isp->dev, isp->firmware->data);
if (err) {
dev_dbg(&pdev->dev, "Firmware version check failed\n");
- goto fw_validation_fail;
+ goto error_release_firmware;
+ }
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable ISP PCI device (%d)\n", err);
+ goto error_release_firmware;
+ }
+
+ err = pcim_iomap_regions(pdev, BIT(ATOM_ISP_PCI_BAR), pci_name(pdev));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to I/O memory remapping (%d)\n", err);
+ goto error_release_firmware;
}
+ isp->base = pcim_iomap_table(pdev)[ATOM_ISP_PCI_BAR];
+
pci_set_master(pdev);
err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
if (err < 0) {
dev_err(&pdev->dev, "Failed to enable msi (%d)\n", err);
- goto enable_msi_fail;
+ goto error_release_firmware;
}
atomisp_msi_irq_init(isp);
- cpu_latency_qos_add_request(&isp->pm_qos, PM_QOS_DEFAULT_VALUE);
-
/*
* for MRFLD, Software/firmware needs to write a 1 to bit 0 of
* the register at CSI_RECEIVER_SELECTION_REG to enable SH CSI
@@ -1459,13 +1447,13 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
err = atomisp_initialize_modules(isp);
if (err < 0) {
dev_err(&pdev->dev, "atomisp_initialize_modules (%d)\n", err);
- goto initialize_modules_fail;
+ goto error_irq_uninit;
}
err = atomisp_register_entities(isp);
if (err < 0) {
dev_err(&pdev->dev, "atomisp_register_entities failed (%d)\n", err);
- goto register_entities_fail;
+ goto error_uninitialize_modules;
}
INIT_WORK(&isp->assert_recovery_work, atomisp_assert_recovery_work);
@@ -1473,29 +1461,6 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
/* save the iunit context only once after all the values are init'ed. */
atomisp_save_iunit_reg(isp);
- /*
- * The atomisp does not use standard PCI power-management through the
- * PCI config space. Instead this driver directly tells the P-Unit to
- * disable the ISP over the IOSF. The standard PCI subsystem pm_ops will
- * try to access the config space before (resume) / after (suspend) this
- * driver has turned the ISP on / off, resulting in the following errors:
- *
- * "Unable to change power state from D0 to D3hot, device inaccessible"
- * "Unable to change power state from D3cold to D0, device inaccessible"
- *
- * To avoid these errors override the pm_domain so that all the PCI
- * subsys suspend / resume handling is skipped.
- */
- isp->pm_domain.ops.runtime_suspend = atomisp_power_off;
- isp->pm_domain.ops.runtime_resume = atomisp_power_on;
- isp->pm_domain.ops.suspend = atomisp_suspend;
- isp->pm_domain.ops.resume = atomisp_resume;
-
- dev_pm_domain_set(&pdev->dev, &isp->pm_domain);
-
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_allow(&pdev->dev);
-
/* Init ISP memory management */
hmm_init();
@@ -1504,72 +1469,45 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
IRQF_SHARED, "isp_irq", isp);
if (err) {
dev_err(&pdev->dev, "Failed to request irq (%d)\n", err);
- goto request_irq_fail;
+ goto error_unregister_entities;
}
/* Load firmware into ISP memory */
err = atomisp_css_load_firmware(isp);
if (err) {
dev_err(&pdev->dev, "Failed to init css.\n");
- goto css_init_fail;
+ goto error_free_irq;
}
/* Clear FW image from memory */
release_firmware(isp->firmware);
isp->firmware = NULL;
isp->css_env.isp_css_fw.data = NULL;
+ atomisp_pm_init(isp);
+
err = v4l2_async_nf_register(&isp->notifier);
if (err) {
dev_err(isp->dev, "failed to register async notifier : %d\n", err);
- goto css_init_fail;
+ goto error_unload_firmware;
}
- atomisp_drvfs_init(isp);
-
return 0;
-css_init_fail:
+error_unload_firmware:
+ atomisp_pm_uninit(isp);
+ ia_css_unload_firmware();
+error_free_irq:
devm_free_irq(&pdev->dev, pdev->irq, isp);
-request_irq_fail:
+error_unregister_entities:
hmm_cleanup();
- pm_runtime_get_noresume(&pdev->dev);
- dev_pm_domain_set(&pdev->dev, NULL);
atomisp_unregister_entities(isp);
-register_entities_fail:
+error_uninitialize_modules:
atomisp_uninitialize_modules(isp);
-initialize_modules_fail:
- cpu_latency_qos_remove_request(&isp->pm_qos);
+error_irq_uninit:
atomisp_msi_irq_uninit(isp);
pci_free_irq_vectors(pdev);
-enable_msi_fail:
-fw_validation_fail:
+error_release_firmware:
release_firmware(isp->firmware);
-load_fw_fail:
- /*
- * Switch off ISP, as keeping it powered on would prevent
- * reaching S0ix states.
- *
- * The following lines have been copied from atomisp suspend path
- */
-
- pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
- irq &= BIT(INTR_IIR);
- pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
-
- pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
- irq &= ~BIT(INTR_IER);
- pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
-
- atomisp_msi_irq_uninit(isp);
-
- /* Address later when we worry about the ...field chips */
- if (IS_ENABLED(CONFIG_PM) && atomisp_mrfld_power(isp, false))
- dev_err(&pdev->dev, "Failed to switch off ISP\n");
-
-atomisp_dev_alloc_fail:
- pcim_iounmap_regions(pdev, BIT(ATOM_ISP_PCI_BAR));
-
-ioremap_fail:
return err;
}
@@ -1577,22 +1515,21 @@ static void atomisp_pci_remove(struct pci_dev *pdev)
{
struct atomisp_device *isp = pci_get_drvdata(pdev);
- dev_info(&pdev->dev, "Removing atomisp driver\n");
+ atomisp_pm_uninit(isp);
- atomisp_drvfs_exit();
+ if (isp->pm_only)
+ return;
+ /* Undo ia_css_init() from atomisp_power_on() */
+ atomisp_css_uninit(isp);
ia_css_unload_firmware();
+ devm_free_irq(&pdev->dev, pdev->irq, isp);
hmm_cleanup();
- pm_runtime_forbid(&pdev->dev);
- pm_runtime_get_noresume(&pdev->dev);
- dev_pm_domain_set(&pdev->dev, NULL);
- cpu_latency_qos_remove_request(&isp->pm_qos);
-
- atomisp_msi_irq_uninit(isp);
atomisp_unregister_entities(isp);
-
- release_firmware(isp->firmware);
+ atomisp_uninitialize_modules(isp);
+ atomisp_msi_irq_uninit(isp);
+ pci_free_irq_vectors(pdev);
}
static const struct pci_device_id atomisp_pci_tbl[] = {
@@ -1608,11 +1545,12 @@ static const struct pci_device_id atomisp_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_CHT)},
{0,}
};
-
MODULE_DEVICE_TABLE(pci, atomisp_pci_tbl);
-
static struct pci_driver atomisp_pci_driver = {
+ .driver = {
+ .dev_groups = dbg_attr_groups,
+ },
.name = "atomisp-isp2",
.id_table = atomisp_pci_tbl,
.probe = atomisp_pci_probe,
diff --git a/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h b/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
index 0579deac55350..e9846951f4ed9 100644
--- a/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
+++ b/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
@@ -73,7 +73,7 @@ uint32_t ia_css_circbuf_pop(
/**
* @brief Extract a value out of the circular buffer.
- * Get a value at an arbitrary poistion in the circular
+ * Get a value at an arbitrary position in the circular
* buffer. The user should call "ia_css_circbuf_is_empty()"
* to avoid accessing to an empty buffer.
*
diff --git a/drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c b/drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
index d9f7c143794dd..198c9f6e61911 100644
--- a/drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
+++ b/drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
@@ -24,7 +24,7 @@
**********************************************************************/
/*
* @brief Read the oldest element from the circular buffer.
- * Read the oldest element WITHOUT checking whehter the
+ * Read the oldest element WITHOUT checking whether the
* circular buffer is empty or not. The oldest element is
* also removed out from the circular buffer.
*
@@ -129,7 +129,7 @@ uint32_t ia_css_circbuf_extract(ia_css_circbuf_t *cb, int offset)
u32 src_pos;
u32 dest_pos;
- /* get the maximum offest */
+ /* get the maximum offset */
max_offset = ia_css_circbuf_get_offset(cb, cb->desc->start, cb->desc->end);
max_offset--;
@@ -207,7 +207,7 @@ bool ia_css_circbuf_increase_size(
{
u8 curr_size;
u8 curr_end;
- unsigned int i = 0;
+ unsigned int i;
if (!cb || sz_delta == 0)
return false;
diff --git a/drivers/staging/media/atomisp/pci/ia_css_acc_types.h b/drivers/staging/media/atomisp/pci/ia_css_acc_types.h
index d6e52b4971d68..f6838a8fc9d5d 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_acc_types.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_acc_types.h
@@ -84,7 +84,7 @@ struct ia_css_blob_info {
memory_offsets; /** offset wrt hdr in bytes */
u32 prog_name_offset; /** offset wrt hdr in bytes */
u32 size; /** Size of blob */
- u32 padding_size; /** total cummulative of bytes added due to section alignment */
+ u32 padding_size; /** total accumulation of bytes added due to section alignment */
u32 icache_source; /** Position of icache in blob */
u32 icache_size; /** Size of icache section */
u32 icache_padding;/** bytes added due to icache section alignment */
@@ -408,7 +408,7 @@ struct ia_css_acc_sp {
};
/* Acceleration firmware descriptor.
- * This descriptor descibes either SP code (stand-alone), or
+ * This descriptor describes either SP code (stand-alone), or
* ISP code (a separate pipeline stage).
*/
struct ia_css_acc_fw_hdr {
diff --git a/drivers/staging/media/atomisp/pci/ia_css_control.h b/drivers/staging/media/atomisp/pci/ia_css_control.h
index 88f031a63ba26..6a473459b346a 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_control.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_control.h
@@ -30,39 +30,28 @@
* environment in which the CSS code runs. This is
* used for host side memory access and message
* printing. May not be NULL.
- * @param[in] fw Firmware package containing the firmware for all
- * predefined ISP binaries.
- * if fw is NULL the firmware must be loaded before
- * through a call of ia_css_load_firmware
* @param[in] l1_base Base index (isp2400)
* of the L1 page table. This is a physical
* address or index.
* @param[in] irq_type The type of interrupt to be used (edge or level)
- * @return Returns -EINVAL in case of any
+ * @return Returns -EINVAL in case of any
* errors and 0 otherwise.
*
* This function initializes the API which includes allocating and initializing
- * internal data structures. This also interprets the firmware package. All
- * contents of this firmware package are copied into local data structures, so
- * the fw pointer could be freed after this function completes.
+ * internal data structures.
+ * ia_css_load_firmware() must be called to load the firmware before calling
+ * this function.
*/
int ia_css_init(struct device *dev,
- const struct ia_css_env *env,
- const struct ia_css_fw *fw,
- u32 l1_base,
- enum ia_css_irq_type irq_type);
+ const struct ia_css_env *env,
+ u32 l1_base,
+ enum ia_css_irq_type irq_type);
/* @brief Un-initialize the CSS API.
* @return None
*
- * This function deallocates all memory that has been allocated by the CSS API
- * Exception: if you explicitly loaded firmware through ia_css_load_firmware
- * you need to call ia_css_unload_firmware to deallocate the memory reserved
- * for the firmware.
- * After this function is called, no other CSS functions should be called
- * with the exception of ia_css_init which will re-initialize the CSS code,
- * ia_css_unload_firmware to unload the firmware or ia_css_load_firmware
- * to load new firmware
+ * This function deallocates all memory that has been allocated by the CSS API.
+ * After this function is called, no other CSS functions should be called.
*/
void
ia_css_uninit(void);
diff --git a/drivers/staging/media/atomisp/pci/ia_css_firmware.h b/drivers/staging/media/atomisp/pci/ia_css_firmware.h
index 01d2faf557cf8..d3a66128b4de6 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_firmware.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_firmware.h
@@ -46,10 +46,6 @@ struct device;
* This function interprets the firmware package. All
* contents of this firmware package are copied into local data structures, so
* the fw pointer could be freed after this function completes.
- *
- * Rationale for this function is that it can be called before ia_css_init, and thus
- * speeds up ia_css_init (ia_css_init is called each time a stream is created but the
- * firmware only needs to be loaded once).
*/
int
ia_css_load_firmware(struct device *dev, const struct ia_css_env *env,
@@ -61,6 +57,8 @@ ia_css_load_firmware(struct device *dev, const struct ia_css_env *env,
* This function unloads the firmware loaded by ia_css_load_firmware.
* It is pointless to call this function if no firmware is loaded,
* but it won't harm. Use this to deallocate all memory associated with the firmware.
+ * This function may only be called when the CSS API is in an uninitialized state
+ * (e.g. after calling ia_css_uninit()).
*/
void
ia_css_unload_firmware(void);
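
With the fw argument dropped from ia_css_init(), the kernel-doc above now pins down a strict ordering: firmware must be loaded before init and may only be unloaded once the API has been uninitialized. A minimal sketch of that ordering using the signatures from this patch (locals, callers and error handling abbreviated, names hypothetical):

static int example_css_bringup(struct device *dev, const struct ia_css_env *env,
			       const struct ia_css_fw *fw, u32 mmu_base)
{
	int err;

	err = ia_css_load_firmware(dev, env, fw);	/* must come first now */
	if (err)
		return err;

	err = ia_css_init(dev, env, mmu_base, IA_CSS_IRQ_TYPE_PULSE);
	if (err) {
		ia_css_unload_firmware();
		return err;
	}

	/* ... use the CSS API ... */

	ia_css_uninit();
	ia_css_unload_firmware();	/* only legal after ia_css_uninit() */
	return 0;
}
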
diff --git a/drivers/staging/media/atomisp/pci/ia_css_irq.h b/drivers/staging/media/atomisp/pci/ia_css_irq.h
index 26b1b3c8ba625..00e2fd1f9647a 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_irq.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_irq.h
@@ -84,11 +84,11 @@ enum ia_css_irq_info {
IA_CSS_IRQ_INFO_ISP_BINARY_STATISTICS_READY = BIT(17),
/** ISP binary statistics are ready */
IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR = BIT(18),
- /** the input system in in error */
+ /** the input system is in error */
IA_CSS_IRQ_INFO_IF_ERROR = BIT(19),
- /** the input formatter in in error */
+ /** the input formatter is in error */
IA_CSS_IRQ_INFO_DMA_ERROR = BIT(20),
- /** the dma in in error */
+ /** the dma is in error */
IA_CSS_IRQ_INFO_ISYS_EVENTS_READY = BIT(21),
/** end-of-frame events are ready in the isys_event queue */
};
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h
index 175c301ee96ac..ecc98686f5cf5 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h
@@ -57,9 +57,9 @@ struct ia_css_hdr_exclusion_params {
};
/**
- * \brief HDR public paramterers.
+ * \brief HDR public parameters.
* \details Struct with all parameters for HDR that can be seet from
- * the CSS API. Currenly, only test parameters are defined.
+ * the CSS API. Currently, only test parameters are defined.
*/
struct ia_css_hdr_config {
struct ia_css_hdr_irradiance_params irradiance; /** HDR irradiance parameters */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c
index 946b074e82886..d25bf59273baa 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c
@@ -19,7 +19,7 @@
/* Multi-Axes Color Correction table for ISP1.
* 64values = 2x2matrix for 16area, [s2.13]
- * ineffective: 16 of "identity 2x2 matix" {8192,0,0,8192}
+ * ineffective: 16 of "identity 2x2 matrix" {8192,0,0,8192}
*/
const struct ia_css_macc_table default_macc_table = {
{
@@ -36,7 +36,7 @@ const struct ia_css_macc_table default_macc_table = {
/* Multi-Axes Color Correction table for ISP2.
* 64values = 2x2matrix for 16area, [s1.12]
- * ineffective: 16 of "identity 2x2 matix" {4096,0,0,4096}
+ * ineffective: 16 of "identity 2x2 matrix" {4096,0,0,4096}
*/
const struct ia_css_macc_table default_macc2_table = {
{
diff --git a/drivers/staging/media/atomisp/pci/isp2400_input_system_global.h b/drivers/staging/media/atomisp/pci/isp2400_input_system_global.h
index 61f23814e2fdb..3ff61faf0621a 100644
--- a/drivers/staging/media/atomisp/pci/isp2400_input_system_global.h
+++ b/drivers/staging/media/atomisp/pci/isp2400_input_system_global.h
@@ -19,7 +19,7 @@
#define N_CSI_PORTS (3)
//AM: Use previous define for this.
-//MIPI allows upto 4 channels.
+//MIPI allows up to 4 channels.
#define N_CHANNELS (4)
// 12KB = 256bit x 384 words
#define IB_CAPACITY_IN_WORDS (384)
diff --git a/drivers/staging/media/atomisp/pci/isp2400_input_system_public.h b/drivers/staging/media/atomisp/pci/isp2400_input_system_public.h
index 447c7c5c55a1f..523c948923f31 100644
--- a/drivers/staging/media/atomisp/pci/isp2400_input_system_public.h
+++ b/drivers/staging/media/atomisp/pci/isp2400_input_system_public.h
@@ -163,7 +163,7 @@ STORAGE_CLASS_INPUT_SYSTEM_H void receiver_port_reg_store(
const hrt_address reg,
const hrt_data value);
-/*! Read from a control register PORT[port_ID] of of RECEIVER[ID]
+/*! Read from a control register PORT[port_ID] of RECEIVER[ID]
\param ID[in] RECEIVER identifier
\param port_ID[in] mipi PORT identifier
diff --git a/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c b/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
index 0f3729e55e14a..130662f8e7684 100644
--- a/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
+++ b/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
@@ -534,7 +534,7 @@ ia_css_binary_uninit(void) {
static int
binary_grid_deci_factor_log2(int width, int height)
{
- /* 3A/Shading decimation factor spcification (at August 2008)
+ /* 3A/Shading decimation factor specification (at August 2008)
* ------------------------------------------------------------------
* [Image Width (BQ)] [Decimation Factor (BQ)] [Resulting grid cells]
* 1280 ?c 32 40 ?c
diff --git a/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c b/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
index 3d8741e7d5ca7..9d2b5f9cbb146 100644
--- a/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
+++ b/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
@@ -693,7 +693,7 @@ static void pipeline_init_defaults(
static void ia_css_pipeline_set_zoom_stage(struct ia_css_pipeline *pipeline)
{
struct ia_css_pipeline_stage *stage = NULL;
- int err = 0;
+ int err;
assert(pipeline);
if (pipeline->pipe_id == IA_CSS_PIPE_ID_PREVIEW) {
diff --git a/drivers/staging/media/atomisp/pci/runtime/queue/src/queue.c b/drivers/staging/media/atomisp/pci/runtime/queue/src/queue.c
index 2f1c2df59f719..0e430388b331b 100644
--- a/drivers/staging/media/atomisp/pci/runtime/queue/src/queue.c
+++ b/drivers/staging/media/atomisp/pci/runtime/queue/src/queue.c
@@ -81,7 +81,7 @@ int ia_css_queue_uninit(ia_css_queue_t *qhandle)
int ia_css_queue_enqueue(ia_css_queue_t *qhandle, uint32_t item)
{
- int error = 0;
+ int error;
if (!qhandle)
return -EINVAL;
@@ -123,7 +123,7 @@ int ia_css_queue_enqueue(ia_css_queue_t *qhandle, uint32_t item)
/* c. Store the queue object */
/* Set only fields requiring update with
- * valid value. Avoids uncessary calls
+ * valid value. Avoids unnecessary calls
* to load/store functions
*/
ignore_desc_flags = QUEUE_IGNORE_SIZE_START_STEP_FLAGS;
@@ -138,7 +138,7 @@ int ia_css_queue_enqueue(ia_css_queue_t *qhandle, uint32_t item)
int ia_css_queue_dequeue(ia_css_queue_t *qhandle, uint32_t *item)
{
- int error = 0;
+ int error;
if (!qhandle || NULL == item)
return -EINVAL;
@@ -180,7 +180,7 @@ int ia_css_queue_dequeue(ia_css_queue_t *qhandle, uint32_t *item)
/* c. Store the queue object */
/* Set only fields requiring update with
- * valid value. Avoids uncessary calls
+ * valid value. Avoids unnecessary calls
* to load/store functions
*/
ignore_desc_flags = QUEUE_IGNORE_SIZE_END_STEP_FLAGS;
@@ -193,7 +193,7 @@ int ia_css_queue_dequeue(ia_css_queue_t *qhandle, uint32_t *item)
int ia_css_queue_is_full(ia_css_queue_t *qhandle, bool *is_full)
{
- int error = 0;
+ int error;
if ((!qhandle) || (!is_full))
return -EINVAL;
@@ -225,7 +225,7 @@ int ia_css_queue_is_full(ia_css_queue_t *qhandle, bool *is_full)
int ia_css_queue_get_free_space(ia_css_queue_t *qhandle, uint32_t *size)
{
- int error = 0;
+ int error;
if ((!qhandle) || (!size))
return -EINVAL;
@@ -257,7 +257,7 @@ int ia_css_queue_get_free_space(ia_css_queue_t *qhandle, uint32_t *size)
int ia_css_queue_get_used_space(ia_css_queue_t *qhandle, uint32_t *size)
{
- int error = 0;
+ int error;
if ((!qhandle) || (!size))
return -EINVAL;
@@ -289,8 +289,8 @@ int ia_css_queue_get_used_space(ia_css_queue_t *qhandle, uint32_t *size)
int ia_css_queue_peek(ia_css_queue_t *qhandle, u32 offset, uint32_t *element)
{
- u32 num_elems = 0;
- int error = 0;
+ u32 num_elems;
+ int error;
if ((!qhandle) || (!element))
return -EINVAL;
@@ -338,7 +338,7 @@ int ia_css_queue_peek(ia_css_queue_t *qhandle, u32 offset, uint32_t *element)
int ia_css_queue_is_empty(ia_css_queue_t *qhandle, bool *is_empty)
{
- int error = 0;
+ int error;
if ((!qhandle) || (!is_empty))
return -EINVAL;
@@ -370,7 +370,7 @@ int ia_css_queue_is_empty(ia_css_queue_t *qhandle, bool *is_empty)
int ia_css_queue_get_size(ia_css_queue_t *qhandle, uint32_t *size)
{
- int error = 0;
+ int error;
if ((!qhandle) || (!size))
return -EINVAL;
diff --git a/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c b/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c
index 2e07dab8bf519..1f24db77fe387 100644
--- a/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c
+++ b/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c
@@ -198,7 +198,7 @@ void rmgr_push_handle(struct ia_css_rmgr_vbuf_pool *pool,
struct ia_css_rmgr_vbuf_handle **handle)
{
u32 i;
- bool succes = false;
+ bool success = false;
assert(pool);
assert(pool->recycle);
@@ -208,11 +208,11 @@ void rmgr_push_handle(struct ia_css_rmgr_vbuf_pool *pool,
if (!pool->handles[i]) {
ia_css_rmgr_refcount_retain_vbuf(handle);
pool->handles[i] = *handle;
- succes = true;
+ success = true;
break;
}
}
- assert(succes);
+ assert(success);
}
/*
diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
index f35c90809414c..938a4ea89c590 100644
--- a/drivers/staging/media/atomisp/pci/sh_css.c
+++ b/drivers/staging/media/atomisp/pci/sh_css.c
@@ -174,8 +174,6 @@ static struct sh_css_hmm_buffer_record hmm_buffer_record[MAX_HMM_BUFFER_NUM];
#define GPIO_FLASH_PIN_MASK BIT(HIVE_GPIO_STROBE_TRIGGER_PIN)
-static bool fw_explicitly_loaded;
-
/*
* Local prototypes
*/
@@ -1360,7 +1358,6 @@ ia_css_unload_firmware(void)
ia_css_binary_uninit();
sh_css_unload_firmware();
}
- fw_explicitly_loaded = false;
}
static void
@@ -1405,13 +1402,9 @@ ia_css_load_firmware(struct device *dev, const struct ia_css_env *env,
my_css.flush = env->cpu_mem_env.flush;
}
- ia_css_unload_firmware(); /* in case we are called twice */
err = sh_css_load_firmware(dev, fw->data, fw->bytes);
- if (!err) {
+ if (!err)
err = ia_css_binary_init_infos();
- if (!err)
- fw_explicitly_loaded = true;
- }
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_load_firmware() leave\n");
return err;
@@ -1419,9 +1412,7 @@ ia_css_load_firmware(struct device *dev, const struct ia_css_env *env,
int
ia_css_init(struct device *dev, const struct ia_css_env *env,
- const struct ia_css_fw *fw,
- u32 mmu_l1_base,
- enum ia_css_irq_type irq_type)
+ u32 mmu_l1_base, enum ia_css_irq_type irq_type)
{
int err;
ia_css_spctrl_cfg spctrl_cfg;
@@ -1466,8 +1457,6 @@ ia_css_init(struct device *dev, const struct ia_css_env *env,
/* Check struct ia_css_init_dmem_cfg */
COMPILATION_ERROR_IF(sizeof(struct ia_css_sp_init_dmem_cfg) != SIZE_OF_IA_CSS_SP_INIT_DMEM_CFG_STRUCT);
- if (!fw && !fw_explicitly_loaded)
- return -EINVAL;
if (!env)
return -EINVAL;
@@ -1543,22 +1532,7 @@ ia_css_init(struct device *dev, const struct ia_css_env *env,
IA_CSS_LEAVE_ERR(err);
return err;
}
- if (fw) {
- ia_css_unload_firmware(); /* in case we already had firmware loaded */
- err = sh_css_load_firmware(dev, fw->data, fw->bytes);
- if (err) {
- IA_CSS_LEAVE_ERR(err);
- return err;
- }
- err = ia_css_binary_init_infos();
- if (err) {
- IA_CSS_LEAVE_ERR(err);
- return err;
- }
- fw_explicitly_loaded = false;
- my_css_save.loaded_fw = (struct ia_css_fw *)fw;
- }
if (!sh_css_setup_spctrl_config(&sh_css_sp_fw, SP_PROG_NAME, &spctrl_cfg))
return -EINVAL;
@@ -2163,9 +2137,6 @@ ia_css_uninit(void)
ifmtr_set_if_blocking_mode_reset = true;
}
- if (!fw_explicitly_loaded)
- ia_css_unload_firmware();
-
ia_css_spctrl_unload_fw(SP0_ID);
sh_css_sp_set_sp_running(false);
/* check and free any remaining mipi frames */
@@ -3635,7 +3606,7 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
assert(pipeline || pipe_id == IA_CSS_PIPE_ID_COPY);
- assert(sizeof(NULL) <= sizeof(ddr_buffer.kernel_ptr));
+ assert(sizeof(void *) <= sizeof(ddr_buffer.kernel_ptr));
ddr_buffer.kernel_ptr = HOST_ADDRESS(NULL);
ddr_buffer.cookie_ptr = buffer->driver_cookie;
ddr_buffer.timing_data = buffer->timing_data;
diff --git a/drivers/staging/media/atomisp/pci/sh_css_defs.h b/drivers/staging/media/atomisp/pci/sh_css_defs.h
index 7eb10b226f0a9..2afde974e75d2 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_defs.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_defs.h
@@ -131,7 +131,7 @@ RGB[0,8191],coef[-8192,8191] -> RGB[0,8191]
* invalid rows/columns that result from filter initialization are skipped. */
#define SH_CSS_MIN_DVS_ENVELOPE 12U
-/* The FPGA system (vec_nelems == 16) only supports upto 5MP */
+/* The FPGA system (vec_nelems == 16) only supports up to 5MP */
#define SH_CSS_MAX_SENSOR_WIDTH 4608
#define SH_CSS_MAX_SENSOR_HEIGHT 3450
diff --git a/drivers/staging/media/atomisp/pci/sh_css_mipi.c b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
index b7c1e164ee244..6e11fd7719384 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_mipi.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
@@ -174,7 +174,7 @@ ia_css_mipi_frame_calculate_size(const unsigned int width,
mem_words = ((embedded_data_size_words + 7) >> 3) +
mem_words_for_first_line +
(((height + 1) >> 1) - 1) * mem_words_per_odd_line +
- /* ceil (height/2) - 1 (first line is calculated separatelly) */
+ /* ceil (height/2) - 1 (first line is calculated separately) */
(height >> 1) * mem_words_per_even_line + /* floor(height/2) */
mem_words_for_EOF;
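The shift expressions above are the usual ceil/floor idioms; a minimal sketch of the mapping, with an illustrative height that is not part of the patch:

    /* (x + 7) >> 3 == ceil(x / 8); (height + 1) >> 1 == ceil(height / 2);
     * height >> 1 == floor(height / 2). */
    static unsigned int ceil_div(unsigned int x, unsigned int d)
    {
            return (x + d - 1) / d;
    }
    /* e.g. height = 1081: ceil(1081 / 2) - 1 = 540 odd lines (the first line
     * is counted separately) plus floor(1081 / 2) = 540 even lines. */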
@@ -537,7 +537,7 @@ send_mipi_frames(struct ia_css_pipe *pipe)
/* Hand-over the SP-internal mipi buffers */
for (i = 0; i < my_css.num_mipi_frames[port]; i++) {
- /* Need to include the ofset for port. */
+ /* Need to include the offset for port. */
sh_css_update_host2sp_mipi_frame(port * NUM_MIPI_FRAMES_PER_STREAM + i,
my_css.mipi_frames[port][i]);
sh_css_update_host2sp_mipi_metadata(port * NUM_MIPI_FRAMES_PER_STREAM + i,
diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
index 1fd39a2fca98a..95cca281e8a37 100644
--- a/drivers/staging/media/imx/imx-media-csc-scaler.c
+++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
@@ -803,6 +803,7 @@ static int ipu_csc_scaler_release(struct file *file)
dev_dbg(priv->dev, "Releasing instance %p\n", ctx);
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdlr);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
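The added v4l2_ctrl_handler_free() pairs with the control handler set up when the instance was opened; without it the controls allocated at open time leak on every release. A minimal sketch of the pairing, with the open-time side assumed:

    /* open(): */
    v4l2_ctrl_handler_init(&ctx->ctrl_hdlr, 2);
    /* ... v4l2_ctrl_new_std() calls ... */

    /* release(): free the handler before tearing down the rest of the context */
    v4l2_ctrl_handler_free(&ctx->ctrl_hdlr);
    v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);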
diff --git a/drivers/staging/media/imx/imx-media-fim.c b/drivers/staging/media/imx/imx-media-fim.c
index e28a33d9dec75..ccbc0371fba2e 100644
--- a/drivers/staging/media/imx/imx-media-fim.c
+++ b/drivers/staging/media/imx/imx-media-fim.c
@@ -401,7 +401,7 @@ int imx_media_fim_add_controls(struct imx_media_fim *fim)
{
/* add the FIM controls to the calling subdev ctrl handler */
return v4l2_ctrl_add_handler(fim->sd->ctrl_handler,
- &fim->ctrl_handler, NULL, false);
+ &fim->ctrl_handler, NULL, true);
}
/* Called by the subdev in its subdev registered callback */
diff --git a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
index caa358e0bae40..4aa2797f5e3cf 100644
--- a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
@@ -2485,11 +2485,9 @@ struct ipu3_uapi_anr_config {
* &ipu3_uapi_yuvp1_y_ee_nr_config
* @yds: y down scaler config. See &ipu3_uapi_yuvp1_yds_config
* @chnr: chroma noise reduction config. See &ipu3_uapi_yuvp1_chnr_config
- * @reserved1: reserved
* @yds2: y channel down scaler config. See &ipu3_uapi_yuvp1_yds_config
* @tcc: total color correction config as defined in struct
* &ipu3_uapi_yuvp2_tcc_static_config
- * @reserved2: reserved
* @anr: advanced noise reduction config.See &ipu3_uapi_anr_config
* @awb_fr: AWB filter response config. See ipu3_uapi_awb_fr_config
* @ae: auto exposure config As specified by &ipu3_uapi_ae_config
@@ -2724,7 +2722,6 @@ struct ipu3_uapi_obgrid_param {
* @acc_ae: 0 = no update, 1 = update.
* @acc_af: 0 = no update, 1 = update.
* @acc_awb: 0 = no update, 1 = update.
- * @__acc_osys: 0 = no update, 1 = update.
* @reserved3: Not used.
* @lin_vmem_params: 0 = no update, 1 = update.
* @tnr3_vmem_params: 0 = no update, 1 = update.
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index a66f034380c05..3df58eb3e8822 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -1069,6 +1069,11 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
/* Initialize subdev media entity */
+ imgu_sd->subdev.entity.ops = &imgu_media_ops;
+ for (i = 0; i < IMGU_NODE_NUM; i++) {
+ imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
r = media_entity_pads_init(&imgu_sd->subdev.entity, IMGU_NODE_NUM,
imgu_sd->subdev_pads);
if (r) {
@@ -1076,11 +1081,6 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
"failed initialize subdev media entity (%d)\n", r);
return r;
}
- imgu_sd->subdev.entity.ops = &imgu_media_ops;
- for (i = 0; i < IMGU_NODE_NUM; i++) {
- imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
- }
/* Initialize subdev */
v4l2_subdev_init(&imgu_sd->subdev, &imgu_subdev_ops);
@@ -1177,15 +1177,15 @@ static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
}
/* Initialize media entities */
+ node->vdev_pad.flags = node->output ?
+ MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
+ vdev->entity.ops = NULL;
r = media_entity_pads_init(&vdev->entity, 1, &node->vdev_pad);
if (r) {
dev_err(dev, "failed initialize media entity (%d)\n", r);
mutex_destroy(&node->lock);
return r;
}
- node->vdev_pad.flags = node->output ?
- MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
- vdev->entity.ops = NULL;
/* Initialize vbq */
vbq->type = node->vdev_fmt.type;
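Both hunks in this file move the pad flags and entity ops ahead of media_entity_pads_init(); the media core reads the pad array it is handed at registration time (and newer cores also validate that every pad is flagged as either a sink or a source), so setting the flags afterwards is too late. A minimal sketch of the required order, using the names from the hunk:

    node->vdev_pad.flags = node->output ?
            MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
    vdev->entity.ops = NULL;
    r = media_entity_pads_init(&vdev->entity, 1, &node->vdev_pad); /* registers the pads */
    if (r)
            return r;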
diff --git a/drivers/staging/media/meson/vdec/vdec.h b/drivers/staging/media/meson/vdec/vdec.h
index 0906b8fb5cc60..2586851777000 100644
--- a/drivers/staging/media/meson/vdec/vdec.h
+++ b/drivers/staging/media/meson/vdec/vdec.h
@@ -101,7 +101,6 @@ struct amvdec_core {
* @conf_esparser: mandatory call to let the vdec configure the ESPARSER
* @vififo_level: mandatory call to get the current amount of data
* in the VIFIFO
- * @use_offsets: mandatory call. Returns 1 if the VDEC supports vififo offsets
*/
struct amvdec_ops {
int (*start)(struct amvdec_session *sess);
diff --git a/drivers/staging/media/starfive/camss/stf-capture.c b/drivers/staging/media/starfive/camss/stf-capture.c
index 70c24b050a1b5..ec5169e7b3918 100644
--- a/drivers/staging/media/starfive/camss/stf-capture.c
+++ b/drivers/staging/media/starfive/camss/stf-capture.c
@@ -20,28 +20,28 @@ static const struct stfcamss_format_info stf_wr_fmts[] = {
.pixelformat = V4L2_PIX_FMT_SRGGB10,
.planes = 1,
.vsub = { 1 },
- .bpp = 10,
+ .bpp = 16,
},
{
.code = MEDIA_BUS_FMT_SGRBG10_1X10,
.pixelformat = V4L2_PIX_FMT_SGRBG10,
.planes = 1,
.vsub = { 1 },
- .bpp = 10,
+ .bpp = 16,
},
{
.code = MEDIA_BUS_FMT_SGBRG10_1X10,
.pixelformat = V4L2_PIX_FMT_SGBRG10,
.planes = 1,
.vsub = { 1 },
- .bpp = 10,
+ .bpp = 16,
},
{
.code = MEDIA_BUS_FMT_SBGGR10_1X10,
.pixelformat = V4L2_PIX_FMT_SBGGR10,
.planes = 1,
.vsub = { 1 },
- .bpp = 10,
+ .bpp = 16,
},
};
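V4L2_PIX_FMT_SRGGB10 and the other 10-bit Bayer pixel formats here are the unpacked variants, stored one sample per 16-bit word, so 16 is the storage depth the stride and image size have to be computed from. A minimal sketch, assuming bpp feeds the line-stride calculation (helper name illustrative):

    /* a 1920-sample line of unpacked 10-bit Bayer occupies
     * 1920 * 16 / 8 = 3840 bytes, not 1920 * 10 / 8 = 2400 */
    static u32 stf_bytesperline(u32 width, u32 bpp)
    {
            return width * bpp / 8;
    }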
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
index 52e94c8f2f01a..780da4a8b5af1 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
@@ -427,11 +427,11 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
unsigned int ctb_addr_x, ctb_addr_y;
struct cedrus_buffer *cedrus_buf;
dma_addr_t src_buf_addr;
- dma_addr_t src_buf_end_addr;
u32 chroma_log2_weight_denom;
u32 num_entry_point_offsets;
u32 output_pic_list_index;
u32 pic_order_cnt[2];
+ size_t slice_bytes;
u8 padding;
int count;
u32 reg;
@@ -443,6 +443,7 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
pred_weight_table = &slice_params->pred_weight_table;
num_entry_point_offsets = slice_params->num_entry_point_offsets;
cedrus_buf = vb2_to_cedrus_buffer(&run->dst->vb2_buf);
+ slice_bytes = vb2_get_plane_payload(&run->src->vb2_buf, 0);
/*
* If entry points offsets are present, we should get them
@@ -490,7 +491,7 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
cedrus_write(dev, VE_DEC_H265_BITS_OFFSET, 0);
- reg = slice_params->bit_size;
+ reg = slice_bytes * 8;
cedrus_write(dev, VE_DEC_H265_BITS_LEN, reg);
/* Source beginning and end addresses. */
@@ -504,10 +505,7 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
cedrus_write(dev, VE_DEC_H265_BITS_ADDR, reg);
- src_buf_end_addr = src_buf_addr +
- DIV_ROUND_UP(slice_params->bit_size, 8);
-
- reg = VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_end_addr);
+ reg = VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_addr + slice_bytes);
cedrus_write(dev, VE_DEC_H265_BITS_END_ADDR, reg);
/* Coding tree block address */
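Taken together, the hunks in this file size the bitstream from the actual vb2 payload instead of the slice_params bit count; a consolidated view of the resulting programming, using the names and register macros from the hunks:

    slice_bytes = vb2_get_plane_payload(&run->src->vb2_buf, 0); /* bytes queued by userspace */
    cedrus_write(dev, VE_DEC_H265_BITS_LEN, slice_bytes * 8);   /* the engine takes bits */
    cedrus_write(dev, VE_DEC_H265_BITS_END_ADDR,
                 VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_addr + slice_bytes));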
diff --git a/drivers/staging/nvec/TODO b/drivers/staging/nvec/TODO
index e4d85d9b46817..8afde3ccc960e 100644
--- a/drivers/staging/nvec/TODO
+++ b/drivers/staging/nvec/TODO
@@ -1,6 +1,5 @@
ToDo list (incomplete, unordered)
- - add compile as module support
- - move half of the nvec init stuff to i2c-tegra.c
- - move event handling to nvec_events
+ - move the driver to the new i2c slave framework
- finish suspend/resume support
- - add support for more device implementations
+ - fix udelay in the isr
+ - add atomic ops in order to fix shutoff/reboot problems
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 2823cacde1309..282a664c91763 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -709,10 +709,11 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
status & RNW ? " RNW" : "");
/*
- * TODO: A correct fix needs to be found for this.
+ * TODO: replace the udelay with a read back after each writel above
+ * in order to work around a hardware issue, see i2c-tegra.c
*
- * We experience less incomplete messages with this delay than without
- * it, but we don't know why. Help is appreciated.
+ * Unfortunately, this change causes an initialisation issue with the
+ * touchpad, which needs to be fixed first.
*/
udelay(100);
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index b3049108edc45..211423059e303 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -10,7 +10,6 @@
#include <linux/phy.h>
#include <linux/ratelimit.h>
#include <linux/of_mdio.h>
-#include <generated/utsrelease.h>
#include <net/dst.h>
#include "octeon-ethernet.h"
@@ -22,7 +21,6 @@ static void cvm_oct_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strscpy(info->version, UTS_RELEASE, sizeof(info->version));
strscpy(info->bus_info, "Builtin", sizeof(info->bus_info));
}
diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h
index 3e7b92cd2e35e..44cced319c111 100644
--- a/drivers/staging/octeon/octeon-stubs.h
+++ b/drivers/staging/octeon/octeon-stubs.h
@@ -1362,7 +1362,7 @@ static inline struct cvmx_wqe *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
}
static inline int cvmx_spi_restart_interface(int interface,
- cvmx_spi_mode_t mode, int timeout)
+ cvmx_spi_mode_t mode, int timeout)
{
return 0;
}
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index 0ec3130225db1..b6c4917d515e6 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -49,6 +49,7 @@
#define N_PI433_MINORS BIT(MINORBITS) /*32*/ /* ... up to 256 */
#define MAX_MSG_SIZE 900 /* min: FIFO_SIZE! */
#define MSG_FIFO_SIZE 65536 /* 65536 = 2^16 */
+#define FIFO_THRESHOLD 15 /* bytes */
#define NUM_DIO 2
static dev_t pi433_dev;
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index 8c7fab6a46bb2..5a1c362badb61 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -8,12 +8,12 @@
#include <linux/types.h>
#include <linux/spi/spi.h>
+#include <linux/units.h>
#include "rf69.h"
#include "rf69_registers.h"
-#define F_OSC 32000000 /* in Hz */
-#define FIFO_SIZE 66 /* in byte */
+#define F_OSC (32 * HZ_PER_MHZ)
/*-------------------------------------------------------------------------*/
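HZ_PER_MHZ is defined as 1000000UL in <linux/units.h>, so the new F_OSC still evaluates to the 32 MHz crystal frequency; the change documents the unit rather than altering the value. A minimal check, illustrative only:

    #include <linux/build_bug.h>
    #include <linux/units.h>

    #define F_OSC (32 * HZ_PER_MHZ)
    static_assert(F_OSC == 32000000UL); /* same value as the removed literal */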
diff --git a/drivers/staging/pi433/rf69.h b/drivers/staging/pi433/rf69.h
index 78fa0b8bab8b0..76f0f9896a524 100644
--- a/drivers/staging/pi433/rf69.h
+++ b/drivers/staging/pi433/rf69.h
@@ -11,11 +11,7 @@
#include "rf69_enum.h"
#include "rf69_registers.h"
-/* NOTE: Modifying FREQUENCY value impacts CE certification */
-#define F_OSC 32000000 /* Hz */
-#define FREQUENCY 433920000 /* Hz */
#define FIFO_SIZE 66 /* bytes */
-#define FIFO_THRESHOLD 15 /* bytes */
u8 rf69_read_reg(struct spi_device *spi, u8 addr);
int rf69_get_version(struct spi_device *spi);
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index c7a2eae2fdb90..e3ed709a7674e 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -164,7 +164,7 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
eACI);
break;
}
- priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACM_CTRL,
+ priv->rtllib->set_hw_reg_handler(dev, HW_VAR_ACM_CTRL,
&pAcParam);
break;
}
@@ -693,11 +693,10 @@ void rtl92e_link_change(struct net_device *dev)
u32 reg;
reg = rtl92e_readl(dev, RCR);
- if (priv->rtllib->link_state == MAC80211_LINKED) {
+ if (priv->rtllib->link_state == MAC80211_LINKED)
priv->receive_config = reg |= RCR_CBSSID;
- } else {
+ else
priv->receive_config = reg &= ~RCR_CBSSID;
- }
rtl92e_writel(dev, RCR, reg);
}
@@ -1296,7 +1295,6 @@ static void _rtl92e_query_rxphystatus(
pwdb_all = rtl92e_rx_db_to_percent(rx_pwr_all);
pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
- pstats->RxPower = precord_stats->RxPower = rx_pwr_all;
pstats->RecvSignalPower = rx_pwr_all;
if (pdrvinfo->RxHT && pdrvinfo->RxRate >= DESC90_RATEMCS8 &&
pdrvinfo->RxRate <= DESC90_RATEMCS15)
@@ -1348,14 +1346,7 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
static u32 slide_beacon_adc_pwdb_index;
static u32 slide_beacon_adc_pwdb_statistics;
static u32 last_beacon_adc_pwdb;
- struct ieee80211_hdr_3addr *hdr;
- u16 sc;
- unsigned int seq;
- hdr = (struct ieee80211_hdr_3addr *)buffer;
- sc = le16_to_cpu(hdr->seq_ctrl);
- seq = WLAN_GET_SEQ_SEQ(sc);
- curr_st->Seq_Num = seq;
if (!prev_st->bIsAMPDU)
bcheck = true;
@@ -1536,7 +1527,7 @@ static void _rtl92e_update_received_rate_histogram_stats(
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
u32 rcvType = 1;
- u32 rateIndex;
+ u32 rate_index;
if (pstats->bCRC)
rcvType = 2;
@@ -1545,95 +1536,95 @@ static void _rtl92e_update_received_rate_histogram_stats(
switch (pstats->rate) {
case MGN_1M:
- rateIndex = 0;
+ rate_index = 0;
break;
case MGN_2M:
- rateIndex = 1;
+ rate_index = 1;
break;
case MGN_5_5M:
- rateIndex = 2;
+ rate_index = 2;
break;
case MGN_11M:
- rateIndex = 3;
+ rate_index = 3;
break;
case MGN_6M:
- rateIndex = 4;
+ rate_index = 4;
break;
case MGN_9M:
- rateIndex = 5;
+ rate_index = 5;
break;
case MGN_12M:
- rateIndex = 6;
+ rate_index = 6;
break;
case MGN_18M:
- rateIndex = 7;
+ rate_index = 7;
break;
case MGN_24M:
- rateIndex = 8;
+ rate_index = 8;
break;
case MGN_36M:
- rateIndex = 9;
+ rate_index = 9;
break;
case MGN_48M:
- rateIndex = 10;
+ rate_index = 10;
break;
case MGN_54M:
- rateIndex = 11;
+ rate_index = 11;
break;
case MGN_MCS0:
- rateIndex = 12;
+ rate_index = 12;
break;
case MGN_MCS1:
- rateIndex = 13;
+ rate_index = 13;
break;
case MGN_MCS2:
- rateIndex = 14;
+ rate_index = 14;
break;
case MGN_MCS3:
- rateIndex = 15;
+ rate_index = 15;
break;
case MGN_MCS4:
- rateIndex = 16;
+ rate_index = 16;
break;
case MGN_MCS5:
- rateIndex = 17;
+ rate_index = 17;
break;
case MGN_MCS6:
- rateIndex = 18;
+ rate_index = 18;
break;
case MGN_MCS7:
- rateIndex = 19;
+ rate_index = 19;
break;
case MGN_MCS8:
- rateIndex = 20;
+ rate_index = 20;
break;
case MGN_MCS9:
- rateIndex = 21;
+ rate_index = 21;
break;
case MGN_MCS10:
- rateIndex = 22;
+ rate_index = 22;
break;
case MGN_MCS11:
- rateIndex = 23;
+ rate_index = 23;
break;
case MGN_MCS12:
- rateIndex = 24;
+ rate_index = 24;
break;
case MGN_MCS13:
- rateIndex = 25;
+ rate_index = 25;
break;
case MGN_MCS14:
- rateIndex = 26;
+ rate_index = 26;
break;
case MGN_MCS15:
- rateIndex = 27;
+ rate_index = 27;
break;
default:
- rateIndex = 28;
+ rate_index = 28;
break;
}
- priv->stats.received_rate_histogram[0][rateIndex]++;
- priv->stats.received_rate_histogram[rcvType][rateIndex]++;
+ priv->stats.received_rate_histogram[0][rate_index]++;
+ priv->stats.received_rate_histogram[rcvType][rate_index]++;
}
bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
@@ -1650,7 +1641,6 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
stats->bHwError |= 1;
if (stats->bHwError) {
- stats->bShift = false;
return false;
}
@@ -1662,7 +1652,6 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
stats->rate = _rtl92e_rate_hw_to_mgn((bool)pDrvInfo->RxHT,
pDrvInfo->RxRate);
- stats->bShortPreamble = pDrvInfo->SPLCP;
_rtl92e_update_received_rate_histogram_stats(dev, stats);
@@ -1673,19 +1662,9 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
stats->TimeStampLow = pDrvInfo->TSFL;
stats->TimeStampHigh = rtl92e_readl(dev, TSFR + 4);
- if ((stats->RxBufShift + stats->RxDrvInfoSize) > 0)
- stats->bShift = 1;
-
- stats->RxIs40MHzPacket = pDrvInfo->BW;
-
_rtl92e_translate_rx_signal_stats(dev, skb, stats, pdesc, pDrvInfo);
skb_trim(skb, skb->len - S_CRC_LEN);
-
- stats->packetlength = stats->Length - 4;
- stats->fraglength = stats->packetlength;
- stats->fragoffset = 0;
- stats->ntotalfrag = 1;
return true;
}
@@ -1698,7 +1677,7 @@ void rtl92e_stop_adapter(struct net_device *dev, bool reset)
u32 ulRegRead;
op_mode = RT_OP_MODE_NO_LINK;
- priv->rtllib->SetHwRegHandler(dev, HW_VAR_MEDIA_STATUS, &op_mode);
+ priv->rtllib->set_hw_reg_handler(dev, HW_VAR_MEDIA_STATUS, &op_mode);
if (!priv->rtllib->bSupportRemoteWakeUp) {
u1bTmp = 0x0;
@@ -1852,7 +1831,7 @@ bool rtl92e_is_rx_stuck(struct net_device *dev)
u16 RegRxCounter = rtl92e_readw(dev, 0x130);
bool bStuck = false;
static u8 rx_chk_cnt;
- u32 SlotIndex = 0, TotalRxStuckCount = 0;
+ u32 slot_index = 0, TotalRxStuckCount = 0;
u8 i;
u8 SilentResetRxSoltNum = 4;
@@ -1882,10 +1861,10 @@ bool rtl92e_is_rx_stuck(struct net_device *dev)
}
- SlotIndex = (priv->silent_reset_rx_slot_index++) % SilentResetRxSoltNum;
+ slot_index = (priv->silent_reset_rx_slot_index++) % SilentResetRxSoltNum;
if (priv->rx_ctr == RegRxCounter) {
- priv->silent_reset_rx_stuck_event[SlotIndex] = 1;
+ priv->silent_reset_rx_stuck_event[slot_index] = 1;
for (i = 0; i < SilentResetRxSoltNum; i++)
TotalRxStuckCount += priv->silent_reset_rx_stuck_event[i];
@@ -1897,7 +1876,7 @@ bool rtl92e_is_rx_stuck(struct net_device *dev)
priv->silent_reset_rx_stuck_event[i];
}
} else {
- priv->silent_reset_rx_stuck_event[SlotIndex] = 0;
+ priv->silent_reset_rx_stuck_event[slot_index] = 0;
}
priv->rx_ctr = RegRxCounter;
@@ -1938,5 +1917,5 @@ bool rtl92e_is_halfn_supported_by_ap(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
- return ieee->bHalfWirelessN24GMode;
+ return ieee->half_wireless_n24g_mode;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index e1bd4d67e862b..18b948d4d86d3 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -714,7 +714,6 @@ void rtl92e_set_channel(struct net_device *dev, u8 channel)
if (priv->up)
_rtl92e_phy_switch_channel_work_item(dev);
priv->sw_chnl_in_progress = false;
- return;
}
static void _rtl92e_cck_tx_power_track_bw_switch_tssi(struct net_device *dev)
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 6815d18a7919e..649b529657bab 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -172,7 +172,7 @@ bool rtl92e_set_rf_state(struct net_device *dev,
priv->blinked_ingpio = true;
else
priv->blinked_ingpio = false;
- rtllib_MgntDisconnect(priv->rtllib,
+ rtllib_mgnt_disconnect(priv->rtllib,
WLAN_REASON_DISASSOC_STA_HAS_LEFT);
}
}
@@ -236,14 +236,14 @@ static void _rtl92e_update_cap(struct net_device *dev, u16 cap)
if (priv->dot11_current_preamble_mode != PREAMBLE_SHORT) {
ShortPreamble = true;
priv->dot11_current_preamble_mode = PREAMBLE_SHORT;
- priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACK_PREAMBLE,
+ priv->rtllib->set_hw_reg_handler(dev, HW_VAR_ACK_PREAMBLE,
(unsigned char *)&ShortPreamble);
}
} else {
if (priv->dot11_current_preamble_mode != PREAMBLE_LONG) {
ShortPreamble = false;
priv->dot11_current_preamble_mode = PREAMBLE_LONG;
- priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACK_PREAMBLE,
+ priv->rtllib->set_hw_reg_handler(dev, HW_VAR_ACK_PREAMBLE,
(unsigned char *)&ShortPreamble);
}
}
@@ -256,13 +256,13 @@ static void _rtl92e_update_cap(struct net_device *dev, u16 cap)
(!priv->rtllib->ht_info->current_rt2rt_long_slot_time)) {
if (cur_slot_time != SHORT_SLOT_TIME) {
slot_time_val = SHORT_SLOT_TIME;
- priv->rtllib->SetHwRegHandler(dev,
+ priv->rtllib->set_hw_reg_handler(dev,
HW_VAR_SLOT_TIME, &slot_time_val);
}
} else {
if (cur_slot_time != NON_SHORT_SLOT_TIME) {
slot_time_val = NON_SHORT_SLOT_TIME;
- priv->rtllib->SetHwRegHandler(dev,
+ priv->rtllib->set_hw_reg_handler(dev,
HW_VAR_SLOT_TIME, &slot_time_val);
}
}
@@ -301,7 +301,7 @@ static void _rtl92e_qos_activate(void *data)
goto success;
for (i = 0; i < QOS_QUEUE_NUM; i++)
- priv->rtllib->SetHwRegHandler(dev, HW_VAR_AC_PARAM, (u8 *)(&i));
+ priv->rtllib->set_hw_reg_handler(dev, HW_VAR_AC_PARAM, (u8 *)(&i));
success:
mutex_unlock(&priv->mutex);
@@ -656,12 +656,12 @@ static void _rtl92e_init_priv_handler(struct net_device *dev)
priv->rtllib->enter_sleep_state = rtl92e_enter_sleep;
priv->rtllib->ps_is_queue_empty = _rtl92e_is_tx_queue_empty;
- priv->rtllib->GetNmodeSupportBySecCfg = rtl92e_get_nmode_support_by_sec;
- priv->rtllib->GetHalfNmodeSupportByAPsHandler =
+ priv->rtllib->get_nmode_support_by_sec_cfg = rtl92e_get_nmode_support_by_sec;
+ priv->rtllib->get_half_nmode_support_by_aps_handler =
rtl92e_is_halfn_supported_by_ap;
- priv->rtllib->SetHwRegHandler = rtl92e_set_reg;
- priv->rtllib->AllowAllDestAddrHandler = rtl92e_set_monitor_mode;
+ priv->rtllib->set_hw_reg_handler = rtl92e_set_reg;
+ priv->rtllib->allow_all_dest_addr_handler = rtl92e_set_monitor_mode;
priv->rtllib->init_gain_handler = rtl92e_init_gain;
priv->rtllib->rtllib_ips_leave_wq = rtl92e_rtllib_ips_leave_wq;
priv->rtllib->rtllib_ips_leave = rtl92e_rtllib_ips_leave;
@@ -705,7 +705,7 @@ static void _rtl92e_init_priv_variable(struct net_device *dev)
priv->hw_rf_off_action = 0;
priv->set_rf_pwr_state_in_progress = false;
priv->rtllib->pwr_save_ctrl.bLeisurePs = true;
- priv->rtllib->LPSDelayCnt = 0;
+ priv->rtllib->lps_delay_cnt = 0;
priv->rtllib->sta_sleep = LPS_IS_WAKE;
priv->rtllib->rf_power_state = rf_on;
@@ -909,25 +909,24 @@ static void _rtl92e_if_check_reset(struct net_device *dev)
netdev_info(dev, "%s(): TxResetType is %d, RxResetType is %d\n",
__func__, TxResetType, RxResetType);
}
- return;
}
static void _rtl92e_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
u32 *TotalRxDataNum)
{
- u16 SlotIndex;
+ u16 slot_index;
u8 i;
*TotalRxBcnNum = 0;
*TotalRxDataNum = 0;
- SlotIndex = (priv->rtllib->link_detect_info.SlotIndex++) %
- (priv->rtllib->link_detect_info.SlotNum);
- priv->rtllib->link_detect_info.RxBcnNum[SlotIndex] =
- priv->rtllib->link_detect_info.NumRecvBcnInPeriod;
- priv->rtllib->link_detect_info.RxDataNum[SlotIndex] =
- priv->rtllib->link_detect_info.NumRecvDataInPeriod;
- for (i = 0; i < priv->rtllib->link_detect_info.SlotNum; i++) {
+ slot_index = (priv->rtllib->link_detect_info.slot_index++) %
+ (priv->rtllib->link_detect_info.slot_num);
+ priv->rtllib->link_detect_info.RxBcnNum[slot_index] =
+ priv->rtllib->link_detect_info.num_recv_bcn_in_period;
+ priv->rtllib->link_detect_info.RxDataNum[slot_index] =
+ priv->rtllib->link_detect_info.num_recv_data_in_period;
+ for (i = 0; i < priv->rtllib->link_detect_info.slot_num; i++) {
*TotalRxBcnNum += priv->rtllib->link_detect_info.RxBcnNum[i];
*TotalRxDataNum += priv->rtllib->link_detect_info.RxDataNum[i];
}
@@ -943,7 +942,7 @@ static void _rtl92e_watchdog_wq_cb(void *data)
unsigned long flags;
struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *)
(&priv->rtllib->pwr_save_ctrl);
- bool bBusyTraffic = false;
+ bool busy_traffic = false;
bool bHigherBusyTraffic = false;
bool bHigherBusyRxTraffic = false;
bool bEnterPS = false;
@@ -965,15 +964,14 @@ static void _rtl92e_watchdog_wq_cb(void *data)
MAC80211_NOLINK) &&
(ieee->rf_power_state == rf_on) && !ieee->is_set_key &&
(!ieee->proto_stoppping) && !ieee->wx_set_enc) {
- if (ieee->pwr_save_ctrl.ReturnPoint == IPS_CALLBACK_NONE) {
+ if (ieee->pwr_save_ctrl.ReturnPoint == IPS_CALLBACK_NONE)
rtl92e_ips_enter(dev);
- }
}
}
if ((ieee->link_state == MAC80211_LINKED) && (ieee->iw_mode == IW_MODE_INFRA)) {
if (ieee->link_detect_info.num_rx_ok_in_period > 100 ||
ieee->link_detect_info.num_tx_ok_in_period > 100)
- bBusyTraffic = true;
+ busy_traffic = true;
if (ieee->link_detect_info.num_rx_ok_in_period > 4000 ||
ieee->link_detect_info.num_tx_ok_in_period > 4000) {
@@ -984,9 +982,9 @@ static void _rtl92e_watchdog_wq_cb(void *data)
bHigherBusyRxTraffic = false;
}
- if (((ieee->link_detect_info.NumRxUnicastOkInPeriod +
+ if (((ieee->link_detect_info.num_rx_unicast_ok_in_period +
ieee->link_detect_info.num_tx_ok_in_period) > 8) ||
- (ieee->link_detect_info.NumRxUnicastOkInPeriod > 2))
+ (ieee->link_detect_info.num_rx_unicast_ok_in_period > 2))
bEnterPS = false;
else
bEnterPS = true;
@@ -1005,8 +1003,8 @@ static void _rtl92e_watchdog_wq_cb(void *data)
ieee->link_detect_info.num_rx_ok_in_period = 0;
ieee->link_detect_info.num_tx_ok_in_period = 0;
- ieee->link_detect_info.NumRxUnicastOkInPeriod = 0;
- ieee->link_detect_info.bBusyTraffic = bBusyTraffic;
+ ieee->link_detect_info.num_rx_unicast_ok_in_period = 0;
+ ieee->link_detect_info.busy_traffic = busy_traffic;
ieee->link_detect_info.bHigherBusyTraffic = bHigherBusyTraffic;
ieee->link_detect_info.bHigherBusyRxTraffic = bHigherBusyRxTraffic;
@@ -1032,7 +1030,7 @@ static void _rtl92e_watchdog_wq_cb(void *data)
ieee->link_state = RTLLIB_ASSOCIATING;
- RemovePeerTS(priv->rtllib,
+ remove_peer_ts(priv->rtllib,
priv->rtllib->current_network.bssid);
ieee->is_roaming = true;
ieee->is_set_key = false;
@@ -1046,8 +1044,8 @@ static void _rtl92e_watchdog_wq_cb(void *data)
priv->check_roaming_cnt = 0;
}
- ieee->link_detect_info.NumRecvBcnInPeriod = 0;
- ieee->link_detect_info.NumRecvDataInPeriod = 0;
+ ieee->link_detect_info.num_recv_bcn_in_period = 0;
+ ieee->link_detect_info.num_recv_data_in_period = 0;
}
spin_lock_irqsave(&priv->tx_lock, flags);
@@ -1257,7 +1255,7 @@ static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb)
int idx;
u32 fwinfo_size = 0;
- priv->rtllib->bAwakePktSent = true;
+ priv->rtllib->awake_pkt_sent = true;
fwinfo_size = sizeof(struct tx_fwinfo_8190pci);
@@ -1502,8 +1500,6 @@ static void _rtl92e_rx_normal(struct net_device *dev)
};
unsigned int count = priv->rxringcount;
- stats.nic_type = NIC_8192E;
-
while (count--) {
struct rx_desc *pdesc = &priv->rx_ring
[priv->rx_idx];
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 92143c50c149a..c34087af973cf 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -354,7 +354,7 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
}
}
- if (priv->rtllib->GetHalfNmodeSupportByAPsHandler(dev))
+ if (priv->rtllib->get_half_nmode_support_by_aps_handler(dev))
target_ratr &= 0xf00fffff;
current_ratr = rtl92e_readl(dev, RATR0);
@@ -1185,7 +1185,7 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
if (priv->bcurrent_turbo_EDCA) {
u8 tmp = AC0_BE;
- priv->rtllib->SetHwRegHandler(dev, HW_VAR_AC_PARAM,
+ priv->rtllib->set_hw_reg_handler(dev, HW_VAR_AC_PARAM,
(u8 *)(&tmp));
priv->bcurrent_turbo_EDCA = false;
}
@@ -1523,7 +1523,7 @@ static void _rtl92e_dm_init_fsync(struct net_device *dev)
priv->rtllib->fsync_multiple_timeinterval = 3;
priv->rtllib->fsync_firstdiff_ratethreshold = 100;
priv->rtllib->fsync_seconddiff_ratethreshold = 200;
- priv->rtllib->fsync_state = Default_Fsync;
+ priv->rtllib->fsync_state = DEFAULT_FSYNC;
timer_setup(&priv->fsync_timer, _rtl92e_dm_fsync_timer_callback, 0);
}
@@ -1636,7 +1636,7 @@ static void _rtl92e_dm_start_hw_fsync(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c12cf);
- priv->rtllib->SetHwRegHandler(dev, HW_VAR_RF_TIMING,
+ priv->rtllib->set_hw_reg_handler(dev, HW_VAR_RF_TIMING,
(u8 *)(&rf_timing));
rtl92e_writeb(dev, 0xc3b, 0x41);
}
@@ -1647,7 +1647,7 @@ static void _rtl92e_dm_end_hw_fsync(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c52cd);
- priv->rtllib->SetHwRegHandler(dev, HW_VAR_RF_TIMING, (u8 *)
+ priv->rtllib->set_hw_reg_handler(dev, HW_VAR_RF_TIMING, (u8 *)
(&rf_timing));
rtl92e_writeb(dev, 0xc3b, 0x49);
}
@@ -1716,31 +1716,29 @@ static void _rtl92e_dm_check_fsync(struct net_device *dev)
priv->rtllib->ht_info->iot_peer == HT_IOT_PEER_BROADCOM) {
if (priv->rtllib->bfsync_enable == 0) {
switch (priv->rtllib->fsync_state) {
- case Default_Fsync:
+ case DEFAULT_FSYNC:
_rtl92e_dm_start_hw_fsync(dev);
- priv->rtllib->fsync_state = HW_Fsync;
+ priv->rtllib->fsync_state = HW_FSYNC;
break;
- case SW_Fsync:
+ case SW_FSYNC:
_rtl92e_dm_end_sw_fsync(dev);
_rtl92e_dm_start_hw_fsync(dev);
- priv->rtllib->fsync_state = HW_Fsync;
+ priv->rtllib->fsync_state = HW_FSYNC;
break;
- case HW_Fsync:
default:
break;
}
} else {
switch (priv->rtllib->fsync_state) {
- case Default_Fsync:
+ case DEFAULT_FSYNC:
_rtl92e_dm_start_sw_fsync(dev);
- priv->rtllib->fsync_state = SW_Fsync;
+ priv->rtllib->fsync_state = SW_FSYNC;
break;
- case HW_Fsync:
+ case HW_FSYNC:
_rtl92e_dm_end_hw_fsync(dev);
_rtl92e_dm_start_sw_fsync(dev);
- priv->rtllib->fsync_state = SW_Fsync;
+ priv->rtllib->fsync_state = SW_FSYNC;
break;
- case SW_Fsync:
default:
break;
}
@@ -1752,15 +1750,14 @@ static void _rtl92e_dm_check_fsync(struct net_device *dev)
}
} else {
switch (priv->rtllib->fsync_state) {
- case HW_Fsync:
+ case HW_FSYNC:
_rtl92e_dm_end_hw_fsync(dev);
- priv->rtllib->fsync_state = Default_Fsync;
+ priv->rtllib->fsync_state = DEFAULT_FSYNC;
break;
- case SW_Fsync:
+ case SW_FSYNC:
_rtl92e_dm_end_sw_fsync(dev);
- priv->rtllib->fsync_state = Default_Fsync;
+ priv->rtllib->fsync_state = DEFAULT_FSYNC;
break;
- case Default_Fsync:
default:
break;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index 44a9fe8318497..5aac9110bff68 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
@@ -208,12 +208,12 @@ void rtl92e_leisure_ps_enter(struct net_device *dev)
return;
if (psc->bLeisurePs) {
- if (psc->LpsIdleCount >= RT_CHECK_FOR_HANG_PERIOD) {
+ if (psc->lps_idle_count >= RT_CHECK_FOR_HANG_PERIOD) {
if (priv->rtllib->ps == RTLLIB_PS_DISABLED)
_rtl92e_ps_set_mode(dev, RTLLIB_PS_MBCAST | RTLLIB_PS_UNICAST);
} else {
- psc->LpsIdleCount++;
+ psc->lps_idle_count++;
}
}
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index 4c884c5277f94..d131ef525f463 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -253,7 +253,7 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
rt_state = priv->rtllib->rf_power_state;
if (!priv->up)
return -ENETDOWN;
- if (priv->rtllib->link_detect_info.bBusyTraffic)
+ if (priv->rtllib->link_detect_info.busy_traffic)
return -EAGAIN;
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
@@ -269,7 +269,7 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
mutex_lock(&priv->wx_mutex);
- priv->rtllib->FirstIe_InScan = true;
+ priv->rtllib->first_ie_in_scan = true;
if (priv->rtllib->link_state != MAC80211_LINKED) {
if (rt_state == rf_off) {
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index ee9ce392155c3..834329886ea2e 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -125,7 +125,7 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *dst,
static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
struct ba_record *ba,
- enum tr_select TxRxSelect, u16 reason_code)
+ enum tr_select tx_rx_select, u16 reason_code)
{
union delba_param_set del_ba_param_set;
struct sk_buff *skb = NULL;
@@ -139,7 +139,7 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
memset(&del_ba_param_set, 0, 2);
- del_ba_param_set.field.initiator = (TxRxSelect == TX_DIR) ? 1 : 0;
+ del_ba_param_set.field.initiator = (tx_rx_select == TX_DIR) ? 1 : 0;
del_ba_param_set.field.tid = ba->ba_param_set.field.tid;
skb = dev_alloc_skb(len + sizeof(struct ieee80211_hdr_3addr));
@@ -173,8 +173,8 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
return skb;
}
-static void rtllib_send_ADDBAReq(struct rtllib_device *ieee, u8 *dst,
- struct ba_record *ba)
+static void rtllib_send_add_ba_req(struct rtllib_device *ieee, u8 *dst,
+ struct ba_record *ba)
{
struct sk_buff *skb;
@@ -186,8 +186,8 @@ static void rtllib_send_ADDBAReq(struct rtllib_device *ieee, u8 *dst,
netdev_dbg(ieee->dev, "Failed to generate ADDBAReq packet.\n");
}
-static void rtllib_send_ADDBARsp(struct rtllib_device *ieee, u8 *dst,
- struct ba_record *ba, u16 status_code)
+static void rtllib_send_add_ba_rsp(struct rtllib_device *ieee, u8 *dst,
+ struct ba_record *ba, u16 status_code)
{
struct sk_buff *skb;
@@ -199,19 +199,19 @@ static void rtllib_send_ADDBARsp(struct rtllib_device *ieee, u8 *dst,
}
static void rtllib_send_DELBA(struct rtllib_device *ieee, u8 *dst,
- struct ba_record *ba, enum tr_select TxRxSelect,
+ struct ba_record *ba, enum tr_select tx_rx_select,
u16 reason_code)
{
struct sk_buff *skb;
- skb = rtllib_DELBA(ieee, dst, ba, TxRxSelect, reason_code);
+ skb = rtllib_DELBA(ieee, dst, ba, tx_rx_select, reason_code);
if (skb)
softmac_mgmt_xmit(skb, ieee);
else
netdev_dbg(ieee->dev, "Failed to generate DELBA packet.\n");
}
-int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
+int rtllib_rx_add_ba_req(struct rtllib_device *ieee, struct sk_buff *skb)
{
struct ieee80211_hdr_3addr *req = NULL;
u16 rc = 0;
@@ -251,13 +251,13 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
"Failed to reply on ADDBA_REQ as some capability is not ready(%d, %d)\n",
ieee->current_network.qos_data.active,
ieee->ht_info->current_ht_support);
- goto OnADDBAReq_Fail;
+ goto on_add_ba_req_fail;
}
if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
- (u8)(ba_param_set->field.tid), RX_DIR, true)) {
+ (u8)(ba_param_set->field.tid), RX_DIR, true)) {
rc = ADDBA_STATUS_REFUSED;
netdev_warn(ieee->dev, "%s(): can't get TS\n", __func__);
- goto OnADDBAReq_Fail;
+ goto on_add_ba_req_fail;
}
ba = &ts->rx_admitted_ba_record;
@@ -265,10 +265,10 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
rc = ADDBA_STATUS_INVALID_PARAM;
netdev_warn(ieee->dev, "%s(): BA Policy is not correct\n",
__func__);
- goto OnADDBAReq_Fail;
+ goto on_add_ba_req_fail;
}
- rtllib_FlushRxTsPendingPkts(ieee, ts);
+ rtllib_flush_rx_ts_pending_pkts(ieee, ts);
deactivate_ba_entry(ieee, ba);
ba->dialog_token = *dialog_token;
@@ -276,18 +276,18 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
ba->ba_timeout_value = *ba_timeout_value;
ba->ba_start_seq_ctrl = *ba_start_seq_ctrl;
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev) ||
- (ieee->ht_info->iot_action & HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT))
+ if (ieee->get_half_nmode_support_by_aps_handler(ieee->dev) ||
+ (ieee->ht_info->iot_action & HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT))
ba->ba_param_set.field.buffer_size = 1;
else
ba->ba_param_set.field.buffer_size = 32;
activate_ba_entry(ba, 0);
- rtllib_send_ADDBARsp(ieee, dst, ba, ADDBA_STATUS_SUCCESS);
+ rtllib_send_add_ba_rsp(ieee, dst, ba, ADDBA_STATUS_SUCCESS);
return 0;
-OnADDBAReq_Fail:
+on_add_ba_req_fail:
{
struct ba_record BA;
@@ -295,12 +295,12 @@ OnADDBAReq_Fail:
BA.ba_timeout_value = *ba_timeout_value;
BA.dialog_token = *dialog_token;
BA.ba_param_set.field.ba_policy = BA_POLICY_IMMEDIATE;
- rtllib_send_ADDBARsp(ieee, dst, &BA, rc);
+ rtllib_send_add_ba_rsp(ieee, dst, &BA, rc);
return 0;
}
}
-int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
+int rtllib_rx_add_ba_rsp(struct rtllib_device *ieee, struct sk_buff *skb)
{
struct ieee80211_hdr_3addr *rsp = NULL;
struct ba_record *pending_ba, *admitted_ba;
@@ -334,14 +334,14 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
ieee->ht_info->current_ht_support,
ieee->ht_info->current_ampdu_enable);
reason_code = DELBA_REASON_UNKNOWN_BA;
- goto OnADDBARsp_Reject;
+ goto on_add_ba_rsp_reject;
}
if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
- (u8)(ba_param_set->field.tid), TX_DIR, false)) {
+ (u8)(ba_param_set->field.tid), TX_DIR, false)) {
netdev_warn(ieee->dev, "%s(): can't get TS\n", __func__);
reason_code = DELBA_REASON_UNKNOWN_BA;
- goto OnADDBARsp_Reject;
+ goto on_add_ba_rsp_reject;
}
ts->add_ba_req_in_progress = false;
@@ -358,7 +358,7 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
"%s(): ADDBA Rsp. BA invalid, DELBA!\n",
__func__);
reason_code = DELBA_REASON_UNKNOWN_BA;
- goto OnADDBARsp_Reject;
+ goto on_add_ba_rsp_reject;
} else {
netdev_dbg(ieee->dev,
"%s(): Recv ADDBA Rsp. BA is admitted! Status code:%X\n",
@@ -371,7 +371,7 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
ts->add_ba_req_delayed = true;
deactivate_ba_entry(ieee, admitted_ba);
reason_code = DELBA_REASON_END_BA;
- goto OnADDBARsp_Reject;
+ goto on_add_ba_rsp_reject;
}
admitted_ba->dialog_token = *dialog_token;
@@ -384,12 +384,12 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
ts->add_ba_req_delayed = true;
ts->disable_add_ba = true;
reason_code = DELBA_REASON_END_BA;
- goto OnADDBARsp_Reject;
+ goto on_add_ba_rsp_reject;
}
return 0;
-OnADDBARsp_Reject:
+on_add_ba_rsp_reject:
{
struct ba_record BA;
@@ -433,7 +433,7 @@ int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
struct rx_ts_record *ts;
if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
- (u8)del_ba_param_set->field.tid, RX_DIR, false)) {
+ (u8)del_ba_param_set->field.tid, RX_DIR, false)) {
netdev_warn(ieee->dev,
"%s(): can't get TS for RXTS. dst:%pM TID:%d\n",
__func__, dst,
@@ -446,7 +446,7 @@ int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
struct tx_ts_record *ts;
if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
- (u8)del_ba_param_set->field.tid, TX_DIR, false)) {
+ (u8)del_ba_param_set->field.tid, TX_DIR, false)) {
netdev_warn(ieee->dev, "%s(): can't get TS for TXTS\n",
__func__);
return -1;
@@ -481,14 +481,14 @@ void rtllib_ts_init_add_ba(struct rtllib_device *ieee, struct tx_ts_record *ts,
activate_ba_entry(ba, BA_SETUP_TIMEOUT);
- rtllib_send_ADDBAReq(ieee, ts->ts_common_info.addr, ba);
+ rtllib_send_add_ba_req(ieee, ts->ts_common_info.addr, ba);
}
void rtllib_ts_init_del_ba(struct rtllib_device *ieee,
struct ts_common_info *ts_common_info,
- enum tr_select TxRxSelect)
+ enum tr_select tx_rx_select)
{
- if (TxRxSelect == TX_DIR) {
+ if (tx_rx_select == TX_DIR) {
struct tx_ts_record *ts =
(struct tx_ts_record *)ts_common_info;
@@ -497,14 +497,14 @@ void rtllib_ts_init_del_ba(struct rtllib_device *ieee,
(ts->tx_admitted_ba_record.b_valid) ?
(&ts->tx_admitted_ba_record) :
(&ts->tx_pending_ba_record),
- TxRxSelect, DELBA_REASON_END_BA);
- } else if (TxRxSelect == RX_DIR) {
+ tx_rx_select, DELBA_REASON_END_BA);
+ } else if (tx_rx_select == RX_DIR) {
struct rx_ts_record *ts =
(struct rx_ts_record *)ts_common_info;
if (rx_ts_delete_ba(ieee, ts))
rtllib_send_DELBA(ieee, ts_common_info->addr,
&ts->rx_admitted_ba_record,
- TxRxSelect, DELBA_REASON_END_BA);
+ tx_rx_select, DELBA_REASON_END_BA);
}
}
diff --git a/drivers/staging/rtl8192e/rtl819x_HT.h b/drivers/staging/rtl8192e/rtl819x_HT.h
index 68577bffb936d..a4580445305d9 100644
--- a/drivers/staging/rtl8192e/rtl819x_HT.h
+++ b/drivers/staging/rtl8192e/rtl819x_HT.h
@@ -98,9 +98,9 @@ struct rt_hi_throughput {
u8 cur_short_gi_40mhz;
u8 cur_short_gi_20mhz;
enum ht_spec_ver peer_ht_spec_ver;
- struct ht_capab_ele SelfHTCap;
- u8 PeerHTCapBuf[32];
- u8 PeerHTInfoBuf[32];
+ struct ht_capab_ele self_ht_cap;
+ u8 peer_ht_cap_buf[32];
+ u8 peer_ht_info_buf[32];
u8 ampdu_enable;
u8 current_ampdu_enable;
u8 ampdu_factor;
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index 6d0912f90198f..fa96a2c2c9161 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -252,7 +252,7 @@ void ht_construct_capability_element(struct rtllib_device *ieee, u8 *pos_ht_cap,
}
cap_ele->AdvCoding = 0;
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
+ if (ieee->get_half_nmode_support_by_aps_handler(ieee->dev))
cap_ele->ChlWidth = 0;
else
cap_ele->ChlWidth = 1;
@@ -301,7 +301,7 @@ void ht_construct_capability_element(struct rtllib_device *ieee, u8 *pos_ht_cap,
if (ht->iot_action & HT_IOT_ACT_DISABLE_RX_40MHZ_SHORT_GI)
cap_ele->ShortGI40Mhz = 0;
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) {
+ if (ieee->get_half_nmode_support_by_aps_handler(ieee->dev)) {
cap_ele->ChlWidth = 0;
cap_ele->MCS[1] = 0;
}
@@ -408,7 +408,7 @@ static u8 ht_filter_mcs_rate(struct rtllib_device *ieee, u8 *pSupportMCS,
ht_pick_mcs_rate(ieee, pOperateMCS);
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
+ if (ieee->get_half_nmode_support_by_aps_handler(ieee->dev))
pOperateMCS[1] = 0;
for (i = 2; i <= 15; i++)
@@ -437,16 +437,16 @@ void ht_on_assoc_rsp(struct rtllib_device *ieee)
}
netdev_dbg(ieee->dev, "%s(): HT_ENABLE\n", __func__);
- if (!memcmp(ht_info->PeerHTCapBuf, EWC11NHTCap, sizeof(EWC11NHTCap)))
- pPeerHTCap = (struct ht_capab_ele *)(&ht_info->PeerHTCapBuf[4]);
+ if (!memcmp(ht_info->peer_ht_cap_buf, EWC11NHTCap, sizeof(EWC11NHTCap)))
+ pPeerHTCap = (struct ht_capab_ele *)(&ht_info->peer_ht_cap_buf[4]);
else
- pPeerHTCap = (struct ht_capab_ele *)(ht_info->PeerHTCapBuf);
+ pPeerHTCap = (struct ht_capab_ele *)(ht_info->peer_ht_cap_buf);
- if (!memcmp(ht_info->PeerHTInfoBuf, EWC11NHTInfo, sizeof(EWC11NHTInfo)))
+ if (!memcmp(ht_info->peer_ht_info_buf, EWC11NHTInfo, sizeof(EWC11NHTInfo)))
pPeerHTInfo = (struct ht_info_ele *)
- (&ht_info->PeerHTInfoBuf[4]);
+ (&ht_info->peer_ht_info_buf[4]);
else
- pPeerHTInfo = (struct ht_info_ele *)(ht_info->PeerHTInfoBuf);
+ pPeerHTInfo = (struct ht_info_ele *)(ht_info->peer_ht_info_buf);
#ifdef VERBOSE_DEBUG
print_hex_dump_bytes("%s: ", __func__, DUMP_PREFIX_NONE,
@@ -480,9 +480,9 @@ void ht_on_assoc_rsp(struct rtllib_device *ieee)
}
ht_info->current_mpdu_density = pPeerHTCap->MPDUDensity;
- if (ht_info->iot_action & HT_IOT_ACT_TX_USE_AMSDU_8K) {
+ if (ht_info->iot_action & HT_IOT_ACT_TX_USE_AMSDU_8K)
ht_info->current_ampdu_enable = false;
- }
+
ht_info->cur_rx_reorder_enable = 1;
if (pPeerHTCap->MCS[0] == 0)
@@ -516,12 +516,12 @@ void ht_initialize_ht_info(struct rtllib_device *ieee)
ht_info->current_mpdu_density = 0;
ht_info->CurrentAMPDUFactor = ht_info->ampdu_factor;
- memset((void *)(&ht_info->SelfHTCap), 0,
- sizeof(ht_info->SelfHTCap));
- memset((void *)(&ht_info->PeerHTCapBuf), 0,
- sizeof(ht_info->PeerHTCapBuf));
- memset((void *)(&ht_info->PeerHTInfoBuf), 0,
- sizeof(ht_info->PeerHTInfoBuf));
+ memset((void *)(&ht_info->self_ht_cap), 0,
+ sizeof(ht_info->self_ht_cap));
+ memset((void *)(&ht_info->peer_ht_cap_buf), 0,
+ sizeof(ht_info->peer_ht_cap_buf));
+ memset((void *)(&ht_info->peer_ht_info_buf), 0,
+ sizeof(ht_info->peer_ht_info_buf));
ht_info->sw_bw_in_progress = false;
@@ -572,15 +572,15 @@ void ht_reset_self_and_save_peer_setting(struct rtllib_device *ieee,
ht_info->peer_ht_spec_ver = pNetwork->bssht.bd_ht_spec_ver;
if (pNetwork->bssht.bd_ht_cap_len > 0 &&
- pNetwork->bssht.bd_ht_cap_len <= sizeof(ht_info->PeerHTCapBuf))
- memcpy(ht_info->PeerHTCapBuf,
+ pNetwork->bssht.bd_ht_cap_len <= sizeof(ht_info->peer_ht_cap_buf))
+ memcpy(ht_info->peer_ht_cap_buf,
pNetwork->bssht.bd_ht_cap_buf,
pNetwork->bssht.bd_ht_cap_len);
if (pNetwork->bssht.bd_ht_info_len > 0 &&
pNetwork->bssht.bd_ht_info_len <=
- sizeof(ht_info->PeerHTInfoBuf))
- memcpy(ht_info->PeerHTInfoBuf,
+ sizeof(ht_info->peer_ht_info_buf))
+ memcpy(ht_info->peer_ht_info_buf,
pNetwork->bssht.bd_ht_info_buf,
pNetwork->bssht.bd_ht_info_len);
@@ -666,7 +666,7 @@ void ht_set_connect_bw_mode(struct rtllib_device *ieee,
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
+ if (ieee->get_half_nmode_support_by_aps_handler(ieee->dev))
bandwidth = HT_CHANNEL_WIDTH_20;
if (ht_info->sw_bw_in_progress) {
diff --git a/drivers/staging/rtl8192e/rtl819x_Qos.h b/drivers/staging/rtl8192e/rtl819x_Qos.h
index 50e01ca49a4ce..dc991100742f4 100644
--- a/drivers/staging/rtl8192e/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192e/rtl819x_Qos.h
@@ -13,7 +13,7 @@ struct qos_tsinfo {
};
struct octet_string {
- u8 *Octet;
+ u8 *octet;
u16 Length;
};
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index 7e73d31dcccfc..9903fe3f3c77c 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -171,14 +171,14 @@ void rtllib_ts_init(struct rtllib_device *ieee)
static struct ts_common_info *SearchAdmitTRStream(struct rtllib_device *ieee,
u8 *addr, u8 TID,
- enum tr_select TxRxSelect)
+ enum tr_select tx_rx_select)
{
u8 dir;
bool search_dir[4] = {0};
struct list_head *psearch_list;
struct ts_common_info *pRet = NULL;
- if (TxRxSelect == TX_DIR) {
+ if (tx_rx_select == TX_DIR) {
search_dir[DIR_UP] = true;
search_dir[DIR_BI_DIR] = true;
search_dir[DIR_DIRECT] = true;
@@ -188,7 +188,7 @@ static struct ts_common_info *SearchAdmitTRStream(struct rtllib_device *ieee,
search_dir[DIR_DIRECT] = true;
}
- if (TxRxSelect == TX_DIR)
+ if (tx_rx_select == TX_DIR)
psearch_list = &ieee->Tx_TS_Admit_List;
else
psearch_list = &ieee->Rx_TS_Admit_List;
@@ -225,7 +225,7 @@ static void MakeTSEntry(struct ts_common_info *ts_common_info, u8 *addr,
}
bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
- u8 *addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs)
+ u8 *addr, u8 TID, enum tr_select tx_rx_select, bool bAddNewTs)
{
u8 UP = 0;
struct qos_tsinfo tspec;
@@ -265,7 +265,7 @@ bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
}
}
- *ppTS = SearchAdmitTRStream(ieee, addr, UP, TxRxSelect);
+ *ppTS = SearchAdmitTRStream(ieee, addr, UP, tx_rx_select);
if (*ppTS)
return true;
@@ -274,21 +274,21 @@ bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
return false;
}
- pUnusedList = (TxRxSelect == TX_DIR) ?
+ pUnusedList = (tx_rx_select == TX_DIR) ?
(&ieee->Tx_TS_Unused_List) :
(&ieee->Rx_TS_Unused_List);
- pAddmitList = (TxRxSelect == TX_DIR) ?
+ pAddmitList = (tx_rx_select == TX_DIR) ?
(&ieee->Tx_TS_Admit_List) :
(&ieee->Rx_TS_Admit_List);
- Dir = ((TxRxSelect == TX_DIR) ? DIR_UP : DIR_DOWN);
+ Dir = ((tx_rx_select == TX_DIR) ? DIR_UP : DIR_DOWN);
if (!list_empty(pUnusedList)) {
(*ppTS) = list_entry(pUnusedList->next,
struct ts_common_info, list);
list_del_init(&(*ppTS)->list);
- if (TxRxSelect == TX_DIR) {
+ if (tx_rx_select == TX_DIR) {
struct tx_ts_record *tmp =
container_of(*ppTS,
struct tx_ts_record,
@@ -321,11 +321,11 @@ bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
}
static void RemoveTsEntry(struct rtllib_device *ieee,
- struct ts_common_info *pTs, enum tr_select TxRxSelect)
+ struct ts_common_info *pTs, enum tr_select tx_rx_select)
{
- rtllib_ts_init_del_ba(ieee, pTs, TxRxSelect);
+ rtllib_ts_init_del_ba(ieee, pTs, tx_rx_select);
- if (TxRxSelect == RX_DIR) {
+ if (tx_rx_select == RX_DIR) {
struct rx_reorder_entry *pRxReorderEntry;
struct rx_ts_record *ts = (struct rx_ts_record *)pTs;
@@ -360,7 +360,7 @@ static void RemoveTsEntry(struct rtllib_device *ieee,
}
}
-void RemovePeerTS(struct rtllib_device *ieee, u8 *addr)
+void remove_peer_ts(struct rtllib_device *ieee, u8 *addr)
{
struct ts_common_info *ts, *pTmpTS;
@@ -400,9 +400,9 @@ void RemovePeerTS(struct rtllib_device *ieee, u8 *addr)
}
}
}
-EXPORT_SYMBOL(RemovePeerTS);
+EXPORT_SYMBOL(remove_peer_ts);
-void RemoveAllTS(struct rtllib_device *ieee)
+void remove_all_ts(struct rtllib_device *ieee)
{
struct ts_common_info *ts, *pTmpTS;
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 7b39a1987fdd6..6fbf11ac168f5 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -123,7 +123,7 @@ struct cb_desc {
u8 bPacketBW:1;
u8 bRTSUseShortPreamble:1;
u8 bRTSUseShortGI:1;
- u8 bMulticast:1;
+ u8 multicast:1;
u8 bBroadcast:1;
u8 drv_agg_enable:1;
u8 reserved2:1;
@@ -474,47 +474,30 @@ struct rtllib_rx_stats {
u8 control;
u8 mask;
u16 len;
- u64 tsf;
- u32 beacon_time;
- u8 nic_type;
u16 Length;
u8 SignalQuality;
s32 RecvSignalPower;
- s8 RxPower;
u8 SignalStrength;
u16 bHwError:1;
u16 bCRC:1;
u16 bICV:1;
- u16 bShortPreamble:1;
- u16 Antenna:1;
u16 Decrypted:1;
- u16 Wakeup:1;
- u16 Reserved0:1;
- u8 AGC;
u32 TimeStampLow;
u32 TimeStampHigh;
- bool bShift;
- bool bIsQosData;
u8 RxDrvInfoSize;
u8 RxBufShift;
bool bIsAMPDU;
bool bFirstMPDU;
bool bContainHTC;
- bool RxIs40MHzPacket;
u32 RxPWDBAll;
u8 RxMIMOSignalStrength[4];
s8 RxMIMOSignalQuality[2];
bool bPacketMatchBSSID;
bool bIsCCK;
bool bPacketToSelf;
- u16 packetlength;
- u16 fraglength;
- u16 fragoffset;
- u16 ntotalfrag;
bool bPacketBeacon;
bool bToSelfBA;
- u16 Seq_Num;
};
/* IEEE 802.11 requires that STA supports concurrent reception of at least
@@ -928,14 +911,14 @@ struct rtllib_network {
struct rtllib_qos_data qos_data;
bool bWithAironetIE;
- bool bCkipSupported;
- bool bCcxRmEnable;
+ bool ckip_supported;
+ bool ccx_rm_enable;
u8 CcxRmState[2];
bool bMBssidValid;
u8 MBssidMask;
u8 MBssid[ETH_ALEN];
bool bWithCcxVerNum;
- u8 BssCcxVerNumber;
+ u8 bss_ccx_ver_number;
/* These are network statistics */
struct rtllib_rx_stats stats;
u16 capability;
@@ -965,7 +948,7 @@ struct rtllib_network {
u8 wmm_info;
struct rtllib_wmm_ac_param wmm_param[4];
- u8 Turbo_Enable;
+ u8 turbo_enable;
u16 CountryIeLen;
u8 CountryIeBuf[MAX_IE_LEN];
struct bss_ht bssht;
@@ -1048,9 +1031,9 @@ struct rx_reorder_entry {
};
enum fsync_state {
- Default_Fsync,
- HW_Fsync,
- SW_Fsync
+ DEFAULT_FSYNC,
+ HW_FSYNC,
+ SW_FSYNC
};
enum ips_callback_function {
@@ -1071,8 +1054,8 @@ struct rt_pwr_save_ctrl {
enum ips_callback_function ReturnPoint;
bool bLeisurePs;
- u8 LpsIdleCount;
- u8 LPSAwakeIntvl;
+ u8 lps_idle_count;
+ u8 lps_awake_intvl;
u32 CurPsLevel;
};
@@ -1110,18 +1093,18 @@ enum scan_op_backup_opt {
#define RT_MAX_LD_SLOT_NUM 10
struct rt_link_detect {
- u32 NumRecvBcnInPeriod;
- u32 NumRecvDataInPeriod;
+ u32 num_recv_bcn_in_period;
+ u32 num_recv_data_in_period;
u32 RxBcnNum[RT_MAX_LD_SLOT_NUM];
u32 RxDataNum[RT_MAX_LD_SLOT_NUM];
- u16 SlotNum;
- u16 SlotIndex;
+ u16 slot_num;
+ u16 slot_index;
u32 num_tx_ok_in_period;
u32 num_rx_ok_in_period;
- u32 NumRxUnicastOkInPeriod;
- bool bBusyTraffic;
+ u32 num_rx_unicast_ok_in_period;
+ bool busy_traffic;
bool bHigherBusyTraffic;
bool bHigherBusyRxTraffic;
};
@@ -1161,7 +1144,7 @@ struct rate_adaptive {
#define NUM_PMKID_CACHE 16
struct rt_pmkid_list {
- u8 Bssid[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
u8 PMKID[16];
u8 SsidBuf[33];
u8 used;
@@ -1193,7 +1176,7 @@ struct rtllib_device {
u8 *assocreq_ies, *assocresp_ies;
size_t assocreq_ies_len, assocresp_ies_len;
- bool bForcedBgMode;
+ bool forced_bg_mode;
u8 hwsec_active;
bool is_roaming;
@@ -1201,7 +1184,7 @@ struct rtllib_device {
bool cannot_notify;
bool bSupportRemoteWakeUp;
bool actscanning;
- bool FirstIe_InScan;
+ bool first_ie_in_scan;
bool be_scan_inprogress;
bool beinretry;
enum rt_rf_power_state rf_power_state;
@@ -1264,7 +1247,7 @@ struct rtllib_device {
int ieee802_1x; /* is IEEE 802.1X used */
/* WPA data */
- bool bHalfWirelessN24GMode;
+ bool half_wireless_n24g_mode;
int wpa_enabled;
int drop_unencrypted;
int tkip_countermeasures;
@@ -1281,7 +1264,7 @@ struct rtllib_device {
struct sw_cam_table swcamtable[TOTAL_CAM_ENTRY];
- struct rt_pmkid_list PMKIDList[NUM_PMKID_CACHE];
+ struct rt_pmkid_list pmkid_list[NUM_PMKID_CACHE];
/* Fragmentation structures */
struct rtllib_frag_entry frag_cache[17][RTLLIB_FRAG_CACHE_LEN];
@@ -1374,14 +1357,14 @@ struct rtllib_device {
/* for PS mode */
unsigned long last_rx_ps_time;
- bool bAwakePktSent;
- u8 LPSDelayCnt;
+ bool awake_pkt_sent;
+ u8 lps_delay_cnt;
/* used if IEEE_SOFTMAC_SINGLE_QUEUE is set */
struct sk_buff *mgmt_queue_ring[MGMT_QUEUE_NUM];
int mgmt_queue_head;
int mgmt_queue_tail;
- u8 AsocRetryCount;
+ u8 asoc_retry_count;
struct sk_buff_head skb_waitq[MAX_QUEUE_SIZE];
bool bdynamic_txpower_enable;
@@ -1484,17 +1467,18 @@ struct rtllib_device {
void (*set_bw_mode_handler)(struct net_device *dev,
enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset);
- bool (*GetNmodeSupportBySecCfg)(struct net_device *dev);
+ bool (*get_nmode_support_by_sec_cfg)(struct net_device *dev);
void (*set_wireless_mode)(struct net_device *dev, u8 wireless_mode);
- bool (*GetHalfNmodeSupportByAPsHandler)(struct net_device *dev);
+ bool (*get_half_nmode_support_by_aps_handler)(struct net_device *dev);
u8 (*rtllib_ap_sec_type)(struct rtllib_device *ieee);
void (*init_gain_handler)(struct net_device *dev, u8 Operation);
void (*ScanOperationBackupHandler)(struct net_device *dev,
u8 Operation);
- void (*SetHwRegHandler)(struct net_device *dev, u8 variable, u8 *val);
+ void (*set_hw_reg_handler)(struct net_device *dev, u8 variable, u8 *val);
- void (*AllowAllDestAddrHandler)(struct net_device *dev,
- bool bAllowAllDA, bool WriteIntoReg);
+ void (*allow_all_dest_addr_handler)(struct net_device *dev,
+ bool bAllowAllDA,
+ bool WriteIntoReg);
void (*rtllib_ips_leave_wq)(struct net_device *dev);
void (*rtllib_ips_leave)(struct net_device *dev);
@@ -1662,7 +1646,7 @@ int rtllib_rx_frame_softmac(struct rtllib_device *ieee, struct sk_buff *skb,
void rtllib_softmac_new_net(struct rtllib_device *ieee,
struct rtllib_network *net);
-void SendDisassociation(struct rtllib_device *ieee, bool deauth, u16 asRsn);
+void send_disassociation(struct rtllib_device *ieee, bool deauth, u16 rsn);
void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee);
int rtllib_softmac_init(struct rtllib_device *ieee);
@@ -1771,25 +1755,25 @@ u8 ht_c_check(struct rtllib_device *ieee, u8 *pFrame);
void ht_reset_iot_setting(struct rt_hi_throughput *ht_info);
bool is_ht_half_nmode_aps(struct rtllib_device *ieee);
u16 tx_count_to_data_rate(struct rtllib_device *ieee, u8 nDataRate);
-int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb);
-int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb);
+int rtllib_rx_add_ba_req(struct rtllib_device *ieee, struct sk_buff *skb);
+int rtllib_rx_add_ba_rsp(struct rtllib_device *ieee, struct sk_buff *skb);
int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb);
void rtllib_ts_init_add_ba(struct rtllib_device *ieee, struct tx_ts_record *ts,
u8 policy, u8 overwrite_pending);
void rtllib_ts_init_del_ba(struct rtllib_device *ieee,
struct ts_common_info *ts_common_info,
- enum tr_select TxRxSelect);
+ enum tr_select tx_rx_select);
void rtllib_ba_setup_timeout(struct timer_list *t);
void rtllib_tx_ba_inact_timeout(struct timer_list *t);
void rtllib_rx_ba_inact_timeout(struct timer_list *t);
void rtllib_reset_ba_entry(struct ba_record *ba);
bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS, u8 *addr,
- u8 TID, enum tr_select TxRxSelect, bool bAddNewTs);
+ u8 TID, enum tr_select tx_rx_select, bool bAddNewTs);
void rtllib_ts_init(struct rtllib_device *ieee);
void TsStartAddBaProcess(struct rtllib_device *ieee,
struct tx_ts_record *pTxTS);
-void RemovePeerTS(struct rtllib_device *ieee, u8 *addr);
-void RemoveAllTS(struct rtllib_device *ieee);
+void remove_peer_ts(struct rtllib_device *ieee, u8 *addr);
+void remove_all_ts(struct rtllib_device *ieee);
static inline const char *escape_essid(const char *essid, u8 essid_len)
{
@@ -1805,13 +1789,13 @@ static inline const char *escape_essid(const char *essid, u8 essid_len)
}
/* fun with the built-in rtllib stack... */
-bool rtllib_MgntDisconnect(struct rtllib_device *rtllib, u8 asRsn);
+bool rtllib_mgnt_disconnect(struct rtllib_device *rtllib, u8 rsn);
/* For the function is more related to hardware setting, it's better to use the
* ieee handler to refer to it.
*/
-void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee,
- struct rx_ts_record *ts);
+void rtllib_flush_rx_ts_pending_pkts(struct rtllib_device *ieee,
+ struct rx_ts_record *ts);
int rtllib_parse_info_param(struct rtllib_device *ieee,
struct rtllib_info_element *info_element,
u16 length,
@@ -1821,6 +1805,6 @@ int rtllib_parse_info_param(struct rtllib_device *ieee,
void rtllib_indicate_packets(struct rtllib_device *ieee,
struct rtllib_rxb **prxbIndicateArray, u8 index);
#define RT_ASOC_RETRY_LIMIT 5
-u8 MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee);
+u8 mgnt_query_tx_rate_exclude_cck_rates(struct rtllib_device *ieee);
#endif /* RTLLIB_H */
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 4df20f4d6bf91..ebf8a2fd36d35 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -487,8 +487,8 @@ void rtllib_indicate_packets(struct rtllib_device *ieee,
}
}
-void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee,
- struct rx_ts_record *ts)
+void rtllib_flush_rx_ts_pending_pkts(struct rtllib_device *ieee,
+ struct rx_ts_record *ts)
{
struct rx_reorder_entry *pRxReorderEntry;
u8 RfdCnt = 0;
@@ -865,9 +865,6 @@ static size_t rtllib_rx_get_hdrlen(struct rtllib_device *ieee,
rx_stats->bContainHTC = true;
}
- if (RTLLIB_QOS_HAS_SEQ(fc))
- rx_stats->bIsQosData = true;
-
return hdrlen;
}
@@ -943,10 +940,9 @@ static void rtllib_rx_extract_addr(struct rtllib_device *ieee,
static int rtllib_rx_data_filter(struct rtllib_device *ieee, struct ieee80211_hdr *hdr,
u8 *dst, u8 *src, u8 *bssid, u8 *addr2)
{
- u8 type, stype;
u16 fc = le16_to_cpu(hdr->frame_control);
- type = WLAN_FC_GET_TYPE(fc);
- stype = WLAN_FC_GET_STYPE(fc);
+ u8 type = WLAN_FC_GET_TYPE(fc);
+ u8 stype = WLAN_FC_GET_STYPE(fc);
/* Filter frames from different BSS */
if (ieee80211_has_a4(hdr->frame_control) &&
@@ -1149,9 +1145,9 @@ static void rtllib_rx_check_leave_lps(struct rtllib_device *ieee, u8 unicast,
{
if (unicast) {
if (ieee->link_state == MAC80211_LINKED) {
- if (((ieee->link_detect_info.NumRxUnicastOkInPeriod +
+ if (((ieee->link_detect_info.num_rx_unicast_ok_in_period +
ieee->link_detect_info.num_tx_ok_in_period) > 8) ||
- (ieee->link_detect_info.NumRxUnicastOkInPeriod > 2)) {
+ (ieee->link_detect_info.num_rx_unicast_ok_in_period > 2)) {
ieee->leisure_ps_leave(ieee->dev);
}
}
@@ -1284,7 +1280,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
/* Filter WAPI DATA Frame */
	/* Update statistics for AP roaming */
- ieee->link_detect_info.NumRecvDataInPeriod++;
+ ieee->link_detect_info.num_recv_data_in_period++;
ieee->link_detect_info.num_rx_ok_in_period++;
/* Data frame - extract src/dst addresses */
@@ -1363,7 +1359,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
else
nr_subframes = 1;
if (unicast)
- ieee->link_detect_info.NumRxUnicastOkInPeriod += nr_subframes;
+ ieee->link_detect_info.num_rx_unicast_ok_in_period += nr_subframes;
rtllib_rx_check_leave_lps(ieee, unicast, nr_subframes);
/* Indicate packets to upper layer or Rx Reorder */
@@ -1689,7 +1685,7 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
info_element->data[2] == 0x4c &&
info_element->data[3] == 0x01 &&
info_element->data[4] == 0x02)
- network->Turbo_Enable = 1;
+ network->turbo_enable = 1;
if (*tmp_htcap_len == 0) {
if (info_element->len >= 4 &&
@@ -1819,9 +1815,9 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
if (info_element->len == 6) {
memcpy(network->CcxRmState, &info_element->data[4], 2);
if (network->CcxRmState[0] != 0)
- network->bCcxRmEnable = true;
+ network->ccx_rm_enable = true;
else
- network->bCcxRmEnable = false;
+ network->ccx_rm_enable = false;
network->MBssidMask = network->CcxRmState[1] & 0x07;
if (network->MBssidMask != 0) {
network->bMBssidValid = true;
@@ -1834,7 +1830,7 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
network->bMBssidValid = false;
}
} else {
- network->bCcxRmEnable = false;
+ network->ccx_rm_enable = false;
}
}
if (info_element->len > 4 &&
@@ -1844,10 +1840,10 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
info_element->data[3] == 0x03) {
if (info_element->len == 5) {
network->bWithCcxVerNum = true;
- network->BssCcxVerNumber = info_element->data[4];
+ network->bss_ccx_ver_number = info_element->data[4];
} else {
network->bWithCcxVerNum = false;
- network->BssCcxVerNumber = 0;
+ network->bss_ccx_ver_number = 0;
}
}
if (info_element->len > 4 &&
@@ -2100,12 +2096,12 @@ int rtllib_parse_info_param(struct rtllib_device *ieee,
& SUPPORT_CKIP_MIC) ||
(info_element->data[IE_CISCO_FLAG_POSITION]
& SUPPORT_CKIP_PK))
- network->bCkipSupported = true;
+ network->ckip_supported = true;
else
- network->bCkipSupported = false;
+ network->ckip_supported = false;
} else {
network->bWithAironetIE = false;
- network->bCkipSupported = false;
+ network->ckip_supported = false;
}
break;
case MFIE_TYPE_QOS_PARAMETER:
@@ -2184,7 +2180,7 @@ static inline int rtllib_network_init(
network->realtek_cap_exit = false;
network->marvell_cap_exist = false;
network->airgo_cap_exist = false;
- network->Turbo_Enable = 0;
+ network->turbo_enable = 0;
network->SignalStrength = stats->SignalStrength;
network->RSSI = stats->SignalStrength;
network->CountryIeLen = 0;
@@ -2344,20 +2340,20 @@ static inline void update_network(struct rtllib_device *ieee,
dst->SignalStrength = src->SignalStrength;
dst->RSSI = src->RSSI;
- dst->Turbo_Enable = src->Turbo_Enable;
+ dst->turbo_enable = src->turbo_enable;
dst->CountryIeLen = src->CountryIeLen;
memcpy(dst->CountryIeBuf, src->CountryIeBuf, src->CountryIeLen);
dst->bWithAironetIE = src->bWithAironetIE;
- dst->bCkipSupported = src->bCkipSupported;
+ dst->ckip_supported = src->ckip_supported;
memcpy(dst->CcxRmState, src->CcxRmState, 2);
- dst->bCcxRmEnable = src->bCcxRmEnable;
+ dst->ccx_rm_enable = src->ccx_rm_enable;
dst->MBssidMask = src->MBssidMask;
dst->bMBssidValid = src->bMBssidValid;
memcpy(dst->MBssid, src->MBssid, 6);
dst->bWithCcxVerNum = src->bWithCcxVerNum;
- dst->BssCcxVerNumber = src->BssCcxVerNumber;
+ dst->bss_ccx_ver_number = src->bss_ccx_ver_number;
}
static int IsPassiveChannel(struct rtllib_device *rtllib, u8 channel)
@@ -2470,7 +2466,7 @@ static inline void rtllib_process_probe_response(
}
if (ieee80211_is_beacon(frame_ctl)) {
if (ieee->link_state >= MAC80211_LINKED)
- ieee->link_detect_info.NumRecvBcnInPeriod++;
+ ieee->link_detect_info.num_recv_bcn_in_period++;
}
}
list_for_each_entry(target, &ieee->network_list, list) {
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index b9278b26accd8..97fdca828da70 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -138,7 +138,7 @@ static void init_mgmt_queue(struct rtllib_device *ieee)
ieee->mgmt_queue_head = 0;
}
-u8 MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee)
+u8 mgnt_query_tx_rate_exclude_cck_rates(struct rtllib_device *ieee)
{
u16 i;
u8 query_rate = 0;
@@ -163,7 +163,7 @@ u8 MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee)
return query_rate;
}
-static u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee)
+static u8 mgnt_query_mgnt_frame_tx_rate(struct rtllib_device *ieee)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
u8 rate;
@@ -201,7 +201,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
if (ieee->disable_mgnt_queue)
tcb_desc->queue_index = HIGH_QUEUE;
- tcb_desc->data_rate = MgntQuery_MgntFrameTxRate(ieee);
+ tcb_desc->data_rate = mgnt_query_mgnt_frame_tx_rate(ieee);
tcb_desc->ratr_index = 7;
tcb_desc->tx_dis_rate_fallback = 1;
tcb_desc->tx_use_drv_assinged_rate = 1;
@@ -277,7 +277,7 @@ softmac_ps_mgmt_xmit(struct sk_buff *skb,
if (ieee->disable_mgnt_queue)
tcb_desc->queue_index = HIGH_QUEUE;
- tcb_desc->data_rate = MgntQuery_MgntFrameTxRate(ieee);
+ tcb_desc->data_rate = mgnt_query_mgnt_frame_tx_rate(ieee);
tcb_desc->ratr_index = 7;
tcb_desc->tx_dis_rate_fallback = 1;
tcb_desc->tx_use_drv_assinged_rate = 1;
@@ -355,20 +355,19 @@ void rtllib_enable_net_monitor_mode(struct net_device *dev,
netdev_info(dev, "========>Enter Monitor Mode\n");
- ieee->AllowAllDestAddrHandler(dev, true, !init_state);
+ ieee->allow_all_dest_addr_handler(dev, true, !init_state);
}
/* Disables network monitor mode. Only packets destined to
* us will be received.
*/
-void rtllib_disable_net_monitor_mode(struct net_device *dev,
- bool init_state)
+void rtllib_disable_net_monitor_mode(struct net_device *dev, bool init_state)
{
struct rtllib_device *ieee = netdev_priv_rsl(dev);
netdev_info(dev, "========>Exit Monitor Mode\n");
- ieee->AllowAllDestAddrHandler(dev, false, !init_state);
+ ieee->allow_all_dest_addr_handler(dev, false, !init_state);
}
static void rtllib_send_probe(struct rtllib_device *ieee)
@@ -665,13 +664,13 @@ static struct sk_buff *rtllib_pspoll_func(struct rtllib_device *ieee)
return skb;
}
-static inline int SecIsInPMKIDList(struct rtllib_device *ieee, u8 *bssid)
+static inline int sec_is_in_pmkid_list(struct rtllib_device *ieee, u8 *bssid)
{
int i = 0;
do {
- if ((ieee->PMKIDList[i].used) &&
- (memcmp(ieee->PMKIDList[i].Bssid, bssid, ETH_ALEN) == 0))
+ if ((ieee->pmkid_list[i].used) &&
+ (memcmp(ieee->pmkid_list[i].bssid, bssid, ETH_ALEN) == 0))
break;
i++;
} while (i < NUM_PMKID_CACHE);
@@ -700,7 +699,7 @@ rtllib_association_req(struct rtllib_network *beacon,
unsigned int cxvernum_ie_len = 0;
struct lib80211_crypt_data *crypt;
int encrypt;
- int PMKCacheIdx;
+ int pmk_cache_idx;
unsigned int rate_len = (beacon->rates_len ?
(beacon->rates_len + 2) : 0) +
@@ -708,7 +707,7 @@ rtllib_association_req(struct rtllib_network *beacon,
2 : 0);
unsigned int wmm_info_len = beacon->qos_data.supported ? 9 : 0;
- unsigned int turbo_info_len = beacon->Turbo_Enable ? 9 : 0;
+ unsigned int turbo_info_len = beacon->turbo_enable ? 9 : 0;
int len = 0;
@@ -722,14 +721,14 @@ rtllib_association_req(struct rtllib_network *beacon,
if ((ieee->rtllib_ap_sec_type &&
(ieee->rtllib_ap_sec_type(ieee) & SEC_ALG_TKIP)) ||
- ieee->bForcedBgMode) {
+ ieee->forced_bg_mode) {
ieee->ht_info->enable_ht = 0;
ieee->mode = WIRELESS_MODE_G;
}
if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht) {
- ht_cap_buf = (u8 *)&ieee->ht_info->SelfHTCap;
- ht_cap_len = sizeof(ieee->ht_info->SelfHTCap);
+ ht_cap_buf = (u8 *)&ieee->ht_info->self_ht_cap;
+ ht_cap_len = sizeof(ieee->ht_info->self_ht_cap);
ht_construct_capability_element(ieee, ht_cap_buf, &ht_cap_len,
encrypt, true);
if (ieee->ht_info->current_rt2rt_aggregation) {
@@ -741,15 +740,15 @@ rtllib_association_req(struct rtllib_network *beacon,
}
}
- if (beacon->bCkipSupported)
+ if (beacon->ckip_supported)
ckip_ie_len = 30 + 2;
- if (beacon->bCcxRmEnable)
+ if (beacon->ccx_rm_enable)
ccxrm_ie_len = 6 + 2;
- if (beacon->BssCcxVerNumber >= 2)
+ if (beacon->bss_ccx_ver_number >= 2)
cxvernum_ie_len = 5 + 2;
- PMKCacheIdx = SecIsInPMKIDList(ieee, ieee->current_network.bssid);
- if (PMKCacheIdx >= 0) {
+ pmk_cache_idx = sec_is_in_pmkid_list(ieee, ieee->current_network.bssid);
+ if (pmk_cache_idx >= 0) {
wpa_ie_len += 18;
netdev_info(ieee->dev, "[PMK cache]: WPA2 IE length: %x\n",
wpa_ie_len);
@@ -818,52 +817,52 @@ rtllib_association_req(struct rtllib_network *beacon,
*tag++ = beacon->rates_ex[i];
}
- if (beacon->bCkipSupported) {
- static const u8 AironetIeOui[] = {0x00, 0x01, 0x66};
- u8 CcxAironetBuf[30];
- struct octet_string osCcxAironetIE;
+ if (beacon->ckip_supported) {
+ static const u8 aironet_ie_oui[] = {0x00, 0x01, 0x66};
+ u8 ccx_aironet_buf[30];
+ struct octet_string os_ccx_aironet_ie;
- memset(CcxAironetBuf, 0, 30);
- osCcxAironetIE.Octet = CcxAironetBuf;
- osCcxAironetIE.Length = sizeof(CcxAironetBuf);
- memcpy(osCcxAironetIE.Octet, AironetIeOui,
- sizeof(AironetIeOui));
+ memset(ccx_aironet_buf, 0, 30);
+ os_ccx_aironet_ie.octet = ccx_aironet_buf;
+ os_ccx_aironet_ie.Length = sizeof(ccx_aironet_buf);
+ memcpy(os_ccx_aironet_ie.octet, aironet_ie_oui,
+ sizeof(aironet_ie_oui));
- osCcxAironetIE.Octet[IE_CISCO_FLAG_POSITION] |=
+ os_ccx_aironet_ie.octet[IE_CISCO_FLAG_POSITION] |=
(SUPPORT_CKIP_PK | SUPPORT_CKIP_MIC);
tag = skb_put(skb, ckip_ie_len);
*tag++ = MFIE_TYPE_AIRONET;
- *tag++ = osCcxAironetIE.Length;
- memcpy(tag, osCcxAironetIE.Octet, osCcxAironetIE.Length);
- tag += osCcxAironetIE.Length;
+ *tag++ = os_ccx_aironet_ie.Length;
+ memcpy(tag, os_ccx_aironet_ie.octet, os_ccx_aironet_ie.Length);
+ tag += os_ccx_aironet_ie.Length;
}
- if (beacon->bCcxRmEnable) {
- static const u8 CcxRmCapBuf[] = {0x00, 0x40, 0x96, 0x01, 0x01,
+ if (beacon->ccx_rm_enable) {
+ static const u8 ccx_rm_cap_buf[] = {0x00, 0x40, 0x96, 0x01, 0x01,
0x00};
- struct octet_string osCcxRmCap;
+ struct octet_string os_ccx_rm_cap;
- osCcxRmCap.Octet = (u8 *)CcxRmCapBuf;
- osCcxRmCap.Length = sizeof(CcxRmCapBuf);
+ os_ccx_rm_cap.octet = (u8 *)ccx_rm_cap_buf;
+ os_ccx_rm_cap.Length = sizeof(ccx_rm_cap_buf);
tag = skb_put(skb, ccxrm_ie_len);
*tag++ = MFIE_TYPE_GENERIC;
- *tag++ = osCcxRmCap.Length;
- memcpy(tag, osCcxRmCap.Octet, osCcxRmCap.Length);
- tag += osCcxRmCap.Length;
+ *tag++ = os_ccx_rm_cap.Length;
+ memcpy(tag, os_ccx_rm_cap.octet, os_ccx_rm_cap.Length);
+ tag += os_ccx_rm_cap.Length;
}
- if (beacon->BssCcxVerNumber >= 2) {
- u8 CcxVerNumBuf[] = {0x00, 0x40, 0x96, 0x03, 0x00};
- struct octet_string osCcxVerNum;
+ if (beacon->bss_ccx_ver_number >= 2) {
+ u8 ccx_ver_num_buf[] = {0x00, 0x40, 0x96, 0x03, 0x00};
+ struct octet_string os_ccx_ver_num;
- CcxVerNumBuf[4] = beacon->BssCcxVerNumber;
- osCcxVerNum.Octet = CcxVerNumBuf;
- osCcxVerNum.Length = sizeof(CcxVerNumBuf);
+ ccx_ver_num_buf[4] = beacon->bss_ccx_ver_number;
+ os_ccx_ver_num.octet = ccx_ver_num_buf;
+ os_ccx_ver_num.Length = sizeof(ccx_ver_num_buf);
tag = skb_put(skb, cxvernum_ie_len);
*tag++ = MFIE_TYPE_GENERIC;
- *tag++ = osCcxVerNum.Length;
- memcpy(tag, osCcxVerNum.Octet, osCcxVerNum.Length);
- tag += osCcxVerNum.Length;
+ *tag++ = os_ccx_ver_num.Length;
+ memcpy(tag, os_ccx_ver_num.octet, os_ccx_ver_num.Length);
+ tag += os_ccx_ver_num.Length;
}
if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht) {
if (ieee->ht_info->peer_ht_spec_ver != HT_SPEC_VER_EWC) {
@@ -878,11 +877,11 @@ rtllib_association_req(struct rtllib_network *beacon,
if (wpa_ie_len) {
skb_put_data(skb, ieee->wpa_ie, ieee->wpa_ie_len);
- if (PMKCacheIdx >= 0) {
+ if (pmk_cache_idx >= 0) {
tag = skb_put(skb, 18);
*tag = 1;
*(tag + 1) = 0;
- memcpy((tag + 2), &ieee->PMKIDList[PMKCacheIdx].PMKID,
+ memcpy((tag + 2), &ieee->pmkid_list[pmk_cache_idx].PMKID,
16);
}
}
@@ -1072,17 +1071,16 @@ static void rtllib_associate_complete_wq(void *data)
ieee->ht_info->enable_ht);
memset(ieee->dot11ht_oper_rate_set, 0, 16);
}
- ieee->link_detect_info.SlotNum = 2 * (1 +
+ ieee->link_detect_info.slot_num = 2 * (1 +
ieee->current_network.beacon_interval /
500);
- if (ieee->link_detect_info.NumRecvBcnInPeriod == 0 ||
- ieee->link_detect_info.NumRecvDataInPeriod == 0) {
- ieee->link_detect_info.NumRecvBcnInPeriod = 1;
- ieee->link_detect_info.NumRecvDataInPeriod = 1;
+ if (ieee->link_detect_info.num_recv_bcn_in_period == 0 ||
+ ieee->link_detect_info.num_recv_data_in_period == 0) {
+ ieee->link_detect_info.num_recv_bcn_in_period = 1;
+ ieee->link_detect_info.num_recv_data_in_period = 1;
}
- psc->LpsIdleCount = 0;
+ psc->lps_idle_count = 0;
ieee->link_change(ieee->dev);
-
}
static void rtllib_sta_send_associnfo(struct rtllib_device *ieee)
@@ -1209,18 +1207,18 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
ieee->current_network.flags);
if ((rtllib_act_scanning(ieee, false)) &&
- !(ieee->softmac_features & IEEE_SOFTMAC_SCAN))
+ !(ieee->softmac_features & IEEE_SOFTMAC_SCAN))
rtllib_stop_scan_syncro(ieee);
ht_reset_iot_setting(ieee->ht_info);
ieee->wmm_acm = 0;
if (ieee->iw_mode == IW_MODE_INFRA) {
/* Join the network for the first time */
- ieee->AsocRetryCount = 0;
+ ieee->asoc_retry_count = 0;
if ((ieee->current_network.qos_data.supported == 1) &&
ieee->current_network.bssht.bd_support_ht)
ht_reset_self_and_save_peer_setting(ieee,
- &(ieee->current_network));
+ &ieee->current_network);
else
ieee->ht_info->current_ht_support = false;
@@ -1319,10 +1317,10 @@ static inline u16 assoc_parse(struct rtllib_device *ieee, struct sk_buff *skb,
status_code == WLAN_STATUS_CAPS_UNSUPPORTED) &&
((ieee->mode == WIRELESS_MODE_G) &&
(ieee->current_network.mode == WIRELESS_MODE_N_24G) &&
- (ieee->AsocRetryCount++ < (RT_ASOC_RETRY_LIMIT - 1)))) {
+ (ieee->asoc_retry_count++ < (RT_ASOC_RETRY_LIMIT - 1)))) {
ieee->ht_info->iot_action |= HT_IOT_ACT_PURE_N_MODE;
} else {
- ieee->AsocRetryCount = 0;
+ ieee->asoc_retry_count = 0;
}
return le16_to_cpu(response_head->status);
@@ -1351,8 +1349,8 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
u8 dtim;
struct rt_pwr_save_ctrl *psc = &ieee->pwr_save_ctrl;
- if (ieee->LPSDelayCnt) {
- ieee->LPSDelayCnt--;
+ if (ieee->lps_delay_cnt) {
+ ieee->lps_delay_cnt--;
return 0;
}
@@ -1378,45 +1376,45 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
return 0;
if (time) {
- if (ieee->bAwakePktSent) {
- psc->LPSAwakeIntvl = 1;
+ if (ieee->awake_pkt_sent) {
+ psc->lps_awake_intvl = 1;
} else {
- u8 MaxPeriod = 5;
+ u8 max_period = 5;
- if (psc->LPSAwakeIntvl == 0)
- psc->LPSAwakeIntvl = 1;
- psc->LPSAwakeIntvl = (psc->LPSAwakeIntvl >=
- MaxPeriod) ? MaxPeriod :
- (psc->LPSAwakeIntvl + 1);
+ if (psc->lps_awake_intvl == 0)
+ psc->lps_awake_intvl = 1;
+ psc->lps_awake_intvl = (psc->lps_awake_intvl >=
+ max_period) ? max_period :
+ (psc->lps_awake_intvl + 1);
}
{
- u8 LPSAwakeIntvl_tmp = 0;
+ u8 lps_awake_intvl_tmp = 0;
u8 period = ieee->current_network.dtim_period;
u8 count = ieee->current_network.tim.tim_count;
if (count == 0) {
- if (psc->LPSAwakeIntvl > period)
- LPSAwakeIntvl_tmp = period +
- (psc->LPSAwakeIntvl -
+ if (psc->lps_awake_intvl > period)
+ lps_awake_intvl_tmp = period +
+ (psc->lps_awake_intvl -
period) -
- ((psc->LPSAwakeIntvl - period) %
+ ((psc->lps_awake_intvl - period) %
period);
else
- LPSAwakeIntvl_tmp = psc->LPSAwakeIntvl;
+ lps_awake_intvl_tmp = psc->lps_awake_intvl;
} else {
- if (psc->LPSAwakeIntvl >
+ if (psc->lps_awake_intvl >
ieee->current_network.tim.tim_count)
- LPSAwakeIntvl_tmp = count +
- (psc->LPSAwakeIntvl - count) -
- ((psc->LPSAwakeIntvl - count) % period);
+ lps_awake_intvl_tmp = count +
+ (psc->lps_awake_intvl - count) -
+ ((psc->lps_awake_intvl - count) % period);
else
- LPSAwakeIntvl_tmp = psc->LPSAwakeIntvl;
+ lps_awake_intvl_tmp = psc->lps_awake_intvl;
}
*time = ieee->current_network.last_dtim_sta_time
+ msecs_to_jiffies(ieee->current_network.beacon_interval *
- LPSAwakeIntvl_tmp);
+ lps_awake_intvl_tmp);
}
}
@@ -1461,7 +1459,7 @@ static inline void rtllib_sta_ps(struct work_struct *work)
spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
}
- ieee->bAwakePktSent = false;
+ ieee->awake_pkt_sent = false;
} else if (sleep == 2) {
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
@@ -1553,10 +1551,10 @@ static void rtllib_process_action(struct rtllib_device *ieee,
case ACT_CAT_BA:
switch (*act) {
case ACT_ADDBAREQ:
- rtllib_rx_ADDBAReq(ieee, skb);
+ rtllib_rx_add_ba_req(ieee, skb);
break;
case ACT_ADDBARSP:
- rtllib_rx_ADDBARsp(ieee, skb);
+ rtllib_rx_add_ba_rsp(ieee, skb);
break;
case ACT_DELBA:
rtllib_rx_DELBA(ieee, skb);
@@ -1606,10 +1604,10 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
kfree(network);
return 1;
}
- memcpy(ieee->ht_info->PeerHTCapBuf,
+ memcpy(ieee->ht_info->peer_ht_cap_buf,
network->bssht.bd_ht_cap_buf,
network->bssht.bd_ht_cap_len);
- memcpy(ieee->ht_info->PeerHTInfoBuf,
+ memcpy(ieee->ht_info->peer_ht_info_buf,
network->bssht.bd_ht_info_buf,
network->bssht.bd_ht_info_len);
ieee->handle_assoc_response(ieee->dev,
@@ -1634,7 +1632,7 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
netdev_info(ieee->dev,
"Association response status code 0x%x\n",
errcode);
- if (ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT)
+ if (ieee->asoc_retry_count < RT_ASOC_RETRY_LIMIT)
schedule_delayed_work(&ieee->associate_procedure_wq, 0);
else
rtllib_associate_abort(ieee);
@@ -1648,7 +1646,7 @@ static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
int errcode;
u8 *challenge;
int chlen = 0;
- bool bSupportNmode = true, bHalfSupportNmode = false;
+ bool support_nmode = true, half_support_nmode = false;
errcode = auth_parse(ieee->dev, skb, &challenge, &chlen);
@@ -1664,18 +1662,18 @@ static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
ieee->link_state = RTLLIB_ASSOCIATING_AUTHENTICATED;
ieee->softmac_stats.rx_auth_rs_ok++;
if (!(ieee->ht_info->iot_action & HT_IOT_ACT_PURE_N_MODE)) {
- if (!ieee->GetNmodeSupportBySecCfg(ieee->dev)) {
+ if (!ieee->get_nmode_support_by_sec_cfg(ieee->dev)) {
if (is_ht_half_nmode_aps(ieee)) {
- bSupportNmode = true;
- bHalfSupportNmode = true;
+ support_nmode = true;
+ half_support_nmode = true;
} else {
- bSupportNmode = false;
- bHalfSupportNmode = false;
+ support_nmode = false;
+ half_support_nmode = false;
}
}
}
	/* Dummy wireless mode setting to avoid encryption issue */
- if (bSupportNmode) {
+ if (support_nmode) {
ieee->set_wireless_mode(ieee->dev,
ieee->current_network.mode);
} else {
@@ -1684,11 +1682,11 @@ static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
}
if ((ieee->current_network.mode == WIRELESS_MODE_N_24G) &&
- bHalfSupportNmode) {
+ half_support_nmode) {
netdev_info(ieee->dev, "======>enter half N mode\n");
- ieee->bHalfWirelessN24GMode = true;
+ ieee->half_wireless_n24g_mode = true;
} else {
- ieee->bHalfWirelessN24GMode = false;
+ ieee->half_wireless_n24g_mode = false;
}
rtllib_associate_step2(ieee);
} else {
@@ -1734,13 +1732,11 @@ rtllib_rx_deauth(struct rtllib_device *ieee, struct sk_buff *skb)
ieee->link_state = RTLLIB_ASSOCIATING;
ieee->softmac_stats.reassoc++;
ieee->is_roaming = true;
- ieee->link_detect_info.bBusyTraffic = false;
+ ieee->link_detect_info.busy_traffic = false;
rtllib_disassociate(ieee);
- RemovePeerTS(ieee, header->addr2);
- if (!(ieee->rtllib_ap_sec_type(ieee) &
- (SEC_ALG_CCMP | SEC_ALG_TKIP)))
- schedule_delayed_work(
- &ieee->associate_procedure_wq, 5);
+ remove_peer_ts(ieee, header->addr2);
+ if (!(ieee->rtllib_ap_sec_type(ieee) & (SEC_ALG_CCMP | SEC_ALG_TKIP)))
+ schedule_delayed_work(&ieee->associate_procedure_wq, 5);
}
return 0;
}
@@ -1816,7 +1812,7 @@ void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee)
/* update the tx status */
tcb_desc = (struct cb_desc *)(txb->fragments[0]->cb +
MAX_DEV_ADDR_SIZE);
- if (tcb_desc->bMulticast)
+ if (tcb_desc->multicast)
ieee->stats.multicast++;
/* if xmit available, just xmit it immediately, else just insert it to
@@ -1998,11 +1994,11 @@ void rtllib_stop_protocol(struct rtllib_device *ieee)
if (ieee->link_state == MAC80211_LINKED) {
if (ieee->iw_mode == IW_MODE_INFRA)
- SendDisassociation(ieee, 1, WLAN_REASON_DEAUTH_LEAVING);
+ send_disassociation(ieee, 1, WLAN_REASON_DEAUTH_LEAVING);
rtllib_disassociate(ieee);
}
- RemoveAllTS(ieee);
+ remove_all_ts(ieee);
ieee->proto_stoppping = 0;
kfree(ieee->assocreq_ies);
@@ -2072,13 +2068,13 @@ int rtllib_softmac_init(struct rtllib_device *ieee)
for (i = 0; i < 5; i++)
ieee->seq_ctrl[i] = 0;
- ieee->link_detect_info.SlotIndex = 0;
- ieee->link_detect_info.SlotNum = 2;
- ieee->link_detect_info.NumRecvBcnInPeriod = 0;
- ieee->link_detect_info.NumRecvDataInPeriod = 0;
+ ieee->link_detect_info.slot_index = 0;
+ ieee->link_detect_info.slot_num = 2;
+ ieee->link_detect_info.num_recv_bcn_in_period = 0;
+ ieee->link_detect_info.num_recv_data_in_period = 0;
ieee->link_detect_info.num_tx_ok_in_period = 0;
ieee->link_detect_info.num_rx_ok_in_period = 0;
- ieee->link_detect_info.NumRxUnicastOkInPeriod = 0;
+ ieee->link_detect_info.num_rx_unicast_ok_in_period = 0;
ieee->is_aggregate_frame = false;
ieee->assoc_id = 0;
ieee->queue_stop = 0;
@@ -2101,7 +2097,7 @@ int rtllib_softmac_init(struct rtllib_device *ieee)
ieee->reg_dot11tx_ht_oper_rate_set[1] = 0xff;
ieee->reg_dot11tx_ht_oper_rate_set[4] = 0x01;
- ieee->FirstIe_InScan = false;
+ ieee->first_ie_in_scan = false;
ieee->actscanning = false;
ieee->beinretry = false;
ieee->is_set_key = false;
@@ -2148,7 +2144,7 @@ void rtllib_softmac_free(struct rtllib_device *ieee)
static inline struct sk_buff *
rtllib_disauth_skb(struct rtllib_network *beacon,
- struct rtllib_device *ieee, u16 asRsn)
+ struct rtllib_device *ieee, u16 rsn)
{
struct sk_buff *skb;
struct rtllib_disauth *disauth;
@@ -2168,13 +2164,13 @@ rtllib_disauth_skb(struct rtllib_network *beacon,
ether_addr_copy(disauth->header.addr2, ieee->dev->dev_addr);
ether_addr_copy(disauth->header.addr3, beacon->bssid);
- disauth->reason = cpu_to_le16(asRsn);
+ disauth->reason = cpu_to_le16(rsn);
return skb;
}
static inline struct sk_buff *
rtllib_disassociate_skb(struct rtllib_network *beacon,
- struct rtllib_device *ieee, u16 asRsn)
+ struct rtllib_device *ieee, u16 rsn)
{
struct sk_buff *skb;
struct rtllib_disassoc *disass;
@@ -2195,19 +2191,19 @@ rtllib_disassociate_skb(struct rtllib_network *beacon,
ether_addr_copy(disass->header.addr2, ieee->dev->dev_addr);
ether_addr_copy(disass->header.addr3, beacon->bssid);
- disass->reason = cpu_to_le16(asRsn);
+ disass->reason = cpu_to_le16(rsn);
return skb;
}
-void SendDisassociation(struct rtllib_device *ieee, bool deauth, u16 asRsn)
+void send_disassociation(struct rtllib_device *ieee, bool deauth, u16 rsn)
{
struct rtllib_network *beacon = &ieee->current_network;
struct sk_buff *skb;
if (deauth)
- skb = rtllib_disauth_skb(beacon, ieee, asRsn);
+ skb = rtllib_disauth_skb(beacon, ieee, rsn);
else
- skb = rtllib_disassociate_skb(beacon, ieee, asRsn);
+ skb = rtllib_disassociate_skb(beacon, ieee, rsn);
if (skb)
softmac_mgmt_xmit(skb, ieee);
@@ -2241,56 +2237,56 @@ u8 rtllib_ap_sec_type(struct rtllib_device *ieee)
}
}
-static void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib,
- u8 *asSta, u8 asRsn)
+static void rtllib_mlme_disassociate_request(struct rtllib_device *rtllib,
+ u8 *addr, u8 rsn)
{
u8 i;
u8 op_mode;
- RemovePeerTS(rtllib, asSta);
+ remove_peer_ts(rtllib, addr);
- if (memcmp(rtllib->current_network.bssid, asSta, 6) == 0) {
+ if (memcmp(rtllib->current_network.bssid, addr, 6) == 0) {
rtllib->link_state = MAC80211_NOLINK;
for (i = 0; i < 6; i++)
rtllib->current_network.bssid[i] = 0x22;
op_mode = RT_OP_MODE_NO_LINK;
rtllib->op_mode = RT_OP_MODE_NO_LINK;
- rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_MEDIA_STATUS,
+ rtllib->set_hw_reg_handler(rtllib->dev, HW_VAR_MEDIA_STATUS,
(u8 *)(&op_mode));
rtllib_disassociate(rtllib);
- rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_BSSID,
+ rtllib->set_hw_reg_handler(rtllib->dev, HW_VAR_BSSID,
rtllib->current_network.bssid);
}
}
-static void rtllib_MgntDisconnectAP(struct rtllib_device *rtllib, u8 asRsn)
+static void rtllib_mgnt_disconnect_ap(struct rtllib_device *rtllib, u8 rsn)
{
- bool bFilterOutNonAssociatedBSSID = false;
+ bool filter_out_nonassociated_bssid = false;
- bFilterOutNonAssociatedBSSID = false;
- rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_CECHK_BSSID,
- (u8 *)(&bFilterOutNonAssociatedBSSID));
- rtllib_MlmeDisassociateRequest(rtllib, rtllib->current_network.bssid,
- asRsn);
+ filter_out_nonassociated_bssid = false;
+ rtllib->set_hw_reg_handler(rtllib->dev, HW_VAR_CECHK_BSSID,
+ (u8 *)(&filter_out_nonassociated_bssid));
+ rtllib_mlme_disassociate_request(rtllib, rtllib->current_network.bssid,
+ rsn);
rtllib->link_state = MAC80211_NOLINK;
}
-bool rtllib_MgntDisconnect(struct rtllib_device *rtllib, u8 asRsn)
+bool rtllib_mgnt_disconnect(struct rtllib_device *rtllib, u8 rsn)
{
if (rtllib->ps != RTLLIB_PS_DISABLED)
rtllib->sta_wake_up(rtllib->dev);
if (rtllib->link_state == MAC80211_LINKED) {
if (rtllib->iw_mode == IW_MODE_INFRA)
- rtllib_MgntDisconnectAP(rtllib, asRsn);
+ rtllib_mgnt_disconnect_ap(rtllib, rsn);
}
return true;
}
-EXPORT_SYMBOL(rtllib_MgntDisconnect);
+EXPORT_SYMBOL(rtllib_mgnt_disconnect);
void notify_wx_assoc_event(struct rtllib_device *ieee)
{
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 2afa701e5445b..d6bc74ba9092b 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -347,10 +347,10 @@ void rtllib_wx_sync_scan_wq(void *data)
/* Notify AP that I wake up again */
rtllib_sta_ps_send_null_frame(ieee, 0);
- if (ieee->link_detect_info.NumRecvBcnInPeriod == 0 ||
- ieee->link_detect_info.NumRecvDataInPeriod == 0) {
- ieee->link_detect_info.NumRecvBcnInPeriod = 1;
- ieee->link_detect_info.NumRecvDataInPeriod = 1;
+ if (ieee->link_detect_info.num_recv_bcn_in_period == 0 ||
+ ieee->link_detect_info.num_recv_data_in_period == 0) {
+ ieee->link_detect_info.num_recv_bcn_in_period = 1;
+ ieee->link_detect_info.num_recv_data_in_period = 1;
}
rtllib_wake_all_queues(ieee);
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index f7098a2ba8b0b..54100dd81505b 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -286,7 +286,7 @@ static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
if (ht_info->iot_action & HT_IOT_ACT_TX_NO_AGGREGATION)
return;
- if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
+ if (!ieee->get_nmode_support_by_sec_cfg(ieee->dev))
return;
if (ht_info->current_ampdu_enable) {
if (!rtllib_get_ts(ieee, (struct ts_common_info **)(&ts), hdr->addr1,
@@ -356,7 +356,7 @@ static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
if (!ht_info->current_ht_support || !ht_info->enable_ht)
return;
- if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
+ if (tcb_desc->multicast || tcb_desc->bBroadcast)
return;
if ((tcb_desc->data_rate & 0x80) == 0)
@@ -378,7 +378,7 @@ static void rtllib_query_protectionmode(struct rtllib_device *ieee,
tcb_desc->RTSSC = 0;
tcb_desc->bRTSBW = false;
- if (tcb_desc->bBroadcast || tcb_desc->bMulticast)
+ if (tcb_desc->bBroadcast || tcb_desc->multicast)
return;
if (is_broadcast_ether_addr(skb->data + 16))
@@ -595,14 +595,14 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
((((u8 *)udp)[1] == 67) &&
(((u8 *)udp)[3] == 68))) {
bdhcp = true;
- ieee->LPSDelayCnt = 200;
+ ieee->lps_delay_cnt = 200;
}
}
} else if (ether_type == ETH_P_ARP) {
netdev_info(ieee->dev,
"=================>DHCP Protocol start tx ARP pkt!!\n");
bdhcp = true;
- ieee->LPSDelayCnt =
+ ieee->lps_delay_cnt =
ieee->current_network.tim.tim_count;
}
}
@@ -832,7 +832,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
if (ieee->ht_info->iot_action &
HT_IOT_ACT_WA_IOT_Broadcom) {
tcb_desc->data_rate =
- MgntQuery_TxRateExcludeCCKRates(ieee);
+ mgnt_query_tx_rate_exclude_cck_rates(ieee);
tcb_desc->tx_dis_rate_fallback = false;
} else {
tcb_desc->data_rate = ieee->basic_rate;
@@ -843,11 +843,11 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
tcb_desc->tx_use_drv_assinged_rate = 1;
} else {
if (is_multicast_ether_addr(header.addr1))
- tcb_desc->bMulticast = 1;
+ tcb_desc->multicast = 1;
if (is_broadcast_ether_addr(header.addr1))
tcb_desc->bBroadcast = 1;
rtllib_txrate_selectmode(ieee, tcb_desc);
- if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
+ if (tcb_desc->multicast || tcb_desc->bBroadcast)
tcb_desc->data_rate = ieee->basic_rate;
else
tcb_desc->data_rate = rtllib_current_rate(ieee);
@@ -856,7 +856,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
if (ieee->ht_info->iot_action &
HT_IOT_ACT_WA_IOT_Broadcom) {
tcb_desc->data_rate =
- MgntQuery_TxRateExcludeCCKRates(ieee);
+ mgnt_query_tx_rate_exclude_cck_rates(ieee);
tcb_desc->tx_dis_rate_fallback = false;
} else {
tcb_desc->data_rate = MGN_1M;
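The rtllib_tx.c hunks above keep the logic that holds off leisure power save while DHCP or ARP traffic is in flight, now via the renamed lps_delay_cnt. A minimal sketch of the same UDP-port test follows; tx_frame_delays_lps() is a hypothetical helper, the driver itself open-codes this check inside rtllib_xmit_inter().

#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/udp.h>

/* Sketch: decide whether an outgoing frame should postpone leisure power
 * save.  Hypothetical helper, not part of rtllib.
 */
static bool tx_frame_delays_lps(struct sk_buff *skb, __be16 ether_type)
{
	if (ether_type == htons(ETH_P_ARP))
		return true;

	if (ether_type == htons(ETH_P_IP)) {
		const struct iphdr *ip = (const struct iphdr *)(skb->data + ETH_HLEN);

		if (ip->protocol == IPPROTO_UDP) {
			const struct udphdr *udp =
				(const struct udphdr *)((const u8 *)ip + ip->ihl * 4);

			/* BOOTP/DHCP uses UDP ports 67 (server) and 68 (client) */
			if ((udp->source == htons(67) && udp->dest == htons(68)) ||
			    (udp->source == htons(68) && udp->dest == htons(67)))
				return true;
		}
	}

	return false;
}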
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index f92ec0faf4d56..55a3e4222cd67 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -636,7 +636,7 @@ int rtllib_wx_set_mlme(struct rtllib_device *ieee,
ieee->cannot_notify = true;
- SendDisassociation(ieee, deauth, mlme->reason_code);
+ send_disassociation(ieee, deauth, mlme->reason_code);
rtllib_disassociate(ieee);
ieee->wap_set = 0;
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index 30e7457a9c312..b89e88d6a82d5 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -1035,8 +1035,8 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork)
u16 wpa_len = 0, rsn_len = 0;
struct HT_info_element *pht_info = NULL;
struct ieee80211_ht_cap *pht_cap = NULL;
- unsigned int len;
- unsigned char *p;
+ unsigned int len;
+ unsigned char *p;
__le16 le_cap;
memcpy((u8 *)&le_cap, rtw_get_capability_from_ie(pnetwork->network.ies), 2);
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index b221913733fb6..bfb27f9027534 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -169,7 +169,6 @@ void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwor
{
unsigned int delta_time;
u32 lifetime = SCANQUEUE_LIFETIME;
-/* _irqL irqL; */
struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
if (!pnetwork)
@@ -389,7 +388,6 @@ int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst, u8 fea
d_cap = le16_to_cpu(tmpd);
return (src->ssid.ssid_length == dst->ssid.ssid_length) &&
- /* (src->configuration.ds_config == dst->configuration.ds_config) && */
((!memcmp(src->mac_address, dst->mac_address, ETH_ALEN))) &&
((!memcmp(src->ssid.ssid, dst->ssid.ssid, src->ssid.ssid_length))) &&
((s_cap & WLAN_CAPABILITY_IBSS) ==
@@ -1548,9 +1546,9 @@ void _rtw_join_timeout_handler(struct timer_list *t)
int do_join_r;
do_join_r = rtw_do_join(adapter);
- if (do_join_r != _SUCCESS) {
+ if (do_join_r != _SUCCESS)
continue;
- }
+
break;
} else {
rtw_indicate_disconnect(adapter);
@@ -2432,9 +2430,8 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channe
return;
/* maybe needs check if ap supports rx ampdu. */
- if (!(phtpriv->ampdu_enable) && pregistrypriv->ampdu_enable == 1) {
+ if (!(phtpriv->ampdu_enable) && pregistrypriv->ampdu_enable == 1)
phtpriv->ampdu_enable = true;
- }
/* check Max Rx A-MPDU Size */
len = 0;
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
index 1593980d2c6a0..0145c4da5ac06 100644
--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -127,9 +127,8 @@ void kfree_all_stainfo(struct sta_priv *pstapriv)
phead = get_list_head(&pstapriv->free_sta_queue);
plist = get_next(phead);
- while (phead != plist) {
+ while (phead != plist)
plist = get_next(plist);
- }
spin_unlock_bh(&pstapriv->sta_hash_lock);
}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index c5219a4a4919d..7a5c3a98183b4 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -954,7 +954,7 @@ static u16 hal_EfuseGetCurrentSize_WiFi(
#endif
u16 efuse_addr = 0;
u16 start_addr = 0; /* for debug */
- u8 hoffset = 0, hworden = 0;
+ u8 hworden = 0;
u8 efuse_data, word_cnts = 0;
u32 count = 0; /* for debug */
@@ -1001,16 +1001,13 @@ static u16 hal_EfuseGetCurrentSize_WiFi(
}
if (EXT_HEADER(efuse_data)) {
- hoffset = GET_HDR_OFFSET_2_0(efuse_data);
efuse_addr++;
efuse_OneByteRead(padapter, efuse_addr, &efuse_data, bPseudoTest);
if (ALL_WORDS_DISABLED(efuse_data))
continue;
- hoffset |= ((efuse_data & 0xF0) >> 1);
hworden = efuse_data & 0x0F;
} else {
- hoffset = (efuse_data>>4) & 0x0F;
hworden = efuse_data & 0x0F;
}
@@ -1047,7 +1044,7 @@ static u16 hal_EfuseGetCurrentSize_BT(struct adapter *padapter, u8 bPseudoTest)
u16 btusedbytes;
u16 efuse_addr;
u8 bank, startBank;
- u8 hoffset = 0, hworden = 0;
+ u8 hworden = 0;
u8 efuse_data, word_cnts = 0;
u16 retU2 = 0;
@@ -1085,7 +1082,6 @@ static u16 hal_EfuseGetCurrentSize_BT(struct adapter *padapter, u8 bPseudoTest)
break;
if (EXT_HEADER(efuse_data)) {
- hoffset = GET_HDR_OFFSET_2_0(efuse_data);
efuse_addr++;
efuse_OneByteRead(padapter, efuse_addr, &efuse_data, bPseudoTest);
@@ -1094,11 +1090,8 @@ static u16 hal_EfuseGetCurrentSize_BT(struct adapter *padapter, u8 bPseudoTest)
continue;
}
-/* hoffset = ((hoffset & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1); */
- hoffset |= ((efuse_data & 0xF0) >> 1);
hworden = efuse_data & 0x0F;
} else {
- hoffset = (efuse_data>>4) & 0x0F;
hworden = efuse_data & 0x0F;
}
@@ -1114,18 +1107,15 @@ static u16 hal_EfuseGetCurrentSize_BT(struct adapter *padapter, u8 bPseudoTest)
) {
if (efuse_data != 0xFF) {
if ((efuse_data&0x1F) == 0x0F) { /* extended header */
- hoffset = efuse_data;
efuse_addr++;
efuse_OneByteRead(padapter, efuse_addr, &efuse_data, bPseudoTest);
if ((efuse_data & 0x0F) == 0x0F) {
efuse_addr++;
continue;
} else {
- hoffset = ((hoffset & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
hworden = efuse_data & 0x0F;
}
} else {
- hoffset = (efuse_data>>4) & 0x0F;
hworden = efuse_data & 0x0F;
}
word_cnts = Efuse_CalculateWordCnts(hworden);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 1ff763c10064b..65a450fcdce77 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -1259,8 +1259,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
goto check_need_indicate_scan_done;
}
- ssid = kzalloc(RTW_SSID_SCAN_AMOUNT * sizeof(struct ndis_802_11_ssid),
- GFP_KERNEL);
+ ssid = kcalloc(RTW_SSID_SCAN_AMOUNT, sizeof(*ssid), GFP_KERNEL);
if (!ssid) {
ret = -ENOMEM;
goto check_need_indicate_scan_done;
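The cfg80211_rtw_scan() change above swaps an open-coded size multiplication for kcalloc(), which both zeroes the array and returns NULL if the element count would overflow the size calculation. A minimal sketch of the pattern, with a hypothetical element type:

#include <linux/slab.h>

struct scan_ssid {
	u8 ssid[32];
	u32 ssid_len;
};

/* Sketch: allocate a zeroed array of n elements; kcalloc() fails cleanly
 * instead of silently wrapping when n * sizeof(*elem) would overflow.
 */
static struct scan_ssid *alloc_scan_ssids(size_t n)
{
	return kcalloc(n, sizeof(struct scan_ssid), GFP_KERNEL);
}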
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index e6e89784d84b9..c3ba490e53cb4 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -350,12 +350,11 @@ static void buffer_cb(struct vchiq_mmal_instance *instance,
if (is_capturing(dev)) {
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
"Grab another frame");
- vchiq_mmal_port_parameter_set(
- instance,
- dev->capture.camera_port,
- MMAL_PARAMETER_CAPTURE,
- &dev->capture.frame_count,
- sizeof(dev->capture.frame_count));
+ vchiq_mmal_port_parameter_set(instance,
+ dev->capture.camera_port,
+ MMAL_PARAMETER_CAPTURE,
+ &dev->capture.frame_count,
+ sizeof(dev->capture.frame_count));
}
if (vchiq_mmal_submit_buffer(instance, port,
&buf->mmal))
@@ -406,12 +405,11 @@ static void buffer_cb(struct vchiq_mmal_instance *instance,
is_capturing(dev)) {
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
"Grab another frame as buffer has EOS");
- vchiq_mmal_port_parameter_set(
- instance,
- dev->capture.camera_port,
- MMAL_PARAMETER_CAPTURE,
- &dev->capture.frame_count,
- sizeof(dev->capture.frame_count));
+ vchiq_mmal_port_parameter_set(instance,
+ dev->capture.camera_port,
+ MMAL_PARAMETER_CAPTURE,
+ &dev->capture.frame_count,
+ sizeof(dev->capture.frame_count));
}
}
@@ -420,11 +418,10 @@ static int enable_camera(struct bcm2835_mmal_dev *dev)
int ret;
if (!dev->camera_use_count) {
- ret = vchiq_mmal_port_parameter_set(
- dev->instance,
- &dev->component[COMP_CAMERA]->control,
- MMAL_PARAMETER_CAMERA_NUM, &dev->camera_num,
- sizeof(dev->camera_num));
+ ret = vchiq_mmal_port_parameter_set(dev->instance,
+ &dev->component[COMP_CAMERA]->control,
+ MMAL_PARAMETER_CAMERA_NUM, &dev->camera_num,
+ sizeof(dev->camera_num));
if (ret < 0) {
v4l2_err(&dev->v4l2_dev,
"Failed setting camera num, ret %d\n", ret);
@@ -468,11 +465,11 @@ static int disable_camera(struct bcm2835_mmal_dev *dev)
"Failed disabling camera, ret %d\n", ret);
return -EINVAL;
}
- vchiq_mmal_port_parameter_set(
- dev->instance,
- &dev->component[COMP_CAMERA]->control,
- MMAL_PARAMETER_CAMERA_NUM, &i,
- sizeof(i));
+ vchiq_mmal_port_parameter_set(dev->instance,
+ &dev->component[COMP_CAMERA]->control,
+ MMAL_PARAMETER_CAMERA_NUM,
+ &i,
+ sizeof(i));
}
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
"Camera refcount now %d\n", dev->camera_use_count);
@@ -786,9 +783,8 @@ static int vidioc_overlay(struct file *file, void *f, unsigned int on)
ret = vchiq_mmal_port_connect_tunnel(dev->instance, src,
NULL);
if (ret >= 0)
- ret = vchiq_mmal_component_disable(
- dev->instance,
- dev->component[COMP_PREVIEW]);
+ ret = vchiq_mmal_component_disable(dev->instance,
+ dev->component[COMP_PREVIEW]);
disable_camera(dev);
return ret;
@@ -1006,7 +1002,6 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-
static int mmal_setup_video_component(struct bcm2835_mmal_dev *dev,
struct v4l2_format *f)
{
@@ -1042,8 +1037,8 @@ static int mmal_setup_video_component(struct bcm2835_mmal_dev *dev,
if (overlay_enabled) {
ret = vchiq_mmal_port_connect_tunnel(dev->instance,
- preview_port,
- &dev->component[COMP_PREVIEW]->input[0]);
+ preview_port,
+ &dev->component[COMP_PREVIEW]->input[0]);
if (ret)
return ret;
@@ -1720,11 +1715,11 @@ static int mmal_init(struct bcm2835_mmal_dev *dev)
{
unsigned int enable = 1;
- vchiq_mmal_port_parameter_set(
- dev->instance,
- &dev->component[COMP_VIDEO_ENCODE]->control,
- MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT,
- &enable, sizeof(enable));
+ vchiq_mmal_port_parameter_set(dev->instance,
+ &dev->component[COMP_VIDEO_ENCODE]->control,
+ MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT,
+ &enable,
+ sizeof(enable));
vchiq_mmal_port_parameter_set(dev->instance,
&dev->component[COMP_VIDEO_ENCODE]->control,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c
index 933027e0011ec..68f830d755310 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c
@@ -37,7 +37,7 @@ static int vchiq_bus_probe(struct device *dev)
return driver->probe(device);
}
-struct bus_type vchiq_bus_type = {
+const struct bus_type vchiq_bus_type = {
.name = "vchiq-bus",
.match = vchiq_bus_type_match,
.uevent = vchiq_bus_uevent,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.h
index caa6fdf25bb1d..4db86e76edbd4 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.h
@@ -34,7 +34,7 @@ static inline struct vchiq_driver *to_vchiq_driver(struct device_driver *d)
return container_of(d, struct vchiq_driver, driver);
}
-extern struct bus_type vchiq_bus_type;
+extern const struct bus_type vchiq_bus_type;
struct vchiq_device *
vchiq_device_register(struct device *parent, const char *name);
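The vchiq, vme, tcm_loop and tc hunks all declare their struct bus_type const, which the driver core now accepts. A minimal sketch of a const bus registration, assuming a kernel where bus_register() takes a const pointer as these hunks require; every name below is hypothetical.

#include <linux/device.h>
#include <linux/module.h>

static int demo_bus_match(struct device *dev, struct device_driver *drv)
{
	return 1;	/* match everything in this sketch */
}

/* Declaring the bus const lets the compiler place it in rodata. */
static const struct bus_type demo_bus_type = {
	.name  = "demo-bus",
	.match = demo_bus_match,
};

static int __init demo_bus_init(void)
{
	/* bus_register() accepts a const struct bus_type * on these kernels */
	return bus_register(&demo_bus_type);
}
module_init(demo_bus_init);
MODULE_LICENSE("GPL");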
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
index 258aa0e37f554..4c3684dd902ed 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
@@ -937,8 +937,9 @@ static int create_component(struct vchiq_mmal_instance *instance,
/* build component create message */
m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
m.u.component_create.client_component = component->client_component;
- strncpy(m.u.component_create.name, name,
- sizeof(m.u.component_create.name));
+ strscpy_pad(m.u.component_create.name, name,
+ sizeof(m.u.component_create.name));
+ m.u.component_create.pid = 0;
ret = send_synchronous_mmal_msg(instance, &m,
sizeof(m.u.component_create),
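The create_component() change replaces strncpy() with strscpy_pad(), which always NUL-terminates the destination and zero-fills the unused tail, so no stale bytes ride along in the message. A minimal sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

/* Sketch: copy a component name into a fixed-size, fully initialised field.
 * strscpy_pad() truncates safely, guarantees NUL termination and zeroes the
 * rest of the buffer, unlike strncpy(), which may leave it unterminated.
 */
static void set_component_name(char *dst, size_t dst_size, const char *name)
{
	ssize_t n = strscpy_pad(dst, name, dst_size);

	if (n == -E2BIG)
		pr_debug("component name truncated\n");
}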
diff --git a/drivers/staging/vme_user/vme.c b/drivers/staging/vme_user/vme.c
index e9461a7a7ab8b..0cd370ab10088 100644
--- a/drivers/staging/vme_user/vme.c
+++ b/drivers/staging/vme_user/vme.c
@@ -1970,7 +1970,7 @@ static void vme_bus_remove(struct device *dev)
driver->remove(vdev);
}
-struct bus_type vme_bus_type = {
+const struct bus_type vme_bus_type = {
.name = "vme",
.match = vme_bus_match,
.probe = vme_bus_probe,
diff --git a/drivers/staging/vme_user/vme.h b/drivers/staging/vme_user/vme.h
index 06504dccd5ff0..26aa40f78a74f 100644
--- a/drivers/staging/vme_user/vme.h
+++ b/drivers/staging/vme_user/vme.h
@@ -81,7 +81,7 @@ struct vme_resource {
struct list_head *entry;
};
-extern struct bus_type vme_bus_type;
+extern const struct bus_type vme_bus_type;
/* Number of VME interrupt vectors */
#define VME_NUM_STATUSID 256
diff --git a/drivers/staging/vme_user/vme_tsi148.h b/drivers/staging/vme_user/vme_tsi148.h
index 4dd224d0b86e9..db246cbc54c39 100644
--- a/drivers/staging/vme_user/vme_tsi148.h
+++ b/drivers/staging/vme_user/vme_tsi148.h
@@ -691,8 +691,7 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
#define TSI148_LCSR_VMCTRL_RMWEN BIT(20) /* RMW Enable */
-#define TSI148_LCSR_VMCTRL_ATO_M (7 << 16) /* Master Access Time-out Mask
- */
+#define TSI148_LCSR_VMCTRL_ATO_M (7 << 16) /* Master Access Time-out Mask */
#define TSI148_LCSR_VMCTRL_ATO_32 (0 << 16) /* 32 us */
#define TSI148_LCSR_VMCTRL_ATO_128 BIT(16) /* 128 us */
#define TSI148_LCSR_VMCTRL_ATO_512 (2 << 16) /* 512 us */
@@ -753,8 +752,7 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
#define TSI148_LCSR_VCTRL_DLT_16384 (0xB << 24) /* 16384 VCLKS */
#define TSI148_LCSR_VCTRL_DLT_32768 (0xC << 24) /* 32768 VCLKS */
-#define TSI148_LCSR_VCTRL_NERBB BIT(20) /* No Early Release of Bus Busy
- */
+#define TSI148_LCSR_VCTRL_NERBB BIT(20) /* No Early Release of Bus Busy */
#define TSI148_LCSR_VCTRL_SRESET BIT(17) /* System Reset */
#define TSI148_LCSR_VCTRL_LRESET BIT(16) /* Local Reset */
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 36183f2a64c11..688c870d89bc0 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -81,9 +81,9 @@ static void vt6655_mac_set_bb_type(void __iomem *iobase, u32 mask)
* Return Value: none
*/
static void calculate_ofdmr_parameter(unsigned char rate,
- u8 bb_type,
- unsigned char *tx_rate,
- unsigned char *rsv_time)
+ u8 bb_type,
+ unsigned char *tx_rate,
+ unsigned char *rsv_time)
{
switch (rate) {
case RATE_6M:
@@ -288,7 +288,7 @@ bool card_set_phy_parameter(struct vnt_private *priv, u8 bb_type)
* Return Value: none
*/
bool card_update_tsf(struct vnt_private *priv, unsigned char rx_rate,
- u64 bss_timestamp)
+ u64 bss_timestamp)
{
u64 local_tsf;
u64 tsf_offset = 0;
@@ -297,7 +297,7 @@ bool card_update_tsf(struct vnt_private *priv, unsigned char rx_rate,
if (bss_timestamp != local_tsf) {
tsf_offset = card_get_tsf_offset(rx_rate, bss_timestamp,
- local_tsf);
+ local_tsf);
/* adjust TSF, HW's TSF add TSF Offset reg */
tsf_offset = le64_to_cpu(tsf_offset);
iowrite32((u32)tsf_offset, priv->port_offset + MAC_REG_TSFOFST);
@@ -321,7 +321,7 @@ bool card_update_tsf(struct vnt_private *priv, unsigned char rx_rate,
* Return Value: true if succeed; otherwise false
*/
bool card_set_beacon_period(struct vnt_private *priv,
- unsigned short beacon_interval)
+ unsigned short beacon_interval)
{
u64 next_tbtt;
@@ -586,61 +586,61 @@ void card_set_rspinf(struct vnt_private *priv, u8 bb_type)
/* RSPINF_a_6 */
calculate_ofdmr_parameter(RATE_6M,
- bb_type,
- &byTxRate,
- &byRsvTime);
+ bb_type,
+ &byTxRate,
+ &byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_6);
/* RSPINF_a_9 */
calculate_ofdmr_parameter(RATE_9M,
- bb_type,
- &byTxRate,
- &byRsvTime);
+ bb_type,
+ &byTxRate,
+ &byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_9);
/* RSPINF_a_12 */
calculate_ofdmr_parameter(RATE_12M,
- bb_type,
- &byTxRate,
- &byRsvTime);
+ bb_type,
+ &byTxRate,
+ &byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_12);
/* RSPINF_a_18 */
calculate_ofdmr_parameter(RATE_18M,
- bb_type,
- &byTxRate,
- &byRsvTime);
+ bb_type,
+ &byTxRate,
+ &byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_18);
/* RSPINF_a_24 */
calculate_ofdmr_parameter(RATE_24M,
- bb_type,
- &byTxRate,
- &byRsvTime);
+ bb_type,
+ &byTxRate,
+ &byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_24);
/* RSPINF_a_36 */
calculate_ofdmr_parameter(CARDwGetOFDMControlRate((void *)priv,
- RATE_36M),
- bb_type,
- &byTxRate,
- &byRsvTime);
+ RATE_36M),
+ bb_type,
+ &byTxRate,
+ &byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_36);
/* RSPINF_a_48 */
calculate_ofdmr_parameter(CARDwGetOFDMControlRate((void *)priv,
- RATE_48M),
- bb_type,
- &byTxRate,
- &byRsvTime);
+ RATE_48M),
+ bb_type,
+ &byTxRate,
+ &byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_48);
/* RSPINF_a_54 */
calculate_ofdmr_parameter(CARDwGetOFDMControlRate((void *)priv,
- RATE_54M),
- bb_type,
- &byTxRate,
- &byRsvTime);
+ RATE_54M),
+ bb_type,
+ &byTxRate,
+ &byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_54);
/* RSPINF_a_72 */
calculate_ofdmr_parameter(CARDwGetOFDMControlRate((void *)priv,
- RATE_54M),
- bb_type,
- &byTxRate,
- &byRsvTime);
+ RATE_54M),
+ bb_type,
+ &byTxRate,
+ &byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_72);
/* Set to Page0 */
VT6655_MAC_SELECT_PAGE0(priv->port_offset);
diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h
index a67757c9bb5ca..be1e5180d57b8 100644
--- a/drivers/staging/vt6655/rxtx.h
+++ b/drivers/staging/vt6655/rxtx.h
@@ -19,7 +19,6 @@
#define DEFAULT_MSDU_LIFETIME_RES_64us 8000 /* 64us */
#define DEFAULT_MGN_LIFETIME_RES_64us 125 /* 64us */
-
/*--------------------- Export Definitions -------------------------*/
/*--------------------- Export Variables --------------------------*/
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 6797200211836..d9a6242264b78 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -583,7 +583,7 @@ int iscsit_dataout_datapduinorder_no_fbit(
struct iscsi_pdu *pdu)
{
int i, send_recovery_r2t = 0, recovery = 0;
- u32 length = 0, offset = 0, pdu_count = 0, xfer_len = 0;
+ u32 length = 0, offset = 0, pdu_count = 0;
struct iscsit_conn *conn = cmd->conn;
struct iscsi_pdu *first_pdu = NULL;
@@ -596,7 +596,6 @@ int iscsit_dataout_datapduinorder_no_fbit(
if (cmd->pdu_list[i].seq_no == pdu->seq_no) {
if (!first_pdu)
first_pdu = &cmd->pdu_list[i];
- xfer_len += cmd->pdu_list[i].length;
pdu_count++;
} else if (pdu_count)
break;
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 8e4035ff36748..761c511aea07c 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -83,7 +83,7 @@ static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
static int tcm_loop_driver_probe(struct device *);
static void tcm_loop_driver_remove(struct device *);
-static struct bus_type tcm_loop_lld_bus = {
+static const struct bus_type tcm_loop_lld_bus = {
.name = "tcm_loop_bus",
.probe = tcm_loop_driver_probe,
.remove = tcm_loop_driver_remove,
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index c1fbcdd161826..c40217f44b1bc 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -3672,6 +3672,8 @@ static int __init target_core_init_configfs(void)
{
struct configfs_subsystem *subsys = &target_core_fabrics;
struct t10_alua_lu_gp *lu_gp;
+ struct cred *kern_cred;
+ const struct cred *old_cred;
int ret;
pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
@@ -3748,11 +3750,21 @@ static int __init target_core_init_configfs(void)
if (ret < 0)
goto out;
+ /* We use the kernel credentials to access the target directory */
+ kern_cred = prepare_kernel_cred(&init_task);
+ if (!kern_cred) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ old_cred = override_creds(kern_cred);
target_init_dbroot();
+ revert_creds(old_cred);
+ put_cred(kern_cred);
return 0;
out:
+ target_xcopy_release_pt();
configfs_unregister_subsystem(subsys);
core_dev_release_virtual_lun0();
rd_module_exit();
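The target_core_configfs.c hunk runs target_init_dbroot() under kernel credentials, so the dbroot lookup does not depend on whichever task happened to load the module. A minimal sketch of the override/revert pattern; with_kernel_creds() is a hypothetical wrapper, not a target-core function.

#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/sched/task.h>	/* init_task */

/* Sketch: run one privileged operation with kernel credentials, then
 * restore the caller's credentials and drop the temporary reference.
 */
static int with_kernel_creds(int (*fn)(void))
{
	struct cred *kcred;
	const struct cred *old;
	int ret;

	kcred = prepare_kernel_cred(&init_task);
	if (!kcred)
		return -ENOMEM;

	old = override_creds(kcred);	/* act with kernel credentials */
	ret = fn();
	revert_creds(old);		/* back to the original credentials */
	put_cred(kcred);		/* drop the temporary cred */

	return ret;
}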
diff --git a/drivers/tc/tc-driver.c b/drivers/tc/tc-driver.c
index d45f2c1ff3414..1c9d983a5a1fa 100644
--- a/drivers/tc/tc-driver.c
+++ b/drivers/tc/tc-driver.c
@@ -95,7 +95,7 @@ static int tc_bus_match(struct device *dev, struct device_driver *drv)
return 0;
}
-struct bus_type tc_bus_type = {
+const struct bus_type tc_bus_type = {
.name = "tc",
.match = tc_bus_match,
};
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index d77d7fe99a84a..5cdf7d68687f4 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_RCAR_GEN3_THERMAL) += rcar_gen3_thermal.o
obj-$(CONFIG_RZG2L_THERMAL) += rzg2l_thermal.o
obj-$(CONFIG_KIRKWOOD_THERMAL) += kirkwood_thermal.o
obj-y += samsung/
-obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o
+obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o
obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o
obj-$(CONFIG_ARMADA_THERMAL) += armada_thermal.o
obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 50dec24e967a0..8fd7cf1932cd4 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -214,7 +214,7 @@ static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cd
res = dfc->power_ops->get_real_power(df, power, freq, voltage);
if (!res) {
- state = dfc->capped_state;
+ state = dfc->max_state - dfc->capped_state;
/* Convert EM power into milli-Watts first */
rcu_read_lock();
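The devfreq_cooling fix works because cooling states and energy-model entries are indexed in opposite directions: state 0 means unthrottled (highest OPP) while EM entry 0 is the lowest frequency. A minimal sketch of the index conversion, assuming max_state equals the number of EM entries minus one:

/* Sketch: map a cooling state (0 = unthrottled, max_state = deepest cap)
 * onto an energy-model performance index (0 = lowest frequency).
 */
static unsigned long cooling_state_to_em_index(unsigned long max_state,
					       unsigned long capped_state)
{
	return max_state - capped_state;
}

The power lookup then reads the EM entry that the capped frequency actually corresponds to, rather than one mirrored to the wrong end of the table.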
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 1b17dc4c219cc..e25e48d76aa79 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -606,7 +606,7 @@ static int allocate_actors_buffer(struct power_allocator_params *params,
/* There might be no cooling devices yet. */
if (!num_actors) {
- ret = -EINVAL;
+ ret = 0;
goto clean_state;
}
@@ -679,11 +679,6 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
return -ENOMEM;
get_governor_trips(tz, params);
- if (!params->trip_max) {
- dev_warn(&tz->device, "power_allocator: missing trip_max\n");
- kfree(params);
- return -EINVAL;
- }
ret = check_power_actors(tz, params);
if (ret < 0) {
@@ -714,9 +709,10 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
else
params->sustainable_power = tz->tzp->sustainable_power;
- estimate_pid_constants(tz, tz->tzp->sustainable_power,
- params->trip_switch_on,
- params->trip_max->temperature);
+ if (params->trip_max)
+ estimate_pid_constants(tz, tz->tzp->sustainable_power,
+ params->trip_switch_on,
+ params->trip_max->temperature);
reset_pid_controller(params);
diff --git a/drivers/thermal/mediatek/auxadc_thermal.c b/drivers/thermal/mediatek/auxadc_thermal.c
index 8b0edb2048443..9ee2e7283435a 100644
--- a/drivers/thermal/mediatek/auxadc_thermal.c
+++ b/drivers/thermal/mediatek/auxadc_thermal.c
@@ -690,6 +690,9 @@ static const struct mtk_thermal_data mt7986_thermal_data = {
.adcpnp = mt7986_adcpnp,
.sensor_mux_values = mt7986_mux_values,
.version = MTK_THERMAL_V3,
+ .apmixed_buffer_ctl_reg = APMIXED_SYS_TS_CON1,
+ .apmixed_buffer_ctl_mask = GENMASK(31, 6) | BIT(3),
+ .apmixed_buffer_ctl_set = BIT(0),
};
static bool mtk_thermal_temp_is_valid(int temp)
diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
index 98d9c80bd4c62..fd4bd650c77a6 100644
--- a/drivers/thermal/mediatek/lvts_thermal.c
+++ b/drivers/thermal/mediatek/lvts_thermal.c
@@ -719,8 +719,10 @@ static int lvts_calibration_read(struct device *dev, struct lvts_domain *lvts_td
lvts_td->calib = devm_krealloc(dev, lvts_td->calib,
lvts_td->calib_len + len, GFP_KERNEL);
- if (!lvts_td->calib)
+ if (!lvts_td->calib) {
+ kfree(efuse);
return -ENOMEM;
+ }
memcpy(lvts_td->calib + lvts_td->calib_len, efuse, len);
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index ccc2eea7f9f54..404f01cca4dab 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -57,6 +57,9 @@
#define REGS_TTRnCR(n) (0xf10 + 4 * (n)) /* Temperature Range n
* Control Register
*/
+#define NUM_TTRCR_V1 4
+#define NUM_TTRCR_MAX 16
+
#define REGS_IPBRR(n) (0xbf8 + 4 * (n)) /* IP Block Revision
* Register n
*/
@@ -71,6 +74,7 @@ struct qoriq_sensor {
struct qoriq_tmu_data {
int ver;
+ u32 ttrcr[NUM_TTRCR_MAX];
struct regmap *regmap;
struct clk *clk;
struct qoriq_sensor sensor[SITES_MAX];
@@ -182,17 +186,17 @@ static int qoriq_tmu_calibration(struct device *dev,
struct qoriq_tmu_data *data)
{
int i, val, len;
- u32 range[4];
const u32 *calibration;
struct device_node *np = dev->of_node;
len = of_property_count_u32_elems(np, "fsl,tmu-range");
- if (len < 0 || len > 4) {
+ if (len < 0 || (data->ver == TMU_VER1 && len > NUM_TTRCR_V1) ||
+ (data->ver > TMU_VER1 && len > NUM_TTRCR_MAX)) {
dev_err(dev, "invalid range data.\n");
return len;
}
- val = of_property_read_u32_array(np, "fsl,tmu-range", range, len);
+ val = of_property_read_u32_array(np, "fsl,tmu-range", data->ttrcr, len);
if (val != 0) {
dev_err(dev, "failed to read range data.\n");
return val;
@@ -200,7 +204,7 @@ static int qoriq_tmu_calibration(struct device *dev,
/* Init temperature range registers */
for (i = 0; i < len; i++)
- regmap_write(data->regmap, REGS_TTRnCR(i), range[i]);
+ regmap_write(data->regmap, REGS_TTRnCR(i), data->ttrcr[i]);
calibration = of_get_property(np, "fsl,tmu-calibration", &len);
if (calibration == NULL || len % 8) {
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index cafcb6d6e235b..a764cb1115a57 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -428,6 +428,10 @@ static const struct of_device_id rcar_gen3_thermal_dt_ids[] = {
.compatible = "renesas,r8a779g0-thermal",
.data = &rcar_gen4_thermal_info,
},
+ {
+ .compatible = "renesas,r8a779h0-thermal",
+ .data = &rcar_gen4_thermal_info,
+ },
{},
};
MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids);
diff --git a/drivers/thermal/st/st_thermal.h b/drivers/thermal/st/st_thermal.h
index 75a84e6ec6a72..8639d9165c9b9 100644
--- a/drivers/thermal/st/st_thermal.h
+++ b/drivers/thermal/st/st_thermal.h
@@ -38,10 +38,10 @@ struct st_thermal_sensor;
*
* @power_ctrl: Function for powering on/off a sensor. Clock to the
* sensor is also controlled from this function.
- * @alloc_regfields: Allocate regmap register fields, specific to a sensor.
- * @do_memmap_regmap: Memory map the thermal register space and init regmap
+ * @alloc_regfields: Allocate regmap register fields, specific to a sensor.
+ * @do_memmap_regmap: Memory map the thermal register space and init regmap
* instance or find regmap instance.
- * @register_irq: Register an interrupt handler for a sensor.
+ * @register_irq: Register an interrupt handler for a sensor.
*/
struct st_thermal_sensor_ops {
int (*power_ctrl)(struct st_thermal_sensor *, enum st_thermal_power_state);
@@ -56,15 +56,15 @@ struct st_thermal_sensor_ops {
*
* @reg_fields: Pointer to the regfields array for a sensor.
* @sys_compat: Pointer to the syscon node compatible string.
- * @ops: Pointer to private thermal ops for a sensor.
- * @calibration_val: Default calibration value to be written to the DCORRECT
+ * @ops: Pointer to private thermal ops for a sensor.
+ * @calibration_val: Default calibration value to be written to the DCORRECT
* register field for a sensor.
- * @temp_adjust_val: Value to be added/subtracted from the data read from
+ * @temp_adjust_val: Value to be added/subtracted from the data read from
* the sensor. If value needs to be added please provide a
* positive value and if it is to be subtracted please
- * provide a negative value.
- * @crit_temp: The temperature beyond which the SoC should be shutdown
- * to prevent damage.
+ * provide a negative value.
+ * @crit_temp: The temperature beyond which the SoC should be shutdown
+ * to prevent damage.
*/
struct st_thermal_compat_data {
char *sys_compat;
diff --git a/drivers/thermal/st/st_thermal_memmap.c b/drivers/thermal/st/st_thermal_memmap.c
index e8cfa83b724a7..29c2269b0fb35 100644
--- a/drivers/thermal/st/st_thermal_memmap.c
+++ b/drivers/thermal/st/st_thermal_memmap.c
@@ -27,7 +27,7 @@ static const struct reg_field st_mmap_thermal_regfields[MAX_REGFIELDS] = {
* written simultaneously for powering on and off the temperature
* sensor. regmap_update_bits() will be used to update the register.
*/
- [INT_THRESH_HI] = REG_FIELD(STIH416_MPE_INT_THRESH, 0, 7),
+ [INT_THRESH_HI] = REG_FIELD(STIH416_MPE_INT_THRESH, 0, 7),
[DCORRECT] = REG_FIELD(STIH416_MPE_CONF, 5, 9),
[OVERFLOW] = REG_FIELD(STIH416_MPE_STATUS, 9, 9),
[DATA] = REG_FIELD(STIH416_MPE_STATUS, 11, 18),
diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
index 6a8e386dbc8dc..3203d8bd13a8f 100644
--- a/drivers/thermal/sun8i_thermal.c
+++ b/drivers/thermal/sun8i_thermal.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -50,7 +51,8 @@
#define SUN8I_THS_CTRL2_T_ACQ1(x) ((GENMASK(15, 0) & (x)) << 16)
#define SUN8I_THS_DATA_IRQ_STS(x) BIT(x + 8)
-#define SUN50I_THS_CTRL0_T_ACQ(x) ((GENMASK(15, 0) & (x)) << 16)
+#define SUN50I_THS_CTRL0_T_ACQ(x) (GENMASK(15, 0) & ((x) - 1))
+#define SUN50I_THS_CTRL0_T_SAMPLE_PER(x) ((GENMASK(15, 0) & ((x) - 1)) << 16)
#define SUN50I_THS_FILTER_EN BIT(2)
#define SUN50I_THS_FILTER_TYPE(x) (GENMASK(1, 0) & (x))
#define SUN50I_H6_THS_PC_TEMP_PERIOD(x) ((GENMASK(19, 0) & (x)) << 12)
@@ -65,6 +67,7 @@ struct tsensor {
struct ths_thermal_chip {
bool has_mod_clk;
bool has_bus_clk_reset;
+ bool needs_sram;
int sensor_num;
int offset;
int scale;
@@ -82,12 +85,16 @@ struct ths_device {
const struct ths_thermal_chip *chip;
struct device *dev;
struct regmap *regmap;
+ struct regmap_field *sram_regmap_field;
struct reset_control *reset;
struct clk *bus_clk;
struct clk *mod_clk;
struct tsensor sensor[MAX_SENSOR_NUM];
};
+/* The H616 needs to have bit 16 in the SRAM control register cleared. */
+static const struct reg_field sun8i_ths_sram_reg_field = REG_FIELD(0x0, 16, 16);
+
/* Temp Unit: millidegree Celsius */
static int sun8i_ths_calc_temp(struct ths_device *tmdev,
int id, int reg)
@@ -188,6 +195,9 @@ static irqreturn_t sun8i_irq_thread(int irq, void *data)
int i;
for_each_set_bit(i, &irq_bitmap, tmdev->chip->sensor_num) {
+ /* We allow some zones to not register. */
+ if (IS_ERR(tmdev->sensor[i].tzd))
+ continue;
thermal_zone_device_update(tmdev->sensor[i].tzd,
THERMAL_EVENT_UNSPECIFIED);
}
@@ -221,16 +231,21 @@ static int sun50i_h6_ths_calibrate(struct ths_device *tmdev,
struct device *dev = tmdev->dev;
int i, ft_temp;
- if (!caldata[0] || callen < 2 + 2 * tmdev->chip->sensor_num)
+ if (!caldata[0])
return -EINVAL;
/*
* efuse layout:
*
- * 0    11 16             32
- * +-------+-------+-------+
- * |temp|  |sensor0|sensor1|
- * +-------+-------+-------+
+ * 0         11  16     27  32     43  48     57
+ * +----------+-----------+-----------+-----------+
+ * |  temp    |  |sensor0|  |sensor1|  |sensor2|  |
+ * +----------+-----------+-----------+-----------+
+ *             ^          ^          ^
+ *             |          |          |
+ *             |          |          sensor3[11:8]
+ *             |          sensor3[7:4]
+ *             sensor3[3:0]
*
* The calibration data on the H6 is the ambient temperature and
* sensor values that are filled during the factory test stage.
@@ -243,9 +258,16 @@ static int sun50i_h6_ths_calibrate(struct ths_device *tmdev,
ft_temp = (caldata[0] & FT_TEMP_MASK) * 100;
for (i = 0; i < tmdev->chip->sensor_num; i++) {
- int sensor_reg = caldata[i + 1] & TEMP_CALIB_MASK;
- int cdata, offset;
- int sensor_temp = tmdev->chip->calc_temp(tmdev, i, sensor_reg);
+ int sensor_reg, sensor_temp, cdata, offset;
+
+ if (i == 3)
+ sensor_reg = (caldata[1] >> 12)
+ | ((caldata[2] >> 12) << 4)
+ | ((caldata[3] >> 12) << 8);
+ else
+ sensor_reg = caldata[i + 1] & TEMP_CALIB_MASK;
+
+ sensor_temp = tmdev->chip->calc_temp(tmdev, i, sensor_reg);
/*
* Calibration data is CALIBRATE_DEFAULT - (calculated
@@ -324,6 +346,34 @@ static void sun8i_ths_reset_control_assert(void *data)
reset_control_assert(data);
}
+static struct regmap *sun8i_ths_get_sram_regmap(struct device_node *node)
+{
+ struct device_node *sram_node;
+ struct platform_device *sram_pdev;
+ struct regmap *regmap = NULL;
+
+ sram_node = of_parse_phandle(node, "allwinner,sram", 0);
+ if (!sram_node)
+ return ERR_PTR(-ENODEV);
+
+ sram_pdev = of_find_device_by_node(sram_node);
+ if (!sram_pdev) {
+ /* platform device might not be probed yet */
+ regmap = ERR_PTR(-EPROBE_DEFER);
+ goto out_put_node;
+ }
+
+ /* If no regmap is found then the other device driver is at fault */
+ regmap = dev_get_regmap(&sram_pdev->dev, NULL);
+ if (!regmap)
+ regmap = ERR_PTR(-EINVAL);
+
+ platform_device_put(sram_pdev);
+out_put_node:
+ of_node_put(sram_node);
+ return regmap;
+}
+
static int sun8i_ths_resource_init(struct ths_device *tmdev)
{
struct device *dev = tmdev->dev;
@@ -368,6 +418,19 @@ static int sun8i_ths_resource_init(struct ths_device *tmdev)
if (ret)
return ret;
+ if (tmdev->chip->needs_sram) {
+ struct regmap *regmap;
+
+ regmap = sun8i_ths_get_sram_regmap(dev->of_node);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+ tmdev->sram_regmap_field = devm_regmap_field_alloc(dev,
+ regmap,
+ sun8i_ths_sram_reg_field);
+ if (IS_ERR(tmdev->sram_regmap_field))
+ return PTR_ERR(tmdev->sram_regmap_field);
+ }
+
ret = sun8i_ths_calibrate(tmdev);
if (ret)
return ret;
@@ -410,25 +473,31 @@ static int sun8i_h3_thermal_init(struct ths_device *tmdev)
return 0;
}
-/*
- * Without this undocumented value, the returned temperatures would
- * be higher than real ones by about 20C.
- */
-#define SUN50I_H6_CTRL0_UNK 0x0000002f
-
static int sun50i_h6_thermal_init(struct ths_device *tmdev)
{
int val;
+ /* The H616 needs to have a bit in the SRAM control register cleared. */
+ if (tmdev->sram_regmap_field)
+ regmap_field_write(tmdev->sram_regmap_field, 0);
+
/*
- * T_acq = 20us
- * clkin = 24MHz
- *
- * x = T_acq * clkin - 1
- * = 479
+ * The manual recommends an overall sample frequency of 50 kHz (20us,
+ * 480 cycles at 24 MHz), which provides plenty of time for both the
+ * acquisition time (>24 cycles) and the actual conversion time
+ * (>14 cycles).
+ * The lower half of the CTRL register holds the "acquire time", in
+ * clock cycles, which the manual recommends to be 2us:
+ * 24MHz * 2us = 48 cycles.
+ * The high half of THS_CTRL encodes the sample frequency, in clock
+ * cycles: 24MHz * 20us = 480 cycles.
+ * This is explained in the H616 manual, but apparently wrongly
+ * described in the H6 manual, although the BSP code does the same
+ * for both SoCs.
*/
regmap_write(tmdev->regmap, SUN50I_THS_CTRL0,
- SUN50I_H6_CTRL0_UNK | SUN50I_THS_CTRL0_T_ACQ(479));
+ SUN50I_THS_CTRL0_T_ACQ(48) |
+ SUN50I_THS_CTRL0_T_SAMPLE_PER(480));
/* average over 4 samples */
regmap_write(tmdev->regmap, SUN50I_H6_THS_MFC,
SUN50I_THS_FILTER_EN |
@@ -465,8 +534,17 @@ static int sun8i_ths_register(struct ths_device *tmdev)
i,
&tmdev->sensor[i],
&ths_ops);
- if (IS_ERR(tmdev->sensor[i].tzd))
- return PTR_ERR(tmdev->sensor[i].tzd);
+
+ /*
+ * If an individual zone fails to register for reasons
+ * other than probe deferral (e.g. a bad DT) then carry
+ * on; other zones might register successfully.
+ */
+ if (IS_ERR(tmdev->sensor[i].tzd)) {
+ if (PTR_ERR(tmdev->sensor[i].tzd) == -EPROBE_DEFER)
+ return PTR_ERR(tmdev->sensor[i].tzd);
+ continue;
+ }
devm_thermal_add_hwmon_sysfs(tmdev->dev, tmdev->sensor[i].tzd);
}
@@ -618,6 +696,20 @@ static const struct ths_thermal_chip sun20i_d1_ths = {
.calc_temp = sun8i_ths_calc_temp,
};
+static const struct ths_thermal_chip sun50i_h616_ths = {
+ .sensor_num = 4,
+ .has_bus_clk_reset = true,
+ .needs_sram = true,
+ .ft_deviation = 8000,
+ .offset = 263655,
+ .scale = 810,
+ .temp_data_base = SUN50I_H6_THS_TEMP_DATA,
+ .calibrate = sun50i_h6_ths_calibrate,
+ .init = sun50i_h6_thermal_init,
+ .irq_ack = sun50i_h6_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
static const struct of_device_id of_ths_match[] = {
{ .compatible = "allwinner,sun8i-a83t-ths", .data = &sun8i_a83t_ths },
{ .compatible = "allwinner,sun8i-h3-ths", .data = &sun8i_h3_ths },
@@ -627,6 +719,7 @@ static const struct of_device_id of_ths_match[] = {
{ .compatible = "allwinner,sun50i-h5-ths", .data = &sun50i_h5_ths },
{ .compatible = "allwinner,sun50i-h6-ths", .data = &sun50i_h6_ths },
{ .compatible = "allwinner,sun20i-d1-ths", .data = &sun20i_d1_ths },
+ { .compatible = "allwinner,sun50i-h616-ths", .data = &sun50i_h616_ths },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, of_ths_match);
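
For reference, the new T_ACQ/T_SAMPLE_PER encoding in the hunks above works out to the same CTRL0 value the old code wrote (0x2f in the low half, 479 in the high half). The sketch below is a standalone illustration of that arithmetic, not part of the patch; the userspace harness and the GENMASK stand-in exist only to make it compile.

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))
#define SUN50I_THS_CTRL0_T_ACQ(x)		(GENMASK(15, 0) & ((x) - 1))
#define SUN50I_THS_CTRL0_T_SAMPLE_PER(x)	((GENMASK(15, 0) & ((x) - 1)) << 16)

int main(void)
{
	unsigned int clk_mhz = 24;			/* THS clock: 24 MHz */
	unsigned int acq_cycles = clk_mhz * 2;		/*  2 us ->  48 cycles */
	unsigned int per_cycles = clk_mhz * 20;		/* 20 us -> 480 cycles */
	uint32_t ctrl0 = SUN50I_THS_CTRL0_T_ACQ(acq_cycles) |
			 SUN50I_THS_CTRL0_T_SAMPLE_PER(per_cycles);

	/* Prints CTRL0 = 0x01df002f: 479 (period - 1) high, 47 (acq - 1) low */
	printf("CTRL0 = 0x%08x\n", (unsigned int)ctrl0);
	return 0;
}
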
diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c
index c617e8b9f0ddf..d78d54ae2605e 100644
--- a/drivers/thermal/thermal_debugfs.c
+++ b/drivers/thermal/thermal_debugfs.c
@@ -616,6 +616,7 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
tze->trip_stats[trip_id].timestamp = now;
tze->trip_stats[trip_id].max = max(tze->trip_stats[trip_id].max, temperature);
tze->trip_stats[trip_id].min = min(tze->trip_stats[trip_id].min, temperature);
+ tze->trip_stats[trip_id].count++;
tze->trip_stats[trip_id].avg = tze->trip_stats[trip_id].avg +
(temperature - tze->trip_stats[trip_id].avg) /
tze->trip_stats[trip_id].count;
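
The added count++ matters for the statement it precedes: the average is kept as an incremental mean, avg_n = avg_(n-1) + (x_n - avg_(n-1)) / n, so the counter has to include the sample being folded in before it is used as the divisor. Below is a minimal standalone sketch of that recurrence, with a simplified stand-in for the trip_stats fields; it is an illustration, not the driver structure itself.

#include <stdio.h>

struct trip_stats { int count; int avg; };

static void trip_stats_update(struct trip_stats *s, int temperature)
{
	s->count++;					/* must happen first      */
	s->avg += (temperature - s->avg) / s->count;	/* divide by n, not n - 1 */
}

int main(void)
{
	struct trip_stats s = { 0, 0 };
	int samples[] = { 45000, 47000, 49000 };

	for (int i = 0; i < 3; i++)
		trip_stats_update(&s, samples[i]);
	/* Integer-arithmetic running mean of the three samples: 47000 */
	printf("count=%d avg=%d\n", s.count, s.avg);
	return 0;
}
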
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index f1cbf9aa62cfe..aa34b6e82e268 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -227,14 +227,18 @@ static int thermal_of_monitor_init(struct device_node *np, int *delay, int *pdel
int ret;
ret = of_property_read_u32(np, "polling-delay-passive", pdelay);
- if (ret < 0) {
- pr_err("%pOFn: missing polling-delay-passive property\n", np);
+ if (ret == -EINVAL) {
+ *pdelay = 0;
+ } else if (ret < 0) {
+ pr_err("%pOFn: Couldn't get polling-delay-passive: %d\n", np, ret);
return ret;
}
ret = of_property_read_u32(np, "polling-delay", delay);
- if (ret < 0) {
- pr_err("%pOFn: missing polling-delay property\n", np);
+ if (ret == -EINVAL) {
+ *delay = 0;
+ } else if (ret < 0) {
+ pr_err("%pOFn: Couldn't get polling-delay: %d\n", np, ret);
return ret;
}
@@ -460,7 +464,7 @@ static void thermal_of_zone_unregister(struct thermal_zone_device *tz)
* @ops: A set of thermal sensor ops
*
* Return: a valid thermal zone structure pointer on success.
- * - EINVAL: if the device tree thermal description is malformed
+ * - EINVAL: if the device tree thermal description is malformed
* - ENOMEM: if one structure can not be allocated
* - Other negative errors are returned by the underlying called functions
*/
diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
index 09f6050dd0416..497abf0d47cac 100644
--- a/drivers/thermal/thermal_trip.c
+++ b/drivers/thermal/thermal_trip.c
@@ -65,7 +65,6 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
{
const struct thermal_trip *trip;
int low = -INT_MAX, high = INT_MAX;
- bool same_trip = false;
int ret;
lockdep_assert_held(&tz->lock);
@@ -74,36 +73,22 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
return;
for_each_trip(tz, trip) {
- bool low_set = false;
int trip_low;
trip_low = trip->temperature - trip->hysteresis;
- if (trip_low < tz->temperature && trip_low > low) {
+ if (trip_low < tz->temperature && trip_low > low)
low = trip_low;
- low_set = true;
- same_trip = false;
- }
if (trip->temperature > tz->temperature &&
- trip->temperature < high) {
+ trip->temperature < high)
high = trip->temperature;
- same_trip = low_set;
- }
}
/* No need to change trip points */
if (tz->prev_low_trip == low && tz->prev_high_trip == high)
return;
- /*
- * If "high" and "low" are the same, skip the change unless this is the
- * first time.
- */
- if (same_trip && (tz->prev_low_trip != -INT_MAX ||
- tz->prev_high_trip != INT_MAX))
- return;
-
tz->prev_low_trip = low;
tz->prev_high_trip = high;
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index c8b3d7b780982..b44b32dcb8322 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I$(src)
obj-${CONFIG_USB4} := thunderbolt.o
thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index d997a4c545f79..4bdb2d45e0bff 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -15,6 +15,8 @@
#include "ctl.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
#define TB_CTL_RX_PKG_COUNT 10
#define TB_CTL_RETRIES 4
@@ -32,6 +34,7 @@
* @timeout_msec: Default timeout for non-raw control messages
* @callback: Callback called when hotplug message is received
* @callback_data: Data passed to @callback
+ * @index: Domain number. This will be output with the trace record.
*/
struct tb_ctl {
struct tb_nhi *nhi;
@@ -47,6 +50,8 @@ struct tb_ctl {
int timeout_msec;
event_cb callback;
void *callback_data;
+
+ int index;
};
@@ -369,6 +374,9 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
pkg->frame.size = len + 4;
pkg->frame.sof = type;
pkg->frame.eof = type;
+
+ trace_tb_tx(ctl->index, type, data, len);
+
cpu_to_be32_array(pkg->buffer, data, len / 4);
*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);
@@ -384,6 +392,7 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
struct ctl_pkg *pkg, size_t size)
{
+ trace_tb_event(ctl->index, type, pkg->buffer, size);
return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}
@@ -489,6 +498,9 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
* triggered from messing with the active requests.
*/
req = tb_cfg_request_find(pkg->ctl, pkg);
+
+ trace_tb_rx(pkg->ctl->index, frame->eof, pkg->buffer, frame->size, !req);
+
if (req) {
if (req->copy(req, pkg))
schedule_work(&req->work);
@@ -614,6 +626,7 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
/**
* tb_ctl_alloc() - allocate a control channel
* @nhi: Pointer to NHI
+ * @index: Domain number
* @timeout_msec: Default timeout used with non-raw control messages
* @cb: Callback called for plug events
* @cb_data: Data passed to @cb
@@ -622,14 +635,16 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
*
* Return: Returns a pointer on success or NULL on failure.
*/
-struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
- void *cb_data)
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
+ event_cb cb, void *cb_data)
{
int i;
struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
if (!ctl)
return NULL;
+
ctl->nhi = nhi;
+ ctl->index = index;
ctl->timeout_msec = timeout_msec;
ctl->callback = cb;
ctl->callback_data = cb_data;
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index eec5c953c743c..bf930a1914723 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -21,8 +21,8 @@ struct tb_ctl;
typedef bool (*event_cb)(void *data, enum tb_cfg_pkg_type type,
const void *buf, size_t size);
-struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
- void *cb_data);
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
+ event_cb cb, void *cb_data);
void tb_ctl_start(struct tb_ctl *ctl);
void tb_ctl_stop(struct tb_ctl *ctl);
void tb_ctl_free(struct tb_ctl *ctl);
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 9fb1a64f3300b..0023017299f76 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -321,12 +321,12 @@ static void tb_domain_release(struct device *dev)
tb_ctl_free(tb->ctl);
destroy_workqueue(tb->wq);
- ida_simple_remove(&tb_domain_ida, tb->index);
+ ida_free(&tb_domain_ida, tb->index);
mutex_destroy(&tb->lock);
kfree(tb);
}
-struct device_type tb_domain_type = {
+const struct device_type tb_domain_type = {
.name = "thunderbolt_domain",
.release = tb_domain_release,
};
@@ -389,7 +389,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
tb->nhi = nhi;
mutex_init(&tb->lock);
- tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
+ tb->index = ida_alloc(&tb_domain_ida, GFP_KERNEL);
if (tb->index < 0)
goto err_free;
@@ -397,7 +397,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
if (!tb->wq)
goto err_remove_ida;
- tb->ctl = tb_ctl_alloc(nhi, timeout_msec, tb_domain_event_cb, tb);
+ tb->ctl = tb_ctl_alloc(nhi, tb->index, timeout_msec, tb_domain_event_cb, tb);
if (!tb->ctl)
goto err_destroy_wq;
@@ -413,7 +413,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
err_destroy_wq:
destroy_workqueue(tb->wq);
err_remove_ida:
- ida_simple_remove(&tb_domain_ida, tb->index);
+ ida_free(&tb_domain_ida, tb->index);
err_free:
kfree(tb);
@@ -423,6 +423,7 @@ err_free:
/**
* tb_domain_add() - Add domain to the system
* @tb: Domain to add
+ * @reset: Issue reset to the host router
*
* Starts the domain and adds it to the system. Hotplugging devices will
* work after this has been returned successfully. In order to remove
@@ -431,7 +432,7 @@ err_free:
*
* Return: %0 in case of success and negative errno in case of error
*/
-int tb_domain_add(struct tb *tb)
+int tb_domain_add(struct tb *tb, bool reset)
{
int ret;
@@ -460,7 +461,7 @@ int tb_domain_add(struct tb *tb)
/* Start the domain */
if (tb->cm_ops->start) {
- ret = tb->cm_ops->start(tb);
+ ret = tb->cm_ops->start(tb, reset);
if (ret)
goto err_domain_del;
}
@@ -505,6 +506,10 @@ void tb_domain_remove(struct tb *tb)
mutex_unlock(&tb->lock);
flush_workqueue(tb->wq);
+
+ if (tb->cm_ops->deinit)
+ tb->cm_ops->deinit(tb);
+
device_unregister(&tb->dev);
}
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 56790d50f9e32..baf10d099c778 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -2144,7 +2144,7 @@ static int icm_runtime_resume(struct tb *tb)
return 0;
}
-static int icm_start(struct tb *tb)
+static int icm_start(struct tb *tb, bool not_used)
{
struct icm *icm = tb_priv(tb);
int ret;
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index 633970fbe9b05..63cb4b6afb718 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -6,6 +6,8 @@
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
+#include <linux/delay.h>
+
#include "tb.h"
/**
@@ -45,6 +47,49 @@ static int find_port_lc_cap(struct tb_port *port)
return sw->cap_lc + start + phys * size;
}
+/**
+ * tb_lc_reset_port() - Trigger downstream port reset through LC
+ * @port: Port that is reset
+ *
+ * Triggers downstream port reset through link controller registers.
+ * Returns %0 in case of success, negative errno otherwise. Only supports
+ * non-USB4 routers with link controller (that's Thunderbolt 2 and
+ * Thunderbolt 3).
+ */
+int tb_lc_reset_port(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+ int cap, ret;
+ u32 mode;
+
+ if (sw->generation < 2)
+ return -EINVAL;
+
+ cap = find_port_lc_cap(port);
+ if (cap < 0)
+ return cap;
+
+ ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+ if (ret)
+ return ret;
+
+ mode |= TB_LC_PORT_MODE_DPR;
+
+ ret = tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+ if (ret)
+ return ret;
+
+ fsleep(10000);
+
+ ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+ if (ret)
+ return ret;
+
+ mode &= ~TB_LC_PORT_MODE_DPR;
+
+ return tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+}
+
static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
bool upstream = tb_is_upstream_port(port);
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index fb4f46e51753a..7af2642b97cb8 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -48,7 +48,7 @@
static bool host_reset = true;
module_param(host_reset, bool, 0444);
-MODULE_PARM_DESC(host_reset, "reset USBv2 host router (default: true)");
+MODULE_PARM_DESC(host_reset, "reset USB4 host router (default: true)");
static int ring_interrupt_index(const struct tb_ring *ring)
{
@@ -465,7 +465,7 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
if (!nhi->pdev->msix_enabled)
return 0;
- ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
+ ret = ida_alloc_max(&nhi->msix_ida, MSIX_MAX_VECS - 1, GFP_KERNEL);
if (ret < 0)
return ret;
@@ -485,7 +485,7 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
return 0;
err_ida_remove:
- ida_simple_remove(&nhi->msix_ida, ring->vector);
+ ida_free(&nhi->msix_ida, ring->vector);
return ret;
}
@@ -496,7 +496,7 @@ static void ring_release_msix(struct tb_ring *ring)
return;
free_irq(ring->irq, ring);
- ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
+ ida_free(&ring->nhi->msix_ida, ring->vector);
ring->vector = 0;
ring->irq = 0;
}
@@ -1364,7 +1364,6 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi_check_quirks(nhi);
nhi_check_iommu(nhi);
-
nhi_reset(nhi);
res = nhi_init_msi(nhi);
@@ -1392,7 +1391,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_dbg(dev, "NHI initialized, starting thunderbolt\n");
- res = tb_domain_add(tb);
+ res = tb_domain_add(tb, host_reset);
if (res) {
/*
* At this point the RX/TX rings might already have been
diff --git a/drivers/thunderbolt/nvm.c b/drivers/thunderbolt/nvm.c
index 69fb3b0fa34fa..8901db2de327c 100644
--- a/drivers/thunderbolt/nvm.c
+++ b/drivers/thunderbolt/nvm.c
@@ -330,7 +330,7 @@ struct tb_nvm *tb_nvm_alloc(struct device *dev)
if (!nvm)
return ERR_PTR(-ENOMEM);
- ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&nvm_ida, GFP_KERNEL);
if (ret < 0) {
kfree(nvm);
return ERR_PTR(ret);
@@ -528,7 +528,7 @@ void tb_nvm_free(struct tb_nvm *nvm)
nvmem_unregister(nvm->non_active);
nvmem_unregister(nvm->active);
vfree(nvm->buf);
- ida_simple_remove(&nvm_ida, nvm->id);
+ ida_free(&nvm_ida, nvm->id);
}
kfree(nvm);
}
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index 091a81bbdbdc9..f760e54cd9bd1 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -446,6 +446,19 @@ static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
return -ETIMEDOUT;
}
+/**
+ * tb_path_deactivate_hop() - Deactivate one path in path config space
+ * @port: Lane or protocol adapter
+ * @hop_index: HopID of the path to be cleared
+ *
+ * This deactivates or clears a single path config space entry at
+ * @hop_index. Returns %0 on success and negative errno otherwise.
+ */
+int tb_path_deactivate_hop(struct tb_port *port, int hop_index)
+{
+ return __tb_path_deactivate_hop(port, hop_index, true);
+}
+
static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
{
int i, res;
diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
index e6bfa63b40aee..e81de9c30eac9 100644
--- a/drivers/thunderbolt/quirks.c
+++ b/drivers/thunderbolt/quirks.c
@@ -43,6 +43,12 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
}
}
+static void quirk_block_rpm_in_redrive(struct tb_switch *sw)
+{
+ sw->quirks |= QUIRK_KEEP_POWER_IN_DP_REDRIVE;
+ tb_sw_dbg(sw, "preventing runtime PM in DP redrive mode\n");
+}
+
struct tb_quirk {
u16 hw_vendor_id;
u16 hw_device_id;
@@ -87,6 +93,14 @@ static const struct tb_quirk tb_quirks[] = {
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
/*
+ * Block Runtime PM in DP redrive mode for Intel Barlow Ridge host
+ * controllers.
+ */
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000,
+ quirk_block_rpm_in_redrive },
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000,
+ quirk_block_rpm_in_redrive },
+ /*
* CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
*/
{ 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable },
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index d49d6628dbf29..6bb49bdcd6c18 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -356,7 +356,7 @@ static void tb_retimer_release(struct device *dev)
kfree(rt);
}
-struct device_type tb_retimer_type = {
+const struct device_type tb_retimer_type = {
.name = "thunderbolt_retimer",
.groups = retimer_groups,
.release = tb_retimer_release,
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index fad40c4bc7103..326433df5880e 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -676,6 +676,13 @@ int tb_port_disable(struct tb_port *port)
return __tb_port_enable(port, false);
}
+static int tb_port_reset(struct tb_port *port)
+{
+ if (tb_switch_is_usb4(port->sw))
+ return port->cap_usb4 ? usb4_port_reset(port) : 0;
+ return tb_lc_reset_port(port);
+}
+
/*
* tb_init_port() - initialize a port
*
@@ -771,7 +778,7 @@ static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
if (max_hopid < 0 || max_hopid > port_max_hopid)
max_hopid = port_max_hopid;
- return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
+ return ida_alloc_range(ida, min_hopid, max_hopid, GFP_KERNEL);
}
/**
@@ -809,7 +816,7 @@ int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
*/
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
- ida_simple_remove(&port->in_hopids, hopid);
+ ida_free(&port->in_hopids, hopid);
}
/**
@@ -819,7 +826,7 @@ void tb_port_release_in_hopid(struct tb_port *port, int hopid)
*/
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
- ida_simple_remove(&port->out_hopids, hopid);
+ ida_free(&port->out_hopids, hopid);
}
static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
@@ -1120,7 +1127,7 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
ret = tb_port_set_link_width(port->dual_link_port,
TB_LINK_WIDTH_DUAL);
if (ret)
- goto err_lane0;
+ goto err_lane1;
}
/*
@@ -1534,29 +1541,124 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
regs->__unknown1, regs->__unknown4);
}
+static int tb_switch_reset_host(struct tb_switch *sw)
+{
+ if (sw->generation > 1) {
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ int i, ret;
+
+ /*
+ * For lane adapters we issue downstream port
+ * reset and clear up path config spaces.
+ *
+ * For protocol adapters we disable the path and
+ * clear path config space one by one (from 8 to
+ * Max Input HopID of the adapter).
+ */
+ if (tb_port_is_null(port) && !tb_is_upstream_port(port)) {
+ ret = tb_port_reset(port);
+ if (ret)
+ return ret;
+ } else if (tb_port_is_usb3_down(port) ||
+ tb_port_is_usb3_up(port)) {
+ tb_usb3_port_enable(port, false);
+ } else if (tb_port_is_dpin(port) ||
+ tb_port_is_dpout(port)) {
+ tb_dp_port_enable(port, false);
+ } else if (tb_port_is_pcie_down(port) ||
+ tb_port_is_pcie_up(port)) {
+ tb_pci_port_enable(port, false);
+ } else {
+ continue;
+ }
+
+ /* Cleanup path config space of protocol adapter */
+ for (i = TB_PATH_MIN_HOPID;
+ i <= port->config.max_in_hop_id; i++) {
+ ret = tb_path_deactivate_hop(port, i);
+ if (ret)
+ return ret;
+ }
+ }
+ } else {
+ struct tb_cfg_result res;
+
+ /* Thunderbolt 1 uses the "reset" config space packet */
+ res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
+ TB_CFG_SWITCH, 2, 2);
+ if (res.err)
+ return res.err;
+ res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
+ if (res.err > 0)
+ return -EIO;
+ else if (res.err < 0)
+ return res.err;
+ }
+
+ return 0;
+}
+
+static int tb_switch_reset_device(struct tb_switch *sw)
+{
+ return tb_port_reset(tb_switch_downstream_port(sw));
+}
+
+static bool tb_switch_enumerated(struct tb_switch *sw)
+{
+ u32 val;
+ int ret;
+
+ /*
+ * Read directly from the hardware because we use this also
+ * during system sleep where sw->config.enabled is already set
+ * by us.
+ */
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1);
+ if (ret)
+ return false;
+
+ return !!(val & ROUTER_CS_3_V);
+}
+
/**
- * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
- * @sw: Switch to reset
+ * tb_switch_reset() - Perform reset to the router
+ * @sw: Router to reset
*
- * Return: Returns 0 on success or an error code on failure.
+ * Issues reset to the router @sw. Can be used for any router. For host
+ * routers, resets all the downstream ports and cleans up path config
+ * spaces accordingly. For device routers, issues a downstream port reset
+ * through the parent router, so as a side effect there will be an unplug
+ * soon after this is finished.
+ *
+ * If the router is not enumerated, this does nothing.
+ *
+ * Returns %0 on success or negative errno in case of failure.
*/
int tb_switch_reset(struct tb_switch *sw)
{
- struct tb_cfg_result res;
+ int ret;
- if (sw->generation > 1)
+ /*
+ * We cannot access the port config spaces unless the router is
+ * already enumerated. If the router is not enumerated it is
+ * equal to being reset so we can skip that here.
+ */
+ if (!tb_switch_enumerated(sw))
return 0;
- tb_sw_dbg(sw, "resetting switch\n");
+ tb_sw_dbg(sw, "resetting\n");
+
+ if (tb_route(sw))
+ ret = tb_switch_reset_device(sw);
+ else
+ ret = tb_switch_reset_host(sw);
+
+ if (ret)
+ tb_sw_warn(sw, "failed to reset\n");
- res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
- TB_CFG_SWITCH, 2, 2);
- if (res.err)
- return res.err;
- res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
- if (res.err > 0)
- return -EIO;
- return res.err;
+ return ret;
}
/**
@@ -2228,7 +2330,7 @@ static const struct dev_pm_ops tb_switch_pm_ops = {
NULL)
};
-struct device_type tb_switch_type = {
+const struct device_type tb_switch_type = {
.name = "thunderbolt_device",
.release = tb_switch_release,
.uevent = tb_switch_uevent,
@@ -3078,22 +3180,29 @@ void tb_switch_unconfigure_link(struct tb_switch *sw)
{
struct tb_port *up, *down;
- if (sw->is_unplugged)
- return;
if (!tb_route(sw) || tb_switch_is_icm(sw))
return;
+ /*
+ * Unconfigure downstream port so that wake-on-connect can be
+ * configured after router unplug. No need to unconfigure upstream port
+ * since its router is unplugged.
+ */
up = tb_upstream_port(sw);
- if (tb_switch_is_usb4(up->sw))
- usb4_port_unconfigure(up);
- else
- tb_lc_unconfigure_port(up);
-
down = up->remote;
if (tb_switch_is_usb4(down->sw))
usb4_port_unconfigure(down);
else
tb_lc_unconfigure_port(down);
+
+ if (sw->is_unplugged)
+ return;
+
+ up = tb_upstream_port(sw);
+ if (tb_switch_is_usb4(up->sw))
+ usb4_port_unconfigure(up);
+ else
+ tb_lc_unconfigure_port(up);
}
static void tb_switch_credits_init(struct tb_switch *sw)
@@ -3339,7 +3448,26 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
return tb_lc_set_wake(sw, flags);
}
-int tb_switch_resume(struct tb_switch *sw)
+static void tb_switch_check_wakes(struct tb_switch *sw)
+{
+ if (device_may_wakeup(&sw->dev)) {
+ if (tb_switch_is_usb4(sw))
+ usb4_switch_check_wakes(sw);
+ }
+}
+
+/**
+ * tb_switch_resume() - Resume a switch after sleep
+ * @sw: Switch to resume
+ * @runtime: Is this resume from runtime suspend or system sleep
+ *
+ * Resumes and re-enumerates the router (and all its children), if it is
+ * still plugged in after suspend. Does not enumerate a device router whose
+ * UID changed during suspend. If this is a resume from system sleep,
+ * notifies the PM core about any wakes that occurred during suspend.
+ * Disables all wakes except the USB4 wake of the upstream port, which must
+ * stay enabled for USB4 routers.
+ */
+int tb_switch_resume(struct tb_switch *sw, bool runtime)
{
struct tb_port *port;
int err;
@@ -3388,6 +3516,9 @@ int tb_switch_resume(struct tb_switch *sw)
if (err)
return err;
+ if (!runtime)
+ tb_switch_check_wakes(sw);
+
/* Disable wakes */
tb_switch_set_wake(sw, 0);
@@ -3417,7 +3548,8 @@ int tb_switch_resume(struct tb_switch *sw)
*/
if (tb_port_unlock(port))
tb_port_warn(port, "failed to unlock port\n");
- if (port->remote && tb_switch_resume(port->remote->sw)) {
+ if (port->remote &&
+ tb_switch_resume(port->remote->sw, runtime)) {
tb_port_warn(port,
"lost during suspend, disconnecting\n");
tb_sw_set_unplugged(port->remote->sw);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 846d2813bb1a5..3e44c78ac4092 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -17,6 +17,7 @@
#include "tunnel.h"
#define TB_TIMEOUT 100 /* ms */
+#define TB_RELEASE_BW_TIMEOUT 10000 /* ms */
/*
* Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
@@ -75,112 +76,6 @@ struct tb_hotplug_event {
bool unplug;
};
-static void tb_init_bandwidth_groups(struct tb_cm *tcm)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- struct tb_bandwidth_group *group = &tcm->groups[i];
-
- group->tb = tcm_to_tb(tcm);
- group->index = i + 1;
- INIT_LIST_HEAD(&group->ports);
- }
-}
-
-static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
- struct tb_port *in)
-{
- if (!group || WARN_ON(in->group))
- return;
-
- in->group = group;
- list_add_tail(&in->group_list, &group->ports);
-
- tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
-}
-
-static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- struct tb_bandwidth_group *group = &tcm->groups[i];
-
- if (list_empty(&group->ports))
- return group;
- }
-
- return NULL;
-}
-
-static struct tb_bandwidth_group *
-tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
- struct tb_port *out)
-{
- struct tb_bandwidth_group *group;
- struct tb_tunnel *tunnel;
-
- /*
- * Find all DP tunnels that go through all the same USB4 links
- * as this one. Because we always setup tunnels the same way we
- * can just check for the routers at both ends of the tunnels
- * and if they are the same we have a match.
- */
- list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
- if (!tb_tunnel_is_dp(tunnel))
- continue;
-
- if (tunnel->src_port->sw == in->sw &&
- tunnel->dst_port->sw == out->sw) {
- group = tunnel->src_port->group;
- if (group) {
- tb_bandwidth_group_attach_port(group, in);
- return group;
- }
- }
- }
-
- /* Pick up next available group then */
- group = tb_find_free_bandwidth_group(tcm);
- if (group)
- tb_bandwidth_group_attach_port(group, in);
- else
- tb_port_warn(in, "no available bandwidth groups\n");
-
- return group;
-}
-
-static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
- struct tb_port *out)
-{
- if (usb4_dp_port_bandwidth_mode_enabled(in)) {
- int index, i;
-
- index = usb4_dp_port_group_id(in);
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- if (tcm->groups[i].index == index) {
- tb_bandwidth_group_attach_port(&tcm->groups[i], in);
- return;
- }
- }
- }
-
- tb_attach_bandwidth_group(tcm, in, out);
-}
-
-static void tb_detach_bandwidth_group(struct tb_port *in)
-{
- struct tb_bandwidth_group *group = in->group;
-
- if (group) {
- in->group = NULL;
- list_del_init(&in->group_list);
-
- tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
- }
-}
-
static void tb_handle_hotplug(struct work_struct *work);
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
@@ -472,34 +367,6 @@ static void tb_switch_discover_tunnels(struct tb_switch *sw,
}
}
-static void tb_discover_tunnels(struct tb *tb)
-{
- struct tb_cm *tcm = tb_priv(tb);
- struct tb_tunnel *tunnel;
-
- tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
-
- list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
- if (tb_tunnel_is_pci(tunnel)) {
- struct tb_switch *parent = tunnel->dst_port->sw;
-
- while (parent != tunnel->src_port->sw) {
- parent->boot = true;
- parent = tb_switch_parent(parent);
- }
- } else if (tb_tunnel_is_dp(tunnel)) {
- struct tb_port *in = tunnel->src_port;
- struct tb_port *out = tunnel->dst_port;
-
- /* Keep the domain from powering down */
- pm_runtime_get_sync(&in->sw->dev);
- pm_runtime_get_sync(&out->sw->dev);
-
- tb_discover_bandwidth_group(tcm, in, out);
- }
- }
-}
-
static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
if (tb_switch_is_usb4(port->sw))
@@ -681,6 +548,10 @@ static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
* Calculates consumed DP bandwidth at @port between path from @src_port
* to @dst_port. Does not take tunnel starting from @src_port and ending
* from @src_port into account.
+ *
+ * If there is bandwidth reserved for any of the groups between
+ * @src_port and @dst_port (but not yet used), that is also taken into
+ * account in the returned consumed bandwidth.
*/
static int tb_consumed_dp_bandwidth(struct tb *tb,
struct tb_port *src_port,
@@ -689,9 +560,11 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
int *consumed_up,
int *consumed_down)
{
+ int group_reserved[MAX_GROUPS] = {};
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
- int ret;
+ bool downstream;
+ int i, ret;
*consumed_up = *consumed_down = 0;
@@ -700,6 +573,7 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
* their consumed bandwidth from the available.
*/
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ const struct tb_bandwidth_group *group;
int dp_consumed_up, dp_consumed_down;
if (tb_tunnel_is_invalid(tunnel))
@@ -712,6 +586,15 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
continue;
/*
+ * Calculate what is reserved for groups crossing the
+ * same ports only once (as that is reserved for all the
+ * tunnels in the group).
+ */
+ group = tunnel->src_port->group;
+ if (group && group->reserved && !group_reserved[group->index])
+ group_reserved[group->index] = group->reserved;
+
+ /*
* Ignore the DP tunnel between src_port and dst_port
* because it is the same tunnel and we may be
* re-calculating estimated bandwidth.
@@ -729,6 +612,14 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
*consumed_down += dp_consumed_down;
}
+ downstream = tb_port_path_direction_downstream(src_port, dst_port);
+ for (i = 0; i < ARRAY_SIZE(group_reserved); i++) {
+ if (downstream)
+ *consumed_down += group_reserved[i];
+ else
+ *consumed_up += group_reserved[i];
+ }
+
return 0;
}
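
The group_reserved[] array above exists so that a reservation shared by several tunnels in one bandwidth group is added to the consumed figures only once. Below is a small standalone sketch of that dedup pattern, using simplified stand-ins for the driver structures; the group count and numbers are made up for illustration.

#include <stdio.h>

#define MAX_GROUPS 7

struct bw_group { int index; int reserved; };	/* index is 1-based */
struct dp_tunnel { const struct bw_group *group; };

/* Sum reserved bandwidth, counting each group at most once */
static int total_reserved(const struct dp_tunnel *tunnels, int n)
{
	int group_reserved[MAX_GROUPS + 1] = { 0 };
	int i, total = 0;

	for (i = 0; i < n; i++) {
		const struct bw_group *group = tunnels[i].group;

		if (group && group->reserved && !group_reserved[group->index])
			group_reserved[group->index] = group->reserved;
	}
	for (i = 0; i <= MAX_GROUPS; i++)
		total += group_reserved[i];
	return total;
}

int main(void)
{
	struct bw_group g1 = { 1, 1500 };
	/* Two tunnels in the same group: the 1500 Mb/s is counted once */
	struct dp_tunnel tunnels[] = { { &g1 }, { &g1 } };

	printf("reserved = %d Mb/s\n", total_reserved(tunnels, 2));
	return 0;
}
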
@@ -1181,8 +1072,6 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
* @tb: Domain structure
* @src_port: Source adapter to start the transition
* @dst_port: Destination adapter
- * @requested_up: New lower bandwidth request upstream (Mb/s)
- * @requested_down: New lower bandwidth request downstream (Mb/s)
* @keep_asym: Keep asymmetric link if preferred
*
* Goes over each link from @src_port to @dst_port and tries to
@@ -1190,8 +1079,7 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
* allows and link asymmetric preference is ignored (if @keep_asym is %false).
*/
static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
- struct tb_port *dst_port, int requested_up,
- int requested_down, bool keep_asym)
+ struct tb_port *dst_port, bool keep_asym)
{
bool clx = false, clx_disabled = false, downstream;
struct tb_switch *sw;
@@ -1230,10 +1118,10 @@ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
* guard band 10%) as the link was configured asymmetric
* already.
*/
- if (consumed_down + requested_down >= asym_threshold)
+ if (consumed_down >= asym_threshold)
continue;
} else {
- if (consumed_up + requested_up >= asym_threshold)
+ if (consumed_up >= asym_threshold)
continue;
}
@@ -1306,7 +1194,7 @@ static void tb_configure_link(struct tb_port *down, struct tb_port *up,
struct tb_port *host_port;
host_port = tb_port_at(tb_route(sw), tb->root_switch);
- tb_configure_sym(tb, host_port, up, 0, 0, false);
+ tb_configure_sym(tb, host_port, up, false);
}
/* Set the link configured */
@@ -1464,6 +1352,297 @@ out_rpm_put:
}
}
+static void
+tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
+{
+ struct tb_tunnel *first_tunnel;
+ struct tb *tb = group->tb;
+ struct tb_port *in;
+ int ret;
+
+ tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
+ group->index);
+
+ first_tunnel = NULL;
+ list_for_each_entry(in, &group->ports, group_list) {
+ int estimated_bw, estimated_up, estimated_down;
+ struct tb_tunnel *tunnel;
+ struct tb_port *out;
+
+ if (!usb4_dp_port_bandwidth_mode_enabled(in))
+ continue;
+
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
+ if (WARN_ON(!tunnel))
+ break;
+
+ if (!first_tunnel) {
+ /*
+ * Since USB3 bandwidth is shared by all DP
+ * tunnels under the host router USB4 port, even
+ * if they do not begin from the host router, we
+ * can release USB3 bandwidth just once and not
+ * for each tunnel separately.
+ */
+ first_tunnel = tunnel;
+ ret = tb_release_unused_usb3_bandwidth(tb,
+ first_tunnel->src_port, first_tunnel->dst_port);
+ if (ret) {
+ tb_tunnel_warn(tunnel,
+ "failed to release unused bandwidth\n");
+ break;
+ }
+ }
+
+ out = tunnel->dst_port;
+ ret = tb_available_bandwidth(tb, in, out, &estimated_up,
+ &estimated_down, true);
+ if (ret) {
+ tb_tunnel_warn(tunnel,
+ "failed to re-calculate estimated bandwidth\n");
+ break;
+ }
+
+ /*
+ * Estimated bandwidth includes:
+ * - already allocated bandwidth for the DP tunnel
+ * - available bandwidth along the path
+ * - bandwidth allocated for USB 3.x but not used.
+ */
+ if (tb_tunnel_direction_downstream(tunnel))
+ estimated_bw = estimated_down;
+ else
+ estimated_bw = estimated_up;
+
+ /*
+ * If there is reserved bandwidth for the group that is
+ * not yet released, we report that too.
+ */
+ tb_tunnel_dbg(tunnel,
+ "re-calculated estimated bandwidth %u (+ %u reserved) = %u Mb/s\n",
+ estimated_bw, group->reserved,
+ estimated_bw + group->reserved);
+
+ if (usb4_dp_port_set_estimated_bandwidth(in,
+ estimated_bw + group->reserved))
+ tb_tunnel_warn(tunnel,
+ "failed to update estimated bandwidth\n");
+ }
+
+ if (first_tunnel)
+ tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
+ first_tunnel->dst_port);
+
+ tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
+}
+
+static void tb_recalc_estimated_bandwidth(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ int i;
+
+ tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ if (!list_empty(&group->ports))
+ tb_recalc_estimated_bandwidth_for_group(group);
+ }
+
+ tb_dbg(tb, "bandwidth re-calculation done\n");
+}
+
+static bool __release_group_bandwidth(struct tb_bandwidth_group *group)
+{
+ if (group->reserved) {
+ tb_dbg(group->tb, "group %d released total %d Mb/s\n", group->index,
+ group->reserved);
+ group->reserved = 0;
+ return true;
+ }
+ return false;
+}
+
+static void __configure_group_sym(struct tb_bandwidth_group *group)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_port *in;
+
+ if (list_empty(&group->ports))
+ return;
+
+ /*
+ * All the tunnels in the group go through the same USB4 links
+ * so we find the first one here and pass the IN and OUT
+ * adapters to tb_configure_sym() which now transitions the
+ * links back to symmetric if bandwidth requirement < asym_threshold.
+ *
+ * We do this here to avoid unnecessary transitions (for example
+ * if the graphics released bandwidth for another tunnel in the
+ * same group).
+ */
+ in = list_first_entry(&group->ports, struct tb_port, group_list);
+ tunnel = tb_find_tunnel(group->tb, TB_TUNNEL_DP, in, NULL);
+ if (tunnel)
+ tb_configure_sym(group->tb, in, tunnel->dst_port, true);
+}
+
+static void tb_bandwidth_group_release_work(struct work_struct *work)
+{
+ struct tb_bandwidth_group *group =
+ container_of(work, typeof(*group), release_work.work);
+ struct tb *tb = group->tb;
+
+ mutex_lock(&tb->lock);
+ if (__release_group_bandwidth(group))
+ tb_recalc_estimated_bandwidth(tb);
+ __configure_group_sym(group);
+ mutex_unlock(&tb->lock);
+}
+
+static void tb_init_bandwidth_groups(struct tb_cm *tcm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ group->tb = tcm_to_tb(tcm);
+ group->index = i + 1;
+ INIT_LIST_HEAD(&group->ports);
+ INIT_DELAYED_WORK(&group->release_work,
+ tb_bandwidth_group_release_work);
+ }
+}
+
+static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
+ struct tb_port *in)
+{
+ if (!group || WARN_ON(in->group))
+ return;
+
+ in->group = group;
+ list_add_tail(&in->group_list, &group->ports);
+
+ tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
+}
+
+static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ if (list_empty(&group->ports))
+ return group;
+ }
+
+ return NULL;
+}
+
+static struct tb_bandwidth_group *
+tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
+{
+ struct tb_bandwidth_group *group;
+ struct tb_tunnel *tunnel;
+
+ /*
+ * Find all DP tunnels that go through all the same USB4 links
+ * as this one. Because we always setup tunnels the same way we
+ * can just check for the routers at both ends of the tunnels
+ * and if they are the same we have a match.
+ */
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (!tb_tunnel_is_dp(tunnel))
+ continue;
+
+ if (tunnel->src_port->sw == in->sw &&
+ tunnel->dst_port->sw == out->sw) {
+ group = tunnel->src_port->group;
+ if (group) {
+ tb_bandwidth_group_attach_port(group, in);
+ return group;
+ }
+ }
+ }
+
+ /* Pick up next available group then */
+ group = tb_find_free_bandwidth_group(tcm);
+ if (group)
+ tb_bandwidth_group_attach_port(group, in);
+ else
+ tb_port_warn(in, "no available bandwidth groups\n");
+
+ return group;
+}
+
+static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
+{
+ if (usb4_dp_port_bandwidth_mode_enabled(in)) {
+ int index, i;
+
+ index = usb4_dp_port_group_id(in);
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ if (tcm->groups[i].index == index) {
+ tb_bandwidth_group_attach_port(&tcm->groups[i], in);
+ return;
+ }
+ }
+ }
+
+ tb_attach_bandwidth_group(tcm, in, out);
+}
+
+static void tb_detach_bandwidth_group(struct tb_port *in)
+{
+ struct tb_bandwidth_group *group = in->group;
+
+ if (group) {
+ in->group = NULL;
+ list_del_init(&in->group_list);
+
+ tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
+
+ /* No more tunnels so release the reserved bandwidth if any */
+ if (list_empty(&group->ports)) {
+ cancel_delayed_work(&group->release_work);
+ __release_group_bandwidth(group);
+ }
+ }
+}
+
+static void tb_discover_tunnels(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+
+ tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
+
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (tb_tunnel_is_pci(tunnel)) {
+ struct tb_switch *parent = tunnel->dst_port->sw;
+
+ while (parent != tunnel->src_port->sw) {
+ parent->boot = true;
+ parent = tb_switch_parent(parent);
+ }
+ } else if (tb_tunnel_is_dp(tunnel)) {
+ struct tb_port *in = tunnel->src_port;
+ struct tb_port *out = tunnel->dst_port;
+
+ /* Keep the domain from powering down */
+ pm_runtime_get_sync(&in->sw->dev);
+ pm_runtime_get_sync(&out->sw->dev);
+
+ tb_discover_bandwidth_group(tcm, in, out);
+ }
+ }
+}
+
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
struct tb_port *src_port, *dst_port;
@@ -1491,7 +1670,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
* If bandwidth on a link is < asym_threshold
* transition the link to symmetric.
*/
- tb_configure_sym(tb, src_port, dst_port, 0, 0, true);
+ tb_configure_sym(tb, src_port, dst_port, true);
/* Now we can allow the domain to runtime suspend again */
pm_runtime_mark_last_busy(&dst_port->sw->dev);
pm_runtime_put_autosuspend(&dst_port->sw->dev);
@@ -1605,101 +1784,6 @@ out:
return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
-static void
-tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
-{
- struct tb_tunnel *first_tunnel;
- struct tb *tb = group->tb;
- struct tb_port *in;
- int ret;
-
- tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
- group->index);
-
- first_tunnel = NULL;
- list_for_each_entry(in, &group->ports, group_list) {
- int estimated_bw, estimated_up, estimated_down;
- struct tb_tunnel *tunnel;
- struct tb_port *out;
-
- if (!usb4_dp_port_bandwidth_mode_enabled(in))
- continue;
-
- tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
- if (WARN_ON(!tunnel))
- break;
-
- if (!first_tunnel) {
- /*
- * Since USB3 bandwidth is shared by all DP
- * tunnels under the host router USB4 port, even
- * if they do not begin from the host router, we
- * can release USB3 bandwidth just once and not
- * for each tunnel separately.
- */
- first_tunnel = tunnel;
- ret = tb_release_unused_usb3_bandwidth(tb,
- first_tunnel->src_port, first_tunnel->dst_port);
- if (ret) {
- tb_tunnel_warn(tunnel,
- "failed to release unused bandwidth\n");
- break;
- }
- }
-
- out = tunnel->dst_port;
- ret = tb_available_bandwidth(tb, in, out, &estimated_up,
- &estimated_down, true);
- if (ret) {
- tb_tunnel_warn(tunnel,
- "failed to re-calculate estimated bandwidth\n");
- break;
- }
-
- /*
- * Estimated bandwidth includes:
- * - already allocated bandwidth for the DP tunnel
- * - available bandwidth along the path
- * - bandwidth allocated for USB 3.x but not used.
- */
- tb_tunnel_dbg(tunnel,
- "re-calculated estimated bandwidth %u/%u Mb/s\n",
- estimated_up, estimated_down);
-
- if (tb_port_path_direction_downstream(in, out))
- estimated_bw = estimated_down;
- else
- estimated_bw = estimated_up;
-
- if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
- tb_tunnel_warn(tunnel,
- "failed to update estimated bandwidth\n");
- }
-
- if (first_tunnel)
- tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
- first_tunnel->dst_port);
-
- tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
-}
-
-static void tb_recalc_estimated_bandwidth(struct tb *tb)
-{
- struct tb_cm *tcm = tb_priv(tb);
- int i;
-
- tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
-
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- struct tb_bandwidth_group *group = &tcm->groups[i];
-
- if (!list_empty(&group->ports))
- tb_recalc_estimated_bandwidth_for_group(group);
- }
-
- tb_dbg(tb, "bandwidth re-calculation done\n");
-}
-
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
struct tb_port *host_port, *port;
@@ -1717,6 +1801,12 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
continue;
}
+ /* Needs to be on different routers */
+ if (in->sw == port->sw) {
+ tb_port_dbg(port, "skipping DP OUT on same router\n");
+ continue;
+ }
+
tb_port_dbg(port, "DP OUT available\n");
/*
@@ -1737,49 +1827,15 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
return NULL;
}
-static bool tb_tunnel_one_dp(struct tb *tb)
+static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
+ struct tb_port *out)
{
int available_up, available_down, ret, link_nr;
struct tb_cm *tcm = tb_priv(tb);
- struct tb_port *port, *in, *out;
int consumed_up, consumed_down;
struct tb_tunnel *tunnel;
/*
- * Find pair of inactive DP IN and DP OUT adapters and then
- * establish a DP tunnel between them.
- */
- tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
-
- in = NULL;
- out = NULL;
- list_for_each_entry(port, &tcm->dp_resources, list) {
- if (!tb_port_is_dpin(port))
- continue;
-
- if (tb_port_is_enabled(port)) {
- tb_port_dbg(port, "DP IN in use\n");
- continue;
- }
-
- in = port;
- tb_port_dbg(in, "DP IN available\n");
-
- out = tb_find_dp_out(tb, port);
- if (out)
- break;
- }
-
- if (!in) {
- tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
- return false;
- }
- if (!out) {
- tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
- return false;
- }
-
- /*
* This is only applicable to links that are not bonded (so
* when Thunderbolt 1 hardware is involved somewhere in the
* topology). For these try to share the DP bandwidth between
@@ -1839,15 +1895,19 @@ static bool tb_tunnel_one_dp(struct tb *tb)
goto err_free;
}
+	/* If reading the tunnel's consumed bandwidth fails, tear it down */
+ ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
+ if (ret)
+ goto err_deactivate;
+
list_add_tail(&tunnel->list, &tcm->tunnel_list);
- tb_reclaim_usb3_bandwidth(tb, in, out);
+ tb_reclaim_usb3_bandwidth(tb, in, out);
/*
* Transition the links to asymmetric if the consumption exceeds
* the threshold.
*/
- if (!tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down))
- tb_configure_asym(tb, in, out, consumed_up, consumed_down);
+ tb_configure_asym(tb, in, out, consumed_up, consumed_down);
/* Update the domain with the new bandwidth estimation */
tb_recalc_estimated_bandwidth(tb);
@@ -1859,6 +1919,8 @@ static bool tb_tunnel_one_dp(struct tb *tb)
tb_increase_tmu_accuracy(tunnel);
return true;
+err_deactivate:
+ tb_tunnel_deactivate(tunnel);
err_free:
tb_tunnel_free(tunnel);
err_reclaim_usb:
@@ -1878,13 +1940,86 @@ err_rpm_put:
static void tb_tunnel_dp(struct tb *tb)
{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *port, *in, *out;
+
if (!tb_acpi_may_tunnel_dp()) {
tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
return;
}
- while (tb_tunnel_one_dp(tb))
- ;
+ /*
+ * Find pair of inactive DP IN and DP OUT adapters and then
+ * establish a DP tunnel between them.
+ */
+ tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
+
+ in = NULL;
+ out = NULL;
+ list_for_each_entry(port, &tcm->dp_resources, list) {
+ if (!tb_port_is_dpin(port))
+ continue;
+
+ if (tb_port_is_enabled(port)) {
+ tb_port_dbg(port, "DP IN in use\n");
+ continue;
+ }
+
+ in = port;
+ tb_port_dbg(in, "DP IN available\n");
+
+ out = tb_find_dp_out(tb, port);
+ if (out)
+ tb_tunnel_one_dp(tb, in, out);
+ else
+ tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n");
+ }
+
+ if (!in)
+ tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
+}
+
+static void tb_enter_redrive(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+
+ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
+ return;
+
+ /*
+ * If we get hot-unplug for the DP IN port of the host router
+ * and the DP resource is not available anymore it means there
+ * is a monitor connected directly to the Type-C port and we are
+ * in "redrive" mode. For this to work we cannot enter RTD3 so
+ * we bump up the runtime PM reference count here.
+ */
+ if (!tb_port_is_dpin(port))
+ return;
+ if (tb_route(sw))
+ return;
+ if (!tb_switch_query_dp_resource(sw, port)) {
+ port->redrive = true;
+ pm_runtime_get(&sw->dev);
+ tb_port_dbg(port, "enter redrive mode, keeping powered\n");
+ }
+}
+
+static void tb_exit_redrive(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+
+ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
+ return;
+
+ if (!tb_port_is_dpin(port))
+ return;
+ if (tb_route(sw))
+ return;
+ if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
+ port->redrive = false;
+ pm_runtime_put(&sw->dev);
+ tb_port_dbg(port, "exit redrive mode\n");
+ }
}
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
@@ -1903,7 +2038,10 @@ static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
}
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
- tb_deactivate_and_free_tunnel(tunnel);
+ if (tunnel)
+ tb_deactivate_and_free_tunnel(tunnel);
+ else
+ tb_enter_redrive(port);
list_del_init(&port->list);
/*
@@ -1930,6 +2068,7 @@ static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
tb_port_dbg(port, "DP %s resource available after hotplug\n",
tb_port_is_dpin(port) ? "IN" : "OUT");
list_add_tail(&port->list, &tcm->dp_resources);
+ tb_exit_redrive(port);
/* Look for suitable DP IN <-> DP OUT pairs now */
tb_tunnel_dp(tb);
@@ -2243,8 +2382,10 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
int allocated_up, allocated_down, available_up, available_down, ret;
int requested_up_corrected, requested_down_corrected, granularity;
int max_up, max_down, max_up_rounded, max_down_rounded;
+ struct tb_bandwidth_group *group;
struct tb *tb = tunnel->tb;
struct tb_port *in, *out;
+ bool downstream;
ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
if (ret)
@@ -2270,11 +2411,11 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
*/
ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
if (ret)
- return ret;
+ goto fail;
ret = usb4_dp_port_granularity(in);
if (ret < 0)
- return ret;
+ goto fail;
granularity = ret;
max_up_rounded = roundup(max_up, granularity);
@@ -2304,24 +2445,48 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
"bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
requested_up_corrected, requested_down_corrected,
max_up_rounded, max_down_rounded);
- return -ENOBUFS;
+ ret = -ENOBUFS;
+ goto fail;
}
+ downstream = tb_tunnel_direction_downstream(tunnel);
+ group = in->group;
+
if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
(*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
- /*
- * If bandwidth on a link is < asym_threshold transition
- * the link to symmetric.
- */
- tb_configure_sym(tb, in, out, *requested_up, *requested_down, true);
- /*
- * If requested bandwidth is less or equal than what is
- * currently allocated to that tunnel we simply change
- * the reservation of the tunnel. Since all the tunnels
- * going out from the same USB4 port are in the same
- * group the released bandwidth will be taken into
- * account for the other tunnels automatically below.
- */
+ if (tunnel->bw_mode) {
+ int reserved;
+ /*
+			 * If the requested bandwidth is less than or equal
+			 * to what is currently allocated to that tunnel, we
+			 * simply change the reservation of the tunnel and
+			 * add the released bandwidth to the group for the
+			 * next 10s. Then we release it for others to use.
+ */
+ if (downstream)
+ reserved = allocated_down - *requested_down;
+ else
+ reserved = allocated_up - *requested_up;
+
+ if (reserved > 0) {
+ group->reserved += reserved;
+ tb_dbg(tb, "group %d reserved %d total %d Mb/s\n",
+ group->index, reserved, group->reserved);
+
+ /*
+				 * If the release was not already pending,
+				 * schedule it now. If it was, postpone it
+				 * for another 10s (unless it is already
+				 * running, in which case the 10s already
+				 * expired and we should give the reserved
+				 * bandwidth back to others).
+ */
+ mod_delayed_work(system_wq, &group->release_work,
+ msecs_to_jiffies(TB_RELEASE_BW_TIMEOUT));
+ }
+ }
+
return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
}
@@ -2332,7 +2497,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
*/
ret = tb_release_unused_usb3_bandwidth(tb, in, out);
if (ret)
- return ret;
+ goto fail;
/*
* Then go over all tunnels that cross the same USB4 ports (they
@@ -2344,11 +2509,15 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
if (ret)
goto reclaim;
- tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d Mb/s\n",
- available_up, available_down);
+ tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d (+ %u reserved) Mb/s\n",
+ available_up, available_down, group->reserved);
+
+ if ((*requested_up >= 0 &&
+ available_up + group->reserved >= requested_up_corrected) ||
+ (*requested_down >= 0 &&
+ available_down + group->reserved >= requested_down_corrected)) {
+ int released = 0;
- if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
- (*requested_down >= 0 && available_down >= requested_down_corrected)) {
/*
* If bandwidth on a link is >= asym_threshold
* transition the link to asymmetric.
@@ -2356,15 +2525,28 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
ret = tb_configure_asym(tb, in, out, *requested_up,
*requested_down);
if (ret) {
- tb_configure_sym(tb, in, out, 0, 0, true);
- return ret;
+ tb_configure_sym(tb, in, out, true);
+ goto fail;
}
ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
if (ret) {
tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
- tb_configure_sym(tb, in, out, 0, 0, true);
+ tb_configure_sym(tb, in, out, true);
+ }
+
+ if (downstream) {
+ if (*requested_down > available_down)
+ released = *requested_down - available_down;
+ } else {
+ if (*requested_up > available_up)
+ released = *requested_up - available_up;
+ }
+ if (released) {
+ group->reserved -= released;
+ tb_dbg(tb, "group %d released %d total %d Mb/s\n",
+ group->index, released, group->reserved);
}
} else {
ret = -ENOBUFS;
@@ -2372,6 +2554,18 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
reclaim:
tb_reclaim_usb3_bandwidth(tb, in, out);
+fail:
+ if (ret && ret != -ENODEV) {
+ /*
+		 * Write back the same allocated bandwidth (so no change);
+		 * this makes the DPTX request fail on the graphics side.
+ */
+ tb_tunnel_dbg(tunnel,
+ "failing the request by rewriting allocated %d/%d Mb/s\n",
+ allocated_up, allocated_down);
+ tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down);
+ }
+
return ret;
}
@@ -2379,11 +2573,11 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
{
struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
int requested_bw, requested_up, requested_down, ret;
- struct tb_port *in, *out;
struct tb_tunnel *tunnel;
struct tb *tb = ev->tb;
struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw;
+ struct tb_port *in;
pm_runtime_get_sync(&tb->dev);
@@ -2406,32 +2600,48 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
tb_port_dbg(in, "handling bandwidth allocation request\n");
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
+ if (!tunnel) {
+ tb_port_warn(in, "failed to find tunnel\n");
+ goto put_sw;
+ }
+
if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
- tb_port_warn(in, "bandwidth allocation mode not enabled\n");
+ if (tunnel->bw_mode) {
+ /*
+ * Reset the tunnel back to use the legacy
+ * allocation.
+ */
+ tunnel->bw_mode = false;
+ tb_port_dbg(in, "DPTX disabled bandwidth allocation mode\n");
+ } else {
+ tb_port_warn(in, "bandwidth allocation mode not enabled\n");
+ }
goto put_sw;
}
ret = usb4_dp_port_requested_bandwidth(in);
if (ret < 0) {
- if (ret == -ENODATA)
- tb_port_dbg(in, "no bandwidth request active\n");
- else
+ if (ret == -ENODATA) {
+ /*
+ * There is no request active so this means the
+ * BW allocation mode was enabled from graphics
+ * side. At this point we know that the graphics
+			 * driver has read the DPRX capabilities so we
+			 * can offer a better bandwidth estimation.
+ */
+ tb_port_dbg(in, "DPTX enabled bandwidth allocation mode, updating estimated bandwidth\n");
+ tb_recalc_estimated_bandwidth(tb);
+ } else {
tb_port_warn(in, "failed to read requested bandwidth\n");
+ }
goto put_sw;
}
requested_bw = ret;
tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
- tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
- if (!tunnel) {
- tb_port_warn(in, "failed to find tunnel\n");
- goto put_sw;
- }
-
- out = tunnel->dst_port;
-
- if (tb_port_path_direction_downstream(in, out)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
requested_up = -1;
requested_down = requested_bw;
} else {
@@ -2560,6 +2770,16 @@ static void tb_stop(struct tb *tb)
tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
+static void tb_deinit(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ int i;
+
+ /* Cancel all the release bandwidth workers */
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++)
+ cancel_delayed_work_sync(&tcm->groups[i].release_work);
+}
+
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
if (tb_is_switch(dev)) {
@@ -2581,9 +2801,10 @@ static int tb_scan_finalize_switch(struct device *dev, void *data)
return 0;
}
-static int tb_start(struct tb *tb)
+static int tb_start(struct tb *tb, bool reset)
{
struct tb_cm *tcm = tb_priv(tb);
+ bool discover = true;
int ret;
tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
@@ -2622,12 +2843,28 @@ static int tb_start(struct tb *tb)
tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
/* Enable TMU if it is off */
tb_switch_tmu_enable(tb->root_switch);
- /* Full scan to discover devices added before the driver was loaded. */
- tb_scan_switch(tb->root_switch);
- /* Find out tunnels created by the boot firmware */
- tb_discover_tunnels(tb);
- /* Add DP resources from the DP tunnels created by the boot firmware */
- tb_discover_dp_resources(tb);
+
+ /*
+ * Boot firmware might have created tunnels of its own. Since we
+ * cannot be sure they are usable for us, tear them down and
+	 * reset the ports to handle it as a new hotplug for USB4 v1
+ * routers (for USB4 v2 and beyond we already do host reset).
+ */
+ if (reset && tb_switch_is_usb4(tb->root_switch)) {
+ discover = false;
+ if (usb4_switch_version(tb->root_switch) == 1)
+ tb_switch_reset(tb->root_switch);
+ }
+
+ if (discover) {
+ /* Full scan to discover devices added before the driver was loaded. */
+ tb_scan_switch(tb->root_switch);
+ /* Find out tunnels created by the boot firmware */
+ tb_discover_tunnels(tb);
+ /* Add DP resources from the DP tunnels created by the boot firmware */
+ tb_discover_dp_resources(tb);
+ }
+
/*
* If the boot firmware did not create USB 3.x tunnels create them
* now for the whole topology.
@@ -2698,10 +2935,14 @@ static int tb_resume_noirq(struct tb *tb)
tb_dbg(tb, "resuming...\n");
- /* remove any pci devices the firmware might have setup */
- tb_switch_reset(tb->root_switch);
+ /*
+ * For non-USB4 hosts (Apple systems) remove any PCIe devices
+ * the firmware might have setup.
+ */
+ if (!tb_switch_is_usb4(tb->root_switch))
+ tb_switch_reset(tb->root_switch);
- tb_switch_resume(tb->root_switch);
+ tb_switch_resume(tb->root_switch, false);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
tb_restore_children(tb->root_switch);
@@ -2827,7 +3068,7 @@ static int tb_runtime_resume(struct tb *tb)
struct tb_tunnel *tunnel, *n;
mutex_lock(&tb->lock);
- tb_switch_resume(tb->root_switch);
+ tb_switch_resume(tb->root_switch, true);
tb_free_invalid_tunnels(tb);
tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
@@ -2847,6 +3088,7 @@ static int tb_runtime_resume(struct tb *tb)
static const struct tb_cm_ops tb_cm_ops = {
.start = tb_start,
.stop = tb_stop,
+ .deinit = tb_deinit,
.suspend_noirq = tb_suspend_noirq,
.resume_noirq = tb_resume_noirq,
.freeze_noirq = tb_freeze_noirq,
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 997c5a5369052..18aae4ccaed59 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -23,6 +23,8 @@
#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0)
/* Disable CLx if not supported */
#define QUIRK_NO_CLX BIT(1)
+/* Need to keep power on while USB4 port is in redrive mode */
+#define QUIRK_KEEP_POWER_IN_DP_REDRIVE BIT(2)
/**
* struct tb_nvm - Structure holding NVM information
@@ -217,6 +219,11 @@ struct tb_switch {
* @tb: Pointer to the domain the group belongs to
* @index: Index of the group (aka Group_ID). Valid values %1-%7
* @ports: DP IN adapters belonging to this group are linked here
+ * @reserved: Bandwidth released by one tunnel in the group, available
+ * to others. This is reported as part of estimated_bw for
+ * the group.
+ * @release_work: Worker to release the @reserved if it is not used by
+ * any of the tunnels.
*
* Any tunnel that requires isochronous bandwidth (that's DP for now) is
* attached to a bandwidth group. All tunnels going through the same
@@ -227,6 +234,8 @@ struct tb_bandwidth_group {
struct tb *tb;
int index;
struct list_head ports;
+ int reserved;
+ struct delayed_work release_work;
};
/**
@@ -258,6 +267,7 @@ struct tb_bandwidth_group {
* @group_list: The adapter is linked to the group's list of ports through this
* @max_bw: Maximum possible bandwidth through this adapter if set to
* non-zero.
+ * @redrive: For DP IN, if true the adapter is in redrive mode.
*
* In USB4 terminology this structure represents an adapter (protocol or
* lane adapter).
@@ -286,6 +296,7 @@ struct tb_port {
struct tb_bandwidth_group *group;
struct list_head group_list;
unsigned int max_bw;
+ bool redrive;
};
/**
@@ -452,6 +463,8 @@ struct tb_path {
* ICM to send driver ready message to the firmware.
* @start: Starts the domain
* @stop: Stops the domain
+ * @deinit: Perform any cleanup after the domain is stopped but before
+ * it is unregistered. Called without @tb->lock taken. Optional.
* @suspend_noirq: Connection manager specific suspend_noirq
* @resume_noirq: Connection manager specific resume_noirq
* @suspend: Connection manager specific suspend
@@ -483,8 +496,9 @@ struct tb_path {
*/
struct tb_cm_ops {
int (*driver_ready)(struct tb *tb);
- int (*start)(struct tb *tb);
+ int (*start)(struct tb *tb, bool reset);
void (*stop)(struct tb *tb);
+ void (*deinit)(struct tb *tb);
int (*suspend_noirq)(struct tb *tb);
int (*resume_noirq)(struct tb *tb);
int (*suspend)(struct tb *tb);
@@ -735,10 +749,10 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
struct tb *icm_probe(struct tb_nhi *nhi);
struct tb *tb_probe(struct tb_nhi *nhi);
-extern struct device_type tb_domain_type;
-extern struct device_type tb_retimer_type;
-extern struct device_type tb_switch_type;
-extern struct device_type usb4_port_device_type;
+extern const struct device_type tb_domain_type;
+extern const struct device_type tb_retimer_type;
+extern const struct device_type tb_switch_type;
+extern const struct device_type usb4_port_device_type;
int tb_domain_init(void);
void tb_domain_exit(void);
@@ -746,7 +760,7 @@ int tb_xdomain_init(void);
void tb_xdomain_exit(void);
struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize);
-int tb_domain_add(struct tb *tb);
+int tb_domain_add(struct tb *tb, bool reset);
void tb_domain_remove(struct tb *tb);
int tb_domain_suspend_noirq(struct tb *tb);
int tb_domain_resume_noirq(struct tb *tb);
@@ -813,7 +827,7 @@ int tb_switch_configuration_valid(struct tb_switch *sw);
int tb_switch_add(struct tb_switch *sw);
void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw, bool runtime);
-int tb_switch_resume(struct tb_switch *sw);
+int tb_switch_resume(struct tb_switch *sw, bool runtime);
int tb_switch_reset(struct tb_switch *sw);
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
u32 value, int timeout_msec);
@@ -1150,6 +1164,7 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
+int tb_path_deactivate_hop(struct tb_port *port, int hop_index);
bool tb_path_is_invalid(struct tb_path *path);
bool tb_path_port_on_path(const struct tb_path *path,
const struct tb_port *port);
@@ -1169,6 +1184,7 @@ int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
+int tb_lc_reset_port(struct tb_port *port);
int tb_lc_configure_port(struct tb_port *port);
void tb_lc_unconfigure_port(struct tb_port *port);
int tb_lc_configure_xdomain(struct tb_port *port);
@@ -1272,6 +1288,7 @@ static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
return usb4_switch_version(sw) > 0;
}
+void usb4_switch_check_wakes(struct tb_switch *sw);
int usb4_switch_setup(struct tb_switch *sw);
int usb4_switch_configuration_valid(struct tb_switch *sw);
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
@@ -1301,6 +1318,7 @@ void usb4_switch_remove_ports(struct tb_switch *sw);
int usb4_port_unlock(struct tb_port *port);
int usb4_port_hotplug_enable(struct tb_port *port);
+int usb4_port_reset(struct tb_port *port);
int usb4_port_configure(struct tb_port *port);
void usb4_port_unconfigure(struct tb_port *port);
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd);
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 6f798f6a2b848..4e43b47f9f119 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -194,6 +194,8 @@ struct tb_regs_switch_header {
#define USB4_VERSION_MAJOR_MASK GENMASK(7, 5)
#define ROUTER_CS_1 0x01
+#define ROUTER_CS_3 0x03
+#define ROUTER_CS_3_V BIT(31)
#define ROUTER_CS_4 0x04
/* Used with the router cmuv field */
#define ROUTER_CS_4_CMUV_V1 0x10
@@ -389,6 +391,7 @@ struct tb_regs_port_header {
#define PORT_CS_18_CSA BIT(22)
#define PORT_CS_18_TIP BIT(24)
#define PORT_CS_19 0x13
+#define PORT_CS_19_DPR BIT(0)
#define PORT_CS_19_PC BIT(3)
#define PORT_CS_19_PID BIT(4)
#define PORT_CS_19_WOC BIT(16)
@@ -584,6 +587,9 @@ struct tb_regs_hop {
#define TB_LC_POWER 0x740
/* Link controller registers */
+#define TB_LC_PORT_MODE 0x26
+#define TB_LC_PORT_MODE_DPR BIT(0)
+
#define TB_LC_CS_42 0x2a
#define TB_LC_CS_42_USB_PLUGGED BIT(31)
diff --git a/drivers/thunderbolt/trace.h b/drivers/thunderbolt/trace.h
new file mode 100644
index 0000000000000..4dccfcf7af6a3
--- /dev/null
+++ b/drivers/thunderbolt/trace.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt tracing support
+ *
+ * Copyright (C) 2024, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Gil Fine <gil.fine@intel.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thunderbolt
+
+#if !defined(TB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define TB_TRACE_H_
+
+#include <linux/trace_seq.h>
+#include <linux/tracepoint.h>
+
+#include "tb_msgs.h"
+
+#define tb_cfg_type_name(type) { type, #type }
+#define show_type_name(val) \
+ __print_symbolic(val, \
+ tb_cfg_type_name(TB_CFG_PKG_READ), \
+ tb_cfg_type_name(TB_CFG_PKG_WRITE), \
+ tb_cfg_type_name(TB_CFG_PKG_ERROR), \
+ tb_cfg_type_name(TB_CFG_PKG_NOTIFY_ACK), \
+ tb_cfg_type_name(TB_CFG_PKG_EVENT), \
+ tb_cfg_type_name(TB_CFG_PKG_XDOMAIN_REQ), \
+ tb_cfg_type_name(TB_CFG_PKG_XDOMAIN_RESP), \
+ tb_cfg_type_name(TB_CFG_PKG_OVERRIDE), \
+ tb_cfg_type_name(TB_CFG_PKG_RESET), \
+ tb_cfg_type_name(TB_CFG_PKG_ICM_EVENT), \
+ tb_cfg_type_name(TB_CFG_PKG_ICM_CMD), \
+ tb_cfg_type_name(TB_CFG_PKG_ICM_RESP))
+
+#ifndef TB_TRACE_HELPERS
+#define TB_TRACE_HELPERS
+static inline const char *show_data_read_write(struct trace_seq *p,
+ const u32 *data)
+{
+ const struct cfg_read_pkg *msg = (const struct cfg_read_pkg *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "offset=%#x, len=%u, port=%d, config=%#x, seq=%d, ",
+ msg->addr.offset, msg->addr.length, msg->addr.port,
+ msg->addr.space, msg->addr.seq);
+
+ return ret;
+}
+
+static inline const char *show_data_error(struct trace_seq *p, const u32 *data)
+{
+ const struct cfg_error_pkg *msg = (const struct cfg_error_pkg *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "error=%#x, port=%d, plug=%#x, ", msg->error,
+ msg->port, msg->pg);
+
+ return ret;
+}
+
+static inline const char *show_data_event(struct trace_seq *p, const u32 *data)
+{
+ const struct cfg_event_pkg *msg = (const struct cfg_event_pkg *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "port=%d, unplug=%#x, ", msg->port, msg->unplug);
+
+ return ret;
+}
+
+static inline const char *show_route(struct trace_seq *p, const u32 *data)
+{
+ const struct tb_cfg_header *header = (const struct tb_cfg_header *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "route=%llx, ", tb_cfg_get_route(header));
+
+ return ret;
+}
+
+static inline const char *show_data(struct trace_seq *p, u8 type,
+ const u32 *data, u32 length)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ const char *prefix = "";
+ int i;
+
+ show_route(p, data);
+
+ switch (type) {
+ case TB_CFG_PKG_READ:
+ case TB_CFG_PKG_WRITE:
+ show_data_read_write(p, data);
+ break;
+
+ case TB_CFG_PKG_ERROR:
+ show_data_error(p, data);
+ break;
+
+ case TB_CFG_PKG_EVENT:
+ show_data_event(p, data);
+ break;
+
+ default:
+ break;
+ }
+
+ trace_seq_printf(p, "data=[");
+ for (i = 0; i < length; i++) {
+ trace_seq_printf(p, "%s0x%08x", prefix, data[i]);
+ prefix = ", ";
+ }
+ trace_seq_printf(p, "]");
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+#endif
+
+DECLARE_EVENT_CLASS(tb_raw,
+ TP_PROTO(int index, u8 type, const void *data, size_t size),
+ TP_ARGS(index, type, data, size),
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(u8, type)
+ __field(size_t, size)
+ __dynamic_array(u32, data, size / 4)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->type = type;
+ __entry->size = size / 4;
+ memcpy(__get_dynamic_array(data), data, size);
+ ),
+ TP_printk("type=%s, size=%zd, domain=%d, %s",
+ show_type_name(__entry->type), __entry->size, __entry->index,
+ show_data(p, __entry->type, __get_dynamic_array(data),
+ __entry->size)
+ )
+);
+
+DEFINE_EVENT(tb_raw, tb_tx,
+ TP_PROTO(int index, u8 type, const void *data, size_t size),
+ TP_ARGS(index, type, data, size)
+);
+
+DEFINE_EVENT(tb_raw, tb_event,
+ TP_PROTO(int index, u8 type, const void *data, size_t size),
+ TP_ARGS(index, type, data, size)
+);
+
+TRACE_EVENT(tb_rx,
+ TP_PROTO(int index, u8 type, const void *data, size_t size, bool dropped),
+ TP_ARGS(index, type, data, size, dropped),
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(u8, type)
+ __field(size_t, size)
+ __dynamic_array(u32, data, size / 4)
+ __field(bool, dropped)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->type = type;
+ __entry->size = size / 4;
+ memcpy(__get_dynamic_array(data), data, size);
+ __entry->dropped = dropped;
+ ),
+ TP_printk("type=%s, dropped=%u, size=%zd, domain=%d, %s",
+ show_type_name(__entry->type), __entry->dropped,
+ __entry->size, __entry->index,
+ show_data(p, __entry->type, __get_dynamic_array(data),
+ __entry->size)
+ )
+);
+
+#endif /* TB_TRACE_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 6fffb2c82d3d1..cb6609a56a03f 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -706,7 +706,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
"DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
out_rate, out_lanes, bw);
- if (tb_port_path_direction_downstream(in, out))
+ if (tb_tunnel_direction_downstream(tunnel))
max_bw = tunnel->max_down;
else
max_bw = tunnel->max_up;
@@ -831,7 +831,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
* max_up/down fields. For discovery we just read what the
* estimation was set to.
*/
- if (tb_port_path_direction_downstream(in, out))
+ if (tb_tunnel_direction_downstream(tunnel))
estimated_bw = tunnel->max_down;
else
estimated_bw = tunnel->max_up;
@@ -926,12 +926,18 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
return 0;
}
-/* max_bw is rounded up to next granularity */
+/**
+ * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth
+ * @tunnel: DP tunnel to check
+ * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
+ *
+ * Returns maximum possible bandwidth for this tunnel in Mb/s.
+ */
static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
- int *max_bw)
+ int *max_bw_rounded)
{
struct tb_port *in = tunnel->src_port;
- int ret, rate, lanes, nrd_bw;
+ int ret, rate, lanes, max_bw;
u32 cap;
/*
@@ -947,41 +953,26 @@ static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
return ret;
rate = tb_dp_cap_get_rate_ext(cap);
- if (tb_dp_is_uhbr_rate(rate)) {
- /*
- * When UHBR is used there is no reduction in lanes so
- * we can use this directly.
- */
- lanes = tb_dp_cap_get_lanes(cap);
- } else {
- /*
- * If there is no UHBR supported then check the
- * non-reduced rate and lanes.
- */
- ret = usb4_dp_port_nrd(in, &rate, &lanes);
- if (ret)
- return ret;
- }
+ lanes = tb_dp_cap_get_lanes(cap);
- nrd_bw = tb_dp_bandwidth(rate, lanes);
+ max_bw = tb_dp_bandwidth(rate, lanes);
- if (max_bw) {
+ if (max_bw_rounded) {
ret = usb4_dp_port_granularity(in);
if (ret < 0)
return ret;
- *max_bw = roundup(nrd_bw, ret);
+ *max_bw_rounded = roundup(max_bw, ret);
}
- return nrd_bw;
+ return max_bw;
}
static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
int *consumed_up,
int *consumed_down)
{
- struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
- int ret, allocated_bw, max_bw;
+ int ret, allocated_bw, max_bw_rounded;
if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
@@ -995,13 +986,13 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
return ret;
allocated_bw = ret;
- ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
if (ret < 0)
return ret;
- if (allocated_bw == max_bw)
+ if (allocated_bw == max_bw_rounded)
allocated_bw = ret;
- if (tb_port_path_direction_downstream(in, out)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*consumed_up = 0;
*consumed_down = allocated_bw;
} else {
@@ -1015,7 +1006,6 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
int *allocated_down)
{
- struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
/*
@@ -1023,20 +1013,21 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
* Otherwise we read it from the DPRX.
*/
if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
- int ret, allocated_bw, max_bw;
+ int ret, allocated_bw, max_bw_rounded;
ret = usb4_dp_port_allocated_bandwidth(in);
if (ret < 0)
return ret;
allocated_bw = ret;
- ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel,
+ &max_bw_rounded);
if (ret < 0)
return ret;
- if (allocated_bw == max_bw)
+ if (allocated_bw == max_bw_rounded)
allocated_bw = ret;
- if (tb_port_path_direction_downstream(in, out)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*allocated_up = 0;
*allocated_down = allocated_bw;
} else {
@@ -1053,26 +1044,25 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
int *alloc_down)
{
- struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
- int max_bw, ret, tmp;
+ int max_bw_rounded, ret, tmp;
if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
- ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
if (ret < 0)
return ret;
- if (tb_port_path_direction_downstream(in, out)) {
- tmp = min(*alloc_down, max_bw);
+ if (tb_tunnel_direction_downstream(tunnel)) {
+ tmp = min(*alloc_down, max_bw_rounded);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
return ret;
*alloc_down = tmp;
*alloc_up = 0;
} else {
- tmp = min(*alloc_up, max_bw);
+ tmp = min(*alloc_up, max_bw_rounded);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
return ret;
@@ -1150,17 +1140,16 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
int *max_down)
{
- struct tb_port *in = tunnel->src_port;
int ret;
- if (!usb4_dp_port_bandwidth_mode_enabled(in))
+ if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port))
return -EOPNOTSUPP;
ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
if (ret < 0)
return ret;
- if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*max_up = 0;
*max_down = ret;
} else {
@@ -1174,8 +1163,7 @@ static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down)
{
- struct tb_port *in = tunnel->src_port;
- const struct tb_switch *sw = in->sw;
+ const struct tb_switch *sw = tunnel->src_port->sw;
u32 rate = 0, lanes = 0;
int ret;
@@ -1196,17 +1184,13 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
/*
* Then see if the DPRX negotiation is ready and if yes
* return that bandwidth (it may be smaller than the
- * reduced one). Otherwise return the remote (possibly
- * reduced) caps.
+	 * reduced one). According to the VESA spec, the DPRX
+	 * negotiation shall complete within 5 seconds after the
+	 * tunnel is established. We give it 100ms extra just in case.
*/
- ret = tb_dp_wait_dprx(tunnel, 150);
- if (ret) {
- if (ret == -ETIMEDOUT)
- ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
- &rate, &lanes);
- if (ret)
- return ret;
- }
+ ret = tb_dp_wait_dprx(tunnel, 5100);
+ if (ret)
+ return ret;
ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
if (ret)
return ret;
@@ -1221,7 +1205,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
return 0;
}
- if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*consumed_up = 0;
*consumed_down = tb_dp_bandwidth(rate, lanes);
} else {
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index b4cff5482112d..1a27ccd08b861 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -139,6 +139,12 @@ static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
return tunnel->type == TB_TUNNEL_USB3;
}
+static inline bool tb_tunnel_direction_downstream(const struct tb_tunnel *tunnel)
+{
+ return tb_port_path_direction_downstream(tunnel->src_port,
+ tunnel->dst_port);
+}
+
const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel);
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 1515eff8cc3e2..78b06e922fdac 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -155,7 +155,13 @@ static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
tx_dwords, rx_data, rx_dwords);
}
-static void usb4_switch_check_wakes(struct tb_switch *sw)
+/**
+ * usb4_switch_check_wakes() - Check for wakes and notify PM core about them
+ * @sw: Router whose wakes to check
+ *
+ * Checks for wakes that occurred during suspend and notifies the PM core about them.
+ */
+void usb4_switch_check_wakes(struct tb_switch *sw)
{
bool wakeup_usb4 = false;
struct usb4_port *usb4;
@@ -163,9 +169,6 @@ static void usb4_switch_check_wakes(struct tb_switch *sw)
bool wakeup = false;
u32 val;
- if (!device_may_wakeup(&sw->dev))
- return;
-
if (tb_route(sw)) {
if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
return;
@@ -244,8 +247,6 @@ int usb4_switch_setup(struct tb_switch *sw)
u32 val = 0;
int ret;
- usb4_switch_check_wakes(sw);
-
if (!tb_route(sw))
return 0;
@@ -1113,6 +1114,45 @@ int usb4_port_hotplug_enable(struct tb_port *port)
return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
}
+/**
+ * usb4_port_reset() - Issue downstream port reset
+ * @port: USB4 port to reset
+ *
+ * Issues downstream port reset to @port.
+ */
+int usb4_port_reset(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ if (!port->cap_usb4)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ val |= PORT_CS_19_DPR;
+
+ ret = tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ fsleep(10000);
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ val &= ~PORT_CS_19_DPR;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+}
+
static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
int ret;
@@ -2819,8 +2859,10 @@ static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
usleep_range(50, 100);
} while (ktime_before(ktime_get(), end));
- if (val & ADP_DP_CS_8_DR)
+ if (val & ADP_DP_CS_8_DR) {
+ tb_port_warn(port, "timeout waiting for DPTX request to clear\n");
return -ETIMEDOUT;
+ }
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
diff --git a/drivers/thunderbolt/usb4_port.c b/drivers/thunderbolt/usb4_port.c
index e355bfd6343ff..5150879888cac 100644
--- a/drivers/thunderbolt/usb4_port.c
+++ b/drivers/thunderbolt/usb4_port.c
@@ -243,7 +243,7 @@ static void usb4_port_device_release(struct device *dev)
kfree(usb4);
}
-struct device_type usb4_port_device_type = {
+const struct device_type usb4_port_device_type = {
.name = "usb4_port",
.groups = usb4_port_device_groups,
.release = usb4_port_device_release,
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 9495742913d5c..940ae97987ff3 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -997,12 +997,12 @@ static void tb_service_release(struct device *dev)
struct tb_xdomain *xd = tb_service_parent(svc);
tb_service_debugfs_remove(svc);
- ida_simple_remove(&xd->service_ids, svc->id);
+ ida_free(&xd->service_ids, svc->id);
kfree(svc->key);
kfree(svc);
}
-struct device_type tb_service_type = {
+const struct device_type tb_service_type = {
.name = "thunderbolt_service",
.groups = tb_service_attr_groups,
.uevent = tb_service_uevent,
@@ -1099,7 +1099,7 @@ static void enumerate_services(struct tb_xdomain *xd)
break;
}
- id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&xd->service_ids, GFP_KERNEL);
if (id < 0) {
kfree(svc->key);
kfree(svc);
@@ -1791,13 +1791,13 @@ static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
switch (xd->link_width) {
case TB_LINK_WIDTH_SINGLE:
- case TB_LINK_WIDTH_ASYM_RX:
+ case TB_LINK_WIDTH_ASYM_TX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
- case TB_LINK_WIDTH_ASYM_TX:
+ case TB_LINK_WIDTH_ASYM_RX:
width = 3;
break;
default:
@@ -1817,13 +1817,13 @@ static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
switch (xd->link_width) {
case TB_LINK_WIDTH_SINGLE:
- case TB_LINK_WIDTH_ASYM_TX:
+ case TB_LINK_WIDTH_ASYM_RX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
- case TB_LINK_WIDTH_ASYM_RX:
+ case TB_LINK_WIDTH_ASYM_TX:
width = 3;
break;
default:
@@ -1893,7 +1893,7 @@ static const struct dev_pm_ops tb_xdomain_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};
-struct device_type tb_xdomain_type = {
+const struct device_type tb_xdomain_type = {
.name = "thunderbolt_xdomain",
.release = tb_xdomain_release,
.pm = &tb_xdomain_pm_ops,
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 5646dc6242cd9..a45d423ad10f0 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -75,14 +75,9 @@ config VT_CONSOLE_SLEEP
def_bool y
depends on VT_CONSOLE && PM_SLEEP
-config HW_CONSOLE
- bool
- depends on VT
- default y
-
config VT_HW_CONSOLE_BINDING
bool "Support for binding and unbinding console drivers"
- depends on HW_CONSOLE
+ depends on VT
help
The virtual terminal is the device that interacts with the physical
terminal through console drivers. On these systems, at least one
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 7716ce0d35bc2..e27360652d9b6 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1566,7 +1566,7 @@ fail_tty_driver_kref_put:
return error;
}
-static int __exit amiga_serial_remove(struct platform_device *pdev)
+static void __exit amiga_serial_remove(struct platform_device *pdev)
{
struct serial_state *state = platform_get_drvdata(pdev);
@@ -1576,12 +1576,10 @@ static int __exit amiga_serial_remove(struct platform_device *pdev)
free_irq(IRQ_AMIGA_TBE, state);
free_irq(IRQ_AMIGA_RBF, state);
-
- return 0;
}
static struct platform_driver amiga_serial_driver = {
- .remove = __exit_p(amiga_serial_remove),
+ .remove_new = __exit_p(amiga_serial_remove),
.driver = {
.name = "amiga-serial",
},
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index d27979eabfdfb..34421ec06a69e 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -408,7 +408,7 @@ err_unmap:
return ret;
}
-static int goldfish_tty_remove(struct platform_device *pdev)
+static void goldfish_tty_remove(struct platform_device *pdev)
{
struct goldfish_tty *qtty = platform_get_drvdata(pdev);
@@ -424,7 +424,6 @@ static int goldfish_tty_remove(struct platform_device *pdev)
if (goldfish_tty_current_line_count == 0)
goldfish_tty_delete_driver();
mutex_unlock(&goldfish_tty_lock);
- return 0;
}
#ifdef CONFIG_GOLDFISH_TTY_EARLY_CONSOLE
@@ -462,7 +461,7 @@ MODULE_DEVICE_TABLE(of, goldfish_tty_of_match);
static struct platform_driver goldfish_tty_platform_driver = {
.probe = goldfish_tty_probe,
- .remove = goldfish_tty_remove,
+ .remove_new = goldfish_tty_remove,
.driver = {
.name = "goldfish_tty",
.of_match_table = goldfish_tty_of_match,
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index fdecc0d63731a..b1149bc62ca1f 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -1035,6 +1035,10 @@ static const struct attribute_group *hvc_iucv_dev_attr_groups[] = {
NULL,
};
+static void hvc_iucv_free(struct device *data)
+{
+ kfree(data);
+}
/**
* hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
@@ -1097,7 +1101,7 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console)
priv->dev->bus = &iucv_bus;
priv->dev->parent = iucv_root;
priv->dev->groups = hvc_iucv_dev_attr_groups;
- priv->dev->release = (void (*)(struct device *)) kfree;
+ priv->dev->release = hvc_iucv_free;
rc = device_register(priv->dev);
if (rc) {
put_device(priv->dev);
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index aac80b69a069c..afbf7738c7c47 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -309,7 +309,7 @@ static void mips_ejtag_fdc_console_write(struct console *c, const char *s,
unsigned int i, buf_len, cpu;
bool done_cr = false;
char buf[4];
- const char *buf_ptr = buf;
+ const u8 *buf_ptr = buf;
/* Number of bytes of input data encoded up to each byte in buf */
u8 inc[4];
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 822a5cd055666..613cb356b918d 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -431,7 +431,7 @@ static void serdev_drv_remove(struct device *dev)
dev_pm_domain_detach(dev, true);
}
-static struct bus_type serdev_bus_type = {
+static const struct bus_type serdev_bus_type = {
.name = "serial",
.match = serdev_device_match,
.probe = serdev_drv_probe,
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index e94e090cf0a1a..3d7ae7fa50186 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -27,19 +27,17 @@ static size_t ttyport_receive_buf(struct tty_port *port, const u8 *cp,
{
struct serdev_controller *ctrl = port->client_data;
struct serport *serport = serdev_controller_get_drvdata(ctrl);
- int ret;
+ size_t ret;
if (!test_bit(SERPORT_ACTIVE, &serport->flags))
return 0;
ret = serdev_controller_receive_buf(ctrl, cp, count);
- dev_WARN_ONCE(&ctrl->dev, ret < 0 || ret > count,
- "receive_buf returns %d (count = %zu)\n",
+ dev_WARN_ONCE(&ctrl->dev, ret > count,
+ "receive_buf returns %zu (count = %zu)\n",
ret, count);
- if (ret < 0)
- return 0;
- else if (ret > count)
+ if (ret > count)
return count;
return ret;
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 8c2aaf7af7b75..53d8eee9b1c81 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -419,8 +419,8 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
struct aspeed_vuart *vuart;
struct device_node *np;
struct resource *res;
- u32 clk, prop, sirq[2];
int rc, sirq_polarity;
+ u32 prop, sirq[2];
struct clk *vclk;
np = pdev->dev.of_node;
@@ -447,53 +447,35 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
port.port.status = UPSTAT_SYNC_FIFO;
port.port.dev = &pdev->dev;
port.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
+ port.port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_FIXED_PORT | UPF_FIXED_TYPE |
+ UPF_NO_THRE_TEST;
port.bugs |= UART_BUG_TXRACE;
rc = sysfs_create_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
if (rc < 0)
return rc;
- if (of_property_read_u32(np, "clock-frequency", &clk)) {
+ rc = uart_read_port_properties(&port.port);
+ if (rc)
+ goto err_sysfs_remove;
+
+ /* Get clk rate through clk driver if present */
+ if (!port.port.uartclk) {
vclk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(vclk)) {
rc = dev_err_probe(dev, PTR_ERR(vclk), "clk or clock-frequency not defined\n");
goto err_sysfs_remove;
}
- clk = clk_get_rate(vclk);
+ port.port.uartclk = clk_get_rate(vclk);
}
/* If current-speed was set, then try not to change it. */
if (of_property_read_u32(np, "current-speed", &prop) == 0)
- port.port.custom_divisor = clk / (16 * prop);
-
- /* Check for shifted address mapping */
- if (of_property_read_u32(np, "reg-offset", &prop) == 0)
- port.port.mapbase += prop;
-
- /* Check for registers offset within the devices address range */
- if (of_property_read_u32(np, "reg-shift", &prop) == 0)
- port.port.regshift = prop;
+ port.port.custom_divisor = port.port.uartclk / (16 * prop);
- /* Check for fifo size */
- if (of_property_read_u32(np, "fifo-size", &prop) == 0)
- port.port.fifosize = prop;
-
- /* Check for a fixed line number */
- rc = of_alias_get_id(np, "serial");
- if (rc >= 0)
- port.port.line = rc;
-
- port.port.irq = irq_of_parse_and_map(np, 0);
port.port.handle_irq = aspeed_vuart_handle_irq;
- port.port.iotype = UPIO_MEM;
port.port.type = PORT_ASPEED_VUART;
- port.port.uartclk = clk;
- port.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP
- | UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_NO_THRE_TEST;
-
- if (of_property_read_bool(np, "no-loopback-test"))
- port.port.flags |= UPF_SKIP_TEST;
if (port.port.fifosize)
port.capabilities = UART_CAP_FIFO;
@@ -503,7 +485,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
rc = serial8250_register_8250_port(&port);
if (rc < 0)
- goto err_clk_disable;
+ goto err_sysfs_remove;
vuart->line = rc;
vuart->port = serial8250_get_port(vuart->line);
@@ -529,7 +511,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
rc = aspeed_vuart_set_lpc_address(vuart, prop);
if (rc < 0) {
dev_err_probe(dev, rc, "invalid value in aspeed,lpc-io-reg property\n");
- goto err_clk_disable;
+ goto err_sysfs_remove;
}
rc = of_property_read_u32_array(np, "aspeed,lpc-interrupts", sirq, 2);
@@ -541,14 +523,14 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
rc = aspeed_vuart_set_sirq(vuart, sirq[0]);
if (rc < 0) {
dev_err_probe(dev, rc, "invalid sirq number in aspeed,lpc-interrupts property\n");
- goto err_clk_disable;
+ goto err_sysfs_remove;
}
sirq_polarity = aspeed_vuart_map_irq_polarity(sirq[1]);
if (sirq_polarity < 0) {
rc = dev_err_probe(dev, sirq_polarity,
"invalid sirq polarity in aspeed,lpc-interrupts property\n");
- goto err_clk_disable;
+ goto err_sysfs_remove;
}
aspeed_vuart_set_sirq_polarity(vuart, sirq_polarity);
@@ -559,8 +541,6 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
return 0;
-err_clk_disable:
- irq_dispose_mapping(port.port.irq);
err_sysfs_remove:
sysfs_remove_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
return rc;
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index beac6b340acef..121a5ce860508 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -45,10 +45,6 @@ struct bcm2835aux_data {
u32 cntl;
};
-struct bcm2835_aux_serial_driver_data {
- resource_size_t offset;
-};
-
static void bcm2835aux_rs485_start_tx(struct uart_8250_port *up)
{
if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
@@ -85,10 +81,9 @@ static void bcm2835aux_rs485_stop_tx(struct uart_8250_port *up)
static int bcm2835aux_serial_probe(struct platform_device *pdev)
{
- const struct bcm2835_aux_serial_driver_data *bcm_data;
+ const struct software_node *bcm2835_swnode;
struct uart_8250_port up = { };
struct bcm2835aux_data *data;
- resource_size_t offset = 0;
struct resource *res;
unsigned int uartclk;
int ret;
@@ -101,12 +96,8 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
/* initialize data */
up.capabilities = UART_CAP_FIFO | UART_CAP_MINI;
up.port.dev = &pdev->dev;
- up.port.regshift = 2;
up.port.type = PORT_16550;
- up.port.iotype = UPIO_MEM;
- up.port.fifosize = 8;
- up.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE |
- UPF_SKIP_TEST | UPF_IOREMAP;
+ up.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_SKIP_TEST | UPF_IOREMAP;
up.port.rs485_config = serial8250_em485_config;
up.port.rs485_supported = serial8250_em485_supported;
up.rs485_start_tx = bcm2835aux_rs485_start_tx;
@@ -122,12 +113,6 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
if (IS_ERR(data->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(data->clk), "could not get clk\n");
- /* get the interrupt */
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- return ret;
- up.port.irq = ret;
-
/* map the main registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -135,52 +120,40 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
return -EINVAL;
}
- bcm_data = device_get_match_data(&pdev->dev);
-
- /* Some UEFI implementations (e.g. tianocore/edk2 for the Raspberry Pi)
- * describe the miniuart with a base address that encompasses the auxiliary
- * registers shared between the miniuart and spi.
- *
- * This is due to historical reasons, see discussion here :
- * https://edk2.groups.io/g/devel/topic/87501357#84349
- *
- * We need to add the offset between the miniuart and auxiliary
- * registers to get the real miniuart base address.
- */
- if (bcm_data)
- offset = bcm_data->offset;
+ up.port.mapbase = res->start;
+ up.port.mapsize = resource_size(res);
+
+ bcm2835_swnode = device_get_match_data(&pdev->dev);
+ if (bcm2835_swnode) {
+ ret = device_add_software_node(&pdev->dev, bcm2835_swnode);
+ if (ret)
+ return ret;
+ }
- up.port.mapbase = res->start + offset;
- up.port.mapsize = resource_size(res) - offset;
+ ret = uart_read_port_properties(&up.port);
+ if (ret)
+ goto rm_swnode;
- /* Check for a fixed line number */
- ret = of_alias_get_id(pdev->dev.of_node, "serial");
- if (ret >= 0)
- up.port.line = ret;
+ up.port.regshift = 2;
+ up.port.fifosize = 8;
/* enable the clock as a last step */
ret = clk_prepare_enable(data->clk);
if (ret) {
- dev_err(&pdev->dev, "unable to enable uart clock - %d\n",
- ret);
- return ret;
+ dev_err_probe(&pdev->dev, ret, "unable to enable uart clock\n");
+ goto rm_swnode;
}
uartclk = clk_get_rate(data->clk);
- if (!uartclk) {
- ret = device_property_read_u32(&pdev->dev, "clock-frequency", &uartclk);
- if (ret) {
- dev_err_probe(&pdev->dev, ret, "could not get clk rate\n");
- goto dis_clk;
- }
- }
+ if (uartclk)
+ up.port.uartclk = uartclk;
/* the HW-clock divider for bcm2835aux is 8,
* but 8250 expects a divider of 16,
* so we have to multiply the actual clock by 2
* to get identical baudrates.
*/
- up.port.uartclk = uartclk * 2;
+ up.port.uartclk *= 2;
/* register the port */
ret = serial8250_register_8250_port(&up);
@@ -194,6 +167,8 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
dis_clk:
clk_disable_unprepare(data->clk);
+rm_swnode:
+ device_remove_software_node(&pdev->dev);
return ret;
}
@@ -203,10 +178,27 @@ static void bcm2835aux_serial_remove(struct platform_device *pdev)
serial8250_unregister_port(data->line);
clk_disable_unprepare(data->clk);
+ device_remove_software_node(&pdev->dev);
}
-static const struct bcm2835_aux_serial_driver_data bcm2835_acpi_data = {
- .offset = 0x40,
+/*
+ * Some UEFI implementations (e.g. tianocore/edk2 for the Raspberry Pi)
+ * describe the miniuart with a base address that encompasses the auxiliary
+ * registers shared between the miniuart and spi.
+ *
+ * This is due to historical reasons, see discussion here:
+ * https://edk2.groups.io/g/devel/topic/87501357#84349
+ *
+ * We need to add the offset between the miniuart and auxiliary registers
+ * to get the real miniuart base address.
+ */
+static const struct property_entry bcm2835_acpi_properties[] = {
+ PROPERTY_ENTRY_U32("reg-offset", 0x40),
+ { }
+};
+
+static const struct software_node bcm2835_acpi_node = {
+ .properties = bcm2835_acpi_properties,
};
static const struct of_device_id bcm2835aux_serial_match[] = {
@@ -216,7 +208,7 @@ static const struct of_device_id bcm2835aux_serial_match[] = {
MODULE_DEVICE_TABLE(of, bcm2835aux_serial_match);
static const struct acpi_device_id bcm2835aux_serial_acpi_match[] = {
- { "BCM2836", (kernel_ulong_t)&bcm2835_acpi_data },
+ { "BCM2836", (kernel_ulong_t)&bcm2835_acpi_node },
{ }
};
MODULE_DEVICE_TABLE(acpi, bcm2835aux_serial_acpi_match);
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index 504c4c0208577..5daa38d9c64e0 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
+#include <linux/units.h>
#include "8250.h"
@@ -187,21 +188,19 @@
#define TX_BUF_SIZE 4096
#define RX_BUF_SIZE 4096
#define RX_BUFS_COUNT 2
-#define KHZ 1000
-#define MHZ(x) ((x) * KHZ * KHZ)
static const u32 brcmstb_rate_table[] = {
- MHZ(81),
- MHZ(108),
- MHZ(64), /* Actually 64285715 for some chips */
- MHZ(48),
+ 81 * HZ_PER_MHZ,
+ 108 * HZ_PER_MHZ,
+ 64 * HZ_PER_MHZ, /* Actually 64285715 for some chips */
+ 48 * HZ_PER_MHZ,
};
static const u32 brcmstb_rate_table_7278[] = {
- MHZ(81),
- MHZ(108),
+ 81 * HZ_PER_MHZ,
+ 108 * HZ_PER_MHZ,
0,
- MHZ(48),
+ 48 * HZ_PER_MHZ,
};
struct brcmuart_priv {
@@ -936,17 +935,14 @@ static void brcmuart_init_debugfs(struct brcmuart_priv *priv,
static int brcmuart_probe(struct platform_device *pdev)
{
struct resource *regs;
- struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_id = NULL;
struct uart_8250_port *new_port;
struct device *dev = &pdev->dev;
struct brcmuart_priv *priv;
struct clk *baud_mux_clk;
struct uart_8250_port up;
- int irq;
void __iomem *membase = NULL;
resource_size_t mapbase = 0;
- u32 clk_rate = 0;
int ret;
int x;
int dma_irq;
@@ -954,15 +950,12 @@ static int brcmuart_probe(struct platform_device *pdev)
"uart", "dma_rx", "dma_tx", "dma_intr2", "dma_arb"
};
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
priv = devm_kzalloc(dev, sizeof(struct brcmuart_priv),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
- of_id = of_match_node(brcmuart_dt_ids, np);
+ of_id = of_match_node(brcmuart_dt_ids, dev->of_node);
if (!of_id || !of_id->data)
priv->rate_table = brcmstb_rate_table;
else
@@ -1012,7 +1005,23 @@ static int brcmuart_probe(struct platform_device *pdev)
}
}
- of_property_read_u32(np, "clock-frequency", &clk_rate);
+ dev_dbg(dev, "DMA is %senabled\n", priv->dma_enabled ? "" : "not ");
+
+ memset(&up, 0, sizeof(up));
+ up.port.type = PORT_BCM7271;
+ up.port.dev = dev;
+ up.port.mapbase = mapbase;
+ up.port.membase = membase;
+ up.port.handle_irq = brcmuart_handle_irq;
+ up.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_FIXED_TYPE;
+ up.port.private_data = priv;
+
+ ret = uart_read_port_properties(&up.port);
+ if (ret)
+ goto release_dma;
+
+ up.port.regshift = 2;
+ up.port.iotype = device_is_big_endian(dev) ? UPIO_MEM32BE : UPIO_MEM32;
/* See if a Baud clock has been specified */
baud_mux_clk = devm_clk_get_optional_enabled(dev, "sw_baud");
@@ -1024,39 +1033,11 @@ static int brcmuart_probe(struct platform_device *pdev)
priv->baud_mux_clk = baud_mux_clk;
init_real_clk_rates(dev, priv);
- clk_rate = priv->default_mux_rate;
+ up.port.uartclk = priv->default_mux_rate;
} else {
dev_dbg(dev, "BAUD MUX clock not specified\n");
}
- if (clk_rate == 0) {
- ret = dev_err_probe(dev, -EINVAL, "clock-frequency or clk not defined\n");
- goto release_dma;
- }
-
- dev_dbg(dev, "DMA is %senabled\n", priv->dma_enabled ? "" : "not ");
-
- memset(&up, 0, sizeof(up));
- up.port.type = PORT_BCM7271;
- up.port.uartclk = clk_rate;
- up.port.dev = dev;
- up.port.mapbase = mapbase;
- up.port.membase = membase;
- up.port.irq = irq;
- up.port.handle_irq = brcmuart_handle_irq;
- up.port.regshift = 2;
- up.port.iotype = of_device_is_big_endian(np) ?
- UPIO_MEM32BE : UPIO_MEM32;
- up.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF
- | UPF_FIXED_PORT | UPF_FIXED_TYPE;
- up.port.dev = dev;
- up.port.private_data = priv;
-
- /* Check for a fixed line number */
- ret = of_alias_get_id(np, "serial");
- if (ret >= 0)
- up.port.line = ret;
-
/* setup HR timer */
hrtimer_init(&priv->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
priv->hrt.function = brcmuart_hrtimer_func;
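The probe rework above follows the pattern repeated throughout this series: the driver pre-fills dev, mapbase, membase and its fixed flags, lets the serial core read the remaining firmware properties (the IRQ, the "serial" alias line number, reg-shift, reg-io-width, reg-offset, fifo-size, no-loopback-test, clock-frequency), and only then applies driver-specific overrides such as regshift or iotype. A condensed, hedged sketch of that flow, not a buildable driver:

    struct uart_8250_port up = { };
    int ret;

    up.port.dev = dev;
    up.port.mapbase = mapbase;
    up.port.membase = membase;
    up.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_FIXED_TYPE;

    /* Generic pass: fills irq, line, iotype, regshift, fifosize, uartclk... */
    ret = uart_read_port_properties(&up.port);
    if (ret)
        return ret;

    /* Driver-specific fixups still win after the generic pass. */
    up.port.regshift = 2;
    up.port.iotype = device_is_big_endian(dev) ? UPIO_MEM32BE : UPIO_MEM32;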
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index c1d43f040c43a..1300c92b8702a 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -9,7 +9,6 @@
* LCR is written whilst busy. If it is, then a busy detect interrupt is
* raised, the LCR needs to be rewritten and the uart status register read.
*/
-#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -17,7 +16,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/notifier.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
@@ -56,6 +54,7 @@
#define DW_UART_QUIRK_ARMADA_38X BIT(1)
#define DW_UART_QUIRK_SKIP_SET_RATE BIT(2)
#define DW_UART_QUIRK_IS_DMA_FC BIT(3)
+#define DW_UART_QUIRK_APMC0D08 BIT(4)
static inline struct dw8250_data *clk_to_dw8250_data(struct notifier_block *nb)
{
@@ -357,9 +356,9 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
long rate;
int ret;
+ clk_disable_unprepare(d->clk);
rate = clk_round_rate(d->clk, newrate);
- if (rate > 0 && p->uartclk != rate) {
- clk_disable_unprepare(d->clk);
+ if (rate > 0) {
/*
* Note that any clock-notifer worker will block in
* serial8250_update_uartclk() until we are done.
@@ -367,8 +366,8 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
ret = clk_set_rate(d->clk, newrate);
if (!ret)
p->uartclk = rate;
- clk_prepare_enable(d->clk);
}
+ clk_prepare_enable(d->clk);
dw8250_do_set_termios(p, termios, old);
}
@@ -445,44 +444,29 @@ static void dw8250_prepare_rx_dma(struct uart_8250_port *p)
static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
{
- struct device_node *np = p->dev->of_node;
+ unsigned int quirks = data->pdata ? data->pdata->quirks : 0;
- if (np) {
- unsigned int quirks = data->pdata->quirks;
- int id;
-
- /* get index of serial line, if found in DT aliases */
- id = of_alias_get_id(np, "serial");
- if (id >= 0)
- p->line = id;
#ifdef CONFIG_64BIT
- if (quirks & DW_UART_QUIRK_OCTEON) {
- p->serial_in = dw8250_serial_inq;
- p->serial_out = dw8250_serial_outq;
- p->flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
- p->type = PORT_OCTEON;
- data->skip_autocfg = true;
- }
+ if (quirks & DW_UART_QUIRK_OCTEON) {
+ p->serial_in = dw8250_serial_inq;
+ p->serial_out = dw8250_serial_outq;
+ p->flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
+ p->type = PORT_OCTEON;
+ data->skip_autocfg = true;
+ }
#endif
- if (of_device_is_big_endian(np)) {
- p->iotype = UPIO_MEM32BE;
- p->serial_in = dw8250_serial_in32be;
- p->serial_out = dw8250_serial_out32be;
- }
-
- if (quirks & DW_UART_QUIRK_ARMADA_38X)
- p->serial_out = dw8250_serial_out38x;
- if (quirks & DW_UART_QUIRK_SKIP_SET_RATE)
- p->set_termios = dw8250_do_set_termios;
- if (quirks & DW_UART_QUIRK_IS_DMA_FC) {
- data->data.dma.txconf.device_fc = 1;
- data->data.dma.rxconf.device_fc = 1;
- data->data.dma.prepare_tx_dma = dw8250_prepare_tx_dma;
- data->data.dma.prepare_rx_dma = dw8250_prepare_rx_dma;
- }
-
- } else if (acpi_dev_present("APMC0D08", NULL, -1)) {
+ if (quirks & DW_UART_QUIRK_ARMADA_38X)
+ p->serial_out = dw8250_serial_out38x;
+ if (quirks & DW_UART_QUIRK_SKIP_SET_RATE)
+ p->set_termios = dw8250_do_set_termios;
+ if (quirks & DW_UART_QUIRK_IS_DMA_FC) {
+ data->data.dma.txconf.device_fc = 1;
+ data->data.dma.rxconf.device_fc = 1;
+ data->data.dma.prepare_tx_dma = dw8250_prepare_tx_dma;
+ data->data.dma.prepare_rx_dma = dw8250_prepare_rx_dma;
+ }
+ if (quirks & DW_UART_QUIRK_APMC0D08) {
p->iotype = UPIO_MEM32;
p->regshift = 2;
p->serial_in = dw8250_serial_in32;
@@ -510,39 +494,21 @@ static int dw8250_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct dw8250_data *data;
struct resource *regs;
- int irq;
int err;
- u32 val;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
return dev_err_probe(dev, -EINVAL, "no registers defined\n");
- irq = platform_get_irq_optional(pdev, 0);
- /* no interrupt -> fall back to polling */
- if (irq == -ENXIO)
- irq = 0;
- if (irq < 0)
- return irq;
-
spin_lock_init(&p->lock);
- p->mapbase = regs->start;
- p->irq = irq;
p->handle_irq = dw8250_handle_irq;
p->pm = dw8250_do_pm;
p->type = PORT_8250;
- p->flags = UPF_SHARE_IRQ | UPF_FIXED_PORT;
+ p->flags = UPF_FIXED_PORT;
p->dev = dev;
- p->iotype = UPIO_MEM;
- p->serial_in = dw8250_serial_in;
- p->serial_out = dw8250_serial_out;
p->set_ldisc = dw8250_set_ldisc;
p->set_termios = dw8250_set_termios;
- p->membase = devm_ioremap(dev, regs->start, resource_size(regs));
- if (!p->membase)
- return -ENOMEM;
-
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -554,15 +520,35 @@ static int dw8250_probe(struct platform_device *pdev)
data->uart_16550_compatible = device_property_read_bool(dev,
"snps,uart-16550-compatible");
- err = device_property_read_u32(dev, "reg-shift", &val);
- if (!err)
- p->regshift = val;
+ p->mapbase = regs->start;
+ p->mapsize = resource_size(regs);
- err = device_property_read_u32(dev, "reg-io-width", &val);
- if (!err && val == 4) {
- p->iotype = UPIO_MEM32;
+ p->membase = devm_ioremap(dev, p->mapbase, p->mapsize);
+ if (!p->membase)
+ return -ENOMEM;
+
+ err = uart_read_port_properties(p);
+ /* no interrupt -> fall back to polling */
+ if (err == -ENXIO)
+ err = 0;
+ if (err)
+ return err;
+
+ switch (p->iotype) {
+ case UPIO_MEM:
+ p->serial_in = dw8250_serial_in;
+ p->serial_out = dw8250_serial_out;
+ break;
+ case UPIO_MEM32:
p->serial_in = dw8250_serial_in32;
p->serial_out = dw8250_serial_out32;
+ break;
+ case UPIO_MEM32BE:
+ p->serial_in = dw8250_serial_in32be;
+ p->serial_out = dw8250_serial_out32be;
+ break;
+ default:
+ return -ENODEV;
}
if (device_property_read_bool(dev, "dcd-override")) {
@@ -589,15 +575,13 @@ static int dw8250_probe(struct platform_device *pdev)
data->msr_mask_off |= UART_MSR_TERI;
}
- /* Always ask for fixed clock rate from a property. */
- device_property_read_u32(dev, "clock-frequency", &p->uartclk);
-
/* If there is separate baudclk, get the rate from it. */
data->clk = devm_clk_get_optional_enabled(dev, "baudclk");
if (data->clk == NULL)
data->clk = devm_clk_get_optional_enabled(dev, NULL);
if (IS_ERR(data->clk))
- return PTR_ERR(data->clk);
+ return dev_err_probe(dev, PTR_ERR(data->clk),
+ "failed to get baudclk\n");
INIT_WORK(&data->clk_work, dw8250_clk_work_cb);
data->clk_notifier.notifier_call = dw8250_clk_notifier_cb;
@@ -762,13 +746,18 @@ static const struct of_device_id dw8250_of_match[] = {
};
MODULE_DEVICE_TABLE(of, dw8250_of_match);
+static const struct dw8250_platform_data dw8250_apmc0d08 = {
+ .usr_reg = DW_UART_USR,
+ .quirks = DW_UART_QUIRK_APMC0D08,
+};
+
static const struct acpi_device_id dw8250_acpi_match[] = {
{ "80860F0A", (kernel_ulong_t)&dw8250_dw_apb },
{ "8086228A", (kernel_ulong_t)&dw8250_dw_apb },
{ "AMD0020", (kernel_ulong_t)&dw8250_dw_apb },
{ "AMDI0020", (kernel_ulong_t)&dw8250_dw_apb },
{ "AMDI0022", (kernel_ulong_t)&dw8250_dw_apb },
- { "APMC0D08", (kernel_ulong_t)&dw8250_dw_apb},
+ { "APMC0D08", (kernel_ulong_t)&dw8250_apmc0d08 },
{ "BRCM2032", (kernel_ulong_t)&dw8250_dw_apb },
{ "HISI0031", (kernel_ulong_t)&dw8250_dw_apb },
{ "INT33C4", (kernel_ulong_t)&dw8250_dw_apb },
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 23366f868ae3a..0440df7de1ed7 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -6,23 +6,29 @@
*
* Copyright (C) 2017 Sudip Mukherjee, All Rights Reserved.
*/
-#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/dmi.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/property.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-#include <linux/delay.h>
#include <asm/byteorder.h>
#include "8250.h"
+#include "8250_pcilib.h"
#define PCI_DEVICE_ID_ACCESSIO_COM_2S 0x1052
#define PCI_DEVICE_ID_ACCESSIO_COM_4S 0x105d
@@ -229,13 +235,12 @@ static int default_setup(struct exar8250 *priv, struct pci_dev *pcidev,
struct uart_8250_port *port)
{
const struct exar8250_board *board = priv->board;
- unsigned int bar = 0;
unsigned char status;
+ int err;
- port->port.iotype = UPIO_MEM;
- port->port.mapbase = pci_resource_start(pcidev, bar) + offset;
- port->port.membase = priv->virt + offset;
- port->port.regshift = board->reg_shift;
+ err = serial8250_pci_setup_port(pcidev, port, 0, offset, board->reg_shift);
+ if (err)
+ return err;
/*
* XR17V35x UARTs have an extra divisor register, DLD that gets enabled
@@ -375,7 +380,7 @@ static struct platform_device *__xr17v35x_register_gpio(struct pci_dev *pcidev,
return NULL;
pdev->dev.parent = &pcidev->dev;
- ACPI_COMPANION_SET(&pdev->dev, ACPI_COMPANION(&pcidev->dev));
+ device_set_node(&pdev->dev, dev_fwnode(&pcidev->dev));
if (device_add_software_node(&pdev->dev, node) < 0 ||
platform_device_add(pdev) < 0) {
@@ -713,14 +718,14 @@ exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
uart.port.irq = pci_irq_vector(pcidev, 0);
uart.port.dev = &pcidev->dev;
+ /* Clear interrupts */
+ exar_misc_clear(priv);
+
rc = devm_request_irq(&pcidev->dev, uart.port.irq, exar_misc_handler,
IRQF_SHARED, "exar_uart", priv);
if (rc)
return rc;
- /* Clear interrupts */
- exar_misc_clear(priv);
-
for (i = 0; i < nr_ports && i < maxnr; i++) {
rc = board->setup(priv, pcidev, &uart, i);
if (rc) {
@@ -753,28 +758,24 @@ static void exar_pci_remove(struct pci_dev *pcidev)
for (i = 0; i < priv->nr; i++)
serial8250_unregister_port(priv->line[i]);
+ /* Ensure that every init quirk is properly torn down */
if (priv->board->exit)
priv->board->exit(pcidev);
}
-static int __maybe_unused exar_suspend(struct device *dev)
+static int exar_suspend(struct device *dev)
{
- struct pci_dev *pcidev = to_pci_dev(dev);
- struct exar8250 *priv = pci_get_drvdata(pcidev);
+ struct exar8250 *priv = dev_get_drvdata(dev);
unsigned int i;
for (i = 0; i < priv->nr; i++)
if (priv->line[i] >= 0)
serial8250_suspend_port(priv->line[i]);
- /* Ensure that every init quirk is properly torn down */
- if (priv->board->exit)
- priv->board->exit(pcidev);
-
return 0;
}
-static int __maybe_unused exar_resume(struct device *dev)
+static int exar_resume(struct device *dev)
{
struct exar8250 *priv = dev_get_drvdata(dev);
unsigned int i;
@@ -788,7 +789,7 @@ static int __maybe_unused exar_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(exar_pci_pm, exar_suspend, exar_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(exar_pci_pm, exar_suspend, exar_resume);
static const struct exar8250_board pbn_fastcom335_2 = {
.num_ports = 2,
@@ -938,12 +939,13 @@ static struct pci_driver exar_pci_driver = {
.probe = exar_pci_probe,
.remove = exar_pci_remove,
.driver = {
- .pm = &exar_pci_pm,
+ .pm = pm_sleep_ptr(&exar_pci_pm),
},
.id_table = exar_pci_tbl,
};
module_pci_driver(exar_pci_driver);
+MODULE_IMPORT_NS(SERIAL_8250_PCI);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Exar Serial Driver");
MODULE_AUTHOR("Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>");
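Dropping __maybe_unused works because DEFINE_SIMPLE_DEV_PM_OPS() together with pm_sleep_ptr() lets the compiler see, and then discard, the callbacks when CONFIG_PM_SLEEP is disabled, instead of hiding them behind preprocessor guards. A minimal sketch of the pattern with illustrative names:

static int foo_suspend(struct device *dev)
{
    /* Referenced via pm_sleep_ptr(), so no __maybe_unused is needed. */
    return 0;
}

static int foo_resume(struct device *dev)
{
    return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

/* In the driver definition: .driver.pm = pm_sleep_ptr(&foo_pm_ops) */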
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
index a12f737924c0b..a2783e38a2e31 100644
--- a/drivers/tty/serial/8250/8250_ingenic.c
+++ b/drivers/tty/serial/8250/8250_ingenic.c
@@ -234,7 +234,7 @@ static int ingenic_uart_probe(struct platform_device *pdev)
struct ingenic_uart_data *data;
const struct ingenic_uart_config *cdata;
struct resource *regs;
- int irq, err, line;
+ int err;
cdata = of_device_get_match_data(&pdev->dev);
if (!cdata) {
@@ -242,10 +242,6 @@ static int ingenic_uart_probe(struct platform_device *pdev)
return -ENODEV;
}
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
dev_err(&pdev->dev, "no registers defined\n");
@@ -259,21 +255,19 @@ static int ingenic_uart_probe(struct platform_device *pdev)
spin_lock_init(&uart.port.lock);
uart.port.type = PORT_16550A;
uart.port.flags = UPF_SKIP_TEST | UPF_IOREMAP | UPF_FIXED_TYPE;
- uart.port.iotype = UPIO_MEM;
uart.port.mapbase = regs->start;
- uart.port.regshift = 2;
uart.port.serial_out = ingenic_uart_serial_out;
uart.port.serial_in = ingenic_uart_serial_in;
- uart.port.irq = irq;
uart.port.dev = &pdev->dev;
- uart.port.fifosize = cdata->fifosize;
uart.tx_loadsz = cdata->tx_loadsz;
uart.capabilities = UART_CAP_FIFO | UART_CAP_RTOIE;
- /* Check for a fixed line number */
- line = of_alias_get_id(pdev->dev.of_node, "serial");
- if (line >= 0)
- uart.port.line = line;
+ err = uart_read_port_properties(&uart.port);
+ if (err)
+ return err;
+
+ uart.port.regshift = 2;
+ uart.port.fifosize = cdata->fifosize;
uart.port.membase = devm_ioremap(&pdev->dev, regs->start,
resource_size(regs));
diff --git a/drivers/tty/serial/8250/8250_lpc18xx.c b/drivers/tty/serial/8250/8250_lpc18xx.c
index 8d728a6a59914..47e1a056a60c3 100644
--- a/drivers/tty/serial/8250/8250_lpc18xx.c
+++ b/drivers/tty/serial/8250/8250_lpc18xx.c
@@ -92,11 +92,7 @@ static int lpc18xx_serial_probe(struct platform_device *pdev)
struct lpc18xx_uart_data *data;
struct uart_8250_port uart;
struct resource *res;
- int irq, ret;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
+ int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -139,19 +135,12 @@ static int lpc18xx_serial_probe(struct platform_device *pdev)
goto dis_clk_reg;
}
- ret = of_alias_get_id(pdev->dev.of_node, "serial");
- if (ret >= 0)
- uart.port.line = ret;
-
data->dma.rx_param = data;
data->dma.tx_param = data;
spin_lock_init(&uart.port.lock);
uart.port.dev = &pdev->dev;
- uart.port.irq = irq;
- uart.port.iotype = UPIO_MEM32;
uart.port.mapbase = res->start;
- uart.port.regshift = 2;
uart.port.type = PORT_16550A;
uart.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_SKIP_TEST;
uart.port.uartclk = clk_get_rate(data->clk_uart);
@@ -160,6 +149,13 @@ static int lpc18xx_serial_probe(struct platform_device *pdev)
uart.port.rs485_supported = lpc18xx_rs485_supported;
uart.port.serial_out = lpc18xx_uart_serial_out;
+ ret = uart_read_port_properties(&uart.port);
+ if (ret)
+ goto dis_uart_clk;
+
+ uart.port.iotype = UPIO_MEM32;
+ uart.port.regshift = 2;
+
uart.dma = &data->dma;
uart.dma->rxconf.src_maxburst = 1;
uart.dma->txconf.dst_maxburst = 1;
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 34f17a9785e79..5d1dd992d8fba 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -4,7 +4,10 @@
*
* Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>, IBM Corp.
*/
+
+#include <linux/bits.h>
#include <linux/console.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/serial_core.h>
@@ -25,6 +28,36 @@ struct of_serial_info {
int line;
};
+/* Nuvoton NPCM timeout register */
+#define UART_NPCM_TOR 7
+#define UART_NPCM_TOIE BIT(7) /* Timeout Interrupt Enable */
+
+static int npcm_startup(struct uart_port *port)
+{
+ /*
+ * Nuvoton calls the scratch register 'UART_TOR' (timeout
+ * register). Enable it, and set TIOC (timeout interrupt
+ * comparator) to be 0x20 for correct operation.
+ */
+ serial_port_out(port, UART_NPCM_TOR, UART_NPCM_TOIE | 0x20);
+
+ return serial8250_do_startup(port);
+}
+
+/* Nuvoton NPCM UARTs have a custom divisor calculation */
+static unsigned int npcm_get_divisor(struct uart_port *port, unsigned int baud,
+ unsigned int *frac)
+{
+ return DIV_ROUND_CLOSEST(port->uartclk, 16 * baud + 2) - 2;
+}
+
+static int npcm_setup(struct uart_port *port)
+{
+ port->get_divisor = npcm_get_divisor;
+ port->startup = npcm_startup;
+ return 0;
+}
+
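npcm_get_divisor() moves here verbatim; its formula intentionally differs from the generic uartclk / (16 * baud). A small standalone check of the arithmetic (DIV_ROUND_CLOSEST is re-stated for positive operands; the 24 MHz clock and 115200 baud are assumed example values, not taken from the patch):

#include <stdio.h>

/* Same rounding as the kernel macro for positive operands. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static unsigned int npcm_divisor(unsigned int uartclk, unsigned int baud)
{
    return DIV_ROUND_CLOSEST(uartclk, 16 * baud + 2) - 2;
}

int main(void)
{
    /* Assumed example: 24 MHz UART clock at 115200 baud. */
    printf("divisor = %u\n", npcm_divisor(24000000, 115200)); /* prints 11 */
    return 0;
}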
/*
* Fill a struct uart_port for a given device node
*/
@@ -36,37 +69,22 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
struct device *dev = &ofdev->dev;
struct device_node *np = dev->of_node;
struct uart_port *port = &up->port;
- u32 clk, spd, prop;
- int ret, irq;
+ u32 spd;
+ int ret;
memset(port, 0, sizeof *port);
pm_runtime_enable(&ofdev->dev);
pm_runtime_get_sync(&ofdev->dev);
- if (of_property_read_u32(np, "clock-frequency", &clk)) {
-
- /* Get clk rate through clk driver if present */
- info->clk = devm_clk_get_enabled(dev, NULL);
- if (IS_ERR(info->clk)) {
- ret = dev_err_probe(dev, PTR_ERR(info->clk), "failed to get clock\n");
- goto err_pmruntime;
- }
-
- clk = clk_get_rate(info->clk);
- }
- /* If current-speed was set, then try not to change it. */
- if (of_property_read_u32(np, "current-speed", &spd) == 0)
- port->custom_divisor = clk / (16 * spd);
-
ret = of_address_to_resource(np, 0, &resource);
if (ret) {
dev_err_probe(dev, ret, "invalid address\n");
goto err_pmruntime;
}
- port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_FIXED_PORT |
- UPF_FIXED_TYPE;
+ port->dev = &ofdev->dev;
+ port->flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_FIXED_TYPE;
spin_lock_init(&port->lock);
if (resource_type(&resource) == IORESOURCE_IO) {
@@ -75,70 +93,31 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
} else {
port->mapbase = resource.start;
port->mapsize = resource_size(&resource);
+ port->flags |= UPF_IOREMAP;
+ }
- /* Check for shifted address mapping */
- if (of_property_read_u32(np, "reg-offset", &prop) == 0) {
- if (prop >= port->mapsize) {
- ret = dev_err_probe(dev, -EINVAL, "reg-offset %u exceeds region size %pa\n",
- prop, &port->mapsize);
- goto err_pmruntime;
- }
+ ret = uart_read_and_validate_port_properties(port);
+ if (ret)
+ goto err_pmruntime;
- port->mapbase += prop;
- port->mapsize -= prop;
+ /* Get clk rate through clk driver if present */
+ if (!port->uartclk) {
+ info->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(info->clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(info->clk), "failed to get clock\n");
+ goto err_pmruntime;
}
- port->iotype = UPIO_MEM;
- if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
- switch (prop) {
- case 1:
- port->iotype = UPIO_MEM;
- break;
- case 2:
- port->iotype = UPIO_MEM16;
- break;
- case 4:
- port->iotype = of_device_is_big_endian(np) ?
- UPIO_MEM32BE : UPIO_MEM32;
- break;
- default:
- ret = dev_err_probe(dev, -EINVAL, "unsupported reg-io-width (%u)\n",
- prop);
- goto err_pmruntime;
- }
- }
- port->flags |= UPF_IOREMAP;
+ port->uartclk = clk_get_rate(info->clk);
}
+ /* If current-speed was set, then try not to change it. */
+ if (of_property_read_u32(np, "current-speed", &spd) == 0)
+ port->custom_divisor = port->uartclk / (16 * spd);
/* Compatibility with the deprecated pxa driver and 8250_pxa drivers. */
if (of_device_is_compatible(np, "mrvl,mmp-uart"))
port->regshift = 2;
- /* Check for registers offset within the devices address range */
- if (of_property_read_u32(np, "reg-shift", &prop) == 0)
- port->regshift = prop;
-
- /* Check for fifo size */
- if (of_property_read_u32(np, "fifo-size", &prop) == 0)
- port->fifosize = prop;
-
- /* Check for a fixed line number */
- ret = of_alias_get_id(np, "serial");
- if (ret >= 0)
- port->line = ret;
-
- irq = of_irq_get(np, 0);
- if (irq < 0) {
- if (irq == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto err_pmruntime;
- }
- /* IRQ support not mandatory */
- irq = 0;
- }
-
- port->irq = irq;
-
info->rst = devm_reset_control_get_optional_shared(&ofdev->dev, NULL);
if (IS_ERR(info->rst)) {
ret = PTR_ERR(info->rst);
@@ -150,12 +129,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
goto err_pmruntime;
port->type = type;
- port->uartclk = clk;
-
- if (of_property_read_bool(np, "no-loopback-test"))
- port->flags |= UPF_SKIP_TEST;
-
- port->dev = &ofdev->dev;
port->rs485_config = serial8250_em485_config;
port->rs485_supported = serial8250_em485_supported;
up->rs485_start_tx = serial8250_em485_start_tx;
@@ -164,10 +137,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
switch (type) {
case PORT_RT2880:
ret = rt288x_setup(port);
- if (ret)
- goto err_pmruntime;
+ break;
+ case PORT_NPCM:
+ ret = npcm_setup(port);
+ break;
+ default:
+ /* Nothing to do */
+ ret = 0;
break;
}
+ if (ret)
+ goto err_pmruntime;
if (IS_REACHABLE(CONFIG_SERIAL_8250_FSL) &&
(of_device_is_compatible(np, "fsl,ns16550") ||
@@ -240,7 +220,6 @@ static int of_platform_serial_probe(struct platform_device *ofdev)
platform_set_drvdata(ofdev, info);
return 0;
err_dispose:
- irq_dispose_mapping(port8250.port.irq);
pm_runtime_put_sync(&ofdev->dev);
pm_runtime_disable(&ofdev->dev);
err_free:
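The current-speed handling keeps its old meaning: when the property is present, a custom divisor is derived from the (possibly clk-provided) uartclk so an already-configured console speed is preserved. A tiny worked example of that division (the 1.8432 MHz clock and 115200 baud are assumed example values):

#include <stdio.h>

int main(void)
{
    unsigned int uartclk = 1843200; /* assumed clock-frequency */
    unsigned int spd = 115200;      /* assumed current-speed */

    printf("custom_divisor = %u\n", uartclk / (16 * spd)); /* prints 1 */
    return 0;
}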
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 6942990a333c8..66901d93089a3 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1394,11 +1394,7 @@ static int omap8250_probe(struct platform_device *pdev)
struct uart_8250_port up;
struct resource *regs;
void __iomem *membase;
- int irq, ret;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
+ int ret;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
@@ -1419,7 +1415,6 @@ static int omap8250_probe(struct platform_device *pdev)
up.port.dev = &pdev->dev;
up.port.mapbase = regs->start;
up.port.membase = membase;
- up.port.irq = irq;
/*
* It claims to be 16C750 compatible however it is a little different.
* It has EFR and has no FCR7_64byte bit. The AFE (which it claims to
@@ -1429,13 +1424,9 @@ static int omap8250_probe(struct platform_device *pdev)
* or pm callback.
*/
up.port.type = PORT_8250;
- up.port.iotype = UPIO_MEM;
- up.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_SOFT_FLOW |
- UPF_HARD_FLOW;
+ up.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_SOFT_FLOW | UPF_HARD_FLOW;
up.port.private_data = priv;
- up.port.regshift = OMAP_UART_REGSHIFT;
- up.port.fifosize = 64;
up.tx_loadsz = 64;
up.capabilities = UART_CAP_FIFO;
#ifdef CONFIG_PM
@@ -1461,14 +1452,14 @@ static int omap8250_probe(struct platform_device *pdev)
up.rs485_stop_tx = serial8250_em485_stop_tx;
up.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
- ret = of_alias_get_id(np, "serial");
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to get alias\n");
+ ret = uart_read_port_properties(&up.port);
+ if (ret)
return ret;
- }
- up.port.line = ret;
- if (of_property_read_u32(np, "clock-frequency", &up.port.uartclk)) {
+ up.port.regshift = OMAP_UART_REGSHIFT;
+ up.port.fifosize = 64;
+
+ if (!up.port.uartclk) {
struct clk *clk;
clk = devm_clk_get(&pdev->dev, NULL);
@@ -1560,8 +1551,8 @@ static int omap8250_probe(struct platform_device *pdev)
}
#endif
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
- ret = devm_request_irq(&pdev->dev, irq, omap8250_irq, 0,
+ irq_set_status_flags(up.port.irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(&pdev->dev, up.port.irq, omap8250_irq, 0,
dev_name(&pdev->dev), priv);
if (ret < 0)
return ret;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 0d35c77fad9eb..e2e4f99f9d347 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5010,12 +5010,6 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATRO_B,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_bt_2_115200 },
- { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATTRO_A,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_bt_2_115200 },
- { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATTRO_B,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_bt_2_115200 },
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_A,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_bt_4_460800 },
diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
index 2dda737b1660b..2fbb5851f788b 100644
--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
+++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
@@ -7,23 +7,31 @@
* Copyright (C) 2022 Microchip Technology Inc., All Rights Reserved.
*/
+#include <linux/array_size.h>
#include <linux/bitfield.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
+#include <linux/bits.h>
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gfp_types.h>
#include <linux/io.h>
#include <linux/iopoll.h>
-#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/overflow.h>
#include <linux/pci.h>
+#include <linux/pm.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
-#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
-#include <linux/units.h>
+#include <linux/time.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
-#include <linux/8250_pci.h>
+#include <linux/types.h>
+#include <linux/units.h>
#include <asm/byteorder.h>
@@ -67,6 +75,7 @@
#define SYSLOCK_RETRY_CNT 1000
#define UART_RX_BYTE_FIFO 0x00
+#define UART_TX_BYTE_FIFO 0x00
#define UART_FIFO_CTL 0x02
#define UART_ACTV_REG 0x11
@@ -81,10 +90,10 @@
#define ADCL_CFG_PIN_SEL BIT(1)
#define ADCL_CFG_EN BIT(0)
-#define UART_BIT_SAMPLE_CNT 16
+#define UART_BIT_SAMPLE_CNT_8 8
+#define UART_BIT_SAMPLE_CNT_16 16
#define BAUD_CLOCK_DIV_INT_MSK GENMASK(31, 8)
#define ADCL_CFG_RTS_DELAY_MASK GENMASK(11, 8)
-#define UART_CLOCK_DEFAULT (62500 * HZ_PER_KHZ)
#define UART_WAKE_REG 0x8C
#define UART_WAKE_MASK_REG 0x90
@@ -95,13 +104,19 @@
(UART_WAKE_N_PIN | UART_WAKE_NCTS | UART_WAKE_INT)
#define UART_BAUD_CLK_DIVISOR_REG 0x54
+#define FRAC_DIV_CFG_REG 0x58
#define UART_RESET_REG 0x94
#define UART_RESET_D3_RESET_DISABLE BIT(16)
#define UART_BURST_STATUS_REG 0x9C
+#define UART_TX_BURST_FIFO 0xA0
#define UART_RX_BURST_FIFO 0xA4
+#define UART_BIT_DIVISOR_8 0x26731000
+#define UART_BIT_DIVISOR_16 0x6ef71000
+#define UART_BAUD_4MBPS 4000000
+
#define MAX_PORTS 4
#define PORT_OFFSET 0x100
#define RX_BUF_SIZE 512
@@ -109,6 +124,7 @@
#define UART_BURST_SIZE 4
#define UART_BST_STAT_RX_COUNT_MASK 0x00FF
+#define UART_BST_STAT_TX_COUNT_MASK 0xFF00
#define UART_BST_STAT_IIR_INT_PEND 0x100000
#define UART_LSR_OVERRUN_ERR_CLR 0x43
#define UART_BST_STAT_LSR_RX_MASK 0x9F000000
@@ -116,6 +132,7 @@
#define UART_BST_STAT_LSR_OVERRUN_ERR 0x2000000
#define UART_BST_STAT_LSR_PARITY_ERR 0x4000000
#define UART_BST_STAT_LSR_FRAME_ERR 0x8000000
+#define UART_BST_STAT_LSR_THRE 0x20000000
struct pci1xxxx_8250 {
unsigned int nr;
@@ -206,15 +223,21 @@ static int pci1xxxx_get_num_ports(struct pci_dev *dev)
static unsigned int pci1xxxx_get_divisor(struct uart_port *port,
unsigned int baud, unsigned int *frac)
{
+ unsigned int uart_sample_cnt;
unsigned int quot;
+ if (baud >= UART_BAUD_4MBPS)
+ uart_sample_cnt = UART_BIT_SAMPLE_CNT_8;
+ else
+ uart_sample_cnt = UART_BIT_SAMPLE_CNT_16;
+
/*
* Calculate baud rate sampling period in nanoseconds.
* Fractional part x denotes x/255 parts of a nanosecond.
*/
- quot = NSEC_PER_SEC / (baud * UART_BIT_SAMPLE_CNT);
- *frac = (NSEC_PER_SEC - quot * baud * UART_BIT_SAMPLE_CNT) *
- 255 / UART_BIT_SAMPLE_CNT / baud;
+ quot = NSEC_PER_SEC / (baud * uart_sample_cnt);
+ *frac = (NSEC_PER_SEC - quot * baud * uart_sample_cnt) *
+ 255 / uart_sample_cnt / baud;
return quot;
}
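pci1xxxx_get_divisor() expresses the divisor as a per-sample period in nanoseconds, with the fractional part in 1/255ths of a nanosecond; the functional change is that the per-bit sample count drops from 16 to 8 at 4 Mbps and above. A standalone re-statement of the arithmetic (NSEC_PER_SEC and the sample counts mirror the values used above; the baud rates are example inputs):

#include <stdio.h>

#define NSEC_PER_SEC           1000000000U
#define UART_BIT_SAMPLE_CNT_8  8
#define UART_BIT_SAMPLE_CNT_16 16
#define UART_BAUD_4MBPS        4000000

static unsigned int get_divisor(unsigned int baud, unsigned int *frac)
{
    unsigned int cnt = baud >= UART_BAUD_4MBPS ? UART_BIT_SAMPLE_CNT_8
                                               : UART_BIT_SAMPLE_CNT_16;
    unsigned int quot = NSEC_PER_SEC / (baud * cnt);

    *frac = (NSEC_PER_SEC - quot * baud * cnt) * 255 / cnt / baud;
    return quot;
}

int main(void)
{
    unsigned int frac, quot;

    quot = get_divisor(115200, &frac);
    printf("115200 baud: %u ns + %u/255 ns\n", quot, frac);
    quot = get_divisor(4000000, &frac);
    printf("4 Mbps:      %u ns + %u/255 ns\n", quot, frac);
    return 0;
}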
@@ -222,6 +245,11 @@ static unsigned int pci1xxxx_get_divisor(struct uart_port *port,
static void pci1xxxx_set_divisor(struct uart_port *port, unsigned int baud,
unsigned int quot, unsigned int frac)
{
+ if (baud >= UART_BAUD_4MBPS)
+ writel(UART_BIT_DIVISOR_8, port->membase + FRAC_DIV_CFG_REG);
+ else
+ writel(UART_BIT_DIVISOR_16, port->membase + FRAC_DIV_CFG_REG);
+
writel(FIELD_PREP(BAUD_CLOCK_DIV_INT_MSK, quot) | frac,
port->membase + UART_BAUD_CLK_DIVISOR_REG);
}
@@ -233,7 +261,16 @@ static int pci1xxxx_rs485_config(struct uart_port *port,
u32 delay_in_baud_periods;
u32 baud_period_in_ns;
u32 mode_cfg = 0;
+ u32 sample_cnt;
u32 clock_div;
+ u32 frac_div;
+
+ frac_div = readl(port->membase + FRAC_DIV_CFG_REG);
+
+ if (frac_div == UART_BIT_DIVISOR_16)
+ sample_cnt = UART_BIT_SAMPLE_CNT_16;
+ else
+ sample_cnt = UART_BIT_SAMPLE_CNT_8;
/*
* pci1xxxx's uart hardware supports only RTS delay after
@@ -249,7 +286,7 @@ static int pci1xxxx_rs485_config(struct uart_port *port,
clock_div = readl(port->membase + UART_BAUD_CLK_DIVISOR_REG);
baud_period_in_ns =
FIELD_GET(BAUD_CLOCK_DIV_INT_MSK, clock_div) *
- UART_BIT_SAMPLE_CNT;
+ sample_cnt;
delay_in_baud_periods =
rs485->delay_rts_after_send * NSEC_PER_MSEC /
baud_period_in_ns;
@@ -344,6 +381,105 @@ static void pci1xxxx_rx_burst(struct uart_port *port, u32 uart_status)
}
}
+static void pci1xxxx_process_write_data(struct uart_port *port,
+ struct circ_buf *xmit,
+ int *data_empty_count,
+ u32 *valid_byte_count)
+{
+ u32 valid_burst_count = *valid_byte_count / UART_BURST_SIZE;
+
+ /*
+ * Each transaction transfers data in DWORDs. If fewer than four
+ * valid bytes remain to transfer, or if the circular buffer has
+ * insufficient space for a whole DWORD, the data is transferred
+ * one byte at a time.
+ */
+ while (valid_burst_count) {
+ if (*data_empty_count - UART_BURST_SIZE < 0)
+ break;
+ if (xmit->tail > (UART_XMIT_SIZE - UART_BURST_SIZE))
+ break;
+ writel(*(unsigned int *)&xmit->buf[xmit->tail],
+ port->membase + UART_TX_BURST_FIFO);
+ *valid_byte_count -= UART_BURST_SIZE;
+ *data_empty_count -= UART_BURST_SIZE;
+ valid_burst_count -= UART_BYTE_SIZE;
+
+ xmit->tail = (xmit->tail + UART_BURST_SIZE) &
+ (UART_XMIT_SIZE - 1);
+ }
+
+ while (*valid_byte_count) {
+ if (*data_empty_count - UART_BYTE_SIZE < 0)
+ break;
+ writeb(xmit->buf[xmit->tail], port->membase +
+ UART_TX_BYTE_FIFO);
+ *data_empty_count -= UART_BYTE_SIZE;
+ *valid_byte_count -= UART_BYTE_SIZE;
+
+ /*
+ * When the tail of the circular buffer is reached, the next
+ * byte is transferred to the beginning of the buffer.
+ */
+ xmit->tail = (xmit->tail + UART_BYTE_SIZE) &
+ (UART_XMIT_SIZE - 1);
+
+ /*
+ * If there are any pending burst count, data is handled by
+ * transmitting DWORDs at a time.
+ */
+ if (valid_burst_count && (xmit->tail <
+ (UART_XMIT_SIZE - UART_BURST_SIZE)))
+ break;
+ }
+}
+
+static void pci1xxxx_tx_burst(struct uart_port *port, u32 uart_status)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ u32 valid_byte_count;
+ int data_empty_count;
+ struct circ_buf *xmit;
+
+ xmit = &port->state->xmit;
+
+ if (port->x_char) {
+ writeb(port->x_char, port->membase + UART_TX);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+
+ if ((uart_tx_stopped(port)) || (uart_circ_empty(xmit))) {
+ port->ops->stop_tx(port);
+ } else {
+ data_empty_count = (pci1xxxx_read_burst_status(port) &
+ UART_BST_STAT_TX_COUNT_MASK) >> 8;
+ do {
+ valid_byte_count = uart_circ_chars_pending(xmit);
+
+ pci1xxxx_process_write_data(port, xmit,
+ &data_empty_count,
+ &valid_byte_count);
+
+ port->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (data_empty_count && valid_byte_count);
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ /*
+ * With RPM enabled, we have to wait until the FIFO is empty before
+ * the HW can go idle. So we get here once again with empty FIFO and
+ * disable the interrupt and RPM in __stop_tx()
+ */
+ if (uart_circ_empty(xmit) && !(up->capabilities & UART_CAP_RPM))
+ port->ops->stop_tx(port);
+}
+
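pci1xxxx_process_write_data() drains the circular TX buffer in whole DWORDs and falls back to single bytes near the wrap point or when fewer than four bytes remain; the tail index wraps because UART_XMIT_SIZE is a power of two. A tiny standalone illustration of the index bookkeeping only (UART_XMIT_SIZE is assumed to be 4096, the usual PAGE_SIZE; no FIFO writes are modelled):

#include <stdio.h>

#define UART_XMIT_SIZE  4096 /* power of two, so the mask wraps the index */
#define UART_BURST_SIZE 4

int main(void)
{
    unsigned int tail = 4094;  /* example tail near the wrap point */
    unsigned int pending = 10; /* example bytes waiting to be sent */

    while (pending) {
        if (pending >= UART_BURST_SIZE &&
            tail <= UART_XMIT_SIZE - UART_BURST_SIZE) {
            /* A whole DWORD fits before the wrap: burst write. */
            tail = (tail + UART_BURST_SIZE) & (UART_XMIT_SIZE - 1);
            pending -= UART_BURST_SIZE;
        } else {
            /* Near the wrap or a short remainder: byte write. */
            tail = (tail + 1) & (UART_XMIT_SIZE - 1);
            pending--;
        }
        printf("tail=%u pending=%u\n", tail, pending);
    }
    return 0;
}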
static int pci1xxxx_handle_irq(struct uart_port *port)
{
unsigned long flags;
@@ -359,6 +495,9 @@ static int pci1xxxx_handle_irq(struct uart_port *port)
if (status & UART_BST_STAT_LSR_RX_MASK)
pci1xxxx_rx_burst(port, status);
+ if (status & UART_BST_STAT_LSR_THRE)
+ pci1xxxx_tx_burst(port, status);
+
spin_unlock_irqrestore(&port->lock, flags);
return 1;
@@ -481,6 +620,17 @@ static int pci1xxxx_setup(struct pci_dev *pdev,
port->port.flags |= UPF_FIXED_TYPE | UPF_SKIP_TEST;
port->port.type = PORT_MCHP16550A;
+ /*
+ * The 8250 core considers the prescaler value to be always 16.
+ * The MCHP ports support a downscaled mode and hence the
+ * functional UART clock can be lower (i.e. 62.5 MHz) than
+ * software expects, in order to support higher baud rates.
+ * Assign 64 MHz here to support 4 Mbps.
+ *
+ * The value itself is not really used anywhere except baud
+ * rate calculations, so we can mangle it as we wish.
+ */
+ port->port.uartclk = 64 * HZ_PER_MHZ;
port->port.set_termios = serial8250_do_set_termios;
port->port.get_divisor = pci1xxxx_get_divisor;
port->port.set_divisor = pci1xxxx_set_divisor;
@@ -594,7 +744,6 @@ static int pci1xxxx_serial_probe(struct pci_dev *pdev,
memset(&uart, 0, sizeof(uart));
uart.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT;
- uart.port.uartclk = UART_CLOCK_DEFAULT;
uart.port.dev = dev;
if (num_vectors == max_vec_reqd)
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 8ca061d3bbb92..fc9dd5d45295d 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -38,10 +38,6 @@
#include "8250.h"
-/* Nuvoton NPCM timeout register */
-#define UART_NPCM_TOR 7
-#define UART_NPCM_TOIE BIT(7) /* Timeout Interrupt Enable */
-
/*
* Debugging.
*/
@@ -1329,9 +1325,6 @@ static void autoconfig_irq(struct uart_8250_port *up)
inb_p(ICP);
}
- if (uart_console(port))
- console_lock();
-
/* forget possible initially masked and pending IRQ */
probe_irq_off(probe_irq_on());
save_mcr = serial8250_in_MCR(up);
@@ -1371,9 +1364,6 @@ static void autoconfig_irq(struct uart_8250_port *up)
if (port->flags & UPF_FOURPORT)
outb_p(save_ICP, ICP);
- if (uart_console(port))
- console_unlock();
-
port->irq = (irq > 0) ? irq : 0;
}
@@ -2235,15 +2225,6 @@ int serial8250_do_startup(struct uart_port *port)
UART_DA830_PWREMU_MGMT_FREE);
}
- if (port->type == PORT_NPCM) {
- /*
- * Nuvoton calls the scratch register 'UART_TOR' (timeout
- * register). Enable it, and set TIOC (timeout interrupt
- * comparator) to be 0x20 for correct operation.
- */
- serial_port_out(port, UART_NPCM_TOR, UART_NPCM_TOIE | 0x20);
- }
-
#ifdef CONFIG_SERIAL_8250_RSA
/*
* If this is an RSA port, see if we can kick it up to the
@@ -2545,15 +2526,6 @@ static void serial8250_shutdown(struct uart_port *port)
serial8250_do_shutdown(port);
}
-/* Nuvoton NPCM UARTs have a custom divisor calculation */
-static unsigned int npcm_get_divisor(struct uart_8250_port *up,
- unsigned int baud)
-{
- struct uart_port *port = &up->port;
-
- return DIV_ROUND_CLOSEST(port->uartclk, 16 * baud + 2) - 2;
-}
-
static unsigned int serial8250_do_get_divisor(struct uart_port *port,
unsigned int baud,
unsigned int *frac)
@@ -2598,8 +2570,6 @@ static unsigned int serial8250_do_get_divisor(struct uart_port *port,
quot = 0x8001;
else if (magic_multiplier && baud >= port->uartclk / 12)
quot = 0x8002;
- else if (up->port.type == PORT_NPCM)
- quot = npcm_get_divisor(up, baud);
else
quot = uart_get_divisor(port, baud);
@@ -2714,12 +2684,8 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
*/
void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk)
{
- struct uart_8250_port *up = up_to_u8250p(port);
struct tty_port *tport = &port->state->port;
- unsigned int baud, quot, frac = 0;
- struct ktermios *termios;
struct tty_struct *tty;
- unsigned long flags;
tty = tty_port_tty_get(tport);
if (!tty) {
@@ -2740,21 +2706,7 @@ void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk)
if (!tty_port_initialized(tport))
goto out_unlock;
- termios = &tty->termios;
-
- baud = serial8250_get_baud_rate(port, termios, NULL);
- quot = serial8250_get_divisor(port, baud, &frac);
-
- serial8250_rpm_get(up);
- uart_port_lock_irqsave(port, &flags);
-
- uart_update_timeout(port, termios->c_cflag, baud);
-
- serial8250_set_divisor(port, baud, quot, frac);
- serial_port_out(port, UART_LCR, up->lcr);
-
- uart_port_unlock_irqrestore(port, flags);
- serial8250_rpm_put(up);
+ serial8250_do_set_termios(port, &tty->termios, NULL);
out_unlock:
mutex_unlock(&tport->mutex);
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
index 77686da42ce8e..f1a51b00b1b9d 100644
--- a/drivers/tty/serial/8250/8250_pxa.c
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -92,11 +92,7 @@ static int serial_pxa_probe(struct platform_device *pdev)
struct uart_8250_port uart = {};
struct pxa8250_data *data;
struct resource *mmres;
- int irq, ret;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
+ int ret;
mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mmres)
@@ -114,21 +110,21 @@ static int serial_pxa_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = of_alias_get_id(pdev->dev.of_node, "serial");
- if (ret >= 0)
- uart.port.line = ret;
-
uart.port.type = PORT_XSCALE;
- uart.port.iotype = UPIO_MEM32;
uart.port.mapbase = mmres->start;
- uart.port.regshift = 2;
- uart.port.irq = irq;
- uart.port.fifosize = 64;
uart.port.flags = UPF_IOREMAP | UPF_SKIP_TEST | UPF_FIXED_TYPE;
uart.port.dev = &pdev->dev;
uart.port.uartclk = clk_get_rate(data->clk);
uart.port.pm = serial_pxa_pm;
uart.port.private_data = data;
+
+ ret = uart_read_port_properties(&uart.port);
+ if (ret)
+ return ret;
+
+ uart.port.iotype = UPIO_MEM32;
+ uart.port.regshift = 2;
+ uart.port.fifosize = 64;
uart.dl_write = serial_pxa_dl_write;
ret = serial8250_register_8250_port(&uart);
diff --git a/drivers/tty/serial/8250/8250_tegra.c b/drivers/tty/serial/8250/8250_tegra.c
index ba352262df75a..60a80d00d2519 100644
--- a/drivers/tty/serial/8250/8250_tegra.c
+++ b/drivers/tty/serial/8250/8250_tegra.c
@@ -57,25 +57,11 @@ static int tegra_uart_probe(struct platform_device *pdev)
port = &port8250.port;
spin_lock_init(&port->lock);
- port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_FIXED_PORT |
- UPF_FIXED_TYPE;
- port->iotype = UPIO_MEM32;
- port->regshift = 2;
+ port->flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_FIXED_TYPE;
port->type = PORT_TEGRA;
- port->irqflags |= IRQF_SHARED;
port->dev = &pdev->dev;
port->handle_break = tegra_uart_handle_break;
- ret = of_alias_get_id(pdev->dev.of_node, "serial");
- if (ret >= 0)
- port->line = ret;
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- return ret;
-
- port->irq = ret;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
@@ -88,12 +74,18 @@ static int tegra_uart_probe(struct platform_device *pdev)
port->mapbase = res->start;
port->mapsize = resource_size(res);
+ ret = uart_read_port_properties(port);
+ if (ret)
+ return ret;
+
+ port->iotype = UPIO_MEM32;
+ port->regshift = 2;
+
uart->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
if (IS_ERR(uart->rst))
return PTR_ERR(uart->rst);
- if (device_property_read_u32(&pdev->dev, "clock-frequency",
- &port->uartclk)) {
+ if (!port->uartclk) {
uart->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(uart->clk)) {
dev_err(&pdev->dev, "failed to get clock!\n");
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index 6399a38ecce2a..670d2ca0f7572 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -162,7 +162,6 @@ static int uniphier_uart_probe(struct platform_device *pdev)
struct uniphier8250_priv *priv;
struct resource *regs;
void __iomem *membase;
- int irq;
int ret;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -175,23 +174,12 @@ static int uniphier_uart_probe(struct platform_device *pdev)
if (!membase)
return -ENOMEM;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
memset(&up, 0, sizeof(up));
- ret = of_alias_get_id(dev->of_node, "serial");
- if (ret < 0) {
- dev_err(dev, "failed to get alias id\n");
- return ret;
- }
- up.port.line = ret;
-
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(dev, "failed to get clock\n");
@@ -211,7 +199,10 @@ static int uniphier_uart_probe(struct platform_device *pdev)
up.port.mapbase = regs->start;
up.port.mapsize = resource_size(regs);
up.port.membase = membase;
- up.port.irq = irq;
+
+ ret = uart_read_port_properties(&up.port);
+ if (ret)
+ return ret;
up.port.type = PORT_16550A;
up.port.iotype = UPIO_MEM32;
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 8b9a2c4902e2c..47ff50763c048 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -149,6 +149,7 @@ config SERIAL_8250_PCI
config SERIAL_8250_EXAR
tristate "8250/16550 Exar/Commtech PCI/PCIe device support"
depends on SERIAL_8250 && PCI
+ select SERIAL_8250_PCILIB
default SERIAL_8250
help
This builds support for XR17C1xx, XR17V3xx and some Commtech
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index cf2c890a560f0..2fa3fb30dc6c7 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -348,10 +348,7 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
flag = TTY_FRAME;
}
- uart_port_unlock(&uap->port);
- sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
- uart_port_lock(&uap->port);
-
+ sysrq = uart_prepare_sysrq_char(&uap->port, ch & 255);
if (!sysrq)
uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
}
@@ -1017,7 +1014,7 @@ static void pl011_dma_rx_callback(void *data)
ret = pl011_dma_rx_trigger_dma(uap);
pl011_dma_rx_chars(uap, pending, lastbuf, false);
- uart_port_unlock_irq(&uap->port);
+ uart_unlock_and_check_sysrq(&uap->port);
/*
* Do this check after we picked the DMA chars so we don't
* get some IRQ immediately from RX.
@@ -1540,11 +1537,10 @@ static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
static irqreturn_t pl011_int(int irq, void *dev_id)
{
struct uart_amba_port *uap = dev_id;
- unsigned long flags;
unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
int handled = 0;
- uart_port_lock_irqsave(&uap->port, &flags);
+ uart_port_lock(&uap->port);
status = pl011_read(uap, REG_RIS) & uap->im;
if (status) {
do {
@@ -1573,7 +1569,7 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
handled = 1;
}
- uart_port_unlock_irqrestore(&uap->port, flags);
+ uart_unlock_and_check_sysrq(&uap->port);
return IRQ_RETVAL(handled);
}
@@ -2322,13 +2318,10 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
clk_enable(uap->clk);
- local_irq_save(flags);
- if (uap->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(&uap->port);
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(&uap->port, &flags);
else
- uart_port_lock(&uap->port);
+ uart_port_lock_irqsave(&uap->port, &flags);
/*
* First save the CR then disable the interrupts
@@ -2354,8 +2347,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
pl011_write(old_cr, uap, REG_CR);
if (locked)
- uart_port_unlock(&uap->port);
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
clk_disable(uap->clk);
}
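The pl011 console path, and the ar933x, bcm63xx and lpc32xx ones further below, all move to the same shape: the lock/IRQ-save goes through the uart_port helpers and the old port->sysrq special case disappears, because sysrq characters are now only queued under the port lock and handled after it is released. A condensed sketch of the resulting console write (foo_console_port() and foo_console_putchar() are hypothetical driver-specific helpers):

static void foo_console_write(struct console *co, const char *s, unsigned int count)
{
    struct uart_port *port = foo_console_port(co); /* driver-specific lookup */
    unsigned long flags;
    int locked = 1;

    if (oops_in_progress)
        locked = uart_port_trylock_irqsave(port, &flags);
    else
        uart_port_lock_irqsave(port, &flags);

    uart_console_write(port, s, count, foo_console_putchar);

    if (locked)
        uart_port_unlock_irqrestore(port, flags);
}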
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 8d09ace062e59..7790cbc57391a 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -378,7 +378,7 @@ static void ar933x_uart_rx_chars(struct ar933x_uart_port *up)
up->port.icount.rx++;
ch = rdata & AR933X_UART_DATA_TX_RX_MASK;
- if (uart_handle_sysrq_char(&up->port, ch))
+ if (uart_prepare_sysrq_char(&up->port, ch))
continue;
if ((up->port.ignore_status_mask & AR933X_DUMMY_STATUS_RD) == 0)
@@ -468,7 +468,7 @@ static irqreturn_t ar933x_uart_interrupt(int irq, void *dev_id)
ar933x_uart_tx_chars(up);
}
- uart_port_unlock(&up->port);
+ uart_unlock_and_check_sysrq(&up->port);
return IRQ_HANDLED;
}
@@ -627,14 +627,10 @@ static void ar933x_uart_console_write(struct console *co, const char *s,
unsigned int int_en;
int locked = 1;
- local_irq_save(flags);
-
- if (up->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(&up->port);
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(&up->port, &flags);
else
- uart_port_lock(&up->port);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* First save the IER then disable the interrupts
@@ -654,9 +650,7 @@ static void ar933x_uart_console_write(struct console *co, const char *s,
ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_ALLINTS);
if (locked)
- uart_port_unlock(&up->port);
-
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int ar933x_uart_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index a3cefa153456d..34801a6f300b6 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -285,10 +285,9 @@ static void bcm_uart_do_rx(struct uart_port *port)
flag = TTY_PARITY;
}
- if (uart_handle_sysrq_char(port, c))
+ if (uart_prepare_sysrq_char(port, c))
continue;
-
if ((cstat & port->ignore_status_mask) == 0)
tty_insert_flip_char(tty_port, c, flag);
@@ -353,7 +352,7 @@ static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id)
estat & UART_EXTINP_DCD_MASK);
}
- uart_port_unlock(port);
+ uart_unlock_and_check_sysrq(port);
return IRQ_HANDLED;
}
@@ -703,20 +702,14 @@ static void bcm_console_write(struct console *co, const char *s,
{
struct uart_port *port;
unsigned long flags;
- int locked;
+ int locked = 1;
port = &ports[co->index];
- local_irq_save(flags);
- if (port->sysrq) {
- /* bcm_uart_interrupt() already took the lock */
- locked = 0;
- } else if (oops_in_progress) {
- locked = uart_port_trylock(port);
- } else {
- uart_port_lock(port);
- locked = 1;
- }
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+ uart_port_lock_irqsave(port, &flags);
/* call helper to deal with \r\n */
uart_console_write(port, s, count, bcm_console_putchar);
@@ -725,8 +718,7 @@ static void bcm_console_write(struct console *co, const char *s,
wait_for_xmitr(port);
if (locked)
- uart_port_unlock(port);
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/*
diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c
index 52c87876a88de..5426322b5f0ca 100644
--- a/drivers/tty/serial/fsl_linflexuart.c
+++ b/drivers/tty/serial/fsl_linflexuart.c
@@ -837,7 +837,6 @@ static int linflex_probe(struct platform_device *pdev)
return ret;
sport->dev = &pdev->dev;
- sport->type = PORT_LINFLEXUART;
sport->iotype = UPIO_MEM;
sport->irq = ret;
sport->ops = &linflex_pops;
diff --git a/drivers/tty/serial/jsm/jsm_cls.c b/drivers/tty/serial/jsm/jsm_cls.c
index 1eda48964c0b6..ddbd42c09637c 100644
--- a/drivers/tty/serial/jsm/jsm_cls.c
+++ b/drivers/tty/serial/jsm/jsm_cls.c
@@ -395,7 +395,6 @@ static void cls_copy_data_from_uart_to_queue(struct jsm_channel *ch)
* which in this case is the break signal.
*/
if (linestatus & error_mask) {
- linestatus = 0;
readb(&ch->ch_cls_uart->txrx);
continue;
}
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index ec20329f06036..3e4ac46de1bc3 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -136,20 +136,16 @@ static void lpc32xx_hsuart_console_write(struct console *co, const char *s,
int locked = 1;
touch_nmi_watchdog();
- local_irq_save(flags);
- if (up->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(&up->port);
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(&up->port, &flags);
else
- uart_port_lock(&up->port);
+ uart_port_lock_irqsave(&up->port, &flags);
uart_console_write(&up->port, s, count, lpc32xx_hsuart_console_putchar);
wait_for_xmit_empty(&up->port);
if (locked)
- uart_port_unlock(&up->port);
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int __init lpc32xx_hsuart_console_setup(struct console *co,
@@ -233,8 +229,6 @@ static unsigned int __serial_get_clock_div(unsigned long uartclk,
hsu_rate++;
}
- if (hsu_rate > 0xFF)
- hsu_rate = 0xFF;
return goodrate;
}
@@ -268,7 +262,8 @@ static void __serial_lpc32xx_rx(struct uart_port *port)
tty_insert_flip_char(tport, 0, TTY_FRAME);
}
- tty_insert_flip_char(tport, (tmp & 0xFF), flag);
+ if (!uart_prepare_sysrq_char(port, tmp & 0xff))
+ tty_insert_flip_char(tport, (tmp & 0xFF), flag);
tmp = readl(LPC32XX_HSUART_FIFO(port->membase));
}
@@ -333,7 +328,7 @@ static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id)
__serial_lpc32xx_tx(port);
}
- uart_port_unlock(port);
+ uart_unlock_and_check_sysrq(port);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 10bf6d75bf9ee..14dd9cfaa9f76 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -30,6 +30,7 @@
#define MAX310X_MAJOR 204
#define MAX310X_MINOR 209
#define MAX310X_UART_NRMAX 16
+#define MAX310X_MAX_PORTS 4 /* Maximum number of UART ports per IC. */
/* MAX310X register definitions */
#define MAX310X_RHR_REG (0x00) /* RX FIFO */
@@ -66,6 +67,7 @@
#define MAX310X_BRGDIVMSB_REG (0x1d) /* Baud rate divisor MSB */
#define MAX310X_CLKSRC_REG (0x1e) /* Clock source */
#define MAX310X_REG_1F (0x1f)
+#define MAX310X_EXTREG_START (0x20) /* Only relevant in SPI mode. */
#define MAX310X_REVID_REG MAX310X_REG_1F /* Revision ID */
@@ -73,9 +75,9 @@
#define MAX310X_GLOBALCMD_REG MAX310X_REG_1F /* Global Command (WO) */
/* Extended registers */
-#define MAX310X_SPI_REVID_EXTREG MAX310X_REG_05 /* Revision ID */
-#define MAX310X_I2C_REVID_EXTREG (0x25) /* Revision ID */
-
+#define MAX310X_REVID_EXTREG (0x25) /* Revision ID
+ * (extended addressing space)
+ */
/* IRQ register bits */
#define MAX310X_IRQ_LSR_BIT (1 << 0) /* LSR interrupt */
#define MAX310X_IRQ_SPCHR_BIT (1 << 1) /* Special char interrupt */
@@ -160,14 +162,14 @@
#define MAX310X_IRDA_SIR_BIT (1 << 1) /* SIR mode enable */
/* Flow control trigger level register masks */
-#define MAX310X_FLOWLVL_HALT_MASK (0x000f) /* Flow control halt level */
-#define MAX310X_FLOWLVL_RES_MASK (0x00f0) /* Flow control resume level */
+#define MAX310X_FLOWLVL_HALT_MASK GENMASK(3, 0) /* Flow control halt level */
+#define MAX310X_FLOWLVL_RES_MASK GENMASK(7, 4) /* Flow control resume level */
#define MAX310X_FLOWLVL_HALT(words) ((words / 8) & 0x0f)
#define MAX310X_FLOWLVL_RES(words) (((words / 8) & 0x0f) << 4)
/* FIFO interrupt trigger level register masks */
-#define MAX310X_FIFOTRIGLVL_TX_MASK (0x0f) /* TX FIFO trigger level */
-#define MAX310X_FIFOTRIGLVL_RX_MASK (0xf0) /* RX FIFO trigger level */
+#define MAX310X_FIFOTRIGLVL_TX_MASK GENMASK(3, 0) /* TX FIFO trigger level */
+#define MAX310X_FIFOTRIGLVL_RX_MASK GENMASK(7, 4) /* RX FIFO trigger level */
#define MAX310X_FIFOTRIGLVL_TX(words) ((words / 8) & 0x0f)
#define MAX310X_FIFOTRIGLVL_RX(words) (((words / 8) & 0x0f) << 4)
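The mask rewrites are purely notational: GENMASK(h, l) covers bits l..h inclusive, so GENMASK(3, 0) is 0x0f and GENMASK(7, 4) is 0xf0, matching the literals replaced here (and GENMASK(7, 3) is 0xf8 for the REV mask changed further down). A standalone check (GENMASK is re-stated in a simplified unsigned-int form; the kernel macro is width-generic):

#include <assert.h>
#include <stdio.h>

/* Simplified GENMASK() for values that fit in unsigned int. */
#define GENMASK(h, l) (((~0U) >> (31 - (h))) & ((~0U) << (l)))

int main(void)
{
    assert(GENMASK(3, 0) == 0x0f); /* MAX310X_FLOWLVL_HALT_MASK */
    assert(GENMASK(7, 4) == 0xf0); /* MAX310X_FLOWLVL_RES_MASK */
    assert(GENMASK(7, 3) == 0xf8); /* MAX310x_REV_MASK */
    printf("GENMASK(7, 6) = 0x%02x\n", GENMASK(7, 6)); /* 0xc0 */
    return 0;
}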
@@ -177,7 +179,8 @@
#define MAX310X_FLOWCTRL_GPIADDR_BIT (1 << 2) /* Enables that GPIO inputs
* are used in conjunction with
* XOFF2 for definition of
- * special character */
+ * special character
+ */
#define MAX310X_FLOWCTRL_SWFLOWEN_BIT (1 << 3) /* Auto SW flow ctrl enable */
#define MAX310X_FLOWCTRL_SWFLOW0_BIT (1 << 4) /* SWFLOW bit 0 */
#define MAX310X_FLOWCTRL_SWFLOW1_BIT (1 << 5) /* SWFLOW bit 1
@@ -214,8 +217,8 @@
*/
/* PLL configuration register masks */
-#define MAX310X_PLLCFG_PREDIV_MASK (0x3f) /* PLL predivision value */
-#define MAX310X_PLLCFG_PLLFACTOR_MASK (0xc0) /* PLL multiplication factor */
+#define MAX310X_PLLCFG_PREDIV_MASK GENMASK(5, 0) /* PLL predivision value */
+#define MAX310X_PLLCFG_PLLFACTOR_MASK GENMASK(7, 6) /* PLL multiplication factor */
/* Baud rate generator configuration register bits */
#define MAX310X_BRGCFG_2XMODE_BIT (1 << 4) /* Double baud rate */
@@ -234,7 +237,7 @@
/* Misc definitions */
#define MAX310X_FIFO_SIZE (128)
-#define MAX310x_REV_MASK (0xf8)
+#define MAX310x_REV_MASK GENMASK(7, 3)
#define MAX310X_WRITE_BIT 0x80
/* Port startup definitions */
@@ -257,20 +260,21 @@
struct max310x_if_cfg {
int (*extended_reg_enable)(struct device *dev, bool enable);
-
- unsigned int rev_id_reg;
+ u8 rev_id_offset;
};
struct max310x_devtype {
struct {
unsigned short min;
unsigned short max;
- } slave_addr;
- char name[9];
+ } slave_addr; /* Relevant only in I2C mode. */
int nr;
+ char name[9];
u8 mode1;
- int (*detect)(struct device *);
- void (*power)(struct uart_port *, int);
+ u8 rev_id_val;
+ u8 rev_id_reg; /* Relevant only if rev_id_val is defined. */
+ u8 power_reg; /* Register address for power/sleep control. */
+ u8 power_bit; /* Bit for sleep or power-off mode (active high). */
};
struct max310x_one {
@@ -331,62 +335,52 @@ static void max310x_port_update(struct uart_port *port, u8 reg, u8 mask, u8 val)
regmap_update_bits(one->regmap, reg, mask, val);
}
-static int max3107_detect(struct device *dev)
+static int max310x_detect(struct device *dev)
{
struct max310x_port *s = dev_get_drvdata(dev);
unsigned int val = 0;
int ret;
- ret = regmap_read(s->regmap, MAX310X_REVID_REG, &val);
- if (ret)
- return ret;
+ /* Check if variant supports REV ID register: */
+ if (s->devtype->rev_id_val) {
+ u8 rev_id_reg = s->devtype->rev_id_reg;
- if (((val & MAX310x_REV_MASK) != MAX3107_REV_ID)) {
- dev_err(dev,
- "%s ID 0x%02x does not match\n", s->devtype->name, val);
- return -ENODEV;
- }
-
- return 0;
-}
+ /* Check if REV ID is in extended addressing space: */
+ if (s->devtype->rev_id_reg >= MAX310X_EXTREG_START) {
+ ret = s->if_cfg->extended_reg_enable(dev, true);
+ if (ret)
+ return ret;
-static int max3108_detect(struct device *dev)
-{
- struct max310x_port *s = dev_get_drvdata(dev);
- unsigned int val = 0;
- int ret;
-
- /* MAX3108 have not REV ID register, we just check default value
- * from clocksource register to make sure everything works.
- */
- ret = regmap_read(s->regmap, MAX310X_CLKSRC_REG, &val);
- if (ret)
- return ret;
-
- if (val != (MAX310X_CLKSRC_EXTCLK_BIT | MAX310X_CLKSRC_PLLBYP_BIT)) {
- dev_err(dev, "%s not present\n", s->devtype->name);
- return -ENODEV;
- }
+ /* Adjust REV ID extended addressing space address: */
+ if (s->if_cfg->rev_id_offset)
+ rev_id_reg -= s->if_cfg->rev_id_offset;
+ }
- return 0;
-}
+ regmap_read(s->regmap, rev_id_reg, &val);
-static int max3109_detect(struct device *dev)
-{
- struct max310x_port *s = dev_get_drvdata(dev);
- unsigned int val = 0;
- int ret;
-
- ret = s->if_cfg->extended_reg_enable(dev, true);
- if (ret)
- return ret;
+ if (s->devtype->rev_id_reg >= MAX310X_EXTREG_START) {
+ ret = s->if_cfg->extended_reg_enable(dev, false);
+ if (ret)
+ return ret;
+ }
- regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
- s->if_cfg->extended_reg_enable(dev, false);
- if (((val & MAX310x_REV_MASK) != MAX3109_REV_ID)) {
- dev_err(dev,
- "%s ID 0x%02x does not match\n", s->devtype->name, val);
- return -ENODEV;
+ if (((val & MAX310x_REV_MASK) != s->devtype->rev_id_val))
+ return dev_err_probe(dev, -ENODEV,
+ "%s ID 0x%02x does not match\n",
+ s->devtype->name, val);
+ } else {
+ /*
+ * For variants without a REV ID register, just check the default
+ * value of the clocksource register to make sure everything works.
+ */
+ ret = regmap_read(s->regmap, MAX310X_CLKSRC_REG, &val);
+ if (ret)
+ return ret;
+
+ if (val != (MAX310X_CLKSRC_EXTCLK_BIT | MAX310X_CLKSRC_PLLBYP_BIT))
+ return dev_err_probe(dev, -ENODEV,
+ "%s not present\n",
+ s->devtype->name);
}
return 0;
@@ -394,39 +388,10 @@ static int max3109_detect(struct device *dev)
static void max310x_power(struct uart_port *port, int on)
{
- max310x_port_update(port, MAX310X_MODE1_REG,
- MAX310X_MODE1_FORCESLEEP_BIT,
- on ? 0 : MAX310X_MODE1_FORCESLEEP_BIT);
- if (on)
- msleep(50);
-}
-
-static int max14830_detect(struct device *dev)
-{
- struct max310x_port *s = dev_get_drvdata(dev);
- unsigned int val = 0;
- int ret;
-
- ret = s->if_cfg->extended_reg_enable(dev, true);
- if (ret)
- return ret;
-
- regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
- s->if_cfg->extended_reg_enable(dev, false);
- if (((val & MAX310x_REV_MASK) != MAX14830_REV_ID)) {
- dev_err(dev,
- "%s ID 0x%02x does not match\n", s->devtype->name, val);
- return -ENODEV;
- }
-
- return 0;
-}
+ struct max310x_port *s = dev_get_drvdata(port->dev);
-static void max14830_power(struct uart_port *port, int on)
-{
- max310x_port_update(port, MAX310X_BRGCFG_REG,
- MAX14830_BRGCFG_CLKDIS_BIT,
- on ? 0 : MAX14830_BRGCFG_CLKDIS_BIT);
+ max310x_port_update(port, s->devtype->power_reg, s->devtype->power_bit,
+ on ? 0 : s->devtype->power_bit);
if (on)
msleep(50);
}
@@ -435,8 +400,10 @@ static const struct max310x_devtype max3107_devtype = {
.name = "MAX3107",
.nr = 1,
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT | MAX310X_MODE1_IRQSEL_BIT,
- .detect = max3107_detect,
- .power = max310x_power,
+ .rev_id_val = MAX3107_REV_ID,
+ .rev_id_reg = MAX310X_REVID_REG,
+ .power_reg = MAX310X_MODE1_REG,
+ .power_bit = MAX310X_MODE1_FORCESLEEP_BIT,
.slave_addr = {
.min = 0x2c,
.max = 0x2f,
@@ -447,8 +414,10 @@ static const struct max310x_devtype max3108_devtype = {
.name = "MAX3108",
.nr = 1,
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
- .detect = max3108_detect,
- .power = max310x_power,
+ .rev_id_val = 0, /* Unsupported. */
+ .rev_id_reg = 0, /* Irrelevant when rev_id_val is not defined. */
+ .power_reg = MAX310X_MODE1_REG,
+ .power_bit = MAX310X_MODE1_FORCESLEEP_BIT,
.slave_addr = {
.min = 0x60,
.max = 0x6f,
@@ -459,8 +428,10 @@ static const struct max310x_devtype max3109_devtype = {
.name = "MAX3109",
.nr = 2,
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
- .detect = max3109_detect,
- .power = max310x_power,
+ .rev_id_val = MAX3109_REV_ID,
+ .rev_id_reg = MAX310X_REVID_EXTREG,
+ .power_reg = MAX310X_MODE1_REG,
+ .power_bit = MAX310X_MODE1_FORCESLEEP_BIT,
.slave_addr = {
.min = 0x60,
.max = 0x6f,
@@ -471,8 +442,10 @@ static const struct max310x_devtype max14830_devtype = {
.name = "MAX14830",
.nr = 4,
.mode1 = MAX310X_MODE1_IRQSEL_BIT,
- .detect = max14830_detect,
- .power = max14830_power,
+ .rev_id_val = MAX14830_REV_ID,
+ .rev_id_reg = MAX310X_REVID_EXTREG,
+ .power_reg = MAX310X_BRGCFG_REG,
+ .power_bit = MAX14830_BRGCFG_CLKDIS_BIT,
.slave_addr = {
.min = 0x60,
.max = 0x6f,
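With detect and power now table-driven, supporting a further variant reduces to filling in these fields; a minimal sketch for a hypothetical part (the name, REV ID value and address window below are illustrative, not taken from this patch):

static const struct max310x_devtype maxxxxx_devtype = {
	.name = "MAXxxxx",			/* hypothetical part */
	.nr = 2,
	.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
	.rev_id_val = 0xb0,			/* illustrative value */
	.rev_id_reg = MAX310X_REVID_EXTREG,
	.power_reg = MAX310X_MODE1_REG,
	.power_bit = MAX310X_MODE1_FORCESLEEP_BIT,
	.slave_addr = {
		.min = 0x60,
		.max = 0x6f,
	},
};

Leaving rev_id_val at 0 instead makes max310x_detect() fall back to the MAX3108-style clock source check shown above.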
@@ -490,10 +463,8 @@ static bool max310x_reg_writeable(struct device *dev, unsigned int reg)
case MAX310X_RXFIFOLVL_REG:
return false;
default:
- break;
+ return true;
}
-
- return true;
}
static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
@@ -512,10 +483,8 @@ static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
case MAX310X_REG_1F:
return true;
default:
- break;
+ return false;
}
-
- return false;
}
static bool max310x_reg_precious(struct device *dev, unsigned int reg)
@@ -527,10 +496,8 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg)
case MAX310X_STS_IRQSTS_REG:
return true;
default:
- break;
+ return false;
}
-
- return false;
}
static bool max310x_reg_noinc(struct device *dev, unsigned int reg)
@@ -689,7 +656,8 @@ static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
u8 ch, flag;
if (port->read_status_mask == MAX310X_LSR_RXOVR_BIT) {
- /* We are just reading, happily ignoring any error conditions.
+ /*
+ * We are just reading, happily ignoring any error conditions.
* Break condition, parity checking, framing errors -- they
* are all ignored. That means that we can do a batch-read.
*
@@ -698,7 +666,7 @@ static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
* that the LSR register applies to the "current" character.
* That's also the reason why we cannot do batched reads when
* asked to check the individual statuses.
- * */
+ */
sts = max310x_port_read(port, MAX310X_LSR_IRQSTS_REG);
max310x_batch_read(port, one->rx_buf, rxlen);
@@ -802,8 +770,10 @@ static void max310x_handle_tx(struct uart_port *port)
to_send = (to_send > txlen) ? txlen : to_send;
if (until_end < to_send) {
- /* It's a circ buffer -- wrap around.
- * We could do that in one SPI transaction, but meh. */
+ /*
+ * It's a circ buffer -- wrap around.
+ * We could do that in one SPI transaction, but meh.
+ */
max310x_batch_write(port, xmit->buf + xmit->tail, until_end);
max310x_batch_write(port, xmit->buf, to_send - until_end);
} else {
@@ -848,6 +818,7 @@ static irqreturn_t max310x_port_irq(struct max310x_port *s, int portno)
if (ists & MAX310X_IRQ_TXEMPTY_BIT)
max310x_start_tx(port);
} while (1);
+
return res;
}
@@ -892,7 +863,8 @@ static unsigned int max310x_tx_empty(struct uart_port *port)
static unsigned int max310x_get_mctrl(struct uart_port *port)
{
- /* DCD and DSR are not wired and CTS/RTS is handled automatically
+ /*
+ * DCD and DSR are not wired and CTS/RTS is handled automatically
* so just indicate DSR and CAR asserted
*/
return TIOCM_DSR | TIOCM_CAR;
@@ -984,7 +956,8 @@ static void max310x_set_termios(struct uart_port *port,
max310x_port_write(port, MAX310X_XON1_REG, termios->c_cc[VSTART]);
max310x_port_write(port, MAX310X_XOFF1_REG, termios->c_cc[VSTOP]);
- /* Disable transmitter before enabling AutoCTS or auto transmitter
+ /*
+ * Disable transmitter before enabling AutoCTS or auto transmitter
* flow control
*/
if (termios->c_cflag & CRTSCTS || termios->c_iflag & IXOFF) {
@@ -1011,7 +984,8 @@ static void max310x_set_termios(struct uart_port *port,
}
max310x_port_write(port, MAX310X_FLOWCTRL_REG, flow);
- /* Enable transmitter after disabling AutoCTS and auto transmitter
+ /*
+ * Enable transmitter after disabling AutoCTS and auto transmitter
* flow control
*/
if (!(termios->c_cflag & CRTSCTS) && !(termios->c_iflag & IXOFF)) {
@@ -1072,10 +1046,9 @@ static int max310x_rs485_config(struct uart_port *port, struct ktermios *termios
static int max310x_startup(struct uart_port *port)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
unsigned int val;
- s->devtype->power(port, 1);
+ max310x_power(port, 1);
/* Configure MODE1 register */
max310x_port_update(port, MAX310X_MODE1_REG,
@@ -1103,8 +1076,11 @@ static int max310x_startup(struct uart_port *port)
MAX310X_MODE2_ECHOSUPR_BIT);
}
- /* Configure flow control levels */
- /* Flow control halt level 96, resume level 48 */
+ /*
+ * Configure flow control levels:
+ * resume: 48
+ * halt: 96
+ */
max310x_port_write(port, MAX310X_FLOWLVL_REG,
MAX310X_FLOWLVL_RES(48) | MAX310X_FLOWLVL_HALT(96));
@@ -1120,12 +1096,10 @@ static int max310x_startup(struct uart_port *port)
static void max310x_shutdown(struct uart_port *port)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
-
/* Disable all interrupts */
max310x_port_write(port, MAX310X_IRQEN_REG, 0);
- s->devtype->power(port, 0);
+ max310x_power(port, 0);
}
static const char *max310x_type(struct uart_port *port)
@@ -1187,7 +1161,7 @@ static int __maybe_unused max310x_suspend(struct device *dev)
for (i = 0; i < s->devtype->nr; i++) {
uart_suspend_port(&max310x_uart, &s->p[i].port);
- s->devtype->power(&s->p[i].port, 0);
+ max310x_power(&s->p[i].port, 0);
}
return 0;
@@ -1199,7 +1173,7 @@ static int __maybe_unused max310x_resume(struct device *dev)
int i;
for (i = 0; i < s->devtype->nr; i++) {
- s->devtype->power(&s->p[i].port, 1);
+ max310x_power(&s->p[i].port, 1);
uart_resume_port(&max310x_uart, &s->p[i].port);
}
@@ -1209,7 +1183,7 @@ static int __maybe_unused max310x_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(max310x_pm_ops, max310x_suspend, max310x_resume);
#ifdef CONFIG_GPIOLIB
-static int max310x_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int max310x_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
unsigned int val;
struct max310x_port *s = gpiochip_get_data(chip);
@@ -1220,7 +1194,7 @@ static int max310x_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!((val >> 4) & (1 << (offset % 4)));
}
-static void max310x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static void max310x_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct max310x_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[offset / 4].port;
@@ -1229,7 +1203,7 @@ static void max310x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
value ? 1 << (offset % 4) : 0);
}
-static int max310x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int max310x_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
struct max310x_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[offset / 4].port;
@@ -1240,7 +1214,7 @@ static int max310x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
}
static int max310x_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
+ unsigned int offset, int value)
{
struct max310x_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[offset / 4].port;
@@ -1296,10 +1270,9 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
/* Alloc port structure */
s = devm_kzalloc(dev, struct_size(s, p, devtype->nr), GFP_KERNEL);
- if (!s) {
- dev_err(dev, "Error allocating port structure\n");
- return -ENOMEM;
- }
+ if (!s)
+ return dev_err_probe(dev, -ENOMEM,
+ "Error allocating port structure\n");
/* Always ask for fixed clock rate from a property. */
device_property_read_u32(dev, "clock-frequency", &uartclk);
@@ -1320,8 +1293,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
if (freq == 0)
freq = uartclk;
if (freq == 0) {
- dev_err(dev, "Cannot get clock rate\n");
- ret = -EINVAL;
+ ret = dev_err_probe(dev, -EINVAL, "Cannot get clock rate\n");
goto out_clk;
}
@@ -1345,7 +1317,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
dev_set_drvdata(dev, s);
/* Check device to ensure we are talking to what we expect */
- ret = devtype->detect(dev);
+ ret = max310x_detect(dev);
if (ret)
goto out_clk;
@@ -1427,14 +1399,13 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
/* Register port */
ret = uart_add_one_port(&max310x_uart, &s->p[i].port);
- if (ret) {
- s->p[i].port.dev = NULL;
+ if (ret)
goto out_uart;
- }
+
set_bit(line, max310x_lines);
/* Go to suspend mode */
- devtype->power(&s->p[i].port, 0);
+ max310x_power(&s->p[i].port, 0);
}
#ifdef CONFIG_GPIOLIB
@@ -1461,14 +1432,12 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
if (!ret)
return 0;
- dev_err(dev, "Unable to reguest IRQ %i\n", irq);
+ dev_err(dev, "Unable to request IRQ %i\n", irq);
out_uart:
for (i = 0; i < devtype->nr; i++) {
- if (s->p[i].port.dev) {
+ if (test_and_clear_bit(s->p[i].port.line, max310x_lines))
uart_remove_one_port(&max310x_uart, &s->p[i].port);
- clear_bit(s->p[i].port.line, max310x_lines);
- }
}
out_clk:
@@ -1486,9 +1455,11 @@ static void max310x_remove(struct device *dev)
cancel_work_sync(&s->p[i].tx_work);
cancel_work_sync(&s->p[i].md_work);
cancel_work_sync(&s->p[i].rs_work);
- uart_remove_one_port(&max310x_uart, &s->p[i].port);
- clear_bit(s->p[i].port.line, max310x_lines);
- s->devtype->power(&s->p[i].port, 0);
+
+ if (test_and_clear_bit(s->p[i].port.line, max310x_lines))
+ uart_remove_one_port(&max310x_uart, &s->p[i].port);
+
+ max310x_power(&s->p[i].port, 0);
}
clk_disable_unprepare(s->clk);
@@ -1518,6 +1489,19 @@ static struct regmap_config regcfg = {
.max_raw_write = MAX310X_FIFO_SIZE,
};
+static const char *max310x_regmap_name(u8 port_id)
+{
+ switch (port_id) {
+ case 0: return "port0";
+ case 1: return "port1";
+ case 2: return "port2";
+ case 3: return "port3";
+ default:
+ WARN_ON(true);
+ return NULL;
+ }
+}
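The per-port names exist because the driver creates one regmap per UART on the same parent device; giving each config a distinct name keeps those instances apart (regmap uses the name, among other things, for its per-map debugfs directories - a hedged rationale, not spelled out in the patch). Minimal usage, as in the probe loops below:

	regcfg.name = max310x_regmap_name(i);	/* "port0" ... "port3" */
	regmaps[i] = devm_regmap_init_spi(spi, &regcfg);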
+
#ifdef CONFIG_SPI_MASTER
static int max310x_spi_extended_reg_enable(struct device *dev, bool enable)
{
@@ -1529,13 +1513,13 @@ static int max310x_spi_extended_reg_enable(struct device *dev, bool enable)
static const struct max310x_if_cfg __maybe_unused max310x_spi_if_cfg = {
.extended_reg_enable = max310x_spi_extended_reg_enable,
- .rev_id_reg = MAX310X_SPI_REVID_EXTREG,
+ .rev_id_offset = MAX310X_EXTREG_START,
};
static int max310x_spi_probe(struct spi_device *spi)
{
const struct max310x_devtype *devtype;
- struct regmap *regmaps[4];
+ struct regmap *regmaps[MAX310X_MAX_PORTS];
unsigned int i;
int ret;
@@ -1547,12 +1531,14 @@ static int max310x_spi_probe(struct spi_device *spi)
if (ret)
return ret;
- devtype = device_get_match_data(&spi->dev);
+ devtype = spi_get_device_match_data(spi);
if (!devtype)
- devtype = (struct max310x_devtype *)spi_get_device_id(spi)->driver_data;
+ return dev_err_probe(&spi->dev, -ENODEV, "Failed to match device\n");
for (i = 0; i < devtype->nr; i++) {
u8 port_mask = i * 0x20;
+
+ regcfg.name = max310x_regmap_name(i);
regcfg.read_flag_mask = port_mask;
regcfg.write_flag_mask = port_mask | MAX310X_WRITE_BIT;
regmaps[i] = devm_regmap_init_spi(spi, &regcfg);
@@ -1600,7 +1586,7 @@ static struct regmap_config regcfg_i2c = {
.writeable_reg = max310x_reg_writeable,
.volatile_reg = max310x_reg_volatile,
.precious_reg = max310x_reg_precious,
- .max_register = MAX310X_I2C_REVID_EXTREG,
+ .max_register = MAX310X_REVID_EXTREG,
.writeable_noinc_reg = max310x_reg_noinc,
.readable_noinc_reg = max310x_reg_noinc,
.max_raw_read = MAX310X_FIFO_SIZE,
@@ -1609,7 +1595,7 @@ static struct regmap_config regcfg_i2c = {
static const struct max310x_if_cfg max310x_i2c_if_cfg = {
.extended_reg_enable = max310x_i2c_extended_reg_enable,
- .rev_id_reg = MAX310X_I2C_REVID_EXTREG,
+ .rev_id_offset = 0, /* No offset in I2C mode. */
};
static unsigned short max310x_i2c_slave_addr(unsigned short addr,
@@ -1619,10 +1605,10 @@ static unsigned short max310x_i2c_slave_addr(unsigned short addr,
* For MAX14830 and MAX3109, the slave address depends on what the
* A0 and A1 pins are tied to.
* See Table I2C Address Map of the datasheet.
- * Based on that table, the following formulas were determined.
- * UART1 - UART0 = 0x10
- * UART2 - UART1 = 0x20 + 0x10
- * UART3 - UART2 = 0x10
+ * Based on that table, the following formulas were determined:
+ * UART1 - UART0 = 0x10
+ * UART2 - UART1 = 0x20 + 0x10
+ * UART3 - UART2 = 0x10
*/
addr -= nr * 0x10;
@@ -1635,20 +1621,24 @@ static unsigned short max310x_i2c_slave_addr(unsigned short addr,
static int max310x_i2c_probe(struct i2c_client *client)
{
- const struct max310x_devtype *devtype =
- device_get_match_data(&client->dev);
+ const struct max310x_devtype *devtype;
struct i2c_client *port_client;
- struct regmap *regmaps[4];
+ struct regmap *regmaps[MAX310X_MAX_PORTS];
unsigned int i;
u8 port_addr;
+ devtype = i2c_get_match_data(client);
+ if (!devtype)
+ return dev_err_probe(&client->dev, -ENODEV, "Failed to match device\n");
+
if (client->addr < devtype->slave_addr.min ||
- client->addr > devtype->slave_addr.max)
+ client->addr > devtype->slave_addr.max)
return dev_err_probe(&client->dev, -EINVAL,
"Slave addr 0x%x outside of range [0x%x, 0x%x]\n",
client->addr, devtype->slave_addr.min,
devtype->slave_addr.max);
+ regcfg_i2c.name = max310x_regmap_name(0);
regmaps[0] = devm_regmap_init_i2c(client, &regcfg_i2c);
for (i = 1; i < devtype->nr; i++) {
@@ -1657,6 +1647,7 @@ static int max310x_i2c_probe(struct i2c_client *client)
client->adapter,
port_addr);
+ regcfg_i2c.name = max310x_regmap_name(i);
regmaps[i] = devm_regmap_init_i2c(port_client, &regcfg_i2c);
}
@@ -1669,6 +1660,15 @@ static void max310x_i2c_remove(struct i2c_client *client)
max310x_remove(&client->dev);
}
+static const struct i2c_device_id max310x_i2c_id_table[] = {
+ { "max3107", (kernel_ulong_t)&max3107_devtype, },
+ { "max3108", (kernel_ulong_t)&max3108_devtype, },
+ { "max3109", (kernel_ulong_t)&max3109_devtype, },
+ { "max14830", (kernel_ulong_t)&max14830_devtype, },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max310x_i2c_id_table);
+
static struct i2c_driver max310x_i2c_driver = {
.driver = {
.name = MAX310X_NAME,
@@ -1677,6 +1677,7 @@ static struct i2c_driver max310x_i2c_driver = {
},
.probe = max310x_i2c_probe,
.remove = max310x_i2c_remove,
+ .id_table = max310x_i2c_id_table,
};
#endif
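Registering the I2C id table is what lets the probe above stay this small: i2c_get_match_data() (and spi_get_device_match_data() on the SPI side) return the DT/ACPI match data when present and otherwise fall back to the id table's driver_data, while MODULE_DEVICE_TABLE() provides the module aliases for autoloading. A condensed sketch of the shared probe shape:

	devtype = i2c_get_match_data(client);
	if (!devtype)
		return dev_err_probe(&client->dev, -ENODEV,
				     "Failed to match device\n");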
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index 8690a45239e09..b0604d6da0257 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -470,33 +470,6 @@ static struct mcf_uart mcf_ports[4];
#if defined(CONFIG_SERIAL_MCF_CONSOLE)
/****************************************************************************/
-int __init early_mcf_setup(struct mcf_platform_uart *platp)
-{
- struct uart_port *port;
- int i;
-
- for (i = 0; ((i < MCF_MAXPORTS) && (platp[i].mapbase)); i++) {
- port = &mcf_ports[i].port;
-
- port->line = i;
- port->type = PORT_MCF;
- port->mapbase = platp[i].mapbase;
- port->membase = (platp[i].membase) ? platp[i].membase :
- (unsigned char __iomem *) port->mapbase;
- port->iotype = SERIAL_IO_MEM;
- port->irq = platp[i].irq;
- port->uartclk = MCF_BUSCLK;
- port->flags = UPF_BOOT_AUTOCONF;
- port->rs485_config = mcf_config_rs485;
- port->rs485_supported = mcf_rs485_supported;
- port->ops = &mcf_uart_ops;
- }
-
- return 0;
-}
-
-/****************************************************************************/
-
static void mcf_console_putc(struct console *co, const char c)
{
struct uart_port *port = &(mcf_ports + co->index)->port;
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 8395688f5ee92..6feac459c0cf4 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -220,7 +220,7 @@ static void meson_receive_chars(struct uart_port *port)
continue;
}
- if (uart_handle_sysrq_char(port, ch))
+ if (uart_prepare_sysrq_char(port, ch))
continue;
if ((status & port->ignore_status_mask) == 0)
@@ -248,7 +248,7 @@ static irqreturn_t meson_uart_interrupt(int irq, void *dev_id)
meson_uart_start_tx(port);
}
- uart_port_unlock(port);
+ uart_unlock_and_check_sysrq(port);
return IRQ_HANDLED;
}
@@ -556,18 +556,13 @@ static void meson_serial_port_write(struct uart_port *port, const char *s,
u_int count)
{
unsigned long flags;
- int locked;
+ int locked = 1;
u32 val, tmp;
- local_irq_save(flags);
- if (port->sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = uart_port_trylock(port);
- } else {
- uart_port_lock(port);
- locked = 1;
- }
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+ uart_port_lock_irqsave(port, &flags);
val = readl(port->membase + AML_UART_CONTROL);
tmp = val & ~(AML_UART_TX_INT_EN | AML_UART_RX_INT_EN);
@@ -577,8 +572,7 @@ static void meson_serial_port_write(struct uart_port *port, const char *s,
writel(val, port->membase + AML_UART_CONTROL);
if (locked)
- uart_port_unlock(port);
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void meson_serial_console_write(struct console *co, const char *s,
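meson is the first of several drivers in this series moved to the deferred-sysrq helpers: the RX path only records a possible sysrq character while the port lock is held, and the handler itself runs once the lock is released. A condensed sketch of the pattern these hunks share (the foo_* helpers are placeholders):

static irqreturn_t foo_uart_irq(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	u8 ch;

	uart_port_lock(port);

	while (foo_rx_ready(port)) {		/* placeholder FIFO check */
		ch = foo_read_rx(port);		/* placeholder FIFO read */

		/* Only records the character; nothing is handled here. */
		if (uart_prepare_sysrq_char(port, ch))
			continue;

		tty_insert_flip_char(&port->state->port, ch, TTY_NORMAL);
	}

	/* Drops the lock, then runs the recorded sysrq handler, if any. */
	uart_unlock_and_check_sysrq(port);

	return IRQ_HANDLED;
}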
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index e24204ad35def..d27c4c8c84e13 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -588,16 +588,14 @@ static void msm_complete_rx_dma(void *args)
if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
flag = TTY_NORMAL;
- uart_port_unlock_irqrestore(port, flags);
- sysrq = uart_handle_sysrq_char(port, dma->virt[i]);
- uart_port_lock_irqsave(port, &flags);
+ sysrq = uart_prepare_sysrq_char(port, dma->virt[i]);
if (!sysrq)
tty_insert_flip_char(tport, dma->virt[i], flag);
}
msm_start_rx_dma(msm_port);
done:
- uart_port_unlock_irqrestore(port, flags);
+ uart_unlock_and_check_sysrq_irqrestore(port, flags);
if (count)
tty_flip_buffer_push(tport);
@@ -763,9 +761,7 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
flag = TTY_NORMAL;
- uart_port_unlock(port);
- sysrq = uart_handle_sysrq_char(port, buf[i]);
- uart_port_lock(port);
+ sysrq = uart_prepare_sysrq_char(port, buf[i]);
if (!sysrq)
tty_insert_flip_char(tport, buf[i], flag);
}
@@ -825,9 +821,7 @@ static void msm_handle_rx(struct uart_port *port)
else if (sr & MSM_UART_SR_PAR_FRAME_ERR)
flag = TTY_FRAME;
- uart_port_unlock(port);
- sysrq = uart_handle_sysrq_char(port, c);
- uart_port_lock(port);
+ sysrq = uart_prepare_sysrq_char(port, c);
if (!sysrq)
tty_insert_flip_char(tport, c, flag);
}
@@ -948,11 +942,10 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
struct uart_port *port = dev_id;
struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->rx_dma;
- unsigned long flags;
unsigned int misr;
u32 val;
- uart_port_lock_irqsave(port, &flags);
+ uart_port_lock(port);
misr = msm_read(port, MSM_UART_MISR);
msm_write(port, 0, MSM_UART_IMR); /* disable interrupt */
@@ -984,7 +977,7 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
msm_handle_delta_cts(port);
msm_write(port, msm_port->imr, MSM_UART_IMR); /* restore interrupt */
- uart_port_unlock_irqrestore(port, flags);
+ uart_unlock_and_check_sysrq(port);
return IRQ_HANDLED;
}
@@ -1621,14 +1614,10 @@ static void __msm_console_write(struct uart_port *port, const char *s,
num_newlines++;
count += num_newlines;
- local_irq_save(flags);
-
- if (port->sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(port);
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- uart_port_lock(port);
+ uart_port_lock_irqsave(port, &flags);
if (is_uartdm)
msm_reset_dm_count(port, count);
@@ -1667,9 +1656,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
}
if (locked)
- uart_port_unlock(port);
-
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void msm_console_write(struct console *co, const char *s,
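The console write conversions here and in meson, omap, owl, pxa and rda all land on the same locking shape: the open-coded local_irq_save() plus port->sysrq special case is replaced by a trylock that is only attempted when an oops is in progress. A condensed sketch (foo_* names are placeholders):

static void foo_console_write(struct console *co, const char *s,
			      unsigned int count)
{
	struct uart_port *port = foo_console_port(co);	/* placeholder lookup */
	unsigned long flags;
	int locked = 1;

	if (oops_in_progress)
		locked = uart_port_trylock_irqsave(port, &flags);
	else
		uart_port_lock_irqsave(port, &flags);

	/* ... mask interrupts, transmit the string, restore interrupts ... */

	if (locked)
		uart_port_unlock_irqrestore(port, flags);
}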
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 4749331fe618c..1e8853eae5042 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1086,11 +1086,13 @@ static void mxs_auart_set_ldisc(struct uart_port *port,
static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
{
- u32 istat;
+ u32 istat, stat;
struct mxs_auart_port *s = context;
u32 mctrl_temp = s->mctrl_prev;
- u32 stat = mxs_read(s, REG_STAT);
+ uart_port_lock(&s->port);
+
+ stat = mxs_read(s, REG_STAT);
istat = mxs_read(s, REG_INTR);
/* ack irq */
@@ -1126,6 +1128,8 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
istat &= ~AUART_INTR_TXIS;
}
+ uart_port_unlock(&s->port);
+
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index f5a0b401af63b..9be1c871cf116 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -508,7 +508,7 @@ static void serial_omap_rdi(struct uart_omap_port *up, unsigned int lsr)
up->port.icount.rx++;
- if (uart_handle_sysrq_char(&up->port, ch))
+ if (uart_prepare_sysrq_char(&up->port, ch))
return;
uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, TTY_NORMAL);
@@ -563,7 +563,7 @@ static irqreturn_t serial_omap_irq(int irq, void *dev_id)
}
} while (max_count--);
- uart_port_unlock(&up->port);
+ uart_unlock_and_check_sysrq(&up->port);
tty_flip_buffer_push(&up->port.state->port);
@@ -1212,13 +1212,10 @@ serial_omap_console_write(struct console *co, const char *s,
unsigned int ier;
int locked = 1;
- local_irq_save(flags);
- if (up->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(&up->port);
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(&up->port, &flags);
else
- uart_port_lock(&up->port);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* First save the IER then disable the interrupts
@@ -1245,8 +1242,7 @@ serial_omap_console_write(struct console *co, const char *s,
check_modem_status(up);
if (locked)
- uart_port_unlock(&up->port);
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int __init
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index d9fe85397741d..8b60ac0ad7cd3 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -199,6 +199,7 @@ static void owl_uart_receive_chars(struct uart_port *port)
stat = owl_uart_read(port, OWL_UART_STAT);
while (!(stat & OWL_UART_STAT_RFEM)) {
char flag = TTY_NORMAL;
+ bool sysrq;
if (stat & OWL_UART_STAT_RXER)
port->icount.overrun++;
@@ -217,7 +218,9 @@ static void owl_uart_receive_chars(struct uart_port *port)
val = owl_uart_read(port, OWL_UART_RXDAT);
val &= 0xff;
- if ((stat & port->ignore_status_mask) == 0)
+ sysrq = uart_prepare_sysrq_char(port, val);
+
+ if (!sysrq && (stat & port->ignore_status_mask) == 0)
tty_insert_flip_char(&port->state->port, val, flag);
stat = owl_uart_read(port, OWL_UART_STAT);
@@ -229,10 +232,9 @@ static void owl_uart_receive_chars(struct uart_port *port)
static irqreturn_t owl_uart_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- unsigned long flags;
u32 stat;
- uart_port_lock_irqsave(port, &flags);
+ uart_port_lock(port);
stat = owl_uart_read(port, OWL_UART_STAT);
@@ -246,7 +248,7 @@ static irqreturn_t owl_uart_irq(int irq, void *dev_id)
stat |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP;
owl_uart_write(port, stat, OWL_UART_STAT);
- uart_port_unlock_irqrestore(port, flags);
+ uart_unlock_and_check_sysrq(port);
return IRQ_HANDLED;
}
@@ -508,18 +510,12 @@ static void owl_uart_port_write(struct uart_port *port, const char *s,
{
u32 old_ctl, val;
unsigned long flags;
- int locked;
+ int locked = 1;
- local_irq_save(flags);
-
- if (port->sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(port);
- else {
- uart_port_lock(port);
- locked = 1;
- }
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+ uart_port_lock_irqsave(port, &flags);
old_ctl = owl_uart_read(port, OWL_UART_CTL);
val = old_ctl | OWL_UART_CTL_TRFS_TX;
@@ -541,9 +537,7 @@ static void owl_uart_port_write(struct uart_port *port, const char *s,
owl_uart_write(port, old_ctl, OWL_UART_CTL);
if (locked)
- uart_port_unlock(port);
-
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void owl_uart_console_write(struct console *co, const char *s,
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 436cc6d52a11b..89257cddf5405 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -237,9 +237,6 @@ struct eg20t_port {
#define IRQ_NAME_SIZE 17
char irq_name[IRQ_NAME_SIZE];
-
- /* protect the eg20t_port private structure and io access to membase */
- spinlock_t lock;
};
/**
@@ -567,7 +564,7 @@ static int pch_uart_hal_read(struct eg20t_port *priv, unsigned char *buf,
if (uart_handle_break(port))
continue;
}
- if (uart_handle_sysrq_char(port, rbr))
+ if (uart_prepare_sysrq_char(port, rbr))
continue;
buf[i++] = rbr;
@@ -599,16 +596,14 @@ static void pch_uart_hal_set_break(struct eg20t_port *priv, int on)
iowrite8(lcr, priv->membase + UART_LCR);
}
-static int push_rx(struct eg20t_port *priv, const unsigned char *buf,
- int size)
+static void push_rx(struct eg20t_port *priv, const unsigned char *buf,
+ int size)
{
struct uart_port *port = &priv->port;
struct tty_port *tport = &port->state->port;
tty_insert_flip_string(tport, buf, size);
tty_flip_buffer_push(tport);
-
- return 0;
}
static int dma_push_rx(struct eg20t_port *priv, int size)
@@ -761,7 +756,7 @@ static int handle_rx_to(struct eg20t_port *priv)
{
struct pch_uart_buffer *buf;
int rx_size;
- int ret;
+
if (!priv->start_rx) {
pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT |
PCH_UART_HAL_RX_ERR_INT);
@@ -770,19 +765,12 @@ static int handle_rx_to(struct eg20t_port *priv)
buf = &priv->rxbuf;
do {
rx_size = pch_uart_hal_read(priv, buf->buf, buf->size);
- ret = push_rx(priv, buf->buf, rx_size);
- if (ret)
- return 0;
+ push_rx(priv, buf->buf, rx_size);
} while (rx_size == buf->size);
return PCH_UART_HANDLED_RX_INT;
}
-static int handle_rx(struct eg20t_port *priv)
-{
- return handle_rx_to(priv);
-}
-
static int dma_handle_rx(struct eg20t_port *priv)
{
struct uart_port *port = &priv->port;
@@ -1019,11 +1007,10 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
u8 lsr;
int ret = 0;
unsigned char iid;
- unsigned long flags;
int next = 1;
u8 msr;
- spin_lock_irqsave(&priv->lock, flags);
+ uart_port_lock(&priv->port);
handled = 0;
while (next) {
iid = pch_uart_hal_get_iid(priv);
@@ -1051,7 +1038,7 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
PCH_UART_HAL_RX_INT |
PCH_UART_HAL_RX_ERR_INT);
} else {
- ret = handle_rx(priv);
+ ret = handle_rx_to(priv);
}
break;
case PCH_UART_IID_RDR_TO: /* Received Data Ready
@@ -1083,7 +1070,7 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
handled |= (unsigned int)ret;
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ uart_unlock_and_check_sysrq(&priv->port);
return IRQ_RETVAL(handled);
}
@@ -1194,9 +1181,9 @@ static void pch_uart_break_ctl(struct uart_port *port, int ctl)
unsigned long flags;
priv = container_of(port, struct eg20t_port, port);
- spin_lock_irqsave(&priv->lock, flags);
+ uart_port_lock_irqsave(&priv->port, &flags);
pch_uart_hal_set_break(priv, ctl);
- spin_unlock_irqrestore(&priv->lock, flags);
+ uart_port_unlock_irqrestore(&priv->port, flags);
}
/* Grab any interrupt resources and initialise any low level driver state. */
@@ -1346,8 +1333,7 @@ static void pch_uart_set_termios(struct uart_port *port,
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
- spin_lock_irqsave(&priv->lock, flags);
- uart_port_lock(port);
+ uart_port_lock_irqsave(port, &flags);
uart_update_timeout(port, termios->c_cflag, baud);
rtn = pch_uart_hal_set_line(priv, baud, parity, bits, stb);
@@ -1360,8 +1346,7 @@ static void pch_uart_set_termios(struct uart_port *port,
tty_termios_encode_baud_rate(termios, baud, baud);
out:
- uart_port_unlock(port);
- spin_unlock_irqrestore(&priv->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *pch_uart_type(struct uart_port *port)
@@ -1565,27 +1550,17 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
{
struct eg20t_port *priv;
unsigned long flags;
- int priv_locked = 1;
- int port_locked = 1;
+ int locked = 1;
u8 ier;
priv = pch_uart_ports[co->index];
touch_nmi_watchdog();
- local_irq_save(flags);
- if (priv->port.sysrq) {
- /* call to uart_handle_sysrq_char already took the priv lock */
- priv_locked = 0;
- /* serial8250_handle_port() already took the port lock */
- port_locked = 0;
- } else if (oops_in_progress) {
- priv_locked = spin_trylock(&priv->lock);
- port_locked = uart_port_trylock(&priv->port);
- } else {
- spin_lock(&priv->lock);
- uart_port_lock(&priv->port);
- }
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(&priv->port, &flags);
+ else
+ uart_port_lock_irqsave(&priv->port, &flags);
/*
* First save the IER then disable the interrupts
@@ -1603,11 +1578,8 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
wait_for_xmitr(priv, UART_LSR_BOTH_EMPTY);
iowrite8(ier, priv->membase + UART_IER);
- if (port_locked)
- uart_port_unlock(&priv->port);
- if (priv_locked)
- spin_unlock(&priv->lock);
- local_irq_restore(flags);
+ if (locked)
+ uart_port_unlock_irqrestore(&priv->port, flags);
}
static int __init pch_console_setup(struct console *co, char *options)
@@ -1704,8 +1676,6 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
pci_enable_msi(pdev);
pci_set_master(pdev);
- spin_lock_init(&priv->lock);
-
iobase = pci_resource_start(pdev, 0);
mapbase = pci_resource_start(pdev, 1);
priv->mapbase = mapbase;
@@ -1735,8 +1705,6 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
KBUILD_MODNAME ":" PCH_UART_DRIVER_DEVICE "%d",
priv->port.line);
- spin_lock_init(&priv->port.lock);
-
pci_set_drvdata(pdev, priv);
priv->trigger_level = 1;
priv->fcr = 0;
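With the private eg20t_port spinlock gone, the paths that previously nested both locks now bracket with the uart port lock alone, and the interrupt handler picks up the deferred-sysrq helpers in the same move. A condensed sketch of the resulting interrupt path (dispatch body elided):

static irqreturn_t pch_uart_interrupt_sketch(int irq, void *dev_id)
{
	struct eg20t_port *priv = dev_id;
	unsigned int handled = 0;

	uart_port_lock(&priv->port);

	/* ... read the IID, dispatch RX/TX/error handling, accumulate
	 * 'handled'; the RX path defers sysrq via uart_prepare_sysrq_char() ...
	 */

	uart_unlock_and_check_sysrq(&priv->port);

	return IRQ_RETVAL(handled);
}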
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index c8bf08c19c647..92195f984de1b 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -210,7 +210,6 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
{
struct tty_port *port;
unsigned char ch, r1, drop, flag;
- int loops = 0;
/* Sanity check, make sure the old bug is no longer happening */
if (uap->port.state == NULL) {
@@ -291,25 +290,12 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
if (r1 & Rx_OVR)
tty_insert_flip_char(port, 0, TTY_OVERRUN);
next_char:
- /* We can get stuck in an infinite loop getting char 0 when the
- * line is in a wrong HW state, we break that here.
- * When that happens, I disable the receive side of the driver.
- * Note that what I've been experiencing is a real irq loop where
- * I'm getting flooded regardless of the actual port speed.
- * Something strange is going on with the HW
- */
- if ((++loops) > 1000)
- goto flood;
ch = read_zsreg(uap, R0);
if (!(ch & Rx_CH_AV))
break;
}
return true;
- flood:
- pmz_interrupt_control(uap, 0);
- pmz_error("pmz: rx irq flood !\n");
- return true;
}
static void pmz_status_handle(struct uart_pmac_port *uap)
@@ -1507,12 +1493,12 @@ static int pmz_attach(struct macio_dev *mdev, const struct of_device_id *match)
* That one should not be called, macio isn't really a hotswap device,
* we don't expect one of those serial ports to go away...
*/
-static int pmz_detach(struct macio_dev *mdev)
+static void pmz_detach(struct macio_dev *mdev)
{
struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev);
if (!uap)
- return -ENODEV;
+ return;
uart_remove_one_port(&pmz_uart_reg, &uap->port);
@@ -1523,11 +1509,8 @@ static int pmz_detach(struct macio_dev *mdev)
dev_set_drvdata(&mdev->ofdev.dev, NULL);
uap->dev = NULL;
uap->port.dev = NULL;
-
- return 0;
}
-
static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state)
{
struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev);
@@ -1717,18 +1700,13 @@ static int __init pmz_attach(struct platform_device *pdev)
return uart_add_one_port(&pmz_uart_reg, &uap->port);
}
-static int __exit pmz_detach(struct platform_device *pdev)
+static void __exit pmz_detach(struct platform_device *pdev)
{
struct uart_pmac_port *uap = platform_get_drvdata(pdev);
- if (!uap)
- return -ENODEV;
-
uart_remove_one_port(&pmz_uart_reg, &uap->port);
uap->port.dev = NULL;
-
- return 0;
}
#endif /* !CONFIG_PPC_PMAC */
@@ -1797,7 +1775,7 @@ static struct macio_driver pmz_driver = {
#else
static struct platform_driver pmz_driver = {
- .remove = __exit_p(pmz_detach),
+ .remove_new = __exit_p(pmz_detach),
.driver = {
.name = "scc",
},
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 46e70e155aab2..e395ff29c1a2c 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -151,7 +151,7 @@ static inline void receive_chars(struct uart_pxa_port *up, int *status)
flag = TTY_FRAME;
}
- if (uart_handle_sysrq_char(&up->port, ch))
+ if (uart_prepare_sysrq_char(&up->port, ch))
goto ignore_char;
uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag);
@@ -232,7 +232,7 @@ static inline irqreturn_t serial_pxa_irq(int irq, void *dev_id)
check_modem_status(up);
if (lsr & UART_LSR_THRE)
transmit_chars(up);
- uart_port_unlock(&up->port);
+ uart_unlock_and_check_sysrq(&up->port);
return IRQ_HANDLED;
}
@@ -604,13 +604,10 @@ serial_pxa_console_write(struct console *co, const char *s, unsigned int count)
int locked = 1;
clk_enable(up->clk);
- local_irq_save(flags);
- if (up->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(&up->port);
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(&up->port, &flags);
else
- uart_port_lock(&up->port);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* First save the IER then disable the interrupts
@@ -628,10 +625,8 @@ serial_pxa_console_write(struct console *co, const char *s, unsigned int count)
serial_out(up, UART_IER, ier);
if (locked)
- uart_port_unlock(&up->port);
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
clk_disable(up->clk);
-
}
#ifdef CONFIG_CONSOLE_POLL
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 99e08737f293c..f9f7ac1a10df3 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -488,18 +488,16 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
geni_status = readl(uport->membase + SE_GENI_STATUS);
- /* Cancel the current write to log the fault */
if (!locked) {
- geni_se_cancel_m_cmd(&port->se);
- if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_CMD_CANCEL_EN, true)) {
- geni_se_abort_m_cmd(&port->se);
- qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_CMD_ABORT_EN, true);
- writel(M_CMD_ABORT_EN, uport->membase +
- SE_GENI_M_IRQ_CLEAR);
- }
- writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ /*
+		 * We can only get here if an oops is in progress and we were
+ * unable to get the lock. This means we can't safely access
+ * our state variables like tx_remaining. About the best we
+ * can do is wait for the FIFO to be empty before we start our
+ * transfer, so we'll do that.
+ */
+ qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_TX_FIFO_NOT_EMPTY_EN, false);
} else if ((geni_status & M_GENI_CMD_ACTIVE) && !port->tx_remaining) {
/*
* It seems we can't interrupt existing transfers if all data
@@ -516,11 +514,12 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
__qcom_geni_serial_console_write(uport, s, count);
- if (port->tx_remaining)
- qcom_geni_serial_setup_tx(uport, port->tx_remaining);
- if (locked)
+ if (locked) {
+ if (port->tx_remaining)
+ qcom_geni_serial_setup_tx(uport, port->tx_remaining);
uart_port_unlock_irqrestore(uport, flags);
+ }
}
static void handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index 13deb355cf1bc..82def9b8632a5 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -394,7 +394,8 @@ static void rda_uart_receive_chars(struct uart_port *port)
val &= 0xff;
port->icount.rx++;
- tty_insert_flip_char(&port->state->port, val, flag);
+ if (!uart_prepare_sysrq_char(port, val))
+ tty_insert_flip_char(&port->state->port, val, flag);
status = rda_uart_read(port, RDA_UART_STATUS);
}
@@ -405,10 +406,9 @@ static void rda_uart_receive_chars(struct uart_port *port)
static irqreturn_t rda_interrupt(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- unsigned long flags;
u32 val, irq_mask;
- uart_port_lock_irqsave(port, &flags);
+ uart_port_lock(port);
/* Clear IRQ cause */
val = rda_uart_read(port, RDA_UART_IRQ_CAUSE);
@@ -425,7 +425,7 @@ static irqreturn_t rda_interrupt(int irq, void *dev_id)
rda_uart_send_chars(port);
}
- uart_port_unlock_irqrestore(port, flags);
+ uart_unlock_and_check_sysrq(port);
return IRQ_HANDLED;
}
@@ -590,18 +590,12 @@ static void rda_uart_port_write(struct uart_port *port, const char *s,
{
u32 old_irq_mask;
unsigned long flags;
- int locked;
-
- local_irq_save(flags);
+ int locked = 1;
- if (port->sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = uart_port_trylock(port);
- } else {
- uart_port_lock(port);
- locked = 1;
- }
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+ uart_port_lock_irqsave(port, &flags);
old_irq_mask = rda_uart_read(port, RDA_UART_IRQ_MASK);
rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
@@ -615,9 +609,7 @@ static void rda_uart_port_write(struct uart_port *port, const char *s,
rda_uart_write(port, old_irq_mask, RDA_UART_IRQ_MASK);
if (locked)
- uart_port_unlock(port);
-
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void rda_uart_console_write(struct console *co, const char *s,
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 71d17d804fdab..a2d07e05c5026 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -21,26 +21,28 @@
* BJD, 04-Nov-2004
*/
-#include <linux/dmaengine.h>
+#include <linux/console.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
-#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
#include <linux/math.h>
#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/serial_s3c.h>
+#include <linux/slab.h>
#include <linux/sysrq.h>
-#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-#include <linux/serial_s3c.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/cpufreq.h>
-#include <linux/of.h>
+#include <linux/types.h>
+
#include <asm/irq.h>
/* UART name and device definitions */
@@ -73,21 +75,21 @@ struct s3c24xx_uart_info {
enum s3c24xx_port_type type;
unsigned int port_type;
unsigned int fifosize;
- unsigned long rx_fifomask;
- unsigned long rx_fifoshift;
- unsigned long rx_fifofull;
- unsigned long tx_fifomask;
- unsigned long tx_fifoshift;
- unsigned long tx_fifofull;
- unsigned int def_clk_sel;
- unsigned long num_clks;
- unsigned long clksel_mask;
- unsigned long clksel_shift;
- unsigned long ucon_mask;
+ u32 rx_fifomask;
+ u32 rx_fifoshift;
+ u32 rx_fifofull;
+ u32 tx_fifomask;
+ u32 tx_fifoshift;
+ u32 tx_fifofull;
+ u32 clksel_mask;
+ u32 clksel_shift;
+ u32 ucon_mask;
+ u8 def_clk_sel;
+ u8 num_clks;
+ u8 iotype;
/* uart port features */
-
- unsigned int has_divslot:1;
+ bool has_divslot;
};
struct s3c24xx_serial_drv_data {
@@ -196,7 +198,7 @@ static void wr_reg(const struct uart_port *port, u32 reg, u32 val)
/* Byte-order aware bit setting/clearing functions. */
static inline void s3c24xx_set_bit(const struct uart_port *port, int idx,
- unsigned int reg)
+ u32 reg)
{
unsigned long flags;
u32 val;
@@ -209,7 +211,7 @@ static inline void s3c24xx_set_bit(const struct uart_port *port, int idx,
}
static inline void s3c24xx_clear_bit(const struct uart_port *port, int idx,
- unsigned int reg)
+ u32 reg)
{
unsigned long flags;
u32 val;
@@ -233,7 +235,7 @@ static inline const char *s3c24xx_serial_portname(const struct uart_port *port)
return to_platform_device(port->dev)->name;
}
-static int s3c24xx_serial_txempty_nofifo(const struct uart_port *port)
+static bool s3c24xx_serial_txempty_nofifo(const struct uart_port *port)
{
return rd_regl(port, S3C2410_UTRSTAT) & S3C2410_UTRSTAT_TXE;
}
@@ -242,8 +244,8 @@ static void s3c24xx_serial_rx_enable(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned long flags;
- unsigned int ucon, ufcon;
int count = 10000;
+ u32 ucon, ufcon;
uart_port_lock_irqsave(port, &flags);
@@ -266,7 +268,7 @@ static void s3c24xx_serial_rx_disable(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned long flags;
- unsigned int ucon;
+ u32 ucon;
uart_port_lock_irqsave(port, &flags);
@@ -587,8 +589,8 @@ static inline const struct s3c2410_uartcfg
return ourport->cfg;
}
-static int s3c24xx_serial_rx_fifocnt(const struct s3c24xx_uart_port *ourport,
- unsigned long ufstat)
+static unsigned int
+s3c24xx_serial_rx_fifocnt(const struct s3c24xx_uart_port *ourport, u32 ufstat)
{
const struct s3c24xx_uart_info *info = ourport->info;
@@ -660,7 +662,7 @@ static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport)
static void enable_rx_dma(struct s3c24xx_uart_port *ourport)
{
struct uart_port *port = &ourport->port;
- unsigned int ucon;
+ u32 ucon;
/* set Rx mode to DMA mode */
ucon = rd_regl(port, S3C2410_UCON);
@@ -683,7 +685,7 @@ static void enable_rx_dma(struct s3c24xx_uart_port *ourport)
static void enable_rx_pio(struct s3c24xx_uart_port *ourport)
{
struct uart_port *port = &ourport->port;
- unsigned int ucon;
+ u32 ucon;
/* set Rx mode to DMA mode */
ucon = rd_regl(port, S3C2410_UCON);
@@ -708,13 +710,14 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport);
static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id)
{
- unsigned int utrstat, received;
struct s3c24xx_uart_port *ourport = dev_id;
struct uart_port *port = &ourport->port;
struct s3c24xx_uart_dma *dma = ourport->dma;
struct tty_struct *tty = tty_port_tty_get(&ourport->port.state->port);
struct tty_port *t = &port->state->port;
struct dma_tx_state state;
+ unsigned int received;
+ u32 utrstat;
utrstat = rd_regl(port, S3C2410_UTRSTAT);
rd_regl(port, S3C2410_UFSTAT);
@@ -756,9 +759,9 @@ finish:
static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
{
struct uart_port *port = &ourport->port;
- unsigned int ufcon, ufstat, uerstat;
+ unsigned int max_count = port->fifosize;
unsigned int fifocnt = 0;
- int max_count = port->fifosize;
+ u32 ufcon, ufstat, uerstat;
u8 ch, flag;
while (max_count-- > 0) {
@@ -778,7 +781,7 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
ch = rd_reg(port, S3C2410_URXH);
if (port->flags & UPF_CONS_FLOW) {
- int txe = s3c24xx_serial_txempty_nofifo(port);
+ bool txe = s3c24xx_serial_txempty_nofifo(port);
if (ourport->rx_enabled) {
if (!txe) {
@@ -942,7 +945,7 @@ static irqreturn_t s3c64xx_serial_handle_irq(int irq, void *id)
{
const struct s3c24xx_uart_port *ourport = id;
const struct uart_port *port = &ourport->port;
- unsigned int pend = rd_regl(port, S3C64XX_UINTP);
+ u32 pend = rd_regl(port, S3C64XX_UINTP);
irqreturn_t ret = IRQ_HANDLED;
if (pend & S3C64XX_UINTM_RXD_MSK) {
@@ -961,7 +964,7 @@ static irqreturn_t apple_serial_handle_irq(int irq, void *id)
{
const struct s3c24xx_uart_port *ourport = id;
const struct uart_port *port = &ourport->port;
- unsigned int pend = rd_regl(port, S3C2410_UTRSTAT);
+ u32 pend = rd_regl(port, S3C2410_UTRSTAT);
irqreturn_t ret = IRQ_NONE;
if (pend & (APPLE_S5L_UTRSTAT_RXTHRESH | APPLE_S5L_UTRSTAT_RXTO)) {
@@ -980,24 +983,23 @@ static irqreturn_t apple_serial_handle_irq(int irq, void *id)
static unsigned int s3c24xx_serial_tx_empty(struct uart_port *port)
{
const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
- unsigned long ufstat = rd_regl(port, S3C2410_UFSTAT);
- unsigned long ufcon = rd_regl(port, S3C2410_UFCON);
+ u32 ufstat = rd_regl(port, S3C2410_UFSTAT);
+ u32 ufcon = rd_regl(port, S3C2410_UFCON);
if (ufcon & S3C2410_UFCON_FIFOMODE) {
- if ((ufstat & info->tx_fifomask) != 0 ||
+ if ((ufstat & info->tx_fifomask) ||
(ufstat & info->tx_fifofull))
return 0;
-
- return 1;
+ return TIOCSER_TEMT;
}
- return s3c24xx_serial_txempty_nofifo(port);
+ return s3c24xx_serial_txempty_nofifo(port) ? TIOCSER_TEMT : 0;
}
/* no modem control lines */
static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port)
{
- unsigned int umstat = rd_reg(port, S3C2410_UMSTAT);
+ u32 umstat = rd_reg(port, S3C2410_UMSTAT);
if (umstat & S3C2410_UMSTAT_CTS)
return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
@@ -1007,8 +1009,8 @@ static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port)
static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- unsigned int umcon = rd_regl(port, S3C2410_UMCON);
- unsigned int ucon = rd_regl(port, S3C2410_UCON);
+ u32 umcon = rd_regl(port, S3C2410_UMCON);
+ u32 ucon = rd_regl(port, S3C2410_UCON);
if (mctrl & TIOCM_RTS)
umcon |= S3C2410_UMCOM_RTS_LOW;
@@ -1028,7 +1030,7 @@ static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
{
unsigned long flags;
- unsigned int ucon;
+ u32 ucon;
uart_port_lock_irqsave(port, &flags);
@@ -1186,7 +1188,7 @@ static void apple_s5l_serial_shutdown(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
- unsigned int ucon;
+ u32 ucon;
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~(APPLE_S5L_UCON_TXTHRESH_ENA_MSK |
@@ -1212,7 +1214,7 @@ static int s3c64xx_serial_startup(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned long flags;
- unsigned int ufcon;
+ u32 ufcon;
int ret;
wr_regl(port, S3C64XX_UINTM, 0xf);
@@ -1257,7 +1259,7 @@ static int apple_s5l_serial_startup(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned long flags;
- unsigned int ufcon;
+ u32 ufcon;
int ret;
wr_regl(port, S3C2410_UTRSTAT, APPLE_S5L_UTRSTAT_ALL_FLAGS);
@@ -1292,8 +1294,6 @@ static int apple_s5l_serial_startup(struct uart_port *port)
return ret;
}
-/* power power management control */
-
static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
unsigned int old)
{
@@ -1339,10 +1339,10 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
#define MAX_CLK_NAME_LENGTH 15
-static inline int s3c24xx_serial_getsource(struct uart_port *port)
+static inline u8 s3c24xx_serial_getsource(struct uart_port *port)
{
const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
- unsigned int ucon;
+ u32 ucon;
if (info->num_clks == 1)
return 0;
@@ -1352,11 +1352,10 @@ static inline int s3c24xx_serial_getsource(struct uart_port *port)
return ucon >> info->clksel_shift;
}
-static void s3c24xx_serial_setsource(struct uart_port *port,
- unsigned int clk_sel)
+static void s3c24xx_serial_setsource(struct uart_port *port, u8 clk_sel)
{
const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
- unsigned int ucon;
+ u32 ucon;
if (info->num_clks == 1)
return;
@@ -1372,14 +1371,15 @@ static void s3c24xx_serial_setsource(struct uart_port *port,
static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
unsigned int req_baud, struct clk **best_clk,
- unsigned int *clk_num)
+ u8 *clk_num)
{
const struct s3c24xx_uart_info *info = ourport->info;
struct clk *clk;
unsigned long rate;
- unsigned int cnt, baud, quot, best_quot = 0;
+ unsigned int baud, quot, best_quot = 0;
char clkname[MAX_CLK_NAME_LENGTH];
int calc_deviation, deviation = (1 << 30) - 1;
+ u8 cnt;
for (cnt = 0; cnt < info->num_clks; cnt++) {
/* Keep selected clock if provided */
@@ -1472,10 +1472,10 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
struct s3c24xx_uart_port *ourport = to_ourport(port);
struct clk *clk = ERR_PTR(-EINVAL);
unsigned long flags;
- unsigned int baud, quot, clk_sel = 0;
- unsigned int ulcon;
- unsigned int umcon;
+ unsigned int baud, quot;
unsigned int udivslot = 0;
+ u32 ulcon, umcon;
+ u8 clk_sel = 0;
/*
* We don't support modem control lines.
@@ -1737,12 +1737,12 @@ static struct uart_driver s3c24xx_uart_drv = {
static struct s3c24xx_uart_port s3c24xx_serial_ports[UART_NR];
-static void s3c24xx_serial_init_port_default(int index) {
+static void s3c24xx_serial_init_port_default(int index)
+{
struct uart_port *port = &s3c24xx_serial_ports[index].port;
spin_lock_init(&port->lock);
- port->iotype = UPIO_MEM;
port->uartclk = 0;
port->fifosize = 16;
port->flags = UPF_BOOT_AUTOCONF;
@@ -1758,7 +1758,7 @@ static void s3c24xx_serial_resetport(struct uart_port *port,
const struct s3c2410_uartcfg *cfg)
{
const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
+ u32 ucon = rd_regl(port, S3C2410_UCON);
ucon &= (info->clksel_mask | info->ucon_mask);
wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
@@ -1776,10 +1776,9 @@ static int s3c24xx_serial_enable_baudclk(struct s3c24xx_uart_port *ourport)
struct device *dev = ourport->port.dev;
const struct s3c24xx_uart_info *info = ourport->info;
char clk_name[MAX_CLK_NAME_LENGTH];
- unsigned int clk_sel;
struct clk *clk;
- int clk_num;
int ret;
+ u8 clk_sel, clk_num;
clk_sel = ourport->cfg->clk_sel ? : info->def_clk_sel;
for (clk_num = 0; clk_num < info->num_clks; clk_num++) {
@@ -1904,7 +1903,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
wr_regl(port, S3C64XX_UINTSP, 0xf);
break;
case TYPE_APPLE_S5L: {
- unsigned int ucon;
+ u32 ucon;
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~(APPLE_S5L_UCON_TXTHRESH_ENA_MSK |
@@ -1952,7 +1951,7 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct s3c24xx_uart_port *ourport;
int index = probe_index;
- int ret, prop = 0;
+ int ret, prop = 0, fifosize_prop = 1;
if (np) {
ret = of_alias_get_id(np, "serial");
@@ -1989,9 +1988,11 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
break;
}
+ ourport->port.iotype = ourport->info->iotype;
+
if (np) {
- of_property_read_u32(np,
- "samsung,uart-fifosize", &ourport->port.fifosize);
+ fifosize_prop = of_property_read_u32(np, "samsung,uart-fifosize",
+ &ourport->port.fifosize);
if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
switch (prop) {
@@ -2009,10 +2010,13 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
}
}
- if (ourport->drv_data->fifosize[index])
- ourport->port.fifosize = ourport->drv_data->fifosize[index];
- else if (ourport->info->fifosize)
- ourport->port.fifosize = ourport->info->fifosize;
+ if (fifosize_prop) {
+ if (ourport->drv_data->fifosize[index])
+ ourport->port.fifosize = ourport->drv_data->fifosize[index];
+ else if (ourport->info->fifosize)
+ ourport->port.fifosize = ourport->info->fifosize;
+ }
+
ourport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SAMSUNG_CONSOLE);
/*
@@ -2058,9 +2062,8 @@ static void s3c24xx_serial_remove(struct platform_device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
- if (port) {
+ if (port)
uart_remove_one_port(&s3c24xx_uart_drv, port);
- }
uart_unregister_driver(&s3c24xx_uart_drv);
}
@@ -2106,7 +2109,7 @@ static int s3c24xx_serial_resume_noirq(struct device *dev)
/* restore IRQ mask */
switch (ourport->info->type) {
case TYPE_S3C6400: {
- unsigned int uintm = 0xf;
+ u32 uintm = 0xf;
if (ourport->tx_enabled)
uintm &= ~S3C64XX_UINTM_TXD_MSK;
@@ -2122,7 +2125,7 @@ static int s3c24xx_serial_resume_noirq(struct device *dev)
break;
}
case TYPE_APPLE_S5L: {
- unsigned int ucon;
+ u32 ucon;
int ret;
ret = clk_prepare_enable(ourport->clk);
@@ -2183,27 +2186,27 @@ static const struct dev_pm_ops s3c24xx_serial_pm_ops = {
static struct uart_port *cons_uart;
-static int
-s3c24xx_serial_console_txrdy(struct uart_port *port, unsigned int ufcon)
+static bool
+s3c24xx_serial_console_txrdy(struct uart_port *port, u32 ufcon)
{
const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
- unsigned long ufstat, utrstat;
+ u32 ufstat, utrstat;
if (ufcon & S3C2410_UFCON_FIFOMODE) {
/* fifo mode - check amount of data in fifo registers... */
ufstat = rd_regl(port, S3C2410_UFSTAT);
- return (ufstat & info->tx_fifofull) ? 0 : 1;
+ return !(ufstat & info->tx_fifofull);
}
/* in non-fifo mode, we go and use the tx buffer empty */
utrstat = rd_regl(port, S3C2410_UTRSTAT);
- return (utrstat & S3C2410_UTRSTAT_TXE) ? 1 : 0;
+ return utrstat & S3C2410_UTRSTAT_TXE;
}
static bool
-s3c24xx_port_configured(unsigned int ucon)
+s3c24xx_port_configured(u32 ucon)
{
/* consider the serial port configured if the tx/rx mode set */
return (ucon & 0xf) != 0;
@@ -2218,7 +2221,7 @@ s3c24xx_port_configured(unsigned int ucon)
static int s3c24xx_serial_get_poll_char(struct uart_port *port)
{
const struct s3c24xx_uart_port *ourport = to_ourport(port);
- unsigned int ufstat;
+ u32 ufstat;
ufstat = rd_regl(port, S3C2410_UFSTAT);
if (s3c24xx_serial_rx_fifocnt(ourport, ufstat) == 0)
@@ -2230,8 +2233,8 @@ static int s3c24xx_serial_get_poll_char(struct uart_port *port)
static void s3c24xx_serial_put_poll_char(struct uart_port *port,
unsigned char c)
{
- unsigned int ufcon = rd_regl(port, S3C2410_UFCON);
- unsigned int ucon = rd_regl(port, S3C2410_UCON);
+ u32 ufcon = rd_regl(port, S3C2410_UFCON);
+ u32 ucon = rd_regl(port, S3C2410_UCON);
/* not possible to xmit on unconfigured port */
if (!s3c24xx_port_configured(ucon))
@@ -2247,7 +2250,7 @@ static void s3c24xx_serial_put_poll_char(struct uart_port *port,
static void
s3c24xx_serial_console_putchar(struct uart_port *port, unsigned char ch)
{
- unsigned int ufcon = rd_regl(port, S3C2410_UFCON);
+ u32 ufcon = rd_regl(port, S3C2410_UFCON);
while (!s3c24xx_serial_console_txrdy(port, ufcon))
cpu_relax();
@@ -2258,7 +2261,7 @@ static void
s3c24xx_serial_console_write(struct console *co, const char *s,
unsigned int count)
{
- unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON);
+ u32 ucon = rd_regl(cons_uart, S3C2410_UCON);
unsigned long flags;
bool locked = true;
@@ -2285,12 +2288,10 @@ s3c24xx_serial_get_options(struct uart_port *port, int *baud,
int *parity, int *bits)
{
struct clk *clk;
- unsigned int ulcon;
- unsigned int ucon;
- unsigned int ubrdiv;
unsigned long rate;
- unsigned int clk_sel;
+ u32 ulcon, ucon, ubrdiv;
char clk_name[MAX_CLK_NAME_LENGTH];
+ u8 clk_sel;
ulcon = rd_regl(port, S3C2410_ULCON);
ucon = rd_regl(port, S3C2410_UCON);
@@ -2399,8 +2400,9 @@ static const struct s3c24xx_serial_drv_data s3c6400_serial_drv_data = {
.name = "Samsung S3C6400 UART",
.type = TYPE_S3C6400,
.port_type = PORT_S3C6400,
+ .iotype = UPIO_MEM,
.fifosize = 64,
- .has_divslot = 1,
+ .has_divslot = true,
.rx_fifomask = S3C2440_UFSTAT_RXMASK,
.rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
.rx_fifofull = S3C2440_UFSTAT_RXFULL,
@@ -2428,7 +2430,8 @@ static const struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
.name = "Samsung S5PV210 UART",
.type = TYPE_S3C6400,
.port_type = PORT_S3C6400,
- .has_divslot = 1,
+ .iotype = UPIO_MEM,
+ .has_divslot = true,
.rx_fifomask = S5PV210_UFSTAT_RXMASK,
.rx_fifoshift = S5PV210_UFSTAT_RXSHIFT,
.rx_fifofull = S5PV210_UFSTAT_RXFULL,
@@ -2452,12 +2455,13 @@ static const struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
#endif
#if defined(CONFIG_ARCH_EXYNOS)
-#define EXYNOS_COMMON_SERIAL_DRV_DATA() \
+#define EXYNOS_COMMON_SERIAL_DRV_DATA \
.info = { \
.name = "Samsung Exynos UART", \
.type = TYPE_S3C6400, \
.port_type = PORT_S3C6400, \
- .has_divslot = 1, \
+ .iotype = UPIO_MEM, \
+ .has_divslot = true, \
.rx_fifomask = S5PV210_UFSTAT_RXMASK, \
.rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, \
.rx_fifofull = S5PV210_UFSTAT_RXFULL, \
@@ -2476,39 +2480,57 @@ static const struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
} \
static const struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = {
- EXYNOS_COMMON_SERIAL_DRV_DATA(),
+ EXYNOS_COMMON_SERIAL_DRV_DATA,
.fifosize = { 256, 64, 16, 16 },
};
static const struct s3c24xx_serial_drv_data exynos5433_serial_drv_data = {
- EXYNOS_COMMON_SERIAL_DRV_DATA(),
+ EXYNOS_COMMON_SERIAL_DRV_DATA,
.fifosize = { 64, 256, 16, 256 },
};
static const struct s3c24xx_serial_drv_data exynos850_serial_drv_data = {
- EXYNOS_COMMON_SERIAL_DRV_DATA(),
+ EXYNOS_COMMON_SERIAL_DRV_DATA,
.fifosize = { 256, 64, 64, 64 },
};
-/*
- * Common drv_data struct for platforms that specify samsung,uart-fifosize in
- * device tree.
- */
-static const struct s3c24xx_serial_drv_data exynos_fifoszdt_serial_drv_data = {
- EXYNOS_COMMON_SERIAL_DRV_DATA(),
+static const struct s3c24xx_serial_drv_data gs101_serial_drv_data = {
+ .info = {
+ .name = "Google GS101 UART",
+ .type = TYPE_S3C6400,
+ .port_type = PORT_S3C6400,
+ .iotype = UPIO_MEM32,
+ .has_divslot = true,
+ .rx_fifomask = S5PV210_UFSTAT_RXMASK,
+ .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT,
+ .rx_fifofull = S5PV210_UFSTAT_RXFULL,
+ .tx_fifofull = S5PV210_UFSTAT_TXFULL,
+ .tx_fifomask = S5PV210_UFSTAT_TXMASK,
+ .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL0,
+ .num_clks = 1,
+ .clksel_mask = 0,
+ .clksel_shift = 0,
+ },
+ .def_cfg = {
+ .ucon = S5PV210_UCON_DEFAULT,
+ .ufcon = S5PV210_UFCON_DEFAULT,
+ .has_fracval = 1,
+ },
+ /* samsung,uart-fifosize must be specified in the device tree. */
.fifosize = { 0 },
};
#define EXYNOS4210_SERIAL_DRV_DATA (&exynos4210_serial_drv_data)
#define EXYNOS5433_SERIAL_DRV_DATA (&exynos5433_serial_drv_data)
#define EXYNOS850_SERIAL_DRV_DATA (&exynos850_serial_drv_data)
-#define EXYNOS_FIFOSZDT_DRV_DATA (&exynos_fifoszdt_serial_drv_data)
+#define GS101_SERIAL_DRV_DATA (&gs101_serial_drv_data)
#else
#define EXYNOS4210_SERIAL_DRV_DATA NULL
#define EXYNOS5433_SERIAL_DRV_DATA NULL
#define EXYNOS850_SERIAL_DRV_DATA NULL
-#define EXYNOS_FIFOSZDT_DRV_DATA NULL
+#define GS101_SERIAL_DRV_DATA NULL
#endif
#ifdef CONFIG_ARCH_APPLE
@@ -2517,6 +2539,7 @@ static const struct s3c24xx_serial_drv_data s5l_serial_drv_data = {
.name = "Apple S5L UART",
.type = TYPE_APPLE_S5L,
.port_type = PORT_8250,
+ .iotype = UPIO_MEM,
.fifosize = 16,
.rx_fifomask = S3C2410_UFSTAT_RXMASK,
.rx_fifoshift = S3C2410_UFSTAT_RXSHIFT,
@@ -2546,8 +2569,9 @@ static const struct s3c24xx_serial_drv_data artpec8_serial_drv_data = {
.name = "Axis ARTPEC-8 UART",
.type = TYPE_S3C6400,
.port_type = PORT_S3C6400,
+ .iotype = UPIO_MEM,
.fifosize = 64,
- .has_divslot = 1,
+ .has_divslot = true,
.rx_fifomask = S5PV210_UFSTAT_RXMASK,
.rx_fifoshift = S5PV210_UFSTAT_RXSHIFT,
.rx_fifofull = S5PV210_UFSTAT_RXFULL,
@@ -2594,7 +2618,7 @@ static const struct platform_device_id s3c24xx_serial_driver_ids[] = {
.driver_data = (kernel_ulong_t)ARTPEC8_SERIAL_DRV_DATA,
}, {
.name = "gs101-uart",
- .driver_data = (kernel_ulong_t)EXYNOS_FIFOSZDT_DRV_DATA,
+ .driver_data = (kernel_ulong_t)GS101_SERIAL_DRV_DATA,
},
{ },
};
@@ -2617,7 +2641,7 @@ static const struct of_device_id s3c24xx_uart_dt_match[] = {
{ .compatible = "axis,artpec8-uart",
.data = ARTPEC8_SERIAL_DRV_DATA },
{ .compatible = "google,gs101-uart",
- .data = EXYNOS_FIFOSZDT_DRV_DATA },
+ .data = GS101_SERIAL_DRV_DATA },
{},
};
MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match);
@@ -2716,7 +2740,8 @@ static int samsung_early_read(struct console *con, char *s, unsigned int n)
{
struct earlycon_device *dev = con->data;
const struct samsung_early_console_data *data = dev->port.private_data;
- int ch, ufstat, num_read = 0;
+ int num_read = 0;
+ u32 ch, ufstat;
while (num_read < n) {
ufstat = rd_regl(&dev->port, S3C2410_UFSTAT);
@@ -2785,6 +2810,17 @@ OF_EARLYCON_DECLARE(exynos4210, "samsung,exynos4210-uart",
OF_EARLYCON_DECLARE(artpec8, "axis,artpec8-uart",
s5pv210_early_console_setup);
+static int __init gs101_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ /* gs101 always expects MMIO32 register accesses. */
+ device->port.iotype = UPIO_MEM32;
+
+ return s5pv210_early_console_setup(device, opt);
+}
+
+OF_EARLYCON_DECLARE(gs101, "google,gs101-uart", gs101_early_console_setup);
+
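The gs101 entries above differ from the other Samsung variants mainly in the iotype: the SoC only tolerates 32-bit register accesses, so both the driver data and the early console force UPIO_MEM32. A minimal sketch (not the driver's actual rd_regl() helper) of how a port's iotype can drive the MMIO access width, assuming a generic uart_port:

static u32 example_samsung_read(struct uart_port *port, unsigned int reg)
{
	void __iomem *addr = port->membase + reg;

	switch (port->iotype) {
	case UPIO_MEM32:
		return readl(addr);		/* 32-bit MMIO, as gs101 requires */
	case UPIO_MEM32BE:
		return ioread32be(addr);	/* big-endian 32-bit MMIO */
	default:
		return readb(addr);		/* plain 8-bit MMIO (UPIO_MEM) */
	}
}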
/* Apple S5L */
static int __init apple_s5l_early_console_setup(struct earlycon_device *device,
const char *opt)
diff --git a/drivers/tty/serial/serial_base.h b/drivers/tty/serial/serial_base.h
index c74c548f0db62..b6c38d2edfd40 100644
--- a/drivers/tty/serial/serial_base.h
+++ b/drivers/tty/serial/serial_base.h
@@ -22,6 +22,7 @@ struct serial_ctrl_device {
struct serial_port_device {
struct device dev;
struct uart_port *port;
+ unsigned int tx_enabled:1;
};
int serial_base_ctrl_init(void);
@@ -30,6 +31,9 @@ void serial_base_ctrl_exit(void);
int serial_base_port_init(void);
void serial_base_port_exit(void);
+void serial_base_port_startup(struct uart_port *port);
+void serial_base_port_shutdown(struct uart_port *port);
+
int serial_base_driver_register(struct device_driver *driver);
void serial_base_driver_unregister(struct device_driver *driver);
diff --git a/drivers/tty/serial/serial_base_bus.c b/drivers/tty/serial/serial_base_bus.c
index 3dfcf20c4eb68..4df2a4b10445a 100644
--- a/drivers/tty/serial/serial_base_bus.c
+++ b/drivers/tty/serial/serial_base_bus.c
@@ -41,7 +41,7 @@ static int serial_base_match(struct device *dev, struct device_driver *drv)
return 0;
}
-static struct bus_type serial_base_bus_type = {
+static const struct bus_type serial_base_bus_type = {
.name = "serial-base",
.match = serial_base_match,
};
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index d6a58a9e072a1..c476d884356db 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -156,7 +156,7 @@ static void __uart_start(struct uart_state *state)
* enabled, serial_port_runtime_resume() calls start_tx() again
* after enabling the device.
*/
- if (pm_runtime_active(&port_dev->dev))
+ if (!pm_runtime_enabled(port->dev) || pm_runtime_active(&port_dev->dev))
port->ops->start_tx(port);
pm_runtime_mark_last_busy(&port_dev->dev);
pm_runtime_put_autosuspend(&port_dev->dev);
@@ -323,16 +323,26 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state,
bool init_hw)
{
struct tty_port *port = &state->port;
+ struct uart_port *uport;
int retval;
if (tty_port_initialized(port))
- return 0;
+ goto out_base_port_startup;
retval = uart_port_startup(tty, state, init_hw);
- if (retval)
+ if (retval) {
set_bit(TTY_IO_ERROR, &tty->flags);
+ return retval;
+ }
- return retval;
+out_base_port_startup:
+ uport = uart_port_check(state);
+ if (!uport)
+ return -EIO;
+
+ serial_base_port_startup(uport);
+
+ return 0;
}
/*
@@ -355,6 +365,9 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
if (tty)
set_bit(TTY_IO_ERROR, &tty->flags);
+ if (uport)
+ serial_base_port_shutdown(uport);
+
if (tty_port_initialized(port)) {
tty_port_set_initialized(port, false);
@@ -1775,6 +1788,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
uport->ops->stop_rx(uport);
uart_port_unlock_irq(uport);
+ serial_base_port_shutdown(uport);
uart_port_shutdown(port);
/*
@@ -1788,6 +1802,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
* Free the transmit buffer.
*/
uart_port_lock_irq(uport);
+ uart_circ_clear(&state->xmit);
buf = state->xmit.buf;
state->xmit.buf = NULL;
uart_port_unlock_irq(uport);
@@ -2608,7 +2623,12 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
port->type = PORT_UNKNOWN;
flags |= UART_CONFIG_TYPE;
}
+ /* Synchronize with possible boot console. */
+ if (uart_console(port))
+ console_lock();
port->ops->config_port(port, flags);
+ if (uart_console(port))
+ console_unlock();
}
if (port->type != PORT_UNKNOWN) {
@@ -2616,6 +2636,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
uart_report_port(drv, port);
+ /* Synchronize with possible boot console. */
+ if (uart_console(port))
+ console_lock();
+
/* Power up port for set_mctrl() */
uart_change_pm(state, UART_PM_STATE_ON);
@@ -2632,6 +2656,9 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
uart_rs485_config(port);
+ if (uart_console(port))
+ console_unlock();
+
/*
* If this driver supports console, and it hasn't been
* successfully registered yet, try to re-register it.
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
index 72b6f4f326e2b..7e3a1c7b097c3 100644
--- a/drivers/tty/serial/serial_port.c
+++ b/drivers/tty/serial/serial_port.c
@@ -8,7 +8,10 @@
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/serial_core.h>
#include <linux/spinlock.h>
@@ -36,8 +39,12 @@ static int serial_port_runtime_resume(struct device *dev)
/* Flush any pending TX for the port */
uart_port_lock_irqsave(port, &flags);
+ if (!port_dev->tx_enabled)
+ goto unlock;
if (__serial_port_busy(port))
port->ops->start_tx(port);
+
+unlock:
uart_port_unlock_irqrestore(port, flags);
out:
@@ -57,6 +64,11 @@ static int serial_port_runtime_suspend(struct device *dev)
return 0;
uart_port_lock_irqsave(port, &flags);
+ if (!port_dev->tx_enabled) {
+ uart_port_unlock_irqrestore(port, flags);
+ return 0;
+ }
+
busy = __serial_port_busy(port);
if (busy)
port->ops->start_tx(port);
@@ -68,6 +80,31 @@ static int serial_port_runtime_suspend(struct device *dev)
return busy ? -EBUSY : 0;
}
+static void serial_base_port_set_tx(struct uart_port *port,
+ struct serial_port_device *port_dev,
+ bool enabled)
+{
+ unsigned long flags;
+
+ uart_port_lock_irqsave(port, &flags);
+ port_dev->tx_enabled = enabled;
+ uart_port_unlock_irqrestore(port, flags);
+}
+
+void serial_base_port_startup(struct uart_port *port)
+{
+ struct serial_port_device *port_dev = port->port_dev;
+
+ serial_base_port_set_tx(port, port_dev, true);
+}
+
+void serial_base_port_shutdown(struct uart_port *port)
+{
+ struct serial_port_device *port_dev = port->port_dev;
+
+ serial_base_port_set_tx(port, port_dev, false);
+}
+
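These helpers give the serial core a way to tell the port device whether TX may be driven at all: uart_startup() marks the port TX-capable and the shutdown paths clear the flag again, so the runtime PM callbacks above no longer kick start_tx() on a port that is not open. A condensed sketch, assuming __serial_port_busy() is the helper used elsewhere in this file, of how the resume path consumes the flag:

static int example_port_runtime_resume(struct serial_port_device *port_dev)
{
	struct uart_port *port = port_dev->port;
	unsigned long flags;

	uart_port_lock_irqsave(port, &flags);
	/* Only restart TX once the serial core has opened the port */
	if (port_dev->tx_enabled && __serial_port_busy(port))
		port->ops->start_tx(port);
	uart_port_unlock_irqrestore(port, flags);

	return 0;
}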
static DEFINE_RUNTIME_DEV_PM_OPS(serial_port_pm,
serial_port_runtime_suspend,
serial_port_runtime_resume, NULL);
@@ -105,6 +142,148 @@ void uart_remove_one_port(struct uart_driver *drv, struct uart_port *port)
}
EXPORT_SYMBOL(uart_remove_one_port);
+/**
+ * __uart_read_properties - read firmware properties of the given UART port
+ * @port: corresponding port
+ * @use_defaults: apply defaults (when %true) or validate the values (when %false)
+ *
+ * The following device properties are supported:
+ * - clock-frequency (optional)
+ * - fifo-size (optional)
+ * - no-loopback-test (optional)
+ * - reg-shift (defaults may apply)
+ * - reg-offset (value may be validated)
+ * - reg-io-width (defaults may apply or value may be validated)
+ * - interrupts (OF only)
+ * - serial [alias ID] (OF only)
+ *
+ * If port->dev is a struct platform_device, the interrupt line will be
+ * retrieved via a platform_get_irq() call against that device.
+ * Otherwise it will be retrieved via a fwnode_irq_get() call. In both
+ * cases, index 0 of the resource is used.
+ *
+ * The caller is responsible for initializing the following fields of the @port
+ * ->dev (must be valid)
+ * ->flags
+ * ->mapbase
+ * ->mapsize
+ * ->regshift (if @use_defaults is false)
+ * before calling this function. Alternatively, the above-mentioned fields
+ * may be zeroed; in that case only the fields that have associated
+ * properties found will be set to the respective values.
+ *
+ * If no error occurred, ->irq, ->mapbase and ->mapsize will be altered.
+ * The ->iotype is always altered.
+ *
+ * When @use_defaults is true and the respective property is not found
+ * the following values will be applied:
+ * ->regshift = 0
+ * In this case IRQ must be provided, otherwise an error will be returned.
+ *
+ * When @use_defaults is false and the respective property is found
+ * the following values will be validated:
+ * - reg-io-width (->iotype)
+ * - reg-offset (->mapsize against ->mapbase)
+ *
+ * Returns: 0 on success or negative errno on failure
+ */
+static int __uart_read_properties(struct uart_port *port, bool use_defaults)
+{
+ struct device *dev = port->dev;
+ u32 value;
+ int ret;
+
+ /* Read optional UART functional clock frequency */
+ device_property_read_u32(dev, "clock-frequency", &port->uartclk);
+
+ /* Read the registers alignment (default: 8-bit) */
+ ret = device_property_read_u32(dev, "reg-shift", &value);
+ if (ret)
+ port->regshift = use_defaults ? 0 : port->regshift;
+ else
+ port->regshift = value;
+
+ /* Read the registers I/O access type (default: MMIO 8-bit) */
+ ret = device_property_read_u32(dev, "reg-io-width", &value);
+ if (ret) {
+ port->iotype = UPIO_MEM;
+ } else {
+ switch (value) {
+ case 1:
+ port->iotype = UPIO_MEM;
+ break;
+ case 2:
+ port->iotype = UPIO_MEM16;
+ break;
+ case 4:
+ port->iotype = device_is_big_endian(dev) ? UPIO_MEM32BE : UPIO_MEM32;
+ break;
+ default:
+ if (!use_defaults) {
+ dev_err(dev, "Unsupported reg-io-width (%u)\n", value);
+ return -EINVAL;
+ }
+ port->iotype = UPIO_UNKNOWN;
+ break;
+ }
+ }
+
+ /* Read the address mapping base offset (default: no offset) */
+ ret = device_property_read_u32(dev, "reg-offset", &value);
+ if (ret)
+ value = 0;
+
+ /* Check for shifted address mapping overflow */
+ if (!use_defaults && port->mapsize < value) {
+ dev_err(dev, "reg-offset %u exceeds region size %pa\n", value, &port->mapsize);
+ return -EINVAL;
+ }
+
+ port->mapbase += value;
+ port->mapsize -= value;
+
+ /* Read optional FIFO size */
+ device_property_read_u32(dev, "fifo-size", &port->fifosize);
+
+ if (device_property_read_bool(dev, "no-loopback-test"))
+ port->flags |= UPF_SKIP_TEST;
+
+ /* Get index of serial line, if found in DT aliases */
+ ret = of_alias_get_id(dev_of_node(dev), "serial");
+ if (ret >= 0)
+ port->line = ret;
+
+ if (dev_is_platform(dev))
+ ret = platform_get_irq(to_platform_device(dev), 0);
+ else
+ ret = fwnode_irq_get(dev_fwnode(dev), 0);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ if (ret > 0)
+ port->irq = ret;
+ else if (use_defaults)
+ /* By default IRQ support is mandatory */
+ return ret;
+ else
+ port->irq = 0;
+
+ port->flags |= UPF_SHARE_IRQ;
+
+ return 0;
+}
+
+int uart_read_port_properties(struct uart_port *port)
+{
+ return __uart_read_properties(port, true);
+}
+EXPORT_SYMBOL_GPL(uart_read_port_properties);
+
+int uart_read_and_validate_port_properties(struct uart_port *port)
+{
+ return __uart_read_properties(port, false);
+}
+EXPORT_SYMBOL_GPL(uart_read_and_validate_port_properties);
+
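A hedged usage sketch of the two new helpers (hypothetical driver, not part of this patch): a platform driver fills in the fields the kernel-doc above lists as the caller's responsibility and then lets uart_read_port_properties() pick up the generic firmware properties:

static int example_uart_probe(struct platform_device *pdev)
{
	struct uart_port *port;
	struct resource *res;
	int ret;

	port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(port->membase))
		return PTR_ERR(port->membase);

	port->dev = &pdev->dev;
	port->mapbase = res->start;
	port->mapsize = resource_size(res);

	/* Fills ->irq, ->iotype, ->regshift, ->uartclk, ->fifosize, ... */
	ret = uart_read_port_properties(port);
	if (ret)
		return ret;

	/* ... register the port with the driver's uart_driver here ... */
	return 0;
}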
static struct device_driver serial_port_driver = {
.name = "port",
.suppress_bind_attrs = true,
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index e1897894a4ef5..abba397229581 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -23,9 +23,10 @@
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
-
#include <linux/io.h>
+#include <asm/txx9/generic.h>
+
#define PASS_LIMIT 256
#if !defined(CONFIG_SERIAL_TXX9_STDSERIAL)
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index a85e7b9a2e492..e512eaa57ed56 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -576,13 +576,13 @@ static void sci_start_tx(struct uart_port *port)
#ifdef CONFIG_SERIAL_SH_SCI_DMA
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
- u16 new, scr = serial_port_in(port, SCSCR);
+ u16 new, scr = sci_serial_in(port, SCSCR);
if (s->chan_tx)
new = scr | SCSCR_TDRQE;
else
new = scr & ~SCSCR_TDRQE;
if (new != scr)
- serial_port_out(port, SCSCR, new);
+ sci_serial_out(port, SCSCR, new);
}
if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
@@ -599,7 +599,7 @@ static void sci_start_tx(struct uart_port *port)
if (!s->chan_tx || s->cfg->regtype == SCIx_RZ_SCIFA_REGTYPE ||
port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
- ctrl = serial_port_in(port, SCSCR);
+ ctrl = sci_serial_in(port, SCSCR);
/*
* For SCI, TE (transmit enable) must be set after setting TIE
@@ -609,7 +609,7 @@ static void sci_start_tx(struct uart_port *port)
if (port->type == PORT_SCI)
ctrl |= SCSCR_TE;
- serial_port_out(port, SCSCR, ctrl | SCSCR_TIE);
+ sci_serial_out(port, SCSCR, ctrl | SCSCR_TIE);
}
}
@@ -618,14 +618,14 @@ static void sci_stop_tx(struct uart_port *port)
unsigned short ctrl;
/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
- ctrl = serial_port_in(port, SCSCR);
+ ctrl = sci_serial_in(port, SCSCR);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
ctrl &= ~SCSCR_TDRQE;
ctrl &= ~SCSCR_TIE;
- serial_port_out(port, SCSCR, ctrl);
+ sci_serial_out(port, SCSCR, ctrl);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
if (to_sci_port(port)->chan_tx &&
@@ -640,41 +640,40 @@ static void sci_start_rx(struct uart_port *port)
{
unsigned short ctrl;
- ctrl = serial_port_in(port, SCSCR) | port_rx_irq_mask(port);
+ ctrl = sci_serial_in(port, SCSCR) | port_rx_irq_mask(port);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
ctrl &= ~SCSCR_RDRQE;
- serial_port_out(port, SCSCR, ctrl);
+ sci_serial_out(port, SCSCR, ctrl);
}
static void sci_stop_rx(struct uart_port *port)
{
unsigned short ctrl;
- ctrl = serial_port_in(port, SCSCR);
+ ctrl = sci_serial_in(port, SCSCR);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
ctrl &= ~SCSCR_RDRQE;
ctrl &= ~port_rx_irq_mask(port);
- serial_port_out(port, SCSCR, ctrl);
+ sci_serial_out(port, SCSCR, ctrl);
}
static void sci_clear_SCxSR(struct uart_port *port, unsigned int mask)
{
if (port->type == PORT_SCI) {
/* Just store the mask */
- serial_port_out(port, SCxSR, mask);
+ sci_serial_out(port, SCxSR, mask);
} else if (to_sci_port(port)->params->overrun_mask == SCIFA_ORER) {
/* SCIFA/SCIFB and SCIF on SH7705/SH7720/SH7721 */
/* Only clear the status bits we want to clear */
- serial_port_out(port, SCxSR,
- serial_port_in(port, SCxSR) & mask);
+ sci_serial_out(port, SCxSR, sci_serial_in(port, SCxSR) & mask);
} else {
/* Store the mask, clear parity/framing errors */
- serial_port_out(port, SCxSR, mask & ~(SCIF_FERC | SCIF_PERC));
+ sci_serial_out(port, SCxSR, mask & ~(SCIF_FERC | SCIF_PERC));
}
}
@@ -688,7 +687,7 @@ static int sci_poll_get_char(struct uart_port *port)
int c;
do {
- status = serial_port_in(port, SCxSR);
+ status = sci_serial_in(port, SCxSR);
if (status & SCxSR_ERRORS(port)) {
sci_clear_SCxSR(port, SCxSR_ERROR_CLEAR(port));
continue;
@@ -699,10 +698,10 @@ static int sci_poll_get_char(struct uart_port *port)
if (!(status & SCxSR_RDxF(port)))
return NO_POLL_CHAR;
- c = serial_port_in(port, SCxRDR);
+ c = sci_serial_in(port, SCxRDR);
/* Dummy read */
- serial_port_in(port, SCxSR);
+ sci_serial_in(port, SCxSR);
sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
return c;
@@ -714,10 +713,10 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c)
unsigned short status;
do {
- status = serial_port_in(port, SCxSR);
+ status = sci_serial_in(port, SCxSR);
} while (!(status & SCxSR_TDxE(port)));
- serial_port_out(port, SCxTDR, c);
+ sci_serial_out(port, SCxTDR, c);
sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
}
#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE ||
@@ -736,8 +735,8 @@ static void sci_init_pins(struct uart_port *port, unsigned int cflag)
}
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
- u16 data = serial_port_in(port, SCPDR);
- u16 ctrl = serial_port_in(port, SCPCR);
+ u16 data = sci_serial_in(port, SCPDR);
+ u16 ctrl = sci_serial_in(port, SCPCR);
/* Enable RXD and TXD pin functions */
ctrl &= ~(SCPCR_RXDC | SCPCR_TXDC);
@@ -756,10 +755,10 @@ static void sci_init_pins(struct uart_port *port, unsigned int cflag)
/* Enable CTS# pin function */
ctrl &= ~SCPCR_CTSC;
}
- serial_port_out(port, SCPDR, data);
- serial_port_out(port, SCPCR, ctrl);
+ sci_serial_out(port, SCPDR, data);
+ sci_serial_out(port, SCPCR, ctrl);
} else if (sci_getreg(port, SCSPTR)->size) {
- u16 status = serial_port_in(port, SCSPTR);
+ u16 status = sci_serial_in(port, SCSPTR);
/* RTS# is always output; and active low, unless autorts */
status |= SCSPTR_RTSIO;
@@ -769,7 +768,7 @@ static void sci_init_pins(struct uart_port *port, unsigned int cflag)
status &= ~SCSPTR_RTSDT;
/* CTS# and SCK are inputs */
status &= ~(SCSPTR_CTSIO | SCSPTR_SCKIO);
- serial_port_out(port, SCSPTR, status);
+ sci_serial_out(port, SCSPTR, status);
}
}
@@ -781,13 +780,13 @@ static int sci_txfill(struct uart_port *port)
reg = sci_getreg(port, SCTFDR);
if (reg->size)
- return serial_port_in(port, SCTFDR) & fifo_mask;
+ return sci_serial_in(port, SCTFDR) & fifo_mask;
reg = sci_getreg(port, SCFDR);
if (reg->size)
- return serial_port_in(port, SCFDR) >> 8;
+ return sci_serial_in(port, SCFDR) >> 8;
- return !(serial_port_in(port, SCxSR) & SCI_TDRE);
+ return !(sci_serial_in(port, SCxSR) & SCI_TDRE);
}
static int sci_txroom(struct uart_port *port)
@@ -803,13 +802,13 @@ static int sci_rxfill(struct uart_port *port)
reg = sci_getreg(port, SCRFDR);
if (reg->size)
- return serial_port_in(port, SCRFDR) & fifo_mask;
+ return sci_serial_in(port, SCRFDR) & fifo_mask;
reg = sci_getreg(port, SCFDR);
if (reg->size)
- return serial_port_in(port, SCFDR) & fifo_mask;
+ return sci_serial_in(port, SCFDR) & fifo_mask;
- return (serial_port_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
+ return (sci_serial_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
}
/* ********************************************************************** *
@@ -824,14 +823,14 @@ static void sci_transmit_chars(struct uart_port *port)
unsigned short ctrl;
int count;
- status = serial_port_in(port, SCxSR);
+ status = sci_serial_in(port, SCxSR);
if (!(status & SCxSR_TDxE(port))) {
- ctrl = serial_port_in(port, SCSCR);
+ ctrl = sci_serial_in(port, SCSCR);
if (uart_circ_empty(xmit))
ctrl &= ~SCSCR_TIE;
else
ctrl |= SCSCR_TIE;
- serial_port_out(port, SCSCR, ctrl);
+ sci_serial_out(port, SCSCR, ctrl);
return;
}
@@ -847,15 +846,15 @@ static void sci_transmit_chars(struct uart_port *port)
c = xmit->buf[xmit->tail];
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
} else if (port->type == PORT_SCI && uart_circ_empty(xmit)) {
- ctrl = serial_port_in(port, SCSCR);
+ ctrl = sci_serial_in(port, SCSCR);
ctrl &= ~SCSCR_TE;
- serial_port_out(port, SCSCR, ctrl);
+ sci_serial_out(port, SCSCR, ctrl);
return;
} else {
break;
}
- serial_port_out(port, SCxTDR, c);
+ sci_serial_out(port, SCxTDR, c);
port->icount.tx++;
} while (--count > 0);
@@ -866,10 +865,10 @@ static void sci_transmit_chars(struct uart_port *port)
uart_write_wakeup(port);
if (uart_circ_empty(xmit)) {
if (port->type == PORT_SCI) {
- ctrl = serial_port_in(port, SCSCR);
+ ctrl = sci_serial_in(port, SCSCR);
ctrl &= ~SCSCR_TIE;
ctrl |= SCSCR_TEIE;
- serial_port_out(port, SCSCR, ctrl);
+ sci_serial_out(port, SCSCR, ctrl);
}
sci_stop_tx(port);
@@ -883,7 +882,7 @@ static void sci_receive_chars(struct uart_port *port)
unsigned short status;
unsigned char flag;
- status = serial_port_in(port, SCxSR);
+ status = sci_serial_in(port, SCxSR);
if (!(status & SCxSR_RDxF(port)))
return;
@@ -896,7 +895,7 @@ static void sci_receive_chars(struct uart_port *port)
break;
if (port->type == PORT_SCI) {
- char c = serial_port_in(port, SCxRDR);
+ char c = sci_serial_in(port, SCxRDR);
if (uart_handle_sysrq_char(port, c))
count = 0;
else
@@ -907,11 +906,11 @@ static void sci_receive_chars(struct uart_port *port)
if (port->type == PORT_SCIF ||
port->type == PORT_HSCIF) {
- status = serial_port_in(port, SCxSR);
- c = serial_port_in(port, SCxRDR);
+ status = sci_serial_in(port, SCxSR);
+ c = sci_serial_in(port, SCxRDR);
} else {
- c = serial_port_in(port, SCxRDR);
- status = serial_port_in(port, SCxSR);
+ c = sci_serial_in(port, SCxRDR);
+ status = sci_serial_in(port, SCxSR);
}
if (uart_handle_sysrq_char(port, c)) {
count--; i--;
@@ -932,7 +931,7 @@ static void sci_receive_chars(struct uart_port *port)
}
}
- serial_port_in(port, SCxSR); /* dummy read */
+ sci_serial_in(port, SCxSR); /* dummy read */
sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
copied += count;
@@ -944,8 +943,8 @@ static void sci_receive_chars(struct uart_port *port)
tty_flip_buffer_push(tport);
} else {
/* TTY buffers full; read from RX reg to prevent lockup */
- serial_port_in(port, SCxRDR);
- serial_port_in(port, SCxSR); /* dummy read */
+ sci_serial_in(port, SCxRDR);
+ sci_serial_in(port, SCxSR); /* dummy read */
sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
}
}
@@ -953,7 +952,7 @@ static void sci_receive_chars(struct uart_port *port)
static int sci_handle_errors(struct uart_port *port)
{
int copied = 0;
- unsigned short status = serial_port_in(port, SCxSR);
+ unsigned short status = sci_serial_in(port, SCxSR);
struct tty_port *tport = &port->state->port;
struct sci_port *s = to_sci_port(port);
@@ -1000,10 +999,10 @@ static int sci_handle_fifo_overrun(struct uart_port *port)
if (!reg->size)
return 0;
- status = serial_port_in(port, s->params->overrun_reg);
+ status = sci_serial_in(port, s->params->overrun_reg);
if (status & s->params->overrun_mask) {
status &= ~s->params->overrun_mask;
- serial_port_out(port, s->params->overrun_reg, status);
+ sci_serial_out(port, s->params->overrun_reg, status);
port->icount.overrun++;
@@ -1018,7 +1017,7 @@ static int sci_handle_fifo_overrun(struct uart_port *port)
static int sci_handle_breaks(struct uart_port *port)
{
int copied = 0;
- unsigned short status = serial_port_in(port, SCxSR);
+ unsigned short status = sci_serial_in(port, SCxSR);
struct tty_port *tport = &port->state->port;
if (uart_handle_break(port))
@@ -1051,7 +1050,7 @@ static int scif_set_rtrg(struct uart_port *port, int rx_trig)
/* HSCIF can be set to an arbitrary level. */
if (sci_getreg(port, HSRTRGR)->size) {
- serial_port_out(port, HSRTRGR, rx_trig);
+ sci_serial_out(port, HSRTRGR, rx_trig);
return rx_trig;
}
@@ -1092,9 +1091,9 @@ static int scif_set_rtrg(struct uart_port *port, int rx_trig)
return 1;
}
- serial_port_out(port, SCFCR,
- (serial_port_in(port, SCFCR) &
- ~(SCFCR_RTRG1 | SCFCR_RTRG0)) | bits);
+ sci_serial_out(port, SCFCR,
+ (sci_serial_in(port, SCFCR) &
+ ~(SCFCR_RTRG1 | SCFCR_RTRG0)) | bits);
return rx_trig;
}
@@ -1102,9 +1101,9 @@ static int scif_set_rtrg(struct uart_port *port, int rx_trig)
static int scif_rtrg_enabled(struct uart_port *port)
{
if (sci_getreg(port, HSRTRGR)->size)
- return serial_port_in(port, HSRTRGR) != 0;
+ return sci_serial_in(port, HSRTRGR) != 0;
else
- return (serial_port_in(port, SCFCR) &
+ return (sci_serial_in(port, SCFCR) &
(SCFCR_RTRG0 | SCFCR_RTRG1)) != 0;
}
@@ -1219,8 +1218,8 @@ static void sci_dma_tx_complete(void *arg)
s->cookie_tx = -EINVAL;
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB ||
s->cfg->regtype == SCIx_RZ_SCIFA_REGTYPE) {
- u16 ctrl = serial_port_in(port, SCSCR);
- serial_port_out(port, SCSCR, ctrl & ~SCSCR_TIE);
+ u16 ctrl = sci_serial_in(port, SCSCR);
+ sci_serial_out(port, SCSCR, ctrl & ~SCSCR_TIE);
if (s->cfg->regtype == SCIx_RZ_SCIFA_REGTYPE) {
/* Switch irq from DMA to SCIF */
dmaengine_pause(s->chan_tx_saved);
@@ -1296,7 +1295,7 @@ static void sci_dma_rx_reenable_irq(struct sci_port *s)
u16 scr;
/* Direct new serial port interrupts back to CPU */
- scr = serial_port_in(port, SCSCR);
+ scr = sci_serial_in(port, SCSCR);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB ||
s->cfg->regtype == SCIx_RZ_SCIFA_REGTYPE) {
enable_irq(s->irqs[SCIx_RXI_IRQ]);
@@ -1305,7 +1304,7 @@ static void sci_dma_rx_reenable_irq(struct sci_port *s)
else
scr &= ~SCSCR_RDRQE;
}
- serial_port_out(port, SCSCR, scr | SCSCR_RIE);
+ sci_serial_out(port, SCSCR, scr | SCSCR_RIE);
}
static void sci_dma_rx_complete(void *arg)
@@ -1714,8 +1713,8 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
#ifdef CONFIG_SERIAL_SH_SCI_DMA
if (s->chan_rx) {
- u16 scr = serial_port_in(port, SCSCR);
- u16 ssr = serial_port_in(port, SCxSR);
+ u16 scr = sci_serial_in(port, SCSCR);
+ u16 ssr = sci_serial_in(port, SCxSR);
/* Disable future Rx interrupts */
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB ||
@@ -1733,10 +1732,10 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
scr &= ~SCSCR_RIE;
}
- serial_port_out(port, SCSCR, scr);
+ sci_serial_out(port, SCSCR, scr);
/* Clear current interrupt */
- serial_port_out(port, SCxSR,
- ssr & ~(SCIF_DR | SCxSR_RDxF(port)));
+ sci_serial_out(port, SCxSR,
+ ssr & ~(SCIF_DR | SCxSR_RDxF(port)));
dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u us\n",
jiffies, s->rx_timeout);
start_hrtimer_us(&s->rx_timer, s->rx_timeout);
@@ -1786,9 +1785,9 @@ static irqreturn_t sci_tx_end_interrupt(int irq, void *ptr)
return sci_tx_interrupt(irq, ptr);
uart_port_lock_irqsave(port, &flags);
- ctrl = serial_port_in(port, SCSCR);
+ ctrl = sci_serial_in(port, SCSCR);
ctrl &= ~(SCSCR_TE | SCSCR_TEIE);
- serial_port_out(port, SCSCR, ctrl);
+ sci_serial_out(port, SCSCR, ctrl);
uart_port_unlock_irqrestore(port, flags);
return IRQ_HANDLED;
@@ -1802,7 +1801,7 @@ static irqreturn_t sci_br_interrupt(int irq, void *ptr)
sci_handle_breaks(port);
/* drop invalid character received before break was detected */
- serial_port_in(port, SCxRDR);
+ sci_serial_in(port, SCxRDR);
sci_clear_SCxSR(port, SCxSR_BREAK_CLEAR(port));
@@ -1816,7 +1815,7 @@ static irqreturn_t sci_er_interrupt(int irq, void *ptr)
if (s->irqs[SCIx_ERI_IRQ] == s->irqs[SCIx_BRI_IRQ]) {
/* Break and Error interrupts are muxed */
- unsigned short ssr_status = serial_port_in(port, SCxSR);
+ unsigned short ssr_status = sci_serial_in(port, SCxSR);
/* Break Interrupt */
if (ssr_status & SCxSR_BRK(port))
@@ -1831,7 +1830,7 @@ static irqreturn_t sci_er_interrupt(int irq, void *ptr)
if (port->type == PORT_SCI) {
if (sci_handle_errors(port)) {
/* discard character in rx buffer */
- serial_port_in(port, SCxSR);
+ sci_serial_in(port, SCxSR);
sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
}
} else {
@@ -1856,12 +1855,12 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
struct sci_port *s = to_sci_port(port);
irqreturn_t ret = IRQ_NONE;
- ssr_status = serial_port_in(port, SCxSR);
- scr_status = serial_port_in(port, SCSCR);
+ ssr_status = sci_serial_in(port, SCxSR);
+ scr_status = sci_serial_in(port, SCSCR);
if (s->params->overrun_reg == SCxSR)
orer_status = ssr_status;
else if (sci_getreg(port, s->params->overrun_reg)->size)
- orer_status = serial_port_in(port, s->params->overrun_reg);
+ orer_status = sci_serial_in(port, s->params->overrun_reg);
err_enabled = scr_status & port_rx_irq_mask(port);
@@ -2038,7 +2037,7 @@ static void sci_free_irq(struct sci_port *port)
static unsigned int sci_tx_empty(struct uart_port *port)
{
- unsigned short status = serial_port_in(port, SCxSR);
+ unsigned short status = sci_serial_in(port, SCxSR);
unsigned short in_tx_fifo = sci_txfill(port);
return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
@@ -2047,27 +2046,27 @@ static unsigned int sci_tx_empty(struct uart_port *port)
static void sci_set_rts(struct uart_port *port, bool state)
{
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
- u16 data = serial_port_in(port, SCPDR);
+ u16 data = sci_serial_in(port, SCPDR);
/* Active low */
if (state)
data &= ~SCPDR_RTSD;
else
data |= SCPDR_RTSD;
- serial_port_out(port, SCPDR, data);
+ sci_serial_out(port, SCPDR, data);
/* RTS# is output */
- serial_port_out(port, SCPCR,
- serial_port_in(port, SCPCR) | SCPCR_RTSC);
+ sci_serial_out(port, SCPCR,
+ sci_serial_in(port, SCPCR) | SCPCR_RTSC);
} else if (sci_getreg(port, SCSPTR)->size) {
- u16 ctrl = serial_port_in(port, SCSPTR);
+ u16 ctrl = sci_serial_in(port, SCSPTR);
/* Active low */
if (state)
ctrl &= ~SCSPTR_RTSDT;
else
ctrl |= SCSPTR_RTSDT;
- serial_port_out(port, SCSPTR, ctrl);
+ sci_serial_out(port, SCSPTR, ctrl);
}
}
@@ -2075,10 +2074,10 @@ static bool sci_get_cts(struct uart_port *port)
{
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
/* Active low */
- return !(serial_port_in(port, SCPDR) & SCPDR_CTSD);
+ return !(sci_serial_in(port, SCPDR) & SCPDR_CTSD);
} else if (sci_getreg(port, SCSPTR)->size) {
/* Active low */
- return !(serial_port_in(port, SCSPTR) & SCSPTR_CTSDT);
+ return !(sci_serial_in(port, SCSPTR) & SCSPTR_CTSDT);
}
return true;
@@ -2108,9 +2107,8 @@ static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
*/
reg = sci_getreg(port, SCFCR);
if (reg->size)
- serial_port_out(port, SCFCR,
- serial_port_in(port, SCFCR) |
- SCFCR_LOOP);
+ sci_serial_out(port, SCFCR,
+ sci_serial_in(port, SCFCR) | SCFCR_LOOP);
}
mctrl_gpio_set(s->gpios, mctrl);
@@ -2120,21 +2118,21 @@ static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
if (!(mctrl & TIOCM_RTS)) {
/* Disable Auto RTS */
- serial_port_out(port, SCFCR,
- serial_port_in(port, SCFCR) & ~SCFCR_MCE);
+ sci_serial_out(port, SCFCR,
+ sci_serial_in(port, SCFCR) & ~SCFCR_MCE);
/* Clear RTS */
sci_set_rts(port, 0);
} else if (s->autorts) {
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
/* Enable RTS# pin function */
- serial_port_out(port, SCPCR,
- serial_port_in(port, SCPCR) & ~SCPCR_RTSC);
+ sci_serial_out(port, SCPCR,
+ sci_serial_in(port, SCPCR) & ~SCPCR_RTSC);
}
/* Enable Auto RTS */
- serial_port_out(port, SCFCR,
- serial_port_in(port, SCFCR) | SCFCR_MCE);
+ sci_serial_out(port, SCFCR,
+ sci_serial_in(port, SCFCR) | SCFCR_MCE);
} else {
/* Set RTS */
sci_set_rts(port, 1);
@@ -2187,8 +2185,8 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
}
uart_port_lock_irqsave(port, &flags);
- scsptr = serial_port_in(port, SCSPTR);
- scscr = serial_port_in(port, SCSCR);
+ scsptr = sci_serial_in(port, SCSPTR);
+ scscr = sci_serial_in(port, SCSCR);
if (break_state == -1) {
scsptr = (scsptr | SCSPTR_SPB2IO) & ~SCSPTR_SPB2DT;
@@ -2198,8 +2196,8 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
scscr |= SCSCR_TE;
}
- serial_port_out(port, SCSPTR, scsptr);
- serial_port_out(port, SCSCR, scscr);
+ sci_serial_out(port, SCSPTR, scsptr);
+ sci_serial_out(port, SCSCR, scscr);
uart_port_unlock_irqrestore(port, flags);
}
@@ -2239,9 +2237,9 @@ static void sci_shutdown(struct uart_port *port)
* Stop RX and TX, disable related interrupts, keep clock source
* and HSCIF TOT bits
*/
- scr = serial_port_in(port, SCSCR);
- serial_port_out(port, SCSCR, scr &
- (SCSCR_CKE1 | SCSCR_CKE0 | s->hscif_tot));
+ scr = sci_serial_in(port, SCSCR);
+ sci_serial_out(port, SCSCR,
+ scr & (SCSCR_CKE1 | SCSCR_CKE0 | s->hscif_tot));
uart_port_unlock_irqrestore(port, flags);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
@@ -2390,19 +2388,19 @@ static void sci_reset(struct uart_port *port)
unsigned int status;
struct sci_port *s = to_sci_port(port);
- serial_port_out(port, SCSCR, s->hscif_tot); /* TE=0, RE=0, CKE1=0 */
+ sci_serial_out(port, SCSCR, s->hscif_tot); /* TE=0, RE=0, CKE1=0 */
reg = sci_getreg(port, SCFCR);
if (reg->size)
- serial_port_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
+ sci_serial_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
sci_clear_SCxSR(port,
SCxSR_RDxF_CLEAR(port) & SCxSR_ERROR_CLEAR(port) &
SCxSR_BREAK_CLEAR(port));
if (sci_getreg(port, SCLSR)->size) {
- status = serial_port_in(port, SCLSR);
+ status = sci_serial_in(port, SCLSR);
status &= ~(SCLSR_TO | SCLSR_ORER);
- serial_port_out(port, SCLSR, status);
+ sci_serial_out(port, SCLSR, status);
}
if (s->rx_trigger > 1) {
@@ -2540,8 +2538,8 @@ done:
* It controls the mux to select (H)SCK or frequency divided clock.
*/
if (best_clk >= 0 && sci_getreg(port, SCCKS)->size) {
- serial_port_out(port, SCDL, dl);
- serial_port_out(port, SCCKS, sccks);
+ sci_serial_out(port, SCDL, dl);
+ sci_serial_out(port, SCCKS, sccks);
}
uart_port_lock_irqsave(port, &flags);
@@ -2554,7 +2552,7 @@ done:
bits = tty_get_frame_size(termios->c_cflag);
if (sci_getreg(port, SEMR)->size)
- serial_port_out(port, SEMR, 0);
+ sci_serial_out(port, SEMR, 0);
if (best_clk >= 0) {
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
@@ -2569,9 +2567,9 @@ done:
case 27: smr_val |= SCSMR_SRC_27; break;
}
smr_val |= cks;
- serial_port_out(port, SCSCR, scr_val | s->hscif_tot);
- serial_port_out(port, SCSMR, smr_val);
- serial_port_out(port, SCBRR, brr);
+ sci_serial_out(port, SCSCR, scr_val | s->hscif_tot);
+ sci_serial_out(port, SCSMR, smr_val);
+ sci_serial_out(port, SCBRR, brr);
if (sci_getreg(port, HSSRR)->size) {
unsigned int hssrr = srr | HSCIF_SRE;
/* Calculate deviation from intended rate at the
@@ -2593,7 +2591,7 @@ done:
HSCIF_SRHP_MASK;
hssrr |= HSCIF_SRDE;
}
- serial_port_out(port, HSSRR, hssrr);
+ sci_serial_out(port, HSSRR, hssrr);
}
/* Wait one bit interval */
@@ -2601,10 +2599,10 @@ done:
} else {
/* Don't touch the bit rate configuration */
scr_val = s->cfg->scscr & (SCSCR_CKE1 | SCSCR_CKE0);
- smr_val |= serial_port_in(port, SCSMR) &
+ smr_val |= sci_serial_in(port, SCSMR) &
(SCSMR_CKEDG | SCSMR_SRC_MASK | SCSMR_CKS);
- serial_port_out(port, SCSCR, scr_val | s->hscif_tot);
- serial_port_out(port, SCSMR, smr_val);
+ sci_serial_out(port, SCSCR, scr_val | s->hscif_tot);
+ sci_serial_out(port, SCSMR, smr_val);
}
sci_init_pins(port, termios->c_cflag);
@@ -2613,7 +2611,7 @@ done:
s->autorts = false;
reg = sci_getreg(port, SCFCR);
if (reg->size) {
- unsigned short ctrl = serial_port_in(port, SCFCR);
+ unsigned short ctrl = sci_serial_in(port, SCFCR);
if ((port->flags & UPF_HARD_FLOW) &&
(termios->c_cflag & CRTSCTS)) {
@@ -2630,7 +2628,7 @@ done:
*/
ctrl &= ~(SCFCR_RFRST | SCFCR_TFRST);
- serial_port_out(port, SCFCR, ctrl);
+ sci_serial_out(port, SCFCR, ctrl);
}
if (port->flags & UPF_HARD_FLOW) {
/* Refresh (Auto) RTS */
@@ -2645,7 +2643,7 @@ done:
if (port->type != PORT_SCI)
scr_val |= SCSCR_TE;
scr_val |= SCSCR_RE | (s->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0));
- serial_port_out(port, SCSCR, scr_val | s->hscif_tot);
+ sci_serial_out(port, SCSCR, scr_val | s->hscif_tot);
if ((srr + 1 == 5) &&
(port->type == PORT_SCIFA || port->type == PORT_SCIFB)) {
/*
@@ -3017,9 +3015,6 @@ static int sci_init_single(struct platform_device *dev,
port->irq = sci_port->irqs[SCIx_RXI_IRQ];
port->irqflags = 0;
- port->serial_in = sci_serial_in;
- port->serial_out = sci_serial_out;
-
return 0;
}
@@ -3056,21 +3051,21 @@ static void serial_console_write(struct console *co, const char *s,
uart_port_lock_irqsave(port, &flags);
/* first save SCSCR then disable interrupts, keep clock source */
- ctrl = serial_port_in(port, SCSCR);
+ ctrl = sci_serial_in(port, SCSCR);
ctrl_temp = SCSCR_RE | SCSCR_TE |
(sci_port->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0)) |
(ctrl & (SCSCR_CKE1 | SCSCR_CKE0));
- serial_port_out(port, SCSCR, ctrl_temp | sci_port->hscif_tot);
+ sci_serial_out(port, SCSCR, ctrl_temp | sci_port->hscif_tot);
uart_console_write(port, s, count, serial_console_putchar);
/* wait until fifo is empty and last bit has been transmitted */
bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
- while ((serial_port_in(port, SCxSR) & bits) != bits)
+ while ((sci_serial_in(port, SCxSR) & bits) != bits)
cpu_relax();
/* restore the SCSCR */
- serial_port_out(port, SCSCR, ctrl);
+ sci_serial_out(port, SCSCR, ctrl);
if (locked)
uart_port_unlock_irqrestore(port, flags);
@@ -3503,8 +3498,6 @@ static int __init early_console_setup(struct earlycon_device *device,
if (!device->port.membase)
return -ENODEV;
- device->port.serial_in = sci_serial_in;
- device->port.serial_out = sci_serial_out;
device->port.type = type;
memcpy(&sci_ports[0].port, &device->port, sizeof(struct uart_port));
port_cfg.type = type;
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index a4cc569a78a25..0670fd9f84967 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -412,7 +412,8 @@ static void __ssp_receive_chars(struct sifive_serial_port *ssp)
break;
ssp->port.icount.rx++;
- uart_insert_char(&ssp->port, 0, 0, ch, TTY_NORMAL);
+ if (!uart_prepare_sysrq_char(&ssp->port, ch))
+ uart_insert_char(&ssp->port, 0, 0, ch, TTY_NORMAL);
}
tty_flip_buffer_push(&ssp->port.state->port);
@@ -534,7 +535,7 @@ static irqreturn_t sifive_serial_irq(int irq, void *dev_id)
if (ip & SIFIVE_SERIAL_IP_TXWM_MASK)
__ssp_transmit_chars(ssp);
- uart_port_unlock(&ssp->port);
+ uart_unlock_and_check_sysrq(&ssp->port);
return IRQ_HANDLED;
}
@@ -791,13 +792,10 @@ static void sifive_serial_console_write(struct console *co, const char *s,
if (!ssp)
return;
- local_irq_save(flags);
- if (ssp->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(&ssp->port);
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(&ssp->port, &flags);
else
- uart_port_lock(&ssp->port);
+ uart_port_lock_irqsave(&ssp->port, &flags);
ier = __ssp_readl(ssp, SIFIVE_SERIAL_IE_OFFS);
__ssp_writel(0, SIFIVE_SERIAL_IE_OFFS, ssp);
@@ -807,8 +805,7 @@ static void sifive_serial_console_write(struct console *co, const char *s,
__ssp_writel(ier, SIFIVE_SERIAL_IE_OFFS, ssp);
if (locked)
- uart_port_unlock(&ssp->port);
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(&ssp->port, flags);
}
static int sifive_serial_console_setup(struct console *co, char *options)
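The sifive change above (and the sunplus one further down) follows the same pattern: the RX path only records a sysrq character with uart_prepare_sysrq_char() while the port lock is held, and uart_unlock_and_check_sysrq() runs the actual sysrq handling once the lock has been dropped. A generic sketch of that pattern (rx_ready() and rx_byte() are hypothetical placeholders for a driver's own FIFO accessors):

static irqreturn_t example_uart_irq(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	unsigned int ch;

	uart_port_lock(port);
	while (rx_ready(port)) {		/* hypothetical helper */
		ch = rx_byte(port);		/* hypothetical helper */
		if (!uart_prepare_sysrq_char(port, ch))
			uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
	}
	tty_flip_buffer_push(&port->state->port);
	/* Drops the lock and, if a sysrq char was seen, handles it now */
	uart_unlock_and_check_sysrq(port);

	return IRQ_HANDLED;
}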
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index bbb5595d7e24c..a23e595518488 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -465,6 +465,7 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old)
{
struct asc_port *ascport = to_asc_port(port);
+ bool manual_rts, toggle_rts = false;
struct gpio_desc *gpiod;
unsigned int baud;
u32 ctrl_val;
@@ -518,25 +519,13 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
/* If flow-control selected, stop handling RTS manually */
if (ascport->rts) {
- devm_gpiod_put(port->dev, ascport->rts);
- ascport->rts = NULL;
-
- pinctrl_select_state(ascport->pinctrl,
- ascport->states[DEFAULT]);
+ toggle_rts = true;
+ manual_rts = false;
}
} else {
/* If flow-control disabled, it's safe to handle RTS manually */
- if (!ascport->rts && ascport->states[NO_HW_FLOWCTRL]) {
- pinctrl_select_state(ascport->pinctrl,
- ascport->states[NO_HW_FLOWCTRL]);
-
- gpiod = devm_gpiod_get(port->dev, "rts", GPIOD_OUT_LOW);
- if (!IS_ERR(gpiod)) {
- gpiod_set_consumer_name(gpiod,
- port->dev->of_node->name);
- ascport->rts = gpiod;
- }
- }
+ if (!ascport->rts && ascport->states[NO_HW_FLOWCTRL])
+ manual_rts = toggle_rts = true;
}
if ((baud < 19200) && !ascport->force_m1) {
@@ -595,6 +584,25 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
asc_out(port, ASC_CTL, (ctrl_val | ASC_CTL_RUN));
uart_port_unlock_irqrestore(port, flags);
+
+ if (toggle_rts) {
+ if (manual_rts) {
+ pinctrl_select_state(ascport->pinctrl,
+ ascport->states[NO_HW_FLOWCTRL]);
+
+ gpiod = devm_gpiod_get(port->dev, "rts", GPIOD_OUT_LOW);
+ if (!IS_ERR(gpiod)) {
+ gpiod_set_consumer_name(gpiod,
+ port->dev->of_node->name);
+ ascport->rts = gpiod;
+ }
+ } else {
+ devm_gpiod_put(port->dev, ascport->rts);
+ ascport->rts = NULL;
+ pinctrl_select_state(ascport->pinctrl,
+ ascport->states[DEFAULT]);
+ }
+ }
}
static const char *asc_type(struct uart_port *port)
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 693e932d6feb5..4fa5a03ebac08 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -9,6 +9,7 @@
* Inspired by st-asc.c from STMicroelectronics (c)
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
@@ -39,60 +40,64 @@
/* Register offsets */
static struct stm32_usart_info __maybe_unused stm32f4_info = {
.ofs = {
- .isr = 0x00,
- .rdr = 0x04,
- .tdr = 0x04,
- .brr = 0x08,
- .cr1 = 0x0c,
- .cr2 = 0x10,
- .cr3 = 0x14,
- .gtpr = 0x18,
- .rtor = UNDEF_REG,
- .rqr = UNDEF_REG,
- .icr = UNDEF_REG,
+ .isr = 0x00,
+ .rdr = 0x04,
+ .tdr = 0x04,
+ .brr = 0x08,
+ .cr1 = 0x0c,
+ .cr2 = 0x10,
+ .cr3 = 0x14,
+ .gtpr = 0x18,
+ .rtor = UNDEF_REG,
+ .rqr = UNDEF_REG,
+ .icr = UNDEF_REG,
+ .presc = UNDEF_REG,
+ .hwcfgr1 = UNDEF_REG,
},
.cfg = {
.uart_enable_bit = 13,
.has_7bits_data = false,
- .fifosize = 1,
}
};
static struct stm32_usart_info __maybe_unused stm32f7_info = {
.ofs = {
- .cr1 = 0x00,
- .cr2 = 0x04,
- .cr3 = 0x08,
- .brr = 0x0c,
- .gtpr = 0x10,
- .rtor = 0x14,
- .rqr = 0x18,
- .isr = 0x1c,
- .icr = 0x20,
- .rdr = 0x24,
- .tdr = 0x28,
+ .cr1 = 0x00,
+ .cr2 = 0x04,
+ .cr3 = 0x08,
+ .brr = 0x0c,
+ .gtpr = 0x10,
+ .rtor = 0x14,
+ .rqr = 0x18,
+ .isr = 0x1c,
+ .icr = 0x20,
+ .rdr = 0x24,
+ .tdr = 0x28,
+ .presc = UNDEF_REG,
+ .hwcfgr1 = UNDEF_REG,
},
.cfg = {
.uart_enable_bit = 0,
.has_7bits_data = true,
.has_swap = true,
- .fifosize = 1,
}
};
static struct stm32_usart_info __maybe_unused stm32h7_info = {
.ofs = {
- .cr1 = 0x00,
- .cr2 = 0x04,
- .cr3 = 0x08,
- .brr = 0x0c,
- .gtpr = 0x10,
- .rtor = 0x14,
- .rqr = 0x18,
- .isr = 0x1c,
- .icr = 0x20,
- .rdr = 0x24,
- .tdr = 0x28,
+ .cr1 = 0x00,
+ .cr2 = 0x04,
+ .cr3 = 0x08,
+ .brr = 0x0c,
+ .gtpr = 0x10,
+ .rtor = 0x14,
+ .rqr = 0x18,
+ .isr = 0x1c,
+ .icr = 0x20,
+ .rdr = 0x24,
+ .tdr = 0x28,
+ .presc = 0x2c,
+ .hwcfgr1 = 0x3f0,
},
.cfg = {
.uart_enable_bit = 0,
@@ -100,7 +105,6 @@ static struct stm32_usart_info __maybe_unused stm32h7_info = {
.has_swap = true,
.has_wakeup = true,
.has_fifo = true,
- .fifosize = 16,
}
};
@@ -857,6 +861,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
u32 sr;
unsigned int size;
+ irqreturn_t ret = IRQ_NONE;
sr = readl_relaxed(port->membase + ofs->isr);
@@ -865,11 +870,14 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
(sr & USART_SR_TC)) {
stm32_usart_tc_interrupt_disable(port);
stm32_usart_rs485_rts_disable(port);
+ ret = IRQ_HANDLED;
}
- if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
+ if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) {
writel_relaxed(USART_ICR_RTOCF,
port->membase + ofs->icr);
+ ret = IRQ_HANDLED;
+ }
if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
/* Clear wake up flag and disable wake up interrupt */
@@ -878,6 +886,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
pm_wakeup_event(tport->tty->dev, 0);
+ ret = IRQ_HANDLED;
}
/*
@@ -892,6 +901,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
+ ret = IRQ_HANDLED;
}
}
@@ -899,6 +909,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
uart_port_lock(port);
stm32_usart_transmit_chars(port);
uart_port_unlock(port);
+ ret = IRQ_HANDLED;
}
/* Receiver timeout irq for DMA RX */
@@ -908,9 +919,10 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
+ ret = IRQ_HANDLED;
}
- return IRQ_HANDLED;
+ return ret;
}
static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
@@ -1080,6 +1092,7 @@ static int stm32_usart_startup(struct uart_port *port)
val |= USART_CR2_SWAP;
writel_relaxed(val, port->membase + ofs->cr2);
}
+ stm32_port->throttled = false;
/* RX FIFO Flush */
if (ofs->rqr != UNDEF_REG)
@@ -1147,6 +1160,8 @@ static void stm32_usart_shutdown(struct uart_port *port)
free_irq(port->irq, port);
}
+static const unsigned int stm32_usart_presc_val[] = {1, 2, 4, 6, 8, 10, 12, 16, 32, 64, 128, 256};
+
static void stm32_usart_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
@@ -1155,17 +1170,19 @@ static void stm32_usart_set_termios(struct uart_port *port,
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
struct serial_rs485 *rs485conf = &port->rs485;
- unsigned int baud, bits;
+ unsigned int baud, bits, uart_clk, uart_clk_pres;
u32 usartdiv, mantissa, fraction, oversampling;
tcflag_t cflag = termios->c_cflag;
- u32 cr1, cr2, cr3, isr;
+ u32 cr1, cr2, cr3, isr, brr, presc;
unsigned long flags;
int ret;
if (!stm32_port->hw_flow_control)
cflag &= ~CRTSCTS;
- baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);
+ uart_clk = clk_get_rate(stm32_port->clk);
+
+ baud = uart_get_baud_rate(port, termios, old, 0, uart_clk / 8);
uart_port_lock_irqsave(port, &flags);
@@ -1267,27 +1284,48 @@ static void stm32_usart_set_termios(struct uart_port *port,
cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
}
- usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
+ for (presc = 0; presc <= USART_PRESC_MAX; presc++) {
+ uart_clk_pres = DIV_ROUND_CLOSEST(uart_clk, stm32_usart_presc_val[presc]);
+ usartdiv = DIV_ROUND_CLOSEST(uart_clk_pres, baud);
- /*
- * The USART supports 16 or 8 times oversampling.
- * By default we prefer 16 times oversampling, so that the receiver
- * has a better tolerance to clock deviations.
- * 8 times oversampling is only used to achieve higher speeds.
- */
- if (usartdiv < 16) {
- oversampling = 8;
- cr1 |= USART_CR1_OVER8;
- stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
- } else {
- oversampling = 16;
- cr1 &= ~USART_CR1_OVER8;
- stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
+ /*
+ * The USART supports 16 or 8 times oversampling.
+ * By default we prefer 16 times oversampling, so that the receiver
+ * has a better tolerance to clock deviations.
+ * 8 times oversampling is only used to achieve higher speeds.
+ */
+ if (usartdiv < 16) {
+ oversampling = 8;
+ cr1 |= USART_CR1_OVER8;
+ stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
+ } else {
+ oversampling = 16;
+ cr1 &= ~USART_CR1_OVER8;
+ stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
+ }
+
+ mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
+ fraction = usartdiv % oversampling;
+ brr = mantissa | fraction;
+
+ if (FIELD_FIT(USART_BRR_MASK, brr)) {
+ if (ofs->presc != UNDEF_REG) {
+ port->uartclk = uart_clk_pres;
+ writel_relaxed(presc, port->membase + ofs->presc);
+ } else if (presc) {
+ /* We need a prescaler but we don't have it (STM32F4, STM32F7) */
+ dev_err(port->dev,
+ "unable to set baudrate, input clock is too high");
+ }
+ break;
+ } else if (presc == USART_PRESC_MAX) {
+ /* Even with prescaler and brr at max value we can't set baudrate */
+ dev_err(port->dev, "unable to set baudrate, input clock is too high");
+ break;
+ }
}
- mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
- fraction = usartdiv % oversampling;
- writel_relaxed(mantissa | fraction, port->membase + ofs->brr);
+ writel_relaxed(brr, port->membase + ofs->brr);
uart_update_timeout(port, cflag, baud);
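To make the prescaler loop above concrete, a worked example with assumed numbers (uart_clk = 64 MHz, 115200 baud): the /1 prescaler already gives usartdiv = 556, which is >= 16, so 16x oversampling is kept; mantissa = (556 / 16) << 4 = 0x220 and fraction = 556 % 16 = 0xc, i.e. BRR = 0x22c for an actual rate of about 115108 Bd (~0.08% error), so the loop breaks at the first prescaler step. A minimal helper mirroring that computation for a single prescaler setting (illustrative only, not part of the driver):

static u32 example_stm32_brr(unsigned int uart_clk_pres, unsigned int baud)
{
	u32 usartdiv = DIV_ROUND_CLOSEST(uart_clk_pres, baud);
	u32 oversampling = usartdiv < 16 ? 8 : 16;

	/* Same encoding as the loop above: mantissa in BRR[15:4], fraction below */
	return ((usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT) |
	       (usartdiv % oversampling);
}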
@@ -1471,37 +1509,57 @@ static const struct uart_ops stm32_uart_ops = {
#endif /* CONFIG_CONSOLE_POLL */
};
-/*
- * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG)
- * Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case,
- * RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE.
- * So, RXFTCFG / TXFTCFG bitfields values are encoded as array index + 1.
- */
-static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 };
+struct stm32_usart_thresh_ratio {
+ int mul;
+ int div;
+};
-static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p,
- int *ftcfg)
+static const struct stm32_usart_thresh_ratio stm32h7_usart_fifo_thresh_cfg[] = {
+ {1, 8}, {1, 4}, {1, 2}, {3, 4}, {7, 8}, {1, 1} };
+
+static int stm32_usart_get_thresh_value(u32 fifo_size, int index)
{
- u32 bytes, i;
+ return fifo_size * stm32h7_usart_fifo_thresh_cfg[index].mul /
+ stm32h7_usart_fifo_thresh_cfg[index].div;
+}
- /* DT option to get RX & TX FIFO threshold (default to 8 bytes) */
+static int stm32_usart_get_ftcfg(struct platform_device *pdev, struct stm32_port *stm32port,
+ const char *p, int *ftcfg)
+{
+ const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
+ u32 bytes, i, cfg8;
+ int fifo_size;
+
+ if (WARN_ON(ofs->hwcfgr1 == UNDEF_REG))
+ return 1;
+
+ cfg8 = FIELD_GET(USART_HWCFGR1_CFG8,
+ readl_relaxed(stm32port->port.membase + ofs->hwcfgr1));
+
+ /* On STM32H7, hwcfgr is not present, so returned value will be 0 */
+ fifo_size = cfg8 ? 1 << cfg8 : STM32H7_USART_FIFO_SIZE;
+
+ /* DT option to get RX & TX FIFO threshold (default to half fifo size) */
if (of_property_read_u32(pdev->dev.of_node, p, &bytes))
- bytes = 8;
+ bytes = fifo_size / 2;
- for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++)
- if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes)
+ if (bytes < stm32_usart_get_thresh_value(fifo_size, 0)) {
+ *ftcfg = -EINVAL;
+ return fifo_size;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++) {
+ if (stm32_usart_get_thresh_value(fifo_size, i) >= bytes)
break;
+ }
if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg))
i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1;
- dev_dbg(&pdev->dev, "%s set to %d bytes\n", p,
- stm32h7_usart_fifo_thresh_cfg[i]);
+ dev_dbg(&pdev->dev, "%s set to %d/%d bytes\n", p,
+ stm32_usart_get_thresh_value(fifo_size, i), fifo_size);
- /* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */
- if (i)
- *ftcfg = i - 1;
- else
- *ftcfg = -EINVAL;
+ *ftcfg = i;
+ return fifo_size;
}
static void stm32_usart_deinit_port(struct stm32_port *stm32port)
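The reworked threshold lookup above scales the six CR3 threshold ratios (1/8, 1/4, 1/2, 3/4, 7/8 and 8/8 of the FIFO) by the FIFO size read from HWCFGR1. As a worked example with an assumed 16-byte FIFO, the candidate thresholds are 2, 4, 8, 12, 14 and 16 bytes: a device-tree request of 10 bytes selects the first candidate >= 10, i.e. 12 bytes (ftcfg index 3), while the default of fifo_size / 2 = 8 bytes lands on index 2. A small stand-alone sketch of the same selection (hypothetical helper, not part of the driver):

static unsigned int example_stm32_pick_ftcfg(unsigned int fifo_size,
					     unsigned int bytes)
{
	/* Same ratios as stm32h7_usart_fifo_thresh_cfg: 1/8 .. 8/8 */
	static const unsigned int mul[] = { 1, 1, 1, 3, 7, 1 };
	static const unsigned int div[] = { 8, 4, 2, 4, 8, 1 };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(mul) - 1; i++)
		if (fifo_size * mul[i] / div[i] >= bytes)
			break;

	return i;	/* e.g. fifo_size = 16, bytes = 10 -> index 3 (12 bytes) */
}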
@@ -1531,7 +1589,6 @@ static int stm32_usart_init_port(struct stm32_port *stm32port,
port->flags = UPF_BOOT_AUTOCONF;
port->ops = &stm32_uart_ops;
port->dev = &pdev->dev;
- port->fifosize = stm32port->info->cfg.fifosize;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
port->irq = irq;
port->rs485_config = stm32_usart_config_rs485;
@@ -1547,14 +1604,6 @@ static int stm32_usart_init_port(struct stm32_port *stm32port,
stm32port->swap = stm32port->info->cfg.has_swap &&
of_property_read_bool(pdev->dev.of_node, "rx-tx-swap");
- stm32port->fifoen = stm32port->info->cfg.has_fifo;
- if (stm32port->fifoen) {
- stm32_usart_get_ftcfg(pdev, "rx-threshold",
- &stm32port->rxftcfg);
- stm32_usart_get_ftcfg(pdev, "tx-threshold",
- &stm32port->txftcfg);
- }
-
port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(port->membase))
return PTR_ERR(port->membase);
@@ -1577,6 +1626,15 @@ static int stm32_usart_init_port(struct stm32_port *stm32port,
goto err_clk;
}
+ stm32port->fifoen = stm32port->info->cfg.has_fifo;
+ if (stm32port->fifoen) {
+ stm32_usart_get_ftcfg(pdev, stm32port, "rx-threshold", &stm32port->rxftcfg);
+ port->fifosize = stm32_usart_get_ftcfg(pdev, stm32port, "tx-threshold",
+ &stm32port->txftcfg);
+ } else {
+ port->fifosize = 1;
+ }
+
stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
if (IS_ERR(stm32port->gpios)) {
ret = PTR_ERR(stm32port->gpios);
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index f59f831b2a10c..af20258ccc7a0 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -9,17 +9,19 @@
#define DRIVER_NAME "stm32-usart"
struct stm32_usart_offsets {
- u8 cr1;
- u8 cr2;
- u8 cr3;
- u8 brr;
- u8 gtpr;
- u8 rtor;
- u8 rqr;
- u8 isr;
- u8 icr;
- u8 rdr;
- u8 tdr;
+ u16 cr1;
+ u16 cr2;
+ u16 cr3;
+ u16 brr;
+ u16 gtpr;
+ u16 rtor;
+ u16 rqr;
+ u16 isr;
+ u16 icr;
+ u16 rdr;
+ u16 tdr;
+ u16 presc;
+ u16 hwcfgr1;
};
struct stm32_usart_config {
@@ -28,7 +30,6 @@ struct stm32_usart_config {
bool has_swap;
bool has_wakeup;
bool has_fifo;
- int fifosize;
};
struct stm32_usart_info {
@@ -36,7 +37,7 @@ struct stm32_usart_info {
struct stm32_usart_config cfg;
};
-#define UNDEF_REG 0xff
+#define UNDEF_REG 0xffff
/* USART_SR (F4) / USART_ISR (F7) */
#define USART_SR_PE BIT(0)
@@ -71,6 +72,7 @@ struct stm32_usart_info {
#define USART_BRR_DIV_M_MASK GENMASK(15, 4)
#define USART_BRR_DIV_M_SHIFT 4
#define USART_BRR_04_R_SHIFT 1
+#define USART_BRR_MASK (USART_BRR_DIV_M_MASK | USART_BRR_DIV_F_MASK)
/* USART_CR1 */
#define USART_CR1_SBK BIT(0)
@@ -176,8 +178,16 @@ struct stm32_usart_info {
#define USART_ICR_CMCF BIT(17) /* F7 */
#define USART_ICR_WUCF BIT(20) /* H7 */
+/* USART_PRESC */
+#define USART_PRESC GENMASK(3, 0) /* H7 */
+#define USART_PRESC_MAX 0b1011
+
+/* USART_HWCFGR1 */
+#define USART_HWCFGR1_CFG8 GENMASK(31, 28) /* MP1 */
+
#define STM32_SERIAL_NAME "ttySTM"
-#define STM32_MAX_PORTS 8
+#define STM32_MAX_PORTS 9
+#define STM32H7_USART_FIFO_SIZE 16
#define RX_BUF_L 4096 /* dma rx buffer length */
#define RX_BUF_P (RX_BUF_L / 2) /* dma rx buffer period */
diff --git a/drivers/tty/serial/sunplus-uart.c b/drivers/tty/serial/sunplus-uart.c
index 99f5285819d4b..f5e29eb4a4ce4 100644
--- a/drivers/tty/serial/sunplus-uart.c
+++ b/drivers/tty/serial/sunplus-uart.c
@@ -260,7 +260,7 @@ static void receive_chars(struct uart_port *port)
if (port->ignore_status_mask & SUP_DUMMY_READ)
goto ignore_char;
- if (uart_handle_sysrq_char(port, ch))
+ if (uart_prepare_sysrq_char(port, ch))
goto ignore_char;
uart_insert_char(port, lsr, SUP_UART_LSR_OE, ch, flag);
@@ -287,7 +287,7 @@ static irqreturn_t sunplus_uart_irq(int irq, void *args)
if (isc & SUP_UART_ISC_TX)
transmit_chars(port);
- uart_port_unlock(port);
+ uart_unlock_and_check_sysrq(port);
return IRQ_HANDLED;
}
@@ -512,22 +512,16 @@ static void sunplus_console_write(struct console *co,
unsigned long flags;
int locked = 1;
- local_irq_save(flags);
-
- if (sunplus_console_ports[co->index]->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(&sunplus_console_ports[co->index]->port);
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(&sunplus_console_ports[co->index]->port, &flags);
else
- uart_port_lock(&sunplus_console_ports[co->index]->port);
+ uart_port_lock_irqsave(&sunplus_console_ports[co->index]->port, &flags);
uart_console_write(&sunplus_console_ports[co->index]->port, s, count,
sunplus_uart_console_putchar);
if (locked)
- uart_port_unlock(&sunplus_console_ports[co->index]->port);
-
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(&sunplus_console_ports[co->index]->port, flags);
}
static int __init sunplus_console_setup(struct console *co, char *options)
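The sunplus hunks above move sysrq handling out from under the port lock: uart_prepare_sysrq_char() only records a possible magic-sysrq character, and uart_unlock_and_check_sysrq() runs the action after the lock is dropped. A minimal sketch of that pattern in a generic RX interrupt handler (the my_uart_* accessors are placeholders, not part of this driver):

static irqreturn_t my_uart_irq(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	u8 ch;

	uart_port_lock(port);
	while (my_uart_rx_ready(port)) {
		ch = my_uart_rx_byte(port);
		/* remember a possible sysrq char, but do not act on it yet */
		if (uart_prepare_sysrq_char(port, ch))
			continue;
		uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
	}
	tty_flip_buffer_push(&port->state->port);
	/* drop the lock first, then run any pending sysrq handler */
	uart_unlock_and_check_sysrq(port);

	return IRQ_HANDLED;
}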
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 920762d7b4a4b..5f48ec37cb25f 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -22,7 +22,9 @@
#include <linux/of.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
-#include <linux/iopoll.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
#define CDNS_UART_TTY_NAME "ttyPS"
#define CDNS_UART_NAME "xuartps"
@@ -193,6 +195,9 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
* @clk_rate_change_nb: Notifier block for clock changes
* @quirks: Flags for RXBS support.
* @cts_override: Modem control state override
+ * @gpiod_rts: Pointer to the gpio descriptor
+ * @rs485_tx_started: RS485 tx state
+ * @tx_timer: Timer for tx
*/
struct cdns_uart {
struct uart_port *port;
@@ -203,10 +208,21 @@ struct cdns_uart {
struct notifier_block clk_rate_change_nb;
u32 quirks;
bool cts_override;
+ struct gpio_desc *gpiod_rts;
+ bool rs485_tx_started;
+ struct hrtimer tx_timer;
};
struct cdns_platform_data {
u32 quirks;
};
+
+struct serial_rs485 cdns_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
+ SER_RS485_RTS_AFTER_SEND,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
#define to_cdns_uart(_nb) container_of(_nb, struct cdns_uart, \
clk_rate_change_nb)
@@ -306,17 +322,114 @@ static void cdns_uart_handle_rx(void *dev_id, unsigned int isrstatus)
}
/**
- * cdns_uart_handle_tx - Handle the bytes to be Txed.
+ * cdns_rts_gpio_enable - Configure RTS/GPIO to high/low
+ * @cdns_uart: Handle to the cdns_uart
+ * @enable: Value to be set to RTS/GPIO
+ */
+static void cdns_rts_gpio_enable(struct cdns_uart *cdns_uart, bool enable)
+{
+ u32 val;
+
+ if (cdns_uart->gpiod_rts) {
+ gpiod_set_value(cdns_uart->gpiod_rts, enable);
+ } else {
+ val = readl(cdns_uart->port->membase + CDNS_UART_MODEMCR);
+ if (enable)
+ val |= CDNS_UART_MODEMCR_RTS;
+ else
+ val &= ~CDNS_UART_MODEMCR_RTS;
+ writel(val, cdns_uart->port->membase + CDNS_UART_MODEMCR);
+ }
+}
+
+/**
+ * cdns_rs485_tx_setup - Tx setup specific to rs485
+ * @cdns_uart: Handle to the cdns_uart
+ */
+static void cdns_rs485_tx_setup(struct cdns_uart *cdns_uart)
+{
+ bool enable;
+
+ enable = cdns_uart->port->rs485.flags & SER_RS485_RTS_ON_SEND;
+ cdns_rts_gpio_enable(cdns_uart, enable);
+
+ cdns_uart->rs485_tx_started = true;
+}
+
+/**
+ * cdns_rs485_rx_setup - Rx setup specific to rs485
+ * @cdns_uart: Handle to the cdns_uart
+ */
+static void cdns_rs485_rx_setup(struct cdns_uart *cdns_uart)
+{
+ bool enable;
+
+ enable = cdns_uart->port->rs485.flags & SER_RS485_RTS_AFTER_SEND;
+ cdns_rts_gpio_enable(cdns_uart, enable);
+
+ cdns_uart->rs485_tx_started = false;
+}
+
+/**
+ * cdns_uart_tx_empty - Check whether TX is empty
+ * @port: Handle to the uart port structure
+ *
+ * Return: TIOCSER_TEMT on success, 0 otherwise
+ */
+static unsigned int cdns_uart_tx_empty(struct uart_port *port)
+{
+ unsigned int status;
+
+ status = readl(port->membase + CDNS_UART_SR);
+ status &= (CDNS_UART_SR_TXEMPTY | CDNS_UART_SR_TACTIVE);
+ return (status == CDNS_UART_SR_TXEMPTY) ? TIOCSER_TEMT : 0;
+}
+
+/**
+ * cdns_rs485_rx_callback - Timer rx callback handler for rs485.
+ * @t: Handle to the hrtimer structure
+ */
+static enum hrtimer_restart cdns_rs485_rx_callback(struct hrtimer *t)
+{
+ struct cdns_uart *cdns_uart = container_of(t, struct cdns_uart, tx_timer);
+
+ /*
+	 * Default Rx should be set up, because the Rx signaling path
+	 * needs to be enabled to receive data.
+ */
+ cdns_rs485_rx_setup(cdns_uart);
+
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * cdns_calc_after_tx_delay - calculate the delay required after tx completes.
+ * @cdns_uart: Handle to the cdns_uart
+ */
+static u64 cdns_calc_after_tx_delay(struct cdns_uart *cdns_uart)
+{
+ /*
+ * Frame time + stop bit time + rs485.delay_rts_after_send
+ */
+ return cdns_uart->port->frame_time
+ + DIV_ROUND_UP(cdns_uart->port->frame_time, 7)
+ + (u64)cdns_uart->port->rs485.delay_rts_after_send * NSEC_PER_MSEC;
+}
+
+/**
+ * cdns_uart_handle_tx - Handle the bytes to be transmitted.
* @dev_id: Id of the UART port
* Return: None
*/
static void cdns_uart_handle_tx(void *dev_id)
{
struct uart_port *port = (struct uart_port *)dev_id;
+ struct cdns_uart *cdns_uart = port->private_data;
struct circ_buf *xmit = &port->state->xmit;
unsigned int numbytes;
- if (uart_circ_empty(xmit)) {
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ /* Disable the TX Empty interrupt */
writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IDR);
return;
}
@@ -332,6 +445,16 @@ static void cdns_uart_handle_tx(void *dev_id)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
+
+ /* Enable the TX Empty interrupt */
+ writel(CDNS_UART_IXR_TXEMPTY, cdns_uart->port->membase + CDNS_UART_IER);
+
+ if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED &&
+ (uart_circ_empty(xmit) || uart_tx_stopped(port))) {
+ cdns_uart->tx_timer.function = &cdns_rs485_rx_callback;
+ hrtimer_start(&cdns_uart->tx_timer,
+ ns_to_ktime(cdns_calc_after_tx_delay(cdns_uart)), HRTIMER_MODE_REL);
+ }
}
/**
@@ -565,12 +688,28 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
#endif
/**
+ * cdns_rs485_tx_callback - Timer tx callback handler for rs485.
+ * @t: Handle to the hrtimer structure
+ */
+static enum hrtimer_restart cdns_rs485_tx_callback(struct hrtimer *t)
+{
+ struct cdns_uart *cdns_uart = container_of(t, struct cdns_uart, tx_timer);
+
+ uart_port_lock(cdns_uart->port);
+ cdns_uart_handle_tx(cdns_uart->port);
+ uart_port_unlock(cdns_uart->port);
+
+ return HRTIMER_NORESTART;
+}
+
+/**
* cdns_uart_start_tx - Start transmitting bytes
* @port: Handle to the uart port structure
*/
static void cdns_uart_start_tx(struct uart_port *port)
{
unsigned int status;
+ struct cdns_uart *cdns_uart = port->private_data;
if (uart_tx_stopped(port))
return;
@@ -587,12 +726,19 @@ static void cdns_uart_start_tx(struct uart_port *port)
if (uart_circ_empty(&port->state->xmit))
return;
+ /* Clear the TX Empty interrupt */
writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_ISR);
+ if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED) {
+ if (!cdns_uart->rs485_tx_started) {
+ cdns_uart->tx_timer.function = &cdns_rs485_tx_callback;
+ cdns_rs485_tx_setup(cdns_uart);
+ return hrtimer_start(&cdns_uart->tx_timer,
+ ms_to_ktime(port->rs485.delay_rts_before_send),
+ HRTIMER_MODE_REL);
+ }
+ }
cdns_uart_handle_tx(port);
-
- /* Enable the TX Empty interrupt */
- writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IER);
}
/**
@@ -602,6 +748,10 @@ static void cdns_uart_start_tx(struct uart_port *port)
static void cdns_uart_stop_tx(struct uart_port *port)
{
unsigned int regval;
+ struct cdns_uart *cdns_uart = port->private_data;
+
+ if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED)
+ cdns_rs485_rx_setup(cdns_uart);
regval = readl(port->membase + CDNS_UART_CR);
regval |= CDNS_UART_CR_TX_DIS;
@@ -627,21 +777,6 @@ static void cdns_uart_stop_rx(struct uart_port *port)
}
/**
- * cdns_uart_tx_empty - Check whether TX is empty
- * @port: Handle to the uart port structure
- *
- * Return: TIOCSER_TEMT on success, 0 otherwise
- */
-static unsigned int cdns_uart_tx_empty(struct uart_port *port)
-{
- unsigned int status;
-
- status = readl(port->membase + CDNS_UART_SR) &
- (CDNS_UART_SR_TXEMPTY | CDNS_UART_SR_TACTIVE);
- return (status == CDNS_UART_SR_TXEMPTY) ? TIOCSER_TEMT : 0;
-}
-
-/**
* cdns_uart_break_ctl - Based on the input ctl we have to start or stop
* transmitting char breaks
* @port: Handle to the uart port structure
@@ -829,6 +964,9 @@ static int cdns_uart_startup(struct uart_port *port)
(CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST))
cpu_relax();
+ if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED)
+ cdns_rs485_rx_setup(cdns_uart);
+
/*
* Clear the RX disable bit and then set the RX enable bit to enable
* the receiver.
@@ -888,6 +1026,10 @@ static void cdns_uart_shutdown(struct uart_port *port)
{
int status;
unsigned long flags;
+ struct cdns_uart *cdns_uart = port->private_data;
+
+ if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED)
+ hrtimer_cancel(&cdns_uart->tx_timer);
uart_port_lock_irqsave(port, &flags);
@@ -1033,6 +1175,8 @@ static void cdns_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
if (mctrl & TIOCM_RTS)
val |= CDNS_UART_MODEMCR_RTS;
+ if (cdns_uart_data->gpiod_rts)
+ gpiod_set_value(cdns_uart_data->gpiod_rts, !(mctrl & TIOCM_RTS));
if (mctrl & TIOCM_DTR)
val |= CDNS_UART_MODEMCR_DTR;
if (mctrl & TIOCM_LOOP)
@@ -1456,6 +1600,39 @@ MODULE_DEVICE_TABLE(of, cdns_uart_of_match);
static int instances;
/**
+ * cdns_rs485_config - Called when an application calls TIOCSRS485 ioctl.
+ * @port: Pointer to the uart_port structure
+ * @termios: Pointer to the ktermios structure
+ * @rs485: Pointer to the serial_rs485 structure
+ *
+ * Return: 0
+ */
+static int cdns_rs485_config(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485)
+{
+ u32 val;
+ struct cdns_uart *cdns_uart = port->private_data;
+
+ if (rs485->flags & SER_RS485_ENABLED) {
+ dev_dbg(port->dev, "Setting UART to RS485\n");
+ /* Make sure auto RTS is disabled */
+ val = readl(port->membase + CDNS_UART_MODEMCR);
+ val &= ~CDNS_UART_MODEMCR_FCM;
+ writel(val, port->membase + CDNS_UART_MODEMCR);
+
+ /* Timer setup */
+ hrtimer_init(&cdns_uart->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ cdns_uart->tx_timer.function = &cdns_rs485_tx_callback;
+
+	/* Disable the transmitter and set up Rx */
+ cdns_uart_stop_tx(port);
+ } else {
+ hrtimer_cancel(&cdns_uart->tx_timer);
+ }
+ return 0;
+}
+
+/**
* cdns_uart_probe - Platform driver probe
* @pdev: Pointer to the platform device structure
*
@@ -1597,9 +1774,23 @@ static int cdns_uart_probe(struct platform_device *pdev)
port->private_data = cdns_uart_data;
port->read_status_mask = CDNS_UART_IXR_TXEMPTY | CDNS_UART_IXR_RXTRIG |
CDNS_UART_IXR_OVERRUN | CDNS_UART_IXR_TOUT;
+ port->rs485_config = cdns_rs485_config;
+ port->rs485_supported = cdns_rs485_supported;
cdns_uart_data->port = port;
platform_set_drvdata(pdev, port);
+ rc = uart_get_rs485_mode(port);
+ if (rc)
+ goto err_out_clk_notifier;
+
+ cdns_uart_data->gpiod_rts = devm_gpiod_get_optional(&pdev->dev, "rts",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(cdns_uart_data->gpiod_rts)) {
+ rc = PTR_ERR(cdns_uart_data->gpiod_rts);
+ dev_err(port->dev, "xuartps: devm_gpiod_get_optional failed\n");
+ goto err_out_clk_notifier;
+ }
+
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, UART_AUTOSUSPEND_TIMEOUT);
pm_runtime_set_active(&pdev->dev);
@@ -1618,6 +1809,8 @@ static int cdns_uart_probe(struct platform_device *pdev)
console_port = port;
}
#endif
+ if (cdns_uart_data->port->rs485.flags & SER_RS485_ENABLED)
+ cdns_rs485_rx_setup(cdns_uart_data);
rc = uart_add_one_port(&cdns_uart_uart_driver, port);
if (rc) {
@@ -1646,6 +1839,7 @@ err_out_pm_disable:
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
+err_out_clk_notifier:
#ifdef CONFIG_COMMON_CLK
clk_notifier_unregister(cdns_uart_data->uartclk,
&cdns_uart_data->clk_rate_change_nb);
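On the RS485 turnaround added above: cdns_calc_after_tx_delay() keeps RTS asserted for one frame time, plus roughly a seventh of a frame of stop-bit slack, plus the configured delay_rts_after_send before the hrtimer flips the transceiver back to receive; frame_time is kept in nanoseconds by the serial core, which is why the result feeds ns_to_ktime() directly. A standalone sketch of the same arithmetic for an arbitrary baud rate (illustrative helper, not driver code):

static u64 rs485_turnaround_ns(unsigned int baud, unsigned int frame_bits,
			       unsigned int delay_rts_after_send_ms)
{
	/* e.g. 115200 baud, 10-bit 8N1 frame: ~86806 ns per frame */
	unsigned int frame_time = div_u64((u64)NSEC_PER_SEC * frame_bits, baud);

	/* frame + ~1/7 frame of stop-bit slack + configured RTS hold time */
	return frame_time + DIV_ROUND_UP(frame_time, 7) +
	       (u64)delay_rts_after_send_ms * NSEC_PER_MSEC;
}

With delay_rts_after_send of 1 ms at 115200 baud that is roughly 86.8 us + 12.4 us + 1 ms, i.e. about 1.1 ms before cdns_rs485_rx_callback() re-enables the receive path.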
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index f8883afbeeba6..79f0ff94ce00d 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -7,6 +7,7 @@
#include <linux/errno.h>
#include <linux/minmax.h>
#include <linux/tty.h>
+#include <linux/tty_buffer.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
index b3dfe9d5717e3..2c8ce8b592ed2 100644
--- a/drivers/tty/vt/Makefile
+++ b/drivers/tty/vt/Makefile
@@ -5,9 +5,9 @@
FONTMAPFILE = cp437.uni
obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o \
- selection.o keyboard.o
+ selection.o keyboard.o \
+ vt.o defkeymap.o
obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o
-obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
# Files generated that shall be removed upon make clean
clean-files := consolemap_deftbl.c defkeymap.c
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 8967c3a0d9169..564341f1a74f3 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -7,7 +7,7 @@
* 'int set_selection_kernel(struct tiocl_selection *, struct tty_struct *)'
* 'void clear_selection(void)'
* 'int paste_selection(struct tty_struct *)'
- * 'int sel_loadlut(char __user *)'
+ * 'int sel_loadlut(u32 __user *)'
*
* Now that /dev/vcs exists, most of this can disappear again.
*/
@@ -73,10 +73,12 @@ sel_pos(int n, bool unicode)
}
/**
- * clear_selection - remove current selection
+ * clear_selection - remove current selection
*
- * Remove the current selection highlight, if any from the console
- * holding the selection. The caller must hold the console lock.
+ * Remove the current selection highlight, if any, from the console holding
+ * the selection.
+ *
+ * Locking: The caller must hold the console lock.
*/
void clear_selection(void)
{
@@ -88,7 +90,7 @@ void clear_selection(void)
}
EXPORT_SYMBOL_GPL(clear_selection);
-bool vc_is_sel(struct vc_data *vc)
+bool vc_is_sel(const struct vc_data *vc)
{
return vc == vc_sel.cons;
}
@@ -110,18 +112,25 @@ static inline int inword(const u32 c)
}
/**
- * sel_loadlut() - load the LUT table
- * @p: user table
+ * sel_loadlut() - load the LUT table
+ * @lut: user table
+ *
+ * Load the LUT table from user space. Make a temporary copy so a partial
+ * update doesn't make a mess.
*
- * Load the LUT table from user space. The caller must hold the console
- * lock. Make a temporary copy so a partial update doesn't make a mess.
+ * Locking: The console lock is acquired.
*/
-int sel_loadlut(char __user *p)
+int sel_loadlut(u32 __user *lut)
{
u32 tmplut[ARRAY_SIZE(inwordLut)];
- if (copy_from_user(tmplut, (u32 __user *)(p+4), sizeof(inwordLut)))
+
+ if (copy_from_user(tmplut, lut, sizeof(inwordLut)))
return -EFAULT;
+
+ console_lock();
memcpy(inwordLut, tmplut, sizeof(inwordLut));
+ console_unlock();
+
return 0;
}
@@ -166,14 +175,14 @@ static int store_utf8(u32 c, char *p)
}
/**
- * set_selection_user - set the current selection.
- * @sel: user selection info
- * @tty: the console tty
+ * set_selection_user - set the current selection.
+ * @sel: user selection info
+ * @tty: the console tty
*
- * Invoked by the ioctl handle for the vt layer.
+ * Invoked by the ioctl handler for the vt layer.
*
- * The entire selection process is managed under the console_lock. It's
- * a lot under the lock but its hardly a performance path
+ * Locking: The entire selection process is managed under the console_lock.
+ * It's a lot under the lock but it's hardly a performance path.
*/
int set_selection_user(const struct tiocl_selection __user *sel,
struct tty_struct *tty)
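With the rework above, sel_loadlut() copies the whole user-supplied table first and only then takes console_lock around the memcpy, so readers of inwordLut never observe a half-updated table. The LUT itself is a 256-bit "word character" bitmap packed into u32 words, one bit per character; a sketch of how such a table is queried, mirroring what inword() does conceptually (helper name is made up, kernel types assumed from linux/types.h):

static inline bool lut_test(const u32 *lut, u8 c)
{
	/* word c / 32, bit c % 32 */
	return (lut[c >> 5] >> (c & 0x1f)) & 1;
}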
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 38a765eadbe2b..9b5b98dfc8b40 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -145,7 +145,7 @@ static void gotoxy(struct vc_data *vc, int new_x, int new_y);
static void save_cur(struct vc_data *vc);
static void reset_terminal(struct vc_data *vc, int do_clear);
static void con_flush_chars(struct tty_struct *tty);
-static int set_vesa_blanking(char __user *p);
+static int set_vesa_blanking(u8 __user *mode);
static void set_cursor(struct vc_data *vc);
static void hide_cursor(struct vc_data *vc);
static void console_callback(struct work_struct *ignored);
@@ -175,7 +175,7 @@ int do_poke_blanked_console;
int console_blanked;
EXPORT_SYMBOL(console_blanked);
-static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */
+static enum vesa_blank_mode vesa_blank_mode;
static int vesa_off_interval;
static int blankinterval;
core_param(consoleblank, blankinterval, int, 0444);
@@ -286,18 +286,20 @@ static inline bool con_should_update(const struct vc_data *vc)
return con_is_visible(vc) && !console_blanked;
}
-static inline unsigned short *screenpos(const struct vc_data *vc, int offset,
- bool viewed)
+static inline u16 *screenpos(const struct vc_data *vc, unsigned int offset,
+ bool viewed)
{
- unsigned short *p;
-
- if (!viewed)
- p = (unsigned short *)(vc->vc_origin + offset);
- else if (!vc->vc_sw->con_screen_pos)
- p = (unsigned short *)(vc->vc_visible_origin + offset);
+ unsigned long origin = viewed ? vc->vc_visible_origin : vc->vc_origin;
+
+ return (u16 *)(origin + offset);
+}
+
+static void con_putc(struct vc_data *vc, u16 ca, unsigned int y, unsigned int x)
+{
+ if (vc->vc_sw->con_putc)
+ vc->vc_sw->con_putc(vc, ca, y, x);
else
- p = vc->vc_sw->con_screen_pos(vc, offset);
- return p;
+ vc->vc_sw->con_putcs(vc, &ca, 1, y, x);
}
/* Called from the keyboard irq path.. */
@@ -591,18 +593,12 @@ static void con_scroll(struct vc_data *vc, unsigned int top,
static void do_update_region(struct vc_data *vc, unsigned long start, int count)
{
unsigned int xx, yy, offset;
- u16 *p;
+ u16 *p = (u16 *)start;
+
+ offset = (start - vc->vc_origin) / 2;
+ xx = offset % vc->vc_cols;
+ yy = offset / vc->vc_cols;
- p = (u16 *) start;
- if (!vc->vc_sw->con_getxy) {
- offset = (start - vc->vc_origin) / 2;
- xx = offset % vc->vc_cols;
- yy = offset / vc->vc_cols;
- } else {
- int nxx, nyy;
- start = vc->vc_sw->con_getxy(vc, start, &nxx, &nyy);
- xx = nxx; yy = nyy;
- }
for(;;) {
u16 attrib = scr_readw(p) & 0xff00;
int startx = xx;
@@ -625,10 +621,6 @@ static void do_update_region(struct vc_data *vc, unsigned long start, int count)
break;
xx = 0;
yy++;
- if (vc->vc_sw->con_getxy) {
- p = (u16 *)start;
- start = vc->vc_sw->con_getxy(vc, start, NULL, NULL);
- }
}
}
@@ -703,7 +695,7 @@ static void update_attr(struct vc_data *vc)
/* Note: inverting the screen twice should revert to the original state */
void invert_screen(struct vc_data *vc, int offset, int count, bool viewed)
{
- unsigned short *p;
+ u16 *p;
WARN_CONSOLE_UNLOCKED();
@@ -762,7 +754,7 @@ void complement_pos(struct vc_data *vc, int offset)
old_offset < vc->vc_screenbuf_size) {
scr_writew(old, screenpos(vc, old_offset, true));
if (con_should_update(vc))
- vc->vc_sw->con_putc(vc, old, oldy, oldx);
+ con_putc(vc, old, oldy, oldx);
notify_update(vc);
}
@@ -771,15 +763,14 @@ void complement_pos(struct vc_data *vc, int offset)
if (offset != -1 && offset >= 0 &&
offset < vc->vc_screenbuf_size) {
unsigned short new;
- unsigned short *p;
- p = screenpos(vc, offset, true);
+ u16 *p = screenpos(vc, offset, true);
old = scr_readw(p);
new = old ^ vc->vc_complement_mask;
scr_writew(new, p);
if (con_should_update(vc)) {
oldx = (offset >> 1) % vc->vc_cols;
oldy = (offset >> 1) / vc->vc_cols;
- vc->vc_sw->con_putc(vc, new, oldy, oldx);
+ con_putc(vc, new, oldy, oldx);
}
notify_update(vc);
}
@@ -833,7 +824,7 @@ static void add_softcursor(struct vc_data *vc)
i ^= CUR_FG;
scr_writew(i, (u16 *)vc->vc_pos);
if (con_should_update(vc))
- vc->vc_sw->con_putc(vc, i, vc->state.y, vc->state.x);
+ con_putc(vc, i, vc->state.y, vc->state.x);
}
static void hide_softcursor(struct vc_data *vc)
@@ -841,8 +832,8 @@ static void hide_softcursor(struct vc_data *vc)
if (softcursor_original != -1) {
scr_writew(softcursor_original, (u16 *)vc->vc_pos);
if (con_should_update(vc))
- vc->vc_sw->con_putc(vc, softcursor_original,
- vc->state.y, vc->state.x);
+ con_putc(vc, softcursor_original, vc->state.y,
+ vc->state.x);
softcursor_original = -1;
}
}
@@ -852,7 +843,7 @@ static void hide_cursor(struct vc_data *vc)
if (vc_is_sel(vc))
clear_selection();
- vc->vc_sw->con_cursor(vc, CM_ERASE);
+ vc->vc_sw->con_cursor(vc, false);
hide_softcursor(vc);
}
@@ -865,7 +856,7 @@ static void set_cursor(struct vc_data *vc)
clear_selection();
add_softcursor(vc);
if (CUR_SIZE(vc->vc_cursor_type) != CUR_NONE)
- vc->vc_sw->con_cursor(vc, CM_DRAW);
+ vc->vc_sw->con_cursor(vc, true);
} else
hide_cursor(vc);
}
@@ -897,21 +888,18 @@ static void flush_scrollback(struct vc_data *vc)
WARN_CONSOLE_UNLOCKED();
set_origin(vc);
- if (vc->vc_sw->con_flush_scrollback) {
- vc->vc_sw->con_flush_scrollback(vc);
- } else if (con_is_visible(vc)) {
- /*
- * When no con_flush_scrollback method is provided then the
- * legacy way for flushing the scrollback buffer is to use
- * a side effect of the con_switch method. We do it only on
- * the foreground console as background consoles have no
- * scrollback buffers in that case and we obviously don't
- * want to switch to them.
- */
- hide_cursor(vc);
- vc->vc_sw->con_switch(vc);
- set_cursor(vc);
- }
+ if (!con_is_visible(vc))
+ return;
+
+ /*
+ * The legacy way for flushing the scrollback buffer is to use a side
+ * effect of the con_switch method. We do it only on the foreground
+ * console as background consoles have no scrollback buffers in that
+ * case and we obviously don't want to switch to them.
+ */
+ hide_cursor(vc);
+ vc->vc_sw->con_switch(vc);
+ set_cursor(vc);
}
/*
@@ -962,7 +950,7 @@ void redraw_screen(struct vc_data *vc, int is_switch)
}
if (redraw) {
- int update;
+ bool update;
int old_was_color = vc->vc_can_do_color;
set_origin(vc);
@@ -999,7 +987,7 @@ int vc_cons_allocated(unsigned int i)
return (i < MAX_NR_CONSOLES && vc_cons[i].d);
}
-static void visual_init(struct vc_data *vc, int num, int init)
+static void visual_init(struct vc_data *vc, int num, bool init)
{
/* ++Geert: vc->vc_sw->con_init determines console size */
if (vc->vc_sw)
@@ -1083,7 +1071,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
vc->port.ops = &vc_port_ops;
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
- visual_init(vc, currcons, 1);
+ visual_init(vc, currcons, true);
if (!*vc->uni_pagedict_loc)
con_set_default_unimap(vc);
@@ -1115,51 +1103,44 @@ err_free:
}
static inline int resize_screen(struct vc_data *vc, int width, int height,
- int user)
+ bool from_user)
{
/* Resizes the resolution of the display adapater */
int err = 0;
if (vc->vc_sw->con_resize)
- err = vc->vc_sw->con_resize(vc, width, height, user);
+ err = vc->vc_sw->con_resize(vc, width, height, from_user);
return err;
}
/**
- * vc_do_resize - resizing method for the tty
- * @tty: tty being resized
- * @vc: virtual console private data
- * @cols: columns
- * @lines: lines
+ * vc_do_resize - resizing method for the tty
+ * @tty: tty being resized
+ * @vc: virtual console private data
+ * @cols: columns
+ * @lines: lines
+ * @from_user: invoked by a user?
*
- * Resize a virtual console, clipping according to the actual constraints.
- * If the caller passes a tty structure then update the termios winsize
- * information and perform any necessary signal handling.
+ * Resize a virtual console, clipping according to the actual constraints. If
+ * the caller passes a tty structure then update the termios winsize
+ * information and perform any necessary signal handling.
*
- * Caller must hold the console semaphore. Takes the termios rwsem and
- * ctrl.lock of the tty IFF a tty is passed.
+ * Locking: Caller must hold the console semaphore. Takes the termios rwsem and
+ * ctrl.lock of the tty IFF a tty is passed.
*/
-
static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
- unsigned int cols, unsigned int lines)
+ unsigned int cols, unsigned int lines, bool from_user)
{
unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0;
unsigned long end;
unsigned int old_rows, old_row_size, first_copied_row;
unsigned int new_cols, new_rows, new_row_size, new_screen_size;
- unsigned int user;
unsigned short *oldscreen, *newscreen;
u32 **new_uniscr = NULL;
WARN_CONSOLE_UNLOCKED();
- if (!vc)
- return -ENXIO;
-
- user = vc->vc_resize_user;
- vc->vc_resize_user = 0;
-
if (cols > VC_MAXCOL || lines > VC_MAXROW)
return -EINVAL;
@@ -1185,7 +1166,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
* to deal with possible errors from the code below, we call
* the resize_screen here as well.
*/
- return resize_screen(vc, new_cols, new_rows, user);
+ return resize_screen(vc, new_cols, new_rows, from_user);
}
if (new_screen_size > KMALLOC_MAX_SIZE || !new_screen_size)
@@ -1208,7 +1189,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
old_rows = vc->vc_rows;
old_row_size = vc->vc_size_row;
- err = resize_screen(vc, new_cols, new_rows, user);
+ err = resize_screen(vc, new_cols, new_rows, from_user);
if (err) {
kfree(newscreen);
vc_uniscr_free(new_uniscr);
@@ -1295,34 +1276,35 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
}
/**
- * vc_resize - resize a VT
- * @vc: virtual console
- * @cols: columns
- * @rows: rows
+ * __vc_resize - resize a VT
+ * @vc: virtual console
+ * @cols: columns
+ * @rows: rows
+ * @from_user: invoked by a user?
+ *
+ * Resize a virtual console as seen from the console end of things. We use the
+ * common vc_do_resize() method to update the structures.
*
- * Resize a virtual console as seen from the console end of things. We
- * use the common vc_do_resize methods to update the structures. The
- * caller must hold the console sem to protect console internals and
- * vc->port.tty
+ * Locking: The caller must hold the console sem to protect console internals
+ * and @vc->port.tty.
*/
-
-int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows)
+int __vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows,
+ bool from_user)
{
- return vc_do_resize(vc->port.tty, vc, cols, rows);
+ return vc_do_resize(vc->port.tty, vc, cols, rows, from_user);
}
-EXPORT_SYMBOL(vc_resize);
+EXPORT_SYMBOL(__vc_resize);
/**
- * vt_resize - resize a VT
- * @tty: tty to resize
- * @ws: winsize attributes
+ * vt_resize - resize a VT
+ * @tty: tty to resize
+ * @ws: winsize attributes
*
- * Resize a virtual terminal. This is called by the tty layer as we
- * register our own handler for resizing. The mutual helper does all
- * the actual work.
+ * Resize a virtual terminal. This is called by the tty layer as we register
+ * our own handler for resizing. The mutual helper does all the actual work.
*
- * Takes the console sem and the called methods then take the tty
- * termios_rwsem and the tty ctrl.lock in that order.
+ * Locking: Takes the console sem and the called methods then take the tty
+ * termios_rwsem and the tty ctrl.lock in that order.
*/
static int vt_resize(struct tty_struct *tty, struct winsize *ws)
{
@@ -1330,7 +1312,7 @@ static int vt_resize(struct tty_struct *tty, struct winsize *ws)
int ret;
console_lock();
- ret = vc_do_resize(tty, vc, ws->ws_col, ws->ws_row);
+ ret = vc_do_resize(tty, vc, ws->ws_col, ws->ws_row, false);
console_unlock();
return ret;
}
@@ -1503,36 +1485,43 @@ static inline void del(struct vc_data *vc)
/* ignored */
}
-static void csi_J(struct vc_data *vc, int vpar)
+enum CSI_J {
+ CSI_J_CURSOR_TO_END = 0,
+ CSI_J_START_TO_CURSOR = 1,
+ CSI_J_VISIBLE = 2,
+ CSI_J_FULL = 3,
+};
+
+static void csi_J(struct vc_data *vc, enum CSI_J vpar)
{
+ unsigned short *start;
unsigned int count;
- unsigned short * start;
switch (vpar) {
- case 0: /* erase from cursor to end of display */
- vc_uniscr_clear_line(vc, vc->state.x,
- vc->vc_cols - vc->state.x);
- vc_uniscr_clear_lines(vc, vc->state.y + 1,
- vc->vc_rows - vc->state.y - 1);
- count = (vc->vc_scr_end - vc->vc_pos) >> 1;
- start = (unsigned short *)vc->vc_pos;
- break;
- case 1: /* erase from start to cursor */
- vc_uniscr_clear_line(vc, 0, vc->state.x + 1);
- vc_uniscr_clear_lines(vc, 0, vc->state.y);
- count = ((vc->vc_pos - vc->vc_origin) >> 1) + 1;
- start = (unsigned short *)vc->vc_origin;
- break;
- case 3: /* include scrollback */
- flush_scrollback(vc);
- fallthrough;
- case 2: /* erase whole display */
- vc_uniscr_clear_lines(vc, 0, vc->vc_rows);
- count = vc->vc_cols * vc->vc_rows;
- start = (unsigned short *)vc->vc_origin;
- break;
- default:
- return;
+ case CSI_J_CURSOR_TO_END:
+ vc_uniscr_clear_line(vc, vc->state.x,
+ vc->vc_cols - vc->state.x);
+ vc_uniscr_clear_lines(vc, vc->state.y + 1,
+ vc->vc_rows - vc->state.y - 1);
+ count = (vc->vc_scr_end - vc->vc_pos) >> 1;
+ start = (unsigned short *)vc->vc_pos;
+ break;
+ case CSI_J_START_TO_CURSOR:
+ vc_uniscr_clear_line(vc, 0, vc->state.x + 1);
+ vc_uniscr_clear_lines(vc, 0, vc->state.y);
+ count = ((vc->vc_pos - vc->vc_origin) >> 1) + 1;
+ start = (unsigned short *)vc->vc_origin;
+ break;
+ case CSI_J_FULL:
+ flush_scrollback(vc);
+ fallthrough;
+ case CSI_J_VISIBLE:
+ vc_uniscr_clear_lines(vc, 0, vc->vc_rows);
+ count = vc->vc_cols * vc->vc_rows;
+ start = (unsigned short *)vc->vc_origin;
+ break;
+ default:
+ return;
}
scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
if (con_should_update(vc))
@@ -1540,27 +1529,33 @@ static void csi_J(struct vc_data *vc, int vpar)
vc->vc_need_wrap = 0;
}
-static void csi_K(struct vc_data *vc, int vpar)
+enum {
+ CSI_K_CURSOR_TO_LINEEND = 0,
+ CSI_K_LINESTART_TO_CURSOR = 1,
+ CSI_K_LINE = 2,
+};
+
+static void csi_K(struct vc_data *vc)
{
unsigned int count;
unsigned short *start = (unsigned short *)vc->vc_pos;
int offset;
- switch (vpar) {
- case 0: /* erase from cursor to end of line */
- offset = 0;
- count = vc->vc_cols - vc->state.x;
- break;
- case 1: /* erase from start of line to cursor */
- offset = -vc->state.x;
- count = vc->state.x + 1;
- break;
- case 2: /* erase whole line */
- offset = -vc->state.x;
- count = vc->vc_cols;
- break;
- default:
- return;
+ switch (vc->vc_par[0]) {
+ case CSI_K_CURSOR_TO_LINEEND:
+ offset = 0;
+ count = vc->vc_cols - vc->state.x;
+ break;
+ case CSI_K_LINESTART_TO_CURSOR:
+ offset = -vc->state.x;
+ count = vc->state.x + 1;
+ break;
+ case CSI_K_LINE:
+ offset = -vc->state.x;
+ count = vc->vc_cols;
+ break;
+ default:
+ return;
}
vc_uniscr_clear_line(vc, vc->state.x + offset, count);
scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count);
@@ -1569,20 +1564,15 @@ static void csi_K(struct vc_data *vc, int vpar)
do_update_region(vc, (unsigned long)(start + offset), count);
}
-/* erase the following vpar positions */
-static void csi_X(struct vc_data *vc, unsigned int vpar)
+/* erase the following count positions */
+static void csi_X(struct vc_data *vc)
{ /* not vt100? */
- unsigned int count;
-
- if (!vpar)
- vpar++;
-
- count = min(vpar, vc->vc_cols - vc->state.x);
+ unsigned int count = clamp(vc->vc_par[0], 1, vc->vc_cols - vc->state.x);
vc_uniscr_clear_line(vc, vc->state.x, count);
scr_memsetw((unsigned short *)vc->vc_pos, vc->vc_video_erase_char, 2 * count);
if (con_should_update(vc))
- vc->vc_sw->con_clear(vc, vc->state.y, vc->state.x, 1, count);
+ vc->vc_sw->con_clear(vc, vc->state.y, vc->state.x, count);
vc->vc_need_wrap = 0;
}
@@ -1598,7 +1588,7 @@ static void default_attr(struct vc_data *vc)
struct rgb { u8 r; u8 g; u8 b; };
-static void rgb_from_256(int i, struct rgb *c)
+static void rgb_from_256(unsigned int i, struct rgb *c)
{
if (i < 8) { /* Standard colours. */
c->r = i&1 ? 0xaa : 0x00;
@@ -1609,9 +1599,12 @@ static void rgb_from_256(int i, struct rgb *c)
c->g = i&2 ? 0xff : 0x55;
c->b = i&4 ? 0xff : 0x55;
} else if (i < 232) { /* 6x6x6 colour cube. */
- c->r = (i - 16) / 36 * 85 / 2;
- c->g = (i - 16) / 6 % 6 * 85 / 2;
- c->b = (i - 16) % 6 * 85 / 2;
+ i -= 16;
+ c->b = i % 6 * 255 / 6;
+ i /= 6;
+ c->g = i % 6 * 255 / 6;
+ i /= 6;
+ c->r = i * 255 / 6;
} else /* Grayscale ramp. */
c->r = c->g = c->b = i * 10 - 2312;
}
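The reworked rgb_from_256() decodes the 6x6x6 cube by peeling off base-6 digits and scaling each one to 0-255 with digit * 255 / 6. Worked example for palette index 110: i becomes 94 after subtracting 16, so b = 94 % 6 * 255 / 6 = 170; then i = 15 gives g = 3 * 255 / 6 = 127; finally i = 2 gives r = 2 * 255 / 6 = 85, i.e. roughly (85, 127, 170). This is the kernel's linear approximation of the cube, not a claim to match xterm's exact colour levels.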
@@ -1681,6 +1674,39 @@ static int vc_t416_color(struct vc_data *vc, int i,
return i;
}
+enum {
+ CSI_m_DEFAULT = 0,
+ CSI_m_BOLD = 1,
+ CSI_m_HALF_BRIGHT = 2,
+ CSI_m_ITALIC = 3,
+ CSI_m_UNDERLINE = 4,
+ CSI_m_BLINK = 5,
+ CSI_m_REVERSE = 7,
+ CSI_m_PRI_FONT = 10,
+ CSI_m_ALT_FONT1 = 11,
+ CSI_m_ALT_FONT2 = 12,
+ CSI_m_DOUBLE_UNDERLINE = 21,
+ CSI_m_NORMAL_INTENSITY = 22,
+ CSI_m_NO_ITALIC = 23,
+ CSI_m_NO_UNDERLINE = 24,
+ CSI_m_NO_BLINK = 25,
+ CSI_m_NO_REVERSE = 27,
+ CSI_m_FG_COLOR_BEG = 30,
+ CSI_m_FG_COLOR_END = 37,
+ CSI_m_FG_COLOR = 38,
+ CSI_m_DEFAULT_FG_COLOR = 39,
+ CSI_m_BG_COLOR_BEG = 40,
+ CSI_m_BG_COLOR_END = 47,
+ CSI_m_BG_COLOR = 48,
+ CSI_m_DEFAULT_BG_COLOR = 49,
+ CSI_m_BRIGHT_FG_COLOR_BEG = 90,
+ CSI_m_BRIGHT_FG_COLOR_END = 97,
+ CSI_m_BRIGHT_FG_COLOR_OFF = CSI_m_BRIGHT_FG_COLOR_BEG - CSI_m_FG_COLOR_BEG,
+ CSI_m_BRIGHT_BG_COLOR_BEG = 100,
+ CSI_m_BRIGHT_BG_COLOR_END = 107,
+ CSI_m_BRIGHT_BG_COLOR_OFF = CSI_m_BRIGHT_BG_COLOR_BEG - CSI_m_BG_COLOR_BEG,
+};
+
/* console_lock is held */
static void csi_m(struct vc_data *vc)
{
@@ -1688,33 +1714,33 @@ static void csi_m(struct vc_data *vc)
for (i = 0; i <= vc->vc_npar; i++)
switch (vc->vc_par[i]) {
- case 0: /* all attributes off */
+ case CSI_m_DEFAULT: /* all attributes off */
default_attr(vc);
break;
- case 1:
+ case CSI_m_BOLD:
vc->state.intensity = VCI_BOLD;
break;
- case 2:
+ case CSI_m_HALF_BRIGHT:
vc->state.intensity = VCI_HALF_BRIGHT;
break;
- case 3:
+ case CSI_m_ITALIC:
vc->state.italic = true;
break;
- case 21:
+ case CSI_m_DOUBLE_UNDERLINE:
/*
* No console drivers support double underline, so
* convert it to a single underline.
*/
- case 4:
+ case CSI_m_UNDERLINE:
vc->state.underline = true;
break;
- case 5:
+ case CSI_m_BLINK:
vc->state.blink = true;
break;
- case 7:
+ case CSI_m_REVERSE:
vc->state.reverse = true;
break;
- case 10: /* ANSI X3.64-1979 (SCO-ish?)
+ case CSI_m_PRI_FONT: /* ANSI X3.64-1979 (SCO-ish?)
* Select primary font, don't display control chars if
* defined, don't set bit 8 on output.
*/
@@ -1722,7 +1748,7 @@ static void csi_m(struct vc_data *vc)
vc->vc_disp_ctrl = 0;
vc->vc_toggle_meta = 0;
break;
- case 11: /* ANSI X3.64-1979 (SCO-ish?)
+ case CSI_m_ALT_FONT1: /* ANSI X3.64-1979 (SCO-ish?)
* Select first alternate font, lets chars < 32 be
* displayed as ROM chars.
*/
@@ -1730,7 +1756,7 @@ static void csi_m(struct vc_data *vc)
vc->vc_disp_ctrl = 1;
vc->vc_toggle_meta = 0;
break;
- case 12: /* ANSI X3.64-1979 (SCO-ish?)
+ case CSI_m_ALT_FONT2: /* ANSI X3.64-1979 (SCO-ish?)
* Select second alternate font, toggle high bit
* before displaying as ROM char.
*/
@@ -1738,47 +1764,51 @@ static void csi_m(struct vc_data *vc)
vc->vc_disp_ctrl = 1;
vc->vc_toggle_meta = 1;
break;
- case 22:
+ case CSI_m_NORMAL_INTENSITY:
vc->state.intensity = VCI_NORMAL;
break;
- case 23:
+ case CSI_m_NO_ITALIC:
vc->state.italic = false;
break;
- case 24:
+ case CSI_m_NO_UNDERLINE:
vc->state.underline = false;
break;
- case 25:
+ case CSI_m_NO_BLINK:
vc->state.blink = false;
break;
- case 27:
+ case CSI_m_NO_REVERSE:
vc->state.reverse = false;
break;
- case 38:
+ case CSI_m_FG_COLOR:
i = vc_t416_color(vc, i, rgb_foreground);
break;
- case 48:
+ case CSI_m_BG_COLOR:
i = vc_t416_color(vc, i, rgb_background);
break;
- case 39:
+ case CSI_m_DEFAULT_FG_COLOR:
vc->state.color = (vc->vc_def_color & 0x0f) |
(vc->state.color & 0xf0);
break;
- case 49:
+ case CSI_m_DEFAULT_BG_COLOR:
vc->state.color = (vc->vc_def_color & 0xf0) |
(vc->state.color & 0x0f);
break;
- default:
- if (vc->vc_par[i] >= 90 && vc->vc_par[i] <= 107) {
- if (vc->vc_par[i] < 100)
- vc->state.intensity = VCI_BOLD;
- vc->vc_par[i] -= 60;
- }
- if (vc->vc_par[i] >= 30 && vc->vc_par[i] <= 37)
- vc->state.color = color_table[vc->vc_par[i] - 30]
- | (vc->state.color & 0xf0);
- else if (vc->vc_par[i] >= 40 && vc->vc_par[i] <= 47)
- vc->state.color = (color_table[vc->vc_par[i] - 40] << 4)
- | (vc->state.color & 0x0f);
+ case CSI_m_BRIGHT_FG_COLOR_BEG ... CSI_m_BRIGHT_FG_COLOR_END:
+ vc->state.intensity = VCI_BOLD;
+ vc->vc_par[i] -= CSI_m_BRIGHT_FG_COLOR_OFF;
+ fallthrough;
+ case CSI_m_FG_COLOR_BEG ... CSI_m_FG_COLOR_END:
+ vc->vc_par[i] -= CSI_m_FG_COLOR_BEG;
+ vc->state.color = color_table[vc->vc_par[i]] |
+ (vc->state.color & 0xf0);
+ break;
+ case CSI_m_BRIGHT_BG_COLOR_BEG ... CSI_m_BRIGHT_BG_COLOR_END:
+ vc->vc_par[i] -= CSI_m_BRIGHT_BG_COLOR_OFF;
+ fallthrough;
+ case CSI_m_BG_COLOR_BEG ... CSI_m_BG_COLOR_END:
+ vc->vc_par[i] -= CSI_m_BG_COLOR_BEG;
+ vc->state.color = (color_table[vc->vc_par[i]] << 4) |
+ (vc->state.color & 0x0f);
break;
}
update_attr(vc);
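The new bright-colour ranges fold onto the normal colour handling by setting bold intensity and subtracting a fixed offset of 60 (90 - 30 for foreground, 100 - 40 for background) before falling through. Worked example: SGR parameter 94 (bright blue foreground) hits the CSI_m_BRIGHT_FG_COLOR case, intensity becomes VCI_BOLD, 94 - 60 = 34 falls through to the plain foreground case, and 34 - 30 = 4 indexes color_table[] exactly as a regular "ESC [ 34 m" would.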
@@ -1832,133 +1862,175 @@ int mouse_reporting(void)
return vc_cons[fg_console].d->vc_report_mouse;
}
+enum {
+ CSI_DEC_hl_CURSOR_KEYS = 1, /* CKM: cursor keys send ^[Ox/^[[x */
+ CSI_DEC_hl_132_COLUMNS = 3, /* COLM: 80/132 mode switch */
+ CSI_DEC_hl_REVERSE_VIDEO = 5, /* SCNM */
+ CSI_DEC_hl_ORIGIN_MODE = 6, /* OM: origin relative/absolute */
+ CSI_DEC_hl_AUTOWRAP = 7, /* AWM */
+ CSI_DEC_hl_AUTOREPEAT = 8, /* ARM */
+ CSI_DEC_hl_MOUSE_X10 = 9,
+ CSI_DEC_hl_SHOW_CURSOR = 25, /* TCEM */
+ CSI_DEC_hl_MOUSE_VT200 = 1000,
+};
+
/* console_lock is held */
-static void set_mode(struct vc_data *vc, int on_off)
+static void csi_DEC_hl(struct vc_data *vc, bool on_off)
{
- int i;
+ unsigned int i;
for (i = 0; i <= vc->vc_npar; i++)
- if (vc->vc_priv == EPdec) {
- switch(vc->vc_par[i]) { /* DEC private modes set/reset */
- case 1: /* Cursor keys send ^[Ox/^[[x */
- if (on_off)
- set_kbd(vc, decckm);
- else
- clr_kbd(vc, decckm);
- break;
- case 3: /* 80/132 mode switch unimplemented */
+ switch (vc->vc_par[i]) {
+ case CSI_DEC_hl_CURSOR_KEYS:
+ if (on_off)
+ set_kbd(vc, decckm);
+ else
+ clr_kbd(vc, decckm);
+ break;
+ case CSI_DEC_hl_132_COLUMNS: /* unimplemented */
#if 0
- vc_resize(deccolm ? 132 : 80, vc->vc_rows);
- /* this alone does not suffice; some user mode
- utility has to change the hardware regs */
+ vc_resize(deccolm ? 132 : 80, vc->vc_rows);
+ /* this alone does not suffice; some user mode
+ utility has to change the hardware regs */
#endif
- break;
- case 5: /* Inverted screen on/off */
- if (vc->vc_decscnm != on_off) {
- vc->vc_decscnm = on_off;
- invert_screen(vc, 0,
- vc->vc_screenbuf_size,
- false);
- update_attr(vc);
- }
- break;
- case 6: /* Origin relative/absolute */
- vc->vc_decom = on_off;
- gotoxay(vc, 0, 0);
- break;
- case 7: /* Autowrap on/off */
- vc->vc_decawm = on_off;
- break;
- case 8: /* Autorepeat on/off */
- if (on_off)
- set_kbd(vc, decarm);
- else
- clr_kbd(vc, decarm);
- break;
- case 9:
- vc->vc_report_mouse = on_off ? 1 : 0;
- break;
- case 25: /* Cursor on/off */
- vc->vc_deccm = on_off;
- break;
- case 1000:
- vc->vc_report_mouse = on_off ? 2 : 0;
- break;
- }
- } else {
- switch(vc->vc_par[i]) { /* ANSI modes set/reset */
- case 3: /* Monitor (display ctrls) */
- vc->vc_disp_ctrl = on_off;
- break;
- case 4: /* Insert Mode on/off */
- vc->vc_decim = on_off;
- break;
- case 20: /* Lf, Enter == CrLf/Lf */
- if (on_off)
- set_kbd(vc, lnm);
- else
- clr_kbd(vc, lnm);
- break;
+ break;
+ case CSI_DEC_hl_REVERSE_VIDEO:
+ if (vc->vc_decscnm != on_off) {
+ vc->vc_decscnm = on_off;
+ invert_screen(vc, 0, vc->vc_screenbuf_size,
+ false);
+ update_attr(vc);
}
+ break;
+ case CSI_DEC_hl_ORIGIN_MODE:
+ vc->vc_decom = on_off;
+ gotoxay(vc, 0, 0);
+ break;
+ case CSI_DEC_hl_AUTOWRAP:
+ vc->vc_decawm = on_off;
+ break;
+ case CSI_DEC_hl_AUTOREPEAT:
+ if (on_off)
+ set_kbd(vc, decarm);
+ else
+ clr_kbd(vc, decarm);
+ break;
+ case CSI_DEC_hl_MOUSE_X10:
+ vc->vc_report_mouse = on_off ? 1 : 0;
+ break;
+ case CSI_DEC_hl_SHOW_CURSOR:
+ vc->vc_deccm = on_off;
+ break;
+ case CSI_DEC_hl_MOUSE_VT200:
+ vc->vc_report_mouse = on_off ? 2 : 0;
+ break;
}
}
+enum {
+ CSI_hl_DISPLAY_CTRL = 3, /* handle ansi control chars */
+ CSI_hl_INSERT = 4, /* IRM: insert/replace */
+ CSI_hl_AUTO_NL = 20, /* LNM: Enter == CrLf/Lf */
+};
+
/* console_lock is held */
-static void setterm_command(struct vc_data *vc)
+static void csi_hl(struct vc_data *vc, bool on_off)
+{
+ unsigned int i;
+
+ for (i = 0; i <= vc->vc_npar; i++)
+ switch (vc->vc_par[i]) { /* ANSI modes set/reset */
+ case CSI_hl_DISPLAY_CTRL:
+ vc->vc_disp_ctrl = on_off;
+ break;
+ case CSI_hl_INSERT:
+ vc->vc_decim = on_off;
+ break;
+ case CSI_hl_AUTO_NL:
+ if (on_off)
+ set_kbd(vc, lnm);
+ else
+ clr_kbd(vc, lnm);
+ break;
+ }
+}
+
+enum CSI_right_square_bracket {
+ CSI_RSB_COLOR_FOR_UNDERLINE = 1,
+ CSI_RSB_COLOR_FOR_HALF_BRIGHT = 2,
+ CSI_RSB_MAKE_CUR_COLOR_DEFAULT = 8,
+ CSI_RSB_BLANKING_INTERVAL = 9,
+ CSI_RSB_BELL_FREQUENCY = 10,
+ CSI_RSB_BELL_DURATION = 11,
+ CSI_RSB_BRING_CONSOLE_TO_FRONT = 12,
+ CSI_RSB_UNBLANK = 13,
+ CSI_RSB_VESA_OFF_INTERVAL = 14,
+ CSI_RSB_BRING_PREV_CONSOLE_TO_FRONT = 15,
+ CSI_RSB_CURSOR_BLINK_INTERVAL = 16,
+};
+
+/*
+ * csi_RSB - csi+] (Right Square Bracket) handler
+ *
+ * These are linux console private sequences.
+ *
+ * console_lock is held
+ */
+static void csi_RSB(struct vc_data *vc)
{
switch (vc->vc_par[0]) {
- case 1: /* set color for underline mode */
+ case CSI_RSB_COLOR_FOR_UNDERLINE:
if (vc->vc_can_do_color && vc->vc_par[1] < 16) {
vc->vc_ulcolor = color_table[vc->vc_par[1]];
if (vc->state.underline)
update_attr(vc);
}
break;
- case 2: /* set color for half intensity mode */
+ case CSI_RSB_COLOR_FOR_HALF_BRIGHT:
if (vc->vc_can_do_color && vc->vc_par[1] < 16) {
vc->vc_halfcolor = color_table[vc->vc_par[1]];
if (vc->state.intensity == VCI_HALF_BRIGHT)
update_attr(vc);
}
break;
- case 8: /* store colors as defaults */
+ case CSI_RSB_MAKE_CUR_COLOR_DEFAULT:
vc->vc_def_color = vc->vc_attr;
if (vc->vc_hi_font_mask == 0x100)
vc->vc_def_color >>= 1;
default_attr(vc);
update_attr(vc);
break;
- case 9: /* set blanking interval */
+ case CSI_RSB_BLANKING_INTERVAL:
blankinterval = min(vc->vc_par[1], 60U) * 60;
poke_blanked_console();
break;
- case 10: /* set bell frequency in Hz */
+ case CSI_RSB_BELL_FREQUENCY:
if (vc->vc_npar >= 1)
vc->vc_bell_pitch = vc->vc_par[1];
else
vc->vc_bell_pitch = DEFAULT_BELL_PITCH;
break;
- case 11: /* set bell duration in msec */
+ case CSI_RSB_BELL_DURATION:
if (vc->vc_npar >= 1)
vc->vc_bell_duration = (vc->vc_par[1] < 2000) ?
msecs_to_jiffies(vc->vc_par[1]) : 0;
else
vc->vc_bell_duration = DEFAULT_BELL_DURATION;
break;
- case 12: /* bring specified console to the front */
+ case CSI_RSB_BRING_CONSOLE_TO_FRONT:
if (vc->vc_par[1] >= 1 && vc_cons_allocated(vc->vc_par[1] - 1))
set_console(vc->vc_par[1] - 1);
break;
- case 13: /* unblank the screen */
+ case CSI_RSB_UNBLANK:
poke_blanked_console();
break;
- case 14: /* set vesa powerdown interval */
+ case CSI_RSB_VESA_OFF_INTERVAL:
vesa_off_interval = min(vc->vc_par[1], 60U) * 60 * HZ;
break;
- case 15: /* activate the previous console */
+ case CSI_RSB_BRING_PREV_CONSOLE_TO_FRONT:
set_console(last_console);
break;
- case 16: /* set cursor blink duration in msec */
+ case CSI_RSB_CURSOR_BLINK_INTERVAL:
if (vc->vc_npar >= 1 && vc->vc_par[1] >= 50 &&
vc->vc_par[1] <= USHRT_MAX)
vc->vc_cur_blink_ms = vc->vc_par[1];
@@ -1971,41 +2043,32 @@ static void setterm_command(struct vc_data *vc)
/* console_lock is held */
static void csi_at(struct vc_data *vc, unsigned int nr)
{
- if (nr > vc->vc_cols - vc->state.x)
- nr = vc->vc_cols - vc->state.x;
- else if (!nr)
- nr = 1;
+ nr = clamp(nr, 1, vc->vc_cols - vc->state.x);
insert_char(vc, nr);
}
/* console_lock is held */
-static void csi_L(struct vc_data *vc, unsigned int nr)
+static void csi_L(struct vc_data *vc)
{
- if (nr > vc->vc_rows - vc->state.y)
- nr = vc->vc_rows - vc->state.y;
- else if (!nr)
- nr = 1;
+ unsigned int nr = clamp(vc->vc_par[0], 1, vc->vc_rows - vc->state.y);
+
con_scroll(vc, vc->state.y, vc->vc_bottom, SM_DOWN, nr);
vc->vc_need_wrap = 0;
}
/* console_lock is held */
-static void csi_P(struct vc_data *vc, unsigned int nr)
+static void csi_P(struct vc_data *vc)
{
- if (nr > vc->vc_cols - vc->state.x)
- nr = vc->vc_cols - vc->state.x;
- else if (!nr)
- nr = 1;
+ unsigned int nr = clamp(vc->vc_par[0], 1, vc->vc_cols - vc->state.x);
+
delete_char(vc, nr);
}
/* console_lock is held */
-static void csi_M(struct vc_data *vc, unsigned int nr)
+static void csi_M(struct vc_data *vc)
{
- if (nr > vc->vc_rows - vc->state.y)
- nr = vc->vc_rows - vc->state.y;
- else if (!nr)
- nr=1;
+ unsigned int nr = clamp(vc->vc_par[0], 1, vc->vc_rows - vc->state.y);
+
con_scroll(vc, vc->state.y, vc->vc_bottom, SM_UP, nr);
vc->vc_need_wrap = 0;
}
@@ -2028,9 +2091,48 @@ static void restore_cur(struct vc_data *vc)
vc->vc_need_wrap = 0;
}
-enum { ESnormal, ESesc, ESsquare, ESgetpars, ESfunckey,
- EShash, ESsetG0, ESsetG1, ESpercent, EScsiignore, ESnonstd,
- ESpalette, ESosc, ESapc, ESpm, ESdcs };
+/**
+ * enum vc_ctl_state - control characters state of a vt
+ *
+ * @ESnormal: initial state, no control characters parsed
+ * @ESesc: ESC parsed
+ * @ESsquare: CSI parsed -- modifiers/parameters/ctrl chars expected
+ * @ESgetpars: CSI parsed -- parameters/ctrl chars expected
+ * @ESfunckey: CSI [ parsed
+ * @EShash: ESC # parsed
+ * @ESsetG0: ESC ( parsed
+ * @ESsetG1: ESC ) parsed
+ * @ESpercent: ESC % parsed
+ * @EScsiignore: CSI [0x20-0x3f] parsed
+ * @ESnonstd: OSC parsed
+ * @ESpalette: OSC P parsed
+ * @ESosc: OSC [0-9] parsed
+ * @ESANSI_first: first state for ignoring ansi control sequences
+ * @ESapc: ESC _ parsed
+ * @ESpm: ESC ^ parsed
+ * @ESdcs: ESC P parsed
+ * @ESANSI_last: last state for ignoring ansi control sequences
+ */
+enum vc_ctl_state {
+ ESnormal,
+ ESesc,
+ ESsquare,
+ ESgetpars,
+ ESfunckey,
+ EShash,
+ ESsetG0,
+ ESsetG1,
+ ESpercent,
+ EScsiignore,
+ ESnonstd,
+ ESpalette,
+ ESosc,
+ ESANSI_first = ESosc,
+ ESapc,
+ ESpm,
+ ESdcs,
+ ESANSI_last = ESdcs,
+};
/* console_lock is held (except via vc_init()) */
static void reset_terminal(struct vc_data *vc, int do_clear)
@@ -2078,10 +2180,10 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
gotoxy(vc, 0, 0);
save_cur(vc);
if (do_clear)
- csi_J(vc, 2);
+ csi_J(vc, CSI_J_VISIBLE);
}
-static void vc_setGx(struct vc_data *vc, unsigned int which, int c)
+static void vc_setGx(struct vc_data *vc, unsigned int which, u8 c)
{
unsigned char *charset = &vc->state.Gx_charset[which];
@@ -2104,36 +2206,54 @@ static void vc_setGx(struct vc_data *vc, unsigned int which, int c)
vc->vc_translate = set_translate(*charset, vc);
}
-/* is this state an ANSI control string? */
-static bool ansi_control_string(unsigned int state)
+static bool ansi_control_string(enum vc_ctl_state state)
{
- if (state == ESosc || state == ESapc || state == ESpm || state == ESdcs)
- return true;
- return false;
+ return state >= ESANSI_first && state <= ESANSI_last;
}
-/* console_lock is held */
-static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
+enum {
+ ASCII_NULL = 0,
+ ASCII_BELL = 7,
+ ASCII_BACKSPACE = 8,
+ ASCII_IGNORE_FIRST = ASCII_BACKSPACE,
+ ASCII_HTAB = 9,
+ ASCII_LINEFEED = 10,
+ ASCII_VTAB = 11,
+ ASCII_FORMFEED = 12,
+ ASCII_CAR_RET = 13,
+ ASCII_IGNORE_LAST = ASCII_CAR_RET,
+ ASCII_SHIFTOUT = 14,
+ ASCII_SHIFTIN = 15,
+ ASCII_CANCEL = 24,
+ ASCII_SUBSTITUTE = 26,
+ ASCII_ESCAPE = 27,
+ ASCII_CSI_IGNORE_FIRST = ' ', /* 0x2x, 0x3a and 0x3c - 0x3f */
+ ASCII_CSI_IGNORE_LAST = '?',
+ ASCII_DEL = 127,
+ ASCII_EXT_CSI = 128 + ASCII_ESCAPE,
+};
+
+/*
+ * Handle ascii characters in control sequences and change states accordingly.
+ * E.g. ESC sets the state of vc to ESesc.
+ *
+ * Returns: true if @c handled.
+ */
+static bool handle_ascii(struct tty_struct *tty, struct vc_data *vc, u8 c)
{
- /*
- * Control characters can be used in the _middle_
- * of an escape sequence, aside from ANSI control strings.
- */
- if (ansi_control_string(vc->vc_state) && c >= 8 && c <= 13)
- return;
switch (c) {
- case 0:
- return;
- case 7:
+ case ASCII_NULL:
+ return true;
+ case ASCII_BELL:
if (ansi_control_string(vc->vc_state))
vc->vc_state = ESnormal;
else if (vc->vc_bell_duration)
kd_mksound(vc->vc_bell_pitch, vc->vc_bell_duration);
- return;
- case 8:
+ return true;
+ case ASCII_BACKSPACE:
bs(vc);
- return;
- case 9:
+ return true;
+ case ASCII_HTAB:
vc->vc_pos -= (vc->state.x << 1);
vc->state.x = find_next_bit(vc->vc_tab_stop,
@@ -2144,119 +2264,330 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
vc->vc_pos += (vc->state.x << 1);
notify_write(vc, '\t');
- return;
- case 10: case 11: case 12:
+ return true;
+ case ASCII_LINEFEED:
+ case ASCII_VTAB:
+ case ASCII_FORMFEED:
lf(vc);
if (!is_kbd(vc, lnm))
- return;
+ return true;
fallthrough;
- case 13:
+ case ASCII_CAR_RET:
cr(vc);
- return;
- case 14:
+ return true;
+ case ASCII_SHIFTOUT:
vc->state.charset = 1;
vc->vc_translate = set_translate(vc->state.Gx_charset[1], vc);
vc->vc_disp_ctrl = 1;
- return;
- case 15:
+ return true;
+ case ASCII_SHIFTIN:
vc->state.charset = 0;
vc->vc_translate = set_translate(vc->state.Gx_charset[0], vc);
vc->vc_disp_ctrl = 0;
- return;
- case 24: case 26:
+ return true;
+ case ASCII_CANCEL:
+ case ASCII_SUBSTITUTE:
vc->vc_state = ESnormal;
- return;
- case 27:
+ return true;
+ case ASCII_ESCAPE:
vc->vc_state = ESesc;
- return;
- case 127:
+ return true;
+ case ASCII_DEL:
del(vc);
- return;
- case 128+27:
+ return true;
+ case ASCII_EXT_CSI:
vc->vc_state = ESsquare;
- return;
+ return true;
}
- switch(vc->vc_state) {
- case ESesc:
- vc->vc_state = ESnormal;
- switch (c) {
- case '[':
- vc->vc_state = ESsquare;
- return;
- case ']':
- vc->vc_state = ESnonstd;
- return;
- case '_':
- vc->vc_state = ESapc;
- return;
- case '^':
- vc->vc_state = ESpm;
- return;
- case '%':
- vc->vc_state = ESpercent;
- return;
- case 'E':
- cr(vc);
- lf(vc);
- return;
- case 'M':
- ri(vc);
- return;
- case 'D':
- lf(vc);
- return;
- case 'H':
- if (vc->state.x < VC_TABSTOPS_COUNT)
- set_bit(vc->state.x, vc->vc_tab_stop);
- return;
- case 'P':
- vc->vc_state = ESdcs;
- return;
- case 'Z':
+
+ return false;
+}
+
+/*
+ * Handle a character (@c) following an ESC (when @vc is in the ESesc state).
+ * E.g. previous ESC with @c == '[' here yields the ESsquare state (that is:
+ * CSI).
+ */
+static void handle_esc(struct tty_struct *tty, struct vc_data *vc, u8 c)
+{
+ vc->vc_state = ESnormal;
+ switch (c) {
+ case '[':
+ vc->vc_state = ESsquare;
+ break;
+ case ']':
+ vc->vc_state = ESnonstd;
+ break;
+ case '_':
+ vc->vc_state = ESapc;
+ break;
+ case '^':
+ vc->vc_state = ESpm;
+ break;
+ case '%':
+ vc->vc_state = ESpercent;
+ break;
+ case 'E':
+ cr(vc);
+ lf(vc);
+ break;
+ case 'M':
+ ri(vc);
+ break;
+ case 'D':
+ lf(vc);
+ break;
+ case 'H':
+ if (vc->state.x < VC_TABSTOPS_COUNT)
+ set_bit(vc->state.x, vc->vc_tab_stop);
+ break;
+ case 'P':
+ vc->vc_state = ESdcs;
+ break;
+ case 'Z':
+ respond_ID(tty);
+ break;
+ case '7':
+ save_cur(vc);
+ break;
+ case '8':
+ restore_cur(vc);
+ break;
+ case '(':
+ vc->vc_state = ESsetG0;
+ break;
+ case ')':
+ vc->vc_state = ESsetG1;
+ break;
+ case '#':
+ vc->vc_state = EShash;
+ break;
+ case 'c':
+ reset_terminal(vc, 1);
+ break;
+ case '>': /* Numeric keypad */
+ clr_kbd(vc, kbdapplic);
+ break;
+ case '=': /* Appl. keypad */
+ set_kbd(vc, kbdapplic);
+ break;
+ }
+}
+
+/*
+ * Handle special DEC control sequences ("ESC [ ? parameters char"). Parameters
+ * are in @vc->vc_par and the char is in @c here.
+ */
+static void csi_DEC(struct tty_struct *tty, struct vc_data *vc, u8 c)
+{
+ switch (c) {
+ case 'h':
+ csi_DEC_hl(vc, true);
+ break;
+ case 'l':
+ csi_DEC_hl(vc, false);
+ break;
+ case 'c':
+ if (vc->vc_par[0])
+ vc->vc_cursor_type = CUR_MAKE(vc->vc_par[0],
+ vc->vc_par[1],
+ vc->vc_par[2]);
+ else
+ vc->vc_cursor_type = cur_default;
+ break;
+ case 'm':
+ clear_selection();
+ if (vc->vc_par[0])
+ vc->vc_complement_mask = vc->vc_par[0] << 8 | vc->vc_par[1];
+ else
+ vc->vc_complement_mask = vc->vc_s_complement_mask;
+ break;
+ case 'n':
+ if (vc->vc_par[0] == 5)
+ status_report(tty);
+ else if (vc->vc_par[0] == 6)
+ cursor_report(vc, tty);
+ break;
+ }
+}
+
+/*
+ * Handle Control Sequence Introducer control characters. That is
+ * "ESC [ parameters char". Parameters are in @vc->vc_par and the char is in
+ * @c here.
+ */
+static void csi_ECMA(struct tty_struct *tty, struct vc_data *vc, u8 c)
+{
+ switch (c) {
+ case 'G':
+ case '`':
+ if (vc->vc_par[0])
+ vc->vc_par[0]--;
+ gotoxy(vc, vc->vc_par[0], vc->state.y);
+ break;
+ case 'A':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ gotoxy(vc, vc->state.x, vc->state.y - vc->vc_par[0]);
+ break;
+ case 'B':
+ case 'e':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ gotoxy(vc, vc->state.x, vc->state.y + vc->vc_par[0]);
+ break;
+ case 'C':
+ case 'a':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ gotoxy(vc, vc->state.x + vc->vc_par[0], vc->state.y);
+ break;
+ case 'D':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ gotoxy(vc, vc->state.x - vc->vc_par[0], vc->state.y);
+ break;
+ case 'E':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ gotoxy(vc, 0, vc->state.y + vc->vc_par[0]);
+ break;
+ case 'F':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ gotoxy(vc, 0, vc->state.y - vc->vc_par[0]);
+ break;
+ case 'd':
+ if (vc->vc_par[0])
+ vc->vc_par[0]--;
+		gotoxay(vc, vc->state.x, vc->vc_par[0]);
+ break;
+ case 'H':
+ case 'f':
+ if (vc->vc_par[0])
+ vc->vc_par[0]--;
+ if (vc->vc_par[1])
+ vc->vc_par[1]--;
+ gotoxay(vc, vc->vc_par[1], vc->vc_par[0]);
+ break;
+ case 'J':
+ csi_J(vc, vc->vc_par[0]);
+ break;
+ case 'K':
+ csi_K(vc);
+ break;
+ case 'L':
+ csi_L(vc);
+ break;
+ case 'M':
+ csi_M(vc);
+ break;
+ case 'P':
+ csi_P(vc);
+ break;
+ case 'c':
+ if (!vc->vc_par[0])
respond_ID(tty);
- return;
- case '7':
- save_cur(vc);
- return;
- case '8':
- restore_cur(vc);
- return;
- case '(':
- vc->vc_state = ESsetG0;
- return;
- case ')':
- vc->vc_state = ESsetG1;
- return;
- case '#':
- vc->vc_state = EShash;
- return;
- case 'c':
- reset_terminal(vc, 1);
- return;
- case '>': /* Numeric keypad */
- clr_kbd(vc, kbdapplic);
- return;
- case '=': /* Appl. keypad */
- set_kbd(vc, kbdapplic);
- return;
+ break;
+ case 'g':
+ if (!vc->vc_par[0] && vc->state.x < VC_TABSTOPS_COUNT)
+ set_bit(vc->state.x, vc->vc_tab_stop);
+ else if (vc->vc_par[0] == 3)
+ bitmap_zero(vc->vc_tab_stop, VC_TABSTOPS_COUNT);
+ break;
+ case 'h':
+ csi_hl(vc, true);
+ break;
+ case 'l':
+ csi_hl(vc, false);
+ break;
+ case 'm':
+ csi_m(vc);
+ break;
+ case 'n':
+ if (vc->vc_par[0] == 5)
+ status_report(tty);
+ else if (vc->vc_par[0] == 6)
+ cursor_report(vc, tty);
+ break;
+ case 'q': /* DECLL - but only 3 leds */
+ /* map 0,1,2,3 to 0,1,2,4 */
+ if (vc->vc_par[0] < 4)
+ vt_set_led_state(vc->vc_num,
+ (vc->vc_par[0] < 3) ? vc->vc_par[0] : 4);
+ break;
+ case 'r':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ if (!vc->vc_par[1])
+ vc->vc_par[1] = vc->vc_rows;
+ /* Minimum allowed region is 2 lines */
+ if (vc->vc_par[0] < vc->vc_par[1] &&
+ vc->vc_par[1] <= vc->vc_rows) {
+ vc->vc_top = vc->vc_par[0] - 1;
+ vc->vc_bottom = vc->vc_par[1];
+ gotoxay(vc, 0, 0);
}
+ break;
+ case 's':
+ save_cur(vc);
+ break;
+ case 'u':
+ restore_cur(vc);
+ break;
+ case 'X':
+ csi_X(vc);
+ break;
+ case '@':
+ csi_at(vc, vc->vc_par[0]);
+ break;
+ case ']':
+ csi_RSB(vc);
+ break;
+ }
+
+}
+
+static void vc_reset_params(struct vc_data *vc)
+{
+ memset(vc->vc_par, 0, sizeof(vc->vc_par));
+ vc->vc_npar = 0;
+}
+
+/* console_lock is held */
+static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, u8 c)
+{
+ /*
+ * Control characters can be used in the _middle_
+ * of an escape sequence, aside from ANSI control strings.
+ */
+ if (ansi_control_string(vc->vc_state) && c >= ASCII_IGNORE_FIRST &&
+ c <= ASCII_IGNORE_LAST)
+ return;
+
+ if (handle_ascii(tty, vc, c))
+ return;
+
+ switch(vc->vc_state) {
+ case ESesc: /* ESC */
+ handle_esc(tty, vc, c);
return;
- case ESnonstd:
- if (c=='P') { /* palette escape sequence */
- for (vc->vc_npar = 0; vc->vc_npar < NPAR; vc->vc_npar++)
- vc->vc_par[vc->vc_npar] = 0;
- vc->vc_npar = 0;
+ case ESnonstd: /* ESC ] aka OSC */
+ switch (c) {
+ case 'P': /* palette escape sequence */
+ vc_reset_params(vc);
vc->vc_state = ESpalette;
return;
- } else if (c=='R') { /* reset palette */
+ case 'R': /* reset palette */
reset_palette(vc);
- vc->vc_state = ESnormal;
- } else if (c>='0' && c<='9')
+ break;
+ case '0' ... '9':
vc->vc_state = ESosc;
- else
- vc->vc_state = ESnormal;
+ return;
+ }
+ vc->vc_state = ESnormal;
return;
- case ESpalette:
+ case ESpalette: /* ESC ] P aka OSC P */
if (isxdigit(c)) {
vc->vc_par[vc->vc_npar++] = hex_to_bin(c);
if (vc->vc_npar == 7) {
@@ -2273,16 +2604,14 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
} else
vc->vc_state = ESnormal;
return;
- case ESsquare:
- for (vc->vc_npar = 0; vc->vc_npar < NPAR; vc->vc_npar++)
- vc->vc_par[vc->vc_npar] = 0;
- vc->vc_npar = 0;
+ case ESsquare: /* ESC [ aka CSI, parameters or modifiers expected */
+ vc_reset_params(vc);
+
vc->vc_state = ESgetpars;
- if (c == '[') { /* Function key */
- vc->vc_state=ESfunckey;
- return;
- }
switch (c) {
+ case '[': /* Function key */
+ vc->vc_state = ESfunckey;
+ return;
case '?':
vc->vc_priv = EPdec;
return;
@@ -2298,182 +2627,44 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
}
vc->vc_priv = EPecma;
fallthrough;
- case ESgetpars:
- if (c == ';' && vc->vc_npar < NPAR - 1) {
- vc->vc_npar++;
- return;
- } else if (c>='0' && c<='9') {
+ case ESgetpars: /* ESC [ aka CSI, parameters expected */
+ switch (c) {
+ case ';':
+ if (vc->vc_npar < NPAR - 1) {
+ vc->vc_npar++;
+ return;
+ }
+ break;
+ case '0' ... '9':
vc->vc_par[vc->vc_npar] *= 10;
vc->vc_par[vc->vc_npar] += c - '0';
return;
}
- if (c >= 0x20 && c <= 0x3f) { /* 0x2x, 0x3a and 0x3c - 0x3f */
+ if (c >= ASCII_CSI_IGNORE_FIRST && c <= ASCII_CSI_IGNORE_LAST) {
vc->vc_state = EScsiignore;
return;
}
+
+ /* parameters done, handle the control char @c */
+
vc->vc_state = ESnormal;
- switch(c) {
- case 'h':
- if (vc->vc_priv <= EPdec)
- set_mode(vc, 1);
- return;
- case 'l':
- if (vc->vc_priv <= EPdec)
- set_mode(vc, 0);
- return;
- case 'c':
- if (vc->vc_priv == EPdec) {
- if (vc->vc_par[0])
- vc->vc_cursor_type =
- CUR_MAKE(vc->vc_par[0],
- vc->vc_par[1],
- vc->vc_par[2]);
- else
- vc->vc_cursor_type = cur_default;
- return;
- }
- break;
- case 'm':
- if (vc->vc_priv == EPdec) {
- clear_selection();
- if (vc->vc_par[0])
- vc->vc_complement_mask = vc->vc_par[0] << 8 | vc->vc_par[1];
- else
- vc->vc_complement_mask = vc->vc_s_complement_mask;
- return;
- }
- break;
- case 'n':
- if (vc->vc_priv == EPecma) {
- if (vc->vc_par[0] == 5)
- status_report(tty);
- else if (vc->vc_par[0] == 6)
- cursor_report(vc, tty);
- }
- return;
- }
- if (vc->vc_priv != EPecma) {
- vc->vc_priv = EPecma;
- return;
- }
- switch(c) {
- case 'G': case '`':
- if (vc->vc_par[0])
- vc->vc_par[0]--;
- gotoxy(vc, vc->vc_par[0], vc->state.y);
- return;
- case 'A':
- if (!vc->vc_par[0])
- vc->vc_par[0]++;
- gotoxy(vc, vc->state.x, vc->state.y - vc->vc_par[0]);
- return;
- case 'B': case 'e':
- if (!vc->vc_par[0])
- vc->vc_par[0]++;
- gotoxy(vc, vc->state.x, vc->state.y + vc->vc_par[0]);
- return;
- case 'C': case 'a':
- if (!vc->vc_par[0])
- vc->vc_par[0]++;
- gotoxy(vc, vc->state.x + vc->vc_par[0], vc->state.y);
- return;
- case 'D':
- if (!vc->vc_par[0])
- vc->vc_par[0]++;
- gotoxy(vc, vc->state.x - vc->vc_par[0], vc->state.y);
- return;
- case 'E':
- if (!vc->vc_par[0])
- vc->vc_par[0]++;
- gotoxy(vc, 0, vc->state.y + vc->vc_par[0]);
- return;
- case 'F':
- if (!vc->vc_par[0])
- vc->vc_par[0]++;
- gotoxy(vc, 0, vc->state.y - vc->vc_par[0]);
- return;
- case 'd':
- if (vc->vc_par[0])
- vc->vc_par[0]--;
- gotoxay(vc, vc->state.x ,vc->vc_par[0]);
- return;
- case 'H': case 'f':
- if (vc->vc_par[0])
- vc->vc_par[0]--;
- if (vc->vc_par[1])
- vc->vc_par[1]--;
- gotoxay(vc, vc->vc_par[1], vc->vc_par[0]);
- return;
- case 'J':
- csi_J(vc, vc->vc_par[0]);
- return;
- case 'K':
- csi_K(vc, vc->vc_par[0]);
- return;
- case 'L':
- csi_L(vc, vc->vc_par[0]);
- return;
- case 'M':
- csi_M(vc, vc->vc_par[0]);
- return;
- case 'P':
- csi_P(vc, vc->vc_par[0]);
- return;
- case 'c':
- if (!vc->vc_par[0])
- respond_ID(tty);
- return;
- case 'g':
- if (!vc->vc_par[0] && vc->state.x < VC_TABSTOPS_COUNT)
- set_bit(vc->state.x, vc->vc_tab_stop);
- else if (vc->vc_par[0] == 3)
- bitmap_zero(vc->vc_tab_stop, VC_TABSTOPS_COUNT);
- return;
- case 'm':
- csi_m(vc);
- return;
- case 'q': /* DECLL - but only 3 leds */
- /* map 0,1,2,3 to 0,1,2,4 */
- if (vc->vc_par[0] < 4)
- vt_set_led_state(vc->vc_num,
- (vc->vc_par[0] < 3) ? vc->vc_par[0] : 4);
- return;
- case 'r':
- if (!vc->vc_par[0])
- vc->vc_par[0]++;
- if (!vc->vc_par[1])
- vc->vc_par[1] = vc->vc_rows;
- /* Minimum allowed region is 2 lines */
- if (vc->vc_par[0] < vc->vc_par[1] &&
- vc->vc_par[1] <= vc->vc_rows) {
- vc->vc_top = vc->vc_par[0] - 1;
- vc->vc_bottom = vc->vc_par[1];
- gotoxay(vc, 0, 0);
- }
- return;
- case 's':
- save_cur(vc);
- return;
- case 'u':
- restore_cur(vc);
- return;
- case 'X':
- csi_X(vc, vc->vc_par[0]);
+
+ switch (vc->vc_priv) {
+ case EPdec:
+ csi_DEC(tty, vc, c);
return;
- case '@':
- csi_at(vc, vc->vc_par[0]);
+ case EPecma:
+ csi_ECMA(tty, vc, c);
return;
- case ']': /* setterm functions */
- setterm_command(vc);
+ default:
return;
}
- return;
case EScsiignore:
- if (c >= 20 && c <= 0x3f)
+ if (c >= ASCII_CSI_IGNORE_FIRST && c <= ASCII_CSI_IGNORE_LAST)
return;
vc->vc_state = ESnormal;
return;
- case ESpercent:
+ case ESpercent: /* ESC % */
vc->vc_state = ESnormal;
switch (c) {
case '@': /* defined in ISO 2022 */
@@ -2485,36 +2676,36 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
return;
}
return;
- case ESfunckey:
+ case ESfunckey: /* ESC [ [ aka CSI [ */
vc->vc_state = ESnormal;
return;
- case EShash:
+ case EShash: /* ESC # */
vc->vc_state = ESnormal;
if (c == '8') {
/* DEC screen alignment test. kludge :-) */
vc->vc_video_erase_char =
(vc->vc_video_erase_char & 0xff00) | 'E';
- csi_J(vc, 2);
+ csi_J(vc, CSI_J_VISIBLE);
vc->vc_video_erase_char =
(vc->vc_video_erase_char & 0xff00) | ' ';
do_update_region(vc, vc->vc_origin, vc->vc_screenbuf_size / 2);
}
return;
- case ESsetG0:
+ case ESsetG0: /* ESC ( */
vc_setGx(vc, 0, c);
vc->vc_state = ESnormal;
return;
- case ESsetG1:
+ case ESsetG1: /* ESC ) */
vc_setGx(vc, 1, c);
vc->vc_state = ESnormal;
return;
- case ESapc:
+ case ESapc: /* ESC _ */
return;
- case ESosc:
+ case ESosc: /* ESC ] [0-9] aka OSC [0-9] */
return;
- case ESpm:
+ case ESpm: /* ESC ^ */
return;
- case ESdcs:
+ case ESdcs: /* ESC P */
return;
default:
vc->vc_state = ESnormal;
@@ -2588,33 +2779,39 @@ static inline int vc_translate_ascii(const struct vc_data *vc, int c)
/**
- * vc_sanitize_unicode - Replace invalid Unicode code points with U+FFFD
- * @c: the received character, or U+FFFD for invalid sequences.
+ * vc_sanitize_unicode - Replace invalid Unicode code points with ``U+FFFD``
+ * @c: the received code point
*/
static inline int vc_sanitize_unicode(const int c)
{
- if ((c >= 0xd800 && c <= 0xdfff) || c == 0xfffe || c == 0xffff)
+ if (c >= 0xd800 && c <= 0xdfff)
return 0xfffd;
return c;
}
/**
- * vc_translate_unicode - Combine UTF-8 into Unicode in @vc_utf_char
+ * vc_translate_unicode - Combine UTF-8 into Unicode in &vc_data.vc_utf_char
* @vc: virtual console
- * @c: character to translate
- * @rescan: we return true if we need more (continuation) data
+ * @c: UTF-8 byte to translate
+ * @rescan: set to true iff @c wasn't consumed here and needs to be re-processed
+ *
+ * * &vc_data.vc_utf_char is the Unicode code point being constructed.
+ * * &vc_data.vc_utf_count is the number of continuation bytes still expected to
+ * arrive.
+ * * &vc_data.vc_npar is the number of continuation bytes arrived so far.
*
- * @vc_utf_char is the being-constructed unicode character.
- * @vc_utf_count is the number of continuation bytes still expected to arrive.
- * @vc_npar is the number of continuation bytes arrived so far.
+ * Return:
+ * * %-1 - Input OK so far, @c consumed, further bytes expected.
+ * * %0xFFFD - Possibility 1: input invalid, @c may have been consumed (see
+ * desc. of @rescan). Possibility 2: input OK, @c consumed,
+ * ``U+FFFD`` is the resulting code point. ``U+FFFD`` is valid,
+ * ``REPLACEMENT CHARACTER``.
+ * * otherwise - Input OK, @c consumed, resulting code point returned.
*/
static int vc_translate_unicode(struct vc_data *vc, int c, bool *rescan)
{
- static const u32 utf8_length_changes[] = {
- 0x0000007f, 0x000007ff, 0x0000ffff,
- 0x001fffff, 0x03ffffff, 0x7fffffff
- };
+ static const u32 utf8_length_changes[] = {0x7f, 0x7ff, 0xffff, 0x10ffff};
/* Continuation byte received */
if ((c & 0xc0) == 0x80) {
@@ -2660,14 +2857,7 @@ static int vc_translate_unicode(struct vc_data *vc, int c, bool *rescan)
} else if ((c & 0xf8) == 0xf0) {
vc->vc_utf_count = 3;
vc->vc_utf_char = (c & 0x07);
- } else if ((c & 0xfc) == 0xf8) {
- vc->vc_utf_count = 4;
- vc->vc_utf_char = (c & 0x03);
- } else if ((c & 0xfe) == 0xfc) {
- vc->vc_utf_count = 5;
- vc->vc_utf_char = (c & 0x01);
} else {
- /* 254 and 255 are invalid */
return 0xfffd;
}
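
The kernel-doc rewrite above spells out the accumulator contract (-1 for "need more bytes", 0xFFFD for invalid input or the replacement character, otherwise the finished code point). As a reading aid, here is a minimal user-space sketch of that scheme; the utf8_state struct, its field names and the utf8_feed() helper are illustrative inventions, not the driver's code, the rescan handling is only approximated, and surrogate filtering is left out (the driver does it separately in vc_sanitize_unicode()).

#include <stdio.h>

struct utf8_state {
	unsigned int chr;	/* code point being constructed */
	unsigned int want;	/* continuation bytes still expected */
	unsigned int got;	/* continuation bytes received so far */
};

static int utf8_feed(struct utf8_state *s, unsigned char c)
{
	static const unsigned int limit[] = { 0x7f, 0x7ff, 0xffff, 0x10ffff };

	if ((c & 0xc0) == 0x80) {			/* continuation byte */
		if (!s->want)
			return 0xfffd;			/* nothing to continue */
		s->chr = (s->chr << 6) | (c & 0x3f);
		s->got++;
		if (--s->want)
			return -1;			/* sequence still incomplete */
		/* reject overlong encodings and anything above U+10FFFF */
		if (s->chr <= limit[s->got - 1] || s->chr > 0x10ffff)
			return 0xfffd;
		return (int)s->chr;
	}
	if (s->want) {					/* sequence interrupted */
		s->want = 0;
		return 0xfffd;				/* caller may re-feed c */
	}
	if (c < 0x80)					/* ASCII is complete by itself */
		return c;
	if ((c & 0xe0) == 0xc0)      { s->want = 1; s->chr = c & 0x1f; }
	else if ((c & 0xf0) == 0xe0) { s->want = 2; s->chr = c & 0x0f; }
	else if ((c & 0xf8) == 0xf0) { s->want = 3; s->chr = c & 0x07; }
	else return 0xfffd;				/* 0xf8..0xff never start a sequence */
	s->got = 0;
	return -1;
}

int main(void)
{
	struct utf8_state s = { 0 };
	const unsigned char euro[] = { 0xe2, 0x82, 0xac };	/* U+20AC, EURO SIGN */
	int cp = -1;
	unsigned int i;

	for (i = 0; i < sizeof(euro); i++)
		cp = utf8_feed(&s, euro[i]);
	printf("U+%04X\n", cp);				/* prints U+20AC */
	return 0;
}
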
@@ -2711,9 +2901,13 @@ static bool vc_is_control(struct vc_data *vc, int tc, int c)
* as cursor movement) and should not be displayed as a glyph unless
* the disp_ctrl mode is explicitly enabled.
*/
- static const u32 CTRL_ACTION = 0x0d00ff81;
+ static const u32 CTRL_ACTION = BIT(ASCII_NULL) |
+ GENMASK(ASCII_SHIFTIN, ASCII_BELL) | BIT(ASCII_CANCEL) |
+ BIT(ASCII_SUBSTITUTE) | BIT(ASCII_ESCAPE);
/* Cannot be overridden by disp_ctrl */
- static const u32 CTRL_ALWAYS = 0x0800f501;
+ static const u32 CTRL_ALWAYS = BIT(ASCII_NULL) | BIT(ASCII_BACKSPACE) |
+ BIT(ASCII_LINEFEED) | BIT(ASCII_SHIFTIN) | BIT(ASCII_SHIFTOUT) |
+ BIT(ASCII_CAR_RET) | BIT(ASCII_FORMFEED) | BIT(ASCII_ESCAPE);
if (vc->vc_state != ESnormal)
return true;
@@ -2730,17 +2924,17 @@ static bool vc_is_control(struct vc_data *vc, int tc, int c)
* useless without them; to display an arbitrary font position use the
* direct-to-font zone in UTF-8 mode.
*/
- if (c < 32) {
+ if (c < BITS_PER_TYPE(CTRL_ALWAYS)) {
if (vc->vc_disp_ctrl)
return CTRL_ALWAYS & BIT(c);
else
return vc->vc_utf || (CTRL_ACTION & BIT(c));
}
- if (c == 127 && !vc->vc_disp_ctrl)
+ if (c == ASCII_DEL && !vc->vc_disp_ctrl)
return true;
- if (c == 128 + 27)
+ if (c == ASCII_EXT_CSI)
return true;
return false;
@@ -2852,7 +3046,7 @@ static int do_con_write(struct tty_struct *tty, const u8 *buf, int count)
};
int c, tc, n = 0;
unsigned int currcons;
- struct vc_data *vc;
+ struct vc_data *vc = tty->driver_data;
struct vt_notifier_param param;
bool rescan;
@@ -2860,13 +3054,6 @@ static int do_con_write(struct tty_struct *tty, const u8 *buf, int count)
return count;
console_lock();
- vc = tty->driver_data;
- if (vc == NULL) {
- pr_err("vt: argh, driver_data is NULL !\n");
- console_unlock();
- return 0;
- }
-
currcons = vc->vc_num;
if (!vc_cons_allocated(currcons)) {
/* could this happen? */
@@ -2883,7 +3070,7 @@ static int do_con_write(struct tty_struct *tty, const u8 *buf, int count)
param.vc = vc;
while (!tty->flow.stopped && count) {
- int orig = *buf;
+ u8 orig = *buf;
buf++;
n++;
count--;
@@ -2992,16 +3179,16 @@ struct tty_driver *console_driver;
#ifdef CONFIG_VT_CONSOLE
/**
- * vt_kmsg_redirect() - Sets/gets the kernel message console
- * @new: The new virtual terminal number or -1 if the console should stay
- * unchanged
+ * vt_kmsg_redirect() - sets/gets the kernel message console
+ * @new: the new virtual terminal number or -1 if the console should stay
+ * unchanged
*
* By default, the kernel messages are always printed on the current virtual
* console. However, the user may modify that default with the
- * TIOCL_SETKMSGREDIRECT ioctl call.
+ * %TIOCL_SETKMSGREDIRECT ioctl call.
*
* This function sets the kernel message console to be @new. It returns the old
- * virtual console number. The virtual terminal number 0 (both as parameter and
+ * virtual console number. The virtual terminal number %0 (both as parameter and
* return value) means no redirection (i.e. always printed on the currently
* active console).
*
@@ -3009,8 +3196,8 @@ struct tty_driver *console_driver;
* value is not modified. You may use the macro vt_get_kmsg_redirect() in that
* case to make the code more understandable.
*
- * When the kernel is compiled without CONFIG_VT_CONSOLE, this function ignores
- * the parameter and always returns 0.
+ * When the kernel is compiled without %CONFIG_VT_CONSOLE, this function ignores
+ * the parameter and always returns %0.
*/
int vt_kmsg_redirect(int new)
{
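
For context, the redirect described above is normally driven from user space through the TIOCL_SETKMSGREDIRECT subcode of the TIOCLINUX ioctl, which ends up in vt_kmsg_redirect(). A minimal sketch, assuming the usual definitions from <sys/ioctl.h> and <linux/tiocl.h> and a process with CAP_SYS_ADMIN:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/tiocl.h>

int main(void)
{
	/* byte 0: subcode, byte 1: target VT (0 would mean "no redirection") */
	char request[2] = { TIOCL_SETKMSGREDIRECT, 3 };
	int fd = open("/dev/tty0", O_RDWR);

	if (fd < 0 || ioctl(fd, TIOCLINUX, request) < 0) {
		perror("kmsg redirect");
		return 1;
	}
	close(fd);
	return 0;
}
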
@@ -3065,22 +3252,23 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
cnt = 0;
while (count--) {
c = *b++;
- if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
+ if (c == ASCII_LINEFEED || c == ASCII_CAR_RET ||
+ c == ASCII_BACKSPACE || vc->vc_need_wrap) {
if (cnt && con_is_visible(vc))
vc->vc_sw->con_putcs(vc, start, cnt, vc->state.y, start_x);
cnt = 0;
- if (c == 8) { /* backspace */
+ if (c == ASCII_BACKSPACE) {
bs(vc);
start = (ushort *)vc->vc_pos;
start_x = vc->state.x;
continue;
}
- if (c != 13)
+ if (c != ASCII_CAR_RET)
lf(vc);
cr(vc);
start = (ushort *)vc->vc_pos;
start_x = vc->state.x;
- if (c == 10 || c == 13)
+ if (c == ASCII_LINEFEED || c == ASCII_CAR_RET)
continue;
}
vc_uniscr_putc(vc, c);
@@ -3144,6 +3332,8 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
{
char type, data;
char __user *p = (char __user *)arg;
+ void __user *param_aligned32 = (u32 __user *)arg + 1;
+ void __user *param = (void __user *)arg + 1;
int lines;
int ret;
@@ -3157,8 +3347,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
case TIOCL_SETSEL:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- return set_selection_user((struct tiocl_selection
- __user *)(p+1), tty);
+ return set_selection_user(param, tty);
case TIOCL_PASTESEL:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -3171,10 +3360,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
case TIOCL_SELLOADLUT:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- console_lock();
- ret = sel_loadlut(p);
- console_unlock();
- break;
+ return sel_loadlut(param_aligned32);
case TIOCL_GETSHIFTSTATE:
/*
* Make it possible to react to Shift+Mousebutton. Note that
@@ -3190,10 +3376,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
console_unlock();
return put_user(data, p);
case TIOCL_SETVESABLANK:
- console_lock();
- ret = set_vesa_blanking(p);
- console_unlock();
- break;
+ return set_vesa_blanking(param);
case TIOCL_GETKMSGREDIRECT:
data = vt_get_kmsg_redirect();
return put_user(data, p);
@@ -3214,7 +3397,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
*/
return fg_console;
case TIOCL_SCROLLCONSOLE:
- if (get_user(lines, (s32 __user *)(p+4)))
+ if (get_user(lines, (s32 __user *)param_aligned32))
return -EFAULT;
/*
@@ -3312,16 +3495,13 @@ static void con_start(struct tty_struct *tty)
static void con_flush_chars(struct tty_struct *tty)
{
- struct vc_data *vc;
+ struct vc_data *vc = tty->driver_data;
if (in_interrupt()) /* from flush_to_ldisc */
return;
- /* if we race with con_close(), vt may be null */
console_lock();
- vc = tty->driver_data;
- if (vc)
- set_cursor(vc);
+ set_cursor(vc);
console_unlock();
}
@@ -3471,7 +3651,7 @@ static int __init con_init(void)
vc_cons[currcons].d = vc = kzalloc(sizeof(struct vc_data), GFP_NOWAIT);
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
tty_port_init(&vc->port);
- visual_init(vc, currcons, 1);
+ visual_init(vc, currcons, true);
/* Assuming vc->vc_{cols,rows,screenbuf_size} are sane here. */
vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT);
vc_init(vc, currcons || !vc->vc_sw->con_save_screen);
@@ -3481,7 +3661,7 @@ static int __init con_init(void)
set_origin(vc);
save_screen(vc);
gotoxy(vc, vc->state.x, vc->state.y);
- csi_J(vc, 0);
+ csi_J(vc, CSI_J_CURSOR_TO_END);
update_screen(vc);
pr_info("Console: %s %s %dx%d\n",
vc->vc_can_do_color ? "colour" : "mono",
@@ -3640,7 +3820,7 @@ static int do_bind_con_driver(const struct consw *csw, int first, int last,
old_was_color = vc->vc_can_do_color;
vc->vc_sw->con_deinit(vc);
vc->vc_origin = (unsigned long)vc->vc_screenbuf;
- visual_init(vc, i, 0);
+ visual_init(vc, i, false);
set_origin(vc);
update_attr(vc);
@@ -3930,7 +4110,7 @@ static void vtconsole_deinit_device(struct con_driver *con)
* RETURNS: zero if unbound, nonzero if bound
*
* Drivers can call this and if zero, they should release
- * all resources allocated on con_startup()
+ * all resources allocated on &consw.con_startup()
*/
int con_is_bound(const struct consw *csw)
{
@@ -3970,15 +4150,9 @@ EXPORT_SYMBOL(con_is_visible);
* Called when the console is taken over by the kernel debugger, this
* function needs to save the current console state, then put the console
* into a state suitable for the kernel debugger.
- *
- * RETURNS:
- * Zero on success, nonzero if a failure occurred when trying to prepare
- * the console for the debugger.
*/
-int con_debug_enter(struct vc_data *vc)
+void con_debug_enter(struct vc_data *vc)
{
- int ret = 0;
-
saved_fg_console = fg_console;
saved_last_console = last_console;
saved_want_console = want_console;
@@ -3987,7 +4161,7 @@ int con_debug_enter(struct vc_data *vc)
vc->vc_mode = KD_TEXT;
console_blanked = 0;
if (vc->vc_sw->con_debug_enter)
- ret = vc->vc_sw->con_debug_enter(vc);
+ vc->vc_sw->con_debug_enter(vc);
#ifdef CONFIG_KGDB_KDB
/* Set the initial LINES variable if it is not already set */
if (vc->vc_rows < 999) {
@@ -4017,7 +4191,6 @@ int con_debug_enter(struct vc_data *vc)
}
}
#endif /* CONFIG_KGDB_KDB */
- return ret;
}
EXPORT_SYMBOL_GPL(con_debug_enter);
@@ -4026,15 +4199,10 @@ EXPORT_SYMBOL_GPL(con_debug_enter);
*
* Restore the console state to what it was before the kernel debugger
* was invoked.
- *
- * RETURNS:
- * Zero on success, nonzero if a failure occurred when trying to restore
- * the console.
*/
-int con_debug_leave(void)
+void con_debug_leave(void)
{
struct vc_data *vc;
- int ret = 0;
fg_console = saved_fg_console;
last_console = saved_last_console;
@@ -4044,8 +4212,7 @@ int con_debug_leave(void)
vc = vc_cons[fg_console].d;
if (vc->vc_sw->con_debug_leave)
- ret = vc->vc_sw->con_debug_leave(vc);
- return ret;
+ vc->vc_sw->con_debug_leave(vc);
}
EXPORT_SYMBOL_GPL(con_debug_leave);
@@ -4275,14 +4442,17 @@ postcore_initcall(vtconsole_class_init);
* Screen blanking
*/
-static int set_vesa_blanking(char __user *p)
+static int set_vesa_blanking(u8 __user *mode_user)
{
- unsigned int mode;
+ u8 mode;
- if (get_user(mode, p + 1))
+ if (get_user(mode, mode_user))
return -EFAULT;
- vesa_blank_mode = (mode < 4) ? mode : 0;
+ console_lock();
+ vesa_blank_mode = (mode <= VESA_BLANK_MAX) ? mode : VESA_NO_BLANKING;
+ console_unlock();
+
return 0;
}
@@ -4307,7 +4477,7 @@ void do_blank_screen(int entering_gfx)
if (entering_gfx) {
hide_cursor(vc);
save_screen(vc);
- vc->vc_sw->con_blank(vc, -1, 1);
+ vc->vc_sw->con_blank(vc, VESA_VSYNC_SUSPEND, 1);
console_blanked = fg_console + 1;
blank_state = blank_off;
set_origin(vc);
@@ -4328,7 +4498,8 @@ void do_blank_screen(int entering_gfx)
save_screen(vc);
/* In case we need to reset origin, blanking hook returns 1 */
- i = vc->vc_sw->con_blank(vc, vesa_off_interval ? 1 : (vesa_blank_mode + 1), 0);
+ i = vc->vc_sw->con_blank(vc, vesa_off_interval ? VESA_VSYNC_SUSPEND :
+ (vesa_blank_mode + 1), 0);
console_blanked = fg_console + 1;
if (i)
set_origin(vc);
@@ -4379,7 +4550,7 @@ void do_unblank_screen(int leaving_gfx)
}
console_blanked = 0;
- if (vc->vc_sw->con_blank(vc, 0, leaving_gfx))
+ if (vc->vc_sw->con_blank(vc, VESA_NO_BLANKING, leaving_gfx))
/* Low-level driver cannot restore -> do it ourselves */
update_screen(vc);
if (console_blank_hook)
@@ -4584,7 +4755,7 @@ out:
return rc;
}
-static int con_font_set(struct vc_data *vc, struct console_font_op *op)
+static int con_font_set(struct vc_data *vc, const struct console_font_op *op)
{
struct console_font font;
int rc = -EINVAL;
@@ -4748,43 +4919,3 @@ void vcs_scr_updated(struct vc_data *vc)
{
notify_update(vc);
}
-
-void vc_scrolldelta_helper(struct vc_data *c, int lines,
- unsigned int rolled_over, void *base, unsigned int size)
-{
- unsigned long ubase = (unsigned long)base;
- ptrdiff_t scr_end = (void *)c->vc_scr_end - base;
- ptrdiff_t vorigin = (void *)c->vc_visible_origin - base;
- ptrdiff_t origin = (void *)c->vc_origin - base;
- int margin = c->vc_size_row * 4;
- int from, wrap, from_off, avail;
-
- /* Turn scrollback off */
- if (!lines) {
- c->vc_visible_origin = c->vc_origin;
- return;
- }
-
- /* Do we have already enough to allow jumping from 0 to the end? */
- if (rolled_over > scr_end + margin) {
- from = scr_end;
- wrap = rolled_over + c->vc_size_row;
- } else {
- from = 0;
- wrap = size;
- }
-
- from_off = (vorigin - from + wrap) % wrap + lines * c->vc_size_row;
- avail = (origin - from + wrap) % wrap;
-
- /* Only a little piece would be left? Show all incl. the piece! */
- if (avail < 2 * margin)
- margin = 0;
- if (from_off < margin)
- from_off = 0;
- if (from_off > avail - margin)
- from_off = avail;
-
- c->vc_visible_origin = ubase + (from + from_off) % wrap;
-}
-EXPORT_SYMBOL_GPL(vc_scrolldelta_helper);
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 8c685b5014044..4b91072f3a4e9 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -714,8 +714,7 @@ static int vt_resizex(struct vc_data *vc, struct vt_consize __user *cs)
vcp->vc_scan_lines = v.v_vlin;
if (v.v_clin)
vcp->vc_cell_height = v.v_clin;
- vcp->vc_resize_user = 1;
- ret = vc_resize(vcp, v.v_cols, v.v_rows);
+ ret = __vc_resize(vcp, v.v_cols, v.v_rows, true);
if (ret) {
vcp->vc_scan_lines = save_scan_lines;
vcp->vc_cell_height = save_cell_height;
@@ -923,9 +922,8 @@ int vt_ioctl(struct tty_struct *tty,
vc = vc_cons[i].d;
if (vc) {
- vc->vc_resize_user = 1;
/* FIXME: review v tty lock */
- vc_resize(vc_cons[i].d, cc, ll);
+ __vc_resize(vc_cons[i].d, cc, ll, true);
}
}
console_unlock();
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 0787456c2b892..768bf87cd80d3 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -94,7 +94,7 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
val &= ~MCQ_CFG_MAC_MASK;
- val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);
+ val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds - 1);
ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
@@ -258,9 +258,7 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis);
* Current MCQ specification doesn't provide a Task Tag or its equivalent in
* the Completion Queue Entry. Find the Task Tag using an indirect method.
*/
-static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
- struct ufs_hw_queue *hwq,
- struct cq_entry *cqe)
+static int ufshcd_mcq_get_tag(struct ufs_hba *hba, struct cq_entry *cqe)
{
u64 addr;
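
The "indirect method" referred to above amounts to recovering the tag from the command descriptor address carried in the completion queue entry, since the descriptors sit in one array indexed by task tag. A self-contained illustration of that arithmetic; the names and numbers below are made up for the example and are not the driver's fields:

#include <stdint.h>
#include <stdio.h>

/* tag = (descriptor address carried in the CQE - descriptor list base) / entry size */
static int cqe_to_tag(uint64_t cqe_ucd_base, uint64_t ucdl_dma_base, uint64_t ucd_size)
{
	return (int)((cqe_ucd_base - ucdl_dma_base) / ucd_size);
}

int main(void)
{
	/* hypothetical numbers: list base 0x80000000, 1 KiB per descriptor */
	printf("%d\n", cqe_to_tag(0x80000C00ULL, 0x80000000ULL, 0x400));	/* prints 3 */
	return 0;
}
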
@@ -278,7 +276,7 @@ static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
struct ufs_hw_queue *hwq)
{
struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
- int tag = ufshcd_mcq_get_tag(hba, hwq, cqe);
+ int tag = ufshcd_mcq_get_tag(hba, cqe);
if (cqe->command_desc_base_addr) {
ufshcd_compl_one_cqe(hba, tag, cqe);
@@ -399,6 +397,12 @@ void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);
+void ufshcd_mcq_enable(struct ufs_hba *hba)
+{
+ ufshcd_rmwl(hba, MCQ_MODE_SELECT, MCQ_MODE_SELECT, REG_UFS_MEM_CFG);
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_enable);
+
void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
{
ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index e6d12289e0170..3d049967f6bc4 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -405,6 +405,53 @@ static ssize_t wb_flush_threshold_store(struct device *dev,
return count;
}
+/**
+ * pm_qos_enable_show - sysfs handler to show pm qos enable value
+ * @dev: device associated with the UFS controller
+ * @attr: sysfs attribute handle
+ * @buf: buffer for sysfs file
+ *
+ * Print 1 if PM QoS feature is enabled, 0 if disabled.
+ *
+ * Return: the number of characters written to @buf.
+ */
+static ssize_t pm_qos_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", hba->pm_qos_enabled);
+}
+
+/**
+ * pm_qos_enable_store - sysfs handler to store value
+ * @dev: device associated with the UFS controller
+ * @attr: sysfs attribute handle
+ * @buf: buffer for sysfs file
+ * @count: number of characters in @buf
+ *
+ * Write 0 to disable PM QoS or 1 to enable it. The default state is enabled (1).
+ *
+ * Return: number of characters consumed on success, a negative error code
+ * upon failure.
+ */
+static ssize_t pm_qos_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ bool value;
+
+ if (kstrtobool(buf, &value))
+ return -EINVAL;
+
+ if (value)
+ ufshcd_pm_qos_init(hba);
+ else
+ ufshcd_pm_qos_exit(hba);
+
+ return count;
+}
+
static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
@@ -416,6 +463,7 @@ static DEVICE_ATTR_RW(wb_on);
static DEVICE_ATTR_RW(enable_wb_buf_flush);
static DEVICE_ATTR_RW(wb_flush_threshold);
static DEVICE_ATTR_RW(rtc_update_ms);
+static DEVICE_ATTR_RW(pm_qos_enable);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
@@ -429,6 +477,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_enable_wb_buf_flush.attr,
&dev_attr_wb_flush_threshold.attr,
&dev_attr_rtc_update_ms.attr,
+ &dev_attr_pm_qos_enable.attr,
NULL
};
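
For reference, the new attribute can be toggled from user space like any other ufshcd sysfs node. A small sketch follows; the controller directory used here is a placeholder and differs per platform:

#include <stdio.h>

int main(int argc, char **argv)
{
	/* hypothetical default path; pass the real controller directory as argv[1] */
	const char *dir = argc > 1 ? argv[1] : "/sys/devices/platform/ufshc";
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/pm_qos_enable", dir);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fputs("0\n", f);	/* 0 disables PM QoS voting, 1 re-enables it */
	fclose(f);
	return 0;
}
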
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index eac7fff6992d0..a0f8e930167d7 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1015,6 +1015,48 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
}
/**
+ * ufshcd_pm_qos_init - initialize PM QoS request
+ * @hba: per adapter instance
+ */
+void ufshcd_pm_qos_init(struct ufs_hba *hba)
+{
+
+ if (hba->pm_qos_enabled)
+ return;
+
+ cpu_latency_qos_add_request(&hba->pm_qos_req, PM_QOS_DEFAULT_VALUE);
+
+ if (cpu_latency_qos_request_active(&hba->pm_qos_req))
+ hba->pm_qos_enabled = true;
+}
+
+/**
+ * ufshcd_pm_qos_exit - remove request from PM QoS
+ * @hba: per adapter instance
+ */
+void ufshcd_pm_qos_exit(struct ufs_hba *hba)
+{
+ if (!hba->pm_qos_enabled)
+ return;
+
+ cpu_latency_qos_remove_request(&hba->pm_qos_req);
+ hba->pm_qos_enabled = false;
+}
+
+/**
+ * ufshcd_pm_qos_update - update PM QoS request
+ * @hba: per adapter instance
+ * @on: If True, vote for perf PM QoS mode otherwise power save mode
+ */
+static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
+{
+ if (!hba->pm_qos_enabled)
+ return;
+
+ cpu_latency_qos_update_request(&hba->pm_qos_req, on ? 0 : PM_QOS_DEFAULT_VALUE);
+}
+
+/**
* ufshcd_set_clk_freq - set UFS controller clock frequencies
* @hba: per adapter instance
 * @scale_up: If True, set max possible frequency, otherwise set low frequency
@@ -1160,8 +1202,11 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
hba->devfreq->previous_freq);
else
ufshcd_set_clk_freq(hba, !scale_up);
+ goto out;
}
+ ufshcd_pm_qos_update(hba, scale_up);
+
out:
trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
(scale_up ? "up" : "down"),
@@ -3172,7 +3217,9 @@ retry:
/* MCQ mode */
if (is_mcq_enabled(hba)) {
- err = ufshcd_clear_cmd(hba, lrbp->task_tag);
+ /* successfully cleared the command, retry if needed */
+ if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
+ err = -EAGAIN;
hba->dev_cmd.complete = NULL;
return err;
}
@@ -5602,7 +5649,6 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
unsigned long flags;
- u32 hwq_num, utag;
int tag;
for (tag = 0; tag < hba->nutrs; tag++) {
@@ -5612,9 +5658,7 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
test_bit(SCMD_STATE_COMPLETE, &cmd->state))
continue;
- utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
- hwq_num = blk_mq_unique_tag_to_hwq(utag);
- hwq = &hba->uhq[hwq_num];
+ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
if (force_compl) {
ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
@@ -7987,11 +8031,13 @@ out:
static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
{
+ struct Scsi_Host *shost = sdev->host;
+
scsi_autopm_get_device(sdev);
blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
if (sdev->rpm_autosuspend)
pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
- RPM_AUTOSUSPEND_DELAY_MS);
+ shost->rpm_autosuspend_delay);
scsi_autopm_put_device(sdev);
}
@@ -8801,9 +8847,7 @@ static void ufshcd_config_mcq(struct ufs_hba *hba)
hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
- /* Select MCQ mode */
- ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
- REG_UFS_MEM_CFG);
+ ufshcd_mcq_enable(hba);
hba->mcq_enabled = true;
dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
@@ -9065,7 +9109,6 @@ static const struct scsi_host_template ufshcd_driver_template = {
.track_queue_depth = 1,
.skip_settle_delay = 1,
.sdev_groups = ufshcd_driver_groups,
- .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
};
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
@@ -9280,6 +9323,8 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
if (ret)
return ret;
+ if (!ufshcd_is_clkscaling_supported(hba))
+ ufshcd_pm_qos_update(hba, on);
out:
if (ret) {
list_for_each_entry(clki, head, list) {
@@ -9457,6 +9502,7 @@ out:
static void ufshcd_hba_exit(struct ufs_hba *hba)
{
if (hba->is_powered) {
+ ufshcd_pm_qos_exit(hba);
ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
if (hba->eh_wq)
@@ -9476,7 +9522,17 @@ static int ufshcd_execute_start_stop(struct scsi_device *sdev,
struct scsi_sense_hdr *sshdr)
{
const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
+ struct scsi_failure failure_defs[] = {
+ {
+ .allowed = 2,
+ .result = SCMD_FAILURE_RESULT_ANY,
+ },
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args args = {
+ .failures = &failures,
.sshdr = sshdr,
.req_flags = BLK_MQ_REQ_PM,
.scmd_flags = SCMD_FAIL_IF_RECOVERING,
@@ -9502,7 +9558,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
struct scsi_sense_hdr sshdr;
struct scsi_device *sdp;
unsigned long flags;
- int ret, retries;
+ int ret;
spin_lock_irqsave(hba->host->host_lock, flags);
sdp = hba->ufs_device_wlun;
@@ -9528,15 +9584,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* callbacks hence set the RQF_PM flag so that it doesn't resume the
 * already suspended children.
*/
- for (retries = 3; retries > 0; --retries) {
- ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
- /*
- * scsi_execute() only returns a negative value if the request
- * queue is dying.
- */
- if (ret <= 0)
- break;
- }
+ ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
if (ret) {
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
@@ -9745,7 +9793,10 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/* UFS device & link must be active before we enter in this function */
if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
- ret = -EINVAL;
+ /* Wait err handler finish or trigger err recovery */
+ if (!ufshcd_eh_in_progress(hba))
+ ufshcd_force_error_recovery(hba);
+ ret = -EBUSY;
goto enable_scaling;
}
@@ -10109,6 +10160,7 @@ static int ufshcd_suspend(struct ufs_hba *hba)
ufshcd_vreg_set_lpm(hba);
/* Put the host controller in low power mode if possible */
ufshcd_hba_vreg_set_lpm(hba);
+ ufshcd_pm_qos_update(hba, false);
return ret;
}
@@ -10520,6 +10572,10 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
host->max_cmd_len = UFS_CDB_SIZE;
host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING);
+ /* Use the default RPM delay if the host driver has not set one */
+ if (host->rpm_autosuspend_delay == 0)
+ host->rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS;
+
hba->max_pwr_info.is_valid = false;
/* Initialize work queues */
@@ -10655,6 +10711,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
ufs_sysfs_add_nodes(hba->dev);
device_enable_async_suspend(dev);
+ ufshcd_pm_qos_init(hba);
return 0;
free_tmf_queue:
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 776bca4f70c88..b8a8801322e2d 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -17,7 +17,6 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
-#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
@@ -626,21 +625,9 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
dev_info(hba->dev, "caps: 0x%x", host->caps);
}
-static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
-
- if (!host || !host->pm_qos_init)
- return;
-
- cpu_latency_qos_update_request(&host->pm_qos_req,
- boost ? 0 : PM_QOS_DEFAULT_VALUE);
-}
-
static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
{
ufs_mtk_boost_crypt(hba, scale_up);
- ufs_mtk_boost_pm_qos(hba, scale_up);
}
static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
@@ -660,6 +647,45 @@ static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
}
}
+static void ufs_mtk_mcq_disable_irq(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ u32 irq, i;
+
+ if (!is_mcq_enabled(hba))
+ return;
+
+ if (host->mcq_nr_intr == 0)
+ return;
+
+ for (i = 0; i < host->mcq_nr_intr; i++) {
+ irq = host->mcq_intr_info[i].irq;
+ disable_irq(irq);
+ }
+ host->is_mcq_intr_enabled = false;
+}
+
+static void ufs_mtk_mcq_enable_irq(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ u32 irq, i;
+
+ if (!is_mcq_enabled(hba))
+ return;
+
+ if (host->mcq_nr_intr == 0)
+ return;
+
+ if (host->is_mcq_intr_enabled)
+ return;
+
+ for (i = 0; i < host->mcq_nr_intr; i++) {
+ irq = host->mcq_intr_info[i].irq;
+ enable_irq(irq);
+ }
+ host->is_mcq_intr_enabled = true;
+}
+
/**
* ufs_mtk_setup_clocks - enables/disable clocks
* @hba: host controller instance
@@ -703,8 +729,10 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
if (clk_pwr_off)
ufs_mtk_pwr_ctrl(hba, false);
+ ufs_mtk_mcq_disable_irq(hba);
} else if (on && status == POST_CHANGE) {
ufs_mtk_pwr_ctrl(hba, true);
+ ufs_mtk_mcq_enable_irq(hba);
}
return ret;
@@ -893,6 +921,7 @@ static int ufs_mtk_init(struct ufs_hba *hba)
const struct of_device_id *id;
struct device *dev = hba->dev;
struct ufs_mtk_host *host;
+ struct Scsi_Host *shost = hba->host;
int err = 0;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
@@ -937,6 +966,9 @@ static int ufs_mtk_init(struct ufs_hba *hba)
 /* Enable clk scaling */
hba->caps |= UFSHCD_CAP_CLK_SCALING;
+ /* Set runtime pm delay to replace default */
+ shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS;
+
hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
@@ -959,10 +991,6 @@ static int ufs_mtk_init(struct ufs_hba *hba)
host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
- /* Initialize pm-qos request */
- cpu_latency_qos_add_request(&host->pm_qos_req, PM_QOS_DEFAULT_VALUE);
- host->pm_qos_init = true;
-
goto out;
out_variant_clear:
@@ -1206,25 +1234,29 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
return err;
err = ufshcd_uic_hibern8_exit(hba);
- if (!err)
- ufshcd_set_link_active(hba);
- else
+ if (err)
return err;
- if (!hba->mcq_enabled) {
- err = ufshcd_make_hba_operational(hba);
- } else {
- ufs_mtk_config_mcq(hba, false);
- ufshcd_mcq_make_queues_operational(hba);
- ufshcd_mcq_config_mac(hba, hba->nutrs);
- /* Enable MCQ mode */
- ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
- REG_UFS_MEM_CFG);
+ /* Check link state to make sure the hibern8 exit succeeded */
+ ufs_mtk_wait_idle_state(hba, 5);
+ err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
+ if (err) {
+ dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err);
+ return err;
}
+ ufshcd_set_link_active(hba);
+ err = ufshcd_make_hba_operational(hba);
if (err)
return err;
+ if (is_mcq_enabled(hba)) {
+ ufs_mtk_config_mcq(hba, false);
+ ufshcd_mcq_make_queues_operational(hba);
+ ufshcd_mcq_config_mac(hba, hba->nutrs);
+ ufshcd_mcq_enable(hba);
+ }
+
return 0;
}
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index f76e80d91729c..fb53882f42ca8 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -7,7 +7,6 @@
#define _UFS_MEDIATEK_H
#include <linux/bitops.h>
-#include <linux/pm_qos.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
/*
@@ -167,7 +166,6 @@ struct ufs_mtk_mcq_intr_info {
struct ufs_mtk_host {
struct phy *mphy;
- struct pm_qos_request pm_qos_req;
struct regulator *reg_va09;
struct reset_control *hci_reset;
struct reset_control *unipro_reset;
@@ -178,7 +176,6 @@ struct ufs_mtk_host {
struct ufs_mtk_hw_ver hw_ver;
enum ufs_mtk_host_caps caps;
bool mphy_powered_on;
- bool pm_qos_init;
bool unipro_lpm;
bool ref_clk_enabled;
u16 ref_clk_ungating_wait_us;
@@ -186,10 +183,14 @@ struct ufs_mtk_host {
u32 ip_ver;
bool mcq_set_intr;
+ bool is_mcq_intr_enabled;
int mcq_nr_intr;
struct ufs_mtk_mcq_intr_info mcq_intr_info[UFSHCD_MAX_Q_NR];
};
+/* MTK delay of autosuspend: 500 ms */
+#define MTK_RPM_AUTOSUSPEND_DELAY_MS 500
+
/*
* Multi-VCC by Numbering
*/
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 8fde5204e88b0..7a00004bfd036 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -47,7 +47,7 @@ enum {
TSTBUS_MAX,
};
-#define QCOM_UFS_MAX_GEAR 4
+#define QCOM_UFS_MAX_GEAR 5
#define QCOM_UFS_MAX_LANE 2
enum {
@@ -67,26 +67,32 @@ static const struct __ufs_qcom_bw_table {
[MODE_PWM][UFS_PWM_G2][UFS_LANE_1] = { 1844, 1000 },
[MODE_PWM][UFS_PWM_G3][UFS_LANE_1] = { 3688, 1000 },
[MODE_PWM][UFS_PWM_G4][UFS_LANE_1] = { 7376, 1000 },
+ [MODE_PWM][UFS_PWM_G5][UFS_LANE_1] = { 14752, 1000 },
[MODE_PWM][UFS_PWM_G1][UFS_LANE_2] = { 1844, 1000 },
[MODE_PWM][UFS_PWM_G2][UFS_LANE_2] = { 3688, 1000 },
[MODE_PWM][UFS_PWM_G3][UFS_LANE_2] = { 7376, 1000 },
[MODE_PWM][UFS_PWM_G4][UFS_LANE_2] = { 14752, 1000 },
+ [MODE_PWM][UFS_PWM_G5][UFS_LANE_2] = { 29504, 1000 },
[MODE_HS_RA][UFS_HS_G1][UFS_LANE_1] = { 127796, 1000 },
[MODE_HS_RA][UFS_HS_G2][UFS_LANE_1] = { 255591, 1000 },
[MODE_HS_RA][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
[MODE_HS_RA][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
+ [MODE_HS_RA][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 },
[MODE_HS_RA][UFS_HS_G1][UFS_LANE_2] = { 255591, 1000 },
[MODE_HS_RA][UFS_HS_G2][UFS_LANE_2] = { 511181, 1000 },
[MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
[MODE_HS_RA][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
+ [MODE_HS_RA][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
[MODE_HS_RB][UFS_HS_G1][UFS_LANE_1] = { 149422, 1000 },
[MODE_HS_RB][UFS_HS_G2][UFS_LANE_1] = { 298189, 1000 },
[MODE_HS_RB][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
[MODE_HS_RB][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
+ [MODE_HS_RB][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 },
[MODE_HS_RB][UFS_HS_G1][UFS_LANE_2] = { 298189, 1000 },
[MODE_HS_RB][UFS_HS_G2][UFS_LANE_2] = { 596378, 1000 },
[MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
[MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
+ [MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
[MODE_MAX][0][0] = { 7643136, 307200 },
};
@@ -738,8 +744,17 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
* the second init can program the optimal PHY settings. This allows one to start
* the first init with either the minimum or the maximum support gear.
*/
- if (hba->ufshcd_state == UFSHCD_STATE_RESET)
- host->phy_gear = dev_req_params->gear_tx;
+ if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
+ /*
+ * Skip REINIT if the negotiated gear matches the
+ * initial phy_gear. Otherwise, update the phy_gear to
+ * program the optimal gear setting during REINIT.
+ */
+ if (host->phy_gear == dev_req_params->gear_tx)
+ hba->quirks &= ~UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
+ else
+ host->phy_gear = dev_req_params->gear_tx;
+ }
/* enable the device ref clock before changing to HS mode */
if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
@@ -843,15 +858,20 @@ static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host)
struct ufs_host_params *host_params = &host->host_params;
u32 val, dev_major;
+ /*
+ * Default to powering up the PHY to the max gear possible, which is
+ * backwards compatible with lower gears but not optimal from
+ * a power usage point of view. After device negotiation, if the
+ * gear is lower a reinit will be performed to program the PHY
+ * to the ideal gear for this combo of controller and device.
+ */
host->phy_gear = host_params->hs_tx_gear;
if (host->hw_ver.major < 0x4) {
/*
- * For controllers whose major HW version is < 4, power up the
- * PHY using minimum supported gear (UFS_HS_G2). Switching to
- * max gear will be performed during reinit if supported.
- * For newer controllers, whose major HW version is >= 4, power
- * up the PHY using max supported gear.
+ * These controllers only have one PHY init sequence,
+ * let's power up the PHY using that (the minimum supported
+ * gear, UFS_HS_G2).
*/
host->phy_gear = UFS_HS_G2;
} else if (host->hw_ver.major >= 0x5) {
@@ -1196,8 +1216,10 @@ static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk) &&
- !strcmp(clki->name, "core_clk_unipro")) {
- if (is_scale_up)
+ !strcmp(clki->name, "core_clk_unipro")) {
+ if (!clki->max_freq)
+ cycles_in_1us = 150; /* default for backwards compatibility */
+ else if (is_scale_up)
cycles_in_1us = ceil(clki->max_freq, (1000 * 1000));
else
cycles_in_1us = ceil(clk_get_rate(clki->clk), (1000 * 1000));
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 2d572f6c8ec83..009158fef2a8f 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -24,6 +24,7 @@
#include <linux/kobject.h>
#include <linux/cdev.h>
#include <linux/uio_driver.h>
+#include <linux/dma-mapping.h>
#define UIO_MAX_DEVICES (1U << MINORBITS)
@@ -759,6 +760,49 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
vma->vm_page_prot);
}
+static int uio_mmap_dma_coherent(struct vm_area_struct *vma)
+{
+ struct uio_device *idev = vma->vm_private_data;
+ struct uio_mem *mem;
+ void *addr;
+ int ret = 0;
+ int mi;
+
+ mi = uio_find_mem_index(vma);
+ if (mi < 0)
+ return -EINVAL;
+
+ mem = idev->info->mem + mi;
+
+ if (mem->addr & ~PAGE_MASK)
+ return -ENODEV;
+ if (mem->dma_addr & ~PAGE_MASK)
+ return -ENODEV;
+ if (!mem->dma_device)
+ return -ENODEV;
+ if (vma->vm_end - vma->vm_start > mem->size)
+ return -EINVAL;
+
+ dev_warn(mem->dma_device,
+ "use of UIO_MEM_DMA_COHERENT is highly discouraged");
+
+ /*
+ * UIO uses offset to index into the maps for a device.
+ * We need to clear vm_pgoff for dma_mmap_coherent.
+ */
+ vma->vm_pgoff = 0;
+
+ addr = (void *)(uintptr_t)mem->addr;
+ ret = dma_mmap_coherent(mem->dma_device,
+ vma,
+ addr,
+ mem->dma_addr,
+ vma->vm_end - vma->vm_start);
+ vma->vm_pgoff = mi;
+
+ return ret;
+}
+
static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
{
struct uio_listener *listener = filep->private_data;
@@ -806,6 +850,9 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
case UIO_MEM_VIRTUAL:
ret = uio_mmap_logical(vma);
break;
+ case UIO_MEM_DMA_COHERENT:
+ ret = uio_mmap_dma_coherent(vma);
+ break;
default:
ret = -EINVAL;
}
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index 5313307c2754a..13cc35ab5d29a 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -36,7 +36,6 @@ struct uio_dmem_genirq_platdata {
struct platform_device *pdev;
unsigned int dmem_region_start;
unsigned int num_dmem_regions;
- void *dmem_region_vaddr[MAX_UIO_MAPS];
struct mutex alloc_lock;
unsigned int refcnt;
};
@@ -50,7 +49,6 @@ static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
{
struct uio_dmem_genirq_platdata *priv = info->priv;
struct uio_mem *uiomem;
- int dmem_region = priv->dmem_region_start;
uiomem = &priv->uioinfo->mem[priv->dmem_region_start];
@@ -61,11 +59,8 @@ static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
break;
addr = dma_alloc_coherent(&priv->pdev->dev, uiomem->size,
- (dma_addr_t *)&uiomem->addr, GFP_KERNEL);
- if (!addr) {
- uiomem->addr = DMEM_MAP_ERROR;
- }
- priv->dmem_region_vaddr[dmem_region++] = addr;
+ &uiomem->dma_addr, GFP_KERNEL);
+ uiomem->addr = addr ? (uintptr_t) addr : DMEM_MAP_ERROR;
++uiomem;
}
priv->refcnt++;
@@ -80,7 +75,6 @@ static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode)
{
struct uio_dmem_genirq_platdata *priv = info->priv;
struct uio_mem *uiomem;
- int dmem_region = priv->dmem_region_start;
/* Tell the Runtime PM code that the device has become idle */
pm_runtime_put_sync(&priv->pdev->dev);
@@ -93,13 +87,12 @@ static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode)
while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
if (!uiomem->size)
break;
- if (priv->dmem_region_vaddr[dmem_region]) {
- dma_free_coherent(&priv->pdev->dev, uiomem->size,
- priv->dmem_region_vaddr[dmem_region],
- uiomem->addr);
+ if (uiomem->addr) {
+ dma_free_coherent(uiomem->dma_device, uiomem->size,
+ (void *) (uintptr_t) uiomem->addr,
+ uiomem->dma_addr);
}
uiomem->addr = DMEM_MAP_ERROR;
- ++dmem_region;
++uiomem;
}
@@ -264,7 +257,8 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
" dynamic and fixed memory regions.\n");
break;
}
- uiomem->memtype = UIO_MEM_PHYS;
+ uiomem->memtype = UIO_MEM_DMA_COHERENT;
+ uiomem->dma_device = &pdev->dev;
uiomem->addr = DMEM_MAP_ERROR;
uiomem->size = pdata->dynamic_region_sizes[i];
++uiomem;
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 20d9762331bd7..6be3462b109ff 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -181,12 +181,14 @@ hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
{
if (pdata->send_gpadl.gpadl_handle) {
vmbus_teardown_gpadl(dev->channel, &pdata->send_gpadl);
- vfree(pdata->send_buf);
+ if (!pdata->send_gpadl.decrypted)
+ vfree(pdata->send_buf);
}
if (pdata->recv_gpadl.gpadl_handle) {
vmbus_teardown_gpadl(dev->channel, &pdata->recv_gpadl);
- vfree(pdata->recv_buf);
+ if (!pdata->recv_gpadl.decrypted)
+ vfree(pdata->recv_buf);
}
}
@@ -295,7 +297,8 @@ hv_uio_probe(struct hv_device *dev,
ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
RECV_BUFFER_SIZE, &pdata->recv_gpadl);
if (ret) {
- vfree(pdata->recv_buf);
+ if (!pdata->recv_gpadl.decrypted)
+ vfree(pdata->recv_buf);
goto fail_close;
}
@@ -317,7 +320,8 @@ hv_uio_probe(struct hv_device *dev,
ret = vmbus_establish_gpadl(channel, pdata->send_buf,
SEND_BUFFER_SIZE, &pdata->send_gpadl);
if (ret) {
- vfree(pdata->send_buf);
+ if (!pdata->send_gpadl.decrypted)
+ vfree(pdata->send_buf);
goto fail_close;
}
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index 77e2dc4048855..f67881cba645b 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -191,9 +191,11 @@ static int pruss_probe(struct platform_device *pdev)
p->mem[1].size = sram_pool_sz;
p->mem[1].memtype = UIO_MEM_PHYS;
- p->mem[2].addr = gdev->ddr_paddr;
+ p->mem[2].addr = (uintptr_t) gdev->ddr_vaddr;
+ p->mem[2].dma_addr = gdev->ddr_paddr;
p->mem[2].size = extram_pool_sz;
- p->mem[2].memtype = UIO_MEM_PHYS;
+ p->mem[2].memtype = UIO_MEM_DMA_COHERENT;
+ p->mem[2].dma_device = dev;
p->name = devm_kasprintf(dev, GFP_KERNEL, "pruss_evt%d", cnt);
p->version = DRV_VERSION;
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
index ee917f1b091c8..8b936a2e93a0d 100644
--- a/drivers/usb/cdns3/drd.c
+++ b/drivers/usb/cdns3/drd.c
@@ -435,7 +435,7 @@ int cdns_drd_init(struct cdns *cdns)
writel(1, &cdns->otg_v1_regs->simulate);
cdns->version = CDNS3_CONTROLLER_V1;
} else {
- dev_err(cdns->dev, "not supporte DID=0x%08x\n", state);
+ dev_err(cdns->dev, "not supported DID=0x%08x\n", state);
return -EINVAL;
}
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 351ede4b5de20..58e3ca7e47939 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -116,3 +116,30 @@ config USB_AUTOSUSPEND_DELAY
The default value Linux has always had is 2 seconds. Change
this value if you want a different delay and cannot modify
the command line or module parameter.
+
+config USB_DEFAULT_AUTHORIZATION_MODE
+ int "Default authorization mode for USB devices"
+ range 0 2
+ default 1
+ depends on USB
+ help
+ Select the default USB device authorization mode. Can be overridden
+ with usbcore.authorized_default command line or module parameter.
+
+ This option allows you to choose whether USB devices that are
+ connected to the system can be used by default, or if they are
+ locked down.
+
+ With value 0, all connected USB devices, with the exception of the root
+ hub, require user space authorization before they can be used.
+
+ With value 1 (default) no user space authorization is required to
+ use connected USB devices.
+
+ With value 2, all connected USB devices, with the exception of internal
+ USB devices, require user space authorization before they can be
+ used. Note that in this mode the differentiation between internal
+ and external USB devices relies on ACPI, and on systems without
+ ACPI selecting value 2 is analogous to selecting value 0.
+
+ If unsure, keep the default value.
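
As the help text says, the build-time default can still be overridden at boot with usbcore.authorized_default=<0|1|2>. When a device comes up deauthorized (modes 0 or 2), user space can enable it later through its per-device "authorized" attribute; a minimal sketch, with the device name "1-2" as a placeholder:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/usb/devices/1-2/authorized", "w");

	if (!f) {
		perror("authorized");
		return 1;
	}
	fputs("1\n", f);	/* authorize the device so its interfaces can bind */
	fclose(f);
	return 0;
}
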
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e01b1913d02bf..e02ba15f6e34f 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1710,9 +1710,7 @@ int usb_autoresume_device(struct usb_device *udev)
{
int status;
- status = pm_runtime_get_sync(&udev->dev);
- if (status < 0)
- pm_runtime_put_sync(&udev->dev);
+ status = pm_runtime_resume_and_get(&udev->dev);
dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&udev->dev.power.usage_count),
status);
@@ -1818,9 +1816,7 @@ int usb_autopm_get_interface(struct usb_interface *intf)
{
int status;
- status = pm_runtime_get_sync(&intf->dev);
- if (status < 0)
- pm_runtime_put_sync(&intf->dev);
+ status = pm_runtime_resume_and_get(&intf->dev);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
status);
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index a2530811cf7de..4b38b87a13438 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -141,7 +141,7 @@ static void ep_device_release(struct device *dev)
kfree(ep_dev);
}
-struct device_type usb_ep_device_type = {
+const struct device_type usb_ep_device_type = {
.name = "usb_endpoint",
.release = ep_device_release,
};
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index edf74458474a1..c0e005670d67d 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -357,12 +357,10 @@ static const u8 ss_rh_config_descriptor[] = {
#define USB_AUTHORIZE_ALL 1
#define USB_AUTHORIZE_INTERNAL 2
-static int authorized_default = USB_AUTHORIZE_WIRED;
+static int authorized_default = CONFIG_USB_DEFAULT_AUTHORIZATION_MODE;
module_param(authorized_default, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(authorized_default,
- "Default USB device authorization: 0 is not authorized, 1 is "
- "authorized, 2 is authorized for internal devices, -1 is "
- "authorized (default, same as 1)");
+ "Default USB device authorization: 0 is not authorized, 1 is authorized (default), 2 is authorized for internal devices, -1 is authorized (same as 1)");
/*-------------------------------------------------------------------------*/
/**
@@ -2795,10 +2793,16 @@ int usb_add_hcd(struct usb_hcd *hcd,
struct usb_device *rhdev;
struct usb_hcd *shared_hcd;
- if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
- hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
- if (IS_ERR(hcd->phy_roothub))
- return PTR_ERR(hcd->phy_roothub);
+ if (!hcd->skip_phy_initialization) {
+ if (usb_hcd_is_primary_hcd(hcd)) {
+ hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
+ if (IS_ERR(hcd->phy_roothub))
+ return PTR_ERR(hcd->phy_roothub);
+ } else {
+ hcd->phy_roothub = usb_phy_roothub_alloc_usb3_phy(hcd->self.sysdev);
+ if (IS_ERR(hcd->phy_roothub))
+ return PTR_ERR(hcd->phy_roothub);
+ }
retval = usb_phy_roothub_init(hcd->phy_roothub);
if (retval)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index e38a4124f6102..9446660e231bb 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -37,6 +37,7 @@
#include <asm/byteorder.h>
#include "hub.h"
+#include "phy.h"
#include "otg_productlist.h"
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
@@ -129,7 +130,6 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
#define HUB_DEBOUNCE_STEP 25
#define HUB_DEBOUNCE_STABLE 100
-static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);
static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1,
@@ -634,6 +634,34 @@ static int hub_ext_port_status(struct usb_hub *hub, int port1, int type,
ret = 0;
}
mutex_unlock(&hub->status_mutex);
+
+ /*
+ * There is no need to lock status_mutex here, because status_mutex
+ * protects hub->status, and the phy driver only checks the port
+ * status without changing the status.
+ */
+ if (!ret) {
+ struct usb_device *hdev = hub->hdev;
+
+ /*
+ * Only roothub will be notified of connection changes,
+ * since the USB PHY only cares about changes at the next
+ * level.
+ */
+ if (is_root_hub(hdev)) {
+ struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
+ bool connect;
+ bool connect_change;
+
+ connect_change = *change & USB_PORT_STAT_C_CONNECTION;
+ connect = *status & USB_PORT_STAT_CONNECTION;
+ if (connect_change && connect)
+ usb_phy_roothub_notify_connect(hcd->phy_roothub, port1 - 1);
+ else if (connect_change)
+ usb_phy_roothub_notify_disconnect(hcd->phy_roothub, port1 - 1);
+ }
+ }
+
return ret;
}
@@ -691,14 +719,14 @@ static void kick_hub_wq(struct usb_hub *hub)
*/
intf = to_usb_interface(hub->intfdev);
usb_autopm_get_interface_no_resume(intf);
- kref_get(&hub->kref);
+ hub_get(hub);
if (queue_work(hub_wq, &hub->events))
return;
/* the work has already been scheduled */
usb_autopm_put_interface_async(intf);
- kref_put(&hub->kref, hub_release);
+ hub_put(hub);
}
void usb_kick_hub_wq(struct usb_device *hdev)
@@ -1066,7 +1094,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
goto init2;
goto init3;
}
- kref_get(&hub->kref);
+ hub_get(hub);
/* The superspeed hub except for root hub has to use Hub Depth
* value as an offset into the route string to locate the bits
@@ -1314,7 +1342,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
device_unlock(&hdev->dev);
}
- kref_put(&hub->kref, hub_release);
+ hub_put(hub);
}
/* Implement the continuations for the delays above */
@@ -1730,6 +1758,16 @@ static void hub_release(struct kref *kref)
kfree(hub);
}
+void hub_get(struct usb_hub *hub)
+{
+ kref_get(&hub->kref);
+}
+
+void hub_put(struct usb_hub *hub)
+{
+ kref_put(&hub->kref, hub_release);
+}
+
static unsigned highspeed_hubs;
static void hub_disconnect(struct usb_interface *intf)
@@ -1778,7 +1816,7 @@ static void hub_disconnect(struct usb_interface *intf)
onboard_hub_destroy_pdevs(&hub->onboard_hub_devs);
- kref_put(&hub->kref, hub_release);
+ hub_put(hub);
}
static bool hub_descriptor_is_sane(struct usb_host_interface *desc)
@@ -5905,7 +5943,7 @@ out_hdev_lock:
/* Balance the stuff in kick_hub_wq() and allow autosuspend */
usb_autopm_put_interface(intf);
- kref_put(&hub->kref, hub_release);
+ hub_put(hub);
kcov_remote_stop();
}
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 43ce21c96a511..183b69dc29554 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -129,6 +129,8 @@ extern void usb_hub_remove_port_device(struct usb_hub *hub,
extern int usb_hub_set_port_power(struct usb_device *hdev, struct usb_hub *hub,
int port1, bool set);
extern struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev);
+extern void hub_get(struct usb_hub *hub);
+extern void hub_put(struct usb_hub *hub);
extern int hub_port_debounce(struct usb_hub *hub, int port1,
bool must_be_connected);
extern int usb_clear_port_feature(struct usb_device *hdev,
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 077dfe48d01c1..d2b2787be4092 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1198,6 +1198,8 @@ EXPORT_SYMBOL_GPL(usb_get_status);
* same status code used to report a true stall.
*
* This call is synchronous, and may not be used in an interrupt context.
+ * If a thread in your driver uses this call, make sure your disconnect()
+ * method can wait for it to complete.
*
* Return: Zero on success, or else the status code returned by the
* underlying usb_control_msg() call.
@@ -1516,7 +1518,8 @@ void usb_enable_interface(struct usb_device *dev,
* This call is synchronous, and may not be used in an interrupt context.
* Also, drivers must not change altsettings while urbs are scheduled for
* endpoints in that interface; all such urbs must first be completed
- * (perhaps forced by unlinking).
+ * (perhaps forced by unlinking). If a thread in your driver uses this call,
+ * make sure your disconnect() method can wait for it to complete.
*
* Return: Zero on success, or else the status code returned by the
* underlying usb_control_msg() call.
@@ -1849,7 +1852,7 @@ static int usb_if_uevent(const struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-struct device_type usb_if_device_type = {
+const struct device_type usb_if_device_type = {
.name = "usb_interface",
.release = usb_release_interface,
.uevent = usb_if_uevent,
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
index db4ccf9ce3d9b..f1a499ee482c3 100644
--- a/drivers/usb/core/of.c
+++ b/drivers/usb/core/of.c
@@ -8,6 +8,7 @@
*/
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/usb/of.h>
/**
@@ -75,6 +76,76 @@ bool usb_of_has_combined_node(struct usb_device *udev)
}
EXPORT_SYMBOL_GPL(usb_of_has_combined_node);
+static bool usb_of_has_devices_or_graph(const struct usb_device *hub)
+{
+ const struct device_node *np = hub->dev.of_node;
+ struct device_node *child;
+
+ if (of_graph_is_present(np))
+ return true;
+
+ for_each_child_of_node(np, child)
+ if (of_property_present(child, "reg"))
+ return true;
+
+ return false;
+}
+
+/**
+ * usb_of_get_connect_type() - get a USB hub's port connect_type
+ * @hub: hub to which port @port1 belongs
+ * @port1: one-based index of port
+ *
+ * Get the connect_type of @port1 based on the device node for @hub. If the
+ * port is described in the OF graph, the connect_type is "hotplug". If the
+ * @hub has a child device with a 'reg' property equal to @port1, the
+ * connect_type is "hard-wired". If the node for @hub describes neither an
+ * OF graph nor any child devices, the connect_type is "unknown". Otherwise,
+ * the port is considered "unused" because it isn't described at all.
+ *
+ * Return: A connect_type for @port1 based on the device node for @hub.
+ */
+enum usb_port_connect_type usb_of_get_connect_type(struct usb_device *hub, int port1)
+{
+ struct device_node *np, *child, *ep, *remote_np;
+ enum usb_port_connect_type connect_type;
+
+ /* Only set connect_type if binding has ports/hardwired devices. */
+ if (!usb_of_has_devices_or_graph(hub))
+ return USB_PORT_CONNECT_TYPE_UNKNOWN;
+
+ /* Assume port is unused if there's a graph or a child node. */
+ connect_type = USB_PORT_NOT_USED;
+
+ np = hub->dev.of_node;
+ /*
+ * Hotplug ports are connected to an available remote node, e.g.
+ * usb-a-connector compatible node, in the OF graph.
+ */
+ if (of_graph_is_present(np)) {
+ ep = of_graph_get_endpoint_by_regs(np, port1, -1);
+ if (ep) {
+ remote_np = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (of_device_is_available(remote_np))
+ connect_type = USB_PORT_CONNECT_TYPE_HOT_PLUG;
+ of_node_put(remote_np);
+ }
+ }
+
+ /*
+ * Hard-wired ports are child nodes with a reg property corresponding
+ * to the port number, i.e. a usb device.
+ */
+ child = usb_of_get_device_node(hub, port1);
+ if (of_device_is_available(child))
+ connect_type = USB_PORT_CONNECT_TYPE_HARD_WIRED;
+ of_node_put(child);
+
+ return connect_type;
+}
+EXPORT_SYMBOL_GPL(usb_of_get_connect_type);
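To restate the mapping described in the kernel-doc above, a hypothetical consumer of the return value might look like this (illustrative only; the real caller added later in this patch just stores the value in port_dev->connect_type):

	static const char *example_connect_type_str(struct usb_device *hdev, int port1)
	{
		switch (usb_of_get_connect_type(hdev, port1)) {
		case USB_PORT_CONNECT_TYPE_HOT_PLUG:
			return "hotplug";	/* remote connector node in the OF graph */
		case USB_PORT_CONNECT_TYPE_HARD_WIRED:
			return "hard-wired";	/* child node with a matching 'reg' */
		case USB_PORT_NOT_USED:
			return "unused";	/* ports/devices described, this one isn't */
		default:
			return "unknown";	/* hub node describes nothing at all */
		}
	}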
+
/**
* usb_of_get_interface_node() - get a USB interface node
* @udev: USB device of interface
diff --git a/drivers/usb/core/phy.c b/drivers/usb/core/phy.c
index fb1588e7c2823..faa20054ad5a1 100644
--- a/drivers/usb/core/phy.c
+++ b/drivers/usb/core/phy.c
@@ -19,6 +19,30 @@ struct usb_phy_roothub {
struct list_head list;
};
+/* Allocate a roothub_entry for the PHY with the given name */
+static int usb_phy_roothub_add_phy_by_name(struct device *dev, const char *name,
+ struct list_head *list)
+{
+ struct usb_phy_roothub *roothub_entry;
+ struct phy *phy;
+
+ phy = devm_of_phy_get(dev, dev->of_node, name);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL);
+ if (!roothub_entry)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&roothub_entry->list);
+
+ roothub_entry->phy = phy;
+
+ list_add_tail(&roothub_entry->list, list);
+
+ return 0;
+}
+
static int usb_phy_roothub_add_phy(struct device *dev, int index,
struct list_head *list)
{
@@ -65,6 +89,9 @@ struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev)
INIT_LIST_HEAD(&phy_roothub->list);
+ if (!usb_phy_roothub_add_phy_by_name(dev, "usb2-phy", &phy_roothub->list))
+ return phy_roothub;
+
for (i = 0; i < num_phys; i++) {
err = usb_phy_roothub_add_phy(dev, i, &phy_roothub->list);
if (err)
@@ -75,6 +102,41 @@ struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev)
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_alloc);
+/**
+ * usb_phy_roothub_alloc_usb3_phy - allocate the usb3-phy roothub
+ * @dev: the device of the host controller
+ *
+ * Allocate the usb phy roothub if the host uses a generic usb3-phy.
+ *
+ * Return: On success, a pointer to the usb_phy_roothub. Otherwise,
+ * %NULL if no usb3 phy is used, or %-ENOMEM if out of memory.
+ */
+struct usb_phy_roothub *usb_phy_roothub_alloc_usb3_phy(struct device *dev)
+{
+ struct usb_phy_roothub *phy_roothub;
+ int num_phys;
+
+ if (!IS_ENABLED(CONFIG_GENERIC_PHY))
+ return NULL;
+
+ num_phys = of_count_phandle_with_args(dev->of_node, "phys",
+ "#phy-cells");
+ if (num_phys <= 0)
+ return NULL;
+
+ phy_roothub = devm_kzalloc(dev, sizeof(*phy_roothub), GFP_KERNEL);
+ if (!phy_roothub)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&phy_roothub->list);
+
+ if (!usb_phy_roothub_add_phy_by_name(dev, "usb3-phy", &phy_roothub->list))
+ return phy_roothub;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(usb_phy_roothub_alloc_usb3_phy);
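A hedged sketch of how a host-controller glue driver might adopt the new allocator for its shared (USB3) HCD; the function name and error handling are illustrative, not taken from this patch:

	static int example_setup_usb3_phy(struct usb_hcd *shared_hcd, struct device *dev)
	{
		shared_hcd->phy_roothub = usb_phy_roothub_alloc_usb3_phy(dev);
		if (IS_ERR(shared_hcd->phy_roothub))
			return PTR_ERR(shared_hcd->phy_roothub);

		/* NULL (no "usb3-phy") is fine: the init/power helpers ignore it */
		return usb_phy_roothub_init(shared_hcd->phy_roothub);
	}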
+
int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub)
{
struct usb_phy_roothub *roothub_entry;
@@ -172,6 +234,64 @@ int usb_phy_roothub_calibrate(struct usb_phy_roothub *phy_roothub)
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_calibrate);
+/**
+ * usb_phy_roothub_notify_connect() - connect notification
+ * @phy_roothub: the phy of the roothub, if the host uses a generic phy.
+ * @port: the port index for connect
+ *
+ * If the phy needs to know the connection status, this notification can be used.
+ * Returns: %0 if successful, a negative error code otherwise
+ */
+int usb_phy_roothub_notify_connect(struct usb_phy_roothub *phy_roothub, int port)
+{
+ struct usb_phy_roothub *roothub_entry;
+ struct list_head *head;
+ int err;
+
+ if (!phy_roothub)
+ return 0;
+
+ head = &phy_roothub->list;
+
+ list_for_each_entry(roothub_entry, head, list) {
+ err = phy_notify_connect(roothub_entry->phy, port);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_phy_roothub_notify_connect);
+
+/**
+ * usb_phy_roothub_notify_disconnect() - disconnect notification
+ * @phy_roothub: the phy of the roothub, if the host uses a generic phy.
+ * @port: the port index for disconnect
+ *
+ * If the phy needs to know the connection status, this notification can be used.
+ * Returns: %0 if successful, a negative error code otherwise
+ */
+int usb_phy_roothub_notify_disconnect(struct usb_phy_roothub *phy_roothub, int port)
+{
+ struct usb_phy_roothub *roothub_entry;
+ struct list_head *head;
+ int err;
+
+ if (!phy_roothub)
+ return 0;
+
+ head = &phy_roothub->list;
+
+ list_for_each_entry(roothub_entry, head, list) {
+ err = phy_notify_disconnect(roothub_entry->phy, port);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_phy_roothub_notify_disconnect);
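The notify helpers above fan out to the generic PHY framework's connect/disconnect hooks (phy_notify_connect()/phy_notify_disconnect() dispatch to phy_ops). A hedged sketch of a PHY driver implementing those callbacks; the driver and names are hypothetical, not part of this patch:

	static int example_phy_connect(struct phy *phy, int port)
	{
		/* e.g. apply port-specific tuning for the newly attached device */
		dev_dbg(&phy->dev, "device attached on port %d\n", port);
		return 0;
	}

	static int example_phy_disconnect(struct phy *phy, int port)
	{
		dev_dbg(&phy->dev, "device detached from port %d\n", port);
		return 0;
	}

	static const struct phy_ops example_phy_ops = {
		.connect	= example_phy_connect,
		.disconnect	= example_phy_disconnect,
		.owner		= THIS_MODULE,
	};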
+
int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub)
{
struct usb_phy_roothub *roothub_entry;
diff --git a/drivers/usb/core/phy.h b/drivers/usb/core/phy.h
index 20a267cd986b2..88b49c0ea6b58 100644
--- a/drivers/usb/core/phy.h
+++ b/drivers/usb/core/phy.h
@@ -12,6 +12,7 @@ struct device;
struct usb_phy_roothub;
struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev);
+struct usb_phy_roothub *usb_phy_roothub_alloc_usb3_phy(struct device *dev);
int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub);
int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub);
@@ -19,6 +20,8 @@ int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub);
int usb_phy_roothub_set_mode(struct usb_phy_roothub *phy_roothub,
enum phy_mode mode);
int usb_phy_roothub_calibrate(struct usb_phy_roothub *phy_roothub);
+int usb_phy_roothub_notify_connect(struct usb_phy_roothub *phy_roothub, int port);
+int usb_phy_roothub_notify_disconnect(struct usb_phy_roothub *phy_roothub, int port);
int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub);
void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub);
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 4d63496f98b6c..0e1262a077aea 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/pm_qos.h>
#include <linux/component.h>
+#include <linux/usb/of.h>
#include "hub.h"
@@ -55,11 +56,22 @@ static ssize_t disable_show(struct device *dev,
u16 portstatus, unused;
bool disabled;
int rc;
+ struct kernfs_node *kn;
+ hub_get(hub);
rc = usb_autopm_get_interface(intf);
if (rc < 0)
- return rc;
+ goto out_hub_get;
+ /*
+ * Prevent deadlock if another process is concurrently
+ * trying to unregister hdev.
+ */
+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
+ if (!kn) {
+ rc = -ENODEV;
+ goto out_autopm;
+ }
usb_lock_device(hdev);
if (hub->disconnected) {
rc = -ENODEV;
@@ -69,9 +81,13 @@ static ssize_t disable_show(struct device *dev,
usb_hub_port_status(hub, port1, &portstatus, &unused);
disabled = !usb_port_is_power_on(hub, portstatus);
-out_hdev_lock:
+ out_hdev_lock:
usb_unlock_device(hdev);
+ sysfs_unbreak_active_protection(kn);
+ out_autopm:
usb_autopm_put_interface(intf);
+ out_hub_get:
+ hub_put(hub);
if (rc)
return rc;
@@ -89,15 +105,26 @@ static ssize_t disable_store(struct device *dev, struct device_attribute *attr,
int port1 = port_dev->portnum;
bool disabled;
int rc;
+ struct kernfs_node *kn;
rc = kstrtobool(buf, &disabled);
if (rc)
return rc;
+ hub_get(hub);
rc = usb_autopm_get_interface(intf);
if (rc < 0)
- return rc;
+ goto out_hub_get;
+ /*
+ * Prevent deadlock if another process is concurrently
+ * trying to unregister hdev.
+ */
+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
+ if (!kn) {
+ rc = -ENODEV;
+ goto out_autopm;
+ }
usb_lock_device(hdev);
if (hub->disconnected) {
rc = -ENODEV;
@@ -118,9 +145,13 @@ static ssize_t disable_store(struct device *dev, struct device_attribute *attr,
if (!rc)
rc = count;
-out_hdev_lock:
+ out_hdev_lock:
usb_unlock_device(hdev);
+ sysfs_unbreak_active_protection(kn);
+ out_autopm:
usb_autopm_put_interface(intf);
+ out_hub_get:
+ hub_put(hub);
return rc;
}
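Both hunks above follow the same shape: take a hub reference, drop the sysfs active protection, then take the device lock, so a concurrent hub unregistration (which removes the attribute while holding that lock) cannot deadlock against this callback. A simplified, hypothetical store() showing only that skeleton:

	static ssize_t example_store(struct device *dev, struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		struct kernfs_node *kn;

		/* Prevent deadlock with a concurrent attribute removal */
		kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
		if (!kn)
			return -ENODEV;

		device_lock(dev);
		/* ... act on the device here ... */
		device_unlock(dev);

		sysfs_unbreak_active_protection(kn);
		return count;
	}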
@@ -418,8 +449,10 @@ static void usb_port_shutdown(struct device *dev)
{
struct usb_port *port_dev = to_usb_port(dev);
- if (port_dev->child)
+ if (port_dev->child) {
usb_disable_usb2_hardware_lpm(port_dev->child);
+ usb_unlocked_disable_lpm(port_dev->child);
+ }
}
static const struct dev_pm_ops usb_port_pm_ops = {
@@ -429,7 +462,7 @@ static const struct dev_pm_ops usb_port_pm_ops = {
#endif
};
-struct device_type usb_port_device_type = {
+const struct device_type usb_port_device_type = {
.name = "usb_port",
.release = usb_port_device_release,
.pm = &usb_port_pm_ops,
@@ -709,6 +742,7 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
return -ENOMEM;
}
+ port_dev->connect_type = usb_of_get_connect_type(hdev, port1);
hub->ports[port1 - 1] = port_dev;
port_dev->portnum = port1;
set_bit(port1, hub->power_bits);
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 5d21718afb05c..d83231d6736ac 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -273,9 +273,10 @@ static ssize_t avoid_reset_quirk_store(struct device *dev,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
- int val, rc;
+ bool val;
+ int rc;
- if (sscanf(buf, "%d", &val) != 1 || val < 0 || val > 1)
+ if (kstrtobool(buf, &val) != 0)
return -EINVAL;
rc = usb_lock_device_interruptible(udev);
if (rc < 0)
@@ -322,13 +323,14 @@ static ssize_t persist_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
- int value, rc;
+ bool value;
+ int rc;
/* Hubs are always enabled for USB_PERSIST */
if (udev->descriptor.bDeviceClass == USB_CLASS_HUB)
return -EPERM;
- if (sscanf(buf, "%d", &value) != 1)
+ if (kstrtobool(buf, &value) != 0)
return -EINVAL;
rc = usb_lock_device_interruptible(udev);
@@ -739,14 +741,14 @@ static ssize_t authorized_store(struct device *dev,
{
ssize_t result;
struct usb_device *usb_dev = to_usb_device(dev);
- unsigned val;
- result = sscanf(buf, "%u\n", &val);
- if (result != 1)
+ bool val;
+
+ if (kstrtobool(buf, &val) != 0)
result = -EINVAL;
- else if (val == 0)
- result = usb_deauthorize_device(usb_dev);
- else
+ else if (val)
result = usb_authorize_device(usb_dev);
+ else
+ result = usb_deauthorize_device(usb_dev);
return result < 0 ? result : size;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(authorized, S_IRUGO | S_IWUSR,
@@ -847,16 +849,10 @@ static const struct attribute_group dev_string_attr_grp = {
.is_visible = dev_string_attrs_are_visible,
};
-const struct attribute_group *usb_device_groups[] = {
- &dev_attr_grp,
- &dev_string_attr_grp,
- NULL
-};
-
/* Binary descriptors */
static ssize_t
-read_descriptors(struct file *filp, struct kobject *kobj,
+descriptors_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
@@ -878,7 +874,7 @@ read_descriptors(struct file *filp, struct kobject *kobj,
srclen = sizeof(struct usb_device_descriptor);
} else {
src = udev->rawdescriptors[cfgno];
- srclen = __le16_to_cpu(udev->config[cfgno].desc.
+ srclen = le16_to_cpu(udev->config[cfgno].desc.
wTotalLength);
}
if (off < srclen) {
@@ -893,11 +889,69 @@ read_descriptors(struct file *filp, struct kobject *kobj,
}
return count - nleft;
}
+static BIN_ATTR_RO(descriptors, 18 + 65535); /* dev descr + max-size raw descriptor */
+
+static ssize_t
+bos_descriptors_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct usb_device *udev = to_usb_device(dev);
+ struct usb_host_bos *bos = udev->bos;
+ struct usb_bos_descriptor *desc;
+ size_t desclen, n = 0;
+
+ if (bos) {
+ desc = bos->desc;
+ desclen = le16_to_cpu(desc->wTotalLength);
+ if (off < desclen) {
+ n = min(count, desclen - (size_t) off);
+ memcpy(buf, (void *) desc + off, n);
+ }
+ }
+ return n;
+}
+static BIN_ATTR_RO(bos_descriptors, 65535); /* max-size BOS */
+
+/* When modifying this list, be sure to modify dev_bin_attrs_are_visible()
+ * accordingly.
+ */
+static struct bin_attribute *dev_bin_attrs[] = {
+ &bin_attr_descriptors,
+ &bin_attr_bos_descriptors,
+ NULL
+};
+
+static umode_t dev_bin_attrs_are_visible(struct kobject *kobj,
+ struct bin_attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct usb_device *udev = to_usb_device(dev);
+
+ /*
+ * There's no need to check if the descriptors attribute should
+ * be visible because all devices have a device descriptor. The
+ * bos_descriptors attribute should be visible if and only if
+ * the device has a BOS, so check if it exists here.
+ */
+ if (a == &bin_attr_bos_descriptors) {
+ if (udev->bos == NULL)
+ return 0;
+ }
+ return a->attr.mode;
+}
+
+static const struct attribute_group dev_bin_attr_grp = {
+ .bin_attrs = dev_bin_attrs,
+ .is_bin_visible = dev_bin_attrs_are_visible,
+};
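Because bos_descriptors is only made visible when udev->bos exists, a userspace reader can treat a missing file as "no BOS". A minimal, hypothetical userspace sketch (the device path is illustrative):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char buf[65535];
		int fd = open("/sys/bus/usb/devices/usb1/bos_descriptors", O_RDONLY);
		ssize_t n;

		if (fd < 0)
			return 1;	/* file absent: device has no BOS */
		n = read(fd, buf, sizeof(buf));
		if (n > 0)
			printf("BOS descriptor block: %zd bytes\n", n);
		close(fd);
		return 0;
	}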
-static struct bin_attribute dev_bin_attr_descriptors = {
- .attr = {.name = "descriptors", .mode = 0444},
- .read = read_descriptors,
- .size = 18 + 65535, /* dev descr + max-size raw descriptor */
+const struct attribute_group *usb_device_groups[] = {
+ &dev_attr_grp,
+ &dev_string_attr_grp,
+ &dev_bin_attr_grp,
+ NULL
};
/*
@@ -1015,10 +1069,6 @@ int usb_create_sysfs_dev_files(struct usb_device *udev)
struct device *dev = &udev->dev;
int retval;
- retval = device_create_bin_file(dev, &dev_bin_attr_descriptors);
- if (retval)
- goto error;
-
retval = add_persist_attributes(dev);
if (retval)
goto error;
@@ -1048,7 +1098,6 @@ void usb_remove_sysfs_dev_files(struct usb_device *udev)
remove_power_attributes(dev);
remove_persist_attributes(dev);
- device_remove_bin_file(dev, &dev_bin_attr_descriptors);
}
/* Interface Association Descriptor fields */
@@ -1168,14 +1217,24 @@ static ssize_t interface_authorized_store(struct device *dev,
{
struct usb_interface *intf = to_usb_interface(dev);
bool val;
+ struct kernfs_node *kn;
if (kstrtobool(buf, &val) != 0)
return -EINVAL;
- if (val)
+ if (val) {
usb_authorize_interface(intf);
- else
- usb_deauthorize_interface(intf);
+ } else {
+ /*
+ * Prevent deadlock if another process is concurrently
+ * trying to unregister intf.
+ */
+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
+ if (kn) {
+ usb_deauthorize_interface(intf);
+ sysfs_unbreak_active_protection(kn);
+ }
+ }
return count;
}
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index a34b22537d7cc..7f8a912d4fe2a 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -142,12 +142,19 @@ int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable)
}
EXPORT_SYMBOL_GPL(usb_acpi_set_power_state);
-static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
- struct acpi_pld_info *pld)
+/*
+ * Private to usb-acpi, all the core needs to know is that
+ * port_dev->location is non-zero when it has been set by the firmware.
+ */
+#define USB_ACPI_LOCATION_VALID (1 << 31)
+
+static void
+usb_acpi_get_connect_type(struct usb_port *port_dev, acpi_handle *handle)
{
enum usb_port_connect_type connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *upc = NULL;
+ struct acpi_pld_info *pld = NULL;
acpi_status status;
/*
@@ -158,6 +165,12 @@ static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
* a usb device is directly hard-wired to the port. If no visible and
* no connectable, the port would be not used.
*/
+
+ status = acpi_get_physical_device_location(handle, &pld);
+ if (ACPI_SUCCESS(status) && pld)
+ port_dev->location = USB_ACPI_LOCATION_VALID |
+ pld->group_token << 8 | pld->group_position;
+
status = acpi_evaluate_object(handle, "_UPC", NULL, &buffer);
if (ACPI_FAILURE(status))
goto out;
@@ -166,25 +179,22 @@ static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
if (!upc || (upc->type != ACPI_TYPE_PACKAGE) || upc->package.count != 4)
goto out;
+ /* UPC states port is connectable */
if (upc->package.elements[0].integer.value)
- if (pld->user_visible)
+ if (!pld)
+ ; /* keep connect_type as unknown */
+ else if (pld->user_visible)
connect_type = USB_PORT_CONNECT_TYPE_HOT_PLUG;
else
connect_type = USB_PORT_CONNECT_TYPE_HARD_WIRED;
- else if (!pld->user_visible)
+ else
connect_type = USB_PORT_NOT_USED;
out:
+ port_dev->connect_type = connect_type;
kfree(upc);
- return connect_type;
+ ACPI_FREE(pld);
}
-
-/*
- * Private to usb-acpi, all the core needs to know is that
- * port_dev->location is non-zero when it has been set by the firmware.
- */
-#define USB_ACPI_LOCATION_VALID (1 << 31)
-
static struct acpi_device *
usb_acpi_get_companion_for_port(struct usb_port *port_dev)
{
@@ -222,22 +232,12 @@ static struct acpi_device *
usb_acpi_find_companion_for_port(struct usb_port *port_dev)
{
struct acpi_device *adev;
- struct acpi_pld_info *pld;
- acpi_handle *handle;
- acpi_status status;
adev = usb_acpi_get_companion_for_port(port_dev);
if (!adev)
return NULL;
- handle = adev->handle;
- status = acpi_get_physical_device_location(handle, &pld);
- if (ACPI_SUCCESS(status) && pld) {
- port_dev->location = USB_ACPI_LOCATION_VALID
- | pld->group_token << 8 | pld->group_position;
- port_dev->connect_type = usb_acpi_get_connect_type(handle, pld);
- ACPI_FREE(pld);
- }
+ usb_acpi_get_connect_type(port_dev, adev->handle);
return adev;
}
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index dc8d9228a5e75..a0c432b14b20b 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -592,7 +592,7 @@ static char *usb_devnode(const struct device *dev,
usb_dev->bus->busnum, usb_dev->devnum);
}
-struct device_type usb_device_type = {
+const struct device_type usb_device_type = {
.name = "usb_device",
.release = usb_release_dev,
.uevent = usb_dev_uevent,
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index bfecb50773b6b..b8324ea05b20f 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -144,10 +144,10 @@ static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
extern const struct class usbmisc_class;
extern const struct bus_type usb_bus_type;
extern struct mutex usb_port_peer_mutex;
-extern struct device_type usb_device_type;
-extern struct device_type usb_if_device_type;
-extern struct device_type usb_ep_device_type;
-extern struct device_type usb_port_device_type;
+extern const struct device_type usb_device_type;
+extern const struct device_type usb_if_device_type;
+extern const struct device_type usb_ep_device_type;
+extern const struct device_type usb_port_device_type;
extern struct usb_device_driver usb_generic_driver;
static inline int is_usb_device(const struct device *dev)
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index c92a1da46a014..a141f83aba0cc 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -729,8 +729,14 @@ struct dwc2_dregs_backup {
* struct dwc2_hregs_backup - Holds host registers state before
* entering partial power down
* @hcfg: Backup of HCFG register
+ * @hflbaddr: Backup of HFLBADDR register
* @haintmsk: Backup of HAINTMSK register
+ * @hcchar: Backup of HCCHAR register
+ * @hcsplt: Backup of HCSPLT register
* @hcintmsk: Backup of HCINTMSK register
+ * @hctsiz: Backup of HCTSIZ register
+ * @hdma: Backup of HCDMA register
+ * @hcdmab: Backup of HCDMAB register
* @hprt0: Backup of HPTR0 register
* @hfir: Backup of HFIR register
* @hptxfsiz: Backup of HPTXFSIZ register
@@ -738,8 +744,14 @@ struct dwc2_dregs_backup {
*/
struct dwc2_hregs_backup {
u32 hcfg;
+ u32 hflbaddr;
u32 haintmsk;
+ u32 hcchar[MAX_EPS_CHANNELS];
+ u32 hcsplt[MAX_EPS_CHANNELS];
u32 hcintmsk[MAX_EPS_CHANNELS];
+ u32 hctsiz[MAX_EPS_CHANNELS];
+ u32 hcidma[MAX_EPS_CHANNELS];
+ u32 hcidmab[MAX_EPS_CHANNELS];
u32 hprt0;
u32 hfir;
u32 hptxfsiz;
@@ -1086,6 +1098,7 @@ struct dwc2_hsotg {
bool needs_byte_swap;
/* DWC OTG HW Release versions */
+#define DWC2_CORE_REV_4_30a 0x4f54430a
#define DWC2_CORE_REV_2_71a 0x4f54271a
#define DWC2_CORE_REV_2_72a 0x4f54272a
#define DWC2_CORE_REV_2_80a 0x4f54280a
@@ -1323,6 +1336,7 @@ int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg);
int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg);
void dwc2_enable_acg(struct dwc2_hsotg *hsotg);
+void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg, bool remotewakeup);
/* This function should be called on every hardware interrupt. */
irqreturn_t dwc2_handle_common_intr(int irq, void *dev);
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 158ede7538548..26d752a4c3ca9 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -297,7 +297,8 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
/* Exit gadget mode clock gating. */
if (hsotg->params.power_down ==
- DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
+ !hsotg->params.no_clock_gating)
dwc2_gadget_exit_clock_gating(hsotg, 0);
}
@@ -322,10 +323,11 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
* @hsotg: Programming view of DWC_otg controller
*
*/
-static void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg)
+void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg, bool remotewakeup)
{
u32 glpmcfg;
- u32 i = 0;
+ u32 pcgctl;
+ u32 dctl;
if (hsotg->lx_state != DWC2_L1) {
dev_err(hsotg->dev, "Core isn't in DWC2_L1 state\n");
@@ -334,37 +336,57 @@ static void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg)
glpmcfg = dwc2_readl(hsotg, GLPMCFG);
if (dwc2_is_device_mode(hsotg)) {
- dev_dbg(hsotg->dev, "Exit from L1 state\n");
+ dev_dbg(hsotg->dev, "Exit from L1 state, remotewakeup=%d\n", remotewakeup);
glpmcfg &= ~GLPMCFG_ENBLSLPM;
- glpmcfg &= ~GLPMCFG_HIRD_THRES_EN;
+ glpmcfg &= ~GLPMCFG_HIRD_THRES_MASK;
dwc2_writel(hsotg, glpmcfg, GLPMCFG);
- do {
- glpmcfg = dwc2_readl(hsotg, GLPMCFG);
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
+ pcgctl &= ~PCGCTL_ENBL_SLEEP_GATING;
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
- if (!(glpmcfg & (GLPMCFG_COREL1RES_MASK |
- GLPMCFG_L1RESUMEOK | GLPMCFG_SLPSTS)))
- break;
+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
+ if (glpmcfg & GLPMCFG_ENBESL) {
+ glpmcfg |= GLPMCFG_RSTRSLPSTS;
+ dwc2_writel(hsotg, glpmcfg, GLPMCFG);
+ }
+
+ if (remotewakeup) {
+ if (dwc2_hsotg_wait_bit_set(hsotg, GLPMCFG, GLPMCFG_L1RESUMEOK, 1000)) {
+ dev_warn(hsotg->dev, "%s: timeout GLPMCFG_L1RESUMEOK\n", __func__);
+ goto fail;
+ return;
+ }
+
+ dctl = dwc2_readl(hsotg, DCTL);
+ dctl |= DCTL_RMTWKUPSIG;
+ dwc2_writel(hsotg, dctl, DCTL);
- udelay(1);
- } while (++i < 200);
+ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, GINTSTS_WKUPINT, 1000)) {
+ dev_warn(hsotg->dev, "%s: timeout GINTSTS_WKUPINT\n", __func__);
+ goto fail;
+ return;
+ }
+ }
- if (i == 200) {
- dev_err(hsotg->dev, "Failed to exit L1 sleep state in 200us.\n");
+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
+ if (glpmcfg & GLPMCFG_COREL1RES_MASK || glpmcfg & GLPMCFG_SLPSTS ||
+ glpmcfg & GLPMCFG_L1RESUMEOK) {
+ goto fail;
return;
}
- dwc2_gadget_init_lpm(hsotg);
+
+ /* Inform gadget to exit from L1 */
+ call_gadget(hsotg, resume);
+ /* Change to L0 state */
+ hsotg->lx_state = DWC2_L0;
+ hsotg->bus_suspended = false;
+fail: dwc2_gadget_init_lpm(hsotg);
} else {
/* TODO */
dev_err(hsotg->dev, "Host side LPM is not supported.\n");
return;
}
-
- /* Change to L0 state */
- hsotg->lx_state = DWC2_L0;
-
- /* Inform gadget to exit from L1 */
- call_gadget(hsotg, resume);
}
/*
@@ -385,7 +407,7 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "%s lxstate = %d\n", __func__, hsotg->lx_state);
if (hsotg->lx_state == DWC2_L1) {
- dwc2_wakeup_from_lpm_l1(hsotg);
+ dwc2_wakeup_from_lpm_l1(hsotg, false);
return;
}
@@ -408,7 +430,8 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
/* Exit gadget mode clock gating. */
if (hsotg->params.power_down ==
- DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
+ !hsotg->params.no_clock_gating)
dwc2_gadget_exit_clock_gating(hsotg, 0);
} else {
/* Change to L0 state */
@@ -425,7 +448,8 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
}
if (hsotg->params.power_down ==
- DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
+ !hsotg->params.no_clock_gating)
dwc2_host_exit_clock_gating(hsotg, 1);
/*
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index b517a7216de22..b2f6da5b65ccd 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1415,6 +1415,10 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
ep->name, req, req->length, req->buf, req->no_interrupt,
req->zero, req->short_not_ok);
+ if (hs->lx_state == DWC2_L1) {
+ dwc2_wakeup_from_lpm_l1(hs, true);
+ }
+
/* Prevent new request submission when controller is suspended */
if (hs->lx_state != DWC2_L0) {
dev_dbg(hs->dev, "%s: submit request only in active state\n",
@@ -3727,6 +3731,12 @@ irq_retry:
if (hsotg->in_ppd && hsotg->lx_state == DWC2_L2)
dwc2_exit_partial_power_down(hsotg, 0, true);
+ /* Exit gadget mode clock gating. */
+ if (hsotg->params.power_down ==
+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
+ !hsotg->params.no_clock_gating)
+ dwc2_gadget_exit_clock_gating(hsotg, 0);
+
hsotg->lx_state = DWC2_L0;
}
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 35c7a4df8e717..dd5b1c5691e11 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2701,8 +2701,11 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
hsotg->available_host_channels--;
}
qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
- if (dwc2_assign_and_init_hc(hsotg, qh))
+ if (dwc2_assign_and_init_hc(hsotg, qh)) {
+ if (hsotg->params.uframe_sched)
+ hsotg->available_host_channels++;
break;
+ }
/*
* Move the QH from the periodic ready schedule to the
@@ -2735,8 +2738,11 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
hsotg->available_host_channels--;
}
- if (dwc2_assign_and_init_hc(hsotg, qh))
+ if (dwc2_assign_and_init_hc(hsotg, qh)) {
+ if (hsotg->params.uframe_sched)
+ hsotg->available_host_channels++;
break;
+ }
/*
* Move the QH from the non-periodic inactive schedule to the
@@ -4143,6 +4149,8 @@ void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
urb->actual_length);
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ if (!hsotg->params.dma_desc_enable)
+ urb->start_frame = qtd->qh->start_active_frame;
urb->error_count = dwc2_hcd_urb_get_error_count(qtd->urb);
for (i = 0; i < urb->number_of_packets; ++i) {
urb->iso_frame_desc[i].actual_length =
@@ -4649,7 +4657,7 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
}
if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
- hsotg->bus_suspended) {
+ hsotg->bus_suspended && !hsotg->params.no_clock_gating) {
if (dwc2_is_device_mode(hsotg))
dwc2_gadget_exit_clock_gating(hsotg, 0);
else
@@ -5406,9 +5414,16 @@ int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
/* Backup Host regs */
hr = &hsotg->hr_backup;
hr->hcfg = dwc2_readl(hsotg, HCFG);
+ hr->hflbaddr = dwc2_readl(hsotg, HFLBADDR);
hr->haintmsk = dwc2_readl(hsotg, HAINTMSK);
- for (i = 0; i < hsotg->params.host_channels; ++i)
+ for (i = 0; i < hsotg->params.host_channels; ++i) {
+ hr->hcchar[i] = dwc2_readl(hsotg, HCCHAR(i));
+ hr->hcsplt[i] = dwc2_readl(hsotg, HCSPLT(i));
hr->hcintmsk[i] = dwc2_readl(hsotg, HCINTMSK(i));
+ hr->hctsiz[i] = dwc2_readl(hsotg, HCTSIZ(i));
+ hr->hcidma[i] = dwc2_readl(hsotg, HCDMA(i));
+ hr->hcidmab[i] = dwc2_readl(hsotg, HCDMAB(i));
+ }
hr->hprt0 = dwc2_read_hprt0(hsotg);
hr->hfir = dwc2_readl(hsotg, HFIR);
@@ -5442,10 +5457,17 @@ int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
hr->valid = false;
dwc2_writel(hsotg, hr->hcfg, HCFG);
+ dwc2_writel(hsotg, hr->hflbaddr, HFLBADDR);
dwc2_writel(hsotg, hr->haintmsk, HAINTMSK);
- for (i = 0; i < hsotg->params.host_channels; ++i)
+ for (i = 0; i < hsotg->params.host_channels; ++i) {
+ dwc2_writel(hsotg, hr->hcchar[i], HCCHAR(i));
+ dwc2_writel(hsotg, hr->hcsplt[i], HCSPLT(i));
dwc2_writel(hsotg, hr->hcintmsk[i], HCINTMSK(i));
+ dwc2_writel(hsotg, hr->hctsiz[i], HCTSIZ(i));
+ dwc2_writel(hsotg, hr->hcidma[i], HCDMA(i));
+ dwc2_writel(hsotg, hr->hcidmab[i], HCDMAB(i));
+ }
dwc2_writel(hsotg, hr->hprt0, HPRT0);
dwc2_writel(hsotg, hr->hfir, HFIR);
@@ -5610,10 +5632,12 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
dwc2_writel(hsotg, hr->hcfg, HCFG);
/* De-assert Wakeup Logic */
- gpwrdn = dwc2_readl(hsotg, GPWRDN);
- gpwrdn &= ~GPWRDN_PMUACTV;
- dwc2_writel(hsotg, gpwrdn, GPWRDN);
- udelay(10);
+ if (!(rem_wakeup && hsotg->hw_params.snpsid >= DWC2_CORE_REV_4_30a)) {
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn &= ~GPWRDN_PMUACTV;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+ }
hprt0 = hr->hprt0;
hprt0 |= HPRT0_PWR;
@@ -5638,6 +5662,13 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
hprt0 |= HPRT0_RES;
dwc2_writel(hsotg, hprt0, HPRT0);
+ /* De-assert Wakeup Logic */
+ if ((rem_wakeup && hsotg->hw_params.snpsid >= DWC2_CORE_REV_4_30a)) {
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn &= ~GPWRDN_PMUACTV;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+ }
/* Wait for Resume time and then program HPRT again */
mdelay(100);
hprt0 &= ~HPRT0_RES;
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 6b4d825e97a2d..994a78ad084b1 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -559,7 +559,7 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
idx = qh->td_last;
inc = qh->host_interval;
hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
- cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
+ cur_idx = idx;
next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);
/*
@@ -866,20 +866,27 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
{
struct dwc2_dma_desc *dma_desc;
struct dwc2_hcd_iso_packet_desc *frame_desc;
+ u16 frame_desc_idx;
+ struct urb *usb_urb;
u16 remain = 0;
int rc = 0;
if (!qtd->urb)
return -EINVAL;
+ usb_urb = qtd->urb->priv;
+
dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
sizeof(struct dwc2_dma_desc)),
sizeof(struct dwc2_dma_desc),
DMA_FROM_DEVICE);
dma_desc = &qh->desc_list[idx];
+ frame_desc_idx = (idx - qtd->isoc_td_first) & (usb_urb->number_of_packets - 1);
- frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
+ frame_desc = &qtd->urb->iso_descs[frame_desc_idx];
+ if (idx == qtd->isoc_td_first)
+ usb_urb->start_frame = dwc2_hcd_get_frame_number(hsotg);
dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
if (chan->ep_is_in)
remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
@@ -900,7 +907,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
frame_desc->status = 0;
}
- if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
+ if (++qtd->isoc_frame_index == usb_urb->number_of_packets) {
/*
* urb->status is not used for isoc transfers here. The
* individual frame_desc status are used instead.
@@ -1005,11 +1012,11 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
return;
idx = dwc2_desclist_idx_inc(idx, qh->host_interval,
chan->speed);
- if (!rc)
+ if (rc == 0)
continue;
- if (rc == DWC2_CMPL_DONE)
- break;
+ if (rc == DWC2_CMPL_DONE || rc == DWC2_CMPL_STOP)
+ goto stop_scan;
/* rc == DWC2_CMPL_STOP */
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 13abdd5f67529..12f8c7f86dc98 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -698,7 +698,7 @@
#define TXSTS_QTOP_TOKEN_MASK (0x3 << 25)
#define TXSTS_QTOP_TOKEN_SHIFT 25
#define TXSTS_QTOP_TERMINATE BIT(24)
-#define TXSTS_QSPCAVAIL_MASK (0xff << 16)
+#define TXSTS_QSPCAVAIL_MASK (0x7f << 16)
#define TXSTS_QSPCAVAIL_SHIFT 16
#define TXSTS_FSPCAVAIL_MASK (0xffff << 0)
#define TXSTS_FSPCAVAIL_SHIFT 0
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index b1d48019e944f..7b84416dfc2b1 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -331,7 +331,7 @@ static void dwc2_driver_remove(struct platform_device *dev)
/* Exit clock gating when driver is removed. */
if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
- hsotg->bus_suspended) {
+ hsotg->bus_suspended && !hsotg->params.no_clock_gating) {
if (dwc2_is_device_mode(hsotg))
dwc2_gadget_exit_clock_gating(hsotg, 0);
else
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 5fc27b20df630..31078f3d41b88 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -131,7 +131,7 @@ config USB_DWC3_QCOM
tristate "Qualcomm Platform"
depends on ARCH_QCOM || COMPILE_TEST
depends on EXTCON || !EXTCON
- depends on (OF || ACPI)
+ depends on OF
default USB_DWC3
help
Some Qualcomm SoCs use DesignWare Core IP for USB2/3
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 3e55838c00014..31684cdaaae30 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1519,6 +1519,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
else
dwc->sysdev = dwc->dev;
+ dwc->sys_wakeup = device_may_wakeup(dwc->sysdev);
+
ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name);
if (ret >= 0) {
dwc->usb_psy = power_supply_get_by_name(usb_psy_name);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index e120611a5174f..7e80dd3d466b8 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -755,6 +755,7 @@ struct dwc3_ep {
#define DWC3_EP_PENDING_CLEAR_STALL BIT(11)
#define DWC3_EP_TXFIFO_RESIZED BIT(12)
#define DWC3_EP_DELAY_STOP BIT(13)
+#define DWC3_EP_RESOURCE_ALLOCATED BIT(14)
/* This last one is specific to EP0 */
#define DWC3_EP0_DIR_IN BIT(31)
@@ -1132,6 +1133,7 @@ struct dwc3_scratchpad_array {
* 3 - Reserved
* @dis_metastability_quirk: set to disable metastability quirk.
* @dis_split_quirk: set to disable split boundary.
+ * @sys_wakeup: set if the device may do system wakeup.
* @wakeup_configured: set if the device is configured for remote wakeup.
* @suspended: set to track suspend event due to U3/L2.
* @imod_interval: set the interrupt moderation interval in 250ns
@@ -1257,6 +1259,7 @@ struct dwc3 {
#define DWC31_REVISION_170A 0x3137302a
#define DWC31_REVISION_180A 0x3138302a
#define DWC31_REVISION_190A 0x3139302a
+#define DWC31_REVISION_200A 0x3230302a
#define DWC32_REVISION_ANY 0x0
#define DWC32_REVISION_100A 0x3130302a
@@ -1355,6 +1358,7 @@ struct dwc3 {
unsigned dis_split_quirk:1;
unsigned async_callbacks:1;
+ unsigned sys_wakeup:1;
unsigned wakeup_configured:1;
unsigned suspended:1;
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
index 90a587bc29b74..fad151e78fd66 100644
--- a/drivers/usb/dwc3/dwc3-am62.c
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -97,9 +97,15 @@
#define USBSS_VBUS_STAT_SESSVALID BIT(2)
#define USBSS_VBUS_STAT_VBUSVALID BIT(0)
-/* Mask for PHY PLL REFCLK */
+/* USB_PHY_CTRL register bits in CTRL_MMR */
+#define PHY_CORE_VOLTAGE_MASK BIT(31)
#define PHY_PLL_REFCLK_MASK GENMASK(3, 0)
+/* USB PHY2 register offsets */
+#define USB_PHY_PLL_REG12 0x130
+#define USB_PHY_PLL_LDO_REF_EN BIT(5)
+#define USB_PHY_PLL_LDO_REF_EN_EN BIT(4)
+
#define DWC3_AM62_AUTOSUSPEND_DELAY 100
struct dwc3_am62 {
@@ -162,6 +168,13 @@ static int phy_syscon_pll_refclk(struct dwc3_am62 *am62)
am62->offset = args.args[0];
+ /* Core voltage. The PHY_CORE_VOLTAGE bit is recommended to always be 0. */
+ ret = regmap_update_bits(am62->syscon, am62->offset, PHY_CORE_VOLTAGE_MASK, 0);
+ if (ret) {
+ dev_err(dev, "failed to set phy core voltage\n");
+ return ret;
+ }
+
ret = regmap_update_bits(am62->syscon, am62->offset, PHY_PLL_REFCLK_MASK, am62->rate_code);
if (ret) {
dev_err(dev, "failed to set phy pll reference clock rate\n");
@@ -176,8 +189,9 @@ static int dwc3_ti_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *node = pdev->dev.of_node;
struct dwc3_am62 *am62;
- int i, ret;
unsigned long rate;
+ void __iomem *phy;
+ int i, ret;
u32 reg;
am62 = devm_kzalloc(dev, sizeof(*am62), GFP_KERNEL);
@@ -219,6 +233,17 @@ static int dwc3_ti_probe(struct platform_device *pdev)
if (ret)
return ret;
+ /* Workaround Errata i2409 */
+ phy = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "can't map PHY IOMEM resource. Won't apply i2409 fix.\n");
+ phy = NULL;
+ } else {
+ reg = readl(phy + USB_PHY_PLL_REG12);
+ reg |= USB_PHY_PLL_LDO_REF_EN | USB_PHY_PLL_LDO_REF_EN_EN;
+ writel(reg, phy + USB_PHY_PLL_REG12);
+ }
+
/* VBUS divider select */
am62->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
reg = dwc3_ti_readl(am62, USBSS_PHY_CONFIG);
@@ -267,21 +292,15 @@ err_pm_disable:
return ret;
}
-static int dwc3_ti_remove_core(struct device *dev, void *c)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- platform_device_unregister(pdev);
- return 0;
-}
-
static void dwc3_ti_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dwc3_am62 *am62 = platform_get_drvdata(pdev);
u32 reg;
- device_for_each_child(dev, NULL, dwc3_ti_remove_core);
+ pm_runtime_get_sync(dev);
+ device_init_wakeup(dev, false);
+ of_platform_depopulate(dev);
/* Clear mode valid bit */
reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL);
@@ -289,7 +308,6 @@ static void dwc3_ti_remove(struct platform_device *pdev)
dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg);
pm_runtime_put_sync(dev);
- clk_disable_unprepare(am62->usb2_refclk);
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
}
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index d1539fc9eabda..be7be00ecb349 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -52,8 +52,7 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "rockchip,rk3399-dwc3"))
simple->need_reset = true;
- simple->resets = of_reset_control_array_get(np, false, true,
- true);
+ simple->resets = of_reset_control_array_get_optional_exclusive(np);
if (IS_ERR(simple->resets)) {
ret = PTR_ERR(simple->resets);
dev_err(dev, "failed to get device resets, err=%d\n", ret);
@@ -173,6 +172,7 @@ static const struct of_device_id of_dwc3_simple_match[] = {
{ .compatible = "sprd,sc9860-dwc3" },
{ .compatible = "allwinner,sun50i-h6-dwc3" },
{ .compatible = "hisilicon,hi3670-dwc3" },
+ { .compatible = "hisilicon,hi3798mv200-dwc3" },
{ .compatible = "intel,keembay-dwc3" },
{ /* Sentinel */ }
};
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 39564e17f3b07..497deed38c0c1 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -51,7 +51,6 @@
#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
#define PCI_DEVICE_ID_INTEL_MTLS 0x7f6f
#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
-#define PCI_DEVICE_ID_INTEL_ARLH 0x7ec1
#define PCI_DEVICE_ID_INTEL_ARLH_PCH 0x777e
#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
#define PCI_DEVICE_ID_AMD_MR 0x163a
@@ -423,7 +422,6 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, MTLP, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, MTL, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, MTLS, &dwc3_pci_intel_swnode) },
- { PCI_DEVICE_DATA(INTEL, ARLH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, ARLH_PCH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, TGL, &dwc3_pci_intel_swnode) },
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index dbd6a5b2b2892..f6b2fab49d5e6 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -4,7 +4,6 @@
* Inspired by dwc3-of-simple.c
*/
-#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clk.h>
@@ -53,22 +52,10 @@
#define APPS_USB_AVG_BW 0
#define APPS_USB_PEAK_BW MBps_to_icc(40)
-struct dwc3_acpi_pdata {
- u32 qscratch_base_offset;
- u32 qscratch_base_size;
- u32 dwc3_core_base_size;
- int qusb2_phy_irq_index;
- int dp_hs_phy_irq_index;
- int dm_hs_phy_irq_index;
- int ss_phy_irq_index;
- bool is_urs;
-};
-
struct dwc3_qcom {
struct device *dev;
void __iomem *qscratch_base;
struct platform_device *dwc3;
- struct platform_device *urs_usb;
struct clk **clks;
int num_clocks;
struct reset_control *resets;
@@ -84,8 +71,6 @@ struct dwc3_qcom {
struct notifier_block vbus_nb;
struct notifier_block host_nb;
- const struct dwc3_acpi_pdata *acpi_pdata;
-
enum usb_dr_mode mode;
bool is_suspended;
bool pm_suspended;
@@ -248,9 +233,6 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
struct device *dev = qcom->dev;
int ret;
- if (has_acpi_companion(dev))
- return 0;
-
qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr");
if (IS_ERR(qcom->icc_path_ddr)) {
return dev_err_probe(dev, PTR_ERR(qcom->icc_path_ddr),
@@ -519,31 +501,13 @@ static void dwc3_qcom_select_utmi_clk(struct dwc3_qcom *qcom)
PIPE_UTMI_CLK_DIS);
}
-static int dwc3_qcom_get_irq(struct platform_device *pdev,
- const char *name, int num)
-{
- struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
- struct platform_device *pdev_irq = qcom->urs_usb ? qcom->urs_usb : pdev;
- struct device_node *np = pdev->dev.of_node;
- int ret;
-
- if (np)
- ret = platform_get_irq_byname_optional(pdev_irq, name);
- else
- ret = platform_get_irq_optional(pdev_irq, num);
-
- return ret;
-}
-
static int dwc3_qcom_setup_irq(struct platform_device *pdev)
{
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
- const struct dwc3_acpi_pdata *pdata = qcom->acpi_pdata;
int irq;
int ret;
- irq = dwc3_qcom_get_irq(pdev, "qusb2_phy",
- pdata ? pdata->qusb2_phy_irq_index : -1);
+ irq = platform_get_irq_byname_optional(pdev, "qusb2_phy");
if (irq > 0) {
/* Keep wakeup interrupts disabled until suspend */
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
@@ -557,8 +521,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
qcom->qusb2_phy_irq = irq;
}
- irq = dwc3_qcom_get_irq(pdev, "dp_hs_phy_irq",
- pdata ? pdata->dp_hs_phy_irq_index : -1);
+ irq = platform_get_irq_byname_optional(pdev, "dp_hs_phy_irq");
if (irq > 0) {
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
@@ -571,8 +534,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
qcom->dp_hs_phy_irq = irq;
}
- irq = dwc3_qcom_get_irq(pdev, "dm_hs_phy_irq",
- pdata ? pdata->dm_hs_phy_irq_index : -1);
+ irq = platform_get_irq_byname_optional(pdev, "dm_hs_phy_irq");
if (irq > 0) {
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
@@ -585,8 +547,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
qcom->dm_hs_phy_irq = irq;
}
- irq = dwc3_qcom_get_irq(pdev, "ss_phy_irq",
- pdata ? pdata->ss_phy_irq_index : -1);
+ irq = platform_get_irq_byname_optional(pdev, "ss_phy_irq");
if (irq > 0) {
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
@@ -649,88 +610,6 @@ static int dwc3_qcom_clk_init(struct dwc3_qcom *qcom, int count)
return 0;
}
-static const struct property_entry dwc3_qcom_acpi_properties[] = {
- PROPERTY_ENTRY_STRING("dr_mode", "host"),
- {}
-};
-
-static const struct software_node dwc3_qcom_swnode = {
- .properties = dwc3_qcom_acpi_properties,
-};
-
-static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
-{
- struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
- struct resource *res, *child_res = NULL;
- struct platform_device *pdev_irq = qcom->urs_usb ? qcom->urs_usb :
- pdev;
- int irq;
- int ret;
-
- qcom->dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
- if (!qcom->dwc3)
- return -ENOMEM;
-
- qcom->dwc3->dev.parent = dev;
- qcom->dwc3->dev.type = dev->type;
- qcom->dwc3->dev.dma_mask = dev->dma_mask;
- qcom->dwc3->dev.dma_parms = dev->dma_parms;
- qcom->dwc3->dev.coherent_dma_mask = dev->coherent_dma_mask;
-
- child_res = kcalloc(2, sizeof(*child_res), GFP_KERNEL);
- if (!child_res) {
- platform_device_put(qcom->dwc3);
- return -ENOMEM;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get memory resource\n");
- ret = -ENODEV;
- goto out;
- }
-
- child_res[0].flags = res->flags;
- child_res[0].start = res->start;
- child_res[0].end = child_res[0].start +
- qcom->acpi_pdata->dwc3_core_base_size;
-
- irq = platform_get_irq(pdev_irq, 0);
- if (irq < 0) {
- ret = irq;
- goto out;
- }
- child_res[1].flags = IORESOURCE_IRQ;
- child_res[1].start = child_res[1].end = irq;
-
- ret = platform_device_add_resources(qcom->dwc3, child_res, 2);
- if (ret) {
- dev_err(&pdev->dev, "failed to add resources\n");
- goto out;
- }
-
- ret = device_add_software_node(&qcom->dwc3->dev, &dwc3_qcom_swnode);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add properties\n");
- goto out;
- }
-
- ret = platform_device_add(qcom->dwc3);
- if (ret) {
- dev_err(&pdev->dev, "failed to add device\n");
- device_remove_software_node(&qcom->dwc3->dev);
- goto out;
- }
- kfree(child_res);
- return 0;
-
-out:
- platform_device_put(qcom->dwc3);
- kfree(child_res);
- return ret;
-}
-
static int dwc3_qcom_of_register_core(struct platform_device *pdev)
{
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
@@ -763,57 +642,12 @@ node_put:
return ret;
}
-static struct platform_device *dwc3_qcom_create_urs_usb_platdev(struct device *dev)
-{
- struct platform_device *urs_usb = NULL;
- struct fwnode_handle *fwh;
- struct acpi_device *adev;
- char name[8];
- int ret;
- int id;
-
- /* Figure out device id */
- ret = sscanf(fwnode_get_name(dev->fwnode), "URS%d", &id);
- if (!ret)
- return NULL;
-
- /* Find the child using name */
- snprintf(name, sizeof(name), "USB%d", id);
- fwh = fwnode_get_named_child_node(dev->fwnode, name);
- if (!fwh)
- return NULL;
-
- adev = to_acpi_device_node(fwh);
- if (!adev)
- goto err_put_handle;
-
- urs_usb = acpi_create_platform_device(adev, NULL);
- if (IS_ERR_OR_NULL(urs_usb))
- goto err_put_handle;
-
- return urs_usb;
-
-err_put_handle:
- fwnode_handle_put(fwh);
-
- return urs_usb;
-}
-
-static void dwc3_qcom_destroy_urs_usb_platdev(struct platform_device *urs_usb)
-{
- struct fwnode_handle *fwh = urs_usb->dev.fwnode;
-
- platform_device_unregister(urs_usb);
- fwnode_handle_put(fwh);
-}
-
static int dwc3_qcom_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct dwc3_qcom *qcom;
- struct resource *res, *parent_res = NULL;
- struct resource local_res;
+ struct resource *res;
int ret, i;
bool ignore_pipe_clk;
bool wakeup_source;
@@ -825,14 +659,6 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qcom);
qcom->dev = &pdev->dev;
- if (has_acpi_companion(dev)) {
- qcom->acpi_pdata = acpi_device_get_match_data(dev);
- if (!qcom->acpi_pdata) {
- dev_err(&pdev->dev, "no supporting ACPI device data\n");
- return -EINVAL;
- }
- }
-
qcom->resets = devm_reset_control_array_get_optional_exclusive(dev);
if (IS_ERR(qcom->resets)) {
return dev_err_probe(&pdev->dev, PTR_ERR(qcom->resets),
@@ -861,40 +687,16 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (np) {
- parent_res = res;
- } else {
- memcpy(&local_res, res, sizeof(struct resource));
- parent_res = &local_res;
-
- parent_res->start = res->start +
- qcom->acpi_pdata->qscratch_base_offset;
- parent_res->end = parent_res->start +
- qcom->acpi_pdata->qscratch_base_size;
-
- if (qcom->acpi_pdata->is_urs) {
- qcom->urs_usb = dwc3_qcom_create_urs_usb_platdev(dev);
- if (IS_ERR_OR_NULL(qcom->urs_usb)) {
- dev_err(dev, "failed to create URS USB platdev\n");
- if (!qcom->urs_usb)
- ret = -ENODEV;
- else
- ret = PTR_ERR(qcom->urs_usb);
- goto clk_disable;
- }
- }
- }
-
- qcom->qscratch_base = devm_ioremap_resource(dev, parent_res);
+ qcom->qscratch_base = devm_ioremap_resource(dev, res);
if (IS_ERR(qcom->qscratch_base)) {
ret = PTR_ERR(qcom->qscratch_base);
- goto free_urs;
+ goto clk_disable;
}
ret = dwc3_qcom_setup_irq(pdev);
if (ret) {
dev_err(dev, "failed to setup IRQs, err=%d\n", ret);
- goto free_urs;
+ goto clk_disable;
}
/*
@@ -906,14 +708,10 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (ignore_pipe_clk)
dwc3_qcom_select_utmi_clk(qcom);
- if (np)
- ret = dwc3_qcom_of_register_core(pdev);
- else
- ret = dwc3_qcom_acpi_register_core(pdev);
-
+ ret = dwc3_qcom_of_register_core(pdev);
if (ret) {
dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
- goto free_urs;
+ goto clk_disable;
}
ret = dwc3_qcom_interconnect_init(qcom);
@@ -945,16 +743,8 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
interconnect_exit:
dwc3_qcom_interconnect_exit(qcom);
depopulate:
- if (np) {
- of_platform_depopulate(&pdev->dev);
- } else {
- device_remove_software_node(&qcom->dwc3->dev);
- platform_device_del(qcom->dwc3);
- }
+ of_platform_depopulate(&pdev->dev);
platform_device_put(qcom->dwc3);
-free_urs:
- if (qcom->urs_usb)
- dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
clk_disable:
for (i = qcom->num_clocks - 1; i >= 0; i--) {
clk_disable_unprepare(qcom->clks[i]);
@@ -969,21 +759,12 @@ reset_assert:
static void dwc3_qcom_remove(struct platform_device *pdev)
{
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
- struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
int i;
- if (np) {
- of_platform_depopulate(&pdev->dev);
- } else {
- device_remove_software_node(&qcom->dwc3->dev);
- platform_device_del(qcom->dwc3);
- }
+ of_platform_depopulate(&pdev->dev);
platform_device_put(qcom->dwc3);
- if (qcom->urs_usb)
- dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
-
for (i = qcom->num_clocks - 1; i >= 0; i--) {
clk_disable_unprepare(qcom->clks[i]);
clk_put(qcom->clks[i]);
@@ -1053,38 +834,6 @@ static const struct of_device_id dwc3_qcom_of_match[] = {
};
MODULE_DEVICE_TABLE(of, dwc3_qcom_of_match);
-#ifdef CONFIG_ACPI
-static const struct dwc3_acpi_pdata sdm845_acpi_pdata = {
- .qscratch_base_offset = SDM845_QSCRATCH_BASE_OFFSET,
- .qscratch_base_size = SDM845_QSCRATCH_SIZE,
- .dwc3_core_base_size = SDM845_DWC3_CORE_SIZE,
- .qusb2_phy_irq_index = 1,
- .dp_hs_phy_irq_index = 4,
- .dm_hs_phy_irq_index = 3,
- .ss_phy_irq_index = 2
-};
-
-static const struct dwc3_acpi_pdata sdm845_acpi_urs_pdata = {
- .qscratch_base_offset = SDM845_QSCRATCH_BASE_OFFSET,
- .qscratch_base_size = SDM845_QSCRATCH_SIZE,
- .dwc3_core_base_size = SDM845_DWC3_CORE_SIZE,
- .qusb2_phy_irq_index = 1,
- .dp_hs_phy_irq_index = 4,
- .dm_hs_phy_irq_index = 3,
- .ss_phy_irq_index = 2,
- .is_urs = true,
-};
-
-static const struct acpi_device_id dwc3_qcom_acpi_match[] = {
- { "QCOM2430", (unsigned long)&sdm845_acpi_pdata },
- { "QCOM0304", (unsigned long)&sdm845_acpi_urs_pdata },
- { "QCOM0497", (unsigned long)&sdm845_acpi_urs_pdata },
- { "QCOM04A6", (unsigned long)&sdm845_acpi_pdata },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, dwc3_qcom_acpi_match);
-#endif
-
static struct platform_driver dwc3_qcom_driver = {
.probe = dwc3_qcom_probe,
.remove_new = dwc3_qcom_remove,
@@ -1092,7 +841,6 @@ static struct platform_driver dwc3_qcom_driver = {
.name = "dwc3-qcom",
.pm = &dwc3_qcom_dev_pm_ops,
.of_match_table = dwc3_qcom_of_match,
- .acpi_match_table = ACPI_PTR(dwc3_qcom_acpi_match),
},
};
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 6ae8a36f21cf6..d96ffbe520397 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -226,7 +226,8 @@ void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
/* reinitialize physical ep1 */
dep = dwc->eps[1];
- dep->flags = DWC3_EP_ENABLED;
+ dep->flags &= DWC3_EP_RESOURCE_ALLOCATED;
+ dep->flags |= DWC3_EP_ENABLED;
/* stall is always issued on EP0 */
dep = dwc->eps[0];
@@ -646,6 +647,7 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
return -EINVAL;
case USB_STATE_ADDRESS:
+ dwc3_gadget_start_config(dwc, 2);
dwc3_gadget_clear_tx_fifos(dwc);
ret = dwc3_ep0_delegate_req(dwc, ctrl);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 28f49400f3e8b..4df2661f66751 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -519,77 +519,56 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
+ int ret;
+
+ if (dep->flags & DWC3_EP_RESOURCE_ALLOCATED)
+ return 0;
memset(&params, 0x00, sizeof(params));
params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
- return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
+ ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
&params);
+ if (ret)
+ return ret;
+
+ dep->flags |= DWC3_EP_RESOURCE_ALLOCATED;
+ return 0;
}
/**
- * dwc3_gadget_start_config - configure ep resources
- * @dep: endpoint that is being enabled
- *
- * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
- * completion, it will set Transfer Resource for all available endpoints.
- *
- * The assignment of transfer resources cannot perfectly follow the data book
- * due to the fact that the controller driver does not have all knowledge of the
- * configuration in advance. It is given this information piecemeal by the
- * composite gadget framework after every SET_CONFIGURATION and
- * SET_INTERFACE. Trying to follow the databook programming model in this
- * scenario can cause errors. For two reasons:
- *
- * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every
- * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is
- * incorrect in the scenario of multiple interfaces.
- *
- * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new
- * endpoint on alt setting (8.1.6).
- *
- * The following simplified method is used instead:
+ * dwc3_gadget_start_config - reset endpoint resources
+ * @dwc: pointer to the DWC3 context
+ * @resource_index: DEPSTARTCFG.XferRscIdx value (must be 0 or 2)
*
- * All hardware endpoints can be assigned a transfer resource and this setting
- * will stay persistent until either a core reset or hibernation. So whenever we
- * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do
- * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are
- * guaranteed that there are as many transfer resources as endpoints.
+ * Set resource_index=0 to reset all endpoints' resource allocation. Do this as
+ * part of the power-on/soft-reset initialization.
*
- * This function is called for each endpoint when it is being enabled but is
- * triggered only when called for EP0-out, which always happens first, and which
- * should only happen in one of the above conditions.
+ * Set resource_index=2 to reset only non-control endpoints' resources. Do this
+ * on receiving the SET_CONFIGURATION request or hibernation resume.
*/
-static int dwc3_gadget_start_config(struct dwc3_ep *dep)
+int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index)
{
struct dwc3_gadget_ep_cmd_params params;
- struct dwc3 *dwc;
u32 cmd;
int i;
int ret;
- if (dep->number)
- return 0;
+ if (resource_index != 0 && resource_index != 2)
+ return -EINVAL;
memset(&params, 0x00, sizeof(params));
cmd = DWC3_DEPCMD_DEPSTARTCFG;
- dwc = dep->dwc;
+ cmd |= DWC3_DEPCMD_PARAM(resource_index);
- ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+ ret = dwc3_send_gadget_ep_cmd(dwc->eps[0], cmd, &params);
if (ret)
return ret;
- for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
- struct dwc3_ep *dep = dwc->eps[i];
-
- if (!dep)
- continue;
-
- ret = dwc3_gadget_set_xfer_resource(dep);
- if (ret)
- return ret;
- }
+ /* Reset resource allocation flags */
+ for (i = resource_index; i < dwc->num_eps && dwc->eps[i]; i++)
+ dwc->eps[i]->flags &= ~DWC3_EP_RESOURCE_ALLOCATED;
return 0;
}
@@ -884,16 +863,18 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
ret = dwc3_gadget_resize_tx_fifos(dep);
if (ret)
return ret;
-
- ret = dwc3_gadget_start_config(dep);
- if (ret)
- return ret;
}
ret = dwc3_gadget_set_ep_config(dep, action);
if (ret)
return ret;
+ if (!(dep->flags & DWC3_EP_RESOURCE_ALLOCATED)) {
+ ret = dwc3_gadget_set_xfer_resource(dep);
+ if (ret)
+ return ret;
+ }
+
if (!(dep->flags & DWC3_EP_ENABLED)) {
struct dwc3_trb *trb_st_hw;
struct dwc3_trb *trb_link;
@@ -1047,7 +1028,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dep->stream_capable = false;
dep->type = 0;
- mask = DWC3_EP_TXFIFO_RESIZED;
+ mask = DWC3_EP_TXFIFO_RESIZED | DWC3_EP_RESOURCE_ALLOCATED;
/*
* dwc3_remove_requests() can exit early if DWC3 EP delayed stop is
* set. Do not clear DEP flags, so that the end transfer command will
@@ -2913,6 +2894,12 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+ ret = dwc3_gadget_start_config(dwc, 0);
+ if (ret) {
+ dev_err(dwc->dev, "failed to config endpoints\n");
+ return ret;
+ }
+
dep = dwc->eps[0];
dep->flags = 0;
ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
@@ -2968,6 +2955,9 @@ static int dwc3_gadget_start(struct usb_gadget *g,
dwc->gadget_driver = driver;
spin_unlock_irqrestore(&dwc->lock, flags);
+ if (dwc->sys_wakeup)
+ device_wakeup_enable(dwc->sysdev);
+
return 0;
}
@@ -2983,6 +2973,9 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
+ if (dwc->sys_wakeup)
+ device_wakeup_disable(dwc->sysdev);
+
spin_lock_irqsave(&dwc->lock, flags);
dwc->gadget_driver = NULL;
dwc->max_cfg_eps = 0;
@@ -3428,7 +3421,7 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
struct dwc3_request *req, const struct dwc3_event_depevt *event,
int status)
{
- struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
+ struct dwc3_trb *trb;
struct scatterlist *sg = req->sg;
struct scatterlist *s;
unsigned int num_queued = req->num_queued_sgs;
@@ -4664,6 +4657,10 @@ int dwc3_gadget_init(struct dwc3 *dwc)
else
dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);
+ /* No system wakeup if no gadget driver bound */
+ if (dwc->sys_wakeup)
+ device_wakeup_disable(dwc->sysdev);
+
return 0;
err5:
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 55a56cf67d736..d73e735e40810 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -119,6 +119,7 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
void dwc3_ep0_send_delayed_status(struct dwc3 *dwc);
void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt);
+int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index);
/**
* dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 43230915323c7..0204787df81d5 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -11,8 +11,52 @@
#include <linux/of.h>
#include <linux/platform_device.h>
+#include "../host/xhci-port.h"
+#include "../host/xhci-ext-caps.h"
+#include "../host/xhci-caps.h"
#include "core.h"
+#define XHCI_HCSPARAMS1 0x4
+#define XHCI_PORTSC_BASE 0x400
+
+/**
+ * dwc3_power_off_all_roothub_ports - Power off all Root hub ports
+ * @dwc: Pointer to our controller context structure
+ */
+static void dwc3_power_off_all_roothub_ports(struct dwc3 *dwc)
+{
+ void __iomem *xhci_regs;
+ u32 op_regs_base;
+ int port_num;
+ u32 offset;
+ u32 reg;
+ int i;
+
+ /* xhci regs are not mapped yet, map them temporarily here */
+ if (dwc->xhci_resources[0].start) {
+ xhci_regs = ioremap(dwc->xhci_resources[0].start, DWC3_XHCI_REGS_END);
+ if (!xhci_regs) {
+ dev_err(dwc->dev, "Failed to ioremap xhci_regs\n");
+ return;
+ }
+
+ op_regs_base = HC_LENGTH(readl(xhci_regs));
+ reg = readl(xhci_regs + XHCI_HCSPARAMS1);
+ port_num = HCS_MAX_PORTS(reg);
+
+ for (i = 1; i <= port_num; i++) {
+ offset = op_regs_base + XHCI_PORTSC_BASE + 0x10 * (i - 1);
+ reg = readl(xhci_regs + offset);
+ reg &= ~PORT_POWER;
+ writel(reg, xhci_regs + offset);
+ }
+
+ iounmap(xhci_regs);
+ } else {
+ dev_err(dwc->dev, "xhci base reg invalid\n");
+ }
+}
+
static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc,
int irq, char *name)
{
@@ -66,6 +110,12 @@ int dwc3_host_init(struct dwc3 *dwc)
int ret, irq;
int prop_idx = 0;
+ /*
+ * Some platforms need to power off all root hub ports immediately after DWC3 is set to host
+ * mode to avoid a VBUS glitch when xHCI gets reset later.
+ */
+ dwc3_power_off_all_roothub_ports(dwc);
+
irq = dwc3_host_get_irq(dwc);
if (irq < 0)
return irq;
@@ -123,6 +173,14 @@ int dwc3_host_init(struct dwc3 *dwc)
goto err;
}
+ if (dwc->sys_wakeup) {
+ /* Restore wakeup setting if switched from device */
+ device_wakeup_enable(dwc->sysdev);
+
+ /* Pass on wakeup setting to the new xhci platform device */
+ device_init_wakeup(&xhci->dev, true);
+ }
+
return 0;
err:
platform_device_put(xhci);
@@ -131,6 +189,9 @@ err:
void dwc3_host_exit(struct dwc3 *dwc)
{
+ if (dwc->sys_wakeup)
+ device_init_wakeup(&dwc->xhci->dev, false);
+
platform_device_unregister(dwc->xhci);
dwc->xhci = NULL;
}
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index b3592bcb0f966..566ff0b1282a8 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -190,6 +190,7 @@ config USB_F_MASS_STORAGE
tristate
config USB_F_FS
+ select DMA_SHARED_BUFFER
tristate
config USB_F_UAC1
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 6bff6cb937891..f855f1fc8e5e1 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -15,6 +15,9 @@
/* #define VERBOSE_DEBUG */
#include <linux/blkdev.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-resv.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/fs_parser.h>
@@ -43,6 +46,10 @@
#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */
+#define DMABUF_ENQUEUE_TIMEOUT_MS 5000
+
+MODULE_IMPORT_NS(DMA_BUF);
+
/* Reference counter handling */
static void ffs_data_get(struct ffs_data *ffs);
static void ffs_data_put(struct ffs_data *ffs);
@@ -124,6 +131,25 @@ struct ffs_ep {
u8 num;
};
+struct ffs_dmabuf_priv {
+ struct list_head entry;
+ struct kref ref;
+ struct ffs_data *ffs;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ enum dma_data_direction dir;
+ spinlock_t lock;
+ u64 context;
+ struct usb_request *req; /* P: ffs->eps_lock */
+ struct usb_ep *ep; /* P: ffs->eps_lock */
+};
+
+struct ffs_dma_fence {
+ struct dma_fence base;
+ struct ffs_dmabuf_priv *priv;
+ struct work_struct work;
+};
+
struct ffs_epfile {
/* Protects ep->ep and ep->req. */
struct mutex mutex;
@@ -197,6 +223,11 @@ struct ffs_epfile {
unsigned char isoc; /* P: ffs->eps_lock */
unsigned char _pad;
+
+ /* Protects dmabufs */
+ struct mutex dmabufs_mutex;
+ struct list_head dmabufs; /* P: dmabufs_mutex */
+ atomic_t seqno;
};
struct ffs_buffer {
@@ -934,31 +965,44 @@ static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
return ret;
}
-static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
+static struct ffs_ep *ffs_epfile_wait_ep(struct file *file)
{
struct ffs_epfile *epfile = file->private_data;
- struct usb_request *req;
struct ffs_ep *ep;
- char *data = NULL;
- ssize_t ret, data_len = -EINVAL;
- int halt;
-
- /* Are we still active? */
- if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
- return -ENODEV;
+ int ret;
/* Wait for endpoint to be enabled */
ep = epfile->ep;
if (!ep) {
if (file->f_flags & O_NONBLOCK)
- return -EAGAIN;
+ return ERR_PTR(-EAGAIN);
ret = wait_event_interruptible(
epfile->ffs->wait, (ep = epfile->ep));
if (ret)
- return -EINTR;
+ return ERR_PTR(-EINTR);
}
+ return ep;
+}
+
+static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
+{
+ struct ffs_epfile *epfile = file->private_data;
+ struct usb_request *req;
+ struct ffs_ep *ep;
+ char *data = NULL;
+ ssize_t ret, data_len = -EINVAL;
+ int halt;
+
+ /* Are we still active? */
+ if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
+ return -ENODEV;
+
+ ep = ffs_epfile_wait_ep(file);
+ if (IS_ERR(ep))
+ return PTR_ERR(ep);
+
/* Do we halt? */
halt = (!io_data->read == !epfile->in);
if (halt && epfile->isoc)
@@ -1258,10 +1302,58 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
return res;
}
+static void ffs_dmabuf_release(struct kref *ref)
+{
+ struct ffs_dmabuf_priv *priv = container_of(ref, struct ffs_dmabuf_priv, ref);
+ struct dma_buf_attachment *attach = priv->attach;
+ struct dma_buf *dmabuf = attach->dmabuf;
+
+ pr_vdebug("FFS DMABUF release\n");
+ dma_resv_lock(dmabuf->resv, NULL);
+ dma_buf_unmap_attachment(attach, priv->sgt, priv->dir);
+ dma_resv_unlock(dmabuf->resv);
+
+ dma_buf_detach(attach->dmabuf, attach);
+ dma_buf_put(dmabuf);
+ kfree(priv);
+}
+
+static void ffs_dmabuf_get(struct dma_buf_attachment *attach)
+{
+ struct ffs_dmabuf_priv *priv = attach->importer_priv;
+
+ kref_get(&priv->ref);
+}
+
+static void ffs_dmabuf_put(struct dma_buf_attachment *attach)
+{
+ struct ffs_dmabuf_priv *priv = attach->importer_priv;
+
+ kref_put(&priv->ref, ffs_dmabuf_release);
+}
+
static int
ffs_epfile_release(struct inode *inode, struct file *file)
{
struct ffs_epfile *epfile = inode->i_private;
+ struct ffs_dmabuf_priv *priv, *tmp;
+ struct ffs_data *ffs = epfile->ffs;
+
+ mutex_lock(&epfile->dmabufs_mutex);
+
+ /* Close all attached DMABUFs */
+ list_for_each_entry_safe(priv, tmp, &epfile->dmabufs, entry) {
+ /* Cancel any pending transfer */
+ spin_lock_irq(&ffs->eps_lock);
+ if (priv->ep && priv->req)
+ usb_ep_dequeue(priv->ep, priv->req);
+ spin_unlock_irq(&ffs->eps_lock);
+
+ list_del(&priv->entry);
+ ffs_dmabuf_put(priv->attach);
+ }
+
+ mutex_unlock(&epfile->dmabufs_mutex);
__ffs_epfile_read_buffer_free(epfile);
ffs_data_closed(epfile->ffs);
@@ -1269,6 +1361,357 @@ ffs_epfile_release(struct inode *inode, struct file *file)
return 0;
}
+static void ffs_dmabuf_cleanup(struct work_struct *work)
+{
+ struct ffs_dma_fence *dma_fence =
+ container_of(work, struct ffs_dma_fence, work);
+ struct ffs_dmabuf_priv *priv = dma_fence->priv;
+ struct dma_buf_attachment *attach = priv->attach;
+ struct dma_fence *fence = &dma_fence->base;
+
+ ffs_dmabuf_put(attach);
+ dma_fence_put(fence);
+}
+
+static void ffs_dmabuf_signal_done(struct ffs_dma_fence *dma_fence, int ret)
+{
+ struct ffs_dmabuf_priv *priv = dma_fence->priv;
+ struct dma_fence *fence = &dma_fence->base;
+ bool cookie = dma_fence_begin_signalling();
+
+ dma_fence_get(fence);
+ fence->error = ret;
+ dma_fence_signal(fence);
+ dma_fence_end_signalling(cookie);
+
+ /*
+ * The fence will be unref'd in ffs_dmabuf_cleanup.
+ * It can't be done here, as the unref functions might try to lock
+ * the resv object, which would deadlock.
+ */
+ INIT_WORK(&dma_fence->work, ffs_dmabuf_cleanup);
+ queue_work(priv->ffs->io_completion_wq, &dma_fence->work);
+}
+
+static void ffs_epfile_dmabuf_io_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ pr_vdebug("FFS: DMABUF transfer complete, status=%d\n", req->status);
+ ffs_dmabuf_signal_done(req->context, req->status);
+ usb_ep_free_request(ep, req);
+}
+
+static const char *ffs_dmabuf_get_driver_name(struct dma_fence *fence)
+{
+ return "functionfs";
+}
+
+static const char *ffs_dmabuf_get_timeline_name(struct dma_fence *fence)
+{
+ return "";
+}
+
+static void ffs_dmabuf_fence_release(struct dma_fence *fence)
+{
+ struct ffs_dma_fence *dma_fence =
+ container_of(fence, struct ffs_dma_fence, base);
+
+ kfree(dma_fence);
+}
+
+static const struct dma_fence_ops ffs_dmabuf_fence_ops = {
+ .get_driver_name = ffs_dmabuf_get_driver_name,
+ .get_timeline_name = ffs_dmabuf_get_timeline_name,
+ .release = ffs_dmabuf_fence_release,
+};
+
+static int ffs_dma_resv_lock(struct dma_buf *dmabuf, bool nonblock)
+{
+ if (!nonblock)
+ return dma_resv_lock_interruptible(dmabuf->resv, NULL);
+
+ if (!dma_resv_trylock(dmabuf->resv))
+ return -EBUSY;
+
+ return 0;
+}
+
+static struct dma_buf_attachment *
+ffs_dmabuf_find_attachment(struct ffs_epfile *epfile, struct dma_buf *dmabuf)
+{
+ struct device *dev = epfile->ffs->gadget->dev.parent;
+ struct dma_buf_attachment *attach = NULL;
+ struct ffs_dmabuf_priv *priv;
+
+ mutex_lock(&epfile->dmabufs_mutex);
+
+ list_for_each_entry(priv, &epfile->dmabufs, entry) {
+ if (priv->attach->dev == dev
+ && priv->attach->dmabuf == dmabuf) {
+ attach = priv->attach;
+ break;
+ }
+ }
+
+ if (attach)
+ ffs_dmabuf_get(attach);
+
+ mutex_unlock(&epfile->dmabufs_mutex);
+
+ return attach ?: ERR_PTR(-EPERM);
+}
+
+static int ffs_dmabuf_attach(struct file *file, int fd)
+{
+ bool nonblock = file->f_flags & O_NONBLOCK;
+ struct ffs_epfile *epfile = file->private_data;
+ struct usb_gadget *gadget = epfile->ffs->gadget;
+ struct dma_buf_attachment *attach;
+ struct ffs_dmabuf_priv *priv;
+ enum dma_data_direction dir;
+ struct sg_table *sg_table;
+ struct dma_buf *dmabuf;
+ int err;
+
+ if (!gadget || !gadget->sg_supported)
+ return -EPERM;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ attach = dma_buf_attach(dmabuf, gadget->dev.parent);
+ if (IS_ERR(attach)) {
+ err = PTR_ERR(attach);
+ goto err_dmabuf_put;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ err = -ENOMEM;
+ goto err_dmabuf_detach;
+ }
+
+ dir = epfile->in ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ err = ffs_dma_resv_lock(dmabuf, nonblock);
+ if (err)
+ goto err_free_priv;
+
+ sg_table = dma_buf_map_attachment(attach, dir);
+ dma_resv_unlock(dmabuf->resv);
+
+ if (IS_ERR(sg_table)) {
+ err = PTR_ERR(sg_table);
+ goto err_free_priv;
+ }
+
+ attach->importer_priv = priv;
+
+ priv->sgt = sg_table;
+ priv->dir = dir;
+ priv->ffs = epfile->ffs;
+ priv->attach = attach;
+ spin_lock_init(&priv->lock);
+ kref_init(&priv->ref);
+ priv->context = dma_fence_context_alloc(1);
+
+ mutex_lock(&epfile->dmabufs_mutex);
+ list_add(&priv->entry, &epfile->dmabufs);
+ mutex_unlock(&epfile->dmabufs_mutex);
+
+ return 0;
+
+err_free_priv:
+ kfree(priv);
+err_dmabuf_detach:
+ dma_buf_detach(dmabuf, attach);
+err_dmabuf_put:
+ dma_buf_put(dmabuf);
+
+ return err;
+}
+
+static int ffs_dmabuf_detach(struct file *file, int fd)
+{
+ struct ffs_epfile *epfile = file->private_data;
+ struct ffs_data *ffs = epfile->ffs;
+ struct device *dev = ffs->gadget->dev.parent;
+ struct ffs_dmabuf_priv *priv, *tmp;
+ struct dma_buf *dmabuf;
+ int ret = -EPERM;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ mutex_lock(&epfile->dmabufs_mutex);
+
+ list_for_each_entry_safe(priv, tmp, &epfile->dmabufs, entry) {
+ if (priv->attach->dev == dev
+ && priv->attach->dmabuf == dmabuf) {
+ /* Cancel any pending transfer */
+ spin_lock_irq(&ffs->eps_lock);
+ if (priv->ep && priv->req)
+ usb_ep_dequeue(priv->ep, priv->req);
+ spin_unlock_irq(&ffs->eps_lock);
+
+ list_del(&priv->entry);
+
+ /* Unref the reference from ffs_dmabuf_attach() */
+ ffs_dmabuf_put(priv->attach);
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&epfile->dmabufs_mutex);
+ dma_buf_put(dmabuf);
+
+ return ret;
+}
+
+static int ffs_dmabuf_transfer(struct file *file,
+ const struct usb_ffs_dmabuf_transfer_req *req)
+{
+ bool nonblock = file->f_flags & O_NONBLOCK;
+ struct ffs_epfile *epfile = file->private_data;
+ struct dma_buf_attachment *attach;
+ struct ffs_dmabuf_priv *priv;
+ struct ffs_dma_fence *fence;
+ struct usb_request *usb_req;
+ enum dma_resv_usage resv_dir;
+ struct dma_buf *dmabuf;
+ unsigned long timeout;
+ struct ffs_ep *ep;
+ bool cookie;
+ u32 seqno;
+ long retl;
+ int ret;
+
+ if (req->flags & ~USB_FFS_DMABUF_TRANSFER_MASK)
+ return -EINVAL;
+
+ dmabuf = dma_buf_get(req->fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ if (req->length > dmabuf->size || req->length == 0) {
+ ret = -EINVAL;
+ goto err_dmabuf_put;
+ }
+
+ attach = ffs_dmabuf_find_attachment(epfile, dmabuf);
+ if (IS_ERR(attach)) {
+ ret = PTR_ERR(attach);
+ goto err_dmabuf_put;
+ }
+
+ priv = attach->importer_priv;
+
+ ep = ffs_epfile_wait_ep(file);
+ if (IS_ERR(ep)) {
+ ret = PTR_ERR(ep);
+ goto err_attachment_put;
+ }
+
+ ret = ffs_dma_resv_lock(dmabuf, nonblock);
+ if (ret)
+ goto err_attachment_put;
+
+ /* Make sure we don't have writers */
+ timeout = nonblock ? 0 : msecs_to_jiffies(DMABUF_ENQUEUE_TIMEOUT_MS);
+ retl = dma_resv_wait_timeout(dmabuf->resv,
+ dma_resv_usage_rw(epfile->in),
+ true, timeout);
+ if (retl == 0)
+ retl = -EBUSY;
+ if (retl < 0) {
+ ret = (int)retl;
+ goto err_resv_unlock;
+ }
+
+ ret = dma_resv_reserve_fences(dmabuf->resv, 1);
+ if (ret)
+ goto err_resv_unlock;
+
+ fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto err_resv_unlock;
+ }
+
+ fence->priv = priv;
+
+ spin_lock_irq(&epfile->ffs->eps_lock);
+
+ /* In the meantime, endpoint got disabled or changed. */
+ if (epfile->ep != ep) {
+ ret = -ESHUTDOWN;
+ goto err_fence_put;
+ }
+
+ usb_req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC);
+ if (!usb_req) {
+ ret = -ENOMEM;
+ goto err_fence_put;
+ }
+
+ /*
+ * usb_ep_queue() guarantees that all transfers are processed in the
+ * order they are enqueued, so we can use a simple incrementing
+ * sequence number for the dma_fence.
+ */
+ seqno = atomic_add_return(1, &epfile->seqno);
+
+ dma_fence_init(&fence->base, &ffs_dmabuf_fence_ops,
+ &priv->lock, priv->context, seqno);
+
+ resv_dir = epfile->in ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ;
+
+ dma_resv_add_fence(dmabuf->resv, &fence->base, resv_dir);
+ dma_resv_unlock(dmabuf->resv);
+
+ /* Now that the dma_fence is in place, queue the transfer. */
+
+ usb_req->length = req->length;
+ usb_req->buf = NULL;
+ usb_req->sg = priv->sgt->sgl;
+ usb_req->num_sgs = sg_nents_for_len(priv->sgt->sgl, req->length);
+ usb_req->sg_was_mapped = true;
+ usb_req->context = fence;
+ usb_req->complete = ffs_epfile_dmabuf_io_complete;
+
+ cookie = dma_fence_begin_signalling();
+ ret = usb_ep_queue(ep->ep, usb_req, GFP_ATOMIC);
+ dma_fence_end_signalling(cookie);
+ if (!ret) {
+ priv->req = usb_req;
+ priv->ep = ep->ep;
+ } else {
+ pr_warn("FFS: Failed to queue DMABUF: %d\n", ret);
+ ffs_dmabuf_signal_done(fence, ret);
+ usb_ep_free_request(ep->ep, usb_req);
+ }
+
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ dma_buf_put(dmabuf);
+
+ return ret;
+
+err_fence_put:
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ dma_fence_put(&fence->base);
+err_resv_unlock:
+ dma_resv_unlock(dmabuf->resv);
+err_attachment_put:
+ ffs_dmabuf_put(attach);
+err_dmabuf_put:
+ dma_buf_put(dmabuf);
+
+ return ret;
+}
+
static long ffs_epfile_ioctl(struct file *file, unsigned code,
unsigned long value)
{
@@ -1279,17 +1722,48 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
return -ENODEV;
- /* Wait for endpoint to be enabled */
- ep = epfile->ep;
- if (!ep) {
- if (file->f_flags & O_NONBLOCK)
- return -EAGAIN;
+ switch (code) {
+ case FUNCTIONFS_DMABUF_ATTACH:
+ {
+ int fd;
- ret = wait_event_interruptible(
- epfile->ffs->wait, (ep = epfile->ep));
- if (ret)
- return -EINTR;
+ if (copy_from_user(&fd, (void __user *)value, sizeof(fd))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ return ffs_dmabuf_attach(file, fd);
}
+ case FUNCTIONFS_DMABUF_DETACH:
+ {
+ int fd;
+
+ if (copy_from_user(&fd, (void __user *)value, sizeof(fd))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ return ffs_dmabuf_detach(file, fd);
+ }
+ case FUNCTIONFS_DMABUF_TRANSFER:
+ {
+ struct usb_ffs_dmabuf_transfer_req req;
+
+ if (copy_from_user(&req, (void __user *)value, sizeof(req))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ return ffs_dmabuf_transfer(file, &req);
+ }
+ default:
+ break;
+ }
+
+ /* Wait for endpoint to be enabled */
+ ep = ffs_epfile_wait_ep(file);
+ if (IS_ERR(ep))
+ return PTR_ERR(ep);
spin_lock_irq(&epfile->ffs->eps_lock);
@@ -1863,6 +2337,8 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
for (i = 1; i <= count; ++i, ++epfile) {
epfile->ffs = ffs;
mutex_init(&epfile->mutex);
+ mutex_init(&epfile->dmabufs_mutex);
+ INIT_LIST_HEAD(&epfile->dmabufs);
if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
else
@@ -3445,6 +3921,25 @@ static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
func_inst.group);
}
+static ssize_t f_fs_opts_ready_show(struct config_item *item, char *page)
+{
+ struct f_fs_opts *opts = to_ffs_opts(item);
+ int ready;
+
+ ffs_dev_lock();
+ ready = opts->dev->desc_ready;
+ ffs_dev_unlock();
+
+ return sprintf(page, "%d\n", ready);
+}
+
+CONFIGFS_ATTR_RO(f_fs_opts_, ready);
+
+static struct configfs_attribute *ffs_attrs[] = {
+ &f_fs_opts_attr_ready,
+ NULL,
+};
+
static void ffs_attr_release(struct config_item *item)
{
struct f_fs_opts *opts = to_ffs_opts(item);
@@ -3458,6 +3953,7 @@ static struct configfs_item_operations ffs_item_ops = {
static const struct config_item_type ffs_func_type = {
.ct_item_ops = &ffs_item_ops,
+ .ct_attrs = ffs_attrs,
.ct_owner = THIS_MODULE,
};
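
For illustration only, and not part of the patch itself: a minimal user-space sketch of how the three new DMABUF ioctls are intended to be driven. The endpoint path and the origin of the DMA-BUF file descriptor are assumptions here; the request layout follows struct usb_ffs_dmabuf_transfer_req from the FunctionFS UAPI header, and attach/transfer/detach map onto the handlers added above.

/* Hypothetical user-space helper; ep_fd is an open FunctionFS endpoint
 * file, buf_fd a DMA-BUF exported by some other subsystem (assumption). */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/usb/functionfs.h>

static int ffs_dmabuf_roundtrip(int ep_fd, int buf_fd, __u64 len)
{
	struct usb_ffs_dmabuf_transfer_req req = {
		.fd = buf_fd,
		.flags = 0,
		.length = len,
	};
	int ret;

	/* Attach once per buffer: maps it for the gadget's DMA device. */
	ret = ioctl(ep_fd, FUNCTIONFS_DMABUF_ATTACH, &buf_fd);
	if (ret)
		return ret;

	/* Enqueue a transfer covering the first len bytes of the buffer;
	 * completion is signalled through a DMA fence on the buffer. */
	ret = ioctl(ep_fd, FUNCTIONFS_DMABUF_TRANSFER, &req);
	if (ret)
		perror("FUNCTIONFS_DMABUF_TRANSFER");

	/* Detach drops the mapping and cancels any still-pending transfer. */
	ioctl(ep_fd, FUNCTIONFS_DMABUF_DETACH, &buf_fd);
	return ret;
}

Note that attach requires a gadget whose UDC reports sg_supported, which is why ffs_dmabuf_attach() above returns -EPERM otherwise.
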
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 28f4e6552e845..0acc32ed99609 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -878,7 +878,7 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (alt > 1)
goto fail;
- if (ncm->port.in_ep->enabled) {
+ if (ncm->netdev) {
DBG(cdev, "reset ncm\n");
ncm->netdev = NULL;
gether_disconnect(&ncm->port);
@@ -1367,7 +1367,7 @@ static void ncm_disable(struct usb_function *f)
DBG(cdev, "ncm deactivated\n");
- if (ncm->port.in_ep->enabled) {
+ if (ncm->netdev) {
ncm->netdev = NULL;
gether_disconnect(&ncm->port);
}
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 3c5a6f6ac3414..444212c0b5a98 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -718,7 +718,7 @@ static const struct net_device_ops eth_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static struct device_type gadget_type = {
+static const struct device_type gadget_type = {
.name = "gadget",
};
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index dd3241fc6939d..d41f5f31dadd5 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -35,6 +35,9 @@ uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
data[1] = UVC_STREAM_EOH | video->fid;
+ if (video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE)
+ data[1] |= UVC_STREAM_ERR;
+
if (video->queue.buf_used == 0 && ts.tv_sec) {
/* dwClockFrequency is 48 MHz */
u32 pts = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;
@@ -370,6 +373,7 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
struct uvc_video *video = ureq->video;
struct uvc_video_queue *queue = &video->queue;
struct uvc_buffer *last_buf;
+ struct usb_request *to_queue = req;
unsigned long flags;
bool is_bulk = video->max_payload_size;
int ret = 0;
@@ -397,7 +401,8 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
case -EXDEV:
uvcg_dbg(&video->uvc->func, "VS request missed xfer.\n");
- queue->flags |= UVC_QUEUE_DROP_INCOMPLETE;
+ if (req->length != 0)
+ queue->flags |= UVC_QUEUE_DROP_INCOMPLETE;
break;
case -ESHUTDOWN: /* disconnect from host. */
@@ -425,59 +430,59 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
* we're still streaming before queueing the usb_request
* back to req_free
*/
- if (video->is_enabled) {
+ if (!video->is_enabled) {
+ uvc_video_free_request(ureq, ep);
+ spin_unlock_irqrestore(&video->req_lock, flags);
+ uvcg_queue_cancel(queue, 0);
+
+ return;
+ }
+
+ /*
+ * Here we check whether any request is available in the ready
+ * list. If it is, queue it to the ep and add the current
+ * usb_request to the req_free list - for video_pump to fill in.
+ * Otherwise, just use the current usb_request to queue a 0
+ * length request to the ep. Since we always add to the req_free
+ * list if we dequeue from the ready list, there will never
+ * be a situation where the req_free list is completely out of
+ * requests and cannot recover.
+ */
+ to_queue->length = 0;
+ if (!list_empty(&video->req_ready)) {
+ to_queue = list_first_entry(&video->req_ready,
+ struct usb_request, list);
+ list_del(&to_queue->list);
+ list_add_tail(&req->list, &video->req_free);
/*
- * Here we check whether any request is available in the ready
- * list. If it is, queue it to the ep and add the current
- * usb_request to the req_free list - for video_pump to fill in.
- * Otherwise, just use the current usb_request to queue a 0
- * length request to the ep. Since we always add to the req_free
- * list if we dequeue from the ready list, there will never
- * be a situation where the req_free list is completely out of
- * requests and cannot recover.
+ * Queue work to the wq as well since it is possible that a
+ * buffer may not have been completely encoded with the set of
+ * in-flight usb requests for which the complete callbacks are
+ * firing.
+ * In that case, if we do not queue work to the worker thread,
+ * the buffer will never be marked as complete - and therefore
+ * not be returned to userspace. As a result,
+ * dequeue -> queue -> dequeue flow of uvc buffers will not
+ * happen.
*/
- struct usb_request *to_queue = req;
-
- to_queue->length = 0;
- if (!list_empty(&video->req_ready)) {
- to_queue = list_first_entry(&video->req_ready,
- struct usb_request, list);
- list_del(&to_queue->list);
- list_add_tail(&req->list, &video->req_free);
- /*
- * Queue work to the wq as well since it is possible that a
- * buffer may not have been completely encoded with the set of
- * in-flight usb requests for whih the complete callbacks are
- * firing.
- * In that case, if we do not queue work to the worker thread,
- * the buffer will never be marked as complete - and therefore
- * not be returned to userpsace. As a result,
- * dequeue -> queue -> dequeue flow of uvc buffers will not
- * happen.
- */
- queue_work(video->async_wq, &video->pump);
- }
+ queue_work(video->async_wq, &video->pump);
+ }
+ /*
+ * Queue to the endpoint. The actual queueing to ep will
+ * only happen on one thread - the async_wq for bulk endpoints
+ * and this thread for isoc endpoints.
+ */
+ ret = uvcg_video_usb_req_queue(video, to_queue, !is_bulk);
+ if (ret < 0) {
/*
- * Queue to the endpoint. The actual queueing to ep will
- * only happen on one thread - the async_wq for bulk endpoints
- * and this thread for isoc endpoints.
+ * Endpoint error, but the stream is still enabled.
+ * Put request back in req_free for it to be cleaned
+ * up later.
*/
- ret = uvcg_video_usb_req_queue(video, to_queue, !is_bulk);
- if (ret < 0) {
- /*
- * Endpoint error, but the stream is still enabled.
- * Put request back in req_free for it to be cleaned
- * up later.
- */
- list_add_tail(&to_queue->list, &video->req_free);
- }
- } else {
- uvc_video_free_request(ureq, ep);
- ret = 0;
+ list_add_tail(&to_queue->list, &video->req_free);
}
+
spin_unlock_irqrestore(&video->req_lock, flags);
- if (ret < 0)
- uvcg_queue_cancel(queue, 0);
}
static int
@@ -594,10 +599,7 @@ static void uvcg_video_pump(struct work_struct *work)
*/
spin_lock_irqsave(&queue->irqlock, flags);
buf = uvcg_queue_head(queue);
-
- if (buf != NULL) {
- video->encode(req, video, buf);
- } else {
+ if (!buf) {
/*
* Either the queue has been disconnected or no video buffer
* available for bulk transfer. Either way, stop processing
@@ -607,6 +609,8 @@ static void uvcg_video_pump(struct work_struct *work)
break;
}
+ video->encode(req, video, buf);
+
spin_unlock_irqrestore(&queue->irqlock, flags);
spin_lock_irqsave(&video->req_lock, flags);
@@ -623,14 +627,7 @@ static void uvcg_video_pump(struct work_struct *work)
uvcg_queue_cancel(queue, 0);
break;
}
-
- /* The request is owned by the endpoint / ready list. */
- req = NULL;
}
-
- if (!req)
- return;
-
spin_lock_irqsave(&video->req_lock, flags);
if (video->is_enabled)
list_add_tail(&req->list, &video->req_free);
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index d59f94464b870..b3a9d18a8dcd1 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -292,7 +292,9 @@ int usb_ep_queue(struct usb_ep *ep,
{
int ret = 0;
- if (WARN_ON_ONCE(!ep->enabled && ep->address)) {
+ if (!ep->enabled && ep->address) {
+ pr_debug("USB gadget: queue request to disabled ep 0x%x (%s)\n",
+ ep->address, ep->name);
ret = -ESHUTDOWN;
goto out;
}
@@ -903,6 +905,11 @@ int usb_gadget_map_request_by_dev(struct device *dev,
if (req->length == 0)
return 0;
+ if (req->sg_was_mapped) {
+ req->num_mapped_sgs = req->num_sgs;
+ return 0;
+ }
+
if (req->num_sgs) {
int mapped;
@@ -948,7 +955,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_map_request);
void usb_gadget_unmap_request_by_dev(struct device *dev,
struct usb_request *req, int is_in)
{
- if (req->length == 0)
+ if (req->length == 0 || req->sg_was_mapped)
return;
if (req->num_mapped_sgs) {
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index e8042c158f6dc..3432ebfae9787 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -13,7 +13,7 @@
* code from Dave Liu and Shlomi Gridish.
*/
-#undef VERBOSE
+#define pr_fmt(x) "udc: " x
#include <linux/module.h>
#include <linux/kernel.h>
@@ -183,9 +183,9 @@ __acquires(ep->udc->lock)
usb_gadget_unmap_request(&ep->udc->gadget, &req->req, ep_is_in(ep));
if (status && (status != -ESHUTDOWN))
- VDBG("complete %s req %p stat %d len %u/%u",
- ep->ep.name, &req->req, status,
- req->req.actual, req->req.length);
+ dev_vdbg(&udc->gadget.dev, "complete %s req %p stat %d len %u/%u\n",
+ ep->ep.name, &req->req, status,
+ req->req.actual, req->req.length);
ep->stopped = 1;
@@ -285,7 +285,7 @@ static int dr_controller_setup(struct fsl_udc *udc)
timeout = jiffies + FSL_UDC_RESET_TIMEOUT;
while (fsl_readl(&dr_regs->usbcmd) & USB_CMD_CTRL_RESET) {
if (time_after(jiffies, timeout)) {
- ERR("udc reset timeout!\n");
+ dev_err(&udc->gadget.dev, "udc reset timeout!\n");
return -ETIMEDOUT;
}
cpu_relax();
@@ -308,9 +308,10 @@ static int dr_controller_setup(struct fsl_udc *udc)
tmp &= USB_EP_LIST_ADDRESS_MASK;
fsl_writel(tmp, &dr_regs->endpointlistaddr);
- VDBG("vir[qh_base] is %p phy[qh_base] is 0x%8x reg is 0x%8x",
- udc->ep_qh, (int)tmp,
- fsl_readl(&dr_regs->endpointlistaddr));
+ dev_vdbg(&udc->gadget.dev,
+ "vir[qh_base] is %p phy[qh_base] is 0x%8x reg is 0x%8x\n",
+ udc->ep_qh, (int)tmp,
+ fsl_readl(&dr_regs->endpointlistaddr));
max_no_of_ep = (0x0000001F & fsl_readl(&dr_regs->dccparams));
for (ep_num = 1; ep_num < max_no_of_ep; ep_num++) {
@@ -498,7 +499,7 @@ static void struct_ep_qh_setup(struct fsl_udc *udc, unsigned char ep_num,
tmp = max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS;
break;
default:
- VDBG("error ep type is %d", ep_type);
+ dev_vdbg(&udc->gadget.dev, "error ep type is %d\n", ep_type);
return;
}
if (zlt)
@@ -611,10 +612,10 @@ static int fsl_ep_enable(struct usb_ep *_ep,
spin_unlock_irqrestore(&udc->lock, flags);
retval = 0;
- VDBG("enabled %s (ep%d%s) maxpacket %d",ep->ep.name,
- ep->ep.desc->bEndpointAddress & 0x0f,
- (desc->bEndpointAddress & USB_DIR_IN)
- ? "in" : "out", max);
+ dev_vdbg(&udc->gadget.dev, "enabled %s (ep%d%s) maxpacket %d\n",
+ ep->ep.name, ep->ep.desc->bEndpointAddress & 0x0f,
+ (desc->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
+ max);
en_done:
return retval;
}
@@ -633,7 +634,10 @@ static int fsl_ep_disable(struct usb_ep *_ep)
ep = container_of(_ep, struct fsl_ep, ep);
if (!_ep || !ep->ep.desc) {
- VDBG("%s not enabled", _ep ? ep->ep.name : NULL);
+ /*
+ * dev_vdbg(&udc->gadget.dev, "%s not enabled\n",
+ * _ep ? ep->ep.name : NULL);
+ */
return -EINVAL;
}
@@ -659,7 +663,7 @@ static int fsl_ep_disable(struct usb_ep *_ep)
ep->stopped = 1;
spin_unlock_irqrestore(&udc->lock, flags);
- VDBG("disabled %s OK", _ep->name);
+ dev_vdbg(&udc->gadget.dev, "disabled %s OK\n", _ep->name);
return 0;
}
@@ -719,8 +723,8 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
{
u32 temp, bitmask, tmp_stat;
- /* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
- VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
+ /* dev_vdbg(&udc->gadget.dev, "QH addr Register 0x%8x\n", dr_regs->endpointlistaddr);
+ dev_vdbg(&udc->gadget.dev, "ep_qh[%d] addr is 0x%8x\n", i, (u32)&(ep->udc->ep_qh[i])); */
bitmask = ep_is_in(ep)
? (1 << (ep_index(ep) + 16))
@@ -808,7 +812,7 @@ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
*is_last = 0;
if ((*is_last) == 0)
- VDBG("multi-dtd request!");
+ dev_vdbg(&udc_controller->gadget.dev, "multi-dtd request!\n");
/* Fill in the transfer size; set active bit */
swap_temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
@@ -820,7 +824,7 @@ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
mb();
- VDBG("length = %d address= 0x%x", *length, (int)*dma);
+ dev_vdbg(&udc_controller->gadget.dev, "length = %d address= 0x%x\n", *length, (int)*dma);
return dtd;
}
@@ -864,18 +868,18 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
struct fsl_req *req = container_of(_req, struct fsl_req, req);
- struct fsl_udc *udc;
+ struct fsl_udc *udc = ep->udc;
unsigned long flags;
int ret;
/* catch various bogus parameters */
if (!_req || !req->req.complete || !req->req.buf
|| !list_empty(&req->queue)) {
- VDBG("%s, bad params", __func__);
+ dev_vdbg(&udc->gadget.dev, "%s, bad params\n", __func__);
return -EINVAL;
}
- if (unlikely(!_ep || !ep->ep.desc)) {
- VDBG("%s, bad ep", __func__);
+ if (unlikely(!ep->ep.desc)) {
+ dev_vdbg(&udc->gadget.dev, "%s, bad ep\n", __func__);
return -EINVAL;
}
if (usb_endpoint_xfer_isoc(ep->ep.desc)) {
@@ -883,7 +887,6 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
return -EMSGSIZE;
}
- udc = ep->udc;
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
@@ -1036,8 +1039,8 @@ static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
udc->ep0_dir = 0;
}
out:
- VDBG(" %s %s halt stat %d", ep->ep.name,
- value ? "set" : "clear", status);
+ dev_vdbg(&udc->gadget.dev, "%s %s halt stat %d\n", ep->ep.name,
+ value ? "set" : "clear", status);
return status;
}
@@ -1105,7 +1108,8 @@ static void fsl_ep_fifo_flush(struct usb_ep *_ep)
/* Wait until flush complete */
while (fsl_readl(&dr_regs->endptflush)) {
if (time_after(jiffies, timeout)) {
- ERR("ep flush timeout\n");
+ dev_err(&udc_controller->gadget.dev,
+ "ep flush timeout\n");
return;
}
cpu_relax();
@@ -1177,7 +1181,7 @@ static int fsl_vbus_session(struct usb_gadget *gadget, int is_active)
udc = container_of(gadget, struct fsl_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
- VDBG("VBUS %s", is_active ? "on" : "off");
+ dev_vdbg(&gadget->dev, "VBUS %s\n", is_active ? "on" : "off");
udc->vbus_active = (is_active != 0);
if (can_pullup(udc))
fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
@@ -1543,7 +1547,7 @@ static void ep0_req_complete(struct fsl_udc *udc, struct fsl_ep *ep0,
udc->ep0_state = WAIT_FOR_SETUP;
break;
case WAIT_FOR_SETUP:
- ERR("Unexpected ep0 packets\n");
+ dev_err(&udc->gadget.dev, "Unexpected ep0 packets\n");
break;
default:
ep0stall(udc);
@@ -1612,7 +1616,7 @@ static int process_ep_req(struct fsl_udc *udc, int pipe,
errors = hc32_to_cpu(curr_td->size_ioc_sts);
if (errors & DTD_ERROR_MASK) {
if (errors & DTD_STATUS_HALTED) {
- ERR("dTD error %08x QH=%d\n", errors, pipe);
+ dev_err(&udc->gadget.dev, "dTD error %08x QH=%d\n", errors, pipe);
/* Clear the errors and Halt condition */
tmp = hc32_to_cpu(curr_qh->size_ioc_int_sts);
tmp &= ~errors;
@@ -1623,32 +1627,35 @@ static int process_ep_req(struct fsl_udc *udc, int pipe,
break;
}
if (errors & DTD_STATUS_DATA_BUFF_ERR) {
- VDBG("Transfer overflow");
+ dev_vdbg(&udc->gadget.dev, "Transfer overflow\n");
status = -EPROTO;
break;
} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
- VDBG("ISO error");
+ dev_vdbg(&udc->gadget.dev, "ISO error\n");
status = -EILSEQ;
break;
} else
- ERR("Unknown error has occurred (0x%x)!\n",
+ dev_err(&udc->gadget.dev,
+ "Unknown error has occurred (0x%x)!\n",
errors);
} else if (hc32_to_cpu(curr_td->size_ioc_sts)
& DTD_STATUS_ACTIVE) {
- VDBG("Request not complete");
+ dev_vdbg(&udc->gadget.dev, "Request not complete\n");
status = REQ_UNCOMPLETE;
return status;
} else if (remaining_length) {
if (direction) {
- VDBG("Transmit dTD remaining length not zero");
+ dev_vdbg(&udc->gadget.dev,
+ "Transmit dTD remaining length not zero\n");
status = -EPROTO;
break;
} else {
break;
}
} else {
- VDBG("dTD transmitted successful");
+ dev_vdbg(&udc->gadget.dev,
+ "dTD transmitted successful\n");
}
if (j != curr_req->dtd_count - 1)
@@ -1691,7 +1698,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)
/* If the ep is configured */
if (!curr_ep->ep.name) {
- WARNING("Invalid EP?");
+ dev_warn(&udc->gadget.dev, "Invalid EP?\n");
continue;
}
@@ -1700,8 +1707,9 @@ static void dtd_complete_irq(struct fsl_udc *udc)
queue) {
status = process_ep_req(udc, i, curr_req);
- VDBG("status of process_ep_req= %d, ep = %d",
- status, ep_num);
+ dev_vdbg(&udc->gadget.dev,
+ "status of process_ep_req= %d, ep = %d\n",
+ status, ep_num);
if (status == REQ_UNCOMPLETE)
break;
/* write back status to req */
@@ -1820,7 +1828,7 @@ static void reset_irq(struct fsl_udc *udc)
while (fsl_readl(&dr_regs->endpointprime)) {
/* Wait until all endptprime bits cleared */
if (time_after(jiffies, timeout)) {
- ERR("Timeout for reset\n");
+ dev_err(&udc->gadget.dev, "Timeout for reset\n");
break;
}
cpu_relax();
@@ -1830,7 +1838,7 @@ static void reset_irq(struct fsl_udc *udc)
fsl_writel(0xffffffff, &dr_regs->endptflush);
if (fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET) {
- VDBG("Bus reset");
+ dev_vdbg(&udc->gadget.dev, "Bus reset\n");
/* Bus is reseting */
udc->bus_reset = 1;
/* Reset all the queues, include XD, dTD, EP queue
@@ -1838,7 +1846,7 @@ static void reset_irq(struct fsl_udc *udc)
reset_queues(udc, true);
udc->usb_state = USB_STATE_DEFAULT;
} else {
- VDBG("Controller reset");
+ dev_vdbg(&udc->gadget.dev, "Controller reset\n");
/* initialize usb hw reg except for regs for EP, not
* touch usbintr reg */
dr_controller_setup(udc);
@@ -1872,7 +1880,7 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
/* Clear notification bits */
fsl_writel(irq_src, &dr_regs->usbsts);
- /* VDBG("irq_src [0x%8x]", irq_src); */
+ /* dev_vdbg(&udc->gadget.dev, "irq_src [0x%8x]", irq_src); */
/* Need to resume? */
if (udc->usb_state == USB_STATE_SUSPENDED)
@@ -1881,7 +1889,7 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
/* USB Interrupt */
if (irq_src & USB_STS_INT) {
- VDBG("Packet int");
+ dev_vdbg(&udc->gadget.dev, "Packet int\n");
/* Setup package, we only support ep0 as control ep */
if (fsl_readl(&dr_regs->endptsetupstat) & EP_SETUP_STATUS_EP0) {
tripwire_handler(udc, 0,
@@ -1910,7 +1918,7 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
/* Reset Received */
if (irq_src & USB_STS_RESET) {
- VDBG("reset int");
+ dev_vdbg(&udc->gadget.dev, "reset int\n");
reset_irq(udc);
status = IRQ_HANDLED;
}
@@ -1922,7 +1930,7 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
}
if (irq_src & (USB_STS_ERR | USB_STS_SYS_ERR)) {
- VDBG("Error IRQ %x", irq_src);
+ dev_vdbg(&udc->gadget.dev, "Error IRQ %x\n", irq_src);
}
spin_unlock_irqrestore(&udc->lock, flags);
@@ -1958,7 +1966,7 @@ static int fsl_udc_start(struct usb_gadget *g,
udc_controller->transceiver->otg,
&udc_controller->gadget);
if (retval < 0) {
- ERR("can't bind to transceiver\n");
+ dev_err(&udc_controller->gadget.dev, "can't bind to transceiver\n");
udc_controller->driver = NULL;
return retval;
}
@@ -2243,7 +2251,7 @@ static int struct_udc_setup(struct fsl_udc *udc,
udc->eps = kcalloc(udc->max_ep, sizeof(struct fsl_ep), GFP_KERNEL);
if (!udc->eps) {
- ERR("kmalloc udc endpoint status failed\n");
+ dev_err(&udc->gadget.dev, "kmalloc udc endpoint status failed\n");
goto eps_alloc_failed;
}
@@ -2258,7 +2266,7 @@ static int struct_udc_setup(struct fsl_udc *udc,
udc->ep_qh = dma_alloc_coherent(&pdev->dev, size,
&udc->ep_qh_dma, GFP_KERNEL);
if (!udc->ep_qh) {
- ERR("malloc QHs for udc failed\n");
+ dev_err(&udc->gadget.dev, "malloc QHs for udc failed\n");
goto ep_queue_alloc_failed;
}
@@ -2269,14 +2277,14 @@ static int struct_udc_setup(struct fsl_udc *udc,
udc->status_req = container_of(fsl_alloc_request(NULL, GFP_KERNEL),
struct fsl_req, req);
if (!udc->status_req) {
- ERR("kzalloc for udc status request failed\n");
+ dev_err(&udc->gadget.dev, "kzalloc for udc status request failed\n");
goto udc_status_alloc_failed;
}
/* allocate a small amount of memory to get valid address */
udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
if (!udc->status_req->req.buf) {
- ERR("kzalloc for udc request buffer failed\n");
+ dev_err(&udc->gadget.dev, "kzalloc for udc request buffer failed\n");
goto udc_req_buf_alloc_failed;
}
@@ -2373,7 +2381,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
if (pdata->operating_mode == FSL_USB2_DR_OTG) {
udc_controller->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR_OR_NULL(udc_controller->transceiver)) {
- ERR("Can't find OTG driver!\n");
+ dev_err(&udc_controller->gadget.dev, "Can't find OTG driver!\n");
ret = -ENODEV;
goto err_kfree;
}
@@ -2389,7 +2397,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
if (pdata->operating_mode == FSL_USB2_DR_DEVICE) {
if (!request_mem_region(res->start, resource_size(res),
driver_name)) {
- ERR("request mem region for %s failed\n", pdev->name);
+ dev_err(&udc_controller->gadget.dev, "request mem region for %s failed\n", pdev->name);
ret = -EBUSY;
goto err_kfree;
}
@@ -2420,7 +2428,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
/* Read Device Controller Capability Parameters register */
dccparams = fsl_readl(&dr_regs->dccparams);
if (!(dccparams & DCCPARAMS_DC)) {
- ERR("This SOC doesn't support device role\n");
+ dev_err(&udc_controller->gadget.dev, "This SOC doesn't support device role\n");
ret = -ENODEV;
goto err_exit;
}
@@ -2438,14 +2446,14 @@ static int fsl_udc_probe(struct platform_device *pdev)
ret = request_irq(udc_controller->irq, fsl_udc_irq, IRQF_SHARED,
driver_name, udc_controller);
if (ret != 0) {
- ERR("cannot request irq %d err %d\n",
+ dev_err(&udc_controller->gadget.dev, "cannot request irq %d err %d\n",
udc_controller->irq, ret);
goto err_exit;
}
/* Initialize the udc structure including QH member and other member */
if (struct_udc_setup(udc_controller, pdev)) {
- ERR("Can't initialize udc data structure\n");
+ dev_err(&udc_controller->gadget.dev, "Can't initialize udc data structure\n");
ret = -ENOMEM;
goto err_free_irq;
}
@@ -2486,7 +2494,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
/* setup the udc->eps[] for non-control endpoints and link
* to gadget.ep_list */
for (i = 1; i < (int)(udc_controller->max_ep / 2); i++) {
- char name[14];
+ char name[16];
sprintf(name, "ep%dout", i);
struct_ep_setup(udc_controller, i * 2, name, 1);
@@ -2666,6 +2674,15 @@ static const struct platform_device_id fsl_udc_devtype[] = {
}
};
MODULE_DEVICE_TABLE(platform, fsl_udc_devtype);
+
+static const struct of_device_id fsl_udc_dt_ids[] = {
+ { .compatible = "fsl-usb2-dr" },
+ { .compatible = "fsl-usb2-mph" },
+ { .compatible = "fsl,mpc5121-usb2-dr" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_udc_dt_ids);
+
static struct platform_driver udc_driver = {
.probe = fsl_udc_probe,
.remove_new = fsl_udc_remove,
@@ -2675,6 +2692,7 @@ static struct platform_driver udc_driver = {
.resume = fsl_udc_resume,
.driver = {
.name = driver_name,
+ .of_match_table = fsl_udc_dt_ids,
/* udc suspend/resume called from OTG driver */
.suspend = fsl_udc_otg_suspend,
.resume = fsl_udc_otg_resume,
diff --git a/drivers/usb/gadget/udc/fsl_usb2_udc.h b/drivers/usb/gadget/udc/fsl_usb2_udc.h
index 2efc5a930b48e..cc1756f3e89d1 100644
--- a/drivers/usb/gadget/udc/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/udc/fsl_usb2_udc.h
@@ -508,53 +508,6 @@ struct fsl_udc {
/*-------------------------------------------------------------------------*/
-#ifdef DEBUG
-#define DBG(fmt, args...) printk(KERN_DEBUG "[%s] " fmt "\n", \
- __func__, ## args)
-#else
-#define DBG(fmt, args...) do{}while(0)
-#endif
-
-#if 0
-static void dump_msg(const char *label, const u8 * buf, unsigned int length)
-{
- unsigned int start, num, i;
- char line[52], *p;
-
- if (length >= 512)
- return;
- DBG("%s, length %u:\n", label, length);
- start = 0;
- while (length > 0) {
- num = min(length, 16u);
- p = line;
- for (i = 0; i < num; ++i) {
- if (i == 8)
- *p++ = ' ';
- sprintf(p, " %02x", buf[i]);
- p += 3;
- }
- *p = 0;
- printk(KERN_DEBUG "%6x: %s\n", start, line);
- buf += num;
- start += num;
- length -= num;
- }
-}
-#endif
-
-#ifdef VERBOSE
-#define VDBG DBG
-#else
-#define VDBG(stuff...) do{}while(0)
-#endif
-
-#define ERR(stuff...) pr_err("udc: " stuff)
-#define WARNING(stuff...) pr_warn("udc: " stuff)
-#define INFO(stuff...) pr_info("udc: " stuff)
-
-/*-------------------------------------------------------------------------*/
-
/* ### Add board specific defines here
*/
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 12e76bb62c209..19bbc38f3d35d 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2650,7 +2650,7 @@ net2272_plat_probe(struct platform_device *pdev)
goto err_req;
}
- ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
+ ret = net2272_probe_fin(dev, irqflags);
if (ret)
goto err_io;
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index 61424cfd2e1cb..1a6317e4b2a32 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -24,7 +24,6 @@
#include <linux/byteorder/generic.h>
#include <linux/platform_data/pxa2xx_udc.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
index 547af2ed9e5e0..ba5a066905077 100644
--- a/drivers/usb/gadget/udc/snps_udc_plat.c
+++ b/drivers/usb/gadget/udc/snps_udc_plat.c
@@ -8,7 +8,6 @@
#include <linux/extcon.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/module.h>
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index cb85168fd00c2..7aa46d426f31b 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -3491,8 +3491,8 @@ static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
{
- int err = 0, usb3;
- unsigned int i;
+ int err = 0, usb3_companion_port;
+ unsigned int i, j;
xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
sizeof(*xudc->utmi_phy), GFP_KERNEL);
@@ -3520,7 +3520,7 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
if (IS_ERR(xudc->utmi_phy[i])) {
err = PTR_ERR(xudc->utmi_phy[i]);
dev_err_probe(xudc->dev, err,
- "failed to get usb2-%d PHY\n", i);
+ "failed to get PHY for phy-name usb2-%d\n", i);
goto clean_up;
} else if (xudc->utmi_phy[i]) {
/* Get usb-phy, if utmi phy is available */
@@ -3539,19 +3539,30 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
}
/* Get USB3 phy */
- usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
- if (usb3 < 0)
+ usb3_companion_port = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
+ if (usb3_companion_port < 0)
continue;
- snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3);
- xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
- if (IS_ERR(xudc->usb3_phy[i])) {
- err = PTR_ERR(xudc->usb3_phy[i]);
- dev_err_probe(xudc->dev, err,
- "failed to get usb3-%d PHY\n", usb3);
- goto clean_up;
- } else if (xudc->usb3_phy[i])
- dev_dbg(xudc->dev, "usb3-%d PHY registered", usb3);
+ for (j = 0; j < xudc->soc->num_phys; j++) {
+ snprintf(phy_name, sizeof(phy_name), "usb3-%d", j);
+ xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
+ if (IS_ERR(xudc->usb3_phy[i])) {
+ err = PTR_ERR(xudc->usb3_phy[i]);
+ dev_err_probe(xudc->dev, err,
+ "failed to get PHY for phy-name usb3-%d\n", j);
+ goto clean_up;
+ } else if (xudc->usb3_phy[i]) {
+ int usb2_port =
+ tegra_xusb_padctl_get_port_number(xudc->utmi_phy[i]);
+ int usb3_port =
+ tegra_xusb_padctl_get_port_number(xudc->usb3_phy[i]);
+ if (usb3_port == usb3_companion_port) {
+ dev_dbg(xudc->dev, "USB2 port %d is paired with USB3 port %d for device mode port %d\n",
+ usb2_port, usb3_port, i);
+ break;
+ }
+ }
+ }
}
return err;
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 6c47ab0a491d5..ad145a54ca74e 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -66,6 +66,15 @@ struct orion_ehci_hcd {
static struct hc_driver __read_mostly ehci_orion_hc_driver;
/*
+ * Legacy DMA mask is 32 bit.
+ * AC5 has the DDR starting at 8GB, hence it requires
+ * a larger (34-bit) DMA mask, in order for DMA allocations
+ * to succeed:
+ */
+static const u64 dma_mask_orion = DMA_BIT_MASK(32);
+static const u64 dma_mask_ac5 = DMA_BIT_MASK(34);
+
+/*
* Implement Orion USB controller specification guidelines
*/
static void orion_usb_phy_v1_setup(struct usb_hcd *hcd)
@@ -211,6 +220,7 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
int irq, err;
enum orion_ehci_phy_ver phy_version;
struct orion_ehci_hcd *priv;
+ u64 *dma_mask_ptr;
if (usb_disabled())
return -ENODEV;
@@ -228,7 +238,8 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
* set. Since shared usb code relies on it, set it here for
* now. Once we have dma capability bindings this can go away.
*/
- err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ dma_mask_ptr = (u64 *)of_device_get_match_data(&pdev->dev);
+ err = dma_coerce_mask_and_coherent(&pdev->dev, *dma_mask_ptr);
if (err)
goto err;
@@ -332,8 +343,9 @@ static void ehci_orion_drv_remove(struct platform_device *pdev)
}
static const struct of_device_id ehci_orion_dt_ids[] = {
- { .compatible = "marvell,orion-ehci", },
- { .compatible = "marvell,armada-3700-ehci", },
+ { .compatible = "marvell,orion-ehci", .data = &dma_mask_orion},
+ { .compatible = "marvell,armada-3700-ehci", .data = &dma_mask_orion},
+ { .compatible = "marvell,ac5-ehci", .data = &dma_mask_ac5},
{},
};
MODULE_DEVICE_TABLE(of, ehci_orion_dt_ids);
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 357d9aee38a37..3348c25ddb18d 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -27,7 +27,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <linux/platform_data/pxa2xx_udc.h>
#include <linux/platform_device.h>
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 0956495bba575..2b871540bb500 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -585,6 +585,7 @@ done(struct sl811 *sl811, struct sl811h_ep *ep, u8 bank)
finish_request(sl811, ep, urb, urbstat);
}
+#ifdef QUIRK2
static inline u8 checkdone(struct sl811 *sl811)
{
u8 ctl;
@@ -616,6 +617,7 @@ static inline u8 checkdone(struct sl811 *sl811)
#endif
return irqstat;
}
+#endif
static irqreturn_t sl811h_irq(struct usb_hcd *hcd)
{
diff --git a/drivers/usb/host/xhci-caps.h b/drivers/usb/host/xhci-caps.h
new file mode 100644
index 0000000000000..9e94cebf4a56d
--- /dev/null
+++ b/drivers/usb/host/xhci-caps.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* hc_capbase bitmasks */
+/* bits 7:0 - how long is the Capabilities register */
+#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
+/* bits 31:16 */
+#define HC_VERSION(p) (((p) >> 16) & 0xffff)
+
+/* HCSPARAMS1 - hcs_params1 - bitmasks */
+/* bits 0:7, Max Device Slots */
+#define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff)
+#define HCS_SLOTS_MASK 0xff
+/* bits 8:18, Max Interrupters */
+#define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff)
+/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
+#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
+
+/* HCSPARAMS2 - hcs_params2 - bitmasks */
+/* bits 0:3, frames or uframes that SW needs to queue transactions
+ * ahead of the HW to meet periodic deadlines */
+#define HCS_IST(p) (((p) >> 0) & 0xf)
+/* bits 4:7, max number of Event Ring segments */
+#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
+/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
+/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
+/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
+#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
+
+/* HCSPARAMS3 - hcs_params3 - bitmasks */
+/* bits 0:7, Max U1 to U0 latency for the roothub ports */
+#define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff)
+/* bits 16:31, Max U2 to U0 latency for the roothub ports */
+#define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff)
+
+/* HCCPARAMS - hcc_params - bitmasks */
+/* true: HC can use 64-bit address pointers */
+#define HCC_64BIT_ADDR(p) ((p) & (1 << 0))
+/* true: HC can do bandwidth negotiation */
+#define HCC_BANDWIDTH_NEG(p) ((p) & (1 << 1))
+/* true: HC uses 64-byte Device Context structures
+ * FIXME 64-byte context structures aren't supported yet.
+ */
+#define HCC_64BYTE_CONTEXT(p) ((p) & (1 << 2))
+/* true: HC has port power switches */
+#define HCC_PPC(p) ((p) & (1 << 3))
+/* true: HC has port indicators */
+#define HCS_INDICATOR(p) ((p) & (1 << 4))
+/* true: HC has Light HC Reset Capability */
+#define HCC_LIGHT_RESET(p) ((p) & (1 << 5))
+/* true: HC supports latency tolerance messaging */
+#define HCC_LTC(p) ((p) & (1 << 6))
+/* true: no secondary Stream ID Support */
+#define HCC_NSS(p) ((p) & (1 << 7))
+/* true: HC supports Stopped - Short Packet */
+#define HCC_SPC(p) ((p) & (1 << 9))
+/* true: HC has Contiguous Frame ID Capability */
+#define HCC_CFC(p) ((p) & (1 << 11))
+/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
+#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
+/* Extended Capabilities pointer from PCI base - section 5.3.6 */
+#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
+
+#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+
+/* db_off bitmask - bits 0:1 reserved */
+#define DBOFF_MASK (~0x3)
+
+/* run_regs_off bitmask - bits 0:4 reserved */
+#define RTSOFF_MASK (~0x1f)
+
+/* HCCPARAMS2 - hcc_params2 - bitmasks */
+/* true: HC supports U3 entry Capability */
+#define HCC2_U3C(p) ((p) & (1 << 0))
+/* true: HC supports Configure endpoint command Max exit latency too large */
+#define HCC2_CMC(p) ((p) & (1 << 1))
+/* true: HC supports Force Save context Capability */
+#define HCC2_FSC(p) ((p) & (1 << 2))
+/* true: HC supports Compliance Transition Capability */
+#define HCC2_CTC(p) ((p) & (1 << 3))
+/* true: HC support Large ESIT payload Capability > 48k */
+#define HCC2_LEC(p) ((p) & (1 << 4))
+/* true: HC supports Configuration Information Capability */
+#define HCC2_CIC(p) ((p) & (1 << 5))
+/* true: HC supports Extended TBC Capability, Isoc burst count > 65535 */
+#define HCC2_ETC(p) ((p) & (1 << 6))
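
For reference, a standalone sketch (illustrative only, not part of the diff) of how the capability macros gathered in this new header decode raw register words. The macro copies below mirror the definitions above; the sample register values are invented.

/*
 * Illustrative sketch: decode example HCSPARAMS1/HCCPARAMS words with the
 * capability macros moved into xhci-caps.h. Sample values are made up; in
 * the driver the words are read from xhci->cap_regs.
 */
#include <stdio.h>
#include <stdint.h>

#define HCS_MAX_SLOTS(p)	(((p) >> 0) & 0xff)
#define HCS_MAX_INTRS(p)	(((p) >> 8) & 0x7ff)
#define HCS_MAX_PORTS(p)	(((p) >> 24) & 0x7f)
#define HCC_64BYTE_CONTEXT(p)	((p) & (1 << 2))
#define CTX_SIZE(_hcc)		(HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

int main(void)
{
	uint32_t hcsparams1 = 0x20000820;	/* sample: 32 ports, 8 interrupters, 32 slots */
	uint32_t hccparams  = 0x00000004;	/* sample: 64-byte device contexts */

	printf("slots=%u intrs=%u ports=%u ctx=%d\n",
	       HCS_MAX_SLOTS(hcsparams1), HCS_MAX_INTRS(hcsparams1),
	       HCS_MAX_PORTS(hcsparams1), CTX_SIZE(hccparams));
	return 0;
}
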
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index d82935d31126d..8a9869ef0db66 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -634,7 +634,8 @@ static int xhci_dbc_start(struct xhci_dbc *dbc)
return ret;
}
- return mod_delayed_work(system_wq, &dbc->event_work, 1);
+ return mod_delayed_work(system_wq, &dbc->event_work,
+ msecs_to_jiffies(dbc->poll_interval));
}
static void xhci_dbc_stop(struct xhci_dbc *dbc)
@@ -899,8 +900,10 @@ static void xhci_dbc_handle_events(struct work_struct *work)
enum evtreturn evtr;
struct xhci_dbc *dbc;
unsigned long flags;
+ unsigned int poll_interval;
dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
+ poll_interval = dbc->poll_interval;
spin_lock_irqsave(&dbc->lock, flags);
evtr = xhci_dbc_do_handle_events(dbc);
@@ -916,13 +919,18 @@ static void xhci_dbc_handle_events(struct work_struct *work)
dbc->driver->disconnect(dbc);
break;
case EVT_DONE:
+ /* set fast poll rate if there are pending data transfers */
+ if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
+ !list_empty(&dbc->eps[BULK_IN].list_pending))
+ poll_interval = 1;
break;
default:
dev_info(dbc->dev, "stop handling dbc events\n");
return;
}
- mod_delayed_work(system_wq, &dbc->event_work, 1);
+ mod_delayed_work(system_wq, &dbc->event_work,
+ msecs_to_jiffies(poll_interval));
}
static const char * const dbc_state_strings[DS_MAX] = {
@@ -1175,6 +1183,7 @@ xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *
dbc->idVendor = DBC_VENDOR_ID;
dbc->bcdDevice = DBC_DEVICE_REV;
dbc->bInterfaceProtocol = DBC_PROTOCOL;
+ dbc->poll_interval = DBC_POLL_INTERVAL_DEFAULT;
if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
goto err;
diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h
index e39e3ae1677ae..92661b555c2a2 100644
--- a/drivers/usb/host/xhci-dbgcap.h
+++ b/drivers/usb/host/xhci-dbgcap.h
@@ -94,6 +94,7 @@ struct dbc_ep {
#define DBC_QUEUE_SIZE 16
#define DBC_WRITE_BUF_SIZE 8192
+#define DBC_POLL_INTERVAL_DEFAULT 64 /* milliseconds */
/*
* Private structure for DbC hardware state:
@@ -140,6 +141,7 @@ struct xhci_dbc {
enum dbc_state state;
struct delayed_work event_work;
+ unsigned int poll_interval; /* ms */
unsigned resume_required:1;
struct dbc_ep eps[2];
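
A standalone sketch (illustrative only, not part of the diff) of the polling back-off the DbC changes above introduce: the event worker re-arms itself every DBC_POLL_INTERVAL_DEFAULT milliseconds while idle and drops to a 1 ms interval while bulk transfers are pending. The pending_in/pending_out flags below stand in for the driver's list_empty() checks on the endpoint pending lists.

/*
 * Illustrative sketch of the DbC event-work polling back-off: 64 ms between
 * polls when idle, 1 ms while transfers are queued.
 */
#include <stdbool.h>
#include <stdio.h>

#define DBC_POLL_INTERVAL_DEFAULT 64	/* milliseconds */

static unsigned int dbc_next_poll_ms(bool pending_in, bool pending_out)
{
	unsigned int poll_interval = DBC_POLL_INTERVAL_DEFAULT;

	/* mirrors the EVT_DONE case: fast poll while transfers are pending */
	if (pending_in || pending_out)
		poll_interval = 1;

	return poll_interval;
}

int main(void)
{
	printf("idle: %u ms\n", dbc_next_poll_ms(false, false));
	printf("busy: %u ms\n", dbc_next_poll_ms(true, false));
	return 0;
}
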
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0980ade2a234a..61f083de6e196 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -448,38 +448,6 @@ u32 xhci_port_state_to_neutral(u32 state)
}
EXPORT_SYMBOL_GPL(xhci_port_state_to_neutral);
-/**
- * xhci_find_slot_id_by_port() - Find slot id of a usb device on a roothub port
- * @hcd: pointer to hcd of the roothub
- * @xhci: pointer to xhci structure
- * @port: one-based port number of the port in this roothub.
- *
- * Return: Slot id of the usb device connected to the root port, 0 if not found
- */
-
-int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
- u16 port)
-{
- int slot_id;
- int i;
- enum usb_device_speed speed;
-
- slot_id = 0;
- for (i = 0; i < MAX_HC_SLOTS; i++) {
- if (!xhci->devs[i] || !xhci->devs[i]->udev)
- continue;
- speed = xhci->devs[i]->udev->speed;
- if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))
- && xhci->devs[i]->fake_port == port) {
- slot_id = i;
- break;
- }
- }
-
- return slot_id;
-}
-EXPORT_SYMBOL_GPL(xhci_find_slot_id_by_port);
-
/*
* Stop device
* It issues stop endpoint command for EP 0 to 30. And wait the last command
@@ -930,7 +898,6 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
struct xhci_bus_state *bus_state;
struct xhci_hcd *xhci;
struct usb_hcd *hcd;
- int slot_id;
u32 wIndex;
hcd = port->rhub->hcd;
@@ -986,13 +953,11 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
spin_lock_irqsave(&xhci->lock, *flags);
if (time_left) {
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- wIndex + 1);
- if (!slot_id) {
+ if (!port->slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
return -ENODEV;
}
- xhci_ring_device(xhci, slot_id);
+ xhci_ring_device(xhci, port->slot_id);
} else {
int port_status = readl(port->addr);
@@ -1202,7 +1167,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
unsigned long flags;
u32 temp, status;
int retval = 0;
- int slot_id;
struct xhci_bus_state *bus_state;
u16 link_state = 0;
u16 wake_mask = 0;
@@ -1332,15 +1296,13 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
}
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- portnum1);
- if (!slot_id) {
+ if (!port->slot_id) {
xhci_warn(xhci, "slot_id is zero\n");
goto error;
}
/* unlock to execute stop endpoint commands */
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_stop_device(xhci, slot_id, 1);
+ xhci_stop_device(xhci, port->slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
xhci_set_link_state(xhci, port, XDEV_U3);
@@ -1463,14 +1425,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if (link_state == USB_SS_PORT_LS_U3) {
int retries = 16;
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- portnum1);
- if (slot_id) {
+ if (port->slot_id) {
/* unlock to execute stop endpoint
* commands */
spin_unlock_irqrestore(&xhci->lock,
flags);
- xhci_stop_device(xhci, slot_id, 1);
+ xhci_stop_device(xhci, port->slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
}
xhci_set_link_state(xhci, port, USB_SS_PORT_LS_U3);
@@ -1584,13 +1544,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
bus_state->port_c_suspend |= 1 << wIndex;
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- portnum1);
- if (!slot_id) {
+ if (!port->slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
goto error;
}
- xhci_ring_device(xhci, slot_id);
+ xhci_ring_device(xhci, port->slot_id);
break;
case USB_PORT_FEAT_C_SUSPEND:
bus_state->port_c_suspend &= ~(1 << wIndex);
@@ -1821,10 +1779,7 @@ retry:
if (!portsc_buf[port_index])
continue;
if (test_bit(port_index, &bus_state->bus_suspended)) {
- int slot_id;
-
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- port_index + 1);
+ int slot_id = ports[port_index]->slot_id;
if (slot_id) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_stop_device(xhci, slot_id, 1);
@@ -1877,7 +1832,6 @@ int xhci_bus_resume(struct usb_hcd *hcd)
struct xhci_bus_state *bus_state;
unsigned long flags;
int max_ports, port_index;
- int slot_id;
int sret;
u32 next_state;
u32 temp, portsc;
@@ -1970,9 +1924,8 @@ int xhci_bus_resume(struct usb_hcd *hcd)
continue;
}
xhci_test_and_clear_bit(xhci, ports[port_index], PORT_PLC);
- slot_id = xhci_find_slot_id_by_port(hcd, xhci, port_index + 1);
- if (slot_id)
- xhci_ring_device(xhci, slot_id);
+ if (ports[port_index]->slot_id)
+ xhci_ring_device(xhci, ports[port_index]->slot_id);
}
(void) readl(&xhci->op_regs->command);
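
A standalone sketch (illustrative only, not part of the diff) contrasting the removed linear slot search with the slot ID now cached on the port. The struct names below are simplified stand-ins for the driver's types; only the port->slot_id field name mirrors the real code.

/*
 * Illustrative sketch: the old code walked every device slot to find the one
 * attached to a given root-hub port; the new code reads the slot ID cached
 * on the port at Set Address time.
 */
#include <stdio.h>

#define MAX_SLOTS 8

struct fake_port { int slot_id; };		/* 0 means no device attached */
struct fake_dev  { int in_use; int port; };	/* one entry per device slot */

/* old style: linear search over all slots for the device on this port */
static int find_slot_by_port(struct fake_dev *devs, int port)
{
	for (int i = 1; i < MAX_SLOTS; i++)
		if (devs[i].in_use && devs[i].port == port)
			return i;
	return 0;
}

int main(void)
{
	struct fake_dev devs[MAX_SLOTS] = { [3] = { .in_use = 1, .port = 2 } };
	struct fake_port port = { .slot_id = 3 };	/* cached when the device was addressed */

	printf("old lookup: slot %d\n", find_slot_by_port(devs, 2));
	printf("new lookup: slot %d\n", port.slot_id);
	return 0;
}
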
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index a7716202a8dd5..69dd866698833 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -84,7 +84,7 @@ static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
struct xhci_segment *seg;
seg = first->next;
- while (seg != first) {
+ while (seg && seg != first) {
struct xhci_segment *next = seg->next;
xhci_segment_free(xhci, seg);
seg = next;
@@ -351,17 +351,10 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
next = xhci_segment_alloc(xhci, cycle_state, max_packet, num,
flags);
- if (!next) {
- prev = *first;
- while (prev) {
- next = prev->next;
- xhci_segment_free(xhci, prev);
- prev = next;
- }
- return -ENOMEM;
- }
- xhci_link_segments(prev, next, type, chain_links);
+ if (!next)
+ goto free_segments;
+ xhci_link_segments(prev, next, type, chain_links);
prev = next;
num++;
}
@@ -369,6 +362,10 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
*last = prev;
return 0;
+
+free_segments:
+ xhci_free_segments_for_ring(xhci, *first);
+ return -ENOMEM;
}
/*
@@ -444,19 +441,11 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
if (ret)
return -ENOMEM;
- if (ring->type == TYPE_STREAM)
+ if (ring->type == TYPE_STREAM) {
ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
ring, first, last, flags);
- if (ret) {
- struct xhci_segment *next;
- do {
- next = first->next;
- xhci_segment_free(xhci, first);
- if (first == last)
- break;
- first = next;
- } while (true);
- return ret;
+ if (ret)
+ goto free_segments;
}
xhci_link_rings(xhci, ring, first, last, num_new_segs);
@@ -466,6 +455,10 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring->num_segs);
return 0;
+
+free_segments:
+ xhci_free_segments_for_ring(xhci, first);
+ return ret;
}
struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
@@ -789,15 +782,14 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
bool slot_found = false;
/* If the device never made it past the Set Address stage,
- * it may not have the real_port set correctly.
+ * it may not have the root hub port pointer set correctly.
*/
- if (virt_dev->real_port == 0 ||
- virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
- xhci_dbg(xhci, "Bad real port.\n");
+ if (!virt_dev->rhub_port) {
+ xhci_dbg(xhci, "Bad rhub port.\n");
return;
}
- tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
+ tt_list_head = &(xhci->rh_bw[virt_dev->rhub_port->hw_portnum].tts);
list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
/* Multi-TT hubs will have more than one entry */
if (tt_info->slot_id == slot_id) {
@@ -834,7 +826,7 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
goto free_tts;
INIT_LIST_HEAD(&tt_info->tt_list);
list_add(&tt_info->tt_list,
- &xhci->rh_bw[virt_dev->real_port - 1].tts);
+ &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].tts);
tt_info->slot_id = virt_dev->udev->slot_id;
if (tt->multi)
tt_info->ttport = i+1;
@@ -908,6 +900,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
if (dev->udev && dev->udev->slot_id)
dev->udev->slot_id = 0;
+ if (dev->rhub_port && dev->rhub_port->slot_id == slot_id)
+ dev->rhub_port->slot_id = 0;
kfree(xhci->devs[slot_id]);
xhci->devs[slot_id] = NULL;
}
@@ -929,13 +923,12 @@ static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_i
if (!vdev)
return;
- if (vdev->real_port == 0 ||
- vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
- xhci_dbg(xhci, "Bad vdev->real_port.\n");
+ if (!vdev->rhub_port) {
+ xhci_dbg(xhci, "Bad rhub port.\n");
goto out;
}
- tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
+ tt_list_head = &(xhci->rh_bw[vdev->rhub_port->hw_portnum].tts);
list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
/* is this a hub device that added a tt_info to the tts list */
if (tt_info->slot_id == slot_id) {
@@ -1051,16 +1044,16 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
* The xHCI roothub may have ports of differing speeds in any order in the port
* status registers.
*
- * The xHCI hardware wants to know the roothub port number that the USB device
+ * The xHCI hardware wants to know the roothub port that the USB device
* is attached to (or the roothub port its ancestor hub is attached to). All we
* know is the index of that port under either the USB 2.0 or the USB 3.0
* roothub, but that doesn't give us the real index into the HW port status
- * registers. Call xhci_find_raw_port_number() to get real index.
+ * registers.
*/
-static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
- struct usb_device *udev)
+static struct xhci_port *xhci_find_rhub_port(struct xhci_hcd *xhci, struct usb_device *udev)
{
struct usb_device *top_dev;
+ struct xhci_hub *rhub;
struct usb_hcd *hcd;
if (udev->speed >= USB_SPEED_SUPER)
@@ -1072,7 +1065,8 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
top_dev = top_dev->parent)
/* Found device below root hub */;
- return xhci_find_raw_port_number(hcd, top_dev->portnum);
+ rhub = xhci_get_rhub(hcd);
+ return rhub->ports[top_dev->portnum - 1];
}
/* Setup an xHCI virtual device for a Set Address command */
@@ -1081,9 +1075,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
struct xhci_virt_device *dev;
struct xhci_ep_ctx *ep0_ctx;
struct xhci_slot_ctx *slot_ctx;
- u32 port_num;
u32 max_packets;
- struct usb_device *top_dev;
dev = xhci->devs[udev->slot_id];
/* Slot ID 0 is reserved */
@@ -1124,18 +1116,15 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
return -EINVAL;
}
/* Find the root hub port this device is under */
- port_num = xhci_find_real_port_number(xhci, udev);
- if (!port_num)
+ dev->rhub_port = xhci_find_rhub_port(xhci, udev);
+ if (!dev->rhub_port)
return -EINVAL;
- slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
- /* Set the port number in the virtual_device to the faked port number */
- for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
- top_dev = top_dev->parent)
- /* Found device below root hub */;
- dev->fake_port = top_dev->portnum;
- dev->real_port = port_num;
- xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
- xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
+ /* Slot ID is set to the device directly below the root hub */
+ if (!udev->parent->parent)
+ dev->rhub_port->slot_id = udev->slot_id;
+ slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(dev->rhub_port->hw_portnum + 1));
+ xhci_dbg(xhci, "Slot ID %d: HW portnum %d, hcd portnum %d\n",
+ udev->slot_id, dev->rhub_port->hw_portnum, dev->rhub_port->hcd_portnum);
/* Find the right bandwidth table that this device will be a part of.
* If this is a full speed device attached directly to a root port (or a
@@ -1144,12 +1133,12 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
* will never be created for the HS root hub.
*/
if (!udev->tt || !udev->tt->hub->parent) {
- dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
+ dev->bw_table = &xhci->rh_bw[dev->rhub_port->hw_portnum].bw_table;
} else {
struct xhci_root_port_bw_info *rh_bw;
struct xhci_tt_bw_info *tt_bw;
- rh_bw = &xhci->rh_bw[port_num - 1];
+ rh_bw = &xhci->rh_bw[dev->rhub_port->hw_portnum];
/* Find the right TT. */
list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
if (tt_bw->slot_id != udev->tt->hub->slot_id)
@@ -2533,7 +2522,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
if (xhci_add_interrupter(xhci, ir, 0))
goto fail;
- xhci->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
+ ir->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
/*
* XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 61f3f8bbdcead..27eb384a39634 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -122,10 +122,6 @@ static u32 get_bw_boundary(enum usb_device_speed speed)
* each HS root port is treated as a single bandwidth domain,
* but each SS root port is treated as two bandwidth domains, one for IN eps,
* one for OUT eps.
-* @real_port value is defined as follow according to xHCI spec:
-* 1 for SSport0, ..., N+1 for SSportN, N+2 for HSport0, N+3 for HSport1, etc
-* so the bandwidth domain array is organized as follow for simplification:
-* SSport0-OUT, SSport0-IN, ..., SSportX-OUT, SSportX-IN, HSport0, ..., HSportY
*/
static struct mu3h_sch_bw_info *
get_bw_info(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
@@ -136,19 +132,19 @@ get_bw_info(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
int bw_index;
virt_dev = xhci->devs[udev->slot_id];
- if (!virt_dev->real_port) {
- WARN_ONCE(1, "%s invalid real_port\n", dev_name(&udev->dev));
+ if (!virt_dev->rhub_port) {
+ WARN_ONCE(1, "%s invalid rhub port\n", dev_name(&udev->dev));
return NULL;
}
if (udev->speed >= USB_SPEED_SUPER) {
if (usb_endpoint_dir_out(&ep->desc))
- bw_index = (virt_dev->real_port - 1) * 2;
+ bw_index = (virt_dev->rhub_port->hw_portnum) * 2;
else
- bw_index = (virt_dev->real_port - 1) * 2 + 1;
+ bw_index = (virt_dev->rhub_port->hw_portnum) * 2 + 1;
} else {
/* add one more for each SS port */
- bw_index = virt_dev->real_port + xhci->usb3_rhub.num_ports - 1;
+ bw_index = virt_dev->rhub_port->hw_portnum + xhci->usb3_rhub.num_ports;
}
return &mtk->sch_array[bw_index];
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index b534ca9752be4..93b6976480188 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -307,8 +307,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_RESET_ON_RESUME;
}
- if (pdev->vendor == PCI_VENDOR_ID_AMD)
+ if (pdev->vendor == PCI_VENDOR_ID_AMD) {
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ if (pdev->device == 0x43f7)
+ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+ }
if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
@@ -820,7 +823,6 @@ static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_port *port;
struct usb_device *udev;
- unsigned int slot_id;
u32 portsc;
int i;
@@ -843,15 +845,14 @@ static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup)
if ((portsc & PORT_PLS_MASK) != XDEV_U3)
continue;
- slot_id = xhci_find_slot_id_by_port(port->rhub->hcd, xhci,
- port->hcd_portnum + 1);
- if (!slot_id || !xhci->devs[slot_id]) {
+ if (!port->slot_id || !xhci->devs[port->slot_id]) {
xhci_err(xhci, "No dev for slot_id %d for port %d-%d in U3\n",
- slot_id, port->rhub->hcd->self.busnum, port->hcd_portnum + 1);
+ port->slot_id, port->rhub->hcd->self.busnum,
+ port->hcd_portnum + 1);
continue;
}
- udev = xhci->devs[slot_id]->udev;
+ udev = xhci->devs[port->slot_id]->udev;
/* if wakeup is enabled then don't disable the port */
if (udev->do_remote_wakeup && do_wakeup)
diff --git a/drivers/usb/host/xhci-port.h b/drivers/usb/host/xhci-port.h
new file mode 100644
index 0000000000000..f19efb966d180
--- /dev/null
+++ b/drivers/usb/host/xhci-port.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
+/* true: device connected */
+#define PORT_CONNECT (1 << 0)
+/* true: port enabled */
+#define PORT_PE (1 << 1)
+/* bit 2 reserved and zeroed */
+/* true: port has an over-current condition */
+#define PORT_OC (1 << 3)
+/* true: port reset signaling asserted */
+#define PORT_RESET (1 << 4)
+/* Port Link State - bits 5:8
+ * A read gives the current link PM state of the port,
+ * a write with Link State Write Strobe set sets the link state.
+ */
+#define PORT_PLS_MASK (0xf << 5)
+#define XDEV_U0 (0x0 << 5)
+#define XDEV_U1 (0x1 << 5)
+#define XDEV_U2 (0x2 << 5)
+#define XDEV_U3 (0x3 << 5)
+#define XDEV_DISABLED (0x4 << 5)
+#define XDEV_RXDETECT (0x5 << 5)
+#define XDEV_INACTIVE (0x6 << 5)
+#define XDEV_POLLING (0x7 << 5)
+#define XDEV_RECOVERY (0x8 << 5)
+#define XDEV_HOT_RESET (0x9 << 5)
+#define XDEV_COMP_MODE (0xa << 5)
+#define XDEV_TEST_MODE (0xb << 5)
+#define XDEV_RESUME (0xf << 5)
+
+/* true: port has power (see HCC_PPC) */
+#define PORT_POWER (1 << 9)
+/* bits 10:13 indicate device speed:
+ * 0 - undefined speed - port hasn't been initialized by a reset yet
+ * 1 - full speed
+ * 2 - low speed
+ * 3 - high speed
+ * 4 - super speed
+ * 5-15 reserved
+ */
+#define DEV_SPEED_MASK (0xf << 10)
+#define XDEV_FS (0x1 << 10)
+#define XDEV_LS (0x2 << 10)
+#define XDEV_HS (0x3 << 10)
+#define XDEV_SS (0x4 << 10)
+#define XDEV_SSP (0x5 << 10)
+#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10))
+#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
+#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS)
+#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
+#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
+#define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP)
+#define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS)
+#define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f)
+
+/* Bits 20:23 in the Slot Context are the speed for the device */
+#define SLOT_SPEED_FS (XDEV_FS << 10)
+#define SLOT_SPEED_LS (XDEV_LS << 10)
+#define SLOT_SPEED_HS (XDEV_HS << 10)
+#define SLOT_SPEED_SS (XDEV_SS << 10)
+#define SLOT_SPEED_SSP (XDEV_SSP << 10)
+/* Port Indicator Control */
+#define PORT_LED_OFF (0 << 14)
+#define PORT_LED_AMBER (1 << 14)
+#define PORT_LED_GREEN (2 << 14)
+#define PORT_LED_MASK (3 << 14)
+/* Port Link State Write Strobe - set this when changing link state */
+#define PORT_LINK_STROBE (1 << 16)
+/* true: connect status change */
+#define PORT_CSC (1 << 17)
+/* true: port enable change */
+#define PORT_PEC (1 << 18)
+/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port
+ * into an enabled state, and the device into the default state. A "warm" reset
+ * also resets the link, forcing the device through the link training sequence.
+ * SW can also look at the Port Reset register to see when warm reset is done.
+ */
+#define PORT_WRC (1 << 19)
+/* true: over-current change */
+#define PORT_OCC (1 << 20)
+/* true: reset change - 1 to 0 transition of PORT_RESET */
+#define PORT_RC (1 << 21)
+/* port link status change - set on some port link state transitions:
+ * Transition Reason
+ * ------------------------------------------------------------------------------
+ * - U3 to Resume Wakeup signaling from a device
+ * - Resume to Recovery to U0 USB 3.0 device resume
+ * - Resume to U0 USB 2.0 device resume
+ * - U3 to Recovery to U0 Software resume of USB 3.0 device complete
+ * - U3 to U0 Software resume of USB 2.0 device complete
+ * - U2 to U0 L1 resume of USB 2.1 device complete
+ * - U0 to U0 (???) L1 entry rejection by USB 2.1 device
+ * - U0 to disabled L1 entry error with USB 2.1 device
+ * - Any state to inactive Error on USB 3.0 port
+ */
+#define PORT_PLC (1 << 22)
+/* port configure error change - port failed to configure its link partner */
+#define PORT_CEC (1 << 23)
+#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
+ PORT_RC | PORT_PLC | PORT_CEC)
+
+
+/* Cold Attach Status - xHC can set this bit to report device attached during
+ * Sx state. Warm port reset should be performed to clear this bit and move port
+ * to connected state.
+ */
+#define PORT_CAS (1 << 24)
+/* wake on connect (enable) */
+#define PORT_WKCONN_E (1 << 25)
+/* wake on disconnect (enable) */
+#define PORT_WKDISC_E (1 << 26)
+/* wake on over-current (enable) */
+#define PORT_WKOC_E (1 << 27)
+/* bits 28:29 reserved */
+/* true: device is non-removable - for USB 3.0 roothub emulation */
+#define PORT_DEV_REMOVE (1 << 30)
+/* Initiate a warm port reset - complete when PORT_WRC is '1' */
+#define PORT_WR (1 << 31)
+
+/* We mark duplicate entries with -1 */
+#define DUPLICATE_ENTRY ((u8)(-1))
+
+/* Port Power Management Status and Control - port_power_base bitmasks */
+/* Inactivity timer value for transitions into U1, in microseconds.
+ * Timeout can be up to 127us. 0xFF means an infinite timeout.
+ */
+#define PORT_U1_TIMEOUT(p) ((p) & 0xff)
+#define PORT_U1_TIMEOUT_MASK 0xff
+/* Inactivity timer value for transitions into U2 */
+#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8)
+#define PORT_U2_TIMEOUT_MASK (0xff << 8)
+/* Bits 24:31 for port testing */
+
+/* USB2 Protocol PORTSPMSC */
+#define PORT_L1S_MASK 7
+#define PORT_L1S_SUCCESS 1
+#define PORT_RWE (1 << 3)
+#define PORT_HIRD(p) (((p) & 0xf) << 4)
+#define PORT_HIRD_MASK (0xf << 4)
+#define PORT_L1DS_MASK (0xff << 8)
+#define PORT_L1DS(p) (((p) & 0xff) << 8)
+#define PORT_HLE (1 << 16)
+#define PORT_TEST_MODE_SHIFT 28
+
+/* USB3 Protocol PORTLI Port Link Information */
+#define PORT_RX_LANES(p) (((p) >> 16) & 0xf)
+#define PORT_TX_LANES(p) (((p) >> 20) & 0xf)
+
+/* USB2 Protocol PORTHLPMC */
+#define PORT_HIRDM(p)((p) & 3)
+#define PORT_L1_TIMEOUT(p)(((p) & 0xff) << 2)
+#define PORT_BESLD(p)(((p) & 0xf) << 10)
+
+/* use 512 microseconds as USB2 LPM L1 default timeout. */
+#define XHCI_L1_TIMEOUT 512
+
+/* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
+ * Safe to use with mixed HIRD and BESL systems (host and device) and is used
+ * by other operating systems.
+ *
+ * XHCI 1.0 errata 8/14/12 Table 13 notes:
+ * "Software should choose xHC BESL/BESLD field values that do not violate a
+ * device's resume latency requirements,
+ * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
+ * or not program values < '4' if BLC = '0' and a BESL device is attached.
+ */
+#define XHCI_DEFAULT_BESL 4
+
+/*
+ * The USB3 specification defines a 360ms tPollingLFPSTimeout for USB3 ports
+ * to complete link training. Usually link training completes much faster,
+ * so check status 10 times with 36ms sleep in places we need to wait for
+ * polling to complete.
+ */
+#define XHCI_PORT_POLLING_LFPS_TIME 36
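
A standalone sketch (illustrative only, not part of the diff) decoding a PORTSC word with the bitmasks this new header provides. The macro copies mirror the definitions above; the sample register value is invented.

/*
 * Illustrative sketch: decode connect, enable, power, link state and speed
 * fields from an example PORTSC value.
 */
#include <stdio.h>
#include <stdint.h>

#define PORT_CONNECT		(1 << 0)
#define PORT_PE			(1 << 1)
#define PORT_POWER		(1 << 9)
#define PORT_PLS_MASK		(0xf << 5)
#define DEV_PORT_SPEED(p)	(((p) >> 10) & 0x0f)

int main(void)
{
	uint32_t portsc = 0x00001203;	/* sample: connected, enabled, powered, U0, SuperSpeed */

	printf("connected=%d enabled=%d powered=%d pls=%u speed=%u\n",
	       !!(portsc & PORT_CONNECT), !!(portsc & PORT_PE),
	       !!(portsc & PORT_POWER), (portsc & PORT_PLS_MASK) >> 5,
	       DEV_PORT_SPEED(portsc));
	return 0;
}
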
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 4f64b814d4aa2..575f0fd9c9f11 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -113,6 +113,12 @@ static bool last_td_in_urb(struct xhci_td *td)
return urb_priv->num_tds_done == urb_priv->num_tds;
}
+static bool unhandled_event_trb(struct xhci_ring *ring)
+{
+ return ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) ==
+ ring->cycle_state);
+}
+
static void inc_td_cnt(struct urb *urb)
{
struct urb_priv *urb_priv = urb->hcpriv;
@@ -1154,6 +1160,15 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
break;
ep->ep_state &= ~EP_STOP_CMD_PENDING;
return;
+ case EP_STATE_STOPPED:
+ /*
+ * NEC uPD720200 sometimes sets this state and fails with
+ * Context Error while continuing to process TRBs.
+ * Be conservative and trust EP_CTX_STATE on other chips.
+ */
+ if (!(xhci->quirks & XHCI_NEC_HOST))
+ break;
+ fallthrough;
case EP_STATE_RUNNING:
/* Race, HW handled stop ep cmd before ep was running */
xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");
@@ -1870,7 +1885,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
u32 port_id;
u32 portsc, cmd_reg;
int max_ports;
- int slot_id;
unsigned int hcd_portnum;
struct xhci_bus_state *bus_state;
bool bogus_port_status = false;
@@ -1922,9 +1936,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
if (hcd->speed >= HCD_USB3 &&
(portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
- slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
- if (slot_id && xhci->devs[slot_id])
- xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
+ if (port->slot_id && xhci->devs[port->slot_id])
+ xhci->devs[port->slot_id]->flags |= VDEV_PORT_ERROR;
}
if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
@@ -1982,9 +1995,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
* so the roothub behavior is consistent with external
* USB 3.0 hub behavior.
*/
- slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
- if (slot_id && xhci->devs[slot_id])
- xhci_ring_device(xhci, slot_id);
+ if (port->slot_id && xhci->devs[port->slot_id])
+ xhci_ring_device(xhci, port->slot_id);
if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
usb_wakeup_notification(hcd->self.root_hub,
@@ -2816,7 +2828,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td_num--;
/* Is this a TRB in the currently executing TD? */
- ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
+ ep_seg = trb_in_td(xhci, td->start_seg, td->first_trb,
td->last_trb, ep_trb_dma, false);
/*
@@ -2884,9 +2896,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
"part of current TD ep_index %d "
"comp_code %u\n", ep_index,
trb_comp_code);
- trb_in_td(xhci, ep_ring->deq_seg,
- ep_ring->dequeue, td->last_trb,
- ep_trb_dma, true);
+ trb_in_td(xhci, td->start_seg, td->first_trb,
+ td->last_trb, ep_trb_dma, true);
return -ESHUTDOWN;
}
}
@@ -2962,32 +2973,18 @@ err_out:
}
/*
- * This function handles all OS-owned events on the event ring. It may drop
+ * This function handles one OS-owned event on the event ring. It may drop
* xhci->lock between event processing (e.g. to pass up port status changes).
- * Returns >0 for "possibly more events to process" (caller should call again),
- * otherwise 0 if done. In future, <0 returns should indicate error code.
*/
-static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
+static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
+ union xhci_trb *event)
{
- union xhci_trb *event;
u32 trb_type;
- /* Event ring hasn't been allocated yet. */
- if (!ir || !ir->event_ring || !ir->event_ring->dequeue) {
- xhci_err(xhci, "ERROR interrupter not ready\n");
- return -ENOMEM;
- }
-
- event = ir->event_ring->dequeue;
- /* Does the HC or OS own the TRB? */
- if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
- ir->event_ring->cycle_state)
- return 0;
-
trace_xhci_handle_event(ir->event_ring, &event->generic);
/*
- * Barrier between reading the TRB_CYCLE (valid) flag above and any
+ * Barrier between reading the TRB_CYCLE (valid) flag before, and any
* speculative reads of the event's flags/data below.
*/
rmb();
@@ -3017,18 +3014,11 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
* to make sure a watchdog timer didn't mark the host as non-responsive.
*/
if (xhci->xhc_state & XHCI_STATE_DYING) {
- xhci_dbg(xhci, "xHCI host dying, returning from "
- "event handler.\n");
- return 0;
+ xhci_dbg(xhci, "xHCI host dying, returning from event handler.\n");
+ return -ENODEV;
}
- /* Update SW event ring dequeue pointer */
- inc_deq(xhci, ir->event_ring);
-
- /* Are there more items on the event ring? Caller will call us again to
- * check.
- */
- return 1;
+ return 0;
}
/*
@@ -3038,30 +3028,26 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
*/
static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
struct xhci_interrupter *ir,
- union xhci_trb *event_ring_deq,
bool clear_ehb)
{
u64 temp_64;
dma_addr_t deq;
temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
- /* If necessary, update the HW's version of the event ring deq ptr. */
- if (event_ring_deq != ir->event_ring->dequeue) {
- deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
- ir->event_ring->dequeue);
- if (deq == 0)
- xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
- /*
- * Per 4.9.4, Software writes to the ERDP register shall
- * always advance the Event Ring Dequeue Pointer value.
- */
- if ((temp_64 & ERST_PTR_MASK) == (deq & ERST_PTR_MASK))
- return;
+ deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
+ ir->event_ring->dequeue);
+ if (deq == 0)
+ xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
+ /*
+ * Per 4.9.4, Software writes to the ERDP register shall always advance
+ * the Event Ring Dequeue Pointer value.
+ */
+ if ((temp_64 & ERST_PTR_MASK) == (deq & ERST_PTR_MASK) && !clear_ehb)
+ return;
- /* Update HC event ring dequeue pointer */
- temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK;
- temp_64 |= deq & ERST_PTR_MASK;
- }
+ /* Update HC event ring dequeue pointer */
+ temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK;
+ temp_64 |= deq & ERST_PTR_MASK;
/* Clear the event handler busy flag (RW1C) */
if (clear_ehb)
@@ -3069,6 +3055,76 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue);
}
+/* Clear the interrupt pending bit for a specific interrupter. */
+static void xhci_clear_interrupt_pending(struct xhci_hcd *xhci,
+ struct xhci_interrupter *ir)
+{
+ if (!ir->ip_autoclear) {
+ u32 irq_pending;
+
+ irq_pending = readl(&ir->ir_set->irq_pending);
+ irq_pending |= IMAN_IP;
+ writel(irq_pending, &ir->ir_set->irq_pending);
+ }
+}
+
+/*
+ * Handle all OS-owned events on an interrupter event ring. It may drop
+ * and reacquire xhci->lock between event processing.
+ */
+static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
+{
+ int event_loop = 0;
+ int err;
+ u64 temp;
+
+ xhci_clear_interrupt_pending(xhci, ir);
+
+ /* Event ring hasn't been allocated yet. */
+ if (!ir->event_ring || !ir->event_ring->dequeue) {
+ xhci_err(xhci, "ERROR interrupter event ring not ready\n");
+ return -ENOMEM;
+ }
+
+ if (xhci->xhc_state & XHCI_STATE_DYING ||
+ xhci->xhc_state & XHCI_STATE_HALTED) {
+ xhci_dbg(xhci, "xHCI dying, ignoring interrupt. Shouldn't IRQs be disabled?\n");
+
+ /* Clear the event handler busy flag (RW1C) */
+ temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
+ xhci_write_64(xhci, temp | ERST_EHB, &ir->ir_set->erst_dequeue);
+ return -ENODEV;
+ }
+
+ /* Process all OS owned event TRBs on this event ring */
+ while (unhandled_event_trb(ir->event_ring)) {
+ err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue);
+
+ /*
+ * If half a segment of events has been handled in one go then
+ * update ERDP, and force isoc trbs to interrupt more often
+ */
+ if (event_loop++ > TRBS_PER_SEGMENT / 2) {
+ xhci_update_erst_dequeue(xhci, ir, false);
+
+ if (ir->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
+ ir->isoc_bei_interval = ir->isoc_bei_interval / 2;
+
+ event_loop = 0;
+ }
+
+ /* Update SW event ring dequeue pointer */
+ inc_deq(xhci, ir->event_ring);
+
+ if (err)
+ break;
+ }
+
+ xhci_update_erst_dequeue(xhci, ir, true);
+
+ return 0;
+}
+
/*
* xHCI spec says we can get an interrupt, and if the HC has an error condition,
* we might get bad data out of the event ring. Section 4.10.2.7 has a list of
@@ -3077,24 +3133,21 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- union xhci_trb *event_ring_deq;
- struct xhci_interrupter *ir;
- irqreturn_t ret = IRQ_NONE;
- u64 temp_64;
+ irqreturn_t ret = IRQ_HANDLED;
u32 status;
- int event_loop = 0;
spin_lock(&xhci->lock);
/* Check if the xHC generated the interrupt, or the irq is shared */
status = readl(&xhci->op_regs->status);
if (status == ~(u32)0) {
xhci_hc_died(xhci);
- ret = IRQ_HANDLED;
goto out;
}
- if (!(status & STS_EINT))
+ if (!(status & STS_EINT)) {
+ ret = IRQ_NONE;
goto out;
+ }
if (status & STS_HCE) {
xhci_warn(xhci, "WARNING: Host Controller Error\n");
@@ -3104,7 +3157,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
if (status & STS_FATAL) {
xhci_warn(xhci, "WARNING: Host System Error\n");
xhci_halt(xhci);
- ret = IRQ_HANDLED;
goto out;
}
@@ -3117,48 +3169,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
writel(status, &xhci->op_regs->status);
/* This is the handler of the primary interrupter */
- ir = xhci->interrupters[0];
- if (!hcd->msi_enabled) {
- u32 irq_pending;
- irq_pending = readl(&ir->ir_set->irq_pending);
- irq_pending |= IMAN_IP;
- writel(irq_pending, &ir->ir_set->irq_pending);
- }
-
- if (xhci->xhc_state & XHCI_STATE_DYING ||
- xhci->xhc_state & XHCI_STATE_HALTED) {
- xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
- "Shouldn't IRQs be disabled?\n");
- /* Clear the event handler busy flag (RW1C);
- * the event ring should be empty.
- */
- temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
- xhci_write_64(xhci, temp_64 | ERST_EHB,
- &ir->ir_set->erst_dequeue);
- ret = IRQ_HANDLED;
- goto out;
- }
-
- event_ring_deq = ir->event_ring->dequeue;
- /* FIXME this should be a delayed service routine
- * that clears the EHB.
- */
- while (xhci_handle_event(xhci, ir) > 0) {
- if (event_loop++ < TRBS_PER_SEGMENT / 2)
- continue;
- xhci_update_erst_dequeue(xhci, ir, event_ring_deq, false);
- event_ring_deq = ir->event_ring->dequeue;
-
- /* ring is half-full, force isoc trbs to interrupt more often */
- if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
- xhci->isoc_bei_interval = xhci->isoc_bei_interval / 2;
-
- event_loop = 0;
- }
-
- xhci_update_erst_dequeue(xhci, ir, event_ring_deq, true);
- ret = IRQ_HANDLED;
-
+ xhci_handle_events(xhci, xhci->interrupters[0]);
out:
spin_unlock(&xhci->lock);
@@ -4019,7 +4030,8 @@ static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
}
/* Check if we should generate event interrupt for a TD in an isoc URB */
-static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
+static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i,
+ struct xhci_interrupter *ir)
{
if (xhci->hci_version < 0x100)
return false;
@@ -4030,8 +4042,8 @@ static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
* If AVOID_BEI is set the host handles full event rings poorly,
* generate an event at least every 8th TD to clear the event ring
*/
- if (i && xhci->quirks & XHCI_AVOID_BEI)
- return !!(i % xhci->isoc_bei_interval);
+ if (i && ir->isoc_bei_interval && xhci->quirks & XHCI_AVOID_BEI)
+ return !!(i % ir->isoc_bei_interval);
return true;
}
@@ -4040,6 +4052,7 @@ static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
+ struct xhci_interrupter *ir;
struct xhci_ring *ep_ring;
struct urb_priv *urb_priv;
struct xhci_td *td;
@@ -4057,6 +4070,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
xep = &xhci->devs[slot_id]->eps[ep_index];
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+ ir = xhci->interrupters[0];
num_tds = urb->number_of_packets;
if (num_tds < 1) {
@@ -4144,7 +4158,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
td->last_trb = ep_ring->enqueue;
td->last_trb_seg = ep_ring->enq_seg;
field |= TRB_IOC;
- if (trb_block_event_intr(xhci, num_tds, i))
+ if (trb_block_event_intr(xhci, num_tds, i, ir))
field |= TRB_BEI;
}
/* Calculate TRB length */
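
A standalone sketch (illustrative only, not part of the diff) of the isoc BEI decision these changes move to the interrupter: with XHCI_AVOID_BEI set, a TD keeps its completion interrupt only when its index is a multiple of isoc_bei_interval, which xhci_handle_events() halves whenever more than half a segment of events is handled in one pass. The sketch covers only the quirk branch of trb_block_event_intr(), and the interval value below is a sample, not the driver's default.

/*
 * Illustrative sketch: which isoc TDs keep their completion interrupt when
 * the host needs an event at least every Nth TD to avoid a full event ring.
 */
#include <stdbool.h>
#include <stdio.h>

static bool block_event_intr(int i, unsigned int isoc_bei_interval)
{
	/* mirrors the quirk branch: TD 0 and every Nth TD still interrupt */
	if (i && isoc_bei_interval)
		return i % isoc_bei_interval;
	return false;
}

int main(void)
{
	unsigned int interval = 8;	/* sample interval */

	for (int i = 0; i < 16; i++)
		if (!block_event_intr(i, interval))
			printf("TD %d interrupts\n", i);
	return 0;
}
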
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index ac47b1c0544a6..5762564b9d733 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -172,8 +172,7 @@ DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
__field(void *, vdev)
__field(unsigned long long, out_ctx)
__field(unsigned long long, in_ctx)
- __field(u8, fake_port)
- __field(u8, real_port)
+ __field(int, slot_id)
__field(u16, current_mel)
),
@@ -181,13 +180,12 @@ DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
__entry->vdev = vdev;
__entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
__entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
- __entry->fake_port = (u8) vdev->fake_port;
- __entry->real_port = (u8) vdev->real_port;
+ __entry->slot_id = (int) vdev->slot_id;
__entry->current_mel = (u16) vdev->current_mel;
),
- TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
- __entry->vdev, __entry->in_ctx, __entry->out_ctx,
- __entry->fake_port, __entry->real_port, __entry->current_mel
+ TP_printk("vdev %p slot %d ctx %llx | %llx current_mel %d",
+ __entry->vdev, __entry->slot_id, __entry->in_ctx,
+ __entry->out_ctx, __entry->current_mel
)
);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index c057c42c36f4c..8579603edaff1 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -346,6 +346,23 @@ static int xhci_disable_interrupter(struct xhci_interrupter *ir)
return 0;
}
+/* interrupt moderation interval imod_interval in nanoseconds */
+static int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
+ u32 imod_interval)
+{
+ u32 imod;
+
+ if (!ir || !ir->ir_set || imod_interval > U16_MAX * 250)
+ return -EINVAL;
+
+ imod = readl(&ir->ir_set->irq_control);
+ imod &= ~ER_IRQ_INTERVAL_MASK;
+ imod |= (imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
+ writel(imod, &ir->ir_set->irq_control);
+
+ return 0;
+}
+
static void compliance_mode_recovery(struct timer_list *t)
{
struct xhci_hcd *xhci;
@@ -528,7 +545,6 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
*/
int xhci_run(struct usb_hcd *hcd)
{
- u32 temp;
u64 temp_64;
int ret;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -538,6 +554,9 @@ int xhci_run(struct usb_hcd *hcd)
*/
hcd->uses_new_polling = 1;
+ if (hcd->msi_enabled)
+ ir->ip_autoclear = true;
+
if (!usb_hcd_is_primary_hcd(hcd))
return xhci_run_finished(xhci);
@@ -548,12 +567,7 @@ int xhci_run(struct usb_hcd *hcd)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"ERST deq = 64'h%0lx", (long unsigned int) temp_64);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Set the interrupt modulation register");
- temp = readl(&ir->ir_set->irq_control);
- temp &= ~ER_IRQ_INTERVAL_MASK;
- temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
- writel(temp, &ir->ir_set->irq_control);
+ xhci_set_interrupter_moderation(ir, xhci->imod_interval);
if (xhci->quirks & XHCI_NEC_HOST) {
struct xhci_command *command;
@@ -780,19 +794,7 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
seg = seg->next;
} while (seg != ring->deq_seg);
- /* Reset the software enqueue and dequeue pointers */
- ring->deq_seg = ring->first_seg;
- ring->dequeue = ring->first_seg->trbs;
- ring->enq_seg = ring->deq_seg;
- ring->enqueue = ring->dequeue;
-
- ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
- /*
- * Ring is now zeroed, so the HW should look for change of ownership
- * when the cycle bit is set to 1.
- */
- ring->cycle_state = 1;
-
+ xhci_initialize_ring_info(ring, 1);
/*
* Reset the hardware dequeue pointer.
* Yes, this will need to be re-written after resume, but we're paranoid
@@ -1217,6 +1219,8 @@ static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
temp = kzalloc_node(buf_len, GFP_ATOMIC,
dev_to_node(hcd->self.sysdev));
+ if (!temp)
+ return -ENOMEM;
if (usb_urb_dir_out(urb))
sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
@@ -2259,7 +2263,7 @@ static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
struct xhci_tt_bw_info *tt_info;
/* Find the bandwidth table for the root port this TT is attached to. */
- bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
+ bw_table = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].bw_table;
tt_info = virt_dev->tt_info;
/* If this TT already had active endpoints, the bandwidth for this TT
* has already been added. Removing all periodic endpoints (and thus
@@ -2377,7 +2381,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
if (virt_dev->tt_info) {
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Recalculating BW for rootport %u",
- virt_dev->real_port);
+ virt_dev->rhub_port->hw_portnum + 1);
if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
xhci_warn(xhci, "Not enough bandwidth on HS bus for "
"newly activated TT.\n");
@@ -2390,7 +2394,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
} else {
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Recalculating BW for rootport %u",
- virt_dev->real_port);
+ virt_dev->rhub_port->hw_portnum + 1);
}
/* Add in how much bandwidth will be used for interval zero, or the
@@ -2487,14 +2491,12 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
bw_used += overhead + packet_size;
if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
- unsigned int port_index = virt_dev->real_port - 1;
-
/* OK, we're manipulating a HS device attached to a
* root port bandwidth domain. Include the number of active TTs
* in the bandwidth used.
*/
bw_used += TT_HS_OVERHEAD *
- xhci->rh_bw[port_index].num_active_tts;
+ xhci->rh_bw[virt_dev->rhub_port->hw_portnum].num_active_tts;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
@@ -2681,7 +2683,7 @@ void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
if (!virt_dev->tt_info)
return;
- rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
+ rh_bw_info = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum];
if (old_active_eps == 0 &&
virt_dev->tt_info->active_eps != 0) {
rh_bw_info->num_active_tts += 1;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 6f82d404883f9..6f4bf98a62824 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -22,6 +22,9 @@
#include "xhci-ext-caps.h"
#include "pci-quirks.h"
+#include "xhci-port.h"
+#include "xhci-caps.h"
+
/* max buffer size for trace and debug messages */
#define XHCI_MSG_MAX 500
@@ -62,90 +65,6 @@ struct xhci_cap_regs {
/* Reserved up to (CAPLENGTH - 0x1C) */
};
-/* hc_capbase bitmasks */
-/* bits 7:0 - how long is the Capabilities register */
-#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
-/* bits 31:16 */
-#define HC_VERSION(p) (((p) >> 16) & 0xffff)
-
-/* HCSPARAMS1 - hcs_params1 - bitmasks */
-/* bits 0:7, Max Device Slots */
-#define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff)
-#define HCS_SLOTS_MASK 0xff
-/* bits 8:18, Max Interrupters */
-#define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff)
-/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
-#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
-
-/* HCSPARAMS2 - hcs_params2 - bitmasks */
-/* bits 0:3, frames or uframes that SW needs to queue transactions
- * ahead of the HW to meet periodic deadlines */
-#define HCS_IST(p) (((p) >> 0) & 0xf)
-/* bits 4:7, max number of Event Ring segments */
-#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
-/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
-/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
-/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
-#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
-
-/* HCSPARAMS3 - hcs_params3 - bitmasks */
-/* bits 0:7, Max U1 to U0 latency for the roothub ports */
-#define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff)
-/* bits 16:31, Max U2 to U0 latency for the roothub ports */
-#define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff)
-
-/* HCCPARAMS - hcc_params - bitmasks */
-/* true: HC can use 64-bit address pointers */
-#define HCC_64BIT_ADDR(p) ((p) & (1 << 0))
-/* true: HC can do bandwidth negotiation */
-#define HCC_BANDWIDTH_NEG(p) ((p) & (1 << 1))
-/* true: HC uses 64-byte Device Context structures
- * FIXME 64-byte context structures aren't supported yet.
- */
-#define HCC_64BYTE_CONTEXT(p) ((p) & (1 << 2))
-/* true: HC has port power switches */
-#define HCC_PPC(p) ((p) & (1 << 3))
-/* true: HC has port indicators */
-#define HCS_INDICATOR(p) ((p) & (1 << 4))
-/* true: HC has Light HC Reset Capability */
-#define HCC_LIGHT_RESET(p) ((p) & (1 << 5))
-/* true: HC supports latency tolerance messaging */
-#define HCC_LTC(p) ((p) & (1 << 6))
-/* true: no secondary Stream ID Support */
-#define HCC_NSS(p) ((p) & (1 << 7))
-/* true: HC supports Stopped - Short Packet */
-#define HCC_SPC(p) ((p) & (1 << 9))
-/* true: HC has Contiguous Frame ID Capability */
-#define HCC_CFC(p) ((p) & (1 << 11))
-/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
-#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
-/* Extended Capabilities pointer from PCI base - section 5.3.6 */
-#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
-
-#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
-
-/* db_off bitmask - bits 0:1 reserved */
-#define DBOFF_MASK (~0x3)
-
-/* run_regs_off bitmask - bits 0:4 reserved */
-#define RTSOFF_MASK (~0x1f)
-
-/* HCCPARAMS2 - hcc_params2 - bitmasks */
-/* true: HC supports U3 entry Capability */
-#define HCC2_U3C(p) ((p) & (1 << 0))
-/* true: HC supports Configure endpoint command Max exit latency too large */
-#define HCC2_CMC(p) ((p) & (1 << 1))
-/* true: HC supports Force Save context Capability */
-#define HCC2_FSC(p) ((p) & (1 << 2))
-/* true: HC supports Compliance Transition Capability */
-#define HCC2_CTC(p) ((p) & (1 << 3))
-/* true: HC support Large ESIT payload Capability > 48k */
-#define HCC2_LEC(p) ((p) & (1 << 4))
-/* true: HC support Configuration Information Capability */
-#define HCC2_CIC(p) ((p) & (1 << 5))
-/* true: HC support Extended TBC Capability, Isoc burst count > 65535 */
-#define HCC2_ETC(p) ((p) & (1 << 6))
-
/* Number of registers per port */
#define NUM_PORT_REGS 4
@@ -291,181 +210,6 @@ struct xhci_op_regs {
#define CONFIG_CIE (1 << 9)
/* bits 10:31 - reserved and should be preserved */
-/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
-/* true: device connected */
-#define PORT_CONNECT (1 << 0)
-/* true: port enabled */
-#define PORT_PE (1 << 1)
-/* bit 2 reserved and zeroed */
-/* true: port has an over-current condition */
-#define PORT_OC (1 << 3)
-/* true: port reset signaling asserted */
-#define PORT_RESET (1 << 4)
-/* Port Link State - bits 5:8
- * A read gives the current link PM state of the port,
- * a write with Link State Write Strobe set sets the link state.
- */
-#define PORT_PLS_MASK (0xf << 5)
-#define XDEV_U0 (0x0 << 5)
-#define XDEV_U1 (0x1 << 5)
-#define XDEV_U2 (0x2 << 5)
-#define XDEV_U3 (0x3 << 5)
-#define XDEV_DISABLED (0x4 << 5)
-#define XDEV_RXDETECT (0x5 << 5)
-#define XDEV_INACTIVE (0x6 << 5)
-#define XDEV_POLLING (0x7 << 5)
-#define XDEV_RECOVERY (0x8 << 5)
-#define XDEV_HOT_RESET (0x9 << 5)
-#define XDEV_COMP_MODE (0xa << 5)
-#define XDEV_TEST_MODE (0xb << 5)
-#define XDEV_RESUME (0xf << 5)
-
-/* true: port has power (see HCC_PPC) */
-#define PORT_POWER (1 << 9)
-/* bits 10:13 indicate device speed:
- * 0 - undefined speed - port hasn't be initialized by a reset yet
- * 1 - full speed
- * 2 - low speed
- * 3 - high speed
- * 4 - super speed
- * 5-15 reserved
- */
-#define DEV_SPEED_MASK (0xf << 10)
-#define XDEV_FS (0x1 << 10)
-#define XDEV_LS (0x2 << 10)
-#define XDEV_HS (0x3 << 10)
-#define XDEV_SS (0x4 << 10)
-#define XDEV_SSP (0x5 << 10)
-#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10))
-#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
-#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS)
-#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
-#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
-#define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP)
-#define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS)
-#define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f)
-
-/* Bits 20:23 in the Slot Context are the speed for the device */
-#define SLOT_SPEED_FS (XDEV_FS << 10)
-#define SLOT_SPEED_LS (XDEV_LS << 10)
-#define SLOT_SPEED_HS (XDEV_HS << 10)
-#define SLOT_SPEED_SS (XDEV_SS << 10)
-#define SLOT_SPEED_SSP (XDEV_SSP << 10)
-/* Port Indicator Control */
-#define PORT_LED_OFF (0 << 14)
-#define PORT_LED_AMBER (1 << 14)
-#define PORT_LED_GREEN (2 << 14)
-#define PORT_LED_MASK (3 << 14)
-/* Port Link State Write Strobe - set this when changing link state */
-#define PORT_LINK_STROBE (1 << 16)
-/* true: connect status change */
-#define PORT_CSC (1 << 17)
-/* true: port enable change */
-#define PORT_PEC (1 << 18)
-/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port
- * into an enabled state, and the device into the default state. A "warm" reset
- * also resets the link, forcing the device through the link training sequence.
- * SW can also look at the Port Reset register to see when warm reset is done.
- */
-#define PORT_WRC (1 << 19)
-/* true: over-current change */
-#define PORT_OCC (1 << 20)
-/* true: reset change - 1 to 0 transition of PORT_RESET */
-#define PORT_RC (1 << 21)
-/* port link status change - set on some port link state transitions:
- * Transition Reason
- * ------------------------------------------------------------------------------
- * - U3 to Resume Wakeup signaling from a device
- * - Resume to Recovery to U0 USB 3.0 device resume
- * - Resume to U0 USB 2.0 device resume
- * - U3 to Recovery to U0 Software resume of USB 3.0 device complete
- * - U3 to U0 Software resume of USB 2.0 device complete
- * - U2 to U0 L1 resume of USB 2.1 device complete
- * - U0 to U0 (???) L1 entry rejection by USB 2.1 device
- * - U0 to disabled L1 entry error with USB 2.1 device
- * - Any state to inactive Error on USB 3.0 port
- */
-#define PORT_PLC (1 << 22)
-/* port configure error change - port failed to configure its link partner */
-#define PORT_CEC (1 << 23)
-#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
- PORT_RC | PORT_PLC | PORT_CEC)
-
-
-/* Cold Attach Status - xHC can set this bit to report device attached during
- * Sx state. Warm port reset should be perfomed to clear this bit and move port
- * to connected state.
- */
-#define PORT_CAS (1 << 24)
-/* wake on connect (enable) */
-#define PORT_WKCONN_E (1 << 25)
-/* wake on disconnect (enable) */
-#define PORT_WKDISC_E (1 << 26)
-/* wake on over-current (enable) */
-#define PORT_WKOC_E (1 << 27)
-/* bits 28:29 reserved */
-/* true: device is non-removable - for USB 3.0 roothub emulation */
-#define PORT_DEV_REMOVE (1 << 30)
-/* Initiate a warm port reset - complete when PORT_WRC is '1' */
-#define PORT_WR (1 << 31)
-
-/* We mark duplicate entries with -1 */
-#define DUPLICATE_ENTRY ((u8)(-1))
-
-/* Port Power Management Status and Control - port_power_base bitmasks */
-/* Inactivity timer value for transitions into U1, in microseconds.
- * Timeout can be up to 127us. 0xFF means an infinite timeout.
- */
-#define PORT_U1_TIMEOUT(p) ((p) & 0xff)
-#define PORT_U1_TIMEOUT_MASK 0xff
-/* Inactivity timer value for transitions into U2 */
-#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8)
-#define PORT_U2_TIMEOUT_MASK (0xff << 8)
-/* Bits 24:31 for port testing */
-
-/* USB2 Protocol PORTSPMSC */
-#define PORT_L1S_MASK 7
-#define PORT_L1S_SUCCESS 1
-#define PORT_RWE (1 << 3)
-#define PORT_HIRD(p) (((p) & 0xf) << 4)
-#define PORT_HIRD_MASK (0xf << 4)
-#define PORT_L1DS_MASK (0xff << 8)
-#define PORT_L1DS(p) (((p) & 0xff) << 8)
-#define PORT_HLE (1 << 16)
-#define PORT_TEST_MODE_SHIFT 28
-
-/* USB3 Protocol PORTLI Port Link Information */
-#define PORT_RX_LANES(p) (((p) >> 16) & 0xf)
-#define PORT_TX_LANES(p) (((p) >> 20) & 0xf)
-
-/* USB2 Protocol PORTHLPMC */
-#define PORT_HIRDM(p)((p) & 3)
-#define PORT_L1_TIMEOUT(p)(((p) & 0xff) << 2)
-#define PORT_BESLD(p)(((p) & 0xf) << 10)
-
-/* use 512 microseconds as USB2 LPM L1 default timeout. */
-#define XHCI_L1_TIMEOUT 512
-
-/* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
- * Safe to use with mixed HIRD and BESL systems (host and device) and is used
- * by other operating systems.
- *
- * XHCI 1.0 errata 8/14/12 Table 13 notes:
- * "Software should choose xHC BESL/BESLD field values that do not violate a
- * device's resume latency requirements,
- * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
- * or not program values < '4' if BLC = '0' and a BESL device is attached.
- */
-#define XHCI_DEFAULT_BESL 4
-
-/*
- * USB3 specification define a 360ms tPollingLFPSTiemout for USB3 ports
- * to complete link training. usually link trainig completes much faster
- * so check status 10 times with 36ms sleep in places we need to wait for
- * polling to complete.
- */
-#define XHCI_PORT_POLLING_LFPS_TIME 36
-
/**
* struct xhci_intr_reg - Interrupt Register Set
* @irq_pending: IMAN - Interrupt Management Register. Used to enable
@@ -995,8 +739,7 @@ struct xhci_virt_device {
/* Used for addressing devices and configuration changes */
struct xhci_container_ctx *in_ctx;
struct xhci_virt_ep eps[EP_CTX_PER_DEV];
- u8 fake_port;
- u8 real_port;
+ struct xhci_port *rhub_port;
struct xhci_interval_bw_table *bw_table;
struct xhci_tt_bw_info *tt_info;
/*
@@ -1688,6 +1431,8 @@ struct xhci_interrupter {
struct xhci_erst erst;
struct xhci_intr_reg __iomem *ir_set;
unsigned int intr_num;
+ bool ip_autoclear;
+ u32 isoc_bei_interval;
/* For interrupter registers save and restore over suspend/resume */
u32 s3_irq_pending;
u32 s3_irq_control;
@@ -1717,6 +1462,8 @@ struct xhci_port {
unsigned int lpm_incapable:1;
unsigned long resume_timestamp;
bool rexit_active;
+ /* Slot ID is the index of the device directly connected to the port */
+ int slot_id;
struct completion rexit_done;
struct completion u3exit_done;
};
@@ -1760,7 +1507,6 @@ struct xhci_hcd {
u8 isoc_threshold;
/* imod_interval in ns (I * 250ns) */
u32 imod_interval;
- u32 isoc_bei_interval;
int event_ring_max;
/* 4KB min, 128MB max */
int page_size;
@@ -2200,8 +1946,6 @@ unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd);
#endif /* CONFIG_PM */
u32 xhci_port_state_to_neutral(u32 state);
-int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
- u16 port);
void xhci_ring_device(struct xhci_hcd *xhci, int slot_id);
/* xHCI contexts */
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index 67f098579fb45..7b7e1554ea20e 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -631,7 +631,6 @@ static int mdc800_device_open (struct inode* inode, struct file *file)
mdc800->camera_busy=0;
mdc800->camera_request_ready=0;
- retval=0;
mdc800->irq_urb->dev = mdc800->dev;
retval = usb_submit_urb (mdc800->irq_urb, GFP_KERNEL);
if (retval) {
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index 0dd2b032c90b9..d8049275a023c 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -78,7 +78,7 @@ static int onboard_hub_power_on(struct onboard_hub *hub)
err = regulator_bulk_enable(hub->pdata->num_supplies, hub->supplies);
if (err) {
dev_err(hub->dev, "failed to enable supplies: %pe\n", ERR_PTR(err));
- return err;
+ goto disable_clk;
}
fsleep(hub->pdata->reset_us);
@@ -87,6 +87,10 @@ static int onboard_hub_power_on(struct onboard_hub *hub)
hub->is_powered_on = true;
return 0;
+
+disable_clk:
+ clk_disable_unprepare(hub->clk);
+ return err;
}
static int onboard_hub_power_off(struct onboard_hub *hub)
@@ -260,7 +264,7 @@ static int onboard_hub_probe(struct platform_device *pdev)
if (!hub)
return -ENOMEM;
- hub->pdata = device_get_match_data(&pdev->dev);
+ hub->pdata = device_get_match_data(dev);
if (!hub->pdata)
return -EINVAL;
@@ -454,6 +458,8 @@ static const struct usb_device_id onboard_hub_id_table[] = {
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5414) }, /* RTS5414 USB 2.1 */
+ { USB_DEVICE(VENDOR_ID_TI, 0x8025) }, /* TI USB8020B 3.0 */
+ { USB_DEVICE(VENDOR_ID_TI, 0x8027) }, /* TI USB8020B 2.0 */
{ USB_DEVICE(VENDOR_ID_TI, 0x8140) }, /* TI USB8041 3.0 */
{ USB_DEVICE(VENDOR_ID_TI, 0x8142) }, /* TI USB8041 2.0 */
{ USB_DEVICE(VENDOR_ID_VIA, 0x0817) }, /* VIA VL817 3.1 */
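Annotation: the onboard_usb_hub.c error-path change above matters because the hub clock is already running when the supplies are enabled; returning early leaked an enabled clock. A minimal sketch of the resulting power-on flow, assuming (as the new disable_clk label implies) that clk_prepare_enable() runs before regulator_bulk_enable():

	err = clk_prepare_enable(hub->clk);
	if (err)
		return err;

	err = regulator_bulk_enable(hub->pdata->num_supplies, hub->supplies);
	if (err) {
		dev_err(hub->dev, "failed to enable supplies: %pe\n", ERR_PTR(err));
		goto disable_clk;	/* unwind the clock instead of returning */
	}

	fsleep(hub->pdata->reset_us);
	/* ... de-assert reset, mark the hub powered on ... */
	return 0;

disable_clk:
	clk_disable_unprepare(hub->clk);
	return err;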
diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
index f360d5cf8d8a0..b4b15d45f84d6 100644
--- a/drivers/usb/misc/onboard_usb_hub.h
+++ b/drivers/usb/misc/onboard_usb_hub.h
@@ -26,6 +26,11 @@ static const struct onboard_hub_pdata realtek_rts5411_data = {
.num_supplies = 1,
};
+static const struct onboard_hub_pdata ti_tusb8020b_data = {
+ .reset_us = 3000,
+ .num_supplies = 1,
+};
+
static const struct onboard_hub_pdata ti_tusb8041_data = {
.reset_us = 3000,
.num_supplies = 1,
@@ -62,6 +67,8 @@ static const struct of_device_id onboard_hub_match[] = {
{ .compatible = "usb424,2517", .data = &microchip_usb424_data, },
{ .compatible = "usb424,2744", .data = &microchip_usb5744_data, },
{ .compatible = "usb424,5744", .data = &microchip_usb5744_data, },
+ { .compatible = "usb451,8025", .data = &ti_tusb8020b_data, },
+ { .compatible = "usb451,8027", .data = &ti_tusb8020b_data, },
{ .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
{ .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
{ .compatible = "usb4b4,6504", .data = &cypress_hx3_data, },
diff --git a/drivers/usb/misc/usb-ljca.c b/drivers/usb/misc/usb-ljca.c
index 35770e608c649..2d30fc1be3066 100644
--- a/drivers/usb/misc/usb-ljca.c
+++ b/drivers/usb/misc/usb-ljca.c
@@ -518,8 +518,10 @@ static int ljca_new_client_device(struct ljca_adapter *adap, u8 type, u8 id,
int ret;
client = kzalloc(sizeof *client, GFP_KERNEL);
- if (!client)
+ if (!client) {
+ kfree(data);
return -ENOMEM;
+ }
client->type = type;
client->id = id;
@@ -535,8 +537,10 @@ static int ljca_new_client_device(struct ljca_adapter *adap, u8 type, u8 id,
auxdev->dev.release = ljca_auxdev_release;
ret = auxiliary_device_init(auxdev);
- if (ret)
+ if (ret) {
+ kfree(data);
goto err_free;
+ }
ljca_auxdev_acpi_bind(adap, auxdev, adr, id);
@@ -590,12 +594,8 @@ static int ljca_enumerate_gpio(struct ljca_adapter *adap)
valid_pin[i] = get_unaligned_le32(&desc->bank_desc[i].valid_pins);
bitmap_from_arr32(gpio_info->valid_pin_map, valid_pin, gpio_num);
- ret = ljca_new_client_device(adap, LJCA_CLIENT_GPIO, 0, "ljca-gpio",
+ return ljca_new_client_device(adap, LJCA_CLIENT_GPIO, 0, "ljca-gpio",
gpio_info, LJCA_GPIO_ACPI_ADR);
- if (ret)
- kfree(gpio_info);
-
- return ret;
}
static int ljca_enumerate_i2c(struct ljca_adapter *adap)
@@ -629,10 +629,8 @@ static int ljca_enumerate_i2c(struct ljca_adapter *adap)
ret = ljca_new_client_device(adap, LJCA_CLIENT_I2C, i,
"ljca-i2c", i2c_info,
LJCA_I2C1_ACPI_ADR + i);
- if (ret) {
- kfree(i2c_info);
+ if (ret)
return ret;
- }
}
return 0;
@@ -669,10 +667,8 @@ static int ljca_enumerate_spi(struct ljca_adapter *adap)
ret = ljca_new_client_device(adap, LJCA_CLIENT_SPI, i,
"ljca-spi", spi_info,
LJCA_SPI1_ACPI_ADR + i);
- if (ret) {
- kfree(spi_info);
+ if (ret)
return ret;
- }
}
return 0;
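Annotation: the usb-ljca.c hunks above move ownership of the per-function info buffer into ljca_new_client_device(), which now frees it on its own failure paths; the enumerate helpers therefore only propagate the error. A sketch of the resulting caller contract (the struct name is taken from the surrounding code and may not match the real definition):

	struct ljca_gpio_info *gpio_info;

	gpio_info = kzalloc(sizeof(*gpio_info), GFP_KERNEL);
	if (!gpio_info)
		return -ENOMEM;

	/* On failure the callee has already freed gpio_info; do not kfree() it here. */
	return ljca_new_client_device(adap, LJCA_CLIENT_GPIO, 0, "ljca-gpio",
				      gpio_info, LJCA_GPIO_ACPI_ADR);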
diff --git a/drivers/usb/mtu3/mtu3_host.c b/drivers/usb/mtu3/mtu3_host.c
index 9f2be22af8440..7c657ea2dabd1 100644
--- a/drivers/usb/mtu3/mtu3_host.c
+++ b/drivers/usb/mtu3/mtu3_host.c
@@ -34,6 +34,18 @@
#define WC0_SSUSB0_CDEN BIT(6)
#define WC0_IS_SPM_EN BIT(1)
+/* mt8195 */
+#define PERI_WK_CTRL0_8195 0x04
+#define WC0_IS_P_95 BIT(30) /* polarity */
+#define WC0_IS_C_95(x) ((u32)(((x) & 0x7) << 27))
+#define WC0_IS_EN_P3_95 BIT(26)
+#define WC0_IS_EN_P2_95 BIT(25)
+
+#define PERI_WK_CTRL1_8195 0x20
+#define WC1_IS_C_95(x) ((u32)(((x) & 0xf) << 28))
+#define WC1_IS_P_95 BIT(12)
+#define WC1_IS_EN_P0_95 BIT(6)
+
/* mt2712 etc */
#define PERI_SSUSB_SPM_CTRL 0x0
#define SSC_IP_SLEEP_EN BIT(4)
@@ -44,6 +56,9 @@ enum ssusb_uwk_vers {
SSUSB_UWK_V2,
SSUSB_UWK_V1_1 = 101, /* specific revision 1.01 */
SSUSB_UWK_V1_2, /* specific revision 1.02 */
+ SSUSB_UWK_V1_3, /* mt8195 IP0 */
+ SSUSB_UWK_V1_5 = 105, /* mt8195 IP2 */
+ SSUSB_UWK_V1_6, /* mt8195 IP3 */
};
/*
@@ -70,6 +85,21 @@ static void ssusb_wakeup_ip_sleep_set(struct ssusb_mtk *ssusb, bool enable)
msk = WC0_SSUSB0_CDEN | WC0_IS_SPM_EN;
val = enable ? msk : 0;
break;
+ case SSUSB_UWK_V1_3:
+ reg = ssusb->uwk_reg_base + PERI_WK_CTRL1_8195;
+ msk = WC1_IS_EN_P0_95 | WC1_IS_C_95(0xf) | WC1_IS_P_95;
+ val = enable ? (WC1_IS_EN_P0_95 | WC1_IS_C_95(0x1)) : 0;
+ break;
+ case SSUSB_UWK_V1_5:
+ reg = ssusb->uwk_reg_base + PERI_WK_CTRL0_8195;
+ msk = WC0_IS_EN_P2_95 | WC0_IS_C_95(0x7) | WC0_IS_P_95;
+ val = enable ? (WC0_IS_EN_P2_95 | WC0_IS_C_95(0x1)) : 0;
+ break;
+ case SSUSB_UWK_V1_6:
+ reg = ssusb->uwk_reg_base + PERI_WK_CTRL0_8195;
+ msk = WC0_IS_EN_P3_95 | WC0_IS_C_95(0x7) | WC0_IS_P_95;
+ val = enable ? (WC0_IS_EN_P3_95 | WC0_IS_C_95(0x1)) : 0;
+ break;
case SSUSB_UWK_V2:
reg = ssusb->uwk_reg_base + PERI_SSUSB_SPM_CTRL;
msk = SSC_IP_SLEEP_EN | SSC_SPM_INT_EN;
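Annotation: worked example for the SSUSB_UWK_V1_3 case added above, using the mt8195 macros from this hunk (numbers only, no claim about what the hardware fields mean):

/*
 *   msk = WC1_IS_EN_P0_95 | WC1_IS_C_95(0xf) | WC1_IS_P_95
 *       = BIT(6) | (0xf << 28) | BIT(12)          = 0xf0001040
 *   val = WC1_IS_EN_P0_95 | WC1_IS_C_95(0x1)      = 0x10000040   (enable)
 *   val = 0                                                      (disable)
 *
 * A masked update with these values sets only the IP0 enable bit, programs
 * code 0x1 into the WC1_IS_C field and leaves the polarity bit cleared.
 */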
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 051c6da7cf6d7..55df0ee413d8e 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1744,7 +1744,6 @@ static inline void musb_g_init_endpoints(struct musb *musb)
{
u8 epnum;
struct musb_hw_ep *hw_ep;
- unsigned count = 0;
/* initialize endpoint list just once */
INIT_LIST_HEAD(&(musb->g.ep_list));
@@ -1754,17 +1753,14 @@ static inline void musb_g_init_endpoints(struct musb *musb)
epnum++, hw_ep++) {
if (hw_ep->is_shared_fifo /* || !epnum */) {
init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
- count++;
} else {
if (hw_ep->max_packet_sz_tx) {
init_peripheral_ep(musb, &hw_ep->ep_in,
epnum, 1);
- count++;
}
if (hw_ep->max_packet_sz_rx) {
init_peripheral_ep(musb, &hw_ep->ep_out,
epnum, 0);
- count++;
}
}
}
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 9ab50f26db607..fdcffebf415cd 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -74,33 +74,26 @@ static void nop_reset(struct usb_phy_generic *nop)
}
/* interface to regulator framework */
-static void nop_set_vbus_draw(struct usb_phy_generic *nop, unsigned mA)
+static int nop_set_vbus(struct usb_otg *otg, bool enable)
{
- struct regulator *vbus_draw = nop->vbus_draw;
- int enabled;
- int ret;
+ int ret = 0;
+ struct usb_phy_generic *nop = dev_get_drvdata(otg->usb_phy->dev);
- if (!vbus_draw)
- return;
+ if (!nop->vbus_draw)
+ return 0;
- enabled = nop->vbus_draw_enabled;
- if (mA) {
- regulator_set_current_limit(vbus_draw, 0, 1000 * mA);
- if (!enabled) {
- ret = regulator_enable(vbus_draw);
- if (ret < 0)
- return;
- nop->vbus_draw_enabled = 1;
- }
- } else {
- if (enabled) {
- ret = regulator_disable(vbus_draw);
- if (ret < 0)
- return;
- nop->vbus_draw_enabled = 0;
- }
+ if (enable && !nop->vbus_draw_enabled) {
+ ret = regulator_enable(nop->vbus_draw);
+ if (ret)
+ nop->vbus_draw_enabled = false;
+ else
+ nop->vbus_draw_enabled = true;
+
+ } else if (!enable && nop->vbus_draw_enabled) {
+ ret = regulator_disable(nop->vbus_draw);
+ nop->vbus_draw_enabled = false;
}
- nop->mA = mA;
+ return ret;
}
@@ -120,14 +113,9 @@ static irqreturn_t nop_gpio_vbus_thread(int irq, void *data)
otg->state = OTG_STATE_B_PERIPHERAL;
nop->phy.last_event = status;
- /* drawing a "unit load" is *always* OK, except for OTG */
- nop_set_vbus_draw(nop, 100);
-
atomic_notifier_call_chain(&nop->phy.notifier, status,
otg->gadget);
} else {
- nop_set_vbus_draw(nop, 0);
-
status = USB_EVENT_NONE;
otg->state = OTG_STATE_B_IDLE;
nop->phy.last_event = status;
@@ -284,6 +272,7 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
nop->phy.otg->usb_phy = &nop->phy;
nop->phy.otg->set_host = nop_set_host;
nop->phy.otg->set_peripheral = nop_set_peripheral;
+ nop->phy.otg->set_vbus = nop_set_vbus;
return 0;
}
@@ -341,6 +330,9 @@ static void usb_phy_generic_remove(struct platform_device *pdev)
struct usb_phy_generic *nop = platform_get_drvdata(pdev);
usb_remove_phy(&nop->phy);
+
+ if (nop->vbus_draw && nop->vbus_draw_enabled)
+ regulator_disable(nop->vbus_draw);
}
static const struct of_device_id nop_xceiv_dt_ids[] = {
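Annotation: after the phy-generic.c rework above, the PHY no longer adjusts vbus current draw from its VBUS interrupt; instead it exposes the standard set_vbus operation on the OTG handle and disables the regulator on remove. A hypothetical controller-side sketch, assuming the otg_set_vbus() wrapper from <linux/usb/otg.h>:

#include <linux/usb/otg.h>
#include <linux/usb/phy.h>

static int example_drive_vbus(struct usb_phy *phy, bool on)
{
	if (!phy || !phy->otg)
		return -ENODEV;

	/* For phy-generic this ends up in nop_set_vbus() and toggles vbus_draw. */
	return otg_set_vbus(phy->otg, on);
}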
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 4b468bde19cfb..06e0fb23566ce 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -699,7 +699,7 @@ out:
}
EXPORT_SYMBOL_GPL(usb_add_phy);
-static struct device_type usb_phy_dev_type = {
+static const struct device_type usb_phy_dev_type = {
.name = "usb_phy",
.uevent = usb_phy_uevent,
};
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 70165dd86b5de..d7aa913ceb8a0 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -7,6 +7,7 @@
* Hans de Goede <hdegoede@redhat.com>
*/
+#include <linux/component.h>
#include <linux/usb/role.h>
#include <linux/property.h>
#include <linux/device.h>
@@ -36,6 +37,32 @@ struct usb_role_switch {
#define to_role_switch(d) container_of(d, struct usb_role_switch, dev)
+static int connector_bind(struct device *dev, struct device *connector, void *data)
+{
+ int ret;
+
+ ret = sysfs_create_link(&dev->kobj, &connector->kobj, "connector");
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_link(&connector->kobj, &dev->kobj, "usb-role-switch");
+ if (ret)
+ sysfs_remove_link(&dev->kobj, "connector");
+
+ return ret;
+}
+
+static void connector_unbind(struct device *dev, struct device *connector, void *data)
+{
+ sysfs_remove_link(&connector->kobj, "usb-role-switch");
+ sysfs_remove_link(&dev->kobj, "connector");
+}
+
+static const struct component_ops connector_ops = {
+ .bind = connector_bind,
+ .unbind = connector_unbind,
+};
+
/**
* usb_role_switch_set_role - Set USB role for a switch
* @sw: USB role switch
@@ -361,6 +388,12 @@ usb_role_switch_register(struct device *parent,
return ERR_PTR(ret);
}
+ if (dev_fwnode(&sw->dev)) {
+ ret = component_add(&sw->dev, &connector_ops);
+ if (ret)
+ dev_warn(&sw->dev, "failed to add component\n");
+ }
+
sw->registered = true;
/* TODO: Symlinks for the host port and the device controller. */
@@ -377,10 +410,12 @@ EXPORT_SYMBOL_GPL(usb_role_switch_register);
*/
void usb_role_switch_unregister(struct usb_role_switch *sw)
{
- if (!IS_ERR_OR_NULL(sw)) {
- sw->registered = false;
- device_unregister(&sw->dev);
- }
+ if (IS_ERR_OR_NULL(sw))
+ return;
+ sw->registered = false;
+ if (dev_fwnode(&sw->dev))
+ component_del(&sw->dev, &connector_ops);
+ device_unregister(&sw->dev);
}
EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
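Annotation: the component hookup above lets a connector driver aggregate fwnode-backed role switches; when the aggregate binds, matching "connector" and "usb-role-switch" symlinks appear in sysfs. A hypothetical aggregate-side sketch (every name here is illustrative; only the component API calls are real):

#include <linux/component.h>
#include <linux/device.h>

static int example_connector_bind(struct device *dev)
{
	/* Runs connector_bind() above for every matched role switch. */
	return component_bind_all(dev, NULL);
}

static void example_connector_unbind(struct device *dev)
{
	component_unbind_all(dev, NULL);
}

static const struct component_master_ops example_connector_ops = {
	.bind	= example_connector_bind,
	.unbind	= example_connector_unbind,
};

static int example_compare(struct device *dev, void *fwnode)
{
	return device_match_fwnode(dev, fwnode);
}

static int example_connector_probe(struct device *dev, struct fwnode_handle *sw_fwnode)
{
	struct component_match *match = NULL;

	component_match_add(dev, &match, example_compare, sw_fwnode);
	return component_master_add_with_match(dev, &example_connector_ops, match);
}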
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 923e0ed85444b..21fd26609252b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -56,6 +56,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+ { USB_DEVICE(0x04BF, 0x1301) }, /* TDK Corporation NC0110013M - Network Controller */
+ { USB_DEVICE(0x04BF, 0x1303) }, /* TDK Corporation MM0110113M - i3 Micro Module */
{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
{ USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
@@ -144,6 +146,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+ { USB_DEVICE(0x10C4, 0x863C) }, /* MGP Instruments PDS100 */
{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
{ USB_DEVICE(0x10C4, 0x87ED) }, /* IMST USB-Stick for Smart Meter */
@@ -177,6 +180,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
+ { USB_DEVICE(0x11CA, 0x0212) }, /* Verifone USB to Printer (UART, CP2102) */
{ USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 13a56783830df..76a04ab411006 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1077,6 +1077,8 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ /* GMC devices */
+ { USB_DEVICE(GMC_VID, GMC_Z216C_PID) },
{ } /* Terminating entry */
};
@@ -2610,7 +2612,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
struct device *ddev = &port->dev;
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct ktermios *termios = &tty->termios;
- unsigned int cflag = termios->c_cflag;
+ unsigned int cflag;
u16 value, index;
int ret;
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 21a2b5a25fc09..5ee60ba2a73cd 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1606,3 +1606,9 @@
#define UBLOX_VID 0x1546
#define UBLOX_C099F9P_ZED_PID 0x0502
#define UBLOX_C099F9P_ODIN_PID 0x0503
+
+/*
+ * GMC devices
+ */
+#define GMC_VID 0x1cd7
+#define GMC_Z216C_PID 0x0217 /* GMC Z216C Adapter IR-USB */
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 93b17e0e05a33..0a783985197c3 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -921,7 +921,6 @@ static void usa28_indat_callback(struct urb *urb)
port = urb->context;
p_priv = usb_get_serial_port_data(port);
- data = urb->transfer_buffer;
if (urb != p_priv->in_urbs[p_priv->in_flip])
return;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2ae124c49d448..8a5846d4adf67 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -255,6 +255,10 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EM061K_LMS 0x0124
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_EM060K_128 0x0128
+#define QUECTEL_PRODUCT_EM060K_129 0x0129
+#define QUECTEL_PRODUCT_EM060K_12a 0x012a
+#define QUECTEL_PRODUCT_EM060K_12b 0x012b
+#define QUECTEL_PRODUCT_EM060K_12c 0x012c
#define QUECTEL_PRODUCT_EG91 0x0191
#define QUECTEL_PRODUCT_EG95 0x0195
#define QUECTEL_PRODUCT_BG96 0x0296
@@ -613,6 +617,11 @@ static void option_instat_callback(struct urb *urb);
/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
#define LUAT_PRODUCT_AIR720U 0x4e00
+/* MeiG Smart Technology products */
+#define MEIGSMART_VENDOR_ID 0x2dee
+/* MeiG Smart SLM320 based on UNISOC UIS8910 */
+#define MEIGSMART_PRODUCT_SLM320 0x4d41
+
/* Device flags */
/* Highest interface number which can be used with NCTRL() and RSVD() */
@@ -1213,6 +1222,18 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
@@ -1355,6 +1376,12 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -2047,6 +2074,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
.driver_info = RSVD(4) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b05), /* Longsung U8300 */
+ .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b3c), /* Longsung U9300 */
+ .driver_info = RSVD(0) | RSVD(4) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -2267,21 +2298,36 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0115, 0xff), /* Fibocom FM135 (laptop MBIM) */
+ .driver_info = RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
.driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a04, 0xff) }, /* Fibocom FM650-CN (ECM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) }, /* Fibocom FM650-CN (NCM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) }, /* Fibocom FM650-CN (RNDIS mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) }, /* Fibocom FM650-CN (MBIM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
+ { USB_DEVICE(0x33f8, 0x0104), /* Rolling RW101-GL (laptop RMNET) */
+ .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a2, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a3, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a4, 0xff), /* Rolling RW101-GL (laptop MBIM) */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff), /* Rolling RW135-GL (laptop MBIM) */
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index 6365cfe5402cb..fa07f6ff9ecc8 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -409,7 +409,6 @@ static void oti6858_set_termios(struct tty_struct *tty,
cflag = tty->termios.c_cflag;
spin_lock_irqsave(&priv->lock, flags);
- divisor = priv->pending_setup.divisor;
frame_fmt = priv->pending_setup.frame_fmt;
control = priv->pending_setup.control;
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/usb/storage/freecom.c b/drivers/usb/storage/freecom.c
index 2b098b55c4cbb..c3ce51c2dabde 100644
--- a/drivers/usb/storage/freecom.c
+++ b/drivers/usb/storage/freecom.c
@@ -534,7 +534,6 @@ static void pdump(struct us_data *us, void *ibuffer, int length)
}
line[offset] = 0;
usb_stor_dbg(us, "%s\n", line);
- offset = 0;
}
#endif
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index 15dc25801cdcc..0aa079405d23c 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -196,7 +196,7 @@ static int sddr55_read_data(struct us_data *us,
unsigned char *buffer;
unsigned int pba;
- unsigned long address;
+ unsigned int address;
unsigned short pages;
unsigned int len, offset;
@@ -316,7 +316,7 @@ static int sddr55_write_data(struct us_data *us,
unsigned int pba;
unsigned int new_pba;
- unsigned long address;
+ unsigned int address;
unsigned short pages;
int i;
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 71ace274761f1..08953f0d4532a 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -533,7 +533,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
* daft to me.
*/
-static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp)
+static int uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp)
{
struct uas_dev_info *devinfo = cmnd->device->hostdata;
struct urb *urb;
@@ -541,30 +541,28 @@ static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp)
urb = uas_alloc_sense_urb(devinfo, gfp, cmnd);
if (!urb)
- return NULL;
+ return -ENOMEM;
usb_anchor_urb(urb, &devinfo->sense_urbs);
err = usb_submit_urb(urb, gfp);
if (err) {
usb_unanchor_urb(urb);
uas_log_cmd_state(cmnd, "sense submit err", err);
usb_free_urb(urb);
- return NULL;
}
- return urb;
+ return err;
}
static int uas_submit_urbs(struct scsi_cmnd *cmnd,
struct uas_dev_info *devinfo)
{
struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
- struct urb *urb;
int err;
lockdep_assert_held(&devinfo->lock);
if (cmdinfo->state & SUBMIT_STATUS_URB) {
- urb = uas_submit_sense_urb(cmnd, GFP_ATOMIC);
- if (!urb)
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ err = uas_submit_sense_urb(cmnd, GFP_ATOMIC);
+ if (err)
+ return err;
cmdinfo->state &= ~SUBMIT_STATUS_URB;
}
@@ -572,7 +570,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
cmdinfo->data_in_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC,
cmnd, DMA_FROM_DEVICE);
if (!cmdinfo->data_in_urb)
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return -ENOMEM;
cmdinfo->state &= ~ALLOC_DATA_IN_URB;
}
@@ -582,7 +580,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
if (err) {
usb_unanchor_urb(cmdinfo->data_in_urb);
uas_log_cmd_state(cmnd, "data in submit err", err);
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return err;
}
cmdinfo->state &= ~SUBMIT_DATA_IN_URB;
cmdinfo->state |= DATA_IN_URB_INFLIGHT;
@@ -592,7 +590,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
cmdinfo->data_out_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC,
cmnd, DMA_TO_DEVICE);
if (!cmdinfo->data_out_urb)
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return -ENOMEM;
cmdinfo->state &= ~ALLOC_DATA_OUT_URB;
}
@@ -602,7 +600,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
if (err) {
usb_unanchor_urb(cmdinfo->data_out_urb);
uas_log_cmd_state(cmnd, "data out submit err", err);
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return err;
}
cmdinfo->state &= ~SUBMIT_DATA_OUT_URB;
cmdinfo->state |= DATA_OUT_URB_INFLIGHT;
@@ -611,7 +609,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
if (cmdinfo->state & ALLOC_CMD_URB) {
cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, GFP_ATOMIC, cmnd);
if (!cmdinfo->cmd_urb)
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return -ENOMEM;
cmdinfo->state &= ~ALLOC_CMD_URB;
}
@@ -621,7 +619,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
if (err) {
usb_unanchor_urb(cmdinfo->cmd_urb);
uas_log_cmd_state(cmnd, "cmd submit err", err);
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return err;
}
cmdinfo->cmd_urb = NULL;
cmdinfo->state &= ~SUBMIT_CMD_URB;
@@ -698,7 +696,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd)
* of queueing, no matter how fatal the error
*/
if (err == -ENODEV) {
- set_host_byte(cmnd, DID_ERROR);
+ set_host_byte(cmnd, DID_NO_CONNECT);
scsi_done(cmnd);
goto zombie;
}
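Annotation: with the uas.c changes above, the submit helpers report a negative errno instead of returning SCSI_MLQUEUE_DEVICE_BUSY themselves, and a dead device is completed with DID_NO_CONNECT rather than DID_ERROR. A rough sketch of what a caller is then expected to do with the result (an assumption about the surrounding queuecommand logic, not the literal upstream code):

	err = uas_submit_urbs(cmnd, devinfo);
	if (err == -ENODEV) {
		/* device is gone: complete the command instead of retrying */
		set_host_byte(cmnd, DID_NO_CONNECT);
		scsi_done(cmnd);
	} else if (err) {
		/* transient failure (e.g. -ENOMEM): ask the midlayer to retry */
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}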
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index f8ea3054be542..038dc51f429dd 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -50,13 +50,17 @@ enum {
enum dp_state {
DP_STATE_IDLE,
DP_STATE_ENTER,
+ DP_STATE_ENTER_PRIME,
DP_STATE_UPDATE,
DP_STATE_CONFIGURE,
+ DP_STATE_CONFIGURE_PRIME,
DP_STATE_EXIT,
+ DP_STATE_EXIT_PRIME,
};
struct dp_altmode {
struct typec_displayport_data data;
+ struct typec_displayport_data data_prime;
enum dp_state state;
bool hpd;
@@ -67,6 +71,7 @@ struct dp_altmode {
struct typec_altmode *alt;
const struct typec_altmode *port;
struct fwnode_handle *connector_fwnode;
+ struct typec_altmode *plug_prime;
};
static int dp_altmode_notify(struct dp_altmode *dp)
@@ -99,12 +104,18 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
conf |= DP_CONF_UFP_U_AS_DFP_D;
pin_assign = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo) &
DP_CAP_DFP_D_PIN_ASSIGN(dp->port->vdo);
+ /* Account for active cable capabilities */
+ if (dp->plug_prime)
+ pin_assign &= DP_CAP_DFP_D_PIN_ASSIGN(dp->plug_prime->vdo);
break;
case DP_STATUS_CON_UFP_D:
case DP_STATUS_CON_BOTH: /* NOTE: First acting as DP source */
conf |= DP_CONF_UFP_U_AS_UFP_D;
pin_assign = DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo) &
DP_CAP_PIN_ASSIGN_DFP_D(dp->port->vdo);
+ /* Account for active cable capabilities */
+ if (dp->plug_prime)
+ pin_assign &= DP_CAP_UFP_D_PIN_ASSIGN(dp->plug_prime->vdo);
break;
default:
break;
@@ -130,6 +141,8 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
}
dp->data.conf = conf;
+ if (dp->plug_prime)
+ dp->data_prime.conf = conf;
return 0;
}
@@ -143,13 +156,16 @@ static int dp_altmode_status_update(struct dp_altmode *dp)
if (configured && (dp->data.status & DP_STATUS_SWITCH_TO_USB)) {
dp->data.conf = 0;
- dp->state = DP_STATE_CONFIGURE;
+ dp->data_prime.conf = 0;
+ dp->state = dp->plug_prime ? DP_STATE_CONFIGURE_PRIME :
+ DP_STATE_CONFIGURE;
} else if (dp->data.status & DP_STATUS_EXIT_DP_MODE) {
dp->state = DP_STATE_EXIT;
} else if (!(con & DP_CONF_CURRENTLY(dp->data.conf))) {
ret = dp_altmode_configure(dp, con);
if (!ret) {
- dp->state = DP_STATE_CONFIGURE;
+ dp->state = dp->plug_prime ? DP_STATE_CONFIGURE_PRIME :
+ DP_STATE_CONFIGURE;
if (dp->hpd != hpd) {
dp->hpd = hpd;
dp->pending_hpd = true;
@@ -209,6 +225,19 @@ static int dp_altmode_configure_vdm(struct dp_altmode *dp, u32 conf)
return ret;
}
+static int dp_altmode_configure_vdm_cable(struct dp_altmode *dp, u32 conf)
+{
+ int svdm_version = typec_altmode_get_cable_svdm_version(dp->plug_prime);
+ u32 header;
+
+ if (svdm_version < 0)
+ return svdm_version;
+
+ header = DP_HEADER(dp, svdm_version, DP_CMD_CONFIGURE);
+
+ return typec_cable_altmode_vdm(dp->plug_prime, TYPEC_PLUG_SOP_P, header, &conf, 2);
+}
+
static void dp_altmode_work(struct work_struct *work)
{
struct dp_altmode *dp = container_of(work, struct dp_altmode, work);
@@ -225,6 +254,19 @@ static void dp_altmode_work(struct work_struct *work)
if (ret && ret != -EBUSY)
dev_err(&dp->alt->dev, "failed to enter mode\n");
break;
+ case DP_STATE_ENTER_PRIME:
+ ret = typec_cable_altmode_enter(dp->alt, TYPEC_PLUG_SOP_P, NULL);
+ /*
+ * If we fail to enter Alt Mode on SOP', then we should drop the
+ * plug from the driver and attempt to run the driver without
+ * it.
+ */
+ if (ret && ret != -EBUSY) {
+ dev_err(&dp->alt->dev, "plug failed to enter mode\n");
+ dp->state = DP_STATE_ENTER;
+ goto disable_prime;
+ }
+ break;
case DP_STATE_UPDATE:
svdm_version = typec_altmode_get_svdm_version(dp->alt);
if (svdm_version < 0)
@@ -243,10 +285,24 @@ static void dp_altmode_work(struct work_struct *work)
dev_err(&dp->alt->dev,
"unable to send Configure command (%d)\n", ret);
break;
+ case DP_STATE_CONFIGURE_PRIME:
+ ret = dp_altmode_configure_vdm_cable(dp, dp->data_prime.conf);
+ if (ret) {
+ dev_err(&dp->plug_prime->dev,
+ "unable to send Configure command (%d)\n",
+ ret);
+ dp->state = DP_STATE_CONFIGURE;
+ goto disable_prime;
+ }
+ break;
case DP_STATE_EXIT:
if (typec_altmode_exit(dp->alt))
dev_err(&dp->alt->dev, "Exit Mode Failed!\n");
break;
+ case DP_STATE_EXIT_PRIME:
+ if (typec_cable_altmode_exit(dp->plug_prime, TYPEC_PLUG_SOP_P))
+ dev_err(&dp->plug_prime->dev, "Exit Mode Failed!\n");
+ break;
default:
break;
}
@@ -254,6 +310,13 @@ static void dp_altmode_work(struct work_struct *work)
dp->state = DP_STATE_IDLE;
mutex_unlock(&dp->lock);
+ return;
+
+disable_prime:
+ typec_altmode_put_plug(dp->plug_prime);
+ dp->plug_prime = NULL;
+ schedule_work(&dp->work);
+ mutex_unlock(&dp->lock);
}
static void dp_altmode_attention(struct typec_altmode *alt, const u32 vdo)
@@ -314,6 +377,8 @@ static int dp_altmode_vdm(struct typec_altmode *alt,
dp->hpd = false;
sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd");
}
+ if (dp->plug_prime)
+ dp->state = DP_STATE_EXIT_PRIME;
break;
case DP_CMD_STATUS_UPDATE:
dp->data.status = *vdo;
@@ -348,10 +413,84 @@ err_unlock:
return ret;
}
+static int dp_cable_altmode_vdm(struct typec_altmode *alt, enum typec_plug_index sop,
+ const u32 hdr, const u32 *vdo, int count)
+{
+ struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
+ int cmd_type = PD_VDO_CMDT(hdr);
+ int cmd = PD_VDO_CMD(hdr);
+ int ret = 0;
+
+ mutex_lock(&dp->lock);
+
+ if (dp->state != DP_STATE_IDLE) {
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+
+ switch (cmd_type) {
+ case CMDT_RSP_ACK:
+ switch (cmd) {
+ case CMD_ENTER_MODE:
+ typec_altmode_update_active(dp->plug_prime, true);
+ dp->state = DP_STATE_ENTER;
+ break;
+ case CMD_EXIT_MODE:
+ dp->data_prime.status = 0;
+ dp->data_prime.conf = 0;
+ typec_altmode_update_active(dp->plug_prime, false);
+ break;
+ case DP_CMD_CONFIGURE:
+ dp->state = DP_STATE_CONFIGURE;
+ break;
+ default:
+ break;
+ }
+ break;
+ case CMDT_RSP_NAK:
+ switch (cmd) {
+ case DP_CMD_CONFIGURE:
+ dp->data_prime.conf = 0;
+ /* Attempt to configure on SOP, drop plug */
+ typec_altmode_put_plug(dp->plug_prime);
+ dp->plug_prime = NULL;
+ dp->state = DP_STATE_CONFIGURE;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (dp->state != DP_STATE_IDLE)
+ schedule_work(&dp->work);
+
+err_unlock:
+ mutex_unlock(&dp->lock);
+ return ret;
+}
+
static int dp_altmode_activate(struct typec_altmode *alt, int activate)
{
- return activate ? typec_altmode_enter(alt, NULL) :
- typec_altmode_exit(alt);
+ struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
+ int ret;
+
+ if (activate) {
+ if (dp->plug_prime) {
+ ret = typec_cable_altmode_enter(alt, TYPEC_PLUG_SOP_P, NULL);
+ if (ret < 0) {
+ typec_altmode_put_plug(dp->plug_prime);
+ dp->plug_prime = NULL;
+ } else {
+ return ret;
+ }
+ }
+ return typec_altmode_enter(alt, NULL);
+ } else {
+ return typec_altmode_exit(alt);
+ }
}
static const struct typec_altmode_ops dp_altmode_ops = {
@@ -360,6 +499,10 @@ static const struct typec_altmode_ops dp_altmode_ops = {
.activate = dp_altmode_activate,
};
+static const struct typec_cable_ops dp_cable_ops = {
+ .vdm = dp_cable_altmode_vdm,
+};
+
static const char * const configurations[] = {
[DP_CONF_USB] = "USB",
[DP_CONF_DFP_D] = "source",
@@ -501,6 +644,7 @@ pin_assignment_store(struct device *dev, struct device_attribute *attr,
/* Only send Configure command if a configuration has been set */
if (dp->alt->active && DP_CONF_CURRENTLY(dp->data.conf)) {
+ /* todo: send manual configure over SOP'*/
ret = dp_altmode_configure_vdm(dp, conf);
if (ret)
goto out_unlock;
@@ -579,6 +723,7 @@ static const struct attribute_group *displayport_groups[] = {
int dp_altmode_probe(struct typec_altmode *alt)
{
const struct typec_altmode *port = typec_altmode_get_partner(alt);
+ struct typec_altmode *plug = typec_altmode_get_plug(alt, TYPEC_PLUG_SOP_P);
struct fwnode_handle *fwnode;
struct dp_altmode *dp;
@@ -603,6 +748,13 @@ int dp_altmode_probe(struct typec_altmode *alt)
alt->desc = "DisplayPort";
alt->ops = &dp_altmode_ops;
+ if (plug) {
+ plug->desc = "Displayport";
+ plug->cable_ops = &dp_cable_ops;
+ }
+
+ dp->plug_prime = plug;
+
fwnode = dev_fwnode(alt->dev.parent->parent); /* typec_port fwnode */
if (fwnode_property_present(fwnode, "displayport"))
dp->connector_fwnode = fwnode_find_reference(fwnode, "displayport", 0);
@@ -612,8 +764,10 @@ int dp_altmode_probe(struct typec_altmode *alt)
dp->connector_fwnode = NULL;
typec_altmode_set_drvdata(alt, dp);
+ if (plug)
+ typec_altmode_set_drvdata(plug, dp);
- dp->state = DP_STATE_ENTER;
+ dp->state = plug ? DP_STATE_ENTER_PRIME : DP_STATE_ENTER;
schedule_work(&dp->work);
return 0;
@@ -625,6 +779,7 @@ void dp_altmode_remove(struct typec_altmode *alt)
struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
cancel_work_sync(&dp->work);
+ typec_altmode_put_plug(dp->plug_prime);
if (dp->connector_fwnode) {
drm_connector_oob_hotplug_event(dp->connector_fwnode,
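Annotation: a condensed sketch of the SOP' flow the displayport.c changes introduce, using only names added in this patch (error handling and locking omitted; header and conf stand for the values built by DP_HEADER() and dp_altmode_configure()). Probe grabs the cable plug and routes its VDMs through dp_cable_ops, the work function enters the mode on SOP' before SOP, Configure is sent to the plug before the partner, and the plug mode is exited after the partner mode:

	struct typec_altmode *plug = typec_altmode_get_plug(alt, TYPEC_PLUG_SOP_P);

	if (plug) {
		plug->cable_ops = &dp_cable_ops;

		/* DP_STATE_ENTER_PRIME, then DP_STATE_ENTER on the ACK */
		typec_cable_altmode_enter(alt, TYPEC_PLUG_SOP_P, NULL);

		/* DP_STATE_CONFIGURE_PRIME, then DP_STATE_CONFIGURE on the ACK */
		typec_cable_altmode_vdm(plug, TYPEC_PLUG_SOP_P, header, &conf, 2);

		/* DP_STATE_EXIT_PRIME once the partner mode has been exited */
		typec_cable_altmode_exit(plug, TYPEC_PLUG_SOP_P);
		typec_altmode_put_plug(plug);
	}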
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index e95ec7e382bb7..6ea103e1abae9 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -245,6 +245,108 @@ typec_altmode_get_partner(struct typec_altmode *adev)
EXPORT_SYMBOL_GPL(typec_altmode_get_partner);
/* -------------------------------------------------------------------------- */
+/* API for cable alternate modes */
+
+/**
+ * typec_cable_altmode_enter - Enter Mode
+ * @adev: The alternate mode
+ * @sop: Cable plug target for Enter Mode command
+ * @vdo: VDO for the Enter Mode command
+ *
+ * Alternate mode drivers use this function to enter mode on the cable plug.
+ * If the alternate mode does not require VDO, @vdo must be NULL.
+ */
+int typec_cable_altmode_enter(struct typec_altmode *adev, enum typec_plug_index sop, u32 *vdo)
+{
+ struct altmode *partner = to_altmode(adev)->partner;
+ struct typec_altmode *pdev;
+
+ if (!adev || adev->active)
+ return 0;
+
+ if (!partner)
+ return -ENODEV;
+
+ pdev = &partner->adev;
+
+ if (!pdev->active)
+ return -EPERM;
+
+ if (!pdev->cable_ops || !pdev->cable_ops->enter)
+ return -EOPNOTSUPP;
+
+ return pdev->cable_ops->enter(pdev, sop, vdo);
+}
+EXPORT_SYMBOL_GPL(typec_cable_altmode_enter);
+
+/**
+ * typec_cable_altmode_exit - Exit Mode
+ * @adev: The alternate mode
+ * @sop: Cable plug target for Exit Mode command
+ *
+ * The alternate mode drivers use this function to exit mode on the cable plug.
+ */
+int typec_cable_altmode_exit(struct typec_altmode *adev, enum typec_plug_index sop)
+{
+ struct altmode *partner = to_altmode(adev)->partner;
+ struct typec_altmode *pdev;
+
+ if (!adev || !adev->active)
+ return 0;
+
+ if (!partner)
+ return -ENODEV;
+
+ pdev = &partner->adev;
+
+ if (!pdev->cable_ops || !pdev->cable_ops->exit)
+ return -EOPNOTSUPP;
+
+ return pdev->cable_ops->exit(pdev, sop);
+}
+EXPORT_SYMBOL_GPL(typec_cable_altmode_exit);
+
+/**
+ * typec_cable_altmode_vdm - Send Vendor Defined Messages (VDM) between the cable plug and port.
+ * @adev: Alternate mode handle
+ * @sop: Cable plug target for VDM
+ * @header: VDM Header
+ * @vdo: Array of Vendor Defined Data Objects
+ * @count: Number of Data Objects
+ *
+ * The alternate mode drivers use this function for SVID specific communication
+ * with the cable plugs. The port drivers use it to deliver the Structured VDMs
+ * received from the cable plugs to the alternate mode drivers.
+ */
+int typec_cable_altmode_vdm(struct typec_altmode *adev, enum typec_plug_index sop,
+ const u32 header, const u32 *vdo, int count)
+{
+ struct altmode *altmode;
+ struct typec_altmode *pdev;
+
+ if (!adev)
+ return 0;
+
+ altmode = to_altmode(adev);
+
+ if (is_typec_plug(adev->dev.parent)) {
+ if (!altmode->partner)
+ return -ENODEV;
+ pdev = &altmode->partner->adev;
+ } else {
+ if (!altmode->plug[sop])
+ return -ENODEV;
+ pdev = &altmode->plug[sop]->adev;
+ }
+
+ if (!pdev->cable_ops || !pdev->cable_ops->vdm)
+ return -EOPNOTSUPP;
+
+ return pdev->cable_ops->vdm(pdev, sop, header, vdo, count);
+}
+EXPORT_SYMBOL_GPL(typec_cable_altmode_vdm);
+
+/* -------------------------------------------------------------------------- */
/* API for the alternate mode drivers */
/**
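Annotation: the three entry points added to bus.c above only forward to a cable_ops vtable supplied by the port driver. A hypothetical skeleton of that receiving side (callback signatures follow the pdev->cable_ops dispatch in this file; everything else is illustrative):

static int example_cable_enter(struct typec_altmode *altmode,
			       enum typec_plug_index sop, u32 *vdo)
{
	/* queue an Enter Mode SVDM addressed to SOP'/SOP'' */
	return 0;
}

static int example_cable_exit(struct typec_altmode *altmode,
			      enum typec_plug_index sop)
{
	/* queue an Exit Mode SVDM addressed to the cable plug */
	return 0;
}

static int example_cable_vdm(struct typec_altmode *altmode,
			     enum typec_plug_index sop,
			     const u32 header, const u32 *vdo, int count)
{
	/* transmit the SVID-specific VDM to the cable plug */
	return 0;
}

static const struct typec_cable_ops example_cable_ops = {
	.enter	= example_cable_enter,
	.exit	= example_cable_exit,
	.vdm	= example_cable_vdm,
};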
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 015aa92533536..9610e647a8d48 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -21,7 +21,7 @@
static DEFINE_IDA(typec_index_ida);
-struct class typec_class = {
+const struct class typec_class = {
.name = "typec",
};
@@ -1310,6 +1310,7 @@ static ssize_t select_usb_power_delivery_store(struct device *dev,
{
struct typec_port *port = to_typec_port(dev);
struct usb_power_delivery *pd;
+ int ret;
if (!port->ops || !port->ops->pd_set)
return -EOPNOTSUPP;
@@ -1318,7 +1319,11 @@ static ssize_t select_usb_power_delivery_store(struct device *dev,
if (!pd)
return -EINVAL;
- return port->ops->pd_set(port, pd);
+ ret = port->ops->pd_set(port, pd);
+ if (ret)
+ return ret;
+
+ return size;
}
static ssize_t select_usb_power_delivery_show(struct device *dev,
@@ -2132,6 +2137,46 @@ int typec_get_negotiated_svdm_version(struct typec_port *port)
EXPORT_SYMBOL_GPL(typec_get_negotiated_svdm_version);
/**
+ * typec_get_cable_svdm_version - Get cable negotiated SVDM Version
+ * @port: USB Type-C Port.
+ *
+ * Get the negotiated SVDM Version for the cable. The Version is set to the port
+ * default value based on the PD Revision during cable registration, and updated
+ * after a successful Discover Identity if the negotiated value is less than the
+ * default.
+ *
+ * Returns usb_pd_svdm_ver if the cable has been registered otherwise -ENODEV.
+ */
+int typec_get_cable_svdm_version(struct typec_port *port)
+{
+ enum usb_pd_svdm_ver svdm_version;
+ struct device *cable_dev;
+
+ cable_dev = device_find_child(&port->dev, NULL, cable_match);
+ if (!cable_dev)
+ return -ENODEV;
+
+ svdm_version = to_typec_cable(cable_dev)->svdm_version;
+ put_device(cable_dev);
+
+ return svdm_version;
+}
+EXPORT_SYMBOL_GPL(typec_get_cable_svdm_version);
+
+/**
+ * typec_cable_set_svdm_version - Set negotiated Structured VDM (SVDM) Version
+ * @cable: USB Type-C Active Cable that supports SVDM
+ * @svdm_version: Negotiated SVDM Version
+ *
+ * This routine is used to save the negotiated SVDM Version.
+ */
+void typec_cable_set_svdm_version(struct typec_cable *cable, enum usb_pd_svdm_ver svdm_version)
+{
+ cable->svdm_version = svdm_version;
+}
+EXPORT_SYMBOL_GPL(typec_cable_set_svdm_version);
+
+/**
* typec_get_drvdata - Return private driver data pointer
* @port: USB Type-C port
*/
@@ -2281,6 +2326,25 @@ void typec_port_register_altmodes(struct typec_port *port,
EXPORT_SYMBOL_GPL(typec_port_register_altmodes);
/**
+ * typec_port_register_cable_ops - Register typec_cable_ops to port altmodes
+ * @altmodes: USB Type-C Port's altmode vector
+ * @max_altmodes: The maximum number of alt modes supported by the port
+ * @ops: Cable alternate mode vector
+ */
+void typec_port_register_cable_ops(struct typec_altmode **altmodes, int max_altmodes,
+ const struct typec_cable_ops *ops)
+{
+ int i;
+
+ for (i = 0; i < max_altmodes; i++) {
+ if (!altmodes[i])
+ return;
+ altmodes[i]->cable_ops = ops;
+ }
+}
+EXPORT_SYMBOL_GPL(typec_port_register_cable_ops);
+
+/**
* typec_register_port - Register a USB Type-C Port
* @parent: Parent device
* @cap: Description of the port
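Annotation: the class.c additions above complete the plumbing: typec_port_register_cable_ops() lets a port driver hand one cable_ops vtable to all of its registered port alt modes, typec_get_cable_svdm_version()/typec_cable_set_svdm_version() track the SVDM revision negotiated with the cable, and the pd_set store fix makes the sysfs attribute return the consumed byte count on success, as the sysfs convention requires. A hypothetical port-driver fragment using the ops skeleton sketched after bus.c above (port_altmodes[] is illustrative):

	/* after typec_port_register_altmodes() has filled port_altmodes[] */
	typec_port_register_cable_ops(port_altmodes, ARRAY_SIZE(port_altmodes),
				      &example_cable_ops);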
diff --git a/drivers/usb/typec/class.h b/drivers/usb/typec/class.h
index c36761ba3f599..7485cdb9dd201 100644
--- a/drivers/usb/typec/class.h
+++ b/drivers/usb/typec/class.h
@@ -23,6 +23,7 @@ struct typec_cable {
struct usb_pd_identity *identity;
unsigned int active:1;
u16 pd_revision; /* 0300H = "3.0" */
+ enum usb_pd_svdm_ver svdm_version;
};
struct typec_partner {
@@ -92,9 +93,9 @@ extern const struct device_type typec_port_dev_type;
#define is_typec_plug(dev) ((dev)->type == &typec_plug_dev_type)
#define is_typec_port(dev) ((dev)->type == &typec_port_dev_type)
-extern struct class typec_mux_class;
-extern struct class retimer_class;
-extern struct class typec_class;
+extern const struct class typec_mux_class;
+extern const struct class retimer_class;
+extern const struct class typec_class;
#if defined(CONFIG_ACPI)
int typec_link_ports(struct typec_port *connector);
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 80dd91938d960..49926d6e72c71 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -469,6 +469,6 @@ void *typec_mux_get_drvdata(struct typec_mux_dev *mux_dev)
}
EXPORT_SYMBOL_GPL(typec_mux_get_drvdata);
-struct class typec_mux_class = {
+const struct class typec_mux_class = {
.name = "typec_mux",
};
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index d2cb5e733e573..399c7b0983df3 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -36,6 +36,16 @@ config TYPEC_MUX_INTEL_PMC
control the USB role switch and also the multiplexer/demultiplexer
switches used with USB Type-C Alternate Modes.
+config TYPEC_MUX_IT5205
+ tristate "ITE IT5205 Type-C USB Alt Mode Passive MUX driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Driver for the ITE IT5205 Type-C USB Alternate Mode Passive MUX
+ which provides support for muxing DisplayPort and sideband signals
+ on a common USB Type-C connector.
+ If compiled as a module, the module will be named it5205.
+
config TYPEC_MUX_NB7VPQ904M
tristate "On Semiconductor NB7VPQ904M Type-C redriver driver"
depends on I2C
diff --git a/drivers/usb/typec/mux/Makefile b/drivers/usb/typec/mux/Makefile
index 57dc9ac6f8dcf..bb96f30267af0 100644
--- a/drivers/usb/typec/mux/Makefile
+++ b/drivers/usb/typec/mux/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_TYPEC_MUX_FSA4480) += fsa4480.o
obj-$(CONFIG_TYPEC_MUX_GPIO_SBU) += gpio-sbu-mux.o
obj-$(CONFIG_TYPEC_MUX_PI3USB30532) += pi3usb30532.o
obj-$(CONFIG_TYPEC_MUX_INTEL_PMC) += intel_pmc_mux.o
+obj-$(CONFIG_TYPEC_MUX_IT5205) += it5205.o
obj-$(CONFIG_TYPEC_MUX_NB7VPQ904M) += nb7vpq904m.o
obj-$(CONFIG_TYPEC_MUX_PTN36502) += ptn36502.o
obj-$(CONFIG_TYPEC_MUX_WCD939X_USBSS) += wcd939x-usbss.o
diff --git a/drivers/usb/typec/mux/it5205.c b/drivers/usb/typec/mux/it5205.c
new file mode 100644
index 0000000000000..4357cc67a8672
--- /dev/null
+++ b/drivers/usb/typec/mux/it5205.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ITE IT5205 Type-C USB alternate mode passive mux
+ *
+ * Copyright (c) 2020 MediaTek Inc.
+ * Copyright (c) 2024 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/tcpm.h>
+#include <linux/usb/typec.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+
+#define IT5205_REG_CHIP_ID(x) (0x4 + (x))
+#define IT5205FN_CHIP_ID 0x35303235 /* "5025" -> "5205" */
+
+/* MUX power down register */
+#define IT5205_REG_MUXPDR 0x10
+#define IT5205_MUX_POWER_DOWN BIT(0)
+
+/* MUX control register */
+#define IT5205_REG_MUXCR 0x11
+#define IT5205_POLARITY_INVERTED BIT(4)
+#define IT5205_DP_USB_CTRL_MASK GENMASK(3, 0)
+#define IT5205_DP 0x0f
+#define IT5205_DP_USB 0x03
+#define IT5205_USB 0x07
+
+/* Vref Select Register */
+#define IT5205_REG_VSR 0x10
+#define IT5205_VREF_SELECT_MASK GENMASK(5, 4)
+#define IT5205_VREF_SELECT_3_3V 0x00
+#define IT5205_VREF_SELECT_OFF 0x20
+
+/* CSBU Over Voltage Protection Register */
+#define IT5205_REG_CSBUOVPSR 0x1e
+#define IT5205_OVP_SELECT_MASK GENMASK(5, 4)
+#define IT5205_OVP_3_90V 0x00
+#define IT5205_OVP_3_68V 0x10
+#define IT5205_OVP_3_62V 0x20
+#define IT5205_OVP_3_57V 0x30
+
+/* CSBU Switch Register */
+#define IT5205_REG_CSBUSR 0x22
+#define IT5205_CSBUSR_SWITCH BIT(0)
+
+/* Interrupt Switch Register */
+#define IT5205_REG_ISR 0x25
+#define IT5205_ISR_CSBU_MASK BIT(4)
+#define IT5205_ISR_CSBU_OVP BIT(0)
+
+struct it5205 {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct typec_switch_dev *sw;
+ struct typec_mux_dev *mux;
+};
+
+static int it5205_switch_set(struct typec_switch_dev *sw, enum typec_orientation orientation)
+{
+ struct it5205 *it = typec_switch_get_drvdata(sw);
+
+ switch (orientation) {
+ case TYPEC_ORIENTATION_NORMAL:
+ regmap_update_bits(it->regmap, IT5205_REG_MUXCR,
+ IT5205_POLARITY_INVERTED, 0);
+ break;
+ case TYPEC_ORIENTATION_REVERSE:
+ regmap_update_bits(it->regmap, IT5205_REG_MUXCR,
+ IT5205_POLARITY_INVERTED, IT5205_POLARITY_INVERTED);
+ break;
+ case TYPEC_ORIENTATION_NONE:
+ fallthrough;
+ default:
+ regmap_write(it->regmap, IT5205_REG_MUXCR, 0);
+ break;
+ }
+
+ return 0;
+}
+
+static int it5205_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
+{
+ struct it5205 *it = typec_mux_get_drvdata(mux);
+ u8 val;
+
+ if (state->mode >= TYPEC_STATE_MODAL &&
+ state->alt->svid != USB_TYPEC_DP_SID)
+ return -EINVAL;
+
+ switch (state->mode) {
+ case TYPEC_STATE_USB:
+ val = IT5205_USB;
+ break;
+ case TYPEC_DP_STATE_C:
+ fallthrough;
+ case TYPEC_DP_STATE_E:
+ val = IT5205_DP;
+ break;
+ case TYPEC_DP_STATE_D:
+ val = IT5205_DP_USB;
+ break;
+ case TYPEC_STATE_SAFE:
+ fallthrough;
+ default:
+ val = 0;
+ break;
+ }
+
+ return regmap_update_bits(it->regmap, IT5205_REG_MUXCR,
+ IT5205_DP_USB_CTRL_MASK, val);
+}
+
+static irqreturn_t it5205_irq_handler(int irq, void *data)
+{
+ struct it5205 *it = data;
+ int ret;
+ u32 val;
+
+ ret = regmap_read(it->regmap, IT5205_REG_ISR, &val);
+ if (ret)
+ return IRQ_NONE;
+
+ if (val & IT5205_ISR_CSBU_OVP) {
+ dev_warn(&it->client->dev, "Overvoltage detected!\n");
+
+ /* Reset CSBU */
+ regmap_update_bits(it->regmap, IT5205_REG_CSBUSR,
+ IT5205_CSBUSR_SWITCH, 0);
+ regmap_update_bits(it->regmap, IT5205_REG_CSBUSR,
+ IT5205_CSBUSR_SWITCH, IT5205_CSBUSR_SWITCH);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void it5205_enable_ovp(struct it5205 *it)
+{
+ /* Select Vref 3.3v */
+ regmap_update_bits(it->regmap, IT5205_REG_VSR,
+ IT5205_VREF_SELECT_MASK, IT5205_VREF_SELECT_3_3V);
+
+ /* Trigger OVP at 3.68V */
+ regmap_update_bits(it->regmap, IT5205_REG_CSBUOVPSR,
+ IT5205_OVP_SELECT_MASK, IT5205_OVP_3_68V);
+
+ /* Unmask OVP interrupt */
+ regmap_update_bits(it->regmap, IT5205_REG_ISR,
+ IT5205_ISR_CSBU_MASK, 0);
+
+ /* Enable CSBU Interrupt */
+ regmap_update_bits(it->regmap, IT5205_REG_CSBUSR,
+ IT5205_CSBUSR_SWITCH, IT5205_CSBUSR_SWITCH);
+}
+
+static const struct regmap_config it5205_regmap = {
+ .max_register = 0x2f,
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int it5205_probe(struct i2c_client *client)
+{
+ struct typec_switch_desc sw_desc = { };
+ struct typec_mux_desc mux_desc = { };
+ struct device *dev = &client->dev;
+ struct it5205 *it;
+ u32 val, chipid = 0;
+ int i, ret;
+
+ it = devm_kzalloc(dev, sizeof(*it), GFP_KERNEL);
+ if (!it)
+ return -ENOMEM;
+
+ ret = devm_regulator_get_enable(dev, "vcc");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get regulator\n");
+
+ it->client = client;
+
+ it->regmap = devm_regmap_init_i2c(client, &it5205_regmap);
+ if (IS_ERR(it->regmap))
+ return dev_err_probe(dev, PTR_ERR(it->regmap),
+ "Failed to init regmap\n");
+
+ /* IT5205 needs a long time to power up after enabling regulator */
+ msleep(50);
+
+ /* Unset poweroff bit */
+ ret = regmap_write(it->regmap, IT5205_REG_MUXPDR, 0);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set power on\n");
+
+ /* Read the 32 bits ChipID */
+ for (i = 3; i >= 0; i--) {
+ ret = regmap_read(it->regmap, IT5205_REG_CHIP_ID(i), &val);
+ if (ret)
+ return ret;
+
+ chipid |= val << (i * 8);
+ }
+
+ if (chipid != IT5205FN_CHIP_ID)
+ return dev_err_probe(dev, -EINVAL,
+ "Unknown ChipID 0x%x\n", chipid);
+
+ /* Initialize as USB mode with default (non-inverted) polarity */
+ ret = regmap_write(it->regmap, IT5205_REG_MUXCR, IT5205_USB);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot set mode to USB\n");
+
+ sw_desc.drvdata = it;
+ sw_desc.fwnode = dev_fwnode(dev);
+ sw_desc.set = it5205_switch_set;
+
+ it->sw = typec_switch_register(dev, &sw_desc);
+ if (IS_ERR(it->sw))
+ return dev_err_probe(dev, PTR_ERR(it->sw),
+ "failed to register typec switch\n");
+
+ mux_desc.drvdata = it;
+ mux_desc.fwnode = dev_fwnode(dev);
+ mux_desc.set = it5205_mux_set;
+
+ it->mux = typec_mux_register(dev, &mux_desc);
+ if (IS_ERR(it->mux)) {
+ typec_switch_unregister(it->sw);
+ return dev_err_probe(dev, PTR_ERR(it->mux),
+ "failed to register typec mux\n");
+ }
+
+ i2c_set_clientdata(client, it);
+
+ if (of_property_read_bool(dev->of_node, "ite,ovp-enable") && client->irq) {
+ it5205_enable_ovp(it);
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ it5205_irq_handler,
+ IRQF_ONESHOT, dev_name(dev), it);
+ if (ret) {
+ typec_mux_unregister(it->mux);
+ typec_switch_unregister(it->sw);
+ return dev_err_probe(dev, ret, "Failed to request irq\n");
+ }
+ }
+
+ return 0;
+}
+
+static void it5205_remove(struct i2c_client *client)
+{
+ struct it5205 *it = i2c_get_clientdata(client);
+
+ typec_mux_unregister(it->mux);
+ typec_switch_unregister(it->sw);
+}
+
+static const struct i2c_device_id it5205_table[] = {
+ { "it5205" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, it5205_table);
+
+static const struct of_device_id it5205_of_table[] = {
+ { .compatible = "ite,it5205" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, it5205_of_table);
+
+static struct i2c_driver it5205_driver = {
+ .driver = {
+ .name = "it5205",
+ .of_match_table = it5205_of_table,
+ },
+ .probe = it5205_probe,
+ .remove = it5205_remove,
+ .id_table = it5205_table,
+};
+module_i2c_driver(it5205_driver);
+
+MODULE_AUTHOR("Tianping Fang <tianping.fang@mediatek.com>");
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_DESCRIPTION("ITE IT5205 alternate mode passive MUX driver");
+MODULE_LICENSE("GPL");
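Annotation: a worked example of the chip-ID check in it5205_probe() above (illustrative only). Registers 0x04..0x07 hold the ASCII bytes '5', '2', '0', '5'; accumulating them as val << (i * 8) for i = 3..0 packs them into 0x35303235, which reads "5025" from the most significant byte down and is exactly IT5205FN_CHIP_ID:

	char id[5] = {
		(chipid >>  0) & 0xff,	/* 0x35 '5' from IT5205_REG_CHIP_ID(0) = 0x04 */
		(chipid >>  8) & 0xff,	/* 0x32 '2' from IT5205_REG_CHIP_ID(1) = 0x05 */
		(chipid >> 16) & 0xff,	/* 0x30 '0' from IT5205_REG_CHIP_ID(2) = 0x06 */
		(chipid >> 24) & 0xff,	/* 0x35 '5' from IT5205_REG_CHIP_ID(3) = 0x07 */
		'\0',
	};
	/* id now spells "5205" in register order; the packed u32 spells "5025". */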
diff --git a/drivers/usb/typec/pd.c b/drivers/usb/typec/pd.c
index b9cca2be76fce..d78c04a421bc2 100644
--- a/drivers/usb/typec/pd.c
+++ b/drivers/usb/typec/pd.c
@@ -157,7 +157,7 @@ static const struct attribute_group source_fixed_supply_group = {
};
__ATTRIBUTE_GROUPS(source_fixed_supply);
-static struct device_type source_fixed_supply_type = {
+static const struct device_type source_fixed_supply_type = {
.name = "pdo",
.release = pdo_release,
.groups = source_fixed_supply_groups,
@@ -182,7 +182,7 @@ static const struct attribute_group sink_fixed_supply_group = {
};
__ATTRIBUTE_GROUPS(sink_fixed_supply);
-static struct device_type sink_fixed_supply_type = {
+static const struct device_type sink_fixed_supply_type = {
.name = "pdo",
.release = pdo_release,
.groups = sink_fixed_supply_groups,
@@ -213,7 +213,7 @@ static struct attribute *source_variable_supply_attrs[] = {
};
ATTRIBUTE_GROUPS(source_variable_supply);
-static struct device_type source_variable_supply_type = {
+static const struct device_type source_variable_supply_type = {
.name = "pdo",
.release = pdo_release,
.groups = source_variable_supply_groups,
@@ -227,7 +227,7 @@ static struct attribute *sink_variable_supply_attrs[] = {
};
ATTRIBUTE_GROUPS(sink_variable_supply);
-static struct device_type sink_variable_supply_type = {
+static const struct device_type sink_variable_supply_type = {
.name = "pdo",
.release = pdo_release,
.groups = sink_variable_supply_groups,
@@ -258,7 +258,7 @@ static struct attribute *source_battery_attrs[] = {
};
ATTRIBUTE_GROUPS(source_battery);
-static struct device_type source_battery_type = {
+static const struct device_type source_battery_type = {
.name = "pdo",
.release = pdo_release,
.groups = source_battery_groups,
@@ -272,7 +272,7 @@ static struct attribute *sink_battery_attrs[] = {
};
ATTRIBUTE_GROUPS(sink_battery);
-static struct device_type sink_battery_type = {
+static const struct device_type sink_battery_type = {
.name = "pdo",
.release = pdo_release,
.groups = sink_battery_groups,
@@ -339,7 +339,7 @@ static struct attribute *source_pps_attrs[] = {
};
ATTRIBUTE_GROUPS(source_pps);
-static struct device_type source_pps_type = {
+static const struct device_type source_pps_type = {
.name = "pdo",
.release = pdo_release,
.groups = source_pps_groups,
@@ -353,7 +353,7 @@ static struct attribute *sink_pps_attrs[] = {
};
ATTRIBUTE_GROUPS(sink_pps);
-static struct device_type sink_pps_type = {
+static const struct device_type sink_pps_type = {
.name = "pdo",
.release = pdo_release,
.groups = sink_pps_groups,
@@ -371,30 +371,30 @@ static const char * const apdo_supply_name[] = {
[APDO_TYPE_PPS] = "programmable_supply",
};
-static struct device_type *source_type[] = {
+static const struct device_type *source_type[] = {
[PDO_TYPE_FIXED] = &source_fixed_supply_type,
[PDO_TYPE_BATT] = &source_battery_type,
[PDO_TYPE_VAR] = &source_variable_supply_type,
};
-static struct device_type *source_apdo_type[] = {
+static const struct device_type *source_apdo_type[] = {
[APDO_TYPE_PPS] = &source_pps_type,
};
-static struct device_type *sink_type[] = {
+static const struct device_type *sink_type[] = {
[PDO_TYPE_FIXED] = &sink_fixed_supply_type,
[PDO_TYPE_BATT] = &sink_battery_type,
[PDO_TYPE_VAR] = &sink_variable_supply_type,
};
-static struct device_type *sink_apdo_type[] = {
+static const struct device_type *sink_apdo_type[] = {
[APDO_TYPE_PPS] = &sink_pps_type,
};
/* REVISIT: Export when EPR_*_Capabilities need to be supported. */
static int add_pdo(struct usb_power_delivery_capabilities *cap, u32 pdo, int position)
{
- struct device_type *type;
+ const struct device_type *type;
const char *name;
struct pdo *p;
int ret;
@@ -460,7 +460,7 @@ static void pd_capabilities_release(struct device *dev)
kfree(to_usb_power_delivery_capabilities(dev));
}
-static struct device_type pd_capabilities_type = {
+static const struct device_type pd_capabilities_type = {
.name = "capabilities",
.release = pd_capabilities_release,
};
@@ -575,7 +575,7 @@ static void pd_release(struct device *dev)
kfree(pd);
}
-static struct device_type pd_type = {
+static const struct device_type pd_type = {
.name = "usb_power_delivery",
.release = pd_release,
.groups = pd_groups,
diff --git a/drivers/usb/typec/retimer.c b/drivers/usb/typec/retimer.c
index 4a7d1b5c4d866..b519fcf358caf 100644
--- a/drivers/usb/typec/retimer.c
+++ b/drivers/usb/typec/retimer.c
@@ -155,6 +155,6 @@ void *typec_retimer_get_drvdata(struct typec_retimer *retimer)
}
EXPORT_SYMBOL_GPL(typec_retimer_get_drvdata);
-struct class retimer_class = {
+const struct class retimer_class = {
.name = "retimer",
};
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index bc21006e979c6..ef18a448b7406 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -1467,7 +1467,7 @@ static int fusb302_pd_read_message(struct fusb302_chip *chip,
if ((!len) && (pd_header_type_le(msg->header) == PD_CTRL_GOOD_CRC))
tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_SUCCESS);
else
- tcpm_pd_receive(chip->tcpm_port, msg);
+ tcpm_pd_receive(chip->tcpm_port, msg, TCPC_TX_SOP);
return ret;
}
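
A minimal standalone sketch of the idea behind the extra argument to tcpm_pd_receive(): fusb302 only ever sees SOP frames, so it tags everything with TCPC_TX_SOP, while the policy engine can now tell port-partner traffic from cable-plug traffic. The enum and struct names below are simplified stand-ins for illustration, not the kernel definitions.

    #include <stdio.h>

    /* Simplified stand-ins for the kernel's tcpm_transmit_type values. */
    enum sop_type { SOP, SOP_PRIME };

    struct pd_message { unsigned short header; };

    /* Each receive event carries the frame type it arrived on. */
    struct pd_rx_event {
            struct pd_message msg;
            enum sop_type rx_sop_type;
    };

    static void pd_receive(const struct pd_rx_event *ev)
    {
            switch (ev->rx_sop_type) {
            case SOP:
                    printf("header 0x%04x from the port partner\n", ev->msg.header);
                    break;
            case SOP_PRIME:
                    printf("header 0x%04x from the cable plug\n", ev->msg.header);
                    break;
            }
    }

    int main(void)
    {
            struct pd_rx_event ev1 = { .msg = { 0x1161 }, .rx_sop_type = SOP };
            struct pd_rx_event ev2 = { .msg = { 0x0041 }, .rx_sop_type = SOP_PRIME };

            pd_receive(&ev1);
            pd_receive(&ev2);
            return 0;
    }
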
diff --git a/drivers/usb/typec/tcpm/qcom/Makefile b/drivers/usb/typec/tcpm/qcom/Makefile
index dc1e8832e197a..cc23042b94878 100644
--- a/drivers/usb/typec/tcpm/qcom/Makefile
+++ b/drivers/usb/typec/tcpm/qcom/Makefile
@@ -3,4 +3,5 @@
obj-$(CONFIG_TYPEC_QCOM_PMIC) += qcom_pmic_tcpm.o
qcom_pmic_tcpm-y += qcom_pmic_typec.o \
qcom_pmic_typec_port.o \
- qcom_pmic_typec_pdphy.o
+ qcom_pmic_typec_pdphy.o \
+ qcom_pmic_typec_pdphy_stub.o \
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
index 1a2b4bddaa97e..e48412cdcb0fb 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
@@ -20,130 +20,15 @@
#include <drm/bridge/aux-bridge.h>
+#include "qcom_pmic_typec.h"
#include "qcom_pmic_typec_pdphy.h"
#include "qcom_pmic_typec_port.h"
struct pmic_typec_resources {
- struct pmic_typec_pdphy_resources *pdphy_res;
- struct pmic_typec_port_resources *port_res;
+ const struct pmic_typec_pdphy_resources *pdphy_res;
+ const struct pmic_typec_port_resources *port_res;
};
-struct pmic_typec {
- struct device *dev;
- struct tcpm_port *tcpm_port;
- struct tcpc_dev tcpc;
- struct pmic_typec_pdphy *pmic_typec_pdphy;
- struct pmic_typec_port *pmic_typec_port;
- bool vbus_enabled;
- struct mutex lock; /* VBUS state serialization */
-};
-
-#define tcpc_to_tcpm(_tcpc_) container_of(_tcpc_, struct pmic_typec, tcpc)
-
-static int qcom_pmic_typec_get_vbus(struct tcpc_dev *tcpc)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
- int ret;
-
- mutex_lock(&tcpm->lock);
- ret = tcpm->vbus_enabled || qcom_pmic_typec_port_get_vbus(tcpm->pmic_typec_port);
- mutex_unlock(&tcpm->lock);
-
- return ret;
-}
-
-static int qcom_pmic_typec_set_vbus(struct tcpc_dev *tcpc, bool on, bool sink)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
- int ret = 0;
-
- mutex_lock(&tcpm->lock);
- if (tcpm->vbus_enabled == on)
- goto done;
-
- ret = qcom_pmic_typec_port_set_vbus(tcpm->pmic_typec_port, on);
- if (ret)
- goto done;
-
- tcpm->vbus_enabled = on;
- tcpm_vbus_change(tcpm->tcpm_port);
-
-done:
- dev_dbg(tcpm->dev, "set_vbus set: %d result %d\n", on, ret);
- mutex_unlock(&tcpm->lock);
-
- return ret;
-}
-
-static int qcom_pmic_typec_set_vconn(struct tcpc_dev *tcpc, bool on)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_port_set_vconn(tcpm->pmic_typec_port, on);
-}
-
-static int qcom_pmic_typec_get_cc(struct tcpc_dev *tcpc,
- enum typec_cc_status *cc1,
- enum typec_cc_status *cc2)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_port_get_cc(tcpm->pmic_typec_port, cc1, cc2);
-}
-
-static int qcom_pmic_typec_set_cc(struct tcpc_dev *tcpc,
- enum typec_cc_status cc)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_port_set_cc(tcpm->pmic_typec_port, cc);
-}
-
-static int qcom_pmic_typec_set_polarity(struct tcpc_dev *tcpc,
- enum typec_cc_polarity pol)
-{
- /* Polarity is set separately by phy-qcom-qmp.c */
- return 0;
-}
-
-static int qcom_pmic_typec_start_toggling(struct tcpc_dev *tcpc,
- enum typec_port_type port_type,
- enum typec_cc_status cc)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_port_start_toggling(tcpm->pmic_typec_port,
- port_type, cc);
-}
-
-static int qcom_pmic_typec_set_roles(struct tcpc_dev *tcpc, bool attached,
- enum typec_role power_role,
- enum typec_data_role data_role)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_pdphy_set_roles(tcpm->pmic_typec_pdphy,
- data_role, power_role);
-}
-
-static int qcom_pmic_typec_set_pd_rx(struct tcpc_dev *tcpc, bool on)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_pdphy_set_pd_rx(tcpm->pmic_typec_pdphy, on);
-}
-
-static int qcom_pmic_typec_pd_transmit(struct tcpc_dev *tcpc,
- enum tcpm_transmit_type type,
- const struct pd_message *msg,
- unsigned int negotiated_rev)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_pdphy_pd_transmit(tcpm->pmic_typec_pdphy, type,
- msg, negotiated_rev);
-}
-
static int qcom_pmic_typec_init(struct tcpc_dev *tcpc)
{
return 0;
@@ -157,7 +42,7 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
const struct pmic_typec_resources *res;
struct regmap *regmap;
struct device *bridge_dev;
- u32 base[2];
+ u32 base;
int ret;
res = of_device_get_match_data(dev);
@@ -170,16 +55,6 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
tcpm->dev = dev;
tcpm->tcpc.init = qcom_pmic_typec_init;
- tcpm->tcpc.get_vbus = qcom_pmic_typec_get_vbus;
- tcpm->tcpc.set_vbus = qcom_pmic_typec_set_vbus;
- tcpm->tcpc.set_cc = qcom_pmic_typec_set_cc;
- tcpm->tcpc.get_cc = qcom_pmic_typec_get_cc;
- tcpm->tcpc.set_polarity = qcom_pmic_typec_set_polarity;
- tcpm->tcpc.set_vconn = qcom_pmic_typec_set_vconn;
- tcpm->tcpc.start_toggling = qcom_pmic_typec_start_toggling;
- tcpm->tcpc.set_pd_rx = qcom_pmic_typec_set_pd_rx;
- tcpm->tcpc.set_roles = qcom_pmic_typec_set_roles;
- tcpm->tcpc.pd_transmit = qcom_pmic_typec_pd_transmit;
regmap = dev_get_regmap(dev->parent, NULL);
if (!regmap) {
@@ -187,29 +62,30 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
return -ENODEV;
}
- ret = of_property_read_u32_array(np, "reg", base, 2);
+ ret = of_property_read_u32_index(np, "reg", 0, &base);
if (ret)
return ret;
- tcpm->pmic_typec_port = qcom_pmic_typec_port_alloc(dev);
- if (IS_ERR(tcpm->pmic_typec_port))
- return PTR_ERR(tcpm->pmic_typec_port);
-
- tcpm->pmic_typec_pdphy = qcom_pmic_typec_pdphy_alloc(dev);
- if (IS_ERR(tcpm->pmic_typec_pdphy))
- return PTR_ERR(tcpm->pmic_typec_pdphy);
-
- ret = qcom_pmic_typec_port_probe(pdev, tcpm->pmic_typec_port,
- res->port_res, regmap, base[0]);
+ ret = qcom_pmic_typec_port_probe(pdev, tcpm,
+ res->port_res, regmap, base);
if (ret)
return ret;
- ret = qcom_pmic_typec_pdphy_probe(pdev, tcpm->pmic_typec_pdphy,
- res->pdphy_res, regmap, base[1]);
- if (ret)
- return ret;
+ if (res->pdphy_res) {
+ ret = of_property_read_u32_index(np, "reg", 1, &base);
+ if (ret)
+ return ret;
+
+ ret = qcom_pmic_typec_pdphy_probe(pdev, tcpm,
+ res->pdphy_res, regmap, base);
+ if (ret)
+ return ret;
+ } else {
+ ret = qcom_pmic_typec_pdphy_stub_probe(pdev, tcpm);
+ if (ret)
+ return ret;
+ }
- mutex_init(&tcpm->lock);
platform_set_drvdata(pdev, tcpm);
tcpm->tcpc.fwnode = device_get_named_child_node(tcpm->dev, "connector");
@@ -226,13 +102,11 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
goto fwnode_remove;
}
- ret = qcom_pmic_typec_port_start(tcpm->pmic_typec_port,
- tcpm->tcpm_port);
+ ret = tcpm->port_start(tcpm, tcpm->tcpm_port);
if (ret)
goto fwnode_remove;
- ret = qcom_pmic_typec_pdphy_start(tcpm->pmic_typec_pdphy,
- tcpm->tcpm_port);
+ ret = tcpm->pdphy_start(tcpm, tcpm->tcpm_port);
if (ret)
goto fwnode_remove;
@@ -248,91 +122,25 @@ static void qcom_pmic_typec_remove(struct platform_device *pdev)
{
struct pmic_typec *tcpm = platform_get_drvdata(pdev);
- qcom_pmic_typec_pdphy_stop(tcpm->pmic_typec_pdphy);
- qcom_pmic_typec_port_stop(tcpm->pmic_typec_port);
+ tcpm->pdphy_stop(tcpm);
+ tcpm->port_stop(tcpm);
tcpm_unregister_port(tcpm->tcpm_port);
fwnode_remove_software_node(tcpm->tcpc.fwnode);
}
-static struct pmic_typec_pdphy_resources pm8150b_pdphy_res = {
- .irq_params = {
- {
- .virq = PMIC_PDPHY_SIG_TX_IRQ,
- .irq_name = "sig-tx",
- },
- {
- .virq = PMIC_PDPHY_SIG_RX_IRQ,
- .irq_name = "sig-rx",
- },
- {
- .virq = PMIC_PDPHY_MSG_TX_IRQ,
- .irq_name = "msg-tx",
- },
- {
- .virq = PMIC_PDPHY_MSG_RX_IRQ,
- .irq_name = "msg-rx",
- },
- {
- .virq = PMIC_PDPHY_MSG_TX_FAIL_IRQ,
- .irq_name = "msg-tx-failed",
- },
- {
- .virq = PMIC_PDPHY_MSG_TX_DISCARD_IRQ,
- .irq_name = "msg-tx-discarded",
- },
- {
- .virq = PMIC_PDPHY_MSG_RX_DISCARD_IRQ,
- .irq_name = "msg-rx-discarded",
- },
- },
- .nr_irqs = 7,
-};
-
-static struct pmic_typec_port_resources pm8150b_port_res = {
- .irq_params = {
- {
- .irq_name = "vpd-detect",
- .virq = PMIC_TYPEC_VPD_IRQ,
- },
-
- {
- .irq_name = "cc-state-change",
- .virq = PMIC_TYPEC_CC_STATE_IRQ,
- },
- {
- .irq_name = "vconn-oc",
- .virq = PMIC_TYPEC_VCONN_OC_IRQ,
- },
-
- {
- .irq_name = "vbus-change",
- .virq = PMIC_TYPEC_VBUS_IRQ,
- },
-
- {
- .irq_name = "attach-detach",
- .virq = PMIC_TYPEC_ATTACH_DETACH_IRQ,
- },
- {
- .irq_name = "legacy-cable-detect",
- .virq = PMIC_TYPEC_LEGACY_CABLE_IRQ,
- },
-
- {
- .irq_name = "try-snk-src-detect",
- .virq = PMIC_TYPEC_TRY_SNK_SRC_IRQ,
- },
- },
- .nr_irqs = 7,
+static const struct pmic_typec_resources pm8150b_typec_res = {
+ .pdphy_res = &pm8150b_pdphy_res,
+ .port_res = &pm8150b_port_res,
};
-static struct pmic_typec_resources pm8150b_typec_res = {
- .pdphy_res = &pm8150b_pdphy_res,
+static const struct pmic_typec_resources pmi632_typec_res = {
+ /* PD PHY not present */
.port_res = &pm8150b_port_res,
};
static const struct of_device_id qcom_pmic_typec_table[] = {
{ .compatible = "qcom,pm8150b-typec", .data = &pm8150b_typec_res },
+ { .compatible = "qcom,pmi632-typec", .data = &pmi632_typec_res },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_pmic_typec_table);
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.h b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.h
new file mode 100644
index 0000000000000..3c75820c91876
--- /dev/null
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.h
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023, Linaro Ltd. All rights reserved.
+ */
+
+#ifndef __QCOM_PMIC_TYPEC_H__
+#define __QCOM_PMIC_TYPEC_H__
+
+struct pmic_typec {
+ struct device *dev;
+ struct tcpm_port *tcpm_port;
+ struct tcpc_dev tcpc;
+ struct pmic_typec_pdphy *pmic_typec_pdphy;
+ struct pmic_typec_port *pmic_typec_port;
+
+ int (*pdphy_start)(struct pmic_typec *tcpm,
+ struct tcpm_port *tcpm_port);
+ void (*pdphy_stop)(struct pmic_typec *tcpm);
+
+ int (*port_start)(struct pmic_typec *tcpm,
+ struct tcpm_port *tcpm_port);
+ void (*port_stop)(struct pmic_typec *tcpm);
+};
+
+#define tcpc_to_tcpm(_tcpc_) container_of(_tcpc_, struct pmic_typec, tcpc)
+
+#endif
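
The new header keeps struct tcpc_dev embedded inside struct pmic_typec and relies on container_of() in every callback to recover the outer object; the port and pdphy sub-drivers then install their ops and start/stop hooks on it. A small userspace sketch of that embed-and-recover idiom, with purely illustrative names:

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel container_of() macro. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct tcpc_dev {               /* inner object handed to callbacks */
            int dummy;
    };

    struct pmic_typec {             /* outer object owning the inner one */
            const char *name;
            struct tcpc_dev tcpc;
    };

    #define tcpc_to_tcpm(_tcpc_) container_of(_tcpc_, struct pmic_typec, tcpc)

    static void callback(struct tcpc_dev *tcpc)
    {
            struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);

            printf("outer object: %s\n", tcpm->name);
    }

    int main(void)
    {
            struct pmic_typec tcpm = { .name = "pm8150b" };

            callback(&tcpm.tcpc);   /* only the embedded member is passed around */
            return 0;
    }
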
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
index 52c81378e36ef..6560f4fc98d5a 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
@@ -14,8 +14,74 @@
#include <linux/slab.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpm.h>
+#include "qcom_pmic_typec.h"
#include "qcom_pmic_typec_pdphy.h"
+/* PD PHY register offsets and bit fields */
+#define USB_PDPHY_MSG_CONFIG_REG 0x40
+#define MSG_CONFIG_PORT_DATA_ROLE BIT(3)
+#define MSG_CONFIG_PORT_POWER_ROLE BIT(2)
+#define MSG_CONFIG_SPEC_REV_MASK (BIT(1) | BIT(0))
+
+#define USB_PDPHY_EN_CONTROL_REG 0x46
+#define CONTROL_ENABLE BIT(0)
+
+#define USB_PDPHY_RX_STATUS_REG 0x4A
+#define RX_FRAME_TYPE (BIT(0) | BIT(1) | BIT(2))
+
+#define USB_PDPHY_FRAME_FILTER_REG 0x4C
+#define FRAME_FILTER_EN_HARD_RESET BIT(5)
+#define FRAME_FILTER_EN_SOP BIT(0)
+
+#define USB_PDPHY_TX_SIZE_REG 0x42
+#define TX_SIZE_MASK 0xF
+
+#define USB_PDPHY_TX_CONTROL_REG 0x44
+#define TX_CONTROL_RETRY_COUNT(n) (((n) & 0x3) << 5)
+#define TX_CONTROL_FRAME_TYPE(n) (((n) & 0x7) << 2)
+#define TX_CONTROL_FRAME_TYPE_CABLE_RESET (0x1 << 2)
+#define TX_CONTROL_SEND_SIGNAL BIT(1)
+#define TX_CONTROL_SEND_MSG BIT(0)
+
+#define USB_PDPHY_RX_SIZE_REG 0x48
+
+#define USB_PDPHY_RX_ACKNOWLEDGE_REG 0x4B
+#define RX_BUFFER_TOKEN BIT(0)
+
+#define USB_PDPHY_BIST_MODE_REG 0x4E
+#define BIST_MODE_MASK 0xF
+#define BIST_ENABLE BIT(7)
+#define PD_MSG_BIST 0x3
+#define PD_BIST_TEST_DATA_MODE 0x8
+
+#define USB_PDPHY_TX_BUFFER_HDR_REG 0x60
+#define USB_PDPHY_TX_BUFFER_DATA_REG 0x62
+
+#define USB_PDPHY_RX_BUFFER_REG 0x80
+
+/* VDD regulator */
+#define VDD_PDPHY_VOL_MIN 2800000 /* uV */
+#define VDD_PDPHY_VOL_MAX 3300000 /* uV */
+#define VDD_PDPHY_HPM_LOAD 3000 /* uA */
+
+/* Message Spec Rev field */
+#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3)
+
+/* timers */
+#define RECEIVER_RESPONSE_TIME 15 /* tReceiverResponse */
+#define HARD_RESET_COMPLETE_TIME 5 /* tHardResetComplete */
+
+/* Interrupt numbers */
+#define PMIC_PDPHY_SIG_TX_IRQ 0x0
+#define PMIC_PDPHY_SIG_RX_IRQ 0x1
+#define PMIC_PDPHY_MSG_TX_IRQ 0x2
+#define PMIC_PDPHY_MSG_RX_IRQ 0x3
+#define PMIC_PDPHY_MSG_TX_FAIL_IRQ 0x4
+#define PMIC_PDPHY_MSG_TX_DISCARD_IRQ 0x5
+#define PMIC_PDPHY_MSG_RX_DISCARD_IRQ 0x6
+#define PMIC_PDPHY_FR_SWAP_IRQ 0x7
+
+
struct pmic_typec_pdphy_irq_data {
int virq;
int irq;
@@ -231,11 +297,13 @@ done:
return ret;
}
-int qcom_pmic_typec_pdphy_pd_transmit(struct pmic_typec_pdphy *pmic_typec_pdphy,
- enum tcpm_transmit_type type,
- const struct pd_message *msg,
- unsigned int negotiated_rev)
+static int qcom_pmic_typec_pdphy_pd_transmit(struct tcpc_dev *tcpc,
+ enum tcpm_transmit_type type,
+ const struct pd_message *msg,
+ unsigned int negotiated_rev)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
struct device *dev = pmic_typec_pdphy->dev;
int ret;
@@ -299,7 +367,7 @@ done:
if (!ret) {
dev_vdbg(dev, "pd_receive: handing %d bytes to tcpm\n", size);
- tcpm_pd_receive(pmic_typec_pdphy->tcpm_port, &msg);
+ tcpm_pd_receive(pmic_typec_pdphy->tcpm_port, &msg, TCPC_TX_SOP);
}
}
@@ -336,8 +404,10 @@ static irqreturn_t qcom_pmic_typec_pdphy_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int qcom_pmic_typec_pdphy_set_pd_rx(struct pmic_typec_pdphy *pmic_typec_pdphy, bool on)
+static int qcom_pmic_typec_pdphy_set_pd_rx(struct tcpc_dev *tcpc, bool on)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
unsigned long flags;
int ret;
@@ -353,9 +423,12 @@ int qcom_pmic_typec_pdphy_set_pd_rx(struct pmic_typec_pdphy *pmic_typec_pdphy, b
return ret;
}
-int qcom_pmic_typec_pdphy_set_roles(struct pmic_typec_pdphy *pmic_typec_pdphy,
- bool data_role_host, bool power_role_src)
+static int qcom_pmic_typec_pdphy_set_roles(struct tcpc_dev *tcpc, bool attached,
+ enum typec_role power_role,
+ enum typec_data_role data_role)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
struct device *dev = pmic_typec_pdphy->dev;
unsigned long flags;
int ret;
@@ -366,12 +439,13 @@ int qcom_pmic_typec_pdphy_set_roles(struct pmic_typec_pdphy *pmic_typec_pdphy,
pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
MSG_CONFIG_PORT_DATA_ROLE |
MSG_CONFIG_PORT_POWER_ROLE,
- data_role_host << 3 | power_role_src << 2);
+ (data_role == TYPEC_HOST ? MSG_CONFIG_PORT_DATA_ROLE : 0) |
+ (power_role == TYPEC_SOURCE ? MSG_CONFIG_PORT_POWER_ROLE : 0));
spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
dev_dbg(dev, "pdphy_set_roles: data_role_host=%d power_role_src=%d\n",
- data_role_host, power_role_src);
+ data_role, power_role);
return ret;
}
@@ -435,9 +509,10 @@ done:
return ret;
}
-int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
- struct tcpm_port *tcpm_port)
+static int qcom_pmic_typec_pdphy_start(struct pmic_typec *tcpm,
+ struct tcpm_port *tcpm_port)
{
+ struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
int i;
int ret;
@@ -457,8 +532,9 @@ int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
return 0;
}
-void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy)
+static void qcom_pmic_typec_pdphy_stop(struct pmic_typec *tcpm)
{
+ struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
int i;
for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
@@ -469,21 +545,21 @@ void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy)
regulator_disable(pmic_typec_pdphy->vdd_pdphy);
}
-struct pmic_typec_pdphy *qcom_pmic_typec_pdphy_alloc(struct device *dev)
-{
- return devm_kzalloc(dev, sizeof(struct pmic_typec_pdphy), GFP_KERNEL);
-}
-
int qcom_pmic_typec_pdphy_probe(struct platform_device *pdev,
- struct pmic_typec_pdphy *pmic_typec_pdphy,
- struct pmic_typec_pdphy_resources *res,
+ struct pmic_typec *tcpm,
+ const struct pmic_typec_pdphy_resources *res,
struct regmap *regmap,
u32 base)
{
+ struct pmic_typec_pdphy *pmic_typec_pdphy;
struct device *dev = &pdev->dev;
struct pmic_typec_pdphy_irq_data *irq_data;
int i, ret, irq;
+ pmic_typec_pdphy = devm_kzalloc(dev, sizeof(*pmic_typec_pdphy), GFP_KERNEL);
+ if (!pmic_typec_pdphy)
+ return -ENOMEM;
+
if (!res->nr_irqs || res->nr_irqs > PMIC_PDPHY_MAX_IRQS)
return -EINVAL;
@@ -522,5 +598,48 @@ int qcom_pmic_typec_pdphy_probe(struct platform_device *pdev,
return ret;
}
+ tcpm->pmic_typec_pdphy = pmic_typec_pdphy;
+
+ tcpm->tcpc.set_pd_rx = qcom_pmic_typec_pdphy_set_pd_rx;
+ tcpm->tcpc.set_roles = qcom_pmic_typec_pdphy_set_roles;
+ tcpm->tcpc.pd_transmit = qcom_pmic_typec_pdphy_pd_transmit;
+
+ tcpm->pdphy_start = qcom_pmic_typec_pdphy_start;
+ tcpm->pdphy_stop = qcom_pmic_typec_pdphy_stop;
+
return 0;
}
+
+const struct pmic_typec_pdphy_resources pm8150b_pdphy_res = {
+ .irq_params = {
+ {
+ .virq = PMIC_PDPHY_SIG_TX_IRQ,
+ .irq_name = "sig-tx",
+ },
+ {
+ .virq = PMIC_PDPHY_SIG_RX_IRQ,
+ .irq_name = "sig-rx",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_TX_IRQ,
+ .irq_name = "msg-tx",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_RX_IRQ,
+ .irq_name = "msg-rx",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_TX_FAIL_IRQ,
+ .irq_name = "msg-tx-failed",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_TX_DISCARD_IRQ,
+ .irq_name = "msg-tx-discarded",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_RX_DISCARD_IRQ,
+ .irq_name = "msg-rx-discarded",
+ },
+ },
+ .nr_irqs = 7,
+};
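
The set_roles() rework above drops the raw "<< 3 | << 2" shifts in favour of the named MSG_CONFIG bits selected from the typec role enums. A standalone sketch of that encoding; the enum orderings and bit positions are reproduced here only for illustration:

    #include <stdio.h>

    #define BIT(n)                          (1u << (n))
    #define MSG_CONFIG_PORT_DATA_ROLE       BIT(3)
    #define MSG_CONFIG_PORT_POWER_ROLE      BIT(2)

    /* Simplified stand-ins for the kernel typec enums. */
    enum typec_role      { TYPEC_SINK,   TYPEC_SOURCE };
    enum typec_data_role { TYPEC_DEVICE, TYPEC_HOST };

    static unsigned int msg_config_bits(enum typec_role power_role,
                                        enum typec_data_role data_role)
    {
            return (data_role == TYPEC_HOST ? MSG_CONFIG_PORT_DATA_ROLE : 0) |
                   (power_role == TYPEC_SOURCE ? MSG_CONFIG_PORT_POWER_ROLE : 0);
    }

    int main(void)
    {
            printf("source/host -> 0x%x\n", msg_config_bits(TYPEC_SOURCE, TYPEC_HOST));
            printf("sink/device -> 0x%x\n", msg_config_bits(TYPEC_SINK, TYPEC_DEVICE));
            return 0;
    }
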
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h
index e67954e31b149..04dee20293cfa 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h
@@ -8,74 +8,6 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/usb/tcpm.h>
-
-#define USB_PDPHY_MAX_DATA_OBJ_LEN 28
-#define USB_PDPHY_MSG_HDR_LEN 2
-
-/* PD PHY register offsets and bit fields */
-#define USB_PDPHY_MSG_CONFIG_REG 0x40
-#define MSG_CONFIG_PORT_DATA_ROLE BIT(3)
-#define MSG_CONFIG_PORT_POWER_ROLE BIT(2)
-#define MSG_CONFIG_SPEC_REV_MASK (BIT(1) | BIT(0))
-
-#define USB_PDPHY_EN_CONTROL_REG 0x46
-#define CONTROL_ENABLE BIT(0)
-
-#define USB_PDPHY_RX_STATUS_REG 0x4A
-#define RX_FRAME_TYPE (BIT(0) | BIT(1) | BIT(2))
-
-#define USB_PDPHY_FRAME_FILTER_REG 0x4C
-#define FRAME_FILTER_EN_HARD_RESET BIT(5)
-#define FRAME_FILTER_EN_SOP BIT(0)
-
-#define USB_PDPHY_TX_SIZE_REG 0x42
-#define TX_SIZE_MASK 0xF
-
-#define USB_PDPHY_TX_CONTROL_REG 0x44
-#define TX_CONTROL_RETRY_COUNT(n) (((n) & 0x3) << 5)
-#define TX_CONTROL_FRAME_TYPE(n) (((n) & 0x7) << 2)
-#define TX_CONTROL_FRAME_TYPE_CABLE_RESET (0x1 << 2)
-#define TX_CONTROL_SEND_SIGNAL BIT(1)
-#define TX_CONTROL_SEND_MSG BIT(0)
-
-#define USB_PDPHY_RX_SIZE_REG 0x48
-
-#define USB_PDPHY_RX_ACKNOWLEDGE_REG 0x4B
-#define RX_BUFFER_TOKEN BIT(0)
-
-#define USB_PDPHY_BIST_MODE_REG 0x4E
-#define BIST_MODE_MASK 0xF
-#define BIST_ENABLE BIT(7)
-#define PD_MSG_BIST 0x3
-#define PD_BIST_TEST_DATA_MODE 0x8
-
-#define USB_PDPHY_TX_BUFFER_HDR_REG 0x60
-#define USB_PDPHY_TX_BUFFER_DATA_REG 0x62
-
-#define USB_PDPHY_RX_BUFFER_REG 0x80
-
-/* VDD regulator */
-#define VDD_PDPHY_VOL_MIN 2800000 /* uV */
-#define VDD_PDPHY_VOL_MAX 3300000 /* uV */
-#define VDD_PDPHY_HPM_LOAD 3000 /* uA */
-
-/* Message Spec Rev field */
-#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3)
-
-/* timers */
-#define RECEIVER_RESPONSE_TIME 15 /* tReceiverResponse */
-#define HARD_RESET_COMPLETE_TIME 5 /* tHardResetComplete */
-
-/* Interrupt numbers */
-#define PMIC_PDPHY_SIG_TX_IRQ 0x0
-#define PMIC_PDPHY_SIG_RX_IRQ 0x1
-#define PMIC_PDPHY_MSG_TX_IRQ 0x2
-#define PMIC_PDPHY_MSG_RX_IRQ 0x3
-#define PMIC_PDPHY_MSG_TX_FAIL_IRQ 0x4
-#define PMIC_PDPHY_MSG_TX_DISCARD_IRQ 0x5
-#define PMIC_PDPHY_MSG_RX_DISCARD_IRQ 0x6
-#define PMIC_PDPHY_FR_SWAP_IRQ 0x7
/* Resources */
#define PMIC_PDPHY_MAX_IRQS 0x08
@@ -87,33 +19,19 @@ struct pmic_typec_pdphy_irq_params {
struct pmic_typec_pdphy_resources {
unsigned int nr_irqs;
- struct pmic_typec_pdphy_irq_params irq_params[PMIC_PDPHY_MAX_IRQS];
+ const struct pmic_typec_pdphy_irq_params irq_params[PMIC_PDPHY_MAX_IRQS];
};
/* API */
struct pmic_typec_pdphy;
-struct pmic_typec_pdphy *qcom_pmic_typec_pdphy_alloc(struct device *dev);
-
+extern const struct pmic_typec_pdphy_resources pm8150b_pdphy_res;
int qcom_pmic_typec_pdphy_probe(struct platform_device *pdev,
- struct pmic_typec_pdphy *pmic_typec_pdphy,
- struct pmic_typec_pdphy_resources *res,
+ struct pmic_typec *tcpm,
+ const struct pmic_typec_pdphy_resources *res,
struct regmap *regmap,
u32 base);
-
-int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
- struct tcpm_port *tcpm_port);
-
-void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy);
-
-int qcom_pmic_typec_pdphy_set_roles(struct pmic_typec_pdphy *pmic_typec_pdphy,
- bool power_role_src, bool data_role_host);
-
-int qcom_pmic_typec_pdphy_set_pd_rx(struct pmic_typec_pdphy *pmic_typec_pdphy, bool on);
-
-int qcom_pmic_typec_pdphy_pd_transmit(struct pmic_typec_pdphy *pmic_typec_pdphy,
- enum tcpm_transmit_type type,
- const struct pd_message *msg,
- unsigned int negotiated_rev);
+int qcom_pmic_typec_pdphy_stub_probe(struct platform_device *pdev,
+ struct pmic_typec *tcpm);
#endif /* __QCOM_PMIC_TYPEC_PDPHY_H__ */
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c
new file mode 100644
index 0000000000000..df79059cda675
--- /dev/null
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024, Linaro Ltd. All rights reserved.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/usb/pd.h>
+#include <linux/usb/tcpm.h>
+#include "qcom_pmic_typec.h"
+#include "qcom_pmic_typec_pdphy.h"
+
+static int qcom_pmic_typec_pdphy_stub_pd_transmit(struct tcpc_dev *tcpc,
+ enum tcpm_transmit_type type,
+ const struct pd_message *msg,
+ unsigned int negotiated_rev)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct device *dev = tcpm->dev;
+
+ dev_dbg(dev, "pdphy_transmit: type=%d\n", type);
+
+ tcpm_pd_transmit_complete(tcpm->tcpm_port,
+ TCPC_TX_SUCCESS);
+
+ return 0;
+}
+
+static int qcom_pmic_typec_pdphy_stub_set_pd_rx(struct tcpc_dev *tcpc, bool on)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct device *dev = tcpm->dev;
+
+ dev_dbg(dev, "set_pd_rx: %s\n", on ? "on" : "off");
+
+ return 0;
+}
+
+static int qcom_pmic_typec_pdphy_stub_set_roles(struct tcpc_dev *tcpc, bool attached,
+ enum typec_role power_role,
+ enum typec_data_role data_role)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct device *dev = tcpm->dev;
+
+ dev_dbg(dev, "pdphy_set_roles: data_role_host=%d power_role_src=%d\n",
+ data_role, power_role);
+
+ return 0;
+}
+
+static int qcom_pmic_typec_pdphy_stub_start(struct pmic_typec *tcpm,
+ struct tcpm_port *tcpm_port)
+{
+ return 0;
+}
+
+static void qcom_pmic_typec_pdphy_stub_stop(struct pmic_typec *tcpm)
+{
+}
+
+int qcom_pmic_typec_pdphy_stub_probe(struct platform_device *pdev,
+ struct pmic_typec *tcpm)
+{
+ tcpm->tcpc.set_pd_rx = qcom_pmic_typec_pdphy_stub_set_pd_rx;
+ tcpm->tcpc.set_roles = qcom_pmic_typec_pdphy_stub_set_roles;
+ tcpm->tcpc.pd_transmit = qcom_pmic_typec_pdphy_stub_pd_transmit;
+
+ tcpm->pdphy_start = qcom_pmic_typec_pdphy_stub_start;
+ tcpm->pdphy_stop = qcom_pmic_typec_pdphy_stub_stop;
+
+ return 0;
+}
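
The stub exists so that PMICs without a PD PHY block (such as PMI632) can still register a TCPM port: it wires up no-op PD hooks and acknowledges every transmit immediately. A standalone sketch of the probe-time selection this enables; the types and helpers below are illustrative, not the driver's:

    #include <stdio.h>

    struct pdphy_res { int nr_irqs; };

    struct typec_chip {
            const char *pdphy_kind;
    };

    static int real_pdphy_probe(struct typec_chip *chip)
    {
            chip->pdphy_kind = "pd-phy";
            return 0;
    }

    static int stub_pdphy_probe(struct typec_chip *chip)
    {
            chip->pdphy_kind = "stub (Type-C only, no USB PD)";
            return 0;
    }

    /* Fall back to the stub when no PD PHY block is described for the PMIC. */
    static int probe(struct typec_chip *chip, const struct pdphy_res *res)
    {
            return res ? real_pdphy_probe(chip) : stub_pdphy_probe(chip);
    }

    int main(void)
    {
            static const struct pdphy_res pm8150b_res = { .nr_irqs = 7 };
            struct typec_chip a, b;

            probe(&a, &pm8150b_res);    /* PM8150B: full PD PHY */
            probe(&b, NULL);            /* PMI632: PD PHY not present */
            printf("a: %s\nb: %s\n", a.pdphy_kind, b.pdphy_kind);
            return 0;
    }
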
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
index a8f3f4d3a4509..a747baa297849 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
@@ -16,8 +16,147 @@
#include <linux/usb/tcpm.h>
#include <linux/usb/typec_mux.h>
#include <linux/workqueue.h>
+
+#include "qcom_pmic_typec.h"
#include "qcom_pmic_typec_port.h"
+#define TYPEC_SNK_STATUS_REG 0x06
+#define DETECTED_SNK_TYPE_MASK GENMASK(6, 0)
+#define SNK_DAM_MASK GENMASK(6, 4)
+#define SNK_DAM_500MA BIT(6)
+#define SNK_DAM_1500MA BIT(5)
+#define SNK_DAM_3000MA BIT(4)
+#define SNK_RP_STD BIT(3)
+#define SNK_RP_1P5 BIT(2)
+#define SNK_RP_3P0 BIT(1)
+#define SNK_RP_SHORT BIT(0)
+
+#define TYPEC_SRC_STATUS_REG 0x08
+#define DETECTED_SRC_TYPE_MASK GENMASK(4, 0)
+#define SRC_HIGH_BATT BIT(5)
+#define SRC_DEBUG_ACCESS BIT(4)
+#define SRC_RD_OPEN BIT(3)
+#define SRC_RD_RA_VCONN BIT(2)
+#define SRC_RA_OPEN BIT(1)
+#define AUDIO_ACCESS_RA_RA BIT(0)
+
+#define TYPEC_STATE_MACHINE_STATUS_REG 0x09
+#define TYPEC_ATTACH_DETACH_STATE BIT(5)
+
+#define TYPEC_SM_STATUS_REG 0x0A
+#define TYPEC_SM_VBUS_VSAFE5V BIT(5)
+#define TYPEC_SM_VBUS_VSAFE0V BIT(6)
+#define TYPEC_SM_USBIN_LT_LV BIT(7)
+
+#define TYPEC_MISC_STATUS_REG 0x0B
+#define TYPEC_WATER_DETECTION_STATUS BIT(7)
+#define SNK_SRC_MODE BIT(6)
+#define TYPEC_VBUS_DETECT BIT(5)
+#define TYPEC_VBUS_ERROR_STATUS BIT(4)
+#define TYPEC_DEBOUNCE_DONE BIT(3)
+#define CC_ORIENTATION BIT(1)
+#define CC_ATTACHED BIT(0)
+
+#define LEGACY_CABLE_STATUS_REG 0x0D
+#define TYPEC_LEGACY_CABLE_STATUS BIT(1)
+#define TYPEC_NONCOMP_LEGACY_CABLE_STATUS BIT(0)
+
+#define TYPEC_U_USB_STATUS_REG 0x0F
+#define U_USB_GROUND_NOVBUS BIT(6)
+#define U_USB_GROUND BIT(4)
+#define U_USB_FMB1 BIT(3)
+#define U_USB_FLOAT1 BIT(2)
+#define U_USB_FMB2 BIT(1)
+#define U_USB_FLOAT2 BIT(0)
+
+#define TYPEC_MODE_CFG_REG 0x44
+#define TYPEC_TRY_MODE_MASK GENMASK(4, 3)
+#define EN_TRY_SNK BIT(4)
+#define EN_TRY_SRC BIT(3)
+#define TYPEC_POWER_ROLE_CMD_MASK GENMASK(2, 0)
+#define EN_SRC_ONLY BIT(2)
+#define EN_SNK_ONLY BIT(1)
+#define TYPEC_DISABLE_CMD BIT(0)
+
+#define TYPEC_VCONN_CONTROL_REG 0x46
+#define VCONN_EN_ORIENTATION BIT(2)
+#define VCONN_EN_VALUE BIT(1)
+#define VCONN_EN_SRC BIT(0)
+
+#define TYPEC_CCOUT_CONTROL_REG 0x48
+#define TYPEC_CCOUT_BUFFER_EN BIT(2)
+#define TYPEC_CCOUT_VALUE BIT(1)
+#define TYPEC_CCOUT_SRC BIT(0)
+
+#define DEBUG_ACCESS_SRC_CFG_REG 0x4C
+#define EN_UNORIENTED_DEBUG_ACCESS_SRC BIT(0)
+
+#define TYPE_C_CRUDE_SENSOR_CFG_REG 0x4e
+#define EN_SRC_CRUDE_SENSOR BIT(1)
+#define EN_SNK_CRUDE_SENSOR BIT(0)
+
+#define TYPEC_EXIT_STATE_CFG_REG 0x50
+#define BYPASS_VSAFE0V_DURING_ROLE_SWAP BIT(3)
+#define SEL_SRC_UPPER_REF BIT(2)
+#define USE_TPD_FOR_EXITING_ATTACHSRC BIT(1)
+#define EXIT_SNK_BASED_ON_CC BIT(0)
+
+#define TYPEC_CURRSRC_CFG_REG 0x52
+#define TYPEC_SRC_RP_SEL_330UA BIT(1)
+#define TYPEC_SRC_RP_SEL_180UA BIT(0)
+#define TYPEC_SRC_RP_SEL_80UA 0
+#define TYPEC_SRC_RP_SEL_MASK GENMASK(1, 0)
+
+#define TYPEC_INTERRUPT_EN_CFG_1_REG 0x5E
+#define TYPEC_LEGACY_CABLE_INT_EN BIT(7)
+#define TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN BIT(6)
+#define TYPEC_TRYSOURCE_DETECT_INT_EN BIT(5)
+#define TYPEC_TRYSINK_DETECT_INT_EN BIT(4)
+#define TYPEC_CCOUT_DETACH_INT_EN BIT(3)
+#define TYPEC_CCOUT_ATTACH_INT_EN BIT(2)
+#define TYPEC_VBUS_DEASSERT_INT_EN BIT(1)
+#define TYPEC_VBUS_ASSERT_INT_EN BIT(0)
+
+#define TYPEC_INTERRUPT_EN_CFG_2_REG 0x60
+#define TYPEC_SRC_BATT_HPWR_INT_EN BIT(6)
+#define MICRO_USB_STATE_CHANGE_INT_EN BIT(5)
+#define TYPEC_STATE_MACHINE_CHANGE_INT_EN BIT(4)
+#define TYPEC_DEBUG_ACCESS_DETECT_INT_EN BIT(3)
+#define TYPEC_WATER_DETECTION_INT_EN BIT(2)
+#define TYPEC_VBUS_ERROR_INT_EN BIT(1)
+#define TYPEC_DEBOUNCE_DONE_INT_EN BIT(0)
+
+#define TYPEC_DEBOUNCE_OPTION_REG 0x62
+#define REDUCE_TCCDEBOUNCE_TO_2MS BIT(2)
+
+#define TYPE_C_SBU_CFG_REG 0x6A
+#define SEL_SBU1_ISRC_VAL 0x04
+#define SEL_SBU2_ISRC_VAL 0x01
+
+#define TYPEC_U_USB_CFG_REG 0x70
+#define EN_MICRO_USB_FACTORY_MODE BIT(1)
+#define EN_MICRO_USB_MODE BIT(0)
+
+#define TYPEC_PMI632_U_USB_WATER_PROTECTION_CFG_REG 0x72
+
+#define TYPEC_U_USB_WATER_PROTECTION_CFG_REG 0x73
+#define EN_MICRO_USB_WATER_PROTECTION BIT(4)
+#define MICRO_USB_DETECTION_ON_TIME_CFG_MASK GENMASK(3, 2)
+#define MICRO_USB_DETECTION_PERIOD_CFG_MASK GENMASK(1, 0)
+
+#define TYPEC_PMI632_MICRO_USB_MODE_REG 0x73
+#define MICRO_USB_MODE_ONLY BIT(0)
+
+/* Interrupt numbers */
+#define PMIC_TYPEC_OR_RID_IRQ 0x0
+#define PMIC_TYPEC_VPD_IRQ 0x1
+#define PMIC_TYPEC_CC_STATE_IRQ 0x2
+#define PMIC_TYPEC_VCONN_OC_IRQ 0x3
+#define PMIC_TYPEC_VBUS_IRQ 0x4
+#define PMIC_TYPEC_ATTACH_DETACH_IRQ 0x5
+#define PMIC_TYPEC_LEGACY_CABLE_IRQ 0x6
+#define PMIC_TYPEC_TRY_SNK_SRC_IRQ 0x7
+
struct pmic_typec_port_irq_data {
int virq;
int irq;
@@ -33,6 +172,8 @@ struct pmic_typec_port {
struct pmic_typec_port_irq_data *irq_data;
struct regulator *vdd_vbus;
+ bool vbus_enabled;
+ struct mutex vbus_lock; /* VBUS state serialization */
int cc;
bool debouncing_cc;
@@ -131,7 +272,7 @@ done:
return IRQ_HANDLED;
}
-int qcom_pmic_typec_port_get_vbus(struct pmic_typec_port *pmic_typec_port)
+static int qcom_pmic_typec_port_vbus_detect(struct pmic_typec_port *pmic_typec_port)
{
struct device *dev = pmic_typec_port->dev;
unsigned int misc;
@@ -148,7 +289,7 @@ int qcom_pmic_typec_port_get_vbus(struct pmic_typec_port *pmic_typec_port)
return !!(misc & TYPEC_VBUS_DETECT);
}
-int qcom_pmic_typec_port_set_vbus(struct pmic_typec_port *pmic_typec_port, bool on)
+static int qcom_pmic_typec_port_vbus_toggle(struct pmic_typec_port *pmic_typec_port, bool on)
{
u32 sm_stat;
u32 val;
@@ -179,10 +320,49 @@ int qcom_pmic_typec_port_set_vbus(struct pmic_typec_port *pmic_typec_port, bool
return 0;
}
-int qcom_pmic_typec_port_get_cc(struct pmic_typec_port *pmic_typec_port,
- enum typec_cc_status *cc1,
- enum typec_cc_status *cc2)
+static int qcom_pmic_typec_port_get_vbus(struct tcpc_dev *tcpc)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
+ int ret;
+
+ mutex_lock(&pmic_typec_port->vbus_lock);
+ ret = pmic_typec_port->vbus_enabled || qcom_pmic_typec_port_vbus_detect(pmic_typec_port);
+ mutex_unlock(&pmic_typec_port->vbus_lock);
+
+ return ret;
+}
+
+static int qcom_pmic_typec_port_set_vbus(struct tcpc_dev *tcpc, bool on, bool sink)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
+ int ret = 0;
+
+ mutex_lock(&pmic_typec_port->vbus_lock);
+ if (pmic_typec_port->vbus_enabled == on)
+ goto done;
+
+ ret = qcom_pmic_typec_port_vbus_toggle(pmic_typec_port, on);
+ if (ret)
+ goto done;
+
+ pmic_typec_port->vbus_enabled = on;
+ tcpm_vbus_change(tcpm->tcpm_port);
+
+done:
+ dev_dbg(tcpm->dev, "set_vbus set: %d result %d\n", on, ret);
+ mutex_unlock(&pmic_typec_port->vbus_lock);
+
+ return ret;
+}
+
+static int qcom_pmic_typec_port_get_cc(struct tcpc_dev *tcpc,
+ enum typec_cc_status *cc1,
+ enum typec_cc_status *cc2)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
struct device *dev = pmic_typec_port->dev;
unsigned int misc, val;
bool attached;
@@ -275,9 +455,11 @@ static void qcom_pmic_set_cc_debounce(struct pmic_typec_port *pmic_typec_port)
msecs_to_jiffies(2));
}
-int qcom_pmic_typec_port_set_cc(struct pmic_typec_port *pmic_typec_port,
- enum typec_cc_status cc)
+static int qcom_pmic_typec_port_set_cc(struct tcpc_dev *tcpc,
+ enum typec_cc_status cc)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
struct device *dev = pmic_typec_port->dev;
unsigned int mode, currsrc;
unsigned int misc;
@@ -341,8 +523,17 @@ done:
return ret;
}
-int qcom_pmic_typec_port_set_vconn(struct pmic_typec_port *pmic_typec_port, bool on)
+static int qcom_pmic_typec_port_set_polarity(struct tcpc_dev *tcpc,
+ enum typec_cc_polarity pol)
+{
+ /* Polarity is set separately by phy-qcom-qmp.c */
+ return 0;
+}
+
+static int qcom_pmic_typec_port_set_vconn(struct tcpc_dev *tcpc, bool on)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
struct device *dev = pmic_typec_port->dev;
unsigned int orientation, misc, mask, value;
unsigned long flags;
@@ -377,10 +568,12 @@ done:
return ret;
}
-int qcom_pmic_typec_port_start_toggling(struct pmic_typec_port *pmic_typec_port,
- enum typec_port_type port_type,
- enum typec_cc_status cc)
+static int qcom_pmic_typec_port_start_toggling(struct tcpc_dev *tcpc,
+ enum typec_port_type port_type,
+ enum typec_cc_status cc)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
struct device *dev = pmic_typec_port->dev;
unsigned int misc;
u8 mode = 0;
@@ -441,9 +634,10 @@ done:
(TYPEC_STATE_MACHINE_CHANGE_INT_EN | TYPEC_VBUS_ERROR_INT_EN | \
TYPEC_DEBOUNCE_DONE_INT_EN)
-int qcom_pmic_typec_port_start(struct pmic_typec_port *pmic_typec_port,
- struct tcpm_port *tcpm_port)
+static int qcom_pmic_typec_port_start(struct pmic_typec *tcpm,
+ struct tcpm_port *tcpm_port)
{
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
int i;
int mask;
int ret;
@@ -491,29 +685,30 @@ done:
return ret;
}
-void qcom_pmic_typec_port_stop(struct pmic_typec_port *pmic_typec_port)
+static void qcom_pmic_typec_port_stop(struct pmic_typec *tcpm)
{
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
int i;
for (i = 0; i < pmic_typec_port->nr_irqs; i++)
disable_irq(pmic_typec_port->irq_data[i].irq);
}
-struct pmic_typec_port *qcom_pmic_typec_port_alloc(struct device *dev)
-{
- return devm_kzalloc(dev, sizeof(struct pmic_typec_port), GFP_KERNEL);
-}
-
int qcom_pmic_typec_port_probe(struct platform_device *pdev,
- struct pmic_typec_port *pmic_typec_port,
- struct pmic_typec_port_resources *res,
+ struct pmic_typec *tcpm,
+ const struct pmic_typec_port_resources *res,
struct regmap *regmap,
u32 base)
{
struct device *dev = &pdev->dev;
struct pmic_typec_port_irq_data *irq_data;
+ struct pmic_typec_port *pmic_typec_port;
int i, ret, irq;
+ pmic_typec_port = devm_kzalloc(dev, sizeof(*pmic_typec_port), GFP_KERNEL);
+ if (!pmic_typec_port)
+ return -ENOMEM;
+
if (!res->nr_irqs || res->nr_irqs > PMIC_TYPEC_MAX_IRQS)
return -EINVAL;
@@ -522,6 +717,8 @@ int qcom_pmic_typec_port_probe(struct platform_device *pdev,
if (!irq_data)
return -ENOMEM;
+ mutex_init(&pmic_typec_port->vbus_lock);
+
pmic_typec_port->vdd_vbus = devm_regulator_get(dev, "vdd-vbus");
if (IS_ERR(pmic_typec_port->vdd_vbus))
return PTR_ERR(pmic_typec_port->vdd_vbus);
@@ -556,5 +753,56 @@ int qcom_pmic_typec_port_probe(struct platform_device *pdev,
return ret;
}
+ tcpm->pmic_typec_port = pmic_typec_port;
+
+ tcpm->tcpc.get_vbus = qcom_pmic_typec_port_get_vbus;
+ tcpm->tcpc.set_vbus = qcom_pmic_typec_port_set_vbus;
+ tcpm->tcpc.set_cc = qcom_pmic_typec_port_set_cc;
+ tcpm->tcpc.get_cc = qcom_pmic_typec_port_get_cc;
+ tcpm->tcpc.set_polarity = qcom_pmic_typec_port_set_polarity;
+ tcpm->tcpc.set_vconn = qcom_pmic_typec_port_set_vconn;
+ tcpm->tcpc.start_toggling = qcom_pmic_typec_port_start_toggling;
+
+ tcpm->port_start = qcom_pmic_typec_port_start;
+ tcpm->port_stop = qcom_pmic_typec_port_stop;
+
return 0;
}
+
+const struct pmic_typec_port_resources pm8150b_port_res = {
+ .irq_params = {
+ {
+ .irq_name = "vpd-detect",
+ .virq = PMIC_TYPEC_VPD_IRQ,
+ },
+
+ {
+ .irq_name = "cc-state-change",
+ .virq = PMIC_TYPEC_CC_STATE_IRQ,
+ },
+ {
+ .irq_name = "vconn-oc",
+ .virq = PMIC_TYPEC_VCONN_OC_IRQ,
+ },
+
+ {
+ .irq_name = "vbus-change",
+ .virq = PMIC_TYPEC_VBUS_IRQ,
+ },
+
+ {
+ .irq_name = "attach-detach",
+ .virq = PMIC_TYPEC_ATTACH_DETACH_IRQ,
+ },
+ {
+ .irq_name = "legacy-cable-detect",
+ .virq = PMIC_TYPEC_LEGACY_CABLE_IRQ,
+ },
+
+ {
+ .irq_name = "try-snk-src-detect",
+ .virq = PMIC_TYPEC_TRY_SNK_SRC_IRQ,
+ },
+ },
+ .nr_irqs = 7,
+};
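
With the VBUS bookkeeping moved out of the core file, the port sub-driver now owns both the cached vbus_enabled state and the mutex that serializes it, and only touches the regulator when the requested state actually changes. A standalone sketch of that cache-under-a-lock pattern, with a userspace pthread mutex standing in for the kernel mutex:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct vbus_port {
            pthread_mutex_t lock;   /* VBUS state serialization */
            bool vbus_enabled;      /* cached software state */
    };

    /* Stand-in for the regulator enable/disable the driver really performs. */
    static int hw_vbus_toggle(bool on)
    {
            printf("hw: vbus %s\n", on ? "on" : "off");
            return 0;
    }

    static int port_set_vbus(struct vbus_port *port, bool on)
    {
            int ret = 0;

            pthread_mutex_lock(&port->lock);
            if (port->vbus_enabled == on)   /* nothing to do */
                    goto done;

            ret = hw_vbus_toggle(on);
            if (ret)
                    goto done;

            port->vbus_enabled = on;
    done:
            pthread_mutex_unlock(&port->lock);
            return ret;
    }

    int main(void)
    {
            struct vbus_port port = { .lock = PTHREAD_MUTEX_INITIALIZER };

            port_set_vbus(&port, true);
            port_set_vbus(&port, true);     /* cached: no hardware access */
            port_set_vbus(&port, false);
            return 0;
    }
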
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h
index d4d358c680b6b..2ca83a46cf3b5 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h
@@ -3,149 +3,12 @@
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2023, Linaro Ltd. All rights reserved.
*/
-#ifndef __QCOM_PMIC_TYPEC_H__
-#define __QCOM_PMIC_TYPEC_H__
+#ifndef __QCOM_PMIC_TYPEC_PORT_H__
+#define __QCOM_PMIC_TYPEC_PORT_H__
#include <linux/platform_device.h>
#include <linux/usb/tcpm.h>
-#define TYPEC_SNK_STATUS_REG 0x06
-#define DETECTED_SNK_TYPE_MASK GENMASK(6, 0)
-#define SNK_DAM_MASK GENMASK(6, 4)
-#define SNK_DAM_500MA BIT(6)
-#define SNK_DAM_1500MA BIT(5)
-#define SNK_DAM_3000MA BIT(4)
-#define SNK_RP_STD BIT(3)
-#define SNK_RP_1P5 BIT(2)
-#define SNK_RP_3P0 BIT(1)
-#define SNK_RP_SHORT BIT(0)
-
-#define TYPEC_SRC_STATUS_REG 0x08
-#define DETECTED_SRC_TYPE_MASK GENMASK(4, 0)
-#define SRC_HIGH_BATT BIT(5)
-#define SRC_DEBUG_ACCESS BIT(4)
-#define SRC_RD_OPEN BIT(3)
-#define SRC_RD_RA_VCONN BIT(2)
-#define SRC_RA_OPEN BIT(1)
-#define AUDIO_ACCESS_RA_RA BIT(0)
-
-#define TYPEC_STATE_MACHINE_STATUS_REG 0x09
-#define TYPEC_ATTACH_DETACH_STATE BIT(5)
-
-#define TYPEC_SM_STATUS_REG 0x0A
-#define TYPEC_SM_VBUS_VSAFE5V BIT(5)
-#define TYPEC_SM_VBUS_VSAFE0V BIT(6)
-#define TYPEC_SM_USBIN_LT_LV BIT(7)
-
-#define TYPEC_MISC_STATUS_REG 0x0B
-#define TYPEC_WATER_DETECTION_STATUS BIT(7)
-#define SNK_SRC_MODE BIT(6)
-#define TYPEC_VBUS_DETECT BIT(5)
-#define TYPEC_VBUS_ERROR_STATUS BIT(4)
-#define TYPEC_DEBOUNCE_DONE BIT(3)
-#define CC_ORIENTATION BIT(1)
-#define CC_ATTACHED BIT(0)
-
-#define LEGACY_CABLE_STATUS_REG 0x0D
-#define TYPEC_LEGACY_CABLE_STATUS BIT(1)
-#define TYPEC_NONCOMP_LEGACY_CABLE_STATUS BIT(0)
-
-#define TYPEC_U_USB_STATUS_REG 0x0F
-#define U_USB_GROUND_NOVBUS BIT(6)
-#define U_USB_GROUND BIT(4)
-#define U_USB_FMB1 BIT(3)
-#define U_USB_FLOAT1 BIT(2)
-#define U_USB_FMB2 BIT(1)
-#define U_USB_FLOAT2 BIT(0)
-
-#define TYPEC_MODE_CFG_REG 0x44
-#define TYPEC_TRY_MODE_MASK GENMASK(4, 3)
-#define EN_TRY_SNK BIT(4)
-#define EN_TRY_SRC BIT(3)
-#define TYPEC_POWER_ROLE_CMD_MASK GENMASK(2, 0)
-#define EN_SRC_ONLY BIT(2)
-#define EN_SNK_ONLY BIT(1)
-#define TYPEC_DISABLE_CMD BIT(0)
-
-#define TYPEC_VCONN_CONTROL_REG 0x46
-#define VCONN_EN_ORIENTATION BIT(2)
-#define VCONN_EN_VALUE BIT(1)
-#define VCONN_EN_SRC BIT(0)
-
-#define TYPEC_CCOUT_CONTROL_REG 0x48
-#define TYPEC_CCOUT_BUFFER_EN BIT(2)
-#define TYPEC_CCOUT_VALUE BIT(1)
-#define TYPEC_CCOUT_SRC BIT(0)
-
-#define DEBUG_ACCESS_SRC_CFG_REG 0x4C
-#define EN_UNORIENTED_DEBUG_ACCESS_SRC BIT(0)
-
-#define TYPE_C_CRUDE_SENSOR_CFG_REG 0x4e
-#define EN_SRC_CRUDE_SENSOR BIT(1)
-#define EN_SNK_CRUDE_SENSOR BIT(0)
-
-#define TYPEC_EXIT_STATE_CFG_REG 0x50
-#define BYPASS_VSAFE0V_DURING_ROLE_SWAP BIT(3)
-#define SEL_SRC_UPPER_REF BIT(2)
-#define USE_TPD_FOR_EXITING_ATTACHSRC BIT(1)
-#define EXIT_SNK_BASED_ON_CC BIT(0)
-
-#define TYPEC_CURRSRC_CFG_REG 0x52
-#define TYPEC_SRC_RP_SEL_330UA BIT(1)
-#define TYPEC_SRC_RP_SEL_180UA BIT(0)
-#define TYPEC_SRC_RP_SEL_80UA 0
-#define TYPEC_SRC_RP_SEL_MASK GENMASK(1, 0)
-
-#define TYPEC_INTERRUPT_EN_CFG_1_REG 0x5E
-#define TYPEC_LEGACY_CABLE_INT_EN BIT(7)
-#define TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN BIT(6)
-#define TYPEC_TRYSOURCE_DETECT_INT_EN BIT(5)
-#define TYPEC_TRYSINK_DETECT_INT_EN BIT(4)
-#define TYPEC_CCOUT_DETACH_INT_EN BIT(3)
-#define TYPEC_CCOUT_ATTACH_INT_EN BIT(2)
-#define TYPEC_VBUS_DEASSERT_INT_EN BIT(1)
-#define TYPEC_VBUS_ASSERT_INT_EN BIT(0)
-
-#define TYPEC_INTERRUPT_EN_CFG_2_REG 0x60
-#define TYPEC_SRC_BATT_HPWR_INT_EN BIT(6)
-#define MICRO_USB_STATE_CHANGE_INT_EN BIT(5)
-#define TYPEC_STATE_MACHINE_CHANGE_INT_EN BIT(4)
-#define TYPEC_DEBUG_ACCESS_DETECT_INT_EN BIT(3)
-#define TYPEC_WATER_DETECTION_INT_EN BIT(2)
-#define TYPEC_VBUS_ERROR_INT_EN BIT(1)
-#define TYPEC_DEBOUNCE_DONE_INT_EN BIT(0)
-
-#define TYPEC_DEBOUNCE_OPTION_REG 0x62
-#define REDUCE_TCCDEBOUNCE_TO_2MS BIT(2)
-
-#define TYPE_C_SBU_CFG_REG 0x6A
-#define SEL_SBU1_ISRC_VAL 0x04
-#define SEL_SBU2_ISRC_VAL 0x01
-
-#define TYPEC_U_USB_CFG_REG 0x70
-#define EN_MICRO_USB_FACTORY_MODE BIT(1)
-#define EN_MICRO_USB_MODE BIT(0)
-
-#define TYPEC_PMI632_U_USB_WATER_PROTECTION_CFG_REG 0x72
-
-#define TYPEC_U_USB_WATER_PROTECTION_CFG_REG 0x73
-#define EN_MICRO_USB_WATER_PROTECTION BIT(4)
-#define MICRO_USB_DETECTION_ON_TIME_CFG_MASK GENMASK(3, 2)
-#define MICRO_USB_DETECTION_PERIOD_CFG_MASK GENMASK(1, 0)
-
-#define TYPEC_PMI632_MICRO_USB_MODE_REG 0x73
-#define MICRO_USB_MODE_ONLY BIT(0)
-
-/* Interrupt numbers */
-#define PMIC_TYPEC_OR_RID_IRQ 0x0
-#define PMIC_TYPEC_VPD_IRQ 0x1
-#define PMIC_TYPEC_CC_STATE_IRQ 0x2
-#define PMIC_TYPEC_VCONN_OC_IRQ 0x3
-#define PMIC_TYPEC_VBUS_IRQ 0x4
-#define PMIC_TYPEC_ATTACH_DETACH_IRQ 0x5
-#define PMIC_TYPEC_LEGACY_CABLE_IRQ 0x6
-#define PMIC_TYPEC_TRY_SNK_SRC_IRQ 0x7
-
/* Resources */
#define PMIC_TYPEC_MAX_IRQS 0x08
@@ -156,40 +19,17 @@ struct pmic_typec_port_irq_params {
struct pmic_typec_port_resources {
unsigned int nr_irqs;
- struct pmic_typec_port_irq_params irq_params[PMIC_TYPEC_MAX_IRQS];
+ const struct pmic_typec_port_irq_params irq_params[PMIC_TYPEC_MAX_IRQS];
};
/* API */
-struct pmic_typec;
-struct pmic_typec_port *qcom_pmic_typec_port_alloc(struct device *dev);
+extern const struct pmic_typec_port_resources pm8150b_port_res;
int qcom_pmic_typec_port_probe(struct platform_device *pdev,
- struct pmic_typec_port *pmic_typec_port,
- struct pmic_typec_port_resources *res,
+ struct pmic_typec *tcpm,
+ const struct pmic_typec_port_resources *res,
struct regmap *regmap,
u32 base);
-int qcom_pmic_typec_port_start(struct pmic_typec_port *pmic_typec_port,
- struct tcpm_port *tcpm_port);
-
-void qcom_pmic_typec_port_stop(struct pmic_typec_port *pmic_typec_port);
-
-int qcom_pmic_typec_port_get_cc(struct pmic_typec_port *pmic_typec_port,
- enum typec_cc_status *cc1,
- enum typec_cc_status *cc2);
-
-int qcom_pmic_typec_port_set_cc(struct pmic_typec_port *pmic_typec_port,
- enum typec_cc_status cc);
-
-int qcom_pmic_typec_port_get_vbus(struct pmic_typec_port *pmic_typec_port);
-
-int qcom_pmic_typec_port_set_vconn(struct pmic_typec_port *pmic_typec_port, bool on);
-
-int qcom_pmic_typec_port_start_toggling(struct pmic_typec_port *pmic_typec_port,
- enum typec_port_type port_type,
- enum typec_cc_status cc);
-
-int qcom_pmic_typec_port_set_vbus(struct pmic_typec_port *pmic_typec_port, bool on);
-
#endif /* __QCOM_PMIC_TYPE_C_PORT_H__ */
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 0ee3e6e29bb17..c962014bba4e8 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -445,8 +445,11 @@ static int tcpci_set_pd_rx(struct tcpc_dev *tcpc, bool enable)
unsigned int reg = 0;
int ret;
- if (enable)
+ if (enable) {
reg = TCPC_RX_DETECT_SOP | TCPC_RX_DETECT_HARD_RESET;
+ if (tcpci->data->cable_comm_capable)
+ reg |= TCPC_RX_DETECT_SOP1;
+ }
ret = regmap_write(tcpci->regmap, TCPC_RX_DETECT, reg);
if (ret < 0)
return ret;
@@ -584,6 +587,23 @@ static int tcpci_pd_transmit(struct tcpc_dev *tcpc, enum tcpm_transmit_type type
return 0;
}
+static bool tcpci_cable_comm_capable(struct tcpc_dev *tcpc)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+
+ return tcpci->data->cable_comm_capable;
+}
+
+static bool tcpci_attempt_vconn_swap_discovery(struct tcpc_dev *tcpc)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+
+ if (tcpci->data->attempt_vconn_swap_discovery)
+ return tcpci->data->attempt_vconn_swap_discovery(tcpci, tcpci->data);
+
+ return false;
+}
+
static int tcpci_init(struct tcpc_dev *tcpc)
{
struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
@@ -712,7 +732,7 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
/* Read complete, clear RX status alert bit */
tcpci_write16(tcpci, TCPC_ALERT, TCPC_ALERT_RX_STATUS);
- tcpm_pd_receive(tcpci->port, &msg);
+ tcpm_pd_receive(tcpci->port, &msg, TCPC_TX_SOP);
}
if (tcpci->data->vbus_vsafe0v && (status & TCPC_ALERT_EXTENDED_STATUS)) {
@@ -793,6 +813,8 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
tcpci->tcpc.enable_frs = tcpci_enable_frs;
tcpci->tcpc.frs_sourcing_vbus = tcpci_frs_sourcing_vbus;
tcpci->tcpc.set_partner_usb_comm_capable = tcpci_set_partner_usb_comm_capable;
+ tcpci->tcpc.cable_comm_capable = tcpci_cable_comm_capable;
+ tcpci->tcpc.attempt_vconn_swap_discovery = tcpci_attempt_vconn_swap_discovery;
if (tcpci->data->check_contaminant)
tcpci->tcpc.check_contaminant = tcpci_check_contaminant;
@@ -889,6 +911,7 @@ MODULE_DEVICE_TABLE(i2c, tcpci_id);
#ifdef CONFIG_OF
static const struct of_device_id tcpci_of_match[] = {
{ .compatible = "nxp,ptn5110", },
+ { .compatible = "tcpci", },
{},
};
MODULE_DEVICE_TABLE(of, tcpci_of_match);
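
For TCPCI controllers that declare cable_comm_capable, set_pd_rx() now also enables SOP' reception alongside SOP and Hard Reset. A standalone sketch of how that RECEIVE_DETECT mask is composed; the bit positions follow the TCPCI register layout but are hard-coded here purely for illustration:

    #include <stdio.h>

    #define BIT(n)                          (1u << (n))
    #define TCPC_RX_DETECT_SOP              BIT(0)
    #define TCPC_RX_DETECT_SOP1             BIT(1)
    #define TCPC_RX_DETECT_HARD_RESET       BIT(5)

    static unsigned int rx_detect_mask(int enable, int cable_comm_capable)
    {
            unsigned int reg = 0;

            if (enable) {
                    reg = TCPC_RX_DETECT_SOP | TCPC_RX_DETECT_HARD_RESET;
                    if (cable_comm_capable)     /* also listen for SOP' frames */
                            reg |= TCPC_RX_DETECT_SOP1;
            }
            return reg;
    }

    int main(void)
    {
            printf("pd rx on, no cable comm: 0x%02x\n", rx_detect_mask(1, 0));
            printf("pd rx on, cable comm:    0x%02x\n", rx_detect_mask(1, 1));
            printf("pd rx off:               0x%02x\n", rx_detect_mask(0, 1));
            return 0;
    }
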
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.h b/drivers/usb/typec/tcpm/tcpci_maxim.h
index 2c1c4d161b0dc..78ff3b73ee7e3 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim.h
+++ b/drivers/usb/typec/tcpm/tcpci_maxim.h
@@ -62,6 +62,7 @@ struct max_tcpci_chip {
struct i2c_client *client;
struct tcpm_port *port;
enum contamiant_state contaminant_state;
+ bool veto_vconn_swap;
};
static inline int max_tcpci_read16(struct max_tcpci_chip *chip, unsigned int reg, u16 *val)
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim_core.c b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
index 7fb966fd639b3..eec3bcec119c1 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim_core.c
+++ b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
@@ -128,6 +128,7 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
u8 count, frame_type, rx_buf[TCPC_RECEIVE_BUFFER_LEN];
int ret, payload_index;
u8 *rx_buf_ptr;
+ enum tcpm_transmit_type rx_type;
/*
* READABLE_BYTE_COUNT: Indicates the number of bytes in the RX_BUF_BYTE_x registers
@@ -143,10 +144,23 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
count = rx_buf[TCPC_RECEIVE_BUFFER_COUNT_OFFSET];
frame_type = rx_buf[TCPC_RECEIVE_BUFFER_FRAME_TYPE_OFFSET];
- if (count == 0 || frame_type != TCPC_RX_BUF_FRAME_TYPE_SOP) {
+ switch (frame_type) {
+ case TCPC_RX_BUF_FRAME_TYPE_SOP1:
+ rx_type = TCPC_TX_SOP_PRIME;
+ break;
+ case TCPC_RX_BUF_FRAME_TYPE_SOP:
+ rx_type = TCPC_TX_SOP;
+ break;
+ default:
+ rx_type = TCPC_TX_SOP;
+ break;
+ }
+
+ if (count == 0 || (frame_type != TCPC_RX_BUF_FRAME_TYPE_SOP &&
+ frame_type != TCPC_RX_BUF_FRAME_TYPE_SOP1)) {
max_tcpci_write16(chip, TCPC_ALERT, TCPC_ALERT_RX_STATUS);
dev_err(chip->dev, "%s\n", count == 0 ? "error: count is 0" :
- "error frame_type is not SOP");
+ "error frame_type is not SOP/SOP'");
return;
}
@@ -183,7 +197,7 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
if (ret < 0)
return;
- tcpm_pd_receive(chip->port, &msg);
+ tcpm_pd_receive(chip->port, &msg, rx_type);
}
static int max_tcpci_set_vbus(struct tcpci *tcpci, struct tcpci_data *tdata, bool source, bool sink)
@@ -309,8 +323,10 @@ static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status)
if (ret < 0)
return ret;
- if (reg_status & TCPC_FAULT_STATUS_VCONN_OC)
+ if (reg_status & TCPC_FAULT_STATUS_VCONN_OC) {
+ chip->veto_vconn_swap = true;
tcpm_port_error_recovery(chip->port);
+ }
}
if (status & TCPC_ALERT_EXTND) {
@@ -444,6 +460,18 @@ static void max_tcpci_check_contaminant(struct tcpci *tcpci, struct tcpci_data *
tcpm_port_clean(chip->port);
}
+static bool max_tcpci_attempt_vconn_swap_discovery(struct tcpci *tcpci, struct tcpci_data *tdata)
+{
+ struct max_tcpci_chip *chip = tdata_to_max_tcpci(tdata);
+
+ if (chip->veto_vconn_swap) {
+ chip->veto_vconn_swap = false;
+ return false;
+ }
+
+ return true;
+}
+
static int max_tcpci_probe(struct i2c_client *client)
{
int ret;
@@ -478,6 +506,8 @@ static int max_tcpci_probe(struct i2c_client *client)
chip->data.vbus_vsafe0v = true;
chip->data.set_partner_usb_comm_capable = max_tcpci_set_partner_usb_comm_capable;
chip->data.check_contaminant = max_tcpci_check_contaminant;
+ chip->data.cable_comm_capable = true;
+ chip->data.attempt_vconn_swap_discovery = max_tcpci_attempt_vconn_swap_discovery;
max_tcpci_init_regs(chip);
chip->tcpci = tcpci_register_port(chip->dev, &chip->data);
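
The Maxim glue latches veto_vconn_swap when a VCONN over-current fault triggers error recovery, and attempt_vconn_swap_discovery() then declines exactly one swap attempt before re-arming. A standalone sketch of that one-shot veto, with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    struct chip {
            bool veto_vconn_swap;
    };

    /* Fault path: a VCONN over-current latches the veto before error recovery. */
    static void on_vconn_oc_fault(struct chip *chip)
    {
            chip->veto_vconn_swap = true;
    }

    /* Policy query: allow the swap unless a fault vetoed it; the veto is one-shot. */
    static bool attempt_vconn_swap_discovery(struct chip *chip)
    {
            if (chip->veto_vconn_swap) {
                    chip->veto_vconn_swap = false;
                    return false;
            }
            return true;
    }

    int main(void)
    {
            struct chip chip = { 0 };

            printf("%d\n", attempt_vconn_swap_discovery(&chip));  /* 1: allowed */
            on_vconn_oc_fault(&chip);
            printf("%d\n", attempt_vconn_swap_discovery(&chip));  /* 0: vetoed once */
            printf("%d\n", attempt_vconn_swap_discovery(&chip));  /* 1: allowed again */
            return 0;
    }
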
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 0965972310275..ab6ed6111ed05 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -108,6 +108,7 @@
S(VCONN_SWAP_WAIT_FOR_VCONN), \
S(VCONN_SWAP_TURN_ON_VCONN), \
S(VCONN_SWAP_TURN_OFF_VCONN), \
+ S(VCONN_SWAP_SEND_SOFT_RESET), \
\
S(FR_SWAP_SEND), \
S(FR_SWAP_SEND_TIMEOUT), \
@@ -145,7 +146,9 @@
S(PORT_RESET_WAIT_OFF), \
\
S(AMS_START), \
- S(CHUNK_NOT_SUPP)
+ S(CHUNK_NOT_SUPP), \
+ \
+ S(SRC_VDM_IDENTITY_REQUEST)
#define FOREACH_AMS(S) \
S(NONE_AMS), \
@@ -327,6 +330,12 @@ struct tcpm_port {
struct typec_partner_desc partner_desc;
struct typec_partner *partner;
+ struct usb_pd_identity cable_ident;
+ struct typec_cable_desc cable_desc;
+ struct typec_cable *cable;
+ struct typec_plug_desc plug_prime_desc;
+ struct typec_plug *plug_prime;
+
enum typec_cc_status cc_req;
enum typec_cc_status src_rp; /* work only if pd_supported == false */
@@ -468,7 +477,9 @@ struct tcpm_port {
/* Alternate mode data */
struct pd_mode_data mode_data;
+ struct pd_mode_data mode_data_prime;
struct typec_altmode *partner_altmode[ALTMODE_DISCOVERY_MAX];
+ struct typec_altmode *plug_prime_altmode[ALTMODE_DISCOVERY_MAX];
struct typec_altmode *port_altmode[ALTMODE_DISCOVERY_MAX];
/* Deadline in jiffies to exit src_try_wait state */
@@ -505,6 +516,41 @@ struct tcpm_port {
* transitions.
*/
bool potential_contaminant;
+
+ /* SOP* Related Fields */
+ /*
+ * Flag to determine if SOP' Discover Identity is available. The flag
+ * is set if Discover Identity on SOP' does not immediately follow
+ * Discover Identity on SOP.
+ */
+ bool send_discover_prime;
+ /*
+ * tx_sop_type determines which SOP* a message is being sent on.
+ * For messages that are queued and not sent immediately such as in
+ * tcpm_queue_message or messages that send after state changes,
+ * the tx_sop_type is set accordingly.
+ */
+ enum tcpm_transmit_type tx_sop_type;
+ /*
+ * Prior to discovering the port partner's Specification Revision, the
+ * Vconn source and cable plug will use the lower of their two revisions.
+ *
+ * When the port partner's Specification Revision is discovered, the following
+ * rules are put in place.
+ * 1. If the cable revision (1) is lower than the revision negotiated
+ * between the port and partner (2), the port and partner will communicate
+ * on revision (2), but the port and cable will communicate on revision (1).
+ * 2. If the cable revision (1) is higher than the revision negotiated
+ * between the port and partner (2), the port and partner will communicate
+ * on revision (2), and the port and cable will communicate on revision (2)
+ * as well.
+ */
+ unsigned int negotiated_rev_prime;
+ /*
+ * Each SOP* type must maintain its own tx and rx message IDs
+ */
+ unsigned int message_id_prime;
+ unsigned int rx_msgid_prime;
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
struct mutex logbuffer_lock; /* log buffer access lock */
@@ -518,6 +564,7 @@ struct pd_rx_event {
struct kthread_work work;
struct tcpm_port *port;
struct pd_message msg;
+ enum tcpm_transmit_type rx_sop_type;
};
static const char * const pd_rev[] = {
@@ -893,19 +940,30 @@ static void tcpm_ams_finish(struct tcpm_port *port)
}
static int tcpm_pd_transmit(struct tcpm_port *port,
- enum tcpm_transmit_type type,
+ enum tcpm_transmit_type tx_sop_type,
const struct pd_message *msg)
{
unsigned long timeout;
int ret;
+ unsigned int negotiated_rev;
+
+ switch (tx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ negotiated_rev = port->negotiated_rev_prime;
+ break;
+ case TCPC_TX_SOP:
+ default:
+ negotiated_rev = port->negotiated_rev;
+ break;
+ }
if (msg)
tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
else
- tcpm_log(port, "PD TX, type: %#x", type);
+ tcpm_log(port, "PD TX, type: %#x", tx_sop_type);
reinit_completion(&port->tx_complete);
- ret = port->tcpc->pd_transmit(port->tcpc, type, msg, port->negotiated_rev);
+ ret = port->tcpc->pd_transmit(port->tcpc, tx_sop_type, msg, negotiated_rev);
if (ret < 0)
return ret;
@@ -918,7 +976,17 @@ static int tcpm_pd_transmit(struct tcpm_port *port,
switch (port->tx_status) {
case TCPC_TX_SUCCESS:
- port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
+ switch (tx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ port->message_id_prime = (port->message_id_prime + 1) &
+ PD_HEADER_ID_MASK;
+ break;
+ case TCPC_TX_SOP:
+ default:
+ port->message_id = (port->message_id + 1) &
+ PD_HEADER_ID_MASK;
+ break;
+ }
/*
* USB PD rev 2.0, 8.3.2.2.1:
* USB PD rev 3.0, 8.3.2.1.3:
@@ -1099,6 +1167,12 @@ static int tcpm_set_roles(struct tcpm_port *port, bool attached,
if (ret < 0)
return ret;
+ if (port->tcpc->set_orientation) {
+ ret = port->tcpc->set_orientation(port->tcpc, orientation);
+ if (ret < 0)
+ return ret;
+ }
+
port->pwr_role = role;
port->data_role = data;
typec_set_data_role(port->typec_port, data);
@@ -1456,7 +1530,7 @@ static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
* VDM/VDO handling functions
*/
static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
- const u32 *data, int cnt)
+ const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
{
u32 vdo_hdr = port->vdo_data[0];
@@ -1464,7 +1538,10 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
/* If is sending discover_identity, handle received message first */
if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
- port->send_discover = true;
+ if (tx_sop_type == TCPC_TX_SOP_PRIME)
+ port->send_discover_prime = true;
+ else
+ port->send_discover = true;
mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
} else {
/* Make sure we are not still processing a previous VDM packet */
@@ -1479,14 +1556,16 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
port->vdm_state = VDM_STATE_READY;
port->vdm_sm_running = true;
+ port->tx_sop_type = tx_sop_type;
+
mod_vdm_delayed_work(port, 0);
}
static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
- const u32 *data, int cnt)
+ const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
{
mutex_lock(&port->lock);
- tcpm_queue_vdm(port, header, data, cnt);
+ tcpm_queue_vdm(port, header, data, cnt, TCPC_TX_SOP);
mutex_unlock(&port->lock);
}
@@ -1508,9 +1587,68 @@ static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
PD_PRODUCT_PID(product), product & 0xffff);
}
-static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt)
+static void svdm_consume_identity_sop_prime(struct tcpm_port *port, const u32 *p, int cnt)
{
- struct pd_mode_data *pmdata = &port->mode_data;
+ u32 idh = p[VDO_INDEX_IDH];
+ u32 product = p[VDO_INDEX_PRODUCT];
+ int svdm_version;
+
+ /*
+ * Attempt to consume identity only if cable currently is not set
+ */
+ if (!IS_ERR_OR_NULL(port->cable))
+ goto register_plug;
+
+ /* Reset cable identity */
+ memset(&port->cable_ident, 0, sizeof(port->cable_ident));
+
+ /* Fill out id header, cert, product, cable VDO 1 */
+ port->cable_ident.id_header = idh;
+ port->cable_ident.cert_stat = p[VDO_INDEX_CSTAT];
+ port->cable_ident.product = product;
+ port->cable_ident.vdo[0] = p[VDO_INDEX_CABLE_1];
+
+ /* Fill out cable desc, infer svdm_version from pd revision */
+ port->cable_desc.type = (enum typec_plug_type) (VDO_TYPEC_CABLE_TYPE(p[VDO_INDEX_CABLE_1]) +
+ USB_PLUG_TYPE_A);
+ port->cable_desc.active = PD_IDH_PTYPE(idh) == IDH_PTYPE_ACABLE ? 1 : 0;
+ /* Log PD Revision and additional cable VDO from negotiated revision */
+ switch (port->negotiated_rev_prime) {
+ case PD_REV30:
+ port->cable_desc.pd_revision = 0x0300;
+ if (port->cable_desc.active)
+ port->cable_ident.vdo[1] = p[VDO_INDEX_CABLE_2];
+ break;
+ case PD_REV20:
+ port->cable_desc.pd_revision = 0x0200;
+ break;
+ default:
+ port->cable_desc.pd_revision = 0x0200;
+ break;
+ }
+ port->cable_desc.identity = &port->cable_ident;
+ /* Register Cable, set identity and svdm_version */
+ port->cable = typec_register_cable(port->typec_port, &port->cable_desc);
+ if (IS_ERR_OR_NULL(port->cable))
+ return;
+ typec_cable_set_identity(port->cable);
+ /* Get SVDM version */
+ svdm_version = PD_VDO_SVDM_VER(p[VDO_INDEX_HDR]);
+ typec_cable_set_svdm_version(port->cable, svdm_version);
+
+register_plug:
+ if (IS_ERR_OR_NULL(port->plug_prime)) {
+ port->plug_prime_desc.index = TYPEC_PLUG_SOP_P;
+ port->plug_prime = typec_register_plug(port->cable,
+ &port->plug_prime_desc);
+ }
+}
+
+static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt,
+ enum tcpm_transmit_type rx_sop_type)
+{
+ struct pd_mode_data *pmdata = rx_sop_type == TCPC_TX_SOP_PRIME ?
+ &port->mode_data_prime : &port->mode_data;
int i;
for (i = 1; i < cnt; i++) {
@@ -1556,14 +1694,29 @@ abort:
return false;
}
-static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt)
+static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt,
+ enum tcpm_transmit_type rx_sop_type)
{
struct pd_mode_data *pmdata = &port->mode_data;
struct typec_altmode_desc *paltmode;
int i;
- if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
- /* Already logged in svdm_consume_svids() */
+ switch (rx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ pmdata = &port->mode_data_prime;
+ if (pmdata->altmodes >= ARRAY_SIZE(port->plug_prime_altmode)) {
+ /* Already logged in svdm_consume_svids() */
+ return;
+ }
+ break;
+ case TCPC_TX_SOP:
+ pmdata = &port->mode_data;
+ if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
+ /* Already logged in svdm_consume_svids() */
+ return;
+ }
+ break;
+ default:
return;
}
@@ -1601,20 +1754,129 @@ static void tcpm_register_partner_altmodes(struct tcpm_port *port)
}
}
+static void tcpm_register_plug_altmodes(struct tcpm_port *port)
+{
+ struct pd_mode_data *modep = &port->mode_data_prime;
+ struct typec_altmode *altmode;
+ int i;
+
+ typec_plug_set_num_altmodes(port->plug_prime, modep->altmodes);
+
+ for (i = 0; i < modep->altmodes; i++) {
+ altmode = typec_plug_register_altmode(port->plug_prime,
+ &modep->altmode_desc[i]);
+ if (IS_ERR(altmode)) {
+ tcpm_log(port, "Failed to register plug SVID 0x%04x",
+ modep->altmode_desc[i].svid);
+ altmode = NULL;
+ }
+ port->plug_prime_altmode[i] = altmode;
+ }
+}
+
#define supports_modal(port) PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
+#define supports_modal_cable(port) PD_IDH_MODAL_SUPP((port)->cable_ident.id_header)
+#define supports_host(port) PD_IDH_HOST_SUPP((port)->partner_ident.id_header)
+
+/*
+ * Helper to determine whether the port is capable of SOP' communication at the
+ * current point in time.
+ */
+static bool tcpm_can_communicate_sop_prime(struct tcpm_port *port)
+{
+ /* Check to see if tcpc supports SOP' communication */
+ if (!port->tcpc->cable_comm_capable || !port->tcpc->cable_comm_capable(port->tcpc))
+ return false;
+ /*
+ * Power Delivery 2.0 Section 6.3.11
+ * Before communicating with a Cable Plug a Port Should ensure that it
+ * is the Vconn Source and that the Cable Plugs are powered by
+ * performing a Vconn swap if necessary. Since it cannot be guaranteed
+ * that the present Vconn Source is supplying Vconn, the only means to
+ * ensure that the Cable Plugs are powered is for a Port wishing to
+ * communicate with a Cable Plug to become the Vconn Source.
+ *
+ * Power Delivery 3.0 Section 6.3.11
+ * Before communicating with a Cable Plug a Port Shall ensure that it
+ * is the Vconn source.
+ */
+ if (port->vconn_role != TYPEC_SOURCE)
+ return false;
+ /*
+ * Power Delivery 2.0 Section 2.4.4
+ * When no Contract or an Implicit Contract is in place the Source can
+ * communicate with a Cable Plug using SOP' packets in order to discover
+ * its characteristics.
+ *
+ * Power Delivery 3.0 Section 2.4.4
+ * When no Contract or an Implicit Contract is in place only the Source
+ * port that is supplying Vconn is allowed to send packets to a Cable
+ * Plug and is allowed to respond to packets from the Cable Plug.
+ */
+ if (!port->explicit_contract)
+ return port->pwr_role == TYPEC_SOURCE;
+ if (port->negotiated_rev == PD_REV30)
+ return true;
+ /*
+ * Power Delivery 2.0 Section 2.4.4
+ *
+ * When an Explicit Contract is in place the DFP (either the Source or
+ * the Sink) can communicate with the Cable Plug(s) using SOP’/SOP”
+ * Packets (see Figure 2-3).
+ */
+ if (port->negotiated_rev == PD_REV20)
+ return port->data_role == TYPEC_HOST;
+ return false;
+}
+
+static bool tcpm_attempt_vconn_swap_discovery(struct tcpm_port *port)
+{
+ if (!port->tcpc->attempt_vconn_swap_discovery)
+ return false;
+
+ /* Port is already source, no need to perform swap */
+ if (port->vconn_role == TYPEC_SOURCE)
+ return false;
+
+ /*
+ * The partner needs to support Alternate Modes (Modal Operation). If
+ * the partner is also capable of being a USB Host, it could be a device
+ * that supports Alternate Modes as the DFP.
+ */
+ if (!supports_modal(port) || supports_host(port))
+ return false;
+
+ if ((port->negotiated_rev == PD_REV20 && port->data_role == TYPEC_HOST) ||
+ port->negotiated_rev == PD_REV30)
+ return port->tcpc->attempt_vconn_swap_discovery(port->tcpc);
+
+ return false;
+}
+
+
+static bool tcpm_cable_vdm_supported(struct tcpm_port *port)
+{
+ return !IS_ERR_OR_NULL(port->cable) &&
+ typec_cable_is_active(port->cable) &&
+ supports_modal_cable(port) &&
+ tcpm_can_communicate_sop_prime(port);
+}
static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
const u32 *p, int cnt, u32 *response,
- enum adev_actions *adev_action)
+ enum adev_actions *adev_action,
+ enum tcpm_transmit_type rx_sop_type,
+ enum tcpm_transmit_type *response_tx_sop_type)
{
struct typec_port *typec = port->typec_port;
- struct typec_altmode *pdev;
- struct pd_mode_data *modep;
+ struct typec_altmode *pdev, *pdev_prime;
+ struct pd_mode_data *modep, *modep_prime;
int svdm_version;
int rlen = 0;
int cmd_type;
int cmd;
int i;
+ int ret;
cmd_type = PD_VDO_CMDT(p[0]);
cmd = PD_VDO_CMD(p[0]);
@@ -1622,17 +1884,54 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
p[0], cmd_type, cmd, cnt);
- modep = &port->mode_data;
-
- pdev = typec_match_altmode(port->partner_altmode, ALTMODE_DISCOVERY_MAX,
- PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
-
- svdm_version = typec_get_negotiated_svdm_version(typec);
- if (svdm_version < 0)
- return 0;
+ switch (rx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ modep_prime = &port->mode_data_prime;
+ pdev_prime = typec_match_altmode(port->plug_prime_altmode,
+ ALTMODE_DISCOVERY_MAX,
+ PD_VDO_VID(p[0]),
+ PD_VDO_OPOS(p[0]));
+ svdm_version = typec_get_cable_svdm_version(typec);
+ /*
+ * Update SVDM version if cable was discovered before port partner.
+ */
+ if (!IS_ERR_OR_NULL(port->cable) &&
+ PD_VDO_SVDM_VER(p[0]) < svdm_version)
+ typec_cable_set_svdm_version(port->cable, svdm_version);
+ break;
+ case TCPC_TX_SOP:
+ modep = &port->mode_data;
+ pdev = typec_match_altmode(port->partner_altmode,
+ ALTMODE_DISCOVERY_MAX,
+ PD_VDO_VID(p[0]),
+ PD_VDO_OPOS(p[0]));
+ svdm_version = typec_get_negotiated_svdm_version(typec);
+ if (svdm_version < 0)
+ return 0;
+ break;
+ default:
+ modep = &port->mode_data;
+ pdev = typec_match_altmode(port->partner_altmode,
+ ALTMODE_DISCOVERY_MAX,
+ PD_VDO_VID(p[0]),
+ PD_VDO_OPOS(p[0]));
+ svdm_version = typec_get_negotiated_svdm_version(typec);
+ if (svdm_version < 0)
+ return 0;
+ break;
+ }
switch (cmd_type) {
case CMDT_INIT:
+ /*
+ * Only the port or the port partner is allowed to initiate SVDM
+ * commands over SOP'. If the port partner initiates a sequence
+ * when it is not allowed to send SOP' messages, drop the message
+ * rather than processing it.
+ */
+ if (rx_sop_type == TCPC_TX_SOP_PRIME)
+ return 0;
+
switch (cmd) {
case CMD_DISCOVER_IDENT:
if (PD_VDO_VID(p[0]) != USB_SID_PD)
@@ -1699,55 +1998,186 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
(VDO_SVDM_VERS(typec_get_negotiated_svdm_version(typec)));
break;
case CMDT_RSP_ACK:
- /* silently drop message if we are not connected */
- if (IS_ERR_OR_NULL(port->partner))
+ /*
+ * Silently drop the message if we are not connected, but still process
+ * a SOP' Discover Identity response received prior to an explicit contract.
+ */
+ if (IS_ERR_OR_NULL(port->partner) &&
+ !(rx_sop_type == TCPC_TX_SOP_PRIME && cmd == CMD_DISCOVER_IDENT))
break;
tcpm_ams_finish(port);
switch (cmd) {
+ /*
+ * SVDM Command Flow for SOP and SOP':
+ * SOP Discover Identity
+ * SOP' Discover Identity
+ * SOP Discover SVIDs
+ * Discover Modes
+ * (Active Cables)
+ * SOP' Discover SVIDs
+ * Discover Modes
+ *
+ * Perform Discover SOP' if the port can communicate with cable
+ * plug.
+ */
case CMD_DISCOVER_IDENT:
- if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
- typec_partner_set_svdm_version(port->partner,
- PD_VDO_SVDM_VER(p[0]));
- /* 6.4.4.3.1 */
- svdm_consume_identity(port, p, cnt);
- response[0] = VDO(USB_SID_PD, 1, typec_get_negotiated_svdm_version(typec),
- CMD_DISCOVER_SVID);
- rlen = 1;
+ switch (rx_sop_type) {
+ case TCPC_TX_SOP:
+ if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
+ typec_partner_set_svdm_version(port->partner,
+ PD_VDO_SVDM_VER(p[0]));
+ /* If cable is discovered before partner, downgrade svdm */
+ if (!IS_ERR_OR_NULL(port->cable) &&
+ (typec_get_cable_svdm_version(port->typec_port) >
+ svdm_version))
+ typec_cable_set_svdm_version(port->cable,
+ svdm_version);
+ }
+ /* 6.4.4.3.1 */
+ svdm_consume_identity(port, p, cnt);
+ /* Attempt Vconn swap, delay SOP' discovery if necessary */
+ if (tcpm_attempt_vconn_swap_discovery(port)) {
+ port->send_discover_prime = true;
+ port->upcoming_state = VCONN_SWAP_SEND;
+ ret = tcpm_ams_start(port, VCONN_SWAP);
+ if (!ret)
+ return 0;
+ /* Cannot perform Vconn swap */
+ port->upcoming_state = INVALID_STATE;
+ port->send_discover_prime = false;
+ }
+
+ /*
+ * Attempt Discover Identity on SOP' if the
+ * cable was not discovered previously, and use
+ * the SVDM version of the partner to probe.
+ */
+ if (IS_ERR_OR_NULL(port->cable) &&
+ tcpm_can_communicate_sop_prime(port)) {
+ *response_tx_sop_type = TCPC_TX_SOP_PRIME;
+ port->send_discover_prime = true;
+ response[0] = VDO(USB_SID_PD, 1,
+ typec_get_negotiated_svdm_version(typec),
+ CMD_DISCOVER_IDENT);
+ rlen = 1;
+ } else {
+ *response_tx_sop_type = TCPC_TX_SOP;
+ response[0] = VDO(USB_SID_PD, 1,
+ typec_get_negotiated_svdm_version(typec),
+ CMD_DISCOVER_SVID);
+ rlen = 1;
+ }
+ break;
+ case TCPC_TX_SOP_PRIME:
+ /*
+ * svdm_consume_identity_sop_prime will determine
+ * the svdm_version for the cable moving forward.
+ */
+ svdm_consume_identity_sop_prime(port, p, cnt);
+
+ /*
+ * If received in SRC_VDM_IDENTITY_REQUEST, continue
+ * to SRC_SEND_CAPABILITIES
+ */
+ if (port->state == SRC_VDM_IDENTITY_REQUEST) {
+ tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+ return 0;
+ }
+
+ *response_tx_sop_type = TCPC_TX_SOP;
+ response[0] = VDO(USB_SID_PD, 1,
+ typec_get_negotiated_svdm_version(typec),
+ CMD_DISCOVER_SVID);
+ rlen = 1;
+ break;
+ default:
+ return 0;
+ }
break;
case CMD_DISCOVER_SVID:
+ *response_tx_sop_type = rx_sop_type;
/* 6.4.4.3.2 */
- if (svdm_consume_svids(port, p, cnt)) {
+ if (svdm_consume_svids(port, p, cnt, rx_sop_type)) {
response[0] = VDO(USB_SID_PD, 1, svdm_version, CMD_DISCOVER_SVID);
rlen = 1;
- } else if (modep->nsvids && supports_modal(port)) {
- response[0] = VDO(modep->svids[0], 1, svdm_version,
- CMD_DISCOVER_MODES);
- rlen = 1;
+ } else {
+ if (rx_sop_type == TCPC_TX_SOP) {
+ if (modep->nsvids && supports_modal(port)) {
+ response[0] = VDO(modep->svids[0], 1, svdm_version,
+ CMD_DISCOVER_MODES);
+ rlen = 1;
+ }
+ } else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
+ if (modep_prime->nsvids) {
+ response[0] = VDO(modep_prime->svids[0], 1,
+ svdm_version, CMD_DISCOVER_MODES);
+ rlen = 1;
+ }
+ }
}
break;
case CMD_DISCOVER_MODES:
- /* 6.4.4.3.3 */
- svdm_consume_modes(port, p, cnt);
- modep->svid_index++;
- if (modep->svid_index < modep->nsvids) {
- u16 svid = modep->svids[modep->svid_index];
- response[0] = VDO(svid, 1, svdm_version, CMD_DISCOVER_MODES);
- rlen = 1;
- } else {
- tcpm_register_partner_altmodes(port);
+ if (rx_sop_type == TCPC_TX_SOP) {
+ /* 6.4.4.3.3 */
+ svdm_consume_modes(port, p, cnt, rx_sop_type);
+ modep->svid_index++;
+ if (modep->svid_index < modep->nsvids) {
+ u16 svid = modep->svids[modep->svid_index];
+ *response_tx_sop_type = TCPC_TX_SOP;
+ response[0] = VDO(svid, 1, svdm_version,
+ CMD_DISCOVER_MODES);
+ rlen = 1;
+ } else if (tcpm_cable_vdm_supported(port)) {
+ *response_tx_sop_type = TCPC_TX_SOP_PRIME;
+ response[0] = VDO(USB_SID_PD, 1,
+ typec_get_cable_svdm_version(typec),
+ CMD_DISCOVER_SVID);
+ rlen = 1;
+ } else {
+ tcpm_register_partner_altmodes(port);
+ }
+ } else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
+ /* 6.4.4.3.3 */
+ svdm_consume_modes(port, p, cnt, rx_sop_type);
+ modep_prime->svid_index++;
+ if (modep_prime->svid_index < modep_prime->nsvids) {
+ u16 svid = modep_prime->svids[modep_prime->svid_index];
+ *response_tx_sop_type = TCPC_TX_SOP_PRIME;
+ response[0] = VDO(svid, 1,
+ typec_get_cable_svdm_version(typec),
+ CMD_DISCOVER_MODES);
+ rlen = 1;
+ } else {
+ tcpm_register_plug_altmodes(port);
+ tcpm_register_partner_altmodes(port);
+ }
}
break;
case CMD_ENTER_MODE:
- if (adev && pdev)
- *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
+ *response_tx_sop_type = rx_sop_type;
+ if (rx_sop_type == TCPC_TX_SOP) {
+ if (adev && pdev) {
+ typec_altmode_update_active(pdev, true);
+ *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
+ }
+ } else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
+ if (adev && pdev_prime) {
+ typec_altmode_update_active(pdev_prime, true);
+ *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
+ }
+ }
return 0;
case CMD_EXIT_MODE:
- if (adev && pdev) {
- /* Back to USB Operation */
- *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
- return 0;
+ *response_tx_sop_type = rx_sop_type;
+ if (rx_sop_type == TCPC_TX_SOP) {
+ if (adev && pdev) {
+ typec_altmode_update_active(pdev, false);
+ /* Back to USB Operation */
+ *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
+ return 0;
+ }
}
break;
case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
@@ -1800,13 +2230,15 @@ static void tcpm_pd_handle_msg(struct tcpm_port *port,
enum tcpm_ams ams);
static void tcpm_handle_vdm_request(struct tcpm_port *port,
- const __le32 *payload, int cnt)
+ const __le32 *payload, int cnt,
+ enum tcpm_transmit_type rx_sop_type)
{
enum adev_actions adev_action = ADEV_NONE;
struct typec_altmode *adev;
u32 p[PD_MAX_PAYLOAD];
u32 response[8] = { };
int i, rlen = 0;
+ enum tcpm_transmit_type response_tx_sop_type = TCPC_TX_SOP;
for (i = 0; i < cnt; i++)
p[i] = le32_to_cpu(payload[i]);
@@ -1841,7 +2273,8 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
* - We will send NAK and the flag will be cleared in the state machine.
*/
port->vdm_sm_running = true;
- rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
+ rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action,
+ rx_sop_type, &response_tx_sop_type);
} else {
if (port->negotiated_rev >= PD_REV30)
tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
@@ -1877,19 +2310,37 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
typec_altmode_vdm(adev, p[0], &p[1], cnt);
break;
case ADEV_QUEUE_VDM:
- typec_altmode_vdm(adev, p[0], &p[1], cnt);
+ if (response_tx_sop_type == TCPC_TX_SOP_PRIME)
+ typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P, p[0], &p[1], cnt);
+ else
+ typec_altmode_vdm(adev, p[0], &p[1], cnt);
break;
case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL:
- if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
- int svdm_version = typec_get_negotiated_svdm_version(
- port->typec_port);
- if (svdm_version < 0)
- break;
-
- response[0] = VDO(adev->svid, 1, svdm_version,
- CMD_EXIT_MODE);
- response[0] |= VDO_OPOS(adev->mode);
- rlen = 1;
+ if (response_tx_sop_type == TCPC_TX_SOP_PRIME) {
+ if (typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P,
+ p[0], &p[1], cnt)) {
+ int svdm_version = typec_get_cable_svdm_version(
+ port->typec_port);
+ if (svdm_version < 0)
+ break;
+
+ response[0] = VDO(adev->svid, 1, svdm_version,
+ CMD_EXIT_MODE);
+ response[0] |= VDO_OPOS(adev->mode);
+ rlen = 1;
+ }
+ } else {
+ if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
+ int svdm_version = typec_get_negotiated_svdm_version(
+ port->typec_port);
+ if (svdm_version < 0)
+ break;
+
+ response[0] = VDO(adev->svid, 1, svdm_version,
+ CMD_EXIT_MODE);
+ response[0] |= VDO_OPOS(adev->mode);
+ rlen = 1;
+ }
}
break;
case ADEV_ATTENTION:
@@ -1909,19 +2360,38 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
mutex_lock(&port->lock);
if (rlen > 0)
- tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
+ tcpm_queue_vdm(port, response[0], &response[1], rlen - 1, response_tx_sop_type);
else
port->vdm_sm_running = false;
}
static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
- const u32 *data, int count)
+ const u32 *data, int count, enum tcpm_transmit_type tx_sop_type)
{
- int svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
+ int svdm_version;
u32 header;
- if (svdm_version < 0)
- return;
+ switch (tx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ /*
+ * Use the cable's SVDM Version if it has already been discovered;
+ * otherwise fall back to the maximum supported version below
+ */
+ svdm_version = typec_get_cable_svdm_version(port->typec_port);
+ if (svdm_version < 0)
+ svdm_version = SVDM_VER_MAX;
+ break;
+ case TCPC_TX_SOP:
+ svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
+ if (svdm_version < 0)
+ return;
+ break;
+ default:
+ svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
+ if (svdm_version < 0)
+ return;
+ break;
+ }
if (WARN_ON(count > VDO_MAX_SIZE - 1))
count = VDO_MAX_SIZE - 1;
@@ -1930,7 +2400,7 @@ static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION),
svdm_version, cmd);
- tcpm_queue_vdm(port, header, data, count);
+ tcpm_queue_vdm(port, header, data, count, tx_sop_type);
}
static unsigned int vdm_ready_timeout(u32 vdm_hdr)
@@ -1964,6 +2434,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
struct pd_message msg;
int i, res = 0;
u32 vdo_hdr = port->vdo_data[0];
+ u32 response[8] = { };
switch (port->vdm_state) {
case VDM_STATE_READY:
@@ -1977,7 +2448,8 @@ static void vdm_run_state_machine(struct tcpm_port *port)
* if there's traffic or we're not in PDO ready state don't send
* a VDM.
*/
- if (port->state != SRC_READY && port->state != SNK_READY) {
+ if (port->state != SRC_READY && port->state != SNK_READY &&
+ port->state != SRC_VDM_IDENTITY_REQUEST) {
port->vdm_sm_running = false;
break;
}
@@ -1988,7 +2460,17 @@ static void vdm_run_state_machine(struct tcpm_port *port)
case CMD_DISCOVER_IDENT:
res = tcpm_ams_start(port, DISCOVER_IDENTITY);
if (res == 0) {
- port->send_discover = false;
+ switch (port->tx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ port->send_discover_prime = false;
+ break;
+ case TCPC_TX_SOP:
+ port->send_discover = false;
+ break;
+ default:
+ port->send_discover = false;
+ break;
+ }
} else if (res == -EAGAIN) {
port->vdo_data[0] = 0;
mod_send_discover_delayed_work(port,
@@ -2044,12 +2526,21 @@ static void vdm_run_state_machine(struct tcpm_port *port)
break;
case VDM_STATE_ERR_SEND:
/*
+ * When sending Discover Identity to SOP' before establishing an
+ * explicit contract, do not retry. Instead, interleave sending
+ * Source_Capabilities over SOP and Discover Identity over SOP'.
+ */
+ if (port->state == SRC_VDM_IDENTITY_REQUEST) {
+ tcpm_ams_finish(port);
+ port->vdm_state = VDM_STATE_DONE;
+ tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+ /*
* A partner which does not support USB PD will not reply,
* so this is not a fatal error. At the same time, some
* devices may not return GoodCRC under some circumstances,
* so we need to retry.
*/
- if (port->vdm_retries < 3) {
+ } else if (port->vdm_retries < 3) {
tcpm_log(port, "VDM Tx error, retry");
port->vdm_retries++;
port->vdm_state = VDM_STATE_READY;
@@ -2057,19 +2548,59 @@ static void vdm_run_state_machine(struct tcpm_port *port)
tcpm_ams_finish(port);
} else {
tcpm_ams_finish(port);
+ if (port->tx_sop_type == TCPC_TX_SOP)
+ break;
+ /* Handle SOP' Transmission Errors */
+ switch (PD_VDO_CMD(vdo_hdr)) {
+ /*
+ * If Discover Identity fails on SOP', then resume
+ * discovery process on SOP only.
+ */
+ case CMD_DISCOVER_IDENT:
+ port->vdo_data[0] = 0;
+ response[0] = VDO(USB_SID_PD, 1,
+ typec_get_negotiated_svdm_version(
+ port->typec_port),
+ CMD_DISCOVER_SVID);
+ tcpm_queue_vdm(port, response[0], &response[1],
+ 0, TCPC_TX_SOP);
+ break;
+ /*
+ * If Discover SVIDs or Discover Modes fail, then
+ * proceed with Alt Mode discovery process on SOP.
+ */
+ case CMD_DISCOVER_SVID:
+ tcpm_register_partner_altmodes(port);
+ break;
+ case CMD_DISCOVER_MODES:
+ tcpm_register_partner_altmodes(port);
+ break;
+ default:
+ break;
+ }
}
break;
case VDM_STATE_SEND_MESSAGE:
/* Prepare and send VDM */
memset(&msg, 0, sizeof(msg));
- msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
- port->pwr_role,
- port->data_role,
- port->negotiated_rev,
- port->message_id, port->vdo_count);
+ if (port->tx_sop_type == TCPC_TX_SOP_PRIME) {
+ msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
+ 0, /* Cable Plug Indicator for DFP/UFP */
+ 0, /* Reserved */
+ port->negotiated_rev_prime,
+ port->message_id_prime,
+ port->vdo_count);
+ } else {
+ msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
+ port->pwr_role,
+ port->data_role,
+ port->negotiated_rev,
+ port->message_id,
+ port->vdo_count);
+ }
for (i = 0; i < port->vdo_count; i++)
msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
- res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
+ res = tcpm_pd_transmit(port, port->tx_sop_type, &msg);
if (res < 0) {
port->vdm_state = VDM_STATE_ERR_SEND;
} else {
@@ -2244,7 +2775,7 @@ static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
header |= VDO_OPOS(altmode->mode);
- tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0);
+ tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP);
return 0;
}
@@ -2261,7 +2792,7 @@ static int tcpm_altmode_exit(struct typec_altmode *altmode)
header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
header |= VDO_OPOS(altmode->mode);
- tcpm_queue_vdm_unlocked(port, header, NULL, 0);
+ tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP);
return 0;
}
@@ -2270,7 +2801,7 @@ static int tcpm_altmode_vdm(struct typec_altmode *altmode,
{
struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
- tcpm_queue_vdm_unlocked(port, header, data, count - 1);
+ tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP);
return 0;
}
@@ -2281,6 +2812,58 @@ static const struct typec_altmode_ops tcpm_altmode_ops = {
.vdm = tcpm_altmode_vdm,
};
+
+static int tcpm_cable_altmode_enter(struct typec_altmode *altmode, enum typec_plug_index sop,
+ u32 *vdo)
+{
+ struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
+ int svdm_version;
+ u32 header;
+
+ svdm_version = typec_get_cable_svdm_version(port->typec_port);
+ if (svdm_version < 0)
+ return svdm_version;
+
+ header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
+ header |= VDO_OPOS(altmode->mode);
+
+ tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP_PRIME);
+ return 0;
+}
+
+static int tcpm_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plug_index sop)
+{
+ struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
+ int svdm_version;
+ u32 header;
+
+ svdm_version = typec_get_cable_svdm_version(port->typec_port);
+ if (svdm_version < 0)
+ return svdm_version;
+
+ header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
+ header |= VDO_OPOS(altmode->mode);
+
+ tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP_PRIME);
+ return 0;
+}
+
+static int tcpm_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug_index sop,
+ u32 header, const u32 *data, int count)
+{
+ struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
+
+ tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP_PRIME);
+
+ return 0;
+}
+
+static const struct typec_cable_ops tcpm_cable_ops = {
+ .enter = tcpm_cable_altmode_enter,
+ .exit = tcpm_cable_altmode_exit,
+ .vdm = tcpm_cable_altmode_vdm,
+};
+
/*
* PD (data, control) command handling functions
*/
@@ -2293,7 +2876,8 @@ static inline enum tcpm_state ready_state(struct tcpm_port *port)
}
static int tcpm_pd_send_control(struct tcpm_port *port,
- enum pd_ctrl_msg_type type);
+ enum pd_ctrl_msg_type type,
+ enum tcpm_transmit_type tx_sop_type);
static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
int cnt)
@@ -2455,7 +3039,8 @@ static int tcpm_register_sink_caps(struct tcpm_port *port)
}
static void tcpm_pd_data_request(struct tcpm_port *port,
- const struct pd_message *msg)
+ const struct pd_message *msg,
+ enum tcpm_transmit_type rx_sop_type)
{
enum pd_data_msg_type type = pd_header_type_le(msg->header);
unsigned int cnt = pd_header_cnt_le(msg->header);
@@ -2496,8 +3081,11 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
break;
}
- if (rev < PD_MAX_REV)
+ if (rev < PD_MAX_REV) {
port->negotiated_rev = rev;
+ if (port->negotiated_rev_prime > port->negotiated_rev)
+ port->negotiated_rev_prime = port->negotiated_rev;
+ }
if (port->pwr_role == TYPEC_SOURCE) {
if (port->ams == GET_SOURCE_CAPABILITIES)
@@ -2548,8 +3136,11 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
break;
}
- if (rev < PD_MAX_REV)
+ if (rev < PD_MAX_REV) {
port->negotiated_rev = rev;
+ if (port->negotiated_rev_prime > port->negotiated_rev)
+ port->negotiated_rev_prime = port->negotiated_rev;
+ }
if (port->pwr_role != TYPEC_SOURCE || cnt != 1) {
tcpm_pd_handle_msg(port,
@@ -2605,7 +3196,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
NONE_AMS);
break;
case PD_DATA_VENDOR_DEF:
- tcpm_handle_vdm_request(port, msg->payload, cnt);
+ tcpm_handle_vdm_request(port, msg->payload, cnt, rx_sop_type);
break;
case PD_DATA_BIST:
port->bist_request = le32_to_cpu(msg->payload[0]);
@@ -2647,10 +3238,12 @@ static void tcpm_pps_complete(struct tcpm_port *port, int result)
}
static void tcpm_pd_ctrl_request(struct tcpm_port *port,
- const struct pd_message *msg)
+ const struct pd_message *msg,
+ enum tcpm_transmit_type rx_sop_type)
{
enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
enum tcpm_state next_state;
+ unsigned int rev = pd_header_rev_le(msg->header);
/*
* Stop VDM state machine if interrupted by other Messages while NOT_SUPP is allowed in
@@ -2815,6 +3408,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
case SOFT_RESET_SEND:
if (port->ams == SOFT_RESET_AMS)
tcpm_ams_finish(port);
+ /*
+ * A SOP' Soft Reset is sent after a Vconn Swap; once it completes,
+ * the port returns to the ready state
+ */
+ if (rx_sop_type == TCPC_TX_SOP_PRIME) {
+ if (rev < port->negotiated_rev_prime)
+ port->negotiated_rev_prime = rev;
+ tcpm_set_state(port, ready_state(port), 0);
+ break;
+ }
if (port->pwr_role == TYPEC_SOURCE) {
port->upcoming_state = SRC_SEND_CAPABILITIES;
tcpm_ams_start(port, POWER_NEGOTIATION);
@@ -2981,6 +3584,7 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
const struct pd_message *msg = &event->msg;
unsigned int cnt = pd_header_cnt_le(msg->header);
struct tcpm_port *port = event->port;
+ enum tcpm_transmit_type rx_sop_type = event->rx_sop_type;
mutex_lock(&port->lock);
@@ -2992,6 +3596,14 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
unsigned int msgid = pd_header_msgid_le(msg->header);
/*
+ * Drop SOP' messages if the port is not currently allowed to
+ * communicate over SOP', per tcpm_can_communicate_sop_prime()
+ */
+ if (rx_sop_type == TCPC_TX_SOP_PRIME &&
+ !tcpm_can_communicate_sop_prime(port))
+ goto done;
+
+ /*
* USB PD standard, 6.6.1.2:
* "... if MessageID value in a received Message is the
* same as the stored value, the receiver shall return a
@@ -3000,16 +3612,26 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
* Message). Note: this shall not apply to the Soft_Reset
* Message which always has a MessageID value of zero."
*/
- if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
- goto done;
- port->rx_msgid = msgid;
+ switch (rx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ if (msgid == port->rx_msgid_prime)
+ goto done;
+ port->rx_msgid_prime = msgid;
+ break;
+ case TCPC_TX_SOP:
+ default:
+ if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
+ goto done;
+ port->rx_msgid = msgid;
+ break;
+ }
/*
* If both ends believe to be DFP/host, we have a data role
* mismatch.
*/
if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
- (port->data_role == TYPEC_HOST)) {
+ (port->data_role == TYPEC_HOST) && rx_sop_type == TCPC_TX_SOP) {
tcpm_log(port,
"Data role mismatch, initiating error recovery");
tcpm_set_state(port, ERROR_RECOVERY, 0);
@@ -3017,9 +3639,9 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
tcpm_pd_ext_msg_request(port, msg);
else if (cnt)
- tcpm_pd_data_request(port, msg);
+ tcpm_pd_data_request(port, msg, rx_sop_type);
else
- tcpm_pd_ctrl_request(port, msg);
+ tcpm_pd_ctrl_request(port, msg, rx_sop_type);
}
}
@@ -3028,7 +3650,8 @@ done:
kfree(event);
}
-void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
+void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg,
+ enum tcpm_transmit_type rx_sop_type)
{
struct pd_rx_event *event;
@@ -3038,23 +3661,47 @@ void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
kthread_init_work(&event->work, tcpm_pd_rx_handler);
event->port = port;
+ event->rx_sop_type = rx_sop_type;
memcpy(&event->msg, msg, sizeof(*msg));
kthread_queue_work(port->wq, &event->work);
}
EXPORT_SYMBOL_GPL(tcpm_pd_receive);
static int tcpm_pd_send_control(struct tcpm_port *port,
- enum pd_ctrl_msg_type type)
+ enum pd_ctrl_msg_type type,
+ enum tcpm_transmit_type tx_sop_type)
{
struct pd_message msg;
memset(&msg, 0, sizeof(msg));
- msg.header = PD_HEADER_LE(type, port->pwr_role,
- port->data_role,
- port->negotiated_rev,
- port->message_id, 0);
+ switch (tx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ msg.header = PD_HEADER_LE(type,
+ 0, /* Cable Plug Indicator for DFP/UFP */
+ 0, /* Reserved */
+ port->negotiated_rev,
+ port->message_id_prime,
+ 0);
+ break;
+ case TCPC_TX_SOP:
+ msg.header = PD_HEADER_LE(type,
+ port->pwr_role,
+ port->data_role,
+ port->negotiated_rev,
+ port->message_id,
+ 0);
+ break;
+ default:
+ msg.header = PD_HEADER_LE(type,
+ port->pwr_role,
+ port->data_role,
+ port->negotiated_rev,
+ port->message_id,
+ 0);
+ break;
+ }
- return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
+ return tcpm_pd_transmit(port, tx_sop_type, &msg);
}
/*
@@ -3073,13 +3720,13 @@ static bool tcpm_send_queued_message(struct tcpm_port *port)
switch (queued_message) {
case PD_MSG_CTRL_WAIT:
- tcpm_pd_send_control(port, PD_CTRL_WAIT);
+ tcpm_pd_send_control(port, PD_CTRL_WAIT, TCPC_TX_SOP);
break;
case PD_MSG_CTRL_REJECT:
- tcpm_pd_send_control(port, PD_CTRL_REJECT);
+ tcpm_pd_send_control(port, PD_CTRL_REJECT, TCPC_TX_SOP);
break;
case PD_MSG_CTRL_NOT_SUPP:
- tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
+ tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
break;
case PD_MSG_DATA_SINK_CAP:
ret = tcpm_pd_send_sink_caps(port);
@@ -3649,6 +4296,7 @@ static int tcpm_src_attach(struct tcpm_port *port)
port->attached = true;
port->send_discover = true;
+ port->send_discover_prime = false;
return 0;
@@ -3665,6 +4313,15 @@ out_disable_mux:
static void tcpm_typec_disconnect(struct tcpm_port *port)
{
+ /*
+ * Unregister the plug/cable outside of the port->connected check because
+ * the cable can be discovered before the SRC_READY/SNK_READY states where
+ * port->connected is set.
+ */
+ typec_unregister_plug(port->plug_prime);
+ typec_unregister_cable(port->cable);
+ port->plug_prime = NULL;
+ port->cable = NULL;
if (port->connected) {
typec_partner_set_usb_power_delivery(port->partner, NULL);
typec_unregister_partner(port->partner);
@@ -3676,14 +4333,20 @@ static void tcpm_typec_disconnect(struct tcpm_port *port)
static void tcpm_unregister_altmodes(struct tcpm_port *port)
{
struct pd_mode_data *modep = &port->mode_data;
+ struct pd_mode_data *modep_prime = &port->mode_data_prime;
int i;
for (i = 0; i < modep->altmodes; i++) {
typec_unregister_altmode(port->partner_altmode[i]);
port->partner_altmode[i] = NULL;
}
+ for (i = 0; i < modep_prime->altmodes; i++) {
+ typec_unregister_altmode(port->plug_prime_altmode[i]);
+ port->plug_prime_altmode[i] = NULL;
+ }
memset(modep, 0, sizeof(*modep));
+ memset(modep_prime, 0, sizeof(*modep_prime));
}
static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
@@ -3712,6 +4375,7 @@ static void tcpm_reset_port(struct tcpm_port *port)
* we can check tcpm_pd_rx_handler() if we had seen it before.
*/
port->rx_msgid = -1;
+ port->rx_msgid_prime = -1;
port->tcpc->set_pd_rx(port->tcpc, false);
tcpm_init_vbus(port); /* also disables charging */
@@ -3783,6 +4447,7 @@ static int tcpm_snk_attach(struct tcpm_port *port)
port->attached = true;
port->send_discover = true;
+ port->send_discover_prime = false;
return 0;
}
@@ -4023,8 +4688,11 @@ static void run_state_machine(struct tcpm_port *port)
port->pwr_opmode = TYPEC_PWR_MODE_USB;
port->caps_count = 0;
port->negotiated_rev = PD_MAX_REV;
+ port->negotiated_rev_prime = PD_MAX_REV;
port->message_id = 0;
+ port->message_id_prime = 0;
port->rx_msgid = -1;
+ port->rx_msgid_prime = -1;
port->explicit_contract = false;
/* SNK -> SRC POWER/FAST_ROLE_SWAP finished */
if (port->ams == POWER_ROLE_SWAP ||
@@ -4045,8 +4713,12 @@ static void run_state_machine(struct tcpm_port *port)
}
ret = tcpm_pd_send_source_caps(port);
if (ret < 0) {
- tcpm_set_state(port, SRC_SEND_CAPABILITIES,
- PD_T_SEND_SOURCE_CAP);
+ if (tcpm_can_communicate_sop_prime(port) &&
+ IS_ERR_OR_NULL(port->cable))
+ tcpm_set_state(port, SRC_VDM_IDENTITY_REQUEST, 0);
+ else
+ tcpm_set_state(port, SRC_SEND_CAPABILITIES,
+ PD_T_SEND_SOURCE_CAP);
} else {
/*
* Per standard, we should clear the reset counter here.
@@ -4087,7 +4759,7 @@ static void run_state_machine(struct tcpm_port *port)
case SRC_NEGOTIATE_CAPABILITIES:
ret = tcpm_pd_check_request(port);
if (ret < 0) {
- tcpm_pd_send_control(port, PD_CTRL_REJECT);
+ tcpm_pd_send_control(port, PD_CTRL_REJECT, TCPC_TX_SOP);
if (!port->explicit_contract) {
tcpm_set_state(port,
SRC_WAIT_NEW_CAPABILITIES, 0);
@@ -4095,7 +4767,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, SRC_READY, 0);
}
} else {
- tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
tcpm_set_partner_usb_comm_capable(port,
!!(port->sink_request & RDO_USB_COMM));
tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
@@ -4104,7 +4776,7 @@ static void run_state_machine(struct tcpm_port *port)
break;
case SRC_TRANSITION_SUPPLY:
/* XXX: regulator_set_voltage(vbus, ...) */
- tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
+ tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
port->explicit_contract = true;
typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
port->pwr_opmode = TYPEC_PWR_MODE_PD;
@@ -4141,14 +4813,23 @@ static void run_state_machine(struct tcpm_port *port)
* 6.4.4.3.1 Discover Identity
* "The Discover Identity Command Shall only be sent to SOP when there is an
* Explicit Contract."
- * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
- * port->explicit_contract to decide whether to send the command.
+ *
+ * Cable identity (Discover Identity on SOP') should be discovered
+ * prior to the ready state, but if it was deferred until after a
+ * Vconn Swap following Discover Identity on SOP, the discovery
+ * process can be run here as well.
*/
if (port->explicit_contract) {
- tcpm_set_initial_svdm_version(port);
+ if (port->send_discover_prime) {
+ port->tx_sop_type = TCPC_TX_SOP_PRIME;
+ } else {
+ port->tx_sop_type = TCPC_TX_SOP;
+ tcpm_set_initial_svdm_version(port);
+ }
mod_send_discover_delayed_work(port, 0);
} else {
port->send_discover = false;
+ port->send_discover_prime = false;
}
/*
@@ -4264,8 +4945,11 @@ static void run_state_machine(struct tcpm_port *port)
typec_set_pwr_opmode(port->typec_port, opmode);
port->pwr_opmode = TYPEC_PWR_MODE_USB;
port->negotiated_rev = PD_MAX_REV;
+ port->negotiated_rev_prime = PD_MAX_REV;
port->message_id = 0;
+ port->message_id_prime = 0;
port->rx_msgid = -1;
+ port->rx_msgid_prime = -1;
port->explicit_contract = false;
if (port->ams == POWER_ROLE_SWAP ||
@@ -4437,14 +5121,23 @@ static void run_state_machine(struct tcpm_port *port)
* 6.4.4.3.1 Discover Identity
* "The Discover Identity Command Shall only be sent to SOP when there is an
* Explicit Contract."
- * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
- * port->explicit_contract.
+ *
+ * Cable identity (Discover Identity on SOP') should be discovered
+ * prior to the ready state, but if it was deferred until after a
+ * Vconn Swap following Discover Identity on SOP, the discovery
+ * process can be run here as well.
*/
if (port->explicit_contract) {
- tcpm_set_initial_svdm_version(port);
+ if (port->send_discover_prime) {
+ port->tx_sop_type = TCPC_TX_SOP_PRIME;
+ } else {
+ port->tx_sop_type = TCPC_TX_SOP;
+ tcpm_set_initial_svdm_version(port);
+ }
mod_send_discover_delayed_work(port, 0);
} else {
port->send_discover = false;
+ port->send_discover_prime = false;
}
power_supply_changed(port->psy);
@@ -4485,6 +5178,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_unregister_altmodes(port);
port->nr_sink_caps = 0;
port->send_discover = true;
+ port->send_discover_prime = false;
if (port->pwr_role == TYPEC_SOURCE)
tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
PD_T_PS_HARD_RESET);
@@ -4586,7 +5280,7 @@ static void run_state_machine(struct tcpm_port *port)
/* remove existing capabilities */
usb_power_delivery_unregister_capabilities(port->partner_source_caps);
port->partner_source_caps = NULL;
- tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
tcpm_ams_finish(port);
if (port->pwr_role == TYPEC_SOURCE) {
port->upcoming_state = SRC_SEND_CAPABILITIES;
@@ -4603,35 +5297,53 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_ams_start(port, SOFT_RESET_AMS);
break;
case SOFT_RESET_SEND:
- port->message_id = 0;
- port->rx_msgid = -1;
- /* remove existing capabilities */
- usb_power_delivery_unregister_capabilities(port->partner_source_caps);
- port->partner_source_caps = NULL;
- if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
- tcpm_set_state_cond(port, hard_reset_state(port), 0);
- else
- tcpm_set_state_cond(port, hard_reset_state(port),
- PD_T_SENDER_RESPONSE);
+ /*
+ * Power Delivery 3.0 Section 6.3.13
+ *
+ * A Soft_Reset Message Shall be targeted at a specific entity
+ * depending on the type of SOP* packet used.
+ */
+ if (port->tx_sop_type == TCPC_TX_SOP_PRIME) {
+ port->message_id_prime = 0;
+ port->rx_msgid_prime = -1;
+ tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET, TCPC_TX_SOP_PRIME);
+ tcpm_set_state_cond(port, ready_state(port), PD_T_SENDER_RESPONSE);
+ } else {
+ port->message_id = 0;
+ port->rx_msgid = -1;
+ /* remove existing capabilities */
+ usb_power_delivery_unregister_capabilities(port->partner_source_caps);
+ port->partner_source_caps = NULL;
+ if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET, TCPC_TX_SOP))
+ tcpm_set_state_cond(port, hard_reset_state(port), 0);
+ else
+ tcpm_set_state_cond(port, hard_reset_state(port),
+ PD_T_SENDER_RESPONSE);
+ }
break;
/* DR_Swap states */
case DR_SWAP_SEND:
- tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
- if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
+ tcpm_pd_send_control(port, PD_CTRL_DR_SWAP, TCPC_TX_SOP);
+ if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) {
port->send_discover = true;
+ port->send_discover_prime = false;
+ }
tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
case DR_SWAP_ACCEPT:
- tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
- if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
+ if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) {
port->send_discover = true;
+ port->send_discover_prime = false;
+ }
tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
break;
case DR_SWAP_SEND_TIMEOUT:
tcpm_swap_complete(port, -ETIMEDOUT);
port->send_discover = false;
+ port->send_discover_prime = false;
tcpm_ams_finish(port);
tcpm_set_state(port, ready_state(port), 0);
break;
@@ -4648,7 +5360,7 @@ static void run_state_machine(struct tcpm_port *port)
break;
case FR_SWAP_SEND:
- if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP)) {
+ if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP, TCPC_TX_SOP)) {
tcpm_set_state(port, ERROR_RECOVERY, 0);
break;
}
@@ -4668,7 +5380,7 @@ static void run_state_machine(struct tcpm_port *port)
break;
case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
tcpm_set_pwr_role(port, TYPEC_SOURCE);
- if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
+ if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP)) {
tcpm_set_state(port, ERROR_RECOVERY, 0);
break;
}
@@ -4678,11 +5390,11 @@ static void run_state_machine(struct tcpm_port *port)
/* PR_Swap states */
case PR_SWAP_ACCEPT:
- tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
tcpm_set_state(port, PR_SWAP_START, 0);
break;
case PR_SWAP_SEND:
- tcpm_pd_send_control(port, PD_CTRL_PR_SWAP);
+ tcpm_pd_send_control(port, PD_CTRL_PR_SWAP, TCPC_TX_SOP);
tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
@@ -4724,7 +5436,7 @@ static void run_state_machine(struct tcpm_port *port)
* supply is turned off"
*/
tcpm_set_pwr_role(port, TYPEC_SINK);
- if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
+ if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP)) {
tcpm_set_state(port, ERROR_RECOVERY, 0);
break;
}
@@ -4771,17 +5483,17 @@ static void run_state_machine(struct tcpm_port *port)
* Source."
*/
tcpm_set_pwr_role(port, TYPEC_SOURCE);
- tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
+ tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
break;
case VCONN_SWAP_ACCEPT:
- tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
tcpm_ams_finish(port);
tcpm_set_state(port, VCONN_SWAP_START, 0);
break;
case VCONN_SWAP_SEND:
- tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP);
+ tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP, TCPC_TX_SOP);
tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
@@ -4800,14 +5512,34 @@ static void run_state_machine(struct tcpm_port *port)
PD_T_VCONN_SOURCE_ON);
break;
case VCONN_SWAP_TURN_ON_VCONN:
- tcpm_set_vconn(port, true);
- tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
- tcpm_set_state(port, ready_state(port), 0);
+ ret = tcpm_set_vconn(port, true);
+ tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
+ /*
+ * USB PD 3.0 Section 6.4.4.3.1
+ *
+ * Note that a Cable Plug or VPD will not be ready for PD
+ * Communication until tVCONNStable after VCONN has been applied
+ */
+ if (!ret)
+ tcpm_set_state(port, VCONN_SWAP_SEND_SOFT_RESET,
+ PD_T_VCONN_STABLE);
+ else
+ tcpm_set_state(port, ready_state(port), 0);
break;
case VCONN_SWAP_TURN_OFF_VCONN:
tcpm_set_vconn(port, false);
tcpm_set_state(port, ready_state(port), 0);
break;
+ case VCONN_SWAP_SEND_SOFT_RESET:
+ tcpm_swap_complete(port, port->swap_status);
+ if (tcpm_can_communicate_sop_prime(port)) {
+ port->tx_sop_type = TCPC_TX_SOP_PRIME;
+ port->upcoming_state = SOFT_RESET_SEND;
+ tcpm_ams_start(port, SOFT_RESET_AMS);
+ } else {
+ tcpm_set_state(port, ready_state(port), 0);
+ }
+ break;
case DR_SWAP_CANCEL:
case PR_SWAP_CANCEL:
@@ -4843,7 +5575,7 @@ static void run_state_machine(struct tcpm_port *port)
}
break;
case GET_STATUS_SEND:
- tcpm_pd_send_control(port, PD_CTRL_GET_STATUS);
+ tcpm_pd_send_control(port, PD_CTRL_GET_STATUS, TCPC_TX_SOP);
tcpm_set_state(port, GET_STATUS_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
@@ -4851,7 +5583,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, ready_state(port), 0);
break;
case GET_PPS_STATUS_SEND:
- tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS);
+ tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS, TCPC_TX_SOP);
tcpm_set_state(port, GET_PPS_STATUS_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
@@ -4859,7 +5591,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, ready_state(port), 0);
break;
case GET_SINK_CAP:
- tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP);
+ tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP, TCPC_TX_SOP);
tcpm_set_state(port, GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE);
break;
case GET_SINK_CAP_TIMEOUT:
@@ -4902,9 +5634,18 @@ static void run_state_machine(struct tcpm_port *port)
/* Chunk state */
case CHUNK_NOT_SUPP:
- tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
+ tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, 0);
break;
+
+ /* Cable states */
+ case SRC_VDM_IDENTITY_REQUEST:
+ port->send_discover_prime = true;
+ port->tx_sop_type = TCPC_TX_SOP_PRIME;
+ mod_send_discover_delayed_work(port, 0);
+ port->upcoming_state = SRC_SEND_CAPABILITIES;
+ break;
+
default:
WARN(1, "Unexpected port state %d\n", port->state);
break;
@@ -5596,7 +6337,8 @@ static void tcpm_enable_frs_work(struct kthread_work *work)
goto unlock;
/* Send when the state machine is idle */
- if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover)
+ if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover ||
+ port->send_discover_prime)
goto resched;
port->upcoming_state = GET_SINK_CAP;
@@ -5619,21 +6361,23 @@ static void tcpm_send_discover_work(struct kthread_work *work)
mutex_lock(&port->lock);
/* No need to send DISCOVER_IDENTITY anymore */
- if (!port->send_discover)
+ if (!port->send_discover && !port->send_discover_prime)
goto unlock;
if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
port->send_discover = false;
+ port->send_discover_prime = false;
goto unlock;
}
/* Retry if the port is not idle */
- if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
+ if ((port->state != SRC_READY && port->state != SNK_READY &&
+ port->state != SRC_VDM_IDENTITY_REQUEST) || port->vdm_sm_running) {
mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
goto unlock;
}
- tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
+ tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0, port->tx_sop_type);
unlock:
mutex_unlock(&port->lock);
@@ -6111,14 +6855,14 @@ static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
if (data->sink_desc.pdo[0]) {
for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
port->snk_pdo[i] = data->sink_desc.pdo[i];
- port->nr_snk_pdo = i + 1;
+ port->nr_snk_pdo = i;
port->operating_snk_mw = data->operating_snk_mw;
}
if (data->source_desc.pdo[0]) {
for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
- port->snk_pdo[i] = data->source_desc.pdo[i];
- port->nr_src_pdo = i + 1;
+ port->src_pdo[i] = data->source_desc.pdo[i];
+ port->nr_src_pdo = i;
}
switch (port->state) {
@@ -6166,7 +6910,9 @@ static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
port->port_source_caps = data->source_cap;
port->port_sink_caps = data->sink_cap;
+ typec_port_set_usb_power_delivery(p, NULL);
port->selected_pd = pd;
+ typec_port_set_usb_power_delivery(p, port->selected_pd);
unlock:
mutex_unlock(&port->lock);
return ret;
@@ -6199,9 +6945,7 @@ static void tcpm_port_unregister_pd(struct tcpm_port *port)
port->port_source_caps = NULL;
for (i = 0; i < port->pd_count; i++) {
usb_power_delivery_unregister_capabilities(port->pd_list[i]->sink_cap);
- kfree(port->pd_list[i]->sink_cap);
usb_power_delivery_unregister_capabilities(port->pd_list[i]->source_cap);
- kfree(port->pd_list[i]->source_cap);
devm_kfree(port->dev, port->pd_list[i]);
port->pd_list[i] = NULL;
usb_power_delivery_unregister(port->pds[i]);
@@ -6860,6 +7604,8 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
typec_port_register_altmodes(port->typec_port,
&tcpm_altmode_ops, port,
port->port_altmode, ALTMODE_DISCOVERY_MAX);
+ typec_port_register_cable_ops(port->port_altmode, ARRAY_SIZE(port->port_altmode),
+ &tcpm_cable_ops);
port->registered = true;
mutex_lock(&port->lock);
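
For reference, the SOP versus SOP' split in message framing that recurs throughout the tcpm.c changes above comes down to which header fields and counters are used. The snippet below is an illustrative sketch only, not part of the patch; build_vdm_header() is a hypothetical helper assumed to live inside tcpm.c, where struct tcpm_port, PD_HEADER_LE() and the new *_prime counters are visible.

/* Illustrative sketch only; not part of the patch. */
static __le16 build_vdm_header(struct tcpm_port *port,
			       enum tcpm_transmit_type sop, int cnt)
{
	if (sop == TCPC_TX_SOP_PRIME)
		/* SOP': no power/data role bits, SOP'-specific rev and message id */
		return PD_HEADER_LE(PD_DATA_VENDOR_DEF, 0, 0,
				    port->negotiated_rev_prime,
				    port->message_id_prime, cnt);

	/* SOP: the port's own roles, negotiated revision and message id */
	return PD_HEADER_LE(PD_DATA_VENDOR_DEF, port->pwr_role,
			    port->data_role, port->negotiated_rev,
			    port->message_id, cnt);
}
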
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 87d4abde0ea27..cf719307b3f6b 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -535,7 +535,7 @@ static irqreturn_t wcove_typec_irq(int irq, void *data)
goto err;
}
- tcpm_pd_receive(wcove->tcpm, &msg);
+ tcpm_pd_receive(wcove->tcpm, &msg, TCPC_TX_SOP);
ret = regmap_read(wcove->regmap, USBC_RXSTATUS,
&status);
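
The wcove change above simply passes TCPC_TX_SOP for every received message. As a rough sketch, not taken from this patch, a TCPC driver whose hardware distinguishes the frame type could forward it through the extended tcpm_pd_receive() signature as below; rx_is_sop_prime is a hypothetical stand-in for whatever the controller's RX status exposes.

/* Illustrative sketch only; not part of the patch. */
static void example_forward_rx(struct tcpm_port *tcpm,
			       const struct pd_message *msg,
			       bool rx_is_sop_prime)
{
	tcpm_pd_receive(tcpm, msg,
			rx_is_sop_prime ? TCPC_TX_SOP_PRIME : TCPC_TX_SOP);
}
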
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 14f5a7bfae2e9..bd6ae92aa39e7 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -36,6 +36,19 @@
*/
#define UCSI_SWAP_TIMEOUT_MS 5000
+static int ucsi_read_message_in(struct ucsi *ucsi, void *buf,
+ size_t buf_size)
+{
+ /*
+ * Below UCSI 2.0, MESSAGE_IN was limited to 16 bytes. Truncate the
+ * reads here.
+ */
+ if (ucsi->version <= UCSI_VERSION_1_2)
+ buf_size = clamp(buf_size, 0, 16);
+
+ return ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, buf, buf_size);
+}
+
static int ucsi_acknowledge_command(struct ucsi *ucsi)
{
u64 ctrl;
@@ -72,7 +85,7 @@ static int ucsi_read_error(struct ucsi *ucsi)
if (ret < 0)
return ret;
- ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, &error, sizeof(error));
+ ret = ucsi_read_message_in(ucsi, &error, sizeof(error));
if (ret)
return ret;
@@ -138,8 +151,12 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
if (!(cci & UCSI_CCI_COMMAND_COMPLETE))
return -EIO;
- if (cci & UCSI_CCI_NOT_SUPPORTED)
+ if (cci & UCSI_CCI_NOT_SUPPORTED) {
+ if (ucsi_acknowledge_command(ucsi) < 0)
+ dev_err(ucsi->dev,
+ "ACK of unsupported command failed\n");
return -EOPNOTSUPP;
+ }
if (cci & UCSI_CCI_ERROR) {
if (cmd == UCSI_GET_ERROR_STATUS)
@@ -170,7 +187,7 @@ int ucsi_send_command(struct ucsi *ucsi, u64 command,
length = ret;
if (data) {
- ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, data, size);
+ ret = ucsi_read_message_in(ucsi, data, size);
if (ret)
goto out;
}
@@ -386,6 +403,27 @@ static int ucsi_register_altmode(struct ucsi_connector *con,
con->partner_altmode[i] = alt;
break;
+ case UCSI_RECIPIENT_SOP_P:
+ i = ucsi_next_altmode(con->plug_altmode);
+ if (i < 0) {
+ ret = i;
+ goto err;
+ }
+
+ ret = ucsi_altmode_next_mode(con->plug_altmode, desc->svid);
+ if (ret < 0)
+ return ret;
+
+ desc->mode = ret;
+
+ alt = typec_plug_register_altmode(con->plug, desc);
+ if (IS_ERR(alt)) {
+ ret = PTR_ERR(alt);
+ goto err;
+ }
+
+ con->plug_altmode[i] = alt;
+ break;
default:
return -EINVAL;
}
@@ -553,6 +591,9 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
case UCSI_RECIPIENT_SOP:
adev = con->partner_altmode;
break;
+ case UCSI_RECIPIENT_SOP_P:
+ adev = con->plug_altmode;
+ break;
default:
return;
}
@@ -633,6 +674,108 @@ static int ucsi_get_src_pdos(struct ucsi_connector *con)
return ret;
}
+static int ucsi_read_identity(struct ucsi_connector *con, u8 recipient,
+ u8 offset, u8 bytes, void *resp)
+{
+ struct ucsi *ucsi = con->ucsi;
+ u64 command;
+ int ret;
+
+ command = UCSI_COMMAND(UCSI_GET_PD_MESSAGE) |
+ UCSI_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_GET_PD_MESSAGE_RECIPIENT(recipient);
+ command |= UCSI_GET_PD_MESSAGE_OFFSET(offset);
+ command |= UCSI_GET_PD_MESSAGE_BYTES(bytes);
+ command |= UCSI_GET_PD_MESSAGE_TYPE(UCSI_GET_PD_MESSAGE_TYPE_IDENTITY);
+
+ ret = ucsi_send_command(ucsi, command, resp, bytes);
+ if (ret < 0)
+ dev_err(ucsi->dev, "UCSI_GET_PD_MESSAGE failed (%d)\n", ret);
+
+ return ret;
+}
+
+static int ucsi_get_identity(struct ucsi_connector *con, u8 recipient,
+ struct usb_pd_identity *id)
+{
+ struct ucsi *ucsi = con->ucsi;
+ struct ucsi_pd_message_disc_id resp = {};
+ int ret;
+
+ if (ucsi->version < UCSI_VERSION_2_0) {
+ /*
+ * Before UCSI v2.0, MESSAGE_IN is only 16 bytes, which cannot fit
+ * the 28-byte identity response including the VDM header.
+ * First request the VDM header, ID Header VDO, Cert Stat VDO
+ * and Product VDO.
+ */
+ ret = ucsi_read_identity(con, recipient, 0, 0x10, &resp);
+ if (ret < 0)
+ return ret;
+
+
+ /* Then request Product Type VDO1 through Product Type VDO3. */
+ ret = ucsi_read_identity(con, recipient, 0x10, 0xc,
+ &resp.vdo[0]);
+ if (ret < 0)
+ return ret;
+
+ } else {
+ /*
+ * In UCSI v2.0 and after, MESSAGE_IN is large enough to request
+ * the full Discover Identity response at once.
+ */
+ ret = ucsi_read_identity(con, recipient, 0x0, 0x1c, &resp);
+ if (ret < 0)
+ return ret;
+ }
+
+ id->id_header = resp.id_header;
+ id->cert_stat = resp.cert_stat;
+ id->product = resp.product;
+ id->vdo[0] = resp.vdo[0];
+ id->vdo[1] = resp.vdo[1];
+ id->vdo[2] = resp.vdo[2];
+ return 0;
+}
+
+static int ucsi_get_partner_identity(struct ucsi_connector *con)
+{
+ int ret;
+
+ ret = ucsi_get_identity(con, UCSI_RECIPIENT_SOP,
+ &con->partner_identity);
+ if (ret < 0)
+ return ret;
+
+ ret = typec_partner_set_identity(con->partner);
+ if (ret < 0) {
+ dev_err(con->ucsi->dev, "Failed to set partner identity (%d)\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static int ucsi_get_cable_identity(struct ucsi_connector *con)
+{
+ int ret;
+
+ ret = ucsi_get_identity(con, UCSI_RECIPIENT_SOP_P,
+ &con->cable_identity);
+ if (ret < 0)
+ return ret;
+
+ ret = typec_cable_set_identity(con->cable);
+ if (ret < 0) {
+ dev_err(con->ucsi->dev, "Failed to set cable identity (%d)\n",
+ ret);
+ }
+
+ return ret;
+}
+
static int ucsi_check_altmodes(struct ucsi_connector *con)
{
int ret, num_partner_am;
@@ -721,6 +864,82 @@ static void ucsi_unregister_partner_pdos(struct ucsi_connector *con)
con->partner_pd = NULL;
}
+static int ucsi_register_plug(struct ucsi_connector *con)
+{
+ struct typec_plug *plug;
+ struct typec_plug_desc desc = {.index = TYPEC_PLUG_SOP_P};
+
+ plug = typec_register_plug(con->cable, &desc);
+ if (IS_ERR(plug)) {
+ dev_err(con->ucsi->dev,
+ "con%d: failed to register plug (%ld)\n", con->num,
+ PTR_ERR(plug));
+ return PTR_ERR(plug);
+ }
+
+ con->plug = plug;
+ return 0;
+}
+
+static void ucsi_unregister_plug(struct ucsi_connector *con)
+{
+ if (!con->plug)
+ return;
+
+ ucsi_unregister_altmodes(con, UCSI_RECIPIENT_SOP_P);
+ typec_unregister_plug(con->plug);
+ con->plug = NULL;
+}
+
+static int ucsi_register_cable(struct ucsi_connector *con)
+{
+ struct typec_cable *cable;
+ struct typec_cable_desc desc = {};
+
+ switch (UCSI_CABLE_PROP_FLAG_PLUG_TYPE(con->cable_prop.flags)) {
+ case UCSI_CABLE_PROPERTY_PLUG_TYPE_A:
+ desc.type = USB_PLUG_TYPE_A;
+ break;
+ case UCSI_CABLE_PROPERTY_PLUG_TYPE_B:
+ desc.type = USB_PLUG_TYPE_B;
+ break;
+ case UCSI_CABLE_PROPERTY_PLUG_TYPE_C:
+ desc.type = USB_PLUG_TYPE_C;
+ break;
+ default:
+ desc.type = USB_PLUG_NONE;
+ break;
+ }
+
+ desc.identity = &con->cable_identity;
+ desc.active = !!(UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE &
+ con->cable_prop.flags);
+ desc.pd_revision = UCSI_CABLE_PROP_FLAG_PD_MAJOR_REV_AS_BCD(
+ con->cable_prop.flags);
+
+ cable = typec_register_cable(con->port, &desc);
+ if (IS_ERR(cable)) {
+ dev_err(con->ucsi->dev,
+ "con%d: failed to register cable (%ld)\n", con->num,
+ PTR_ERR(cable));
+ return PTR_ERR(cable);
+ }
+
+ con->cable = cable;
+ return 0;
+}
+
+static void ucsi_unregister_cable(struct ucsi_connector *con)
+{
+ if (!con->cable)
+ return;
+
+ ucsi_unregister_plug(con);
+ typec_unregister_cable(con->cable);
+ memset(&con->cable_identity, 0, sizeof(con->cable_identity));
+ con->cable = NULL;
+}
+
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
{
switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
@@ -768,7 +987,9 @@ static int ucsi_register_partner(struct ucsi_connector *con)
break;
}
+ desc.identity = &con->partner_identity;
desc.usb_pd = pwr_opmode == UCSI_CONSTAT_PWR_OPMODE_PD;
+ desc.pd_revision = UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV_AS_BCD(con->cap.flags);
partner = typec_register_partner(con->port, &desc);
if (IS_ERR(partner)) {
@@ -793,7 +1014,9 @@ static void ucsi_unregister_partner(struct ucsi_connector *con)
typec_partner_set_usb_power_delivery(con->partner, NULL);
ucsi_unregister_partner_pdos(con);
ucsi_unregister_altmodes(con, UCSI_RECIPIENT_SOP);
+ ucsi_unregister_cable(con);
typec_unregister_partner(con->partner);
+ memset(&con->partner_identity, 0, sizeof(con->partner_identity));
con->partner = NULL;
}
@@ -843,6 +1066,27 @@ static void ucsi_partner_change(struct ucsi_connector *con)
con->num, u_role);
}
+static int ucsi_check_connector_capability(struct ucsi_connector *con)
+{
+ u64 command;
+ int ret;
+
+ if (!con->partner || con->ucsi->version < UCSI_VERSION_2_0)
+ return 0;
+
+ command = UCSI_GET_CONNECTOR_CAPABILITY | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_send_command(con->ucsi, command, &con->cap, sizeof(con->cap));
+ if (ret < 0) {
+ dev_err(con->ucsi->dev, "GET_CONNECTOR_CAPABILITY failed (%d)\n", ret);
+ return ret;
+ }
+
+ typec_partner_set_pd_revision(con->partner,
+ UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV_AS_BCD(con->cap.flags));
+
+ return ret;
+}
+
static int ucsi_check_connection(struct ucsi_connector *con)
{
u8 prev_flags = con->status.flags;
@@ -872,6 +1116,46 @@ static int ucsi_check_connection(struct ucsi_connector *con)
return 0;
}
+static int ucsi_check_cable(struct ucsi_connector *con)
+{
+ u64 command;
+ int ret;
+
+ if (con->cable)
+ return 0;
+
+ command = UCSI_GET_CABLE_PROPERTY | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_send_command(con->ucsi, command, &con->cable_prop,
+ sizeof(con->cable_prop));
+ if (ret < 0) {
+ dev_err(con->ucsi->dev, "GET_CABLE_PROPERTY failed (%d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = ucsi_register_cable(con);
+ if (ret < 0)
+ return ret;
+
+ if (con->ucsi->cap.features & UCSI_CAP_GET_PD_MESSAGE) {
+ ret = ucsi_get_cable_identity(con);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (con->ucsi->cap.features & UCSI_CAP_ALT_MODE_DETAILS) {
+ ret = ucsi_register_plug(con);
+ if (ret < 0)
+ return ret;
+
+ ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP_P);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static void ucsi_handle_connector_change(struct work_struct *work)
{
struct ucsi_connector *con = container_of(work, struct ucsi_connector,
@@ -912,6 +1196,11 @@ static void ucsi_handle_connector_change(struct work_struct *work)
if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
ucsi_register_partner(con);
ucsi_partner_task(con, ucsi_check_connection, 1, HZ);
+ ucsi_partner_task(con, ucsi_check_connector_capability, 1, HZ);
+ if (con->ucsi->cap.features & UCSI_CAP_GET_PD_MESSAGE)
+ ucsi_partner_task(con, ucsi_get_partner_identity, 1, HZ);
+ if (con->ucsi->cap.features & UCSI_CAP_CABLE_DETAILS)
+ ucsi_partner_task(con, ucsi_check_cable, 1, HZ);
if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) ==
UCSI_CONSTAT_PWR_OPMODE_PD)
@@ -936,11 +1225,11 @@ static void ucsi_handle_connector_change(struct work_struct *work)
if (con->status.change & UCSI_CONSTAT_CAM_CHANGE)
ucsi_partner_task(con, ucsi_check_altmodes, 1, 0);
- clear_bit(EVENT_PENDING, &con->ucsi->flags);
-
mutex_lock(&ucsi->ppm_lock);
+ clear_bit(EVENT_PENDING, &con->ucsi->flags);
ret = ucsi_acknowledge_connector_change(ucsi);
mutex_unlock(&ucsi->ppm_lock);
+
if (ret)
dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
@@ -958,7 +1247,7 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num)
struct ucsi_connector *con = &ucsi->connector[num - 1];
if (!(ucsi->ntfy & UCSI_ENABLE_NTFY_CONNECTOR_CHANGE)) {
- dev_dbg(ucsi->dev, "Bogus connector change event\n");
+ dev_dbg(ucsi->dev, "Early connector change event\n");
return;
}
@@ -981,13 +1270,47 @@ static int ucsi_reset_connector(struct ucsi_connector *con, bool hard)
static int ucsi_reset_ppm(struct ucsi *ucsi)
{
- u64 command = UCSI_PPM_RESET;
+ u64 command;
unsigned long tmo;
u32 cci;
int ret;
mutex_lock(&ucsi->ppm_lock);
+ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
+ if (ret < 0)
+ goto out;
+
+ /*
+ * If UCSI_CCI_RESET_COMPLETE is already set, we must clear
+ * the flag before starting another reset. Send a
+ * UCSI_SET_NOTIFICATION_ENABLE command to achieve this.
+ * If that command times out, ignore it and attempt the
+ * reset anyway.
+ */
+ if (cci & UCSI_CCI_RESET_COMPLETE) {
+ command = UCSI_SET_NOTIFICATION_ENABLE;
+ ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command,
+ sizeof(command));
+ if (ret < 0)
+ goto out;
+
+ tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
+ do {
+ ret = ucsi->ops->read(ucsi, UCSI_CCI,
+ &cci, sizeof(cci));
+ if (ret < 0)
+ goto out;
+ if (cci & UCSI_CCI_COMMAND_COMPLETE)
+ break;
+ if (time_is_before_jiffies(tmo))
+ break;
+ msleep(20);
+ } while (1);
+
+ WARN_ON(cci & UCSI_CCI_RESET_COMPLETE);
+ }
+
+ command = UCSI_PPM_RESET;
ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command,
sizeof(command));
if (ret < 0)
@@ -1310,6 +1633,10 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
ucsi_register_partner(con);
ucsi_pwr_opmode_change(con);
ucsi_port_psy_changed(con);
+ if (con->ucsi->cap.features & UCSI_CAP_GET_PD_MESSAGE)
+ ucsi_get_partner_identity(con);
+ if (con->ucsi->cap.features & UCSI_CAP_CABLE_DETAILS)
+ ucsi_check_cable(con);
}
/* Only notify USB controller if partner supports USB data */
@@ -1355,6 +1682,7 @@ static int ucsi_init(struct ucsi *ucsi)
{
struct ucsi_connector *con, *connector;
u64 command, ntfy;
+ u32 cci;
int ret;
int i;
@@ -1407,6 +1735,15 @@ static int ucsi_init(struct ucsi *ucsi)
ucsi->connector = connector;
ucsi->ntfy = ntfy;
+
+ mutex_lock(&ucsi->ppm_lock);
+ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
+ mutex_unlock(&ucsi->ppm_lock);
+ if (ret)
+ return ret;
+ if (UCSI_CCI_CONNECTOR(cci))
+ ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci));
+
return 0;
err_unregister:
@@ -1558,6 +1895,15 @@ int ucsi_register(struct ucsi *ucsi)
if (!ucsi->version)
return -ENODEV;
+ /*
+ * Version format is JJ.M.N (JJ = Major version, M = Minor version,
+ * N = sub-minor version).
+ */
+ dev_dbg(ucsi->dev, "Registered UCSI interface with version %x.%x.%x",
+ UCSI_BCD_GET_MAJOR(ucsi->version),
+ UCSI_BCD_GET_MINOR(ucsi->version),
+ UCSI_BCD_GET_SUBMINOR(ucsi->version));
+
queue_delayed_work(system_long_wq, &ucsi->work, 0);
ucsi_debugfs_register(ucsi);
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 6478016d5cb8b..0e7c92eb1b227 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -10,6 +10,7 @@
#include <linux/usb/typec.h>
#include <linux/usb/pd.h>
#include <linux/usb/role.h>
+#include <asm/unaligned.h>
/* -------------------------------------------------------------------------- */
@@ -23,6 +24,23 @@ struct dentry;
#define UCSI_CONTROL 8
#define UCSI_MESSAGE_IN 16
#define UCSI_MESSAGE_OUT 32
+#define UCSIv2_MESSAGE_OUT 272
+
+/* UCSI versions */
+#define UCSI_VERSION_1_2 0x0120
+#define UCSI_VERSION_2_0 0x0200
+#define UCSI_VERSION_2_1 0x0210
+#define UCSI_VERSION_3_0 0x0300
+
+#define UCSI_BCD_GET_MAJOR(_v_) (((_v_) >> 8) & 0xFF)
+#define UCSI_BCD_GET_MINOR(_v_) (((_v_) >> 4) & 0x0F)
+#define UCSI_BCD_GET_SUBMINOR(_v_) ((_v_) & 0x0F)
+
+/*
+ * Per USB PD 3.2, Section 6.2.1.1.5, the spec revision is represented by 2 bits:
+ * 0b00 = 1.0, 0b01 = 2.0, 0b10 = 3.0, 0b11 = Reserved (shall not be used).
+ */
+#define UCSI_SPEC_REVISION_TO_BCD(_v_) (((_v_) + 1) << 8)
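A quick worked example of how the new version helpers compose (a minimal sketch; the values are hypothetical and only assume the macros defined above):

	/* Decode a raw BCD version such as 0x0210, i.e. UCSI 2.1.0. */
	u16 version = 0x0210;
	u8 major = UCSI_BCD_GET_MAJOR(version);       /* 2 */
	u8 minor = UCSI_BCD_GET_MINOR(version);       /* 1 */
	u8 subminor = UCSI_BCD_GET_SUBMINOR(version); /* 0 */

	/* A 2-bit PD spec revision of 0b10 (PD 3.0) becomes BCD 0x0300. */
	u16 pd_rev = UCSI_SPEC_REVISION_TO_BCD(0x2);

This BCD form is what typec_partner_set_pd_revision() and the dev_dbg() version print added to ucsi_register() consume.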
/* Command Status and Connector Change Indication (CCI) bits */
#define UCSI_CCI_CONNECTOR(_c_) (((_c_) & GENMASK(7, 1)) >> 1)
@@ -88,6 +106,7 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_GET_CABLE_PROPERTY 0x11
#define UCSI_GET_CONNECTOR_STATUS 0x12
#define UCSI_GET_ERROR_STATUS 0x13
+#define UCSI_GET_PD_MESSAGE 0x15
#define UCSI_CONNECTOR_NUMBER(_num_) ((u64)(_num_) << 16)
#define UCSI_COMMAND(_cmd_) ((_cmd_) & 0xff)
@@ -141,6 +160,18 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_MAX_PDOS (4)
#define UCSI_GET_PDOS_SRC_PDOS ((u64)1 << 34)
+/* GET_PD_MESSAGE command bits */
+#define UCSI_GET_PD_MESSAGE_RECIPIENT(_r_) ((u64)(_r_) << 23)
+#define UCSI_GET_PD_MESSAGE_OFFSET(_r_) ((u64)(_r_) << 26)
+#define UCSI_GET_PD_MESSAGE_BYTES(_r_) ((u64)(_r_) << 34)
+#define UCSI_GET_PD_MESSAGE_TYPE(_r_) ((u64)(_r_) << 42)
+#define UCSI_GET_PD_MESSAGE_TYPE_SNK_CAP_EXT 0
+#define UCSI_GET_PD_MESSAGE_TYPE_SRC_CAP_EXT 1
+#define UCSI_GET_PD_MESSAGE_TYPE_BAT_CAP 2
+#define UCSI_GET_PD_MESSAGE_TYPE_BAT_STAT 3
+#define UCSI_GET_PD_MESSAGE_TYPE_IDENTITY 4
+#define UCSI_GET_PD_MESSAGE_TYPE_REVISION 5
+
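The command bits above are OR'ed into a single 64-bit control word. A sketch of how a Discover Identity request for the port partner might be composed (illustrative only: con, resp and ret are assumed locals, and ucsi_get_identity() added earlier in this patch is the authoritative user of these macros):

	/* Sketch: request the partner's Discover Identity response. */
	u64 command = UCSI_GET_PD_MESSAGE | UCSI_CONNECTOR_NUMBER(con->num);

	command |= UCSI_GET_PD_MESSAGE_RECIPIENT(UCSI_RECIPIENT_SOP);
	command |= UCSI_GET_PD_MESSAGE_BYTES(sizeof(struct ucsi_pd_message_disc_id));
	command |= UCSI_GET_PD_MESSAGE_TYPE(UCSI_GET_PD_MESSAGE_TYPE_IDENTITY);

	ret = ucsi_send_command(con->ucsi, command, &resp, sizeof(resp));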
/* -------------------------------------------------------------------------- */
/* Error information returned by PPM in response to GET_ERROR_STATUS command. */
@@ -175,7 +206,7 @@ struct ucsi_capability {
#define UCSI_CAP_ATTR_POWER_OTHER BIT(10)
#define UCSI_CAP_ATTR_POWER_VBUS BIT(14)
u8 num_connectors;
- u8 features;
+ u16 features;
#define UCSI_CAP_SET_UOM BIT(0)
#define UCSI_CAP_SET_PDM BIT(1)
#define UCSI_CAP_ALT_MODE_DETAILS BIT(2)
@@ -184,7 +215,8 @@ struct ucsi_capability {
#define UCSI_CAP_CABLE_DETAILS BIT(5)
#define UCSI_CAP_EXT_SUPPLY_NOTIFICATIONS BIT(6)
#define UCSI_CAP_PD_RESET BIT(7)
- u16 reserved_1;
+#define UCSI_CAP_GET_PD_MESSAGE BIT(8)
+ u8 reserved_1;
u8 num_alt_modes;
u8 reserved_2;
u16 bc_version;
@@ -203,9 +235,29 @@ struct ucsi_connector_capability {
#define UCSI_CONCAP_OPMODE_USB2 BIT(5)
#define UCSI_CONCAP_OPMODE_USB3 BIT(6)
#define UCSI_CONCAP_OPMODE_ALT_MODE BIT(7)
- u8 flags;
+ u32 flags;
#define UCSI_CONCAP_FLAG_PROVIDER BIT(0)
#define UCSI_CONCAP_FLAG_CONSUMER BIT(1)
+#define UCSI_CONCAP_FLAG_SWAP_TO_DFP BIT(2)
+#define UCSI_CONCAP_FLAG_SWAP_TO_UFP BIT(3)
+#define UCSI_CONCAP_FLAG_SWAP_TO_SRC BIT(4)
+#define UCSI_CONCAP_FLAG_SWAP_TO_SINK BIT(5)
+#define UCSI_CONCAP_FLAG_EX_OP_MODE(_f_) \
+ (((_f_) & GENMASK(13, 6)) >> 6)
+#define UCSI_CONCAP_EX_OP_MODE_USB4_GEN2 BIT(0)
+#define UCSI_CONCAP_EX_OP_MODE_EPR_SRC BIT(1)
+#define UCSI_CONCAP_EX_OP_MODE_EPR_SINK BIT(2)
+#define UCSI_CONCAP_EX_OP_MODE_USB4_GEN3 BIT(3)
+#define UCSI_CONCAP_EX_OP_MODE_USB4_GEN4 BIT(4)
+#define UCSI_CONCAP_FLAG_MISC_CAPS(_f_) \
+ (((_f_) & GENMASK(17, 14)) >> 14)
+#define UCSI_CONCAP_MISC_CAP_FW_UPDATE BIT(0)
+#define UCSI_CONCAP_MISC_CAP_SECURITY BIT(1)
+#define UCSI_CONCAP_FLAG_REV_CURR_PROT_SUPPORT BIT(18)
+#define UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV(_f_) \
+ (((_f_) & GENMASK(20, 19)) >> 19)
+#define UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV_AS_BCD(_f_) \
+ UCSI_SPEC_REVISION_TO_BCD(UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV(_f_))
} __packed;
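The extended operation mode and miscellaneous capability fields are themselves small bitmaps packed inside flags, so they are read in two steps: extract the field with the _FLAG_ accessor, then test the BIT() values against the result. A hedged sketch (con is an assumed ucsi_connector pointer):

	u8 ex_mode = UCSI_CONCAP_FLAG_EX_OP_MODE(con->cap.flags);
	u8 misc = UCSI_CONCAP_FLAG_MISC_CAPS(con->cap.flags);

	bool epr_src = ex_mode & UCSI_CONCAP_EX_OP_MODE_EPR_SRC;     /* EPR source operation supported */
	bool fw_update = misc & UCSI_CONCAP_MISC_CAP_FW_UPDATE;      /* firmware update supported */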
struct ucsi_altmode {
@@ -221,12 +273,15 @@ struct ucsi_cable_property {
#define UCSI_CABLE_PROP_FLAG_VBUS_IN_CABLE BIT(0)
#define UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE BIT(1)
#define UCSI_CABLE_PROP_FLAG_DIRECTIONALITY BIT(2)
-#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) ((_f_) & GENMASK(3, 0))
+#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) (((_f_) & GENMASK(4, 3)) >> 3)
#define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0
#define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1
#define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2
#define UCSI_CABLE_PROPERTY_PLUG_OTHER 3
-#define UCSI_CABLE_PROP_MODE_SUPPORT BIT(5)
+#define UCSI_CABLE_PROP_FLAG_MODE_SUPPORT BIT(5)
+#define UCSI_CABLE_PROP_FLAG_PD_MAJOR_REV(_f_) (((_f_) & GENMASK(7, 6)) >> 6)
+#define UCSI_CABLE_PROP_FLAG_PD_MAJOR_REV_AS_BCD(_f_) \
+ UCSI_SPEC_REVISION_TO_BCD(UCSI_CABLE_PROP_FLAG_PD_MAJOR_REV(_f_))
u8 latency;
} __packed;
@@ -265,15 +320,48 @@ struct ucsi_connector_status {
#define UCSI_CONSTAT_PARTNER_TYPE_DEBUG 5
#define UCSI_CONSTAT_PARTNER_TYPE_AUDIO 6
u32 request_data_obj;
- u8 pwr_status;
-#define UCSI_CONSTAT_BC_STATUS(_p_) ((_p_) & GENMASK(2, 0))
+
+ u8 pwr_status[3];
+#define UCSI_CONSTAT_BC_STATUS(_p_) ((_p_[0]) & GENMASK(1, 0))
#define UCSI_CONSTAT_BC_NOT_CHARGING 0
#define UCSI_CONSTAT_BC_NOMINAL_CHARGING 1
#define UCSI_CONSTAT_BC_SLOW_CHARGING 2
#define UCSI_CONSTAT_BC_TRICKLE_CHARGING 3
-#define UCSI_CONSTAT_PROVIDER_CAP_LIMIT(_p_) (((_p_) & GENMASK(6, 3)) >> 3)
+#define UCSI_CONSTAT_PROVIDER_CAP_LIMIT(_p_) (((_p_[0]) & GENMASK(5, 2)) >> 2)
#define UCSI_CONSTAT_CAP_PWR_LOWERED 0
#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT 1
+#define UCSI_CONSTAT_PROVIDER_PD_VERSION_OPER_MODE(_p_) \
+ ((get_unaligned_le32(_p_) & GENMASK(21, 6)) >> 6)
+#define UCSI_CONSTAT_ORIENTATION(_p_) (((_p_[2]) & GENMASK(6, 6)) >> 6)
+#define UCSI_CONSTAT_ORIENTATION_DIRECT 0
+#define UCSI_CONSTAT_ORIENTATION_FLIPPED 1
+#define UCSI_CONSTAT_SINK_PATH_STATUS(_p_) (((_p_[2]) & GENMASK(7, 7)) >> 7)
+#define UCSI_CONSTAT_SINK_PATH_DISABLED 0
+#define UCSI_CONSTAT_SINK_PATH_ENABLED 1
+ u8 pwr_readings[9];
+#define UCSI_CONSTAT_REV_CURR_PROT_STATUS(_p_) ((_p_[0]) & 0x1)
+#define UCSI_CONSTAT_PWR_READING_VALID(_p_) (((_p_[0]) & GENMASK(1, 1)) >> 1)
+#define UCSI_CONSTAT_CURRENT_SCALE(_p_) (((_p_[0]) & GENMASK(4, 2)) >> 2)
+#define UCSI_CONSTAT_PEAK_CURRENT(_p_) \
+ ((get_unaligned_le32(_p_) & GENMASK(20, 5)) >> 5)
+#define UCSI_CONSTAT_AVG_CURRENT(_p_) \
+ ((get_unaligned_le32(&(_p_)[2]) & GENMASK(20, 5)) >> 5)
+#define UCSI_CONSTAT_VOLTAGE_SCALE(_p_) \
+ ((get_unaligned_le16(&(_p_)[4]) & GENMASK(8, 5)) >> 5)
+#define UCSI_CONSTAT_VOLTAGE_READING(_p_) \
+ ((get_unaligned_le32(&(_p_)[5]) & GENMASK(16, 1)) >> 1)
+} __packed;
+
+/*
+ * Data structure filled by PPM in response to GET_PD_MESSAGE command with the
+ * Response Message Type set to Discover Identity Response.
+ */
+struct ucsi_pd_message_disc_id {
+ u32 vdm_header;
+ u32 id_header;
+ u32 cert_stat;
+ u32 product;
+ u32 vdo[3];
} __packed;
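The fields following vdm_header map naturally onto struct usb_pd_identity, so the response can be copied field by field into the identity objects registered with the typec class. One plausible helper for that copy (the function name is made up for this sketch and is not part of the patch):

	static void ucsi_example_fill_identity(const struct ucsi_pd_message_disc_id *resp,
					       struct usb_pd_identity *id)
	{
		id->id_header = resp->id_header;
		id->cert_stat = resp->cert_stat;
		id->product = resp->product;
		memcpy(id->vdo, resp->vdo, sizeof(id->vdo));
	}

After filling con->partner_identity or con->cable_identity along these lines, the identity is pushed to the typec class, as the ucsi_get_partner_identity()/ucsi_get_cable_identity() helpers above do.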
/* -------------------------------------------------------------------------- */
@@ -341,14 +429,18 @@ struct ucsi_connector {
struct typec_port *port;
struct typec_partner *partner;
+ struct typec_cable *cable;
+ struct typec_plug *plug;
struct typec_altmode *port_altmode[UCSI_MAX_ALTMODES];
struct typec_altmode *partner_altmode[UCSI_MAX_ALTMODES];
+ struct typec_altmode *plug_altmode[UCSI_MAX_ALTMODES];
struct typec_capability typec_cap;
struct ucsi_connector_status status;
struct ucsi_connector_capability cap;
+ struct ucsi_cable_property cable_prop;
struct power_supply *psy;
struct power_supply_desc psy_desc;
u32 rdo;
@@ -364,6 +456,10 @@ struct ucsi_connector {
struct usb_power_delivery_capabilities *partner_sink_caps;
struct usb_role_switch *usb_role_sw;
+
+ /* USB PD identity */
+ struct usb_pd_identity partner_identity;
+ struct usb_pd_identity cable_identity;
};
int ucsi_send_command(struct ucsi *ucsi, u64 command,
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 928eacbeb21ac..7b3ac133ef861 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -23,10 +23,11 @@ struct ucsi_acpi {
void *base;
struct completion complete;
unsigned long flags;
+#define UCSI_ACPI_SUPPRESS_EVENT 0
+#define UCSI_ACPI_COMMAND_PENDING 1
+#define UCSI_ACPI_ACK_PENDING 2
guid_t guid;
u64 cmd;
- bool dell_quirk_probed;
- bool dell_quirk_active;
};
static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func)
@@ -79,9 +80,9 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
int ret;
if (ack)
- set_bit(ACK_PENDING, &ua->flags);
+ set_bit(UCSI_ACPI_ACK_PENDING, &ua->flags);
else
- set_bit(COMMAND_PENDING, &ua->flags);
+ set_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags);
ret = ucsi_acpi_async_write(ucsi, offset, val, val_len);
if (ret)
@@ -92,9 +93,9 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
out_clear_bit:
if (ack)
- clear_bit(ACK_PENDING, &ua->flags);
+ clear_bit(UCSI_ACPI_ACK_PENDING, &ua->flags);
else
- clear_bit(COMMAND_PENDING, &ua->flags);
+ clear_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags);
return ret;
}
@@ -129,51 +130,40 @@ static const struct ucsi_operations ucsi_zenbook_ops = {
};
/*
- * Some Dell laptops expect that an ACK command with the
- * UCSI_ACK_CONNECTOR_CHANGE bit set is followed by a (separate)
- * ACK command that only has the UCSI_ACK_COMMAND_COMPLETE bit set.
- * If this is not done events are not delivered to OSPM and
- * subsequent commands will timeout.
- * Some Dell laptops don't like ACK commands that have the
- * UCSI_ACK_CONNECTOR_CHANGE bit set but not the
- * UCSI_ACK_COMMAND_COMPLETE bit. To work around this, send a dummy
- * command and bundle the UCSI_ACK_CONNECTOR_CHANGE with the
- * UCSI_ACK_COMMAND_COMPLETE for the dummy command.
*/
static int
ucsi_dell_sync_write(struct ucsi *ucsi, unsigned int offset,
const void *val, size_t val_len)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
- u64 cmd = *(u64 *)val, ack = 0;
+ u64 cmd = *(u64 *)val;
+ u64 dummycmd = UCSI_GET_CAPABILITY;
int ret;
- if (UCSI_COMMAND(cmd) == UCSI_ACK_CC_CI &&
- cmd & UCSI_ACK_CONNECTOR_CHANGE)
- ack = UCSI_ACK_CC_CI | UCSI_ACK_COMMAND_COMPLETE;
-
- ret = ucsi_acpi_sync_write(ucsi, offset, val, val_len);
- if (ret != 0)
- return ret;
- if (ack == 0)
- return ret;
-
- if (!ua->dell_quirk_probed) {
- ua->dell_quirk_probed = true;
-
- cmd = UCSI_GET_CAPABILITY;
- ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd,
- sizeof(cmd));
- if (ret == 0)
- return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL,
- &ack, sizeof(ack));
- if (ret != -ETIMEDOUT)
+ if (cmd == (UCSI_ACK_CC_CI | UCSI_ACK_CONNECTOR_CHANGE)) {
+ cmd |= UCSI_ACK_COMMAND_COMPLETE;
+
+ /*
+ * The UCSI core thinks it is sending a connector change ack
+ * and will accept new connector change events. We don't want
+ * this to happen for the dummy command as its response will
+ * still report the very event that the core is trying to clear.
+ */
+ set_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags);
+ ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &dummycmd,
+ sizeof(dummycmd));
+ clear_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags);
+
+ if (ret < 0)
return ret;
-
- ua->dell_quirk_active = true;
- dev_err(ua->dev, "Firmware bug: Additional ACK required after ACKing a connector change.\n");
- dev_err(ua->dev, "Firmware bug: Enabling workaround\n");
}
- if (!ua->dell_quirk_active)
- return ret;
-
- return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &ack, sizeof(ack));
+ return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
}
static const struct ucsi_operations ucsi_dell_ops = {
@@ -209,13 +199,14 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
if (ret)
return;
- if (UCSI_CCI_CONNECTOR(cci))
+ if (UCSI_CCI_CONNECTOR(cci) &&
+ !test_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags))
ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));
if (cci & UCSI_CCI_ACK_COMPLETE && test_bit(ACK_PENDING, &ua->flags))
complete(&ua->complete);
if (cci & UCSI_CCI_COMMAND_COMPLETE &&
- test_bit(COMMAND_PENDING, &ua->flags))
+ test_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags))
complete(&ua->complete);
}
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 449c125f6f870..dda7c7c94e08a 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -192,6 +192,12 @@ struct ucsi_ccg_altmode {
bool checked;
} __packed;
+#define CCGX_MESSAGE_IN_MAX 4
+struct op_region {
+ __le32 cci;
+ __le32 message_in[CCGX_MESSAGE_IN_MAX];
+};
+
struct ucsi_ccg {
struct device *dev;
struct ucsi *ucsi;
@@ -222,6 +228,13 @@ struct ucsi_ccg {
bool has_multiple_dp;
struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];
struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];
+
+ /*
+ * This spinlock protects op_data, which holds the CCI and MESSAGE_IN
+ * values updated from the ISR.
+ */
+ spinlock_t op_lock;
+ struct op_region op_data;
};
static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
@@ -305,12 +318,42 @@ static int ccg_write(struct ucsi_ccg *uc, u16 rab, const u8 *data, u32 len)
return 0;
}
+static int ccg_op_region_update(struct ucsi_ccg *uc, u32 cci)
+{
+ u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_MESSAGE_IN);
+ struct op_region *data = &uc->op_data;
+ unsigned char *buf;
+ size_t size = sizeof(data->message_in);
+
+ buf = kzalloc(size, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+ if (UCSI_CCI_LENGTH(cci)) {
+ int ret = ccg_read(uc, reg, (void *)buf, size);
+
+ if (ret) {
+ kfree(buf);
+ return ret;
+ }
+ }
+
+ spin_lock(&uc->op_lock);
+ data->cci = cpu_to_le32(cci);
+ if (UCSI_CCI_LENGTH(cci))
+ memcpy(&data->message_in, buf, size);
+ spin_unlock(&uc->op_lock);
+ kfree(buf);
+ return 0;
+}
+
static int ucsi_ccg_init(struct ucsi_ccg *uc)
{
unsigned int count = 10;
u8 data;
int status;
+ spin_lock_init(&uc->op_lock);
+
data = CCGX_RAB_UCSI_CONTROL_STOP;
status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
if (status < 0)
@@ -520,9 +563,20 @@ static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
struct ucsi_capability *cap;
struct ucsi_altmode *alt;
- int ret;
+ int ret = 0;
+
+ if (offset == UCSI_CCI) {
+ spin_lock(&uc->op_lock);
+ memcpy(val, &(uc->op_data).cci, val_len);
+ spin_unlock(&uc->op_lock);
+ } else if (offset == UCSI_MESSAGE_IN) {
+ spin_lock(&uc->op_lock);
+ memcpy(val, &(uc->op_data).message_in, val_len);
+ spin_unlock(&uc->op_lock);
+ } else {
+ ret = ccg_read(uc, reg, val, val_len);
+ }
- ret = ccg_read(uc, reg, val, val_len);
if (ret)
return ret;
@@ -559,9 +613,18 @@ static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
static int ucsi_ccg_async_write(struct ucsi *ucsi, unsigned int offset,
const void *val, size_t val_len)
{
+ struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
- return ccg_write(ucsi_get_drvdata(ucsi), reg, val, val_len);
+ /*
+ * The UCSI core may read CCI immediately after async_write, so clear
+ * the cached CCI to prevent the caller from seeing stale data before
+ * the ISR delivers the new value.
+ */
+ spin_lock(&uc->op_lock);
+ uc->op_data.cci = 0;
+ spin_unlock(&uc->op_lock);
+
+ return ccg_write(uc, reg, val, val_len);
}
static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset,
@@ -615,13 +678,18 @@ static irqreturn_t ccg_irq_handler(int irq, void *data)
u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CCI);
struct ucsi_ccg *uc = data;
u8 intr_reg;
- u32 cci;
- int ret;
+ u32 cci = 0;
+ int ret = 0;
ret = ccg_read(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
if (ret)
return ret;
+ if (!intr_reg)
+ return IRQ_HANDLED;
+ else if (!(intr_reg & UCSI_READ_INT))
+ goto err_clear_irq;
+
ret = ccg_read(uc, reg, (void *)&cci, sizeof(cci));
if (ret)
goto err_clear_irq;
@@ -629,13 +697,21 @@ static irqreturn_t ccg_irq_handler(int irq, void *data)
if (UCSI_CCI_CONNECTOR(cci))
ucsi_connector_change(uc->ucsi, UCSI_CCI_CONNECTOR(cci));
- if (test_bit(DEV_CMD_PENDING, &uc->flags) &&
- cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
- complete(&uc->complete);
+ /*
+ * As per the CCGx UCSI interface guide, copy CCI and MESSAGE_IN
+ * to the OpRegion before clearing the UCSI interrupt.
+ */
+ ret = ccg_op_region_update(uc, cci);
+ if (ret)
+ goto err_clear_irq;
err_clear_irq:
ccg_write(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
+ if (!ret && test_bit(DEV_CMD_PENDING, &uc->flags) &&
+ cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+ complete(&uc->complete);
+
return IRQ_HANDLED;
}
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index faccc942b381b..ce08eb33e5bec 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -255,6 +255,20 @@ static void pmic_glink_ucsi_notify(struct work_struct *work)
static void pmic_glink_ucsi_register(struct work_struct *work)
{
struct pmic_glink_ucsi *ucsi = container_of(work, struct pmic_glink_ucsi, register_work);
+ int orientation;
+ int i;
+
+ for (i = 0; i < PMIC_GLINK_MAX_PORTS; i++) {
+ if (!ucsi->port_orientation[i])
+ continue;
+ orientation = gpiod_get_value(ucsi->port_orientation[i]);
+
+ if (orientation >= 0) {
+ typec_switch_set(ucsi->port_switch[i],
+ orientation ? TYPEC_ORIENTATION_REVERSE
+ : TYPEC_ORIENTATION_NORMAL);
+ }
+ }
ucsi_register(ucsi->ucsi);
}
@@ -298,6 +312,7 @@ static void pmic_glink_ucsi_destroy(void *data)
}
static const struct of_device_id pmic_glink_ucsi_of_quirks[] = {
+ { .compatible = "qcom,qcm6490-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
{ .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
{ .compatible = "qcom,sc8280xp-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
{ .compatible = "qcom,sm8350-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
diff --git a/drivers/vdpa/alibaba/eni_vdpa.c b/drivers/vdpa/alibaba/eni_vdpa.c
index cce3d1837104c..ad7f3447fe90c 100644
--- a/drivers/vdpa/alibaba/eni_vdpa.c
+++ b/drivers/vdpa/alibaba/eni_vdpa.c
@@ -254,6 +254,13 @@ static u16 eni_vdpa_get_vq_num_min(struct vdpa_device *vdpa)
return vp_legacy_get_queue_size(ldev, 0);
}
+static u16 eni_vdpa_get_vq_size(struct vdpa_device *vdpa, u16 qid)
+{
+ struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+
+ return vp_legacy_get_queue_size(ldev, qid);
+}
+
static int eni_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
struct vdpa_vq_state *state)
{
@@ -416,6 +423,7 @@ static const struct vdpa_config_ops eni_vdpa_ops = {
.reset = eni_vdpa_reset,
.get_vq_num_max = eni_vdpa_get_vq_num_max,
.get_vq_num_min = eni_vdpa_get_vq_num_min,
+ .get_vq_size = eni_vdpa_get_vq_size,
.get_vq_state = eni_vdpa_get_vq_state,
.set_vq_state = eni_vdpa_set_vq_state,
.set_vq_cb = eni_vdpa_set_vq_cb,
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 060f837a4f9f7..472daa588a9d2 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -69,20 +69,19 @@ static int ifcvf_read_config_range(struct pci_dev *dev,
return 0;
}
-static u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid)
+u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid)
{
u16 queue_size;
+ if (qid >= hw->nr_vring)
+ return 0;
+
vp_iowrite16(qid, &hw->common_cfg->queue_select);
queue_size = vp_ioread16(&hw->common_cfg->queue_size);
return queue_size;
}
-/* This function returns the max allowed safe size for
- * all virtqueues. It is the minimal size that can be
- * suppprted by all virtqueues.
- */
u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw)
{
u16 queue_size, max_size, qid;
@@ -94,7 +93,7 @@ u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw)
if (!queue_size)
continue;
- max_size = min(queue_size, max_size);
+ max_size = max(queue_size, max_size);
}
return max_size;
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index b57849c643f61..0f347717021a2 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -28,6 +28,7 @@
#define IFCVF_PCI_MAX_RESOURCE 6
#define IFCVF_LM_BAR 4
+#define IFCVF_MIN_VQ_SIZE 64
#define IFCVF_ERR(pdev, fmt, ...) dev_err(&pdev->dev, fmt, ##__VA_ARGS__)
#define IFCVF_DBG(pdev, fmt, ...) dev_dbg(&pdev->dev, fmt, ##__VA_ARGS__)
@@ -131,4 +132,5 @@ void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
void ifcvf_set_driver_features(struct ifcvf_hw *hw, u64 features);
u64 ifcvf_get_driver_features(struct ifcvf_hw *hw);
u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw);
+u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid);
#endif /* _IFCVF_H_ */
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index e98fa8100f3cc..80d0a04608858 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -456,6 +456,11 @@ static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
return ifcvf_get_max_vq_size(vf);
}
+static u16 ifcvf_vdpa_get_vq_num_min(struct vdpa_device *vdpa_dev)
+{
+ return IFCVF_MIN_VQ_SIZE;
+}
+
static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
struct vdpa_vq_state *state)
{
@@ -597,6 +602,14 @@ static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
return -EINVAL;
}
+static u16 ifcvf_vdpa_get_vq_size(struct vdpa_device *vdpa_dev,
+ u16 qid)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return ifcvf_get_vq_size(vf, qid);
+}
+
static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
u16 idx)
{
@@ -624,6 +637,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
.set_status = ifcvf_vdpa_set_status,
.reset = ifcvf_vdpa_reset,
.get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
+ .get_vq_num_min = ifcvf_vdpa_get_vq_num_min,
.get_vq_state = ifcvf_vdpa_get_vq_state,
.set_vq_state = ifcvf_vdpa_set_vq_state,
.set_vq_cb = ifcvf_vdpa_set_vq_cb,
@@ -632,6 +646,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
.set_vq_num = ifcvf_vdpa_set_vq_num,
.set_vq_address = ifcvf_vdpa_set_vq_address,
.get_vq_irq = ifcvf_vdpa_get_vq_irq,
+ .get_vq_size = ifcvf_vdpa_get_vq_size,
.kick_vq = ifcvf_vdpa_kick_vq,
.get_generation = ifcvf_vdpa_get_generation,
.get_device_id = ifcvf_vdpa_get_device_id,
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 778821bab7d93..ecfc16151d619 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -151,8 +151,6 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev);
static bool mlx5_vdpa_debug;
-#define MLX5_CVQ_MAX_ENT 16
-
#define MLX5_LOG_VIO_FLAG(_feature) \
do { \
if (features & BIT_ULL(_feature)) \
@@ -2276,9 +2274,16 @@ static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
- if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx))
+ if (!is_index_valid(mvdev, idx))
return;
+ if (is_ctrl_vq_idx(mvdev, idx)) {
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
+
+ cvq->vring.vring.num = num;
+ return;
+ }
+
mvq = &ndev->vqs[idx];
mvq->num_ent = num;
}
@@ -2963,7 +2968,7 @@ static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev)
u16 idx = cvq->vring.last_avail_idx;
err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
- MLX5_CVQ_MAX_ENT, false,
+ cvq->vring.vring.num, false,
(struct vring_desc *)(uintptr_t)cvq->desc_addr,
(struct vring_avail *)(uintptr_t)cvq->driver_addr,
(struct vring_used *)(uintptr_t)cvq->device_addr);
diff --git a/drivers/vdpa/pds/aux_drv.c b/drivers/vdpa/pds/aux_drv.c
index 186e9ee22eb11..f57330cf90246 100644
--- a/drivers/vdpa/pds/aux_drv.c
+++ b/drivers/vdpa/pds/aux_drv.c
@@ -93,8 +93,8 @@ static void pds_vdpa_remove(struct auxiliary_device *aux_dev)
struct device *dev = &aux_dev->dev;
vdpa_mgmtdev_unregister(&vdpa_aux->vdpa_mdev);
+ pds_vdpa_release_irqs(vdpa_aux->pdsv);
vp_modern_remove(&vdpa_aux->vd_mdev);
- pci_free_irq_vectors(vdpa_aux->padev->vf_pdev);
pds_vdpa_debugfs_del_vdpadev(vdpa_aux);
kfree(vdpa_aux);
diff --git a/drivers/vdpa/pds/vdpa_dev.c b/drivers/vdpa/pds/vdpa_dev.c
index 25c0fe5ec3d5d..301d95e085960 100644
--- a/drivers/vdpa/pds/vdpa_dev.c
+++ b/drivers/vdpa/pds/vdpa_dev.c
@@ -426,12 +426,18 @@ err_release:
return err;
}
-static void pds_vdpa_release_irqs(struct pds_vdpa_device *pdsv)
+void pds_vdpa_release_irqs(struct pds_vdpa_device *pdsv)
{
- struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev;
- struct pds_vdpa_aux *vdpa_aux = pdsv->vdpa_aux;
+ struct pds_vdpa_aux *vdpa_aux;
+ struct pci_dev *pdev;
int qid;
+ if (!pdsv)
+ return;
+
+ pdev = pdsv->vdpa_aux->padev->vf_pdev;
+ vdpa_aux = pdsv->vdpa_aux;
+
if (!vdpa_aux->nintrs)
return;
@@ -612,6 +618,7 @@ static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
struct device *dma_dev;
struct pci_dev *pdev;
struct device *dev;
+ u8 status;
int err;
int i;
@@ -638,6 +645,13 @@ static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
dma_dev = &pdev->dev;
pdsv->vdpa_dev.dma_dev = dma_dev;
+ status = pds_vdpa_get_status(&pdsv->vdpa_dev);
+ if (status == 0xff) {
+ dev_err(dev, "Broken PCI - status %#x\n", status);
+ err = -ENXIO;
+ goto err_unmap;
+ }
+
pdsv->supported_features = mgmt->supported_features;
if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
diff --git a/drivers/vdpa/pds/vdpa_dev.h b/drivers/vdpa/pds/vdpa_dev.h
index d984ba24a7dae..84bdb45871ff0 100644
--- a/drivers/vdpa/pds/vdpa_dev.h
+++ b/drivers/vdpa/pds/vdpa_dev.h
@@ -46,5 +46,6 @@ struct pds_vdpa_device {
#define PDS_VDPA_PACKED_INVERT_IDX 0x8000
+void pds_vdpa_release_irqs(struct pds_vdpa_device *pdsv);
int pds_vdpa_get_mgmt_info(struct pds_vdpa_aux *vdpa_aux);
#endif /* _VDPA_DEV_H_ */
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index d0695680b282e..b246067e074bc 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -115,7 +115,7 @@ static const struct attribute_group vdpa_dev_group = {
};
__ATTRIBUTE_GROUPS(vdpa_dev);
-static struct bus_type vdpa_bus = {
+static const struct bus_type vdpa_bus = {
.name = "vdpa",
.dev_groups = vdpa_dev_groups,
.match = vdpa_dev_match,
@@ -945,6 +945,215 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms
}
static int
+vdpa_dev_blk_capacity_config_fill(struct sk_buff *msg,
+ const struct virtio_blk_config *config)
+{
+ u64 val_u64;
+
+ val_u64 = __virtio64_to_cpu(true, config->capacity);
+
+ return nla_put_u64_64bit(msg, VDPA_ATTR_DEV_BLK_CFG_CAPACITY,
+ val_u64, VDPA_ATTR_PAD);
+}
+
+static int
+vdpa_dev_blk_seg_size_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_blk_config *config)
+{
+ u32 val_u32;
+
+ if ((features & BIT_ULL(VIRTIO_BLK_F_SIZE_MAX)) == 0)
+ return 0;
+
+ val_u32 = __virtio32_to_cpu(true, config->size_max);
+
+ return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SEG_SIZE, val_u32);
+}
+
+/* fill the block size */
+static int
+vdpa_dev_blk_block_size_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_blk_config *config)
+{
+ u32 val_u32;
+
+ if ((features & BIT_ULL(VIRTIO_BLK_F_BLK_SIZE)) == 0)
+ return 0;
+
+ val_u32 = __virtio32_to_cpu(true, config->blk_size);
+
+ return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_BLK_SIZE, val_u32);
+}
+
+static int
+vdpa_dev_blk_seg_max_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_blk_config *config)
+{
+ u32 val_u32;
+
+ if ((features & BIT_ULL(VIRTIO_BLK_F_SEG_MAX)) == 0)
+ return 0;
+
+ val_u32 = __virtio32_to_cpu(true, config->seg_max);
+
+ return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SEG_MAX, val_u32);
+}
+
+static int vdpa_dev_blk_mq_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_blk_config *config)
+{
+ u16 val_u16;
+
+ if ((features & BIT_ULL(VIRTIO_BLK_F_MQ)) == 0)
+ return 0;
+
+ val_u16 = __virtio16_to_cpu(true, config->num_queues);
+
+ return nla_put_u16(msg, VDPA_ATTR_DEV_BLK_CFG_NUM_QUEUES, val_u16);
+}
+
+static int vdpa_dev_blk_topology_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_blk_config *config)
+{
+ u16 min_io_size;
+ u32 opt_io_size;
+
+ if ((features & BIT_ULL(VIRTIO_BLK_F_TOPOLOGY)) == 0)
+ return 0;
+
+ min_io_size = __virtio16_to_cpu(true, config->min_io_size);
+ opt_io_size = __virtio32_to_cpu(true, config->opt_io_size);
+
+ if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_PHY_BLK_EXP,
+ config->physical_block_exp))
+ return -EMSGSIZE;
+
+ if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_ALIGN_OFFSET,
+ config->alignment_offset))
+ return -EMSGSIZE;
+
+ if (nla_put_u16(msg, VDPA_ATTR_DEV_BLK_CFG_MIN_IO_SIZE, min_io_size))
+ return -EMSGSIZE;
+
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_OPT_IO_SIZE, opt_io_size))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int vdpa_dev_blk_discard_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_blk_config *config)
+{
+ u32 val_u32;
+
+ if ((features & BIT_ULL(VIRTIO_BLK_F_DISCARD)) == 0)
+ return 0;
+
+ val_u32 = __virtio32_to_cpu(true, config->max_discard_sectors);
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_DISCARD_SEC, val_u32))
+ return -EMSGSIZE;
+
+ val_u32 = __virtio32_to_cpu(true, config->max_discard_seg);
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_DISCARD_SEG, val_u32))
+ return -EMSGSIZE;
+
+ val_u32 = __virtio32_to_cpu(true, config->discard_sector_alignment);
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_DISCARD_SEC_ALIGN, val_u32))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+vdpa_dev_blk_write_zeroes_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_blk_config *config)
+{
+ u32 val_u32;
+
+ if ((features & BIT_ULL(VIRTIO_BLK_F_WRITE_ZEROES)) == 0)
+ return 0;
+
+ val_u32 = __virtio32_to_cpu(true, config->max_write_zeroes_sectors);
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEC, val_u32))
+ return -EMSGSIZE;
+
+ val_u32 = __virtio32_to_cpu(true, config->max_write_zeroes_seg);
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEG, val_u32))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int vdpa_dev_blk_ro_config_fill(struct sk_buff *msg, u64 features)
+{
+ u8 ro;
+
+ ro = ((features & BIT_ULL(VIRTIO_BLK_F_RO)) == 0) ? 0 : 1;
+ if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_READ_ONLY, ro))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int vdpa_dev_blk_flush_config_fill(struct sk_buff *msg, u64 features)
+{
+ u8 flush;
+
+ flush = ((features & BIT_ULL(VIRTIO_BLK_F_FLUSH)) == 0) ? 0 : 1;
+ if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_FLUSH, flush))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int vdpa_dev_blk_config_fill(struct vdpa_device *vdev,
+ struct sk_buff *msg)
+{
+ struct virtio_blk_config config = {};
+ u64 features_device;
+
+ vdev->config->get_config(vdev, 0, &config, sizeof(config));
+
+ features_device = vdev->config->get_device_features(vdev);
+
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_FEATURES, features_device,
+ VDPA_ATTR_PAD))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_capacity_config_fill(msg, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_seg_size_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_block_size_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_seg_max_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_mq_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_topology_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_discard_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_write_zeroes_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_ro_config_fill(msg, features_device))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_flush_config_fill(msg, features_device))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
int flags, struct netlink_ext_ack *extack)
{
@@ -988,6 +1197,9 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
case VIRTIO_ID_NET:
err = vdpa_dev_net_config_fill(vdev, msg);
break;
+ case VIRTIO_ID_BLOCK:
+ err = vdpa_dev_blk_config_fill(vdev, msg);
+ break;
default:
err = -EOPNOTSUPP;
break;
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index be2925d0d2836..8ffea8430f95f 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -160,7 +160,7 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim, u32 flags)
}
}
- vdpasim->running = true;
+ vdpasim->running = false;
spin_unlock(&vdpasim->iommu_lock);
vdpasim->features = 0;
@@ -311,6 +311,17 @@ static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
vq->num = num;
}
+static u16 vdpasim_get_vq_size(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+
+ if (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)
+ return vq->num;
+ else
+ return VDPASIM_QUEUE_MAX;
+}
+
static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -483,6 +494,7 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
mutex_lock(&vdpasim->mutex);
vdpasim->status = status;
+ vdpasim->running = (status & VIRTIO_CONFIG_S_DRIVER_OK) != 0;
mutex_unlock(&vdpasim->mutex);
}
@@ -774,6 +786,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.get_driver_features = vdpasim_get_driver_features,
.set_config_cb = vdpasim_set_config_cb,
.get_vq_num_max = vdpasim_get_vq_num_max,
+ .get_vq_size = vdpasim_get_vq_size,
.get_device_id = vdpasim_get_device_id,
.get_vendor_id = vdpasim_get_vendor_id,
.get_status = vdpasim_get_status,
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 5e4a77b9bae6b..791d38d6284c5 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -373,6 +373,26 @@ static void vduse_domain_free_iova(struct iova_domain *iovad,
free_iova_fast(iovad, iova >> shift, iova_len);
}
+void vduse_domain_sync_single_for_device(struct vduse_iova_domain *domain,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ read_lock(&domain->bounce_lock);
+ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+ vduse_domain_bounce(domain, dma_addr, size, DMA_TO_DEVICE);
+ read_unlock(&domain->bounce_lock);
+}
+
+void vduse_domain_sync_single_for_cpu(struct vduse_iova_domain *domain,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ read_lock(&domain->bounce_lock);
+ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+ vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
+ read_unlock(&domain->bounce_lock);
+}
+
dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,
@@ -393,7 +413,8 @@ dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa))
goto err_unlock;
- if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE);
read_unlock(&domain->bounce_lock);
@@ -411,9 +432,9 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
enum dma_data_direction dir, unsigned long attrs)
{
struct iova_domain *iovad = &domain->stream_iovad;
-
read_lock(&domain->bounce_lock);
- if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size);
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index 173e979b84a93..f92f22a7267d7 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -44,6 +44,14 @@ int vduse_domain_set_map(struct vduse_iova_domain *domain,
void vduse_domain_clear_map(struct vduse_iova_domain *domain,
struct vhost_iotlb *iotlb);
+void vduse_domain_sync_single_for_device(struct vduse_iova_domain *domain,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir);
+
+void vduse_domain_sync_single_for_cpu(struct vduse_iova_domain *domain,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir);
+
dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 1d24da79c3995..73c89701fc9d4 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -541,6 +541,17 @@ static void vduse_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
vq->num = num;
}
+static u16 vduse_vdpa_get_vq_size(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = dev->vqs[idx];
+
+ if (vq->num)
+ return vq->num;
+ else
+ return vq->num_max;
+}
+
static void vduse_vdpa_set_vq_ready(struct vdpa_device *vdpa,
u16 idx, bool ready)
{
@@ -773,6 +784,7 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
.kick_vq = vduse_vdpa_kick_vq,
.set_vq_cb = vduse_vdpa_set_vq_cb,
.set_vq_num = vduse_vdpa_set_vq_num,
+ .get_vq_size = vduse_vdpa_get_vq_size,
.set_vq_ready = vduse_vdpa_set_vq_ready,
.get_vq_ready = vduse_vdpa_get_vq_ready,
.set_vq_state = vduse_vdpa_set_vq_state,
@@ -798,6 +810,26 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
.free = vduse_vdpa_free,
};
+static void vduse_dev_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+
+ vduse_domain_sync_single_for_device(domain, dma_addr, size, dir);
+}
+
+static void vduse_dev_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+
+ vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir);
+}
+
static dma_addr_t vduse_dev_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
@@ -858,6 +890,8 @@ static size_t vduse_dev_max_mapping_size(struct device *dev)
}
static const struct dma_map_ops vduse_dev_dma_ops = {
+ .sync_single_for_device = vduse_dev_sync_single_for_device,
+ .sync_single_for_cpu = vduse_dev_sync_single_for_cpu,
.map_page = vduse_dev_map_page,
.unmap_page = vduse_dev_unmap_page,
.alloc = vduse_dev_alloc_coherent,
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index 281287fae89f1..df5f4a3bccb57 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -328,6 +328,13 @@ static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
vp_modern_set_queue_size(mdev, qid, num);
}
+static u16 vp_vdpa_get_vq_size(struct vdpa_device *vdpa, u16 qid)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return vp_modern_get_queue_size(mdev, qid);
+}
+
static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
u64 desc_area, u64 driver_area,
u64 device_area)
@@ -449,6 +456,7 @@ static const struct vdpa_config_ops vp_vdpa_ops = {
.set_vq_ready = vp_vdpa_set_vq_ready,
.get_vq_ready = vp_vdpa_get_vq_ready,
.set_vq_num = vp_vdpa_set_vq_num,
+ .get_vq_size = vp_vdpa_get_vq_size,
.set_vq_address = vp_vdpa_set_vq_address,
.kick_vq = vp_vdpa_kick_vq,
.get_generation = vp_vdpa_get_generation,
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
index d62fbfff20b82..82b2afa9b7e31 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
@@ -141,13 +141,14 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
irq = &vdev->mc_irqs[index];
if (flags & VFIO_IRQ_SET_DATA_NONE) {
- vfio_fsl_mc_irq_handler(hwirq, irq);
+ if (irq->trigger)
+ eventfd_signal(irq->trigger);
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
u8 trigger = *(u8 *)data;
- if (trigger)
- vfio_fsl_mc_irq_handler(hwirq, irq);
+ if (trigger && irq->trigger)
+ eventfd_signal(irq->trigger);
}
return 0;
diff --git a/drivers/vfio/mdev/mdev_driver.c b/drivers/vfio/mdev/mdev_driver.c
index 7825d83a55f8c..b98322966b3ed 100644
--- a/drivers/vfio/mdev/mdev_driver.c
+++ b/drivers/vfio/mdev/mdev_driver.c
@@ -40,7 +40,7 @@ static int mdev_match(struct device *dev, struct device_driver *drv)
return 0;
}
-struct bus_type mdev_bus_type = {
+const struct bus_type mdev_bus_type = {
.name = "mdev",
.probe = mdev_probe,
.remove = mdev_remove,
diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
index af457b27f6074..63a1316b08b72 100644
--- a/drivers/vfio/mdev/mdev_private.h
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -13,7 +13,7 @@
int mdev_bus_register(void);
void mdev_bus_unregister(void);
-extern struct bus_type mdev_bus_type;
+extern const struct bus_type mdev_bus_type;
extern const struct attribute_group *mdev_device_groups[];
#define to_mdev_type_attr(_attr) \
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 18c397df566d8..15821a2d77d25 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -67,4 +67,6 @@ source "drivers/vfio/pci/pds/Kconfig"
source "drivers/vfio/pci/virtio/Kconfig"
+source "drivers/vfio/pci/nvgrace-gpu/Kconfig"
+
endmenu
diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
index 046139a4eca5b..ce7a61f1d912b 100644
--- a/drivers/vfio/pci/Makefile
+++ b/drivers/vfio/pci/Makefile
@@ -15,3 +15,5 @@ obj-$(CONFIG_HISI_ACC_VFIO_PCI) += hisilicon/
obj-$(CONFIG_PDS_VFIO_PCI) += pds/
obj-$(CONFIG_VIRTIO_VFIO_PCI) += virtio/
+
+obj-$(CONFIG_NVGRACE_GPU_VFIO_PCI) += nvgrace-gpu/
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index 4d27465c8f1a8..9a3e97108ace8 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -630,25 +630,11 @@ static void hisi_acc_vf_disable_fds(struct hisi_acc_vf_core_device *hisi_acc_vde
}
}
-/*
- * This function is called in all state_mutex unlock cases to
- * handle a 'deferred_reset' if exists.
- */
-static void
-hisi_acc_vf_state_mutex_unlock(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+static void hisi_acc_vf_reset(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
-again:
- spin_lock(&hisi_acc_vdev->reset_lock);
- if (hisi_acc_vdev->deferred_reset) {
- hisi_acc_vdev->deferred_reset = false;
- spin_unlock(&hisi_acc_vdev->reset_lock);
- hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
- hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
- hisi_acc_vf_disable_fds(hisi_acc_vdev);
- goto again;
- }
- mutex_unlock(&hisi_acc_vdev->state_mutex);
- spin_unlock(&hisi_acc_vdev->reset_lock);
+ hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
+ hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+ hisi_acc_vf_disable_fds(hisi_acc_vdev);
}
static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
@@ -804,8 +790,10 @@ static long hisi_acc_vf_precopy_ioctl(struct file *filp,
info.dirty_bytes = 0;
info.initial_bytes = migf->total_length - *pos;
+ mutex_unlock(&migf->lock);
+ mutex_unlock(&hisi_acc_vdev->state_mutex);
- ret = copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
+ return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
out:
mutex_unlock(&migf->lock);
mutex_unlock(&hisi_acc_vdev->state_mutex);
@@ -1071,7 +1059,7 @@ hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev,
break;
}
}
- hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
+ mutex_unlock(&hisi_acc_vdev->state_mutex);
return res;
}
@@ -1092,7 +1080,7 @@ hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
mutex_lock(&hisi_acc_vdev->state_mutex);
*curr_state = hisi_acc_vdev->mig_state;
- hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
+ mutex_unlock(&hisi_acc_vdev->state_mutex);
return 0;
}
@@ -1104,21 +1092,9 @@ static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
VFIO_MIGRATION_STOP_COPY)
return;
- /*
- * As the higher VFIO layers are holding locks across reset and using
- * those same locks with the mm_lock we need to prevent ABBA deadlock
- * with the state_mutex and mm_lock.
- * In case the state_mutex was taken already we defer the cleanup work
- * to the unlock flow of the other running context.
- */
- spin_lock(&hisi_acc_vdev->reset_lock);
- hisi_acc_vdev->deferred_reset = true;
- if (!mutex_trylock(&hisi_acc_vdev->state_mutex)) {
- spin_unlock(&hisi_acc_vdev->reset_lock);
- return;
- }
- spin_unlock(&hisi_acc_vdev->reset_lock);
- hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
+ mutex_lock(&hisi_acc_vdev->state_mutex);
+ hisi_acc_vf_reset(hisi_acc_vdev);
+ mutex_unlock(&hisi_acc_vdev->state_mutex);
}
static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
index dcabfeec6ca19..5bab46602fad2 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
@@ -98,8 +98,8 @@ struct hisi_acc_vf_migration_file {
struct hisi_acc_vf_core_device {
struct vfio_pci_core_device core_device;
- u8 match_done:1;
- u8 deferred_reset:1;
+ u8 match_done;
+
/* For migration state */
struct mutex state_mutex;
enum vfio_device_mig_state mig_state;
@@ -109,8 +109,6 @@ struct hisi_acc_vf_core_device {
struct hisi_qm vf_qm;
u32 vf_qm_state;
int vf_id;
- /* For reset handler */
- spinlock_t reset_lock;
struct hisi_acc_vf_migration_file *resuming_migf;
struct hisi_acc_vf_migration_file *saving_migf;
};
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index efd1d252cdc95..41a4b0cf42975 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -108,8 +108,9 @@ int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
ret = wait_for_completion_interruptible(&mvdev->saving_migf->save_comp);
if (ret)
return ret;
- if (mvdev->saving_migf->state ==
- MLX5_MIGF_STATE_PRE_COPY_ERROR) {
+ /* Upon cleanup, ignore previous pre_copy error state */
+ if (mvdev->saving_migf->state == MLX5_MIGF_STATE_PRE_COPY_ERROR &&
+ !(query_flags & MLX5VF_QUERY_CLEANUP)) {
/*
* In case we had a PRE_COPY error, only query full
* image for final image
@@ -121,6 +122,11 @@ int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
}
query_flags &= ~MLX5VF_QUERY_INC;
}
+ /* Block incremental query which is state-dependent */
+ if (mvdev->saving_migf->state == MLX5_MIGF_STATE_ERROR) {
+ complete(&mvdev->saving_migf->save_comp);
+ return -ENODEV;
+ }
}
MLX5_SET(query_vhca_migration_state_in, in, opcode,
@@ -149,6 +155,12 @@ int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
return 0;
}
+static void set_tracker_change_event(struct mlx5vf_pci_core_device *mvdev)
+{
+ mvdev->tracker.object_changed = true;
+ complete(&mvdev->tracker_comp);
+}
+
static void set_tracker_error(struct mlx5vf_pci_core_device *mvdev)
{
/* Mark the tracker under an error and wake it up if it's running */
@@ -189,7 +201,7 @@ void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev)
/* Must be done outside the lock to let it progress */
set_tracker_error(mvdev);
mutex_lock(&mvdev->state_mutex);
- mlx5vf_disable_fds(mvdev);
+ mlx5vf_disable_fds(mvdev, NULL);
_mlx5vf_free_page_tracker_resources(mvdev);
mlx5vf_state_mutex_unlock(mvdev);
}
@@ -221,6 +233,10 @@ void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
if (!MLX5_CAP_GEN(mvdev->mdev, migration))
goto end;
+ if (!(MLX5_CAP_GEN_2(mvdev->mdev, migration_multi_load) &&
+ MLX5_CAP_GEN_2(mvdev->mdev, migration_tracking_state)))
+ goto end;
+
mvdev->vf_id = pci_iov_vf_id(pdev);
if (mvdev->vf_id < 0)
goto end;
@@ -250,17 +266,14 @@ void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
mvdev->migrate_cap = 1;
mvdev->core_device.vdev.migration_flags =
VFIO_MIGRATION_STOP_COPY |
- VFIO_MIGRATION_P2P;
+ VFIO_MIGRATION_P2P |
+ VFIO_MIGRATION_PRE_COPY;
+
mvdev->core_device.vdev.mig_ops = mig_ops;
init_completion(&mvdev->tracker_comp);
if (MLX5_CAP_GEN(mvdev->mdev, adv_virtualization))
mvdev->core_device.vdev.log_ops = log_ops;
- if (MLX5_CAP_GEN_2(mvdev->mdev, migration_multi_load) &&
- MLX5_CAP_GEN_2(mvdev->mdev, migration_tracking_state))
- mvdev->core_device.vdev.migration_flags |=
- VFIO_MIGRATION_PRE_COPY;
-
if (MLX5_CAP_GEN_2(mvdev->mdev, migration_in_chunks))
mvdev->chunk_mode = 1;
@@ -402,6 +415,50 @@ void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf)
kfree(buf);
}
+static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
+ unsigned int npages)
+{
+ unsigned int to_alloc = npages;
+ struct page **page_list;
+ unsigned long filled;
+ unsigned int to_fill;
+ int ret;
+
+ to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list));
+ page_list = kvzalloc(to_fill * sizeof(*page_list), GFP_KERNEL_ACCOUNT);
+ if (!page_list)
+ return -ENOMEM;
+
+ do {
+ filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill,
+ page_list);
+ if (!filled) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ to_alloc -= filled;
+ ret = sg_alloc_append_table_from_pages(
+ &buf->table, page_list, filled, 0,
+ filled << PAGE_SHIFT, UINT_MAX, SG_MAX_SINGLE_ALLOC,
+ GFP_KERNEL_ACCOUNT);
+
+ if (ret)
+ goto err;
+ buf->allocated_length += filled * PAGE_SIZE;
+ /* clean input for another bulk allocation */
+ memset(page_list, 0, filled * sizeof(*page_list));
+ to_fill = min_t(unsigned int, to_alloc,
+ PAGE_SIZE / sizeof(*page_list));
+ } while (to_alloc > 0);
+
+ kvfree(page_list);
+ return 0;
+
+err:
+ kvfree(page_list);
+ return ret;
+}
+
struct mlx5_vhca_data_buffer *
mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf,
size_t length,
@@ -608,8 +665,13 @@ static void mlx5vf_save_callback(int status, struct mlx5_async_work *context)
err:
/* The error flow can't run from an interrupt context */
- if (status == -EREMOTEIO)
+ if (status == -EREMOTEIO) {
status = MLX5_GET(save_vhca_state_out, async_data->out, status);
+ /* Failed in FW, print cmd out failure details */
+ mlx5_cmd_out_err(migf->mvdev->mdev, MLX5_CMD_OP_SAVE_VHCA_STATE, 0,
+ async_data->out);
+ }
+
async_data->status = status;
queue_work(migf->mvdev->cb_wq, &async_data->work);
}
@@ -623,6 +685,7 @@ int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {};
struct mlx5_vhca_data_buffer *header_buf = NULL;
struct mlx5vf_async_data *async_data;
+ bool pre_copy_cleanup = false;
int err;
lockdep_assert_held(&mvdev->state_mutex);
@@ -633,6 +696,10 @@ int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
if (err)
return err;
+ if ((migf->state == MLX5_MIGF_STATE_PRE_COPY ||
+ migf->state == MLX5_MIGF_STATE_PRE_COPY_ERROR) && !track && !inc)
+ pre_copy_cleanup = true;
+
if (migf->state == MLX5_MIGF_STATE_PRE_COPY_ERROR)
/*
* In case we had a PRE_COPY error, SAVE is triggered only for
@@ -651,29 +718,27 @@ int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
async_data = &migf->async_data;
async_data->buf = buf;
- async_data->stop_copy_chunk = !track;
+ async_data->stop_copy_chunk = (!track && !pre_copy_cleanup);
async_data->out = kvzalloc(out_size, GFP_KERNEL);
if (!async_data->out) {
err = -ENOMEM;
goto err_out;
}
- if (MLX5VF_PRE_COPY_SUPP(mvdev)) {
- if (async_data->stop_copy_chunk) {
- u8 header_idx = buf->stop_copy_chunk_num ?
- buf->stop_copy_chunk_num - 1 : 0;
+ if (async_data->stop_copy_chunk) {
+ u8 header_idx = buf->stop_copy_chunk_num ?
+ buf->stop_copy_chunk_num - 1 : 0;
- header_buf = migf->buf_header[header_idx];
- migf->buf_header[header_idx] = NULL;
- }
+ header_buf = migf->buf_header[header_idx];
+ migf->buf_header[header_idx] = NULL;
+ }
- if (!header_buf) {
- header_buf = mlx5vf_get_data_buffer(migf,
- sizeof(struct mlx5_vf_migration_header), DMA_NONE);
- if (IS_ERR(header_buf)) {
- err = PTR_ERR(header_buf);
- goto err_free;
- }
+ if (!header_buf) {
+ header_buf = mlx5vf_get_data_buffer(migf,
+ sizeof(struct mlx5_vf_migration_header), DMA_NONE);
+ if (IS_ERR(header_buf)) {
+ err = PTR_ERR(header_buf);
+ goto err_free;
}
}
@@ -900,6 +965,29 @@ static int mlx5vf_cmd_modify_tracker(struct mlx5_core_dev *mdev,
return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
+static int mlx5vf_cmd_query_tracker(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_page_tracker *tracker)
+{
+ u32 out[MLX5_ST_SZ_DW(query_page_track_obj_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ void *obj_context;
+ void *cmd_hdr;
+ int err;
+
+ cmd_hdr = MLX5_ADDR_OF(modify_page_track_obj_in, in, general_obj_in_cmd_hdr);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_PAGE_TRACK);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, tracker->id);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ obj_context = MLX5_ADDR_OF(query_page_track_obj_out, out, obj_context);
+ tracker->status = MLX5_GET(page_track, obj_context, state);
+ return 0;
+}
+
static int alloc_cq_frag_buf(struct mlx5_core_dev *mdev,
struct mlx5_vhca_cq_buf *buf, int nent,
int cqe_size)
@@ -957,9 +1045,11 @@ static int mlx5vf_event_notifier(struct notifier_block *nb, unsigned long type,
mlx5_nb_cof(nb, struct mlx5_vhca_page_tracker, nb);
struct mlx5vf_pci_core_device *mvdev = container_of(
tracker, struct mlx5vf_pci_core_device, tracker);
+ struct mlx5_eqe_obj_change *object;
struct mlx5_eqe *eqe = data;
u8 event_type = (u8)type;
u8 queue_type;
+ u32 obj_id;
int qp_num;
switch (event_type) {
@@ -975,6 +1065,12 @@ static int mlx5vf_event_notifier(struct notifier_block *nb, unsigned long type,
break;
set_tracker_error(mvdev);
break;
+ case MLX5_EVENT_TYPE_OBJECT_CHANGE:
+ object = &eqe->data.obj_change;
+ obj_id = be32_to_cpu(object->obj_id);
+ if (obj_id == tracker->id)
+ set_tracker_change_event(mvdev);
+ break;
default:
break;
}
@@ -1634,6 +1730,11 @@ int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova,
goto end;
}
+ if (tracker->is_err) {
+ err = -EIO;
+ goto end;
+ }
+
mdev = mvdev->mdev;
err = mlx5vf_cmd_modify_tracker(mdev, tracker->id, iova, length,
MLX5_PAGE_TRACK_STATE_REPORTING);
@@ -1652,6 +1753,12 @@ int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova,
dirty, &tracker->status);
if (poll_err == CQ_EMPTY) {
wait_for_completion(&mvdev->tracker_comp);
+ if (tracker->object_changed) {
+ tracker->object_changed = false;
+ err = mlx5vf_cmd_query_tracker(mdev, tracker);
+ if (err)
+ goto end;
+ }
continue;
}
}
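
For reference, the chunked allocation in mlx5vf_add_migration_pages() above bounds each bulk request by how many page pointers fit into a single page. A minimal sketch of that arithmetic, assuming a 4 KiB PAGE_SIZE and 64-bit pointers (illustrative values, not taken from the patch):

	/* Illustrative only: per-iteration chunk sizing of the allocation loop. */
	unsigned int per_iter = PAGE_SIZE / sizeof(struct page *);	/* 4096 / 8 = 512 pages */
	size_t bytes_per_iter = per_iter * PAGE_SIZE;			/* 512 * 4 KiB = 2 MiB */

So a 1 GiB migration buffer takes 512 iterations of alloc_pages_bulk_array() plus sg_alloc_append_table_from_pages() when every bulk call is fully satisfied, and more when calls return partially filled.
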
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
index f2c7227fa683a..df421dc6de048 100644
--- a/drivers/vfio/pci/mlx5/cmd.h
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -13,9 +13,6 @@
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
-#define MLX5VF_PRE_COPY_SUPP(mvdev) \
- ((mvdev)->core_device.vdev.migration_flags & VFIO_MIGRATION_PRE_COPY)
-
enum mlx5_vf_migf_state {
MLX5_MIGF_STATE_ERROR = 1,
MLX5_MIGF_STATE_PRE_COPY_ERROR,
@@ -25,7 +22,6 @@ enum mlx5_vf_migf_state {
};
enum mlx5_vf_load_state {
- MLX5_VF_LOAD_STATE_READ_IMAGE_NO_HEADER,
MLX5_VF_LOAD_STATE_READ_HEADER,
MLX5_VF_LOAD_STATE_PREP_HEADER_DATA,
MLX5_VF_LOAD_STATE_READ_HEADER_DATA,
@@ -162,6 +158,7 @@ struct mlx5_vhca_page_tracker {
u32 id;
u32 pdn;
u8 is_err:1;
+ u8 object_changed:1;
struct mlx5_uars_page *uar;
struct mlx5_vhca_cq cq;
struct mlx5_vhca_qp *host_qp;
@@ -196,6 +193,7 @@ struct mlx5vf_pci_core_device {
enum {
MLX5VF_QUERY_INC = (1UL << 0),
MLX5VF_QUERY_FINAL = (1UL << 1),
+ MLX5VF_QUERY_CLEANUP = (1UL << 2),
};
int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
@@ -226,12 +224,11 @@ struct mlx5_vhca_data_buffer *
mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf,
size_t length, enum dma_data_direction dma_dir);
void mlx5vf_put_data_buffer(struct mlx5_vhca_data_buffer *buf);
-int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
- unsigned int npages);
struct page *mlx5vf_get_migration_page(struct mlx5_vhca_data_buffer *buf,
unsigned long offset);
void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev);
-void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev,
+ enum mlx5_vf_migf_state *last_save_state);
void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work);
void mlx5vf_mig_file_set_save_work(struct mlx5_vf_migration_file *migf,
u8 chunk_num, size_t next_required_umem_size);
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index fe09a8c8af95e..61d9b0f9146d1 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -65,50 +65,6 @@ mlx5vf_get_migration_page(struct mlx5_vhca_data_buffer *buf,
return NULL;
}
-int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
- unsigned int npages)
-{
- unsigned int to_alloc = npages;
- struct page **page_list;
- unsigned long filled;
- unsigned int to_fill;
- int ret;
-
- to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list));
- page_list = kvzalloc(to_fill * sizeof(*page_list), GFP_KERNEL_ACCOUNT);
- if (!page_list)
- return -ENOMEM;
-
- do {
- filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill,
- page_list);
- if (!filled) {
- ret = -ENOMEM;
- goto err;
- }
- to_alloc -= filled;
- ret = sg_alloc_append_table_from_pages(
- &buf->table, page_list, filled, 0,
- filled << PAGE_SHIFT, UINT_MAX, SG_MAX_SINGLE_ALLOC,
- GFP_KERNEL_ACCOUNT);
-
- if (ret)
- goto err;
- buf->allocated_length += filled * PAGE_SIZE;
- /* clean input for another bulk allocation */
- memset(page_list, 0, filled * sizeof(*page_list));
- to_fill = min_t(unsigned int, to_alloc,
- PAGE_SIZE / sizeof(*page_list));
- } while (to_alloc > 0);
-
- kvfree(page_list);
- return 0;
-
-err:
- kvfree(page_list);
- return ret;
-}
-
static void mlx5vf_disable_fd(struct mlx5_vf_migration_file *migf)
{
mutex_lock(&migf->lock);
@@ -777,36 +733,6 @@ mlx5vf_append_page_to_mig_buf(struct mlx5_vhca_data_buffer *vhca_buf,
return 0;
}
-static int
-mlx5vf_resume_read_image_no_header(struct mlx5_vhca_data_buffer *vhca_buf,
- loff_t requested_length,
- const char __user **buf, size_t *len,
- loff_t *pos, ssize_t *done)
-{
- int ret;
-
- if (requested_length > MAX_LOAD_SIZE)
- return -ENOMEM;
-
- if (vhca_buf->allocated_length < requested_length) {
- ret = mlx5vf_add_migration_pages(
- vhca_buf,
- DIV_ROUND_UP(requested_length - vhca_buf->allocated_length,
- PAGE_SIZE));
- if (ret)
- return ret;
- }
-
- while (*len) {
- ret = mlx5vf_append_page_to_mig_buf(vhca_buf, buf, len, pos,
- done);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static ssize_t
mlx5vf_resume_read_image(struct mlx5_vf_migration_file *migf,
struct mlx5_vhca_data_buffer *vhca_buf,
@@ -1038,13 +964,6 @@ static ssize_t mlx5vf_resume_write(struct file *filp, const char __user *buf,
migf->load_state = MLX5_VF_LOAD_STATE_READ_IMAGE;
break;
}
- case MLX5_VF_LOAD_STATE_READ_IMAGE_NO_HEADER:
- ret = mlx5vf_resume_read_image_no_header(vhca_buf,
- requested_length,
- &buf, &len, pos, &done);
- if (ret)
- goto out_unlock;
- break;
case MLX5_VF_LOAD_STATE_READ_IMAGE:
ret = mlx5vf_resume_read_image(migf, vhca_buf,
migf->record_size,
@@ -1114,21 +1033,16 @@ mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev)
}
migf->buf[0] = buf;
- if (MLX5VF_PRE_COPY_SUPP(mvdev)) {
- buf = mlx5vf_alloc_data_buffer(migf,
- sizeof(struct mlx5_vf_migration_header), DMA_NONE);
- if (IS_ERR(buf)) {
- ret = PTR_ERR(buf);
- goto out_buf;
- }
-
- migf->buf_header[0] = buf;
- migf->load_state = MLX5_VF_LOAD_STATE_READ_HEADER;
- } else {
- /* Initial state will be to read the image */
- migf->load_state = MLX5_VF_LOAD_STATE_READ_IMAGE_NO_HEADER;
+ buf = mlx5vf_alloc_data_buffer(migf,
+ sizeof(struct mlx5_vf_migration_header), DMA_NONE);
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
+ goto out_buf;
}
+ migf->buf_header[0] = buf;
+ migf->load_state = MLX5_VF_LOAD_STATE_READ_HEADER;
+
stream_open(migf->filp->f_inode, migf->filp);
mutex_init(&migf->lock);
INIT_LIST_HEAD(&migf->buf_list);
@@ -1146,7 +1060,8 @@ end:
return ERR_PTR(ret);
}
-void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
+void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev,
+ enum mlx5_vf_migf_state *last_save_state)
{
if (mvdev->resuming_migf) {
mlx5vf_disable_fd(mvdev->resuming_migf);
@@ -1157,6 +1072,8 @@ void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
if (mvdev->saving_migf) {
mlx5_cmd_cleanup_async_ctx(&mvdev->saving_migf->async_ctx);
cancel_work_sync(&mvdev->saving_migf->async_data.work);
+ if (last_save_state)
+ *last_save_state = mvdev->saving_migf->state;
mlx5vf_disable_fd(mvdev->saving_migf);
wake_up_interruptible(&mvdev->saving_migf->poll_wait);
mlx5fv_cmd_clean_migf_resources(mvdev->saving_migf);
@@ -1217,12 +1134,34 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
return migf->filp;
}
- if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) ||
- (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) ||
+ if (cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) {
+ mlx5vf_disable_fds(mvdev, NULL);
+ return NULL;
+ }
+
+ if ((cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) ||
(cur == VFIO_DEVICE_STATE_PRE_COPY_P2P &&
new == VFIO_DEVICE_STATE_RUNNING_P2P)) {
- mlx5vf_disable_fds(mvdev);
- return NULL;
+ struct mlx5_vf_migration_file *migf = mvdev->saving_migf;
+ struct mlx5_vhca_data_buffer *buf;
+ enum mlx5_vf_migf_state state;
+ size_t size;
+
+ ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, &size, NULL,
+ MLX5VF_QUERY_INC | MLX5VF_QUERY_CLEANUP);
+ if (ret)
+ return ERR_PTR(ret);
+ buf = mlx5vf_get_data_buffer(migf, size, DMA_FROM_DEVICE);
+ if (IS_ERR(buf))
+ return ERR_CAST(buf);
+ /* pre_copy cleanup */
+ ret = mlx5vf_cmd_save_vhca_state(mvdev, migf, buf, false, false);
+ if (ret) {
+ mlx5vf_put_data_buffer(buf);
+ return ERR_PTR(ret);
+ }
+ mlx5vf_disable_fds(mvdev, &state);
+ return (state != MLX5_MIGF_STATE_ERROR) ? NULL : ERR_PTR(-EIO);
}
if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
@@ -1237,14 +1176,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
}
if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
- if (!MLX5VF_PRE_COPY_SUPP(mvdev)) {
- ret = mlx5vf_cmd_load_vhca_state(mvdev,
- mvdev->resuming_migf,
- mvdev->resuming_migf->buf[0]);
- if (ret)
- return ERR_PTR(ret);
- }
- mlx5vf_disable_fds(mvdev);
+ mlx5vf_disable_fds(mvdev, NULL);
return NULL;
}
@@ -1289,7 +1221,7 @@ again:
mvdev->deferred_reset = false;
spin_unlock(&mvdev->reset_lock);
mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
- mlx5vf_disable_fds(mvdev);
+ mlx5vf_disable_fds(mvdev, NULL);
goto again;
}
mutex_unlock(&mvdev->state_mutex);
diff --git a/drivers/vfio/pci/nvgrace-gpu/Kconfig b/drivers/vfio/pci/nvgrace-gpu/Kconfig
new file mode 100644
index 0000000000000..a7f624b37e410
--- /dev/null
+++ b/drivers/vfio/pci/nvgrace-gpu/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NVGRACE_GPU_VFIO_PCI
+ tristate "VFIO support for the GPU in the NVIDIA Grace Hopper Superchip"
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ select VFIO_PCI_CORE
+ help
+ VFIO support for the GPU in the NVIDIA Grace Hopper Superchip is
+ required to assign the GPU device to userspace using KVM/qemu/etc.
+
+ If you don't know what to do here, say N.
diff --git a/drivers/vfio/pci/nvgrace-gpu/Makefile b/drivers/vfio/pci/nvgrace-gpu/Makefile
new file mode 100644
index 0000000000000..3ca8c187897a9
--- /dev/null
+++ b/drivers/vfio/pci/nvgrace-gpu/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NVGRACE_GPU_VFIO_PCI) += nvgrace-gpu-vfio-pci.o
+nvgrace-gpu-vfio-pci-y := main.o
diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c
new file mode 100644
index 0000000000000..a7fd018aa5483
--- /dev/null
+++ b/drivers/vfio/pci/nvgrace-gpu/main.c
@@ -0,0 +1,888 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include <linux/sizes.h>
+#include <linux/vfio_pci_core.h>
+
+/*
+ * The device memory usable by the workloads running in the VM is cached
+ * and exposed as a 64b device BAR (comprising the BAR4 and BAR5 regions)
+ * to the VM and is represented as usemem.
+ * Moreover, the VM GPU device driver needs a non-cacheable region to
+ * support the MIG feature. This region is also exposed as a 64b BAR
+ * (comprising the BAR2 and BAR3 regions) and represented as resmem.
+ */
+#define RESMEM_REGION_INDEX VFIO_PCI_BAR2_REGION_INDEX
+#define USEMEM_REGION_INDEX VFIO_PCI_BAR4_REGION_INDEX
+
+/* Memory size expected as non cached and reserved by the VM driver */
+#define RESMEM_SIZE SZ_1G
+
+/* A hardwired and constant ABI value between the GPU FW and VFIO driver. */
+#define MEMBLK_SIZE SZ_512M
+
+/*
+ * The state of the two device memory regions - resmem and usemem - is
+ * saved as struct mem_region.
+ */
+struct mem_region {
+ phys_addr_t memphys; /* Base physical address of the region */
+ size_t memlength; /* Region size */
+ size_t bar_size; /* Reported region BAR size */
+ __le64 bar_val; /* Emulated BAR offset registers */
+ union {
+ void *memaddr;
+ void __iomem *ioaddr;
+ }; /* Base virtual address of the region */
+};
+
+struct nvgrace_gpu_pci_core_device {
+ struct vfio_pci_core_device core_device;
+ /* Cached and usable memory for the VM. */
+ struct mem_region usemem;
+ /* Non cached memory carved out from the end of device memory */
+ struct mem_region resmem;
+ /* Lock to control device memory kernel mapping */
+ struct mutex remap_lock;
+};
+
+static void nvgrace_gpu_init_fake_bar_emu_regs(struct vfio_device *core_vdev)
+{
+ struct nvgrace_gpu_pci_core_device *nvdev =
+ container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
+ core_device.vdev);
+
+ nvdev->resmem.bar_val = 0;
+ nvdev->usemem.bar_val = 0;
+}
+
+/* Choose the structure corresponding to the fake BAR with a given index. */
+static struct mem_region *
+nvgrace_gpu_memregion(int index,
+ struct nvgrace_gpu_pci_core_device *nvdev)
+{
+ if (index == USEMEM_REGION_INDEX)
+ return &nvdev->usemem;
+
+ if (index == RESMEM_REGION_INDEX)
+ return &nvdev->resmem;
+
+ return NULL;
+}
+
+static int nvgrace_gpu_open_device(struct vfio_device *core_vdev)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+ struct nvgrace_gpu_pci_core_device *nvdev =
+ container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
+ core_device.vdev);
+ int ret;
+
+ ret = vfio_pci_core_enable(vdev);
+ if (ret)
+ return ret;
+
+ if (nvdev->usemem.memlength) {
+ nvgrace_gpu_init_fake_bar_emu_regs(core_vdev);
+ mutex_init(&nvdev->remap_lock);
+ }
+
+ vfio_pci_core_finish_enable(vdev);
+
+ return 0;
+}
+
+static void nvgrace_gpu_close_device(struct vfio_device *core_vdev)
+{
+ struct nvgrace_gpu_pci_core_device *nvdev =
+ container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
+ core_device.vdev);
+
+ /* Unmap the mapping to the device memory cached region */
+ if (nvdev->usemem.memaddr) {
+ memunmap(nvdev->usemem.memaddr);
+ nvdev->usemem.memaddr = NULL;
+ }
+
+ /* Unmap the mapping to the device memory non-cached region */
+ if (nvdev->resmem.ioaddr) {
+ iounmap(nvdev->resmem.ioaddr);
+ nvdev->resmem.ioaddr = NULL;
+ }
+
+ mutex_destroy(&nvdev->remap_lock);
+
+ vfio_pci_core_close_device(core_vdev);
+}
+
+static int nvgrace_gpu_mmap(struct vfio_device *core_vdev,
+ struct vm_area_struct *vma)
+{
+ struct nvgrace_gpu_pci_core_device *nvdev =
+ container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
+ core_device.vdev);
+ struct mem_region *memregion;
+ unsigned long start_pfn;
+ u64 req_len, pgoff, end;
+ unsigned int index;
+ int ret = 0;
+
+ index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+
+ memregion = nvgrace_gpu_memregion(index, nvdev);
+ if (!memregion)
+ return vfio_pci_core_mmap(core_vdev, vma);
+
+ /*
+ * Request to mmap the BAR. Map to the CPU accessible memory on the
+ * GPU using the memory information gathered from the system ACPI
+ * tables.
+ */
+ pgoff = vma->vm_pgoff &
+ ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+
+ if (check_sub_overflow(vma->vm_end, vma->vm_start, &req_len) ||
+ check_add_overflow(PHYS_PFN(memregion->memphys), pgoff, &start_pfn) ||
+ check_add_overflow(PFN_PHYS(pgoff), req_len, &end))
+ return -EOVERFLOW;
+
+ /*
+ * Check that the mapping request does not go beyond available device
+ * memory size
+ */
+ if (end > memregion->memlength)
+ return -EINVAL;
+
+ /*
+ * The carved out region of the device memory needs the NORMAL_NC
+ * property. Communicate as such to the hypervisor.
+ */
+ if (index == RESMEM_REGION_INDEX) {
+ /*
+ * The nvgrace-gpu module has no issues with uncontained
+ * failures on NORMAL_NC accesses. VM_ALLOW_ANY_UNCACHED is
+ * set to tell KVM to create the stage-2 (S2) mapping as NORMAL_NC.
+ * This opens up guest usage of NORMAL_NC for this mapping.
+ */
+ vm_flags_set(vma, VM_ALLOW_ANY_UNCACHED);
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ }
+
+ /*
+ * Perform a PFN map to the memory and back the device BAR by the
+ * GPU memory.
+ *
+ * The available GPU memory size may not be power-of-2 aligned. The
+ * remainder is only backed by vfio_device_ops read/write handlers.
+ *
+ * During device reset, the GPU is safely disconnected from the CPU
+ * and accesses to the BAR return immediately, preventing a
+ * machine check.
+ */
+ ret = remap_pfn_range(vma, vma->vm_start, start_pfn,
+ req_len, vma->vm_page_prot);
+ if (ret)
+ return ret;
+
+ vma->vm_pgoff = start_pfn;
+
+ return 0;
+}
+
+static long
+nvgrace_gpu_ioctl_get_region_info(struct vfio_device *core_vdev,
+ unsigned long arg)
+{
+ struct nvgrace_gpu_pci_core_device *nvdev =
+ container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
+ core_device.vdev);
+ unsigned long minsz = offsetofend(struct vfio_region_info, offset);
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ struct vfio_region_info_cap_sparse_mmap *sparse;
+ struct vfio_region_info info;
+ struct mem_region *memregion;
+ u32 size;
+ int ret;
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ /*
+ * Request to determine the BAR region information. Send the
+ * GPU memory information.
+ */
+ memregion = nvgrace_gpu_memregion(info.index, nvdev);
+ if (!memregion)
+ return vfio_pci_core_ioctl(core_vdev,
+ VFIO_DEVICE_GET_REGION_INFO, arg);
+
+ size = struct_size(sparse, areas, 1);
+
+ /*
+ * Set up the sparse mapping for the device memory. Only the
+ * available device memory on the hardware is shown as a
+ * mappable region.
+ */
+ sparse = kzalloc(size, GFP_KERNEL);
+ if (!sparse)
+ return -ENOMEM;
+
+ sparse->nr_areas = 1;
+ sparse->areas[0].offset = 0;
+ sparse->areas[0].size = memregion->memlength;
+ sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
+ sparse->header.version = 1;
+
+ ret = vfio_info_add_capability(&caps, &sparse->header, size);
+ kfree(sparse);
+ if (ret)
+ return ret;
+
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ /*
+ * The region memory size may not be power-of-2 aligned.
+ * Since the memory is exposed as a BAR and may not be
+ * aligned, round the size up to the next power-of-2.
+ */
+ info.size = memregion->bar_size;
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE |
+ VFIO_REGION_INFO_FLAG_MMAP;
+
+ if (caps.size) {
+ info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
+ if (info.argsz < sizeof(info) + caps.size) {
+ info.argsz = sizeof(info) + caps.size;
+ info.cap_offset = 0;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(info));
+ if (copy_to_user((void __user *)arg +
+ sizeof(info), caps.buf,
+ caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
+ }
+ info.cap_offset = sizeof(info);
+ }
+ kfree(caps.buf);
+ }
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+}
+
+static long nvgrace_gpu_ioctl(struct vfio_device *core_vdev,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case VFIO_DEVICE_GET_REGION_INFO:
+ return nvgrace_gpu_ioctl_get_region_info(core_vdev, arg);
+ case VFIO_DEVICE_IOEVENTFD:
+ return -ENOTTY;
+ case VFIO_DEVICE_RESET:
+ nvgrace_gpu_init_fake_bar_emu_regs(core_vdev);
+ fallthrough;
+ default:
+ return vfio_pci_core_ioctl(core_vdev, cmd, arg);
+ }
+}
+
+static __le64
+nvgrace_gpu_get_read_value(size_t bar_size, u64 flags, __le64 val64)
+{
+ u64 tmp_val;
+
+ tmp_val = le64_to_cpu(val64);
+ tmp_val &= ~(bar_size - 1);
+ tmp_val |= flags;
+
+ return cpu_to_le64(tmp_val);
+}
+
+/*
+ * Both the usable (usemem) and the reserved (resmem) device memory regions
+ * are exposed as 64b fake device BARs in the VM. These fake BARs must
+ * respond to accesses at their respective PCI config space offsets.
+ *
+ * resmem BAR owns PCI_BASE_ADDRESS_2 & PCI_BASE_ADDRESS_3.
+ * usemem BAR owns PCI_BASE_ADDRESS_4 & PCI_BASE_ADDRESS_5.
+ */
+static ssize_t
+nvgrace_gpu_read_config_emu(struct vfio_device *core_vdev,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ struct nvgrace_gpu_pci_core_device *nvdev =
+ container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
+ core_device.vdev);
+ u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ struct mem_region *memregion = NULL;
+ __le64 val64;
+ size_t register_offset;
+ loff_t copy_offset;
+ size_t copy_count;
+ int ret;
+
+ ret = vfio_pci_core_read(core_vdev, buf, count, ppos);
+ if (ret < 0)
+ return ret;
+
+ if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_2,
+ sizeof(val64),
+ &copy_offset, &copy_count,
+ &register_offset))
+ memregion = nvgrace_gpu_memregion(RESMEM_REGION_INDEX, nvdev);
+ else if (vfio_pci_core_range_intersect_range(pos, count,
+ PCI_BASE_ADDRESS_4,
+ sizeof(val64),
+ &copy_offset, &copy_count,
+ &register_offset))
+ memregion = nvgrace_gpu_memregion(USEMEM_REGION_INDEX, nvdev);
+
+ if (memregion) {
+ val64 = nvgrace_gpu_get_read_value(memregion->bar_size,
+ PCI_BASE_ADDRESS_MEM_TYPE_64 |
+ PCI_BASE_ADDRESS_MEM_PREFETCH,
+ memregion->bar_val);
+ if (copy_to_user(buf + copy_offset,
+ (void *)&val64 + register_offset, copy_count)) {
+ /*
+ * The position has been incremented in
+ * vfio_pci_core_read. Reset the offset back to the
+ * starting position.
+ */
+ *ppos -= count;
+ return -EFAULT;
+ }
+ }
+
+ return count;
+}
+
+static ssize_t
+nvgrace_gpu_write_config_emu(struct vfio_device *core_vdev,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct nvgrace_gpu_pci_core_device *nvdev =
+ container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
+ core_device.vdev);
+ u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ struct mem_region *memregion = NULL;
+ size_t register_offset;
+ loff_t copy_offset;
+ size_t copy_count;
+
+ if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_2,
+ sizeof(u64), &copy_offset,
+ &copy_count, &register_offset))
+ memregion = nvgrace_gpu_memregion(RESMEM_REGION_INDEX, nvdev);
+ else if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_4,
+ sizeof(u64), &copy_offset,
+ &copy_count, &register_offset))
+ memregion = nvgrace_gpu_memregion(USEMEM_REGION_INDEX, nvdev);
+
+ if (memregion) {
+ if (copy_from_user((void *)&memregion->bar_val + register_offset,
+ buf + copy_offset, copy_count))
+ return -EFAULT;
+ *ppos += copy_count;
+ return copy_count;
+ }
+
+ return vfio_pci_core_write(core_vdev, buf, count, ppos);
+}
+
+/*
+ * Map the device memory into the module's kernel VA space on demand. This is
+ * primarily needed because vfio does not require the userspace driver to
+ * perform accesses only through mmaps of the vfio-pci BAR regions; such
+ * accesses must also be supported through the vfio_device_ops read/write
+ * implementations.
+ *
+ * The usemem region is cacheable memory and hence is memremapped.
+ * The resmem region is non-cached and is mapped using ioremap_wc (NORMAL_NC).
+ */
+static int
+nvgrace_gpu_map_device_mem(int index,
+ struct nvgrace_gpu_pci_core_device *nvdev)
+{
+ struct mem_region *memregion;
+ int ret = 0;
+
+ memregion = nvgrace_gpu_memregion(index, nvdev);
+ if (!memregion)
+ return -EINVAL;
+
+ mutex_lock(&nvdev->remap_lock);
+
+ if (memregion->memaddr)
+ goto unlock;
+
+ if (index == USEMEM_REGION_INDEX)
+ memregion->memaddr = memremap(memregion->memphys,
+ memregion->memlength,
+ MEMREMAP_WB);
+ else
+ memregion->ioaddr = ioremap_wc(memregion->memphys,
+ memregion->memlength);
+
+ if (!memregion->memaddr)
+ ret = -ENOMEM;
+
+unlock:
+ mutex_unlock(&nvdev->remap_lock);
+
+ return ret;
+}
+
+/*
+ * Read the data from the device memory (mapped either through ioremap
+ * or memremap) into the user buffer.
+ */
+static int
+nvgrace_gpu_map_and_read(struct nvgrace_gpu_pci_core_device *nvdev,
+ char __user *buf, size_t mem_count, loff_t *ppos)
+{
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ u64 offset = *ppos & VFIO_PCI_OFFSET_MASK;
+ int ret;
+
+ if (!mem_count)
+ return 0;
+
+ /*
+ * Handle read on the BAR regions. Map to the target device memory
+ * physical address and copy to the requested read buffer.
+ */
+ ret = nvgrace_gpu_map_device_mem(index, nvdev);
+ if (ret)
+ return ret;
+
+ if (index == USEMEM_REGION_INDEX) {
+ if (copy_to_user(buf,
+ (u8 *)nvdev->usemem.memaddr + offset,
+ mem_count))
+ ret = -EFAULT;
+ } else {
+ /*
+ * The hardware ensures that the system does not crash when
+ * the device memory is accessed with the memory enable
+ * turned off. It synthesizes ~0 on such read. So there is
+ * no need to check or support the disablement/enablement of
+ * BAR through PCI_COMMAND config space register. Pass
+ * test_mem flag as false.
+ */
+ ret = vfio_pci_core_do_io_rw(&nvdev->core_device, false,
+ nvdev->resmem.ioaddr,
+ buf, offset, mem_count,
+ 0, 0, false);
+ }
+
+ return ret;
+}
+
+/*
+ * Read count bytes from the device memory at an offset. The actual device
+ * memory size (available) may not be a power-of-2. So the driver fakes
+ * the size up to a power-of-2 (reported) when exposing it to a userspace driver.
+ *
+ * Reads starting beyond the reported size generate -EINVAL; the portion of a
+ * read extending beyond the actual device memory size is filled with ~0;
+ * reads extending beyond the reported size are truncated.
+ */
+static ssize_t
+nvgrace_gpu_read_mem(struct nvgrace_gpu_pci_core_device *nvdev,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ u64 offset = *ppos & VFIO_PCI_OFFSET_MASK;
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ struct mem_region *memregion;
+ size_t mem_count, i;
+ u8 val = 0xFF;
+ int ret;
+
+ /* No need to do NULL check as caller does. */
+ memregion = nvgrace_gpu_memregion(index, nvdev);
+
+ if (offset >= memregion->bar_size)
+ return -EINVAL;
+
+ /* Clip short the read request beyond reported BAR size */
+ count = min(count, memregion->bar_size - (size_t)offset);
+
+ /*
+ * Determine how many bytes to actually read from the device memory.
+ * The part of a read beyond the actual device memory size is filled
+ * with ~0, while the part beyond the reported size is skipped.
+ */
+ if (offset >= memregion->memlength)
+ mem_count = 0;
+ else
+ mem_count = min(count, memregion->memlength - (size_t)offset);
+
+ ret = nvgrace_gpu_map_and_read(nvdev, buf, mem_count, ppos);
+ if (ret)
+ return ret;
+
+ /*
+ * Only the device memory present on the hardware is mapped, which may
+ * not be power-of-2 aligned. A read to an offset beyond the device memory
+ * size is filled with ~0.
+ */
+ for (i = mem_count; i < count; i++) {
+ ret = put_user(val, (unsigned char __user *)(buf + i));
+ if (ret)
+ return ret;
+ }
+
+ *ppos += count;
+ return count;
+}
+
+static ssize_t
+nvgrace_gpu_read(struct vfio_device *core_vdev,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ struct nvgrace_gpu_pci_core_device *nvdev =
+ container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
+ core_device.vdev);
+
+ if (nvgrace_gpu_memregion(index, nvdev))
+ return nvgrace_gpu_read_mem(nvdev, buf, count, ppos);
+
+ if (index == VFIO_PCI_CONFIG_REGION_INDEX)
+ return nvgrace_gpu_read_config_emu(core_vdev, buf, count, ppos);
+
+ return vfio_pci_core_read(core_vdev, buf, count, ppos);
+}
+
+/*
+ * Write the data to the device memory (mapped either through ioremap
+ * or memremap) from the user buffer.
+ */
+static int
+nvgrace_gpu_map_and_write(struct nvgrace_gpu_pci_core_device *nvdev,
+ const char __user *buf, size_t mem_count,
+ loff_t *ppos)
+{
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ int ret;
+
+ if (!mem_count)
+ return 0;
+
+ ret = nvgrace_gpu_map_device_mem(index, nvdev);
+ if (ret)
+ return ret;
+
+ if (index == USEMEM_REGION_INDEX) {
+ if (copy_from_user((u8 *)nvdev->usemem.memaddr + pos,
+ buf, mem_count))
+ return -EFAULT;
+ } else {
+ /*
+ * The hardware ensures that the system does not crash when
+ * the device memory is accessed with the memory enable
+ * turned off. It drops such writes. So there is no need to
+ * check or support the disablement/enablement of BAR
+ * through PCI_COMMAND config space register. Pass test_mem
+ * flag as false.
+ */
+ ret = vfio_pci_core_do_io_rw(&nvdev->core_device, false,
+ nvdev->resmem.ioaddr,
+ (char __user *)buf, pos, mem_count,
+ 0, 0, true);
+ }
+
+ return ret;
+}
+
+/*
+ * Write count bytes to the device memory at a given offset. The actual device
+ * memory size (available) may not be a power-of-2. So the driver fakes the
+ * size up to a power-of-2 (reported) when exposing it to a userspace driver.
+ *
+ * Writes extending beyond the reported size are truncated; writes starting
+ * beyond the reported size generate -EINVAL.
+ */
+static ssize_t
+nvgrace_gpu_write_mem(struct nvgrace_gpu_pci_core_device *nvdev,
+ size_t count, loff_t *ppos, const char __user *buf)
+{
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ u64 offset = *ppos & VFIO_PCI_OFFSET_MASK;
+ struct mem_region *memregion;
+ size_t mem_count;
+ int ret = 0;
+
+ /* No need to do NULL check as caller does. */
+ memregion = nvgrace_gpu_memregion(index, nvdev);
+
+ if (offset >= memregion->bar_size)
+ return -EINVAL;
+
+ /* Clip short the write request beyond reported BAR size */
+ count = min(count, memregion->bar_size - (size_t)offset);
+
+ /*
+ * Determine how many bytes to actually write to the device memory.
+ * Do not write beyond the available device memory size.
+ */
+ if (offset >= memregion->memlength)
+ goto exitfn;
+
+ /*
+ * Only the device memory present on the hardware is mapped, which may
+ * not be power-of-2 aligned. Drop access outside the available device
+ * memory on the hardware.
+ */
+ mem_count = min(count, memregion->memlength - (size_t)offset);
+
+ ret = nvgrace_gpu_map_and_write(nvdev, buf, mem_count, ppos);
+ if (ret)
+ return ret;
+
+exitfn:
+ *ppos += count;
+ return count;
+}
+
+static ssize_t
+nvgrace_gpu_write(struct vfio_device *core_vdev,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct nvgrace_gpu_pci_core_device *nvdev =
+ container_of(core_vdev, struct nvgrace_gpu_pci_core_device,
+ core_device.vdev);
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+
+ if (nvgrace_gpu_memregion(index, nvdev))
+ return nvgrace_gpu_write_mem(nvdev, count, ppos, buf);
+
+ if (index == VFIO_PCI_CONFIG_REGION_INDEX)
+ return nvgrace_gpu_write_config_emu(core_vdev, buf, count, ppos);
+
+ return vfio_pci_core_write(core_vdev, buf, count, ppos);
+}
+
+static const struct vfio_device_ops nvgrace_gpu_pci_ops = {
+ .name = "nvgrace-gpu-vfio-pci",
+ .init = vfio_pci_core_init_dev,
+ .release = vfio_pci_core_release_dev,
+ .open_device = nvgrace_gpu_open_device,
+ .close_device = nvgrace_gpu_close_device,
+ .ioctl = nvgrace_gpu_ioctl,
+ .device_feature = vfio_pci_core_ioctl_feature,
+ .read = nvgrace_gpu_read,
+ .write = nvgrace_gpu_write,
+ .mmap = nvgrace_gpu_mmap,
+ .request = vfio_pci_core_request,
+ .match = vfio_pci_core_match,
+ .bind_iommufd = vfio_iommufd_physical_bind,
+ .unbind_iommufd = vfio_iommufd_physical_unbind,
+ .attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
+};
+
+static const struct vfio_device_ops nvgrace_gpu_pci_core_ops = {
+ .name = "nvgrace-gpu-vfio-pci-core",
+ .init = vfio_pci_core_init_dev,
+ .release = vfio_pci_core_release_dev,
+ .open_device = nvgrace_gpu_open_device,
+ .close_device = vfio_pci_core_close_device,
+ .ioctl = vfio_pci_core_ioctl,
+ .device_feature = vfio_pci_core_ioctl_feature,
+ .read = vfio_pci_core_read,
+ .write = vfio_pci_core_write,
+ .mmap = vfio_pci_core_mmap,
+ .request = vfio_pci_core_request,
+ .match = vfio_pci_core_match,
+ .bind_iommufd = vfio_iommufd_physical_bind,
+ .unbind_iommufd = vfio_iommufd_physical_unbind,
+ .attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
+};
+
+static int
+nvgrace_gpu_fetch_memory_property(struct pci_dev *pdev,
+ u64 *pmemphys, u64 *pmemlength)
+{
+ int ret;
+
+ /*
+ * The memory information is present in the system ACPI tables as DSD
+ * properties nvidia,gpu-mem-base-pa and nvidia,gpu-mem-size.
+ */
+ ret = device_property_read_u64(&pdev->dev, "nvidia,gpu-mem-base-pa",
+ pmemphys);
+ if (ret)
+ return ret;
+
+ if (*pmemphys > type_max(phys_addr_t))
+ return -EOVERFLOW;
+
+ ret = device_property_read_u64(&pdev->dev, "nvidia,gpu-mem-size",
+ pmemlength);
+ if (ret)
+ return ret;
+
+ if (*pmemlength > type_max(size_t))
+ return -EOVERFLOW;
+
+ /*
+ * If the C2C link is not up due to an error, the coherent device
+ * memory size is returned as 0. Fail in such case.
+ */
+ if (*pmemlength == 0)
+ return -ENOMEM;
+
+ return ret;
+}
+
+static int
+nvgrace_gpu_init_nvdev_struct(struct pci_dev *pdev,
+ struct nvgrace_gpu_pci_core_device *nvdev,
+ u64 memphys, u64 memlength)
+{
+ int ret = 0;
+
+ /*
+ * The VM GPU device driver needs a non-cacheable region to support
+ * the MIG feature. Since the device memory is mapped as NORMAL cached,
+ * carve out a region from the end with a different NORMAL_NC
+ * property (called reserved memory and represented as resmem). This
+ * region is then exposed as a 64b BAR (regions 2 and 3) to the VM, while
+ * exposing the rest (termed usable memory and represented using usemem)
+ * as a cacheable 64b BAR (regions 4 and 5).
+ *
+ * devmem (memlength)
+ * |-------------------------------------------------|
+ * | |
+ * usemem.memphys resmem.memphys
+ */
+ nvdev->usemem.memphys = memphys;
+
+ /*
+ * The device memory exposed to the VM is added to the kernel by the
+ * VM driver module in chunks of memory block size. Only the usable
+ * memory (usemem) is added to the kernel for usage by the VM
+ * workloads. Make the usable memory size memblock aligned.
+ */
+ if (check_sub_overflow(memlength, RESMEM_SIZE,
+ &nvdev->usemem.memlength)) {
+ ret = -EOVERFLOW;
+ goto done;
+ }
+
+ /*
+ * The USEMEM part of the device memory has to be MEMBLK_SIZE
+ * aligned. This is a hardwired ABI value between the GPU FW and
+ * VFIO driver. The VM device driver is also aware of it and makes
+ * use of the value in its calculation to determine the USEMEM size.
+ */
+ nvdev->usemem.memlength = round_down(nvdev->usemem.memlength,
+ MEMBLK_SIZE);
+ if (nvdev->usemem.memlength == 0) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if ((check_add_overflow(nvdev->usemem.memphys,
+ nvdev->usemem.memlength,
+ &nvdev->resmem.memphys)) ||
+ (check_sub_overflow(memlength, nvdev->usemem.memlength,
+ &nvdev->resmem.memlength))) {
+ ret = -EOVERFLOW;
+ goto done;
+ }
+
+ /*
+ * The memory regions are exposed as BARs. Calculate and save
+ * the BAR size for them.
+ */
+ nvdev->usemem.bar_size = roundup_pow_of_two(nvdev->usemem.memlength);
+ nvdev->resmem.bar_size = roundup_pow_of_two(nvdev->resmem.memlength);
+done:
+ return ret;
+}
+
+static int nvgrace_gpu_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ const struct vfio_device_ops *ops = &nvgrace_gpu_pci_core_ops;
+ struct nvgrace_gpu_pci_core_device *nvdev;
+ u64 memphys, memlength;
+ int ret;
+
+ ret = nvgrace_gpu_fetch_memory_property(pdev, &memphys, &memlength);
+ if (!ret)
+ ops = &nvgrace_gpu_pci_ops;
+
+ nvdev = vfio_alloc_device(nvgrace_gpu_pci_core_device, core_device.vdev,
+ &pdev->dev, ops);
+ if (IS_ERR(nvdev))
+ return PTR_ERR(nvdev);
+
+ dev_set_drvdata(&pdev->dev, &nvdev->core_device);
+
+ if (ops == &nvgrace_gpu_pci_ops) {
+ /*
+ * Device memory properties are identified in the host ACPI
+ * table. Populate the nvgrace_gpu_pci_core_device structure.
+ */
+ ret = nvgrace_gpu_init_nvdev_struct(pdev, nvdev,
+ memphys, memlength);
+ if (ret)
+ goto out_put_vdev;
+ }
+
+ ret = vfio_pci_core_register_device(&nvdev->core_device);
+ if (ret)
+ goto out_put_vdev;
+
+ return ret;
+
+out_put_vdev:
+ vfio_put_device(&nvdev->core_device.vdev);
+ return ret;
+}
+
+static void nvgrace_gpu_remove(struct pci_dev *pdev)
+{
+ struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
+
+ vfio_pci_core_unregister_device(core_device);
+ vfio_put_device(&core_device->vdev);
+}
+
+static const struct pci_device_id nvgrace_gpu_vfio_pci_table[] = {
+ /* GH200 120GB */
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2342) },
+ /* GH200 480GB */
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2345) },
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, nvgrace_gpu_vfio_pci_table);
+
+static struct pci_driver nvgrace_gpu_vfio_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = nvgrace_gpu_vfio_pci_table,
+ .probe = nvgrace_gpu_probe,
+ .remove = nvgrace_gpu_remove,
+ .err_handler = &vfio_pci_core_err_handlers,
+ .driver_managed_dma = true,
+};
+
+module_pci_driver(nvgrace_gpu_vfio_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ankit Agrawal <ankita@nvidia.com>");
+MODULE_AUTHOR("Aniket Agashe <aniketa@nvidia.com>");
+MODULE_DESCRIPTION("VFIO NVGRACE GPU PF - User Level driver for NVIDIA devices with CPU coherently accessible device memory");
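
A worked example of the split computed by nvgrace_gpu_init_nvdev_struct() above, assuming a hypothetical 96 GiB of device memory reported by the ACPI properties (illustrative numbers, not real hardware values):

	/* Hypothetical input: memlength = 96 GiB, RESMEM_SIZE = 1 GiB, MEMBLK_SIZE = 512 MiB */
	usemem.memlength = round_down(96 GiB - 1 GiB, 512 MiB) = 95 GiB
	resmem.memphys   = memphys + 95 GiB
	resmem.memlength = 96 GiB - 95 GiB = 1 GiB
	usemem.bar_size  = roundup_pow_of_two(95 GiB) = 128 GiB
	resmem.bar_size  = roundup_pow_of_two(1 GiB)  = 1 GiB

The guest therefore sees a 128 GiB BAR4/5 of which only the first 95 GiB is backed by device memory; reads into the unbacked tail are filled with ~0 by nvgrace_gpu_read_mem() and writes there are dropped by nvgrace_gpu_write_mem().
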
diff --git a/drivers/vfio/pci/pds/dirty.c b/drivers/vfio/pci/pds/dirty.c
index 8ddf4346fcd5d..68e8f006dfdbf 100644
--- a/drivers/vfio/pci/pds/dirty.c
+++ b/drivers/vfio/pci/pds/dirty.c
@@ -607,7 +607,7 @@ int pds_vfio_dma_logging_report(struct vfio_device *vdev, unsigned long iova,
mutex_lock(&pds_vfio->state_mutex);
err = pds_vfio_dirty_sync(pds_vfio, dirty, iova, length);
- pds_vfio_state_mutex_unlock(pds_vfio);
+ mutex_unlock(&pds_vfio->state_mutex);
return err;
}
@@ -624,7 +624,7 @@ int pds_vfio_dma_logging_start(struct vfio_device *vdev,
mutex_lock(&pds_vfio->state_mutex);
pds_vfio_send_host_vf_lm_status_cmd(pds_vfio, PDS_LM_STA_IN_PROGRESS);
err = pds_vfio_dirty_enable(pds_vfio, ranges, nnodes, page_size);
- pds_vfio_state_mutex_unlock(pds_vfio);
+ mutex_unlock(&pds_vfio->state_mutex);
return err;
}
@@ -637,7 +637,7 @@ int pds_vfio_dma_logging_stop(struct vfio_device *vdev)
mutex_lock(&pds_vfio->state_mutex);
pds_vfio_dirty_disable(pds_vfio, true);
- pds_vfio_state_mutex_unlock(pds_vfio);
+ mutex_unlock(&pds_vfio->state_mutex);
return 0;
}
diff --git a/drivers/vfio/pci/pds/lm.c b/drivers/vfio/pci/pds/lm.c
index 79fe2e66bb498..6b94cc0bf45b4 100644
--- a/drivers/vfio/pci/pds/lm.c
+++ b/drivers/vfio/pci/pds/lm.c
@@ -92,8 +92,10 @@ static void pds_vfio_put_lm_file(struct pds_vfio_lm_file *lm_file)
{
mutex_lock(&lm_file->lock);
+ lm_file->disabled = true;
lm_file->size = 0;
lm_file->alloc_size = 0;
+ lm_file->filep->f_pos = 0;
/* Free scatter list of file pages */
sg_free_table(&lm_file->sg_table);
@@ -183,6 +185,12 @@ static ssize_t pds_vfio_save_read(struct file *filp, char __user *buf,
pos = &filp->f_pos;
mutex_lock(&lm_file->lock);
+
+ if (lm_file->disabled) {
+ done = -ENODEV;
+ goto out_unlock;
+ }
+
if (*pos > lm_file->size) {
done = -EINVAL;
goto out_unlock;
@@ -283,6 +291,11 @@ static ssize_t pds_vfio_restore_write(struct file *filp, const char __user *buf,
mutex_lock(&lm_file->lock);
+ if (lm_file->disabled) {
+ done = -ENODEV;
+ goto out_unlock;
+ }
+
while (len) {
size_t page_offset;
struct page *page;
diff --git a/drivers/vfio/pci/pds/lm.h b/drivers/vfio/pci/pds/lm.h
index 13be893198b74..9511b1afc6a11 100644
--- a/drivers/vfio/pci/pds/lm.h
+++ b/drivers/vfio/pci/pds/lm.h
@@ -27,6 +27,7 @@ struct pds_vfio_lm_file {
struct scatterlist *last_offset_sg; /* Iterator */
unsigned int sg_last_entry;
unsigned long last_offset;
+ bool disabled;
};
struct pds_vfio_pci_device;
diff --git a/drivers/vfio/pci/pds/pci_drv.c b/drivers/vfio/pci/pds/pci_drv.c
index a34dda5166293..16e93b11ab1b0 100644
--- a/drivers/vfio/pci/pds/pci_drv.c
+++ b/drivers/vfio/pci/pds/pci_drv.c
@@ -21,16 +21,13 @@
static void pds_vfio_recovery(struct pds_vfio_pci_device *pds_vfio)
{
- bool deferred_reset_needed = false;
-
/*
* Documentation states that the kernel migration driver must not
* generate asynchronous device state transitions outside of
* manipulation by the user or the VFIO_DEVICE_RESET ioctl.
*
* Since recovery is an asynchronous event received from the device,
- * initiate a deferred reset. Issue a deferred reset in the following
- * situations:
+ * initiate a reset in the following situations:
* 1. Migration is in progress, which will cause the next step of
* the migration to fail.
* 2. If the device is in a state that will be set to
@@ -42,24 +39,8 @@ static void pds_vfio_recovery(struct pds_vfio_pci_device *pds_vfio)
pds_vfio->state != VFIO_DEVICE_STATE_ERROR) ||
(pds_vfio->state == VFIO_DEVICE_STATE_RUNNING &&
pds_vfio_dirty_is_enabled(pds_vfio)))
- deferred_reset_needed = true;
+ pds_vfio_reset(pds_vfio, VFIO_DEVICE_STATE_ERROR);
mutex_unlock(&pds_vfio->state_mutex);
-
- /*
- * On the next user initiated state transition, the device will
- * transition to the VFIO_DEVICE_STATE_ERROR. At this point it's the user's
- * responsibility to reset the device.
- *
- * If a VFIO_DEVICE_RESET is requested post recovery and before the next
- * state transition, then the deferred reset state will be set to
- * VFIO_DEVICE_STATE_RUNNING.
- */
- if (deferred_reset_needed) {
- mutex_lock(&pds_vfio->reset_mutex);
- pds_vfio->deferred_reset = true;
- pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_ERROR;
- mutex_unlock(&pds_vfio->reset_mutex);
- }
}
static int pds_vfio_pci_notify_handler(struct notifier_block *nb,
@@ -185,7 +166,9 @@ static void pds_vfio_pci_aer_reset_done(struct pci_dev *pdev)
{
struct pds_vfio_pci_device *pds_vfio = pds_vfio_pci_drvdata(pdev);
- pds_vfio_reset(pds_vfio);
+ mutex_lock(&pds_vfio->state_mutex);
+ pds_vfio_reset(pds_vfio, VFIO_DEVICE_STATE_RUNNING);
+ mutex_unlock(&pds_vfio->state_mutex);
}
static const struct pci_error_handlers pds_vfio_pci_err_handlers = {
diff --git a/drivers/vfio/pci/pds/vfio_dev.c b/drivers/vfio/pci/pds/vfio_dev.c
index 4c351c59d05a9..76a80ae7087b5 100644
--- a/drivers/vfio/pci/pds/vfio_dev.c
+++ b/drivers/vfio/pci/pds/vfio_dev.c
@@ -26,37 +26,14 @@ struct pds_vfio_pci_device *pds_vfio_pci_drvdata(struct pci_dev *pdev)
vfio_coredev);
}
-void pds_vfio_state_mutex_unlock(struct pds_vfio_pci_device *pds_vfio)
+void pds_vfio_reset(struct pds_vfio_pci_device *pds_vfio,
+ enum vfio_device_mig_state state)
{
-again:
- mutex_lock(&pds_vfio->reset_mutex);
- if (pds_vfio->deferred_reset) {
- pds_vfio->deferred_reset = false;
- if (pds_vfio->state == VFIO_DEVICE_STATE_ERROR) {
- pds_vfio_put_restore_file(pds_vfio);
- pds_vfio_put_save_file(pds_vfio);
- pds_vfio_dirty_disable(pds_vfio, false);
- }
- pds_vfio->state = pds_vfio->deferred_reset_state;
- pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
- mutex_unlock(&pds_vfio->reset_mutex);
- goto again;
- }
- mutex_unlock(&pds_vfio->state_mutex);
- mutex_unlock(&pds_vfio->reset_mutex);
-}
-
-void pds_vfio_reset(struct pds_vfio_pci_device *pds_vfio)
-{
- mutex_lock(&pds_vfio->reset_mutex);
- pds_vfio->deferred_reset = true;
- pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
- if (!mutex_trylock(&pds_vfio->state_mutex)) {
- mutex_unlock(&pds_vfio->reset_mutex);
- return;
- }
- mutex_unlock(&pds_vfio->reset_mutex);
- pds_vfio_state_mutex_unlock(pds_vfio);
+ pds_vfio_put_restore_file(pds_vfio);
+ pds_vfio_put_save_file(pds_vfio);
+ if (state == VFIO_DEVICE_STATE_ERROR)
+ pds_vfio_dirty_disable(pds_vfio, false);
+ pds_vfio->state = state;
}
static struct file *
@@ -97,8 +74,7 @@ pds_vfio_set_device_state(struct vfio_device *vdev,
break;
}
}
- pds_vfio_state_mutex_unlock(pds_vfio);
- /* still waiting on a deferred_reset */
+ mutex_unlock(&pds_vfio->state_mutex);
if (pds_vfio->state == VFIO_DEVICE_STATE_ERROR)
res = ERR_PTR(-EIO);
@@ -114,7 +90,7 @@ static int pds_vfio_get_device_state(struct vfio_device *vdev,
mutex_lock(&pds_vfio->state_mutex);
*current_state = pds_vfio->state;
- pds_vfio_state_mutex_unlock(pds_vfio);
+ mutex_unlock(&pds_vfio->state_mutex);
return 0;
}
@@ -156,7 +132,6 @@ static int pds_vfio_init_device(struct vfio_device *vdev)
pds_vfio->vf_id = vf_id;
mutex_init(&pds_vfio->state_mutex);
- mutex_init(&pds_vfio->reset_mutex);
vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P;
vdev->mig_ops = &pds_vfio_lm_ops;
@@ -178,7 +153,6 @@ static void pds_vfio_release_device(struct vfio_device *vdev)
vfio_coredev.vdev);
mutex_destroy(&pds_vfio->state_mutex);
- mutex_destroy(&pds_vfio->reset_mutex);
vfio_pci_core_release_dev(vdev);
}
@@ -194,7 +168,6 @@ static int pds_vfio_open_device(struct vfio_device *vdev)
return err;
pds_vfio->state = VFIO_DEVICE_STATE_RUNNING;
- pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
vfio_pci_core_finish_enable(&pds_vfio->vfio_coredev);
diff --git a/drivers/vfio/pci/pds/vfio_dev.h b/drivers/vfio/pci/pds/vfio_dev.h
index e7b01080a1ec3..803d99d69c738 100644
--- a/drivers/vfio/pci/pds/vfio_dev.h
+++ b/drivers/vfio/pci/pds/vfio_dev.h
@@ -18,20 +18,16 @@ struct pds_vfio_pci_device {
struct pds_vfio_dirty dirty;
struct mutex state_mutex; /* protect migration state */
enum vfio_device_mig_state state;
- struct mutex reset_mutex; /* protect reset_done flow */
- u8 deferred_reset;
- enum vfio_device_mig_state deferred_reset_state;
struct notifier_block nb;
int vf_id;
u16 client_id;
};
-void pds_vfio_state_mutex_unlock(struct pds_vfio_pci_device *pds_vfio);
-
const struct vfio_device_ops *pds_vfio_ops_info(void);
struct pds_vfio_pci_device *pds_vfio_pci_drvdata(struct pci_dev *pdev);
-void pds_vfio_reset(struct pds_vfio_pci_device *pds_vfio);
+void pds_vfio_reset(struct pds_vfio_pci_device *pds_vfio,
+ enum vfio_device_mig_state state);
struct pci_dev *pds_vfio_to_pci_dev(struct pds_vfio_pci_device *pds_vfio);
struct device *pds_vfio_to_dev(struct pds_vfio_pci_device *pds_vfio);
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 7e2e62ab0869c..97422aafaa7b5 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -1966,3 +1966,45 @@ ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, char __user *buf,
return done;
}
+
+/**
+ * vfio_pci_core_range_intersect_range() - Determine overlap between a buffer
+ * and register offset ranges.
+ * @buf_start: start offset of the buffer
+ * @buf_cnt: number of buffer bytes
+ * @reg_start: start register offset
+ * @reg_cnt: number of register bytes
+ * @buf_offset: start offset of overlap in the buffer
+ * @intersect_count: number of overlapping bytes
+ * @register_offset: start offset of overlap in register
+ *
+ * Returns: true if there is overlap, false if not.
+ * The overlap start and size are returned through the function args.
+ */
+bool vfio_pci_core_range_intersect_range(loff_t buf_start, size_t buf_cnt,
+ loff_t reg_start, size_t reg_cnt,
+ loff_t *buf_offset,
+ size_t *intersect_count,
+ size_t *register_offset)
+{
+ if (buf_start <= reg_start &&
+ buf_start + buf_cnt > reg_start) {
+ *buf_offset = reg_start - buf_start;
+ *intersect_count = min_t(size_t, reg_cnt,
+ buf_start + buf_cnt - reg_start);
+ *register_offset = 0;
+ return true;
+ }
+
+ if (buf_start > reg_start &&
+ buf_start < reg_start + reg_cnt) {
+ *buf_offset = 0;
+ *intersect_count = min_t(size_t, buf_cnt,
+ reg_start + reg_cnt - buf_start);
+ *register_offset = buf_start - reg_start;
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_range_intersect_range);
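
A minimal usage sketch of the new helper, modeled on how the nvgrace-gpu config-space emulation above consumes it (the buffer offset and length are hypothetical):

	loff_t copy_offset;
	size_t copy_count, register_offset;

	/* Does a 4-byte config access at offset 0x16 overlap PCI_BASE_ADDRESS_2 (0x18, 8 bytes)? */
	if (vfio_pci_core_range_intersect_range(0x16, 4, PCI_BASE_ADDRESS_2, 8,
						&copy_offset, &copy_count,
						&register_offset)) {
		/* copy_offset = 2, copy_count = 2, register_offset = 0 */
	}

Here the last two bytes of the user buffer overlap the first two bytes of the emulated BAR register, so only copy_count bytes starting at buf + copy_offset need to be merged with the shadow bar_val.
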
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 1cbc990d42e07..d94d61b92c1ac 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -1862,8 +1862,25 @@ int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma
/*
* See remap_pfn_range(), called from vfio_pci_fault() but we can't
* change vm_flags within the fault handler. Set them now.
+ *
+ * VM_ALLOW_ANY_UNCACHED: The VMA flag is implemented for ARM64,
+ * allowing KVM stage 2 device mapping attributes to use Normal-NC
+ * rather than DEVICE_nGnRE, which allows guest mappings
+ * supporting write-combining attributes (WC). ARM does not
+ * architecturally guarantee this is safe, and indeed some MMIO
+ * regions like the GICv2 VCPU interface can trigger uncontained
+ * faults if Normal-NC is used.
+ *
+ * To safely use VFIO in KVM the platform must guarantee full
+ * safety in the guest where no action taken against an MMIO
+ * mapping can trigger an uncontained failure. The assumption is
+ * that most VFIO PCI platforms support this for both mapping types,
+ * at least in common flows, based on some expectations of how
+ * PCI IP is integrated. Hence VM_ALLOW_ANY_UNCACHED is set in
+ * the VMA flags.
*/
- vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+ vm_flags_set(vma, VM_ALLOW_ANY_UNCACHED | VM_IO | VM_PFNMAP |
+ VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &vfio_pci_mmap_ops;
return 0;
@@ -2047,6 +2064,7 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb,
pci_name(pdev));
pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
vdev->vdev.ops->name);
+ WARN_ON(!pdev->driver_override);
} else if (action == BUS_NOTIFY_BOUND_DRIVER &&
pdev->is_virtfn && physfn == vdev->pdev) {
struct pci_driver *drv = pci_dev_driver(pdev);
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 237beac838097..fb5392b749fff 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -90,22 +90,28 @@ static void vfio_send_intx_eventfd(void *opaque, void *unused)
if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
struct vfio_pci_irq_ctx *ctx;
+ struct eventfd_ctx *trigger;
ctx = vfio_irq_ctx_get(vdev, 0);
if (WARN_ON_ONCE(!ctx))
return;
- eventfd_signal(ctx->trigger);
+
+ trigger = READ_ONCE(ctx->trigger);
+ if (likely(trigger))
+ eventfd_signal(trigger);
}
}
/* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
-bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
+static bool __vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
struct vfio_pci_irq_ctx *ctx;
unsigned long flags;
bool masked_changed = false;
+ lockdep_assert_held(&vdev->igate);
+
spin_lock_irqsave(&vdev->irqlock, flags);
/*
@@ -143,6 +149,17 @@ out_unlock:
return masked_changed;
}
+bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
+{
+ bool mask_changed;
+
+ mutex_lock(&vdev->igate);
+ mask_changed = __vfio_pci_intx_mask(vdev);
+ mutex_unlock(&vdev->igate);
+
+ return mask_changed;
+}
+
/*
* If this is triggered by an eventfd, we can't call eventfd_signal
* or else we'll deadlock on the eventfd wait queue. Return >0 when
@@ -194,12 +211,21 @@ out_unlock:
return ret;
}
-void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
+static void __vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
+ lockdep_assert_held(&vdev->igate);
+
if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
vfio_send_intx_eventfd(vdev, NULL);
}
+void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
+{
+ mutex_lock(&vdev->igate);
+ __vfio_pci_intx_unmask(vdev);
+ mutex_unlock(&vdev->igate);
+}
+
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
struct vfio_pci_core_device *vdev = dev_id;
@@ -231,97 +257,100 @@ static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
return ret;
}
-static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
+static int vfio_intx_enable(struct vfio_pci_core_device *vdev,
+ struct eventfd_ctx *trigger)
{
+ struct pci_dev *pdev = vdev->pdev;
struct vfio_pci_irq_ctx *ctx;
+ unsigned long irqflags;
+ char *name;
+ int ret;
if (!is_irq_none(vdev))
return -EINVAL;
- if (!vdev->pdev->irq)
+ if (!pdev->irq)
return -ENODEV;
+ name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev));
+ if (!name)
+ return -ENOMEM;
+
ctx = vfio_irq_ctx_alloc(vdev, 0);
if (!ctx)
return -ENOMEM;
+ ctx->name = name;
+ ctx->trigger = trigger;
+
/*
- * If the virtual interrupt is masked, restore it. Devices
- * supporting DisINTx can be masked at the hardware level
- * here, non-PCI-2.3 devices will have to wait until the
- * interrupt is enabled.
+ * Fill the initial masked state based on virq_disabled. After
+ * enable, changing the DisINTx bit in vconfig directly changes INTx
+ * masking. igate prevents races during setup; once running, masked
+ * is protected via irqlock.
+ *
+ * Devices supporting DisINTx also reflect the current mask state in
+ * the physical DisINTx bit, which is not affected during IRQ setup.
+ *
+ * Devices without DisINTx support require an exclusive interrupt.
+ * IRQ masking is performed at the IRQ chip. Again, igate protects
+ * against races during setup and IRQ handlers and irqfds are not
+ * yet active, therefore masked is stable and can be used to
+ * conditionally auto-enable the IRQ.
+ *
+ * irq_type must be stable while the IRQ handler is registered,
+ * therefore it must be set before request_irq().
*/
ctx->masked = vdev->virq_disabled;
- if (vdev->pci_2_3)
- pci_intx(vdev->pdev, !ctx->masked);
+ if (vdev->pci_2_3) {
+ pci_intx(pdev, !ctx->masked);
+ irqflags = IRQF_SHARED;
+ } else {
+ irqflags = ctx->masked ? IRQF_NO_AUTOEN : 0;
+ }
vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;
+ ret = request_irq(pdev->irq, vfio_intx_handler,
+ irqflags, ctx->name, vdev);
+ if (ret) {
+ vdev->irq_type = VFIO_PCI_NUM_IRQS;
+ kfree(name);
+ vfio_irq_ctx_free(vdev, ctx, 0);
+ return ret;
+ }
+
return 0;
}
-static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
+static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev,
+ struct eventfd_ctx *trigger)
{
struct pci_dev *pdev = vdev->pdev;
- unsigned long irqflags = IRQF_SHARED;
struct vfio_pci_irq_ctx *ctx;
- struct eventfd_ctx *trigger;
- unsigned long flags;
- int ret;
+ struct eventfd_ctx *old;
ctx = vfio_irq_ctx_get(vdev, 0);
if (WARN_ON_ONCE(!ctx))
return -EINVAL;
- if (ctx->trigger) {
- free_irq(pdev->irq, vdev);
- kfree(ctx->name);
- eventfd_ctx_put(ctx->trigger);
- ctx->trigger = NULL;
- }
-
- if (fd < 0) /* Disable only */
- return 0;
+ old = ctx->trigger;
- ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)",
- pci_name(pdev));
- if (!ctx->name)
- return -ENOMEM;
+ WRITE_ONCE(ctx->trigger, trigger);
- trigger = eventfd_ctx_fdget(fd);
- if (IS_ERR(trigger)) {
- kfree(ctx->name);
- return PTR_ERR(trigger);
+ /* Releasing an old ctx requires synchronizing in-flight users */
+ if (old) {
+ synchronize_irq(pdev->irq);
+ vfio_virqfd_flush_thread(&ctx->unmask);
+ eventfd_ctx_put(old);
}
- ctx->trigger = trigger;
-
- if (!vdev->pci_2_3)
- irqflags = 0;
-
- ret = request_irq(pdev->irq, vfio_intx_handler,
- irqflags, ctx->name, vdev);
- if (ret) {
- ctx->trigger = NULL;
- kfree(ctx->name);
- eventfd_ctx_put(trigger);
- return ret;
- }
-
- /*
- * INTx disable will stick across the new irq setup,
- * disable_irq won't.
- */
- spin_lock_irqsave(&vdev->irqlock, flags);
- if (!vdev->pci_2_3 && ctx->masked)
- disable_irq_nosync(pdev->irq);
- spin_unlock_irqrestore(&vdev->irqlock, flags);
-
return 0;
}
static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
{
+ struct pci_dev *pdev = vdev->pdev;
struct vfio_pci_irq_ctx *ctx;
ctx = vfio_irq_ctx_get(vdev, 0);
@@ -329,10 +358,13 @@ static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
if (ctx) {
vfio_virqfd_disable(&ctx->unmask);
vfio_virqfd_disable(&ctx->mask);
+ free_irq(pdev->irq, vdev);
+ if (ctx->trigger)
+ eventfd_ctx_put(ctx->trigger);
+ kfree(ctx->name);
+ vfio_irq_ctx_free(vdev, ctx, 0);
}
- vfio_intx_set_signal(vdev, -1);
vdev->irq_type = VFIO_PCI_NUM_IRQS;
- vfio_irq_ctx_free(vdev, ctx, 0);
}
/*
@@ -560,11 +592,11 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
return -EINVAL;
if (flags & VFIO_IRQ_SET_DATA_NONE) {
- vfio_pci_intx_unmask(vdev);
+ __vfio_pci_intx_unmask(vdev);
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
uint8_t unmask = *(uint8_t *)data;
if (unmask)
- vfio_pci_intx_unmask(vdev);
+ __vfio_pci_intx_unmask(vdev);
} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0);
int32_t fd = *(int32_t *)data;
@@ -591,11 +623,11 @@ static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
return -EINVAL;
if (flags & VFIO_IRQ_SET_DATA_NONE) {
- vfio_pci_intx_mask(vdev);
+ __vfio_pci_intx_mask(vdev);
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
uint8_t mask = *(uint8_t *)data;
if (mask)
- vfio_pci_intx_mask(vdev);
+ __vfio_pci_intx_mask(vdev);
} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
return -ENOTTY; /* XXX implement me */
}
@@ -616,19 +648,23 @@ static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
return -EINVAL;
if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ struct eventfd_ctx *trigger = NULL;
int32_t fd = *(int32_t *)data;
int ret;
- if (is_intx(vdev))
- return vfio_intx_set_signal(vdev, fd);
+ if (fd >= 0) {
+ trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(trigger))
+ return PTR_ERR(trigger);
+ }
- ret = vfio_intx_enable(vdev);
- if (ret)
- return ret;
+ if (is_intx(vdev))
+ ret = vfio_intx_set_signal(vdev, trigger);
+ else
+ ret = vfio_intx_enable(vdev, trigger);
- ret = vfio_intx_set_signal(vdev, fd);
- if (ret)
- vfio_intx_disable(vdev);
+ if (ret && trigger)
+ eventfd_ctx_put(trigger);
return ret;
}
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 07fea08ea8a21..03b8f7ada1ac2 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -96,10 +96,10 @@ VFIO_IOREAD(32)
* reads with -1. This is intended for handling MSI-X vector tables and
* leftover space for ROM BARs.
*/
-static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
- void __iomem *io, char __user *buf,
- loff_t off, size_t count, size_t x_start,
- size_t x_end, bool iswrite)
+ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
+ void __iomem *io, char __user *buf,
+ loff_t off, size_t count, size_t x_start,
+ size_t x_end, bool iswrite)
{
ssize_t done = 0;
int ret;
@@ -201,6 +201,7 @@ static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
return done;
}
+EXPORT_SYMBOL_GPL(vfio_pci_core_do_io_rw);
int vfio_pci_core_setup_barmap(struct vfio_pci_core_device *vdev, int bar)
{
@@ -279,8 +280,8 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
x_end = vdev->msix_offset + vdev->msix_size;
}
- done = do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos,
- count, x_start, x_end, iswrite);
+ done = vfio_pci_core_do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos,
+ count, x_start, x_end, iswrite);
if (done >= 0)
*ppos += done;
@@ -348,7 +349,8 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
* probing, so we don't currently worry about access in relation
* to the memory enable bit in the command register.
*/
- done = do_io_rw(vdev, false, iomem, buf, off, count, 0, 0, iswrite);
+ done = vfio_pci_core_do_io_rw(vdev, false, iomem, buf, off, count,
+ 0, 0, iswrite);
vga_put(vdev->pdev, rsrc);
diff --git a/drivers/vfio/pci/virtio/main.c b/drivers/vfio/pci/virtio/main.c
index d5af683837d34..b5d3a8c5bbc9a 100644
--- a/drivers/vfio/pci/virtio/main.c
+++ b/drivers/vfio/pci/virtio/main.c
@@ -132,33 +132,6 @@ end:
return ret ? ret : count;
}
-static bool range_intersect_range(loff_t range1_start, size_t count1,
- loff_t range2_start, size_t count2,
- loff_t *start_offset,
- size_t *intersect_count,
- size_t *register_offset)
-{
- if (range1_start <= range2_start &&
- range1_start + count1 > range2_start) {
- *start_offset = range2_start - range1_start;
- *intersect_count = min_t(size_t, count2,
- range1_start + count1 - range2_start);
- *register_offset = 0;
- return true;
- }
-
- if (range1_start > range2_start &&
- range1_start < range2_start + count2) {
- *start_offset = 0;
- *intersect_count = min_t(size_t, count1,
- range2_start + count2 - range1_start);
- *register_offset = range1_start - range2_start;
- return true;
- }
-
- return false;
-}
-
static ssize_t virtiovf_pci_read_config(struct vfio_device *core_vdev,
char __user *buf, size_t count,
loff_t *ppos)
@@ -178,16 +151,18 @@ static ssize_t virtiovf_pci_read_config(struct vfio_device *core_vdev,
if (ret < 0)
return ret;
- if (range_intersect_range(pos, count, PCI_DEVICE_ID, sizeof(val16),
- &copy_offset, &copy_count, &register_offset)) {
+ if (vfio_pci_core_range_intersect_range(pos, count, PCI_DEVICE_ID,
+ sizeof(val16), &copy_offset,
+ &copy_count, &register_offset)) {
val16 = cpu_to_le16(VIRTIO_TRANS_ID_NET);
if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, copy_count))
return -EFAULT;
}
if ((le16_to_cpu(virtvdev->pci_cmd) & PCI_COMMAND_IO) &&
- range_intersect_range(pos, count, PCI_COMMAND, sizeof(val16),
- &copy_offset, &copy_count, &register_offset)) {
+ vfio_pci_core_range_intersect_range(pos, count, PCI_COMMAND,
+ sizeof(val16), &copy_offset,
+ &copy_count, &register_offset)) {
if (copy_from_user((void *)&val16 + register_offset, buf + copy_offset,
copy_count))
return -EFAULT;
@@ -197,16 +172,18 @@ static ssize_t virtiovf_pci_read_config(struct vfio_device *core_vdev,
return -EFAULT;
}
- if (range_intersect_range(pos, count, PCI_REVISION_ID, sizeof(val8),
- &copy_offset, &copy_count, &register_offset)) {
+ if (vfio_pci_core_range_intersect_range(pos, count, PCI_REVISION_ID,
+ sizeof(val8), &copy_offset,
+ &copy_count, &register_offset)) {
/* Transitional needs to have revision 0 */
val8 = 0;
if (copy_to_user(buf + copy_offset, &val8, copy_count))
return -EFAULT;
}
- if (range_intersect_range(pos, count, PCI_BASE_ADDRESS_0, sizeof(val32),
- &copy_offset, &copy_count, &register_offset)) {
+ if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_0,
+ sizeof(val32), &copy_offset,
+ &copy_count, &register_offset)) {
u32 bar_mask = ~(virtvdev->bar0_virtual_buf_size - 1);
u32 pci_base_addr_0 = le32_to_cpu(virtvdev->pci_base_addr_0);
@@ -215,8 +192,9 @@ static ssize_t virtiovf_pci_read_config(struct vfio_device *core_vdev,
return -EFAULT;
}
- if (range_intersect_range(pos, count, PCI_SUBSYSTEM_ID, sizeof(val16),
- &copy_offset, &copy_count, &register_offset)) {
+ if (vfio_pci_core_range_intersect_range(pos, count, PCI_SUBSYSTEM_ID,
+ sizeof(val16), &copy_offset,
+ &copy_count, &register_offset)) {
/*
* Transitional devices use the PCI subsystem device id as
* virtio device id, same as legacy driver always did.
@@ -227,8 +205,9 @@ static ssize_t virtiovf_pci_read_config(struct vfio_device *core_vdev,
return -EFAULT;
}
- if (range_intersect_range(pos, count, PCI_SUBSYSTEM_VENDOR_ID, sizeof(val16),
- &copy_offset, &copy_count, &register_offset)) {
+ if (vfio_pci_core_range_intersect_range(pos, count, PCI_SUBSYSTEM_VENDOR_ID,
+ sizeof(val16), &copy_offset,
+ &copy_count, &register_offset)) {
val16 = cpu_to_le16(PCI_VENDOR_ID_REDHAT_QUMRANET);
if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset,
copy_count))
@@ -270,19 +249,20 @@ static ssize_t virtiovf_pci_write_config(struct vfio_device *core_vdev,
loff_t copy_offset;
size_t copy_count;
- if (range_intersect_range(pos, count, PCI_COMMAND, sizeof(virtvdev->pci_cmd),
- &copy_offset, &copy_count,
- &register_offset)) {
+ if (vfio_pci_core_range_intersect_range(pos, count, PCI_COMMAND,
+ sizeof(virtvdev->pci_cmd),
+ &copy_offset, &copy_count,
+ &register_offset)) {
if (copy_from_user((void *)&virtvdev->pci_cmd + register_offset,
buf + copy_offset,
copy_count))
return -EFAULT;
}
- if (range_intersect_range(pos, count, PCI_BASE_ADDRESS_0,
- sizeof(virtvdev->pci_base_addr_0),
- &copy_offset, &copy_count,
- &register_offset)) {
+ if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_0,
+ sizeof(virtvdev->pci_base_addr_0),
+ &copy_offset, &copy_count,
+ &register_offset)) {
if (copy_from_user((void *)&virtvdev->pci_base_addr_0 + register_offset,
buf + copy_offset,
copy_count))
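The helper renamed and exported above keeps the semantics of the removed static copy: it reports whether a user access [pos, pos + count) overlaps an emulated register and yields the buffer offset, register offset and byte count needed to splice the virtual value in. A hedged sketch of how another variant driver might use it (REG_OFF, the register value and the prototype's location in <linux/vfio_pci_core.h> are assumptions, not taken from this patch):

#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio_pci_core.h>

#define REG_OFF	0x40	/* hypothetical config offset of an emulated register */

static ssize_t emulate_reg_read(char __user *buf, loff_t pos, size_t count)
{
	loff_t copy_offset;
	size_t copy_count, register_offset;
	__le16 val = cpu_to_le16(0x1234);	/* hypothetical virtual value */

	if (vfio_pci_core_range_intersect_range(pos, count, REG_OFF, sizeof(val),
						&copy_offset, &copy_count,
						&register_offset)) {
		/* overlay only the overlapping bytes of the virtual register */
		if (copy_to_user(buf + copy_offset,
				 (void *)&val + register_offset, copy_count))
			return -EFAULT;
	}

	return count;
}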
diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c
index 6464b3939ebcf..485c6f9161a91 100644
--- a/drivers/vfio/platform/vfio_amba.c
+++ b/drivers/vfio/platform/vfio_amba.c
@@ -122,16 +122,16 @@ static const struct vfio_device_ops vfio_amba_ops = {
.detach_ioas = vfio_iommufd_physical_detach_ioas,
};
-static const struct amba_id pl330_ids[] = {
+static const struct amba_id vfio_amba_ids[] = {
{ 0, 0 },
};
-MODULE_DEVICE_TABLE(amba, pl330_ids);
+MODULE_DEVICE_TABLE(amba, vfio_amba_ids);
static struct amba_driver vfio_amba_driver = {
.probe = vfio_amba_probe,
.remove = vfio_amba_remove,
- .id_table = pl330_ids,
+ .id_table = vfio_amba_ids,
.drv = {
.name = "vfio-amba",
.owner = THIS_MODULE,
diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c
index 8cf22fa65baa2..42d1462c5e19d 100644
--- a/drivers/vfio/platform/vfio_platform.c
+++ b/drivers/vfio/platform/vfio_platform.c
@@ -85,14 +85,13 @@ static void vfio_platform_release_dev(struct vfio_device *core_vdev)
vfio_platform_release_common(vdev);
}
-static int vfio_platform_remove(struct platform_device *pdev)
+static void vfio_platform_remove(struct platform_device *pdev)
{
struct vfio_platform_device *vdev = dev_get_drvdata(&pdev->dev);
vfio_unregister_group_dev(&vdev->vdev);
pm_runtime_disable(vdev->device);
vfio_put_device(&vdev->vdev);
- return 0;
}
static const struct vfio_device_ops vfio_platform_ops = {
@@ -113,7 +112,7 @@ static const struct vfio_device_ops vfio_platform_ops = {
static struct platform_driver vfio_platform_driver = {
.probe = vfio_platform_probe,
- .remove = vfio_platform_remove,
+ .remove_new = vfio_platform_remove,
.driver = {
.name = "vfio-platform",
},
diff --git a/drivers/vfio/platform/vfio_platform_irq.c b/drivers/vfio/platform/vfio_platform_irq.c
index 61a1bfb68ac78..ef41ecef83af1 100644
--- a/drivers/vfio/platform/vfio_platform_irq.c
+++ b/drivers/vfio/platform/vfio_platform_irq.c
@@ -136,6 +136,16 @@ static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev,
return 0;
}
+/*
+ * The trigger eventfd is guaranteed valid in the interrupt path
+ * and protected by the igate mutex when triggered via ioctl.
+ */
+static void vfio_send_eventfd(struct vfio_platform_irq *irq_ctx)
+{
+ if (likely(irq_ctx->trigger))
+ eventfd_signal(irq_ctx->trigger);
+}
+
static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
{
struct vfio_platform_irq *irq_ctx = dev_id;
@@ -155,7 +165,7 @@ static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
spin_unlock_irqrestore(&irq_ctx->lock, flags);
if (ret == IRQ_HANDLED)
- eventfd_signal(irq_ctx->trigger);
+ vfio_send_eventfd(irq_ctx);
return ret;
}
@@ -164,52 +174,40 @@ static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
{
struct vfio_platform_irq *irq_ctx = dev_id;
- eventfd_signal(irq_ctx->trigger);
+ vfio_send_eventfd(irq_ctx);
return IRQ_HANDLED;
}
static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
- int fd, irq_handler_t handler)
+ int fd)
{
struct vfio_platform_irq *irq = &vdev->irqs[index];
struct eventfd_ctx *trigger;
- int ret;
if (irq->trigger) {
- irq_clear_status_flags(irq->hwirq, IRQ_NOAUTOEN);
- free_irq(irq->hwirq, irq);
- kfree(irq->name);
+ disable_irq(irq->hwirq);
eventfd_ctx_put(irq->trigger);
irq->trigger = NULL;
}
if (fd < 0) /* Disable only */
return 0;
- irq->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-irq[%d](%s)",
- irq->hwirq, vdev->name);
- if (!irq->name)
- return -ENOMEM;
trigger = eventfd_ctx_fdget(fd);
- if (IS_ERR(trigger)) {
- kfree(irq->name);
+ if (IS_ERR(trigger))
return PTR_ERR(trigger);
- }
irq->trigger = trigger;
- irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN);
- ret = request_irq(irq->hwirq, handler, 0, irq->name, irq);
- if (ret) {
- kfree(irq->name);
- eventfd_ctx_put(trigger);
- irq->trigger = NULL;
- return ret;
- }
-
- if (!irq->masked)
- enable_irq(irq->hwirq);
+ /*
+ * irq->masked effectively provides nested disables within the overall
+ * enable relative to trigger. Specifically, request_irq() is called
+ * with NO_AUTOEN, therefore the IRQ is initially disabled. The user
+ * may only further disable the IRQ with a MASK operation because
+ * irq->masked is initially false.
+ */
+ enable_irq(irq->hwirq);
return 0;
}
@@ -228,7 +226,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
handler = vfio_irq_handler;
if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
- return vfio_set_trigger(vdev, index, -1, handler);
+ return vfio_set_trigger(vdev, index, -1);
if (start != 0 || count != 1)
return -EINVAL;
@@ -236,7 +234,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
int32_t fd = *(int32_t *)data;
- return vfio_set_trigger(vdev, index, fd, handler);
+ return vfio_set_trigger(vdev, index, fd);
}
if (flags & VFIO_IRQ_SET_DATA_NONE) {
@@ -260,6 +258,14 @@ int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
unsigned start, unsigned count, uint32_t flags,
void *data) = NULL;
+ /*
+ * For compatibility, errors from request_irq() are local to the
+ * SET_IRQS path and reflected in the name pointer. This allows,
+ * for example, polling mode fallback for an exclusive IRQ failure.
+ */
+ if (IS_ERR(vdev->irqs[index].name))
+ return PTR_ERR(vdev->irqs[index].name);
+
switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
case VFIO_IRQ_SET_ACTION_MASK:
func = vfio_platform_set_irq_mask;
@@ -280,7 +286,7 @@ int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
int vfio_platform_irq_init(struct vfio_platform_device *vdev)
{
- int cnt = 0, i;
+ int cnt = 0, i, ret = 0;
while (vdev->get_irq(vdev, cnt) >= 0)
cnt++;
@@ -292,37 +298,70 @@ int vfio_platform_irq_init(struct vfio_platform_device *vdev)
for (i = 0; i < cnt; i++) {
int hwirq = vdev->get_irq(vdev, i);
+ irq_handler_t handler = vfio_irq_handler;
- if (hwirq < 0)
+ if (hwirq < 0) {
+ ret = -EINVAL;
goto err;
+ }
spin_lock_init(&vdev->irqs[i].lock);
vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD;
- if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK)
+ if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK) {
vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE
| VFIO_IRQ_INFO_AUTOMASKED;
+ handler = vfio_automasked_irq_handler;
+ }
vdev->irqs[i].count = 1;
vdev->irqs[i].hwirq = hwirq;
vdev->irqs[i].masked = false;
+ vdev->irqs[i].name = kasprintf(GFP_KERNEL_ACCOUNT,
+ "vfio-irq[%d](%s)", hwirq,
+ vdev->name);
+ if (!vdev->irqs[i].name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = request_irq(hwirq, handler, IRQF_NO_AUTOEN,
+ vdev->irqs[i].name, &vdev->irqs[i]);
+ if (ret) {
+ kfree(vdev->irqs[i].name);
+ vdev->irqs[i].name = ERR_PTR(ret);
+ }
}
vdev->num_irqs = cnt;
return 0;
err:
+ for (--i; i >= 0; i--) {
+ if (!IS_ERR(vdev->irqs[i].name)) {
+ free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
+ kfree(vdev->irqs[i].name);
+ }
+ }
kfree(vdev->irqs);
- return -EINVAL;
+ return ret;
}
void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
{
int i;
- for (i = 0; i < vdev->num_irqs; i++)
- vfio_set_trigger(vdev, i, -1, NULL);
+ for (i = 0; i < vdev->num_irqs; i++) {
+ vfio_virqfd_disable(&vdev->irqs[i].mask);
+ vfio_virqfd_disable(&vdev->irqs[i].unmask);
+ if (!IS_ERR(vdev->irqs[i].name)) {
+ free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
+ if (vdev->irqs[i].trigger)
+ eventfd_ctx_put(vdev->irqs[i].trigger);
+ kfree(vdev->irqs[i].name);
+ }
+ }
vdev->num_irqs = 0;
kfree(vdev->irqs);
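The init path above defers a request_irq() failure by parking the errno in the per-IRQ name pointer; the IS_ERR() check added to vfio_platform_set_irqs_ioctl() then surfaces it only when the user actually touches that index. The idiom in isolation (illustrative sketch with a hypothetical struct, not part of the patch):

#include <linux/err.h>

struct demo_irq {
	char *name;	/* valid pointer, or ERR_PTR(-errno) after a setup failure */
};

/* at init time: park the error instead of failing the whole device */
static void demo_record_failure(struct demo_irq *irq, int err)
{
	irq->name = ERR_PTR(err);
}

/* at SET_IRQS time: surface the deferred error only if this index is used */
static int demo_check(struct demo_irq *irq)
{
	return IS_ERR(irq->name) ? PTR_ERR(irq->name) : 0;
}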
diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h
index bde84ad344e50..50128da18bcaf 100644
--- a/drivers/vfio/vfio.h
+++ b/drivers/vfio/vfio.h
@@ -434,7 +434,7 @@ static inline void vfio_virqfd_exit(void)
}
#endif
-#ifdef CONFIG_HAVE_KVM
+#if IS_ENABLED(CONFIG_KVM)
void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm);
void vfio_device_put_kvm(struct vfio_device *device);
#else
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index b2854d7939ce0..b5c15fe8f9fcf 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -567,18 +567,6 @@ static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM,
pages, NULL);
if (ret > 0) {
- int i;
-
- /*
- * The zero page is always resident, we don't need to pin it
- * and it falls into our invalid/reserved test so we don't
- * unpin in put_pfn(). Unpin all zero pages in the batch here.
- */
- for (i = 0 ; i < ret; i++) {
- if (unlikely(is_zero_pfn(page_to_pfn(pages[i]))))
- unpin_user_page(pages[i]);
- }
-
*pfn = page_to_pfn(pages[0]);
goto done;
}
diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
index 1cc93aac99a29..e97d796a54fba 100644
--- a/drivers/vfio/vfio_main.c
+++ b/drivers/vfio/vfio_main.c
@@ -16,7 +16,7 @@
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
-#ifdef CONFIG_HAVE_KVM
+#if IS_ENABLED(CONFIG_KVM)
#include <linux/kvm_host.h>
#endif
#include <linux/list.h>
@@ -385,7 +385,7 @@ void vfio_unregister_group_dev(struct vfio_device *device)
}
EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);
-#ifdef CONFIG_HAVE_KVM
+#if IS_ENABLED(CONFIG_KVM)
void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
{
void (*pfn)(struct kvm *kvm);
diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
index 29c564b7a6e13..5322691338019 100644
--- a/drivers/vfio/virqfd.c
+++ b/drivers/vfio/virqfd.c
@@ -101,6 +101,13 @@ static void virqfd_inject(struct work_struct *work)
virqfd->thread(virqfd->opaque, virqfd->data);
}
+static void virqfd_flush_inject(struct work_struct *work)
+{
+ struct virqfd *virqfd = container_of(work, struct virqfd, flush_inject);
+
+ flush_work(&virqfd->inject);
+}
+
int vfio_virqfd_enable(void *opaque,
int (*handler)(void *, void *),
void (*thread)(void *, void *),
@@ -124,6 +131,7 @@ int vfio_virqfd_enable(void *opaque,
INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
INIT_WORK(&virqfd->inject, virqfd_inject);
+ INIT_WORK(&virqfd->flush_inject, virqfd_flush_inject);
irqfd = fdget(fd);
if (!irqfd.file) {
@@ -213,3 +221,16 @@ void vfio_virqfd_disable(struct virqfd **pvirqfd)
flush_workqueue(vfio_irqfd_cleanup_wq);
}
EXPORT_SYMBOL_GPL(vfio_virqfd_disable);
+
+void vfio_virqfd_flush_thread(struct virqfd **pvirqfd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&virqfd_lock, flags);
+ if (*pvirqfd && (*pvirqfd)->thread)
+ queue_work(vfio_irqfd_cleanup_wq, &(*pvirqfd)->flush_inject);
+ spin_unlock_irqrestore(&virqfd_lock, flags);
+
+ flush_workqueue(vfio_irqfd_cleanup_wq);
+}
+EXPORT_SYMBOL_GPL(vfio_virqfd_flush_thread);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 4b2fcb228a0a7..c64ded183f8dd 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -697,6 +697,9 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
hdr = buf;
gso = &hdr->gso;
+ if (!sock_hlen)
+ memset(buf, 0, pad);
+
if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
vhost16_to_cpu(vq, gso->csum_start) +
vhost16_to_cpu(vq, gso->csum_offset) + 2 >
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index bc4a51e4638b4..ba52d128aeb76 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -595,6 +595,9 @@ static long vhost_vdpa_suspend(struct vhost_vdpa *v)
const struct vdpa_config_ops *ops = vdpa->config;
int ret;
+ if (!(ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK))
+ return 0;
+
if (!ops->suspend)
return -EOPNOTSUPP;
@@ -615,6 +618,9 @@ static long vhost_vdpa_resume(struct vhost_vdpa *v)
const struct vdpa_config_ops *ops = vdpa->config;
int ret;
+ if (!(ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK))
+ return 0;
+
if (!ops->resume)
return -EOPNOTSUPP;
@@ -681,6 +687,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
if (!ops->set_group_asid)
return -EOPNOTSUPP;
return ops->set_group_asid(vdpa, idx, s.num);
+ case VHOST_VDPA_GET_VRING_SIZE:
+ if (!ops->get_vq_size)
+ return -EOPNOTSUPP;
+ s.index = idx;
+ s.num = ops->get_vq_size(vdpa, idx);
+ if (copy_to_user(argp, &s, sizeof(s)))
+ return -EFAULT;
+ return 0;
case VHOST_GET_VRING_BASE:
r = ops->get_vq_state(v->vdpa, idx, &vq_state);
if (r)
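A hedged userspace sketch of the new ioctl (illustrative only; it assumes VHOST_VDPA_GET_VRING_SIZE is exposed through <linux/vhost.h> and, as the handler above suggests, takes a struct vhost_vring_state with index as input and num as output):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int print_vq_size(int vdpa_fd, unsigned int vq)
{
	struct vhost_vring_state state = { .index = vq };

	/* errno is EOPNOTSUPP when the parent driver lacks .get_vq_size */
	if (ioctl(vdpa_fd, VHOST_VDPA_GET_VRING_SIZE, &state) < 0)
		return -1;

	printf("vq %u: %u descriptors\n", state.index, state.num);
	return 0;
}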
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 045f666b4f12a..8995730ce0bfc 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2515,7 +2515,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
- vq_err(vq, "Guest moved used index from %u to %u",
+ vq_err(vq, "Guest moved avail index from %u to %u",
last_avail_idx, vq->avail_idx);
return -EFAULT;
}
@@ -2799,9 +2799,19 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
r = vhost_get_avail_idx(vq, &avail_idx);
if (unlikely(r))
return false;
+
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+ if (vq->avail_idx != vq->last_avail_idx) {
+ /* Since we have updated avail_idx, the following
+ * call to vhost_get_vq_desc() will read available
+ * ring entries. Make sure that read happens after
+ * the avail_idx read.
+ */
+ smp_rmb();
+ return false;
+ }
- return vq->avail_idx == vq->last_avail_idx;
+ return true;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
@@ -2838,9 +2848,19 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
&vq->avail->idx, r);
return false;
}
+
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+ if (vq->avail_idx != vq->last_avail_idx) {
+ /* Since we have updated avail_idx, the following
+ * call to vhost_get_vq_desc() will read available
+ * ring entries. Make sure that read happens after
+ * the avail_idx read.
+ */
+ smp_rmb();
+ return true;
+ }
- return vq->avail_idx != vq->last_avail_idx;
+ return false;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);
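The smp_rmb() added in both helpers pairs with the producer's write barrier between filling an available-ring entry and publishing avail->idx. A generic shared-memory sketch of that ordering contract (illustrative only; vhost itself reaches the guest ring through its user-copy helpers rather than plain loads):

/* producer: publish the entry, then the index */
ring[idx & (num - 1)] = head;
smp_wmb();			/* entry visible before the index update */
WRITE_ONCE(*avail_idx_ptr, idx + 1);

/* consumer, mirroring the two hunks above */
cur = READ_ONCE(*avail_idx_ptr);
if (cur != last_idx) {
	smp_rmb();		/* order ring-entry reads after the index read */
	head = READ_ONCE(ring[last_idx & (num - 1)]);
}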
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index ea2d0d69bd8cc..230bca07b09dc 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -183,6 +183,13 @@ config BACKLIGHT_KTD253
which is a 1-wire GPIO-controlled backlight found in some mobile
phones.
+config BACKLIGHT_KTD2801
+ tristate "Backlight Driver for Kinetic KTD2801"
+ select LEDS_EXPRESSWIRE
+ help
+ Say Y to enable the backlight driver for the Kinetic KTD2801 1-wire
+ GPIO-controlled backlight found in Samsung Galaxy Core Prime VE LTE.
+
config BACKLIGHT_KTZ8866
tristate "Backlight Driver for Kinetic KTZ8866"
depends on I2C
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 06966cb204597..8d2cb252042db 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
obj-$(CONFIG_BACKLIGHT_IPAQ_MICRO) += ipaq_micro_bl.o
obj-$(CONFIG_BACKLIGHT_KTD253) += ktd253-backlight.o
+obj-$(CONFIG_BACKLIGHT_KTD2801) += ktd2801-backlight.o
obj-$(CONFIG_BACKLIGHT_KTZ8866) += ktz8866.o
obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533_bl.o
obj-$(CONFIG_BACKLIGHT_LM3630A) += lm3630a_bl.o
diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c
index 28437c2da0f58..e6f66bb35ef59 100644
--- a/drivers/video/backlight/as3711_bl.c
+++ b/drivers/video/backlight/as3711_bl.c
@@ -383,10 +383,8 @@ static int as3711_backlight_probe(struct platform_device *pdev)
if (pdev->dev.parent->of_node) {
ret = as3711_backlight_parse_dt(&pdev->dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "DT parsing failed: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "DT parsing failed\n");
}
if (!pdata->su1_fb && !pdata->su2_fb) {
diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c
index c95a12bf0ce26..b1e7126380ef2 100644
--- a/drivers/video/backlight/bd6107.c
+++ b/drivers/video/backlight/bd6107.c
@@ -119,7 +119,6 @@ static int bd6107_probe(struct i2c_client *client)
struct backlight_device *backlight;
struct backlight_properties props;
struct bd6107 *bd;
- int ret;
if (pdata == NULL) {
dev_err(&client->dev, "No platform data\n");
@@ -147,11 +146,9 @@ static int bd6107_probe(struct i2c_client *client)
* the reset.
*/
bd->reset = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(bd->reset)) {
- dev_err(&client->dev, "unable to request reset GPIO\n");
- ret = PTR_ERR(bd->reset);
- return ret;
- }
+ if (IS_ERR(bd->reset))
+ return dev_err_probe(&client->dev, PTR_ERR(bd->reset),
+ "unable to request reset GPIO\n");
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
diff --git a/drivers/video/backlight/da9052_bl.c b/drivers/video/backlight/da9052_bl.c
index 1cdc8543310b4..b8ff7046510eb 100644
--- a/drivers/video/backlight/da9052_bl.c
+++ b/drivers/video/backlight/da9052_bl.c
@@ -117,6 +117,7 @@ static int da9052_backlight_probe(struct platform_device *pdev)
wleds->led_reg = platform_get_device_id(pdev)->driver_data;
wleds->state = DA9052_WLEDS_OFF;
+ memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = DA9052_MAX_BRIGHTNESS;
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index d28c30b2a35d2..e0c8c2a3f5dc2 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -64,13 +64,9 @@ static int gpio_backlight_probe(struct platform_device *pdev)
def_value = device_property_read_bool(dev, "default-on");
gbl->gpiod = devm_gpiod_get(dev, NULL, GPIOD_ASIS);
- if (IS_ERR(gbl->gpiod)) {
- ret = PTR_ERR(gbl->gpiod);
- if (ret != -EPROBE_DEFER)
- dev_err(dev,
- "Error: The gpios parameter is missing or invalid.\n");
- return ret;
- }
+ if (IS_ERR(gbl->gpiod))
+ return dev_err_probe(dev, PTR_ERR(gbl->gpiod),
+ "The gpios parameter is missing or invalid\n");
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
diff --git a/drivers/video/backlight/hx8357.c b/drivers/video/backlight/hx8357.c
index d7298376cf74d..339d9128fbdeb 100644
--- a/drivers/video/backlight/hx8357.c
+++ b/drivers/video/backlight/hx8357.c
@@ -8,9 +8,9 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/lcd.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
#define HX8357_NUM_IM_PINS 3
@@ -564,41 +564,28 @@ static struct lcd_ops hx8357_ops = {
.get_power = hx8357_get_power,
};
-static const struct of_device_id hx8357_dt_ids[] = {
- {
- .compatible = "himax,hx8357",
- .data = hx8357_lcd_init,
- },
- {
- .compatible = "himax,hx8369",
- .data = hx8369_lcd_init,
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, hx8357_dt_ids);
+typedef int (*hx8357_init_fn)(struct lcd_device *);
static int hx8357_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct lcd_device *lcdev;
struct hx8357_data *lcd;
- const struct of_device_id *match;
+ hx8357_init_fn init_fn;
int i, ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
+ lcd = devm_kzalloc(dev, sizeof(*lcd), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
ret = spi_setup(spi);
- if (ret < 0) {
- dev_err(&spi->dev, "SPI setup failed.\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "SPI setup failed.\n");
lcd->spi = spi;
- match = of_match_device(hx8357_dt_ids, &spi->dev);
- if (!match || !match->data)
+ init_fn = device_get_match_data(dev);
+ if (!init_fn)
return -EINVAL;
lcd->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
@@ -609,14 +596,15 @@ static int hx8357_probe(struct spi_device *spi)
lcd->im_pins = devm_gpiod_get_array_optional(dev, "im", GPIOD_OUT_LOW);
if (IS_ERR(lcd->im_pins))
return dev_err_probe(dev, PTR_ERR(lcd->im_pins), "failed to request im GPIOs\n");
- if (lcd->im_pins->ndescs < HX8357_NUM_IM_PINS)
- return dev_err_probe(dev, -EINVAL, "not enough im GPIOs\n");
+ if (lcd->im_pins) {
+ if (lcd->im_pins->ndescs < HX8357_NUM_IM_PINS)
+ return dev_err_probe(dev, -EINVAL, "not enough im GPIOs\n");
- for (i = 0; i < HX8357_NUM_IM_PINS; i++)
- gpiod_set_consumer_name(lcd->im_pins->desc[i], "im_pins");
+ for (i = 0; i < HX8357_NUM_IM_PINS; i++)
+ gpiod_set_consumer_name(lcd->im_pins->desc[i], "im_pins");
+ }
- lcdev = devm_lcd_device_register(&spi->dev, "mxsfb", &spi->dev, lcd,
- &hx8357_ops);
+ lcdev = devm_lcd_device_register(dev, "mxsfb", dev, lcd, &hx8357_ops);
if (IS_ERR(lcdev)) {
ret = PTR_ERR(lcdev);
return ret;
@@ -625,17 +613,28 @@ static int hx8357_probe(struct spi_device *spi)
hx8357_lcd_reset(lcdev);
- ret = ((int (*)(struct lcd_device *))match->data)(lcdev);
- if (ret) {
- dev_err(&spi->dev, "Couldn't initialize panel\n");
- return ret;
- }
+ ret = init_fn(lcdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Couldn't initialize panel\n");
- dev_info(&spi->dev, "Panel probed\n");
+ dev_info(dev, "Panel probed\n");
return 0;
}
+static const struct of_device_id hx8357_dt_ids[] = {
+ {
+ .compatible = "himax,hx8357",
+ .data = hx8357_lcd_init,
+ },
+ {
+ .compatible = "himax,hx8369",
+ .data = hx8369_lcd_init,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, hx8357_dt_ids);
+
static struct spi_driver hx8357_driver = {
.probe = hx8357_probe,
.driver = {
diff --git a/drivers/video/backlight/ktd2801-backlight.c b/drivers/video/backlight/ktd2801-backlight.c
new file mode 100644
index 0000000000000..d295c27660251
--- /dev/null
+++ b/drivers/video/backlight/ktd2801-backlight.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Datasheet:
+ * https://www.kinet-ic.com/uploads/web/KTD2801/KTD2801-04b.pdf
+ */
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/leds-expresswire.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#define KTD2801_DEFAULT_BRIGHTNESS 100
+#define KTD2801_MAX_BRIGHTNESS 255
+
+/* These values have been extracted from Samsung's driver. */
+static const struct expresswire_timing ktd2801_timing = {
+ .poweroff_us = 2600,
+ .detect_delay_us = 150,
+ .detect_us = 270,
+ .data_start_us = 5,
+ .short_bitset_us = 5,
+ .long_bitset_us = 15,
+ .end_of_data_low_us = 10,
+ .end_of_data_high_us = 350
+};
+
+struct ktd2801_backlight {
+ struct expresswire_common_props props;
+ struct backlight_device *bd;
+ bool was_on;
+};
+
+static int ktd2801_update_status(struct backlight_device *bd)
+{
+ struct ktd2801_backlight *ktd2801 = bl_get_data(bd);
+ u8 brightness = (u8) backlight_get_brightness(bd);
+
+ if (backlight_is_blank(bd)) {
+ expresswire_power_off(&ktd2801->props);
+ ktd2801->was_on = false;
+ return 0;
+ }
+
+ if (!ktd2801->was_on) {
+ expresswire_enable(&ktd2801->props);
+ ktd2801->was_on = true;
+ }
+
+ expresswire_write_u8(&ktd2801->props, brightness);
+
+ return 0;
+}
+
+static const struct backlight_ops ktd2801_backlight_ops = {
+ .update_status = ktd2801_update_status,
+};
+
+static int ktd2801_backlight_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct backlight_device *bd;
+ struct ktd2801_backlight *ktd2801;
+ u32 brightness, max_brightness;
+ int ret;
+
+ ktd2801 = devm_kzalloc(dev, sizeof(*ktd2801), GFP_KERNEL);
+ if (!ktd2801)
+ return -ENOMEM;
+ ktd2801->was_on = true;
+ ktd2801->props.timing = ktd2801_timing;
+
+ ret = device_property_read_u32(dev, "max-brightness", &max_brightness);
+ if (ret)
+ max_brightness = KTD2801_MAX_BRIGHTNESS;
+ if (max_brightness > KTD2801_MAX_BRIGHTNESS) {
+ dev_err(dev, "illegal max brightness specified\n");
+ max_brightness = KTD2801_MAX_BRIGHTNESS;
+ }
+
+ ret = device_property_read_u32(dev, "default-brightness", &brightness);
+ if (ret)
+ brightness = KTD2801_DEFAULT_BRIGHTNESS;
+ if (brightness > max_brightness) {
+ dev_err(dev, "default brightness exceeds max\n");
+ brightness = max_brightness;
+ }
+
+ ktd2801->props.ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_OUT_HIGH);
+ if (IS_ERR(ktd2801->props.ctrl_gpio))
+ return dev_err_probe(dev, PTR_ERR(ktd2801->props.ctrl_gpio),
+ "failed to get backlight GPIO");
+ gpiod_set_consumer_name(ktd2801->props.ctrl_gpio, dev_name(dev));
+
+ bd = devm_backlight_device_register(dev, dev_name(dev), dev, ktd2801,
+ &ktd2801_backlight_ops, NULL);
+ if (IS_ERR(bd))
+ return dev_err_probe(dev, PTR_ERR(bd),
+ "failed to register backlight");
+
+ bd->props.max_brightness = max_brightness;
+ bd->props.brightness = brightness;
+
+ ktd2801->bd = bd;
+ platform_set_drvdata(pdev, bd);
+ backlight_update_status(bd);
+
+ return 0;
+}
+
+static const struct of_device_id ktd2801_of_match[] = {
+ { .compatible = "kinetic,ktd2801" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ktd2801_of_match);
+
+static struct platform_driver ktd2801_backlight_driver = {
+ .driver = {
+ .name = "ktd2801-backlight",
+ .of_match_table = ktd2801_of_match,
+ },
+ .probe = ktd2801_backlight_probe,
+};
+module_platform_driver(ktd2801_backlight_driver);
+
+MODULE_IMPORT_NS(EXPRESSWIRE);
+MODULE_AUTHOR("Duje Mihanović <duje.mihanovic@skole.hr>");
+MODULE_DESCRIPTION("Kinetic KTD2801 Backlight Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/ktz8866.c b/drivers/video/backlight/ktz8866.c
index 9c980f2571ee3..014877b5a9848 100644
--- a/drivers/video/backlight/ktz8866.c
+++ b/drivers/video/backlight/ktz8866.c
@@ -97,20 +97,20 @@ static void ktz8866_init(struct ktz8866 *ktz)
{
unsigned int val = 0;
- if (of_property_read_u32(ktz->client->dev.of_node, "current-num-sinks", &val))
+ if (!of_property_read_u32(ktz->client->dev.of_node, "current-num-sinks", &val))
ktz8866_write(ktz, BL_EN, BIT(val) - 1);
else
/* Enable all 6 current sinks if the number of current sinks isn't specified. */
ktz8866_write(ktz, BL_EN, BIT(6) - 1);
- if (of_property_read_u32(ktz->client->dev.of_node, "kinetic,current-ramp-delay-ms", &val)) {
+ if (!of_property_read_u32(ktz->client->dev.of_node, "kinetic,current-ramp-delay-ms", &val)) {
if (val <= 128)
ktz8866_write(ktz, BL_CFG2, BIT(7) | (ilog2(val) << 3) | PWM_HYST);
else
ktz8866_write(ktz, BL_CFG2, BIT(7) | ((5 + val / 64) << 3) | PWM_HYST);
}
- if (of_property_read_u32(ktz->client->dev.of_node, "kinetic,led-enable-ramp-delay-ms", &val)) {
+ if (!of_property_read_u32(ktz->client->dev.of_node, "kinetic,led-enable-ramp-delay-ms", &val)) {
if (val == 0)
ktz8866_write(ktz, BL_DIMMING, 0);
else {
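The inverted checks above hinge on of_property_read_u32() returning 0 on success and a negative errno when the property is missing or malformed; the old code only applied the DT values when the read had failed. The convention in isolation (illustrative sketch; the default value is hypothetical):

#include <linux/of.h>

static u32 demo_ramp_delay(struct device_node *np)
{
	u32 val;

	/* 0 means the property exists and parsed cleanly */
	if (!of_property_read_u32(np, "kinetic,current-ramp-delay-ms", &val))
		return val;

	return 128;	/* hypothetical default when the property is absent */
}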
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index cc763cf15f53e..bd5137ee203ba 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -179,34 +179,28 @@ static int l4f00242t03_probe(struct spi_device *spi)
priv->spi = spi;
priv->reset = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(priv->reset)) {
- dev_err(&spi->dev,
- "Unable to get the lcd l4f00242t03 reset gpio.\n");
- return PTR_ERR(priv->reset);
- }
+ if (IS_ERR(priv->reset))
+ return dev_err_probe(&spi->dev, PTR_ERR(priv->reset),
+ "Unable to get the lcd l4f00242t03 reset gpio.\n");
gpiod_set_consumer_name(priv->reset, "lcd l4f00242t03 reset");
priv->enable = devm_gpiod_get(&spi->dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(priv->enable)) {
- dev_err(&spi->dev,
- "Unable to get the lcd l4f00242t03 data en gpio.\n");
- return PTR_ERR(priv->enable);
- }
+ if (IS_ERR(priv->enable))
+ return dev_err_probe(&spi->dev, PTR_ERR(priv->enable),
+ "Unable to get the lcd l4f00242t03 data en gpio.\n");
gpiod_set_consumer_name(priv->enable, "lcd l4f00242t03 data enable");
priv->io_reg = devm_regulator_get(&spi->dev, "vdd");
- if (IS_ERR(priv->io_reg)) {
- dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
- __func__);
- return PTR_ERR(priv->io_reg);
- }
+ if (IS_ERR(priv->io_reg))
+ return dev_err_probe(&spi->dev, PTR_ERR(priv->io_reg),
+ "%s: Unable to get the IO regulator\n",
+ __func__);
priv->core_reg = devm_regulator_get(&spi->dev, "vcore");
- if (IS_ERR(priv->core_reg)) {
- dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
- __func__);
- return PTR_ERR(priv->core_reg);
- }
+ if (IS_ERR(priv->core_reg))
+ return dev_err_probe(&spi->dev, PTR_ERR(priv->core_reg),
+ "%s: Unable to get the core regulator\n",
+ __func__);
priv->ld = devm_lcd_device_register(&spi->dev, "l4f00242t03", &spi->dev,
priv, &l4f_ops);
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index a3412c936ca28..76d47e2e82427 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -189,10 +189,11 @@ static int lm3630a_bank_a_update_status(struct backlight_device *bl)
int ret;
struct lm3630a_chip *pchip = bl_get_data(bl);
enum lm3630a_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
+ int brightness = backlight_get_brightness(bl);
/* pwm control */
if ((pwm_ctrl & LM3630A_PWM_BANK_A) != 0)
- return lm3630a_pwm_ctrl(pchip, bl->props.brightness,
+ return lm3630a_pwm_ctrl(pchip, brightness,
bl->props.max_brightness);
/* disable sleep */
@@ -201,9 +202,9 @@ static int lm3630a_bank_a_update_status(struct backlight_device *bl)
goto out_i2c_err;
usleep_range(1000, 2000);
/* minimum brightness is 0x04 */
- ret = lm3630a_write(pchip, REG_BRT_A, bl->props.brightness);
+ ret = lm3630a_write(pchip, REG_BRT_A, brightness);
- if (backlight_is_blank(bl) || (backlight_get_brightness(bl) < 0x4))
+ if (brightness < 0x4)
/* turn the string off */
ret |= lm3630a_update(pchip, REG_CTRL, LM3630A_LEDA_ENABLE, 0);
else
@@ -233,7 +234,7 @@ static int lm3630a_bank_a_get_brightness(struct backlight_device *bl)
if (rval < 0)
goto out_i2c_err;
brightness |= rval;
- goto out;
+ return brightness;
}
/* disable sleep */
@@ -244,11 +245,8 @@ static int lm3630a_bank_a_get_brightness(struct backlight_device *bl)
rval = lm3630a_read(pchip, REG_BRT_A);
if (rval < 0)
goto out_i2c_err;
- brightness = rval;
+ return rval;
-out:
- bl->props.brightness = brightness;
- return bl->props.brightness;
out_i2c_err:
dev_err(pchip->dev, "i2c failed to access register\n");
return 0;
@@ -266,10 +264,11 @@ static int lm3630a_bank_b_update_status(struct backlight_device *bl)
int ret;
struct lm3630a_chip *pchip = bl_get_data(bl);
enum lm3630a_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
+ int brightness = backlight_get_brightness(bl);
/* pwm control */
if ((pwm_ctrl & LM3630A_PWM_BANK_B) != 0)
- return lm3630a_pwm_ctrl(pchip, bl->props.brightness,
+ return lm3630a_pwm_ctrl(pchip, brightness,
bl->props.max_brightness);
/* disable sleep */
@@ -278,9 +277,9 @@ static int lm3630a_bank_b_update_status(struct backlight_device *bl)
goto out_i2c_err;
usleep_range(1000, 2000);
/* minimum brightness is 0x04 */
- ret = lm3630a_write(pchip, REG_BRT_B, bl->props.brightness);
+ ret = lm3630a_write(pchip, REG_BRT_B, brightness);
- if (backlight_is_blank(bl) || (backlight_get_brightness(bl) < 0x4))
+ if (brightness < 0x4)
/* turn the string off */
ret |= lm3630a_update(pchip, REG_CTRL, LM3630A_LEDB_ENABLE, 0);
else
@@ -310,7 +309,7 @@ static int lm3630a_bank_b_get_brightness(struct backlight_device *bl)
if (rval < 0)
goto out_i2c_err;
brightness |= rval;
- goto out;
+ return brightness;
}
/* disable sleep */
@@ -321,11 +320,8 @@ static int lm3630a_bank_b_get_brightness(struct backlight_device *bl)
rval = lm3630a_read(pchip, REG_BRT_B);
if (rval < 0)
goto out_i2c_err;
- brightness = rval;
+ return rval;
-out:
- bl->props.brightness = brightness;
- return bl->props.brightness;
out_i2c_err:
dev_err(pchip->dev, "i2c failed to access register\n");
return 0;
@@ -343,6 +339,7 @@ static int lm3630a_backlight_register(struct lm3630a_chip *pchip)
struct backlight_properties props;
const char *label;
+ memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
if (pdata->leda_ctrl != LM3630A_LEDA_DISABLE) {
props.brightness = pdata->leda_init_brt;
@@ -543,10 +540,8 @@ static int lm3630a_probe(struct i2c_client *client)
pchip->enable_gpio = devm_gpiod_get_optional(&client->dev, "enable",
GPIOD_OUT_HIGH);
- if (IS_ERR(pchip->enable_gpio)) {
- rval = PTR_ERR(pchip->enable_gpio);
- return rval;
- }
+ if (IS_ERR(pchip->enable_gpio))
+ return PTR_ERR(pchip->enable_gpio);
/* chip initialize */
rval = lm3630a_chip_init(pchip);
@@ -563,10 +558,9 @@ static int lm3630a_probe(struct i2c_client *client)
/* pwm */
if (pdata->pwm_ctrl != LM3630A_PWM_DISABLE) {
pchip->pwmd = devm_pwm_get(pchip->dev, "lm3630a-pwm");
- if (IS_ERR(pchip->pwmd)) {
- dev_err(&client->dev, "fail : get pwm device\n");
- return PTR_ERR(pchip->pwmd);
- }
+ if (IS_ERR(pchip->pwmd))
+ return dev_err_probe(&client->dev, PTR_ERR(pchip->pwmd),
+ "fail : get pwm device\n");
pwm_init_state(pchip->pwmd, &pchip->pwmd_state);
}
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index 5246c171497d6..564f62acd7211 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -338,6 +338,7 @@ static int lm3639_probe(struct i2c_client *client)
}
/* backlight */
+ memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.brightness = pdata->init_brt_led;
props.max_brightness = pdata->max_brt_led;
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
index d1a14b0db265b..31f97230ee506 100644
--- a/drivers/video/backlight/lp8788_bl.c
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -191,6 +191,7 @@ static int lp8788_backlight_register(struct lp8788_bl *bl)
int init_brt;
char *name;
+ memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = MAX_BRIGHTNESS;
diff --git a/drivers/video/backlight/mp3309c.c b/drivers/video/backlight/mp3309c.c
index 34d71259fac1d..c80a1481e742b 100644
--- a/drivers/video/backlight/mp3309c.c
+++ b/drivers/video/backlight/mp3309c.c
@@ -15,6 +15,8 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
@@ -131,7 +133,7 @@ static int mp3309c_bl_update_status(struct backlight_device *bl)
chip->pdata->levels[brightness],
chip->pdata->levels[chip->pdata->max_brightness]);
pwmstate.enabled = true;
- ret = pwm_apply_state(chip->pwmd, &pwmstate);
+ ret = pwm_apply_might_sleep(chip->pwmd, &pwmstate);
if (ret)
return ret;
@@ -199,20 +201,15 @@ static const struct backlight_ops mp3309c_bl_ops = {
.update_status = mp3309c_bl_update_status,
};
-static int pm3309c_parse_dt_node(struct mp3309c_chip *chip,
- struct mp3309c_platform_data *pdata)
+static int mp3309c_parse_fwnode(struct mp3309c_chip *chip,
+ struct mp3309c_platform_data *pdata)
{
- struct device_node *node = chip->dev->of_node;
- struct property *prop_pwms;
- struct property *prop_levels = NULL;
- int length = 0;
int ret, i;
unsigned int num_levels, tmp_value;
+ struct device *dev = chip->dev;
- if (!node) {
- dev_err(chip->dev, "failed to get DT node\n");
- return -ENODEV;
- }
+ if (!dev_fwnode(dev))
+ return dev_err_probe(dev, -ENODEV, "failed to get firmware node\n");
/*
* Dimming mode: the MP3309C provides two dimming control modes:
@@ -224,12 +221,10 @@ static int pm3309c_parse_dt_node(struct mp3309c_chip *chip,
* found in the backlight node, the mode switches to PWM mode.
*/
pdata->dimming_mode = DIMMING_ANALOG_I2C;
- prop_pwms = of_find_property(node, "pwms", &length);
- if (prop_pwms) {
- chip->pwmd = devm_pwm_get(chip->dev, NULL);
+ if (device_property_present(dev, "pwms")) {
+ chip->pwmd = devm_pwm_get(dev, NULL);
if (IS_ERR(chip->pwmd))
- return dev_err_probe(chip->dev, PTR_ERR(chip->pwmd),
- "error getting pwm data\n");
+ return dev_err_probe(dev, PTR_ERR(chip->pwmd), "error getting pwm data\n");
pdata->dimming_mode = DIMMING_PWM;
pwm_apply_args(chip->pwmd);
}
@@ -247,21 +242,17 @@ static int pm3309c_parse_dt_node(struct mp3309c_chip *chip,
num_levels = ANALOG_I2C_NUM_LEVELS;
/* Enable GPIO used in I2C dimming mode only */
- chip->enable_gpio = devm_gpiod_get(chip->dev, "enable",
- GPIOD_OUT_HIGH);
+ chip->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
if (IS_ERR(chip->enable_gpio))
- return dev_err_probe(chip->dev,
- PTR_ERR(chip->enable_gpio),
+ return dev_err_probe(dev, PTR_ERR(chip->enable_gpio),
"error getting enable gpio\n");
} else {
/*
* PWM control mode: check for brightness level in DT
*/
- prop_levels = of_find_property(node, "brightness-levels",
- &length);
- if (prop_levels) {
+ if (device_property_present(dev, "brightness-levels")) {
/* Read brightness levels from DT */
- num_levels = length / sizeof(u32);
+ num_levels = device_property_count_u32(dev, "brightness-levels");
if (num_levels < 2)
return -EINVAL;
} else {
@@ -271,14 +262,12 @@ static int pm3309c_parse_dt_node(struct mp3309c_chip *chip,
}
/* Fill brightness levels array */
- pdata->levels = devm_kcalloc(chip->dev, num_levels,
- sizeof(*pdata->levels), GFP_KERNEL);
+ pdata->levels = devm_kcalloc(dev, num_levels, sizeof(*pdata->levels), GFP_KERNEL);
if (!pdata->levels)
return -ENOMEM;
- if (prop_levels) {
- ret = of_property_read_u32_array(node, "brightness-levels",
- pdata->levels,
- num_levels);
+ if (device_property_present(dev, "brightness-levels")) {
+ ret = device_property_read_u32_array(dev, "brightness-levels",
+ pdata->levels, num_levels);
if (ret < 0)
return ret;
} else {
@@ -288,13 +277,11 @@ static int pm3309c_parse_dt_node(struct mp3309c_chip *chip,
pdata->max_brightness = num_levels - 1;
- ret = of_property_read_u32(node, "default-brightness",
- &pdata->default_brightness);
+ ret = device_property_read_u32(dev, "default-brightness", &pdata->default_brightness);
if (ret)
pdata->default_brightness = pdata->max_brightness;
if (pdata->default_brightness > pdata->max_brightness) {
- dev_err(chip->dev,
- "default brightness exceeds max brightness\n");
+ dev_err_probe(dev, -ERANGE, "default brightness exceeds max brightness\n");
pdata->default_brightness = pdata->max_brightness;
}
@@ -310,8 +297,8 @@ static int pm3309c_parse_dt_node(struct mp3309c_chip *chip,
* If missing, the default value for OVP is 35.5V
*/
pdata->over_voltage_protection = REG_I2C_1_OVP1;
- if (!of_property_read_u32(node, "mps,overvoltage-protection-microvolt",
- &tmp_value)) {
+ ret = device_property_read_u32(dev, "mps,overvoltage-protection-microvolt", &tmp_value);
+ if (!ret) {
switch (tmp_value) {
case 13500000:
pdata->over_voltage_protection = 0x00;
@@ -328,62 +315,59 @@ static int pm3309c_parse_dt_node(struct mp3309c_chip *chip,
}
/* Synchronous (default) and non-synchronous mode */
- pdata->sync_mode = true;
- if (of_property_read_bool(node, "mps,no-sync-mode"))
- pdata->sync_mode = false;
+ pdata->sync_mode = !device_property_read_bool(dev, "mps,no-sync-mode");
return 0;
}
static int mp3309c_probe(struct i2c_client *client)
{
- struct mp3309c_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct device *dev = &client->dev;
+ struct mp3309c_platform_data *pdata = dev_get_platdata(dev);
struct mp3309c_chip *chip;
struct backlight_properties props;
struct pwm_state pwmstate;
int ret;
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(&client->dev, "failed to check i2c functionality\n");
- return -EOPNOTSUPP;
- }
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return dev_err_probe(dev, -EOPNOTSUPP, "failed to check i2c functionality\n");
- chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- chip->dev = &client->dev;
+ chip->dev = dev;
chip->regmap = devm_regmap_init_i2c(client, &mp3309c_regmap);
if (IS_ERR(chip->regmap))
- return dev_err_probe(&client->dev, PTR_ERR(chip->regmap),
+ return dev_err_probe(dev, PTR_ERR(chip->regmap),
"failed to allocate register map\n");
i2c_set_clientdata(client, chip);
if (!pdata) {
- pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- ret = pm3309c_parse_dt_node(chip, pdata);
+ ret = mp3309c_parse_fwnode(chip, pdata);
if (ret)
return ret;
}
chip->pdata = pdata;
/* Backlight properties */
+ memset(&props, 0, sizeof(struct backlight_properties));
props.brightness = pdata->default_brightness;
props.max_brightness = pdata->max_brightness;
props.scale = BACKLIGHT_SCALE_LINEAR;
props.type = BACKLIGHT_RAW;
props.power = FB_BLANK_UNBLANK;
props.fb_blank = FB_BLANK_UNBLANK;
- chip->bl = devm_backlight_device_register(chip->dev, "mp3309c",
- chip->dev, chip,
+ chip->bl = devm_backlight_device_register(dev, "mp3309c", dev, chip,
&mp3309c_bl_ops, &props);
if (IS_ERR(chip->bl))
- return dev_err_probe(chip->dev, PTR_ERR(chip->bl),
+ return dev_err_probe(dev, PTR_ERR(chip->bl),
"error registering backlight device\n");
/* In PWM dimming mode, enable pwm device */
@@ -393,10 +377,9 @@ static int mp3309c_probe(struct i2c_client *client)
chip->pdata->default_brightness,
chip->pdata->max_brightness);
pwmstate.enabled = true;
- ret = pwm_apply_state(chip->pwmd, &pwmstate);
+ ret = pwm_apply_might_sleep(chip->pwmd, &pwmstate);
if (ret)
- return dev_err_probe(chip->dev, ret,
- "error setting pwm device\n");
+ return dev_err_probe(dev, ret, "error setting pwm device\n");
}
chip->pdata->status = FIRST_POWER_ON;
diff --git a/drivers/video/backlight/pandora_bl.c b/drivers/video/backlight/pandora_bl.c
index f946470ce9f65..51faa889e01f4 100644
--- a/drivers/video/backlight/pandora_bl.c
+++ b/drivers/video/backlight/pandora_bl.c
@@ -114,10 +114,8 @@ static int pandora_backlight_probe(struct platform_device *pdev)
u8 r;
priv = devm_kmalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&pdev->dev, "failed to allocate driver private data\n");
+ if (!priv)
return -ENOMEM;
- }
memset(&props, 0, sizeof(props));
props.max_brightness = MAX_USER_VALUE;
diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
index 14af5d9e13b00..139049368fdcf 100644
--- a/drivers/video/console/dummycon.c
+++ b/drivers/video/console/dummycon.c
@@ -50,7 +50,8 @@ void dummycon_unregister_output_notifier(struct notifier_block *nb)
raw_notifier_chain_unregister(&dummycon_output_nh, nb);
}
-static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos)
+static void dummycon_putc(struct vc_data *vc, u16 c, unsigned int y,
+ unsigned int x)
{
WARN_CONSOLE_UNLOCKED();
@@ -58,10 +59,10 @@ static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos)
raw_notifier_call_chain(&dummycon_output_nh, 0, NULL);
}
-static void dummycon_putcs(struct vc_data *vc, const unsigned short *s,
- int count, int ypos, int xpos)
+static void dummycon_putcs(struct vc_data *vc, const u16 *s, unsigned int count,
+ unsigned int ypos, unsigned int xpos)
{
- int i;
+ unsigned int i;
if (!dummycon_putc_called) {
/* Ignore erases */
@@ -78,18 +79,21 @@ static void dummycon_putcs(struct vc_data *vc, const unsigned short *s,
raw_notifier_call_chain(&dummycon_output_nh, 0, NULL);
}
-static int dummycon_blank(struct vc_data *vc, int blank, int mode_switch)
+static bool dummycon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
+ bool mode_switch)
{
/* Redraw, so that we get putc(s) for output done while blanked */
- return 1;
+ return true;
}
#else
-static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos) { }
-static void dummycon_putcs(struct vc_data *vc, const unsigned short *s,
- int count, int ypos, int xpos) { }
-static int dummycon_blank(struct vc_data *vc, int blank, int mode_switch)
+static void dummycon_putc(struct vc_data *vc, u16 c, unsigned int y,
+ unsigned int x) { }
+static void dummycon_putcs(struct vc_data *vc, const u16 *s, unsigned int count,
+ unsigned int ypos, unsigned int xpos) { }
+static bool dummycon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
+ bool mode_switch)
{
- return 0;
+ return false;
}
#endif
@@ -98,7 +102,7 @@ static const char *dummycon_startup(void)
return "dummy device";
}
-static void dummycon_init(struct vc_data *vc, int init)
+static void dummycon_init(struct vc_data *vc, bool init)
{
vc->vc_can_do_color = 1;
if (init) {
@@ -109,9 +113,9 @@ static void dummycon_init(struct vc_data *vc, int init)
}
static void dummycon_deinit(struct vc_data *vc) { }
-static void dummycon_clear(struct vc_data *vc, int sy, int sx, int height,
- int width) { }
-static void dummycon_cursor(struct vc_data *vc, int mode) { }
+static void dummycon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
+ unsigned int width) { }
+static void dummycon_cursor(struct vc_data *vc, bool enable) { }
static bool dummycon_scroll(struct vc_data *vc, unsigned int top,
unsigned int bottom, enum con_scroll dir,
@@ -120,9 +124,9 @@ static bool dummycon_scroll(struct vc_data *vc, unsigned int top,
return false;
}
-static int dummycon_switch(struct vc_data *vc)
+static bool dummycon_switch(struct vc_data *vc)
{
- return 0;
+ return false;
}
/*
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c
index ef29b321967f0..c0e1f4554a44b 100644
--- a/drivers/video/console/mdacon.c
+++ b/drivers/video/console/mdacon.c
@@ -352,7 +352,7 @@ static const char *mdacon_startup(void)
return "MDA-2";
}
-static void mdacon_init(struct vc_data *c, int init)
+static void mdacon_init(struct vc_data *c, bool init)
{
c->vc_complement_mask = 0x0800; /* reverse video */
c->vc_display_fg = &mda_display_fg;
@@ -427,13 +427,8 @@ static inline u16 *mda_addr(unsigned int x, unsigned int y)
return mda_vram_base + y * mda_num_columns + x;
}
-static void mdacon_putc(struct vc_data *c, int ch, int y, int x)
-{
- scr_writew(mda_convert_attr(ch), mda_addr(x, y));
-}
-
-static void mdacon_putcs(struct vc_data *c, const unsigned short *s,
- int count, int y, int x)
+static void mdacon_putcs(struct vc_data *c, const u16 *s, unsigned int count,
+ unsigned int y, unsigned int x)
{
u16 *dest = mda_addr(x, y);
@@ -442,29 +437,22 @@ static void mdacon_putcs(struct vc_data *c, const unsigned short *s,
}
}
-static void mdacon_clear(struct vc_data *c, int y, int x,
- int height, int width)
+static void mdacon_clear(struct vc_data *c, unsigned int y, unsigned int x,
+ unsigned int width)
{
u16 *dest = mda_addr(x, y);
u16 eattr = mda_convert_attr(c->vc_video_erase_char);
- if (width <= 0 || height <= 0)
- return;
-
- if (x==0 && width==mda_num_columns) {
- scr_memsetw(dest, eattr, height*width*2);
- } else {
- for (; height > 0; height--, dest+=mda_num_columns)
- scr_memsetw(dest, eattr, width*2);
- }
+ scr_memsetw(dest, eattr, width * 2);
}
-
-static int mdacon_switch(struct vc_data *c)
+
+static bool mdacon_switch(struct vc_data *c)
{
- return 1; /* redrawing needed */
+ return true; /* redrawing needed */
}
-static int mdacon_blank(struct vc_data *c, int blank, int mode_switch)
+static bool mdacon_blank(struct vc_data *c, enum vesa_blank_mode blank,
+ bool mode_switch)
{
if (mda_type == TYPE_MDA) {
if (blank)
@@ -472,20 +460,20 @@ static int mdacon_blank(struct vc_data *c, int blank, int mode_switch)
mda_convert_attr(c->vc_video_erase_char),
c->vc_screenbuf_size);
/* Tell console.c that it has to restore the screen itself */
- return 1;
+ return true;
} else {
if (blank)
outb_p(0x00, mda_mode_port); /* disable video */
else
outb_p(MDA_MODE_VIDEO_EN | MDA_MODE_BLINK_EN,
mda_mode_port);
- return 0;
+ return false;
}
}
-static void mdacon_cursor(struct vc_data *c, int mode)
+static void mdacon_cursor(struct vc_data *c, bool enable)
{
- if (mode == CM_ERASE) {
+ if (!enable) {
mda_set_cursor(mda_vram_len - 1);
return;
}
@@ -544,7 +532,6 @@ static const struct consw mda_con = {
.con_init = mdacon_init,
.con_deinit = mdacon_deinit,
.con_clear = mdacon_clear,
- .con_putc = mdacon_putc,
.con_putcs = mdacon_putcs,
.con_cursor = mdacon_cursor,
.con_scroll = mdacon_scroll,
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index e8e4f82cd4a1b..a51cfc1d560e7 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -324,7 +324,7 @@ out_unmap:
return NULL;
}
-static void newport_init(struct vc_data *vc, int init)
+static void newport_init(struct vc_data *vc, bool init)
{
int cols, rows;
@@ -346,12 +346,12 @@ static void newport_deinit(struct vc_data *c)
}
}
-static void newport_clear(struct vc_data *vc, int sy, int sx, int height,
- int width)
+static void newport_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
+ unsigned int width)
{
int xend = ((sx + width) << 3) - 1;
int ystart = ((sy << 4) + topscan) & 0x3ff;
- int yend = (((sy + height) << 4) + topscan - 1) & 0x3ff;
+ int yend = (((sy + 1) << 4) + topscan - 1) & 0x3ff;
if (logo_active)
return;
@@ -367,8 +367,8 @@ static void newport_clear(struct vc_data *vc, int sy, int sx, int height,
}
}
-static void newport_putc(struct vc_data *vc, int charattr, int ypos,
- int xpos)
+static void newport_putc(struct vc_data *vc, u16 charattr, unsigned int ypos,
+ unsigned int xpos)
{
unsigned char *p;
@@ -396,12 +396,13 @@ static void newport_putc(struct vc_data *vc, int charattr, int ypos,
RENDER(npregs, p);
}
-static void newport_putcs(struct vc_data *vc, const unsigned short *s,
- int count, int ypos, int xpos)
+static void newport_putcs(struct vc_data *vc, const u16 *s,
+ unsigned int count, unsigned int ypos,
+ unsigned int xpos)
{
- int i;
- int charattr;
unsigned char *p;
+ unsigned int i;
+ u16 charattr;
charattr = (scr_readw(s) >> 8) & 0xff;
@@ -437,32 +438,28 @@ static void newport_putcs(struct vc_data *vc, const unsigned short *s,
}
}
-static void newport_cursor(struct vc_data *vc, int mode)
+static void newport_cursor(struct vc_data *vc, bool enable)
{
unsigned short treg;
int xcurs, ycurs;
- switch (mode) {
- case CM_ERASE:
- treg = newport_vc2_get(npregs, VC2_IREG_CONTROL);
- newport_vc2_set(npregs, VC2_IREG_CONTROL,
- (treg & ~(VC2_CTRL_ECDISP)));
- break;
+ treg = newport_vc2_get(npregs, VC2_IREG_CONTROL);
- case CM_MOVE:
- case CM_DRAW:
- treg = newport_vc2_get(npregs, VC2_IREG_CONTROL);
+ if (!enable) {
newport_vc2_set(npregs, VC2_IREG_CONTROL,
- (treg | VC2_CTRL_ECDISP));
- xcurs = (vc->vc_pos - vc->vc_visible_origin) / 2;
- ycurs = ((xcurs / vc->vc_cols) << 4) + 31;
- xcurs = ((xcurs % vc->vc_cols) << 3) + xcurs_correction;
- newport_vc2_set(npregs, VC2_IREG_CURSX, xcurs);
- newport_vc2_set(npregs, VC2_IREG_CURSY, ycurs);
+ (treg & ~(VC2_CTRL_ECDISP)));
+ return;
}
+
+ newport_vc2_set(npregs, VC2_IREG_CONTROL, (treg | VC2_CTRL_ECDISP));
+ xcurs = (vc->vc_pos - vc->vc_visible_origin) / 2;
+ ycurs = ((xcurs / vc->vc_cols) << 4) + 31;
+ xcurs = ((xcurs % vc->vc_cols) << 3) + xcurs_correction;
+ newport_vc2_set(npregs, VC2_IREG_CURSX, xcurs);
+ newport_vc2_set(npregs, VC2_IREG_CURSY, ycurs);
}
-static int newport_switch(struct vc_data *vc)
+static bool newport_switch(struct vc_data *vc)
{
static int logo_drawn = 0;
@@ -476,14 +473,15 @@ static int newport_switch(struct vc_data *vc)
}
}
- return 1;
+ return true;
}
-static int newport_blank(struct vc_data *c, int blank, int mode_switch)
+static bool newport_blank(struct vc_data *c, enum vesa_blank_mode blank,
+ bool mode_switch)
{
unsigned short treg;
- if (blank == 0) {
+ if (blank == VESA_NO_BLANKING) {
/* unblank console */
treg = newport_vc2_get(npregs, VC2_IREG_CONTROL);
newport_vc2_set(npregs, VC2_IREG_CONTROL,
@@ -494,10 +492,12 @@ static int newport_blank(struct vc_data *c, int blank, int mode_switch)
newport_vc2_set(npregs, VC2_IREG_CONTROL,
(treg & ~(VC2_CTRL_EDISP)));
}
- return 1;
+
+ return true;
}
-static int newport_set_font(int unit, struct console_font *op, unsigned int vpitch)
+static int newport_set_font(int unit, const struct console_font *op,
+ unsigned int vpitch)
{
int w = op->width;
int h = op->height;
@@ -564,12 +564,13 @@ static int newport_set_def_font(int unit, struct console_font *op)
return 0;
}
-static int newport_font_default(struct vc_data *vc, struct console_font *op, char *name)
+static int newport_font_default(struct vc_data *vc, struct console_font *op,
+ const char *name)
{
return newport_set_def_font(vc->vc_num, op);
}
-static int newport_font_set(struct vc_data *vc, struct console_font *font,
+static int newport_font_set(struct vc_data *vc, const struct console_font *font,
unsigned int vpitch, unsigned int flags)
{
return newport_set_font(vc->vc_num, font, vpitch);
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index 992a4fa431aaa..4c7b4959a1aab 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -71,19 +71,8 @@ static const char *sticon_startup(void)
return "STI console";
}
-static void sticon_putc(struct vc_data *conp, int c, int ypos, int xpos)
-{
- if (vga_is_gfx || console_blanked)
- return;
-
- if (conp->vc_mode != KD_TEXT)
- return;
-
- sti_putc(sticon_sti, c, ypos, xpos, font_data[conp->vc_num]);
-}
-
-static void sticon_putcs(struct vc_data *conp, const unsigned short *s,
- int count, int ypos, int xpos)
+static void sticon_putcs(struct vc_data *conp, const u16 *s, unsigned int count,
+ unsigned int ypos, unsigned int xpos)
{
if (vga_is_gfx || console_blanked)
return;
@@ -97,7 +86,7 @@ static void sticon_putcs(struct vc_data *conp, const unsigned short *s,
}
}
-static void sticon_cursor(struct vc_data *conp, int mode)
+static void sticon_cursor(struct vc_data *conp, bool enable)
{
unsigned short car1;
@@ -106,23 +95,20 @@ static void sticon_cursor(struct vc_data *conp, int mode)
return;
car1 = conp->vc_screenbuf[conp->state.x + conp->state.y * conp->vc_cols];
- switch (mode) {
- case CM_ERASE:
+ if (!enable) {
sti_putc(sticon_sti, car1, conp->state.y, conp->state.x,
font_data[conp->vc_num]);
- break;
- case CM_MOVE:
- case CM_DRAW:
- switch (CUR_SIZE(conp->vc_cursor_type)) {
- case CUR_UNDERLINE:
- case CUR_LOWER_THIRD:
- case CUR_LOWER_HALF:
- case CUR_TWO_THIRDS:
- case CUR_BLOCK:
- sti_putc(sticon_sti, (car1 & 255) + (0 << 8) + (7 << 11),
- conp->state.y, conp->state.x, font_data[conp->vc_num]);
- break;
- }
+ return;
+ }
+
+ switch (CUR_SIZE(conp->vc_cursor_type)) {
+ case CUR_UNDERLINE:
+ case CUR_LOWER_THIRD:
+ case CUR_LOWER_HALF:
+ case CUR_TWO_THIRDS:
+ case CUR_BLOCK:
+ sti_putc(sticon_sti, (car1 & 255) + (0 << 8) + (7 << 11),
+ conp->state.y, conp->state.x, font_data[conp->vc_num]);
break;
}
}
@@ -135,7 +121,7 @@ static bool sticon_scroll(struct vc_data *conp, unsigned int t,
if (vga_is_gfx)
return false;
- sticon_cursor(conp, CM_ERASE);
+ sticon_cursor(conp, false);
switch (dir) {
case SM_UP:
@@ -167,7 +153,7 @@ static void sticon_set_def_font(int unit)
}
}
-static int sticon_set_font(struct vc_data *vc, struct console_font *op,
+static int sticon_set_font(struct vc_data *vc, const struct console_font *op,
unsigned int vpitch)
{
struct sti_struct *sti = sticon_sti;
@@ -260,20 +246,21 @@ static int sticon_set_font(struct vc_data *vc, struct console_font *op,
return 0;
}
-static int sticon_font_default(struct vc_data *vc, struct console_font *op, char *name)
+static int sticon_font_default(struct vc_data *vc, struct console_font *op,
+ const char *name)
{
sticon_set_def_font(vc->vc_num);
return 0;
}
-static int sticon_font_set(struct vc_data *vc, struct console_font *font,
+static int sticon_font_set(struct vc_data *vc, const struct console_font *font,
unsigned int vpitch, unsigned int flags)
{
return sticon_set_font(vc, font, vpitch);
}
-static void sticon_init(struct vc_data *c, int init)
+static void sticon_init(struct vc_data *c, bool init)
{
struct sti_struct *sti = sticon_sti;
int vc_cols, vc_rows;
@@ -300,33 +287,32 @@ static void sticon_deinit(struct vc_data *c)
sticon_set_def_font(i);
}
-static void sticon_clear(struct vc_data *conp, int sy, int sx, int height,
- int width)
+static void sticon_clear(struct vc_data *conp, unsigned int sy, unsigned int sx,
+ unsigned int width)
{
- if (!height || !width)
- return;
-
- sti_clear(sticon_sti, sy, sx, height, width,
+ sti_clear(sticon_sti, sy, sx, 1, width,
conp->vc_video_erase_char, font_data[conp->vc_num]);
}
-static int sticon_switch(struct vc_data *conp)
+static bool sticon_switch(struct vc_data *conp)
{
- return 1; /* needs refreshing */
+ return true; /* needs refreshing */
}
-static int sticon_blank(struct vc_data *c, int blank, int mode_switch)
+static bool sticon_blank(struct vc_data *c, enum vesa_blank_mode blank,
+ bool mode_switch)
{
- if (blank == 0) {
+ if (blank == VESA_NO_BLANKING) {
if (mode_switch)
vga_is_gfx = 0;
- return 1;
+ return true;
}
sti_clear(sticon_sti, 0, 0, c->vc_rows, c->vc_cols, BLANK,
font_data[c->vc_num]);
if (mode_switch)
vga_is_gfx = 1;
- return 1;
+
+ return true;
}
static u8 sticon_build_attr(struct vc_data *conp, u8 color,
@@ -365,7 +351,6 @@ static const struct consw sti_con = {
.con_init = sticon_init,
.con_deinit = sticon_deinit,
.con_clear = sticon_clear,
- .con_putc = sticon_putc,
.con_putcs = sticon_putcs,
.con_cursor = sticon_cursor,
.con_scroll = sticon_scroll,
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 8ef1579fa57fd..7597f04b0dc77 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -65,7 +65,7 @@ static struct vgastate vgastate;
* Interface used by the world
*/
-static int vgacon_set_origin(struct vc_data *c);
+static bool vgacon_set_origin(struct vc_data *c);
static struct uni_pagedict *vgacon_uni_pagedir;
static int vgacon_refcount;
@@ -81,7 +81,7 @@ static unsigned int vga_video_num_lines; /* Number of text lines */
static bool vga_can_do_color; /* Do we support colors? */
static unsigned int vga_default_font_height __read_mostly; /* Height of default screen font */
static unsigned char vga_video_type __read_mostly; /* Card type */
-static int vga_vesa_blanked;
+static enum vesa_blank_mode vga_vesa_blanked;
static bool vga_palette_blanked;
static bool vga_is_gfx;
static bool vga_512_chars;
@@ -138,8 +138,40 @@ static inline void vga_set_mem_top(struct vc_data *c)
static void vgacon_scrolldelta(struct vc_data *c, int lines)
{
- vc_scrolldelta_helper(c, lines, vga_rolled_over, (void *)vga_vram_base,
- vga_vram_size);
+ unsigned long scr_end = c->vc_scr_end - vga_vram_base;
+ unsigned long vorigin = c->vc_visible_origin - vga_vram_base;
+ unsigned long origin = c->vc_origin - vga_vram_base;
+ int margin = c->vc_size_row * 4;
+ int from, wrap, from_off, avail;
+
+ /* Turn scrollback off */
+ if (!lines) {
+ c->vc_visible_origin = c->vc_origin;
+ return;
+ }
+
+ /* Do we have already enough to allow jumping from 0 to the end? */
+ if (vga_rolled_over > scr_end + margin) {
+ from = scr_end;
+ wrap = vga_rolled_over + c->vc_size_row;
+ } else {
+ from = 0;
+ wrap = vga_vram_size;
+ }
+
+ from_off = (vorigin - from + wrap) % wrap + lines * c->vc_size_row;
+ avail = (origin - from + wrap) % wrap;
+
+ /* Only a little piece would be left? Show all incl. the piece! */
+ if (avail < 2 * margin)
+ margin = 0;
+ if (from_off < margin)
+ from_off = 0;
+ if (from_off > avail - margin)
+ from_off = avail;
+
+ c->vc_visible_origin = vga_vram_base + (from + from_off) % wrap;
+
vga_set_mem_top(c);
}
@@ -335,7 +367,7 @@ static const char *vgacon_startup(void)
return display_desc;
}
-static void vgacon_init(struct vc_data *c, int init)
+static void vgacon_init(struct vc_data *c, bool init)
{
struct uni_pagedict *p;
@@ -352,7 +384,7 @@ static void vgacon_init(struct vc_data *c, int init)
c->vc_scan_lines = vga_scan_lines;
c->vc_font.height = c->vc_cell_height = vga_video_font_height;
- /* set dimensions manually if init != 0 since vc_resize() will fail */
+ /* set dimensions manually if init is true since vc_resize() will fail */
if (init) {
c->vc_cols = vga_video_num_columns;
c->vc_rows = vga_video_num_lines;
@@ -471,7 +503,7 @@ static void vgacon_set_cursor_size(int from, int to)
raw_spin_unlock_irqrestore(&vga_lock, flags);
}
-static void vgacon_cursor(struct vc_data *c, int mode)
+static void vgacon_cursor(struct vc_data *c, bool enable)
{
unsigned int c_height;
@@ -482,47 +514,41 @@ static void vgacon_cursor(struct vc_data *c, int mode)
c_height = c->vc_cell_height;
- switch (mode) {
- case CM_ERASE:
- write_vga(14, (c->vc_pos - vga_vram_base) / 2);
+ write_vga(14, (c->vc_pos - vga_vram_base) / 2);
+
+ if (!enable) {
if (vga_video_type >= VIDEO_TYPE_VGAC)
vgacon_set_cursor_size(31, 30);
else
vgacon_set_cursor_size(31, 31);
- break;
+ return;
+ }
- case CM_MOVE:
- case CM_DRAW:
- write_vga(14, (c->vc_pos - vga_vram_base) / 2);
- switch (CUR_SIZE(c->vc_cursor_type)) {
- case CUR_UNDERLINE:
- vgacon_set_cursor_size(c_height -
- (c_height < 10 ? 2 : 3),
- c_height -
- (c_height < 10 ? 1 : 2));
- break;
- case CUR_TWO_THIRDS:
- vgacon_set_cursor_size(c_height / 3, c_height -
- (c_height < 10 ? 1 : 2));
- break;
- case CUR_LOWER_THIRD:
- vgacon_set_cursor_size(c_height * 2 / 3, c_height -
- (c_height < 10 ? 1 : 2));
- break;
- case CUR_LOWER_HALF:
- vgacon_set_cursor_size(c_height / 2, c_height -
- (c_height < 10 ? 1 : 2));
- break;
- case CUR_NONE:
- if (vga_video_type >= VIDEO_TYPE_VGAC)
- vgacon_set_cursor_size(31, 30);
- else
- vgacon_set_cursor_size(31, 31);
- break;
- default:
- vgacon_set_cursor_size(1, c_height);
- break;
- }
+ switch (CUR_SIZE(c->vc_cursor_type)) {
+ case CUR_UNDERLINE:
+ vgacon_set_cursor_size(c_height - (c_height < 10 ? 2 : 3),
+ c_height - (c_height < 10 ? 1 : 2));
+ break;
+ case CUR_TWO_THIRDS:
+ vgacon_set_cursor_size(c_height / 3,
+ c_height - (c_height < 10 ? 1 : 2));
+ break;
+ case CUR_LOWER_THIRD:
+ vgacon_set_cursor_size(c_height * 2 / 3,
+ c_height - (c_height < 10 ? 1 : 2));
+ break;
+ case CUR_LOWER_HALF:
+ vgacon_set_cursor_size(c_height / 2,
+ c_height - (c_height < 10 ? 1 : 2));
+ break;
+ case CUR_NONE:
+ if (vga_video_type >= VIDEO_TYPE_VGAC)
+ vgacon_set_cursor_size(31, 30);
+ else
+ vgacon_set_cursor_size(31, 31);
+ break;
+ default:
+ vgacon_set_cursor_size(1, c_height);
break;
}
}
@@ -588,7 +614,7 @@ static void vgacon_doresize(struct vc_data *c,
raw_spin_unlock_irqrestore(&vga_lock, flags);
}
-static int vgacon_switch(struct vc_data *c)
+static bool vgacon_switch(struct vc_data *c)
{
int x = c->vc_cols * VGA_FONTWIDTH;
int y = c->vc_rows * c->vc_cell_height;
@@ -617,7 +643,7 @@ static int vgacon_switch(struct vc_data *c)
vgacon_doresize(c, c->vc_cols, c->vc_rows);
}
- return 0; /* Redrawing not needed */
+ return false; /* Redrawing not needed */
}
static void vga_set_palette(struct vc_data *vc, const unsigned char *table)
@@ -657,7 +683,7 @@ static struct {
unsigned char ClockingMode; /* Seq-Controller:01h */
} vga_state;
-static void vga_vesa_blank(struct vgastate *state, int mode)
+static void vga_vesa_blank(struct vgastate *state, enum vesa_blank_mode mode)
{
/* save original values of VGA controller registers */
if (!vga_vesa_blanked) {
@@ -771,13 +797,14 @@ static void vga_pal_blank(struct vgastate *state)
}
}
-static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
+static bool vgacon_blank(struct vc_data *c, enum vesa_blank_mode blank,
+ bool mode_switch)
{
switch (blank) {
- case 0: /* Unblank */
+ case VESA_NO_BLANKING: /* Unblank */
if (vga_vesa_blanked) {
vga_vesa_unblank(&vgastate);
- vga_vesa_blanked = 0;
+ vga_vesa_blanked = VESA_NO_BLANKING;
}
if (vga_palette_blanked) {
vga_set_palette(c, color_table);
@@ -787,8 +814,7 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
vga_is_gfx = false;
/* Tell console.c that it has to restore the screen itself */
return 1;
- case 1: /* Normal blanking */
- case -1: /* Obsolete */
+ case VESA_VSYNC_SUSPEND: /* Normal blanking */
if (!mode_switch && vga_video_type == VIDEO_TYPE_VGAC) {
vga_pal_blank(&vgastate);
vga_palette_blanked = true;
@@ -1004,7 +1030,7 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
/* void size to cause regs to be rewritten */
cursor_size_lastfrom = 0;
cursor_size_lastto = 0;
- c->vc_sw->con_cursor(c, CM_DRAW);
+ c->vc_sw->con_cursor(c, true);
}
c->vc_font.height = c->vc_cell_height = fontheight;
vc_resize(c, 0, rows); /* Adjust console size */
@@ -1013,7 +1039,7 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
return 0;
}
-static int vgacon_font_set(struct vc_data *c, struct console_font *font,
+static int vgacon_font_set(struct vc_data *c, const struct console_font *font,
unsigned int vpitch, unsigned int flags)
{
unsigned charcount = font->charcount;
@@ -1049,12 +1075,12 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font, unsigne
}
static int vgacon_resize(struct vc_data *c, unsigned int width,
- unsigned int height, unsigned int user)
+ unsigned int height, bool from_user)
{
if ((width << 1) * height > vga_vram_size)
return -EINVAL;
- if (user) {
+ if (from_user) {
/*
* Ho ho! Someone (svgatextmode, eh?) may have reprogrammed
* the video mode! Set the new defaults then and go away.
@@ -1074,15 +1100,15 @@ static int vgacon_resize(struct vc_data *c, unsigned int width,
return 0;
}
-static int vgacon_set_origin(struct vc_data *c)
+static bool vgacon_set_origin(struct vc_data *c)
{
if (vga_is_gfx || /* We don't play origin tricks in graphic modes */
(console_blanked && !vga_palette_blanked)) /* Nor we write to blanked screens */
- return 0;
+ return false;
c->vc_origin = c->vc_visible_origin = vga_vram_base;
vga_set_mem_top(c);
vga_rolled_over = 0;
- return 1;
+ return true;
}
static void vgacon_save_screen(struct vc_data *c)
@@ -1159,11 +1185,10 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
* The console `switch' structure for the VGA based console
*/
-static void vgacon_clear(struct vc_data *vc, int sy, int sx, int height,
- int width) { }
-static void vgacon_putc(struct vc_data *vc, int c, int ypos, int xpos) { }
-static void vgacon_putcs(struct vc_data *vc, const unsigned short *s,
- int count, int ypos, int xpos) { }
+static void vgacon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
+ unsigned int width) { }
+static void vgacon_putcs(struct vc_data *vc, const u16 *s, unsigned int count,
+ unsigned int ypos, unsigned int xpos) { }
const struct consw vga_con = {
.owner = THIS_MODULE,
@@ -1171,7 +1196,6 @@ const struct consw vga_con = {
.con_init = vgacon_init,
.con_deinit = vgacon_deinit,
.con_clear = vgacon_clear,
- .con_putc = vgacon_putc,
.con_putcs = vgacon_putcs,
.con_cursor = vgacon_cursor,
.con_scroll = vgacon_scroll,
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index a61b8260b8f36..197b6d5268e94 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -494,6 +494,7 @@ config FB_SBUS_HELPERS
select FB_CFB_COPYAREA
select FB_CFB_FILLRECT
select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_FOPS
config FB_BW2
bool "BWtwo support"
@@ -514,6 +515,7 @@ config FB_CG6
depends on (FB = y) && (SPARC && FB_SBUS)
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_FOPS
help
This is the frame buffer device driver for the CGsix (GX, TurboGX)
frame buffer.
@@ -523,6 +525,7 @@ config FB_FFB
depends on FB_SBUS && SPARC64
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_FOPS
help
This is the frame buffer device driver for the Creator, Creator3D,
and Elite3D graphics boards.
@@ -1523,7 +1526,7 @@ config FB_FSL_DIU
config FB_SH_MOBILE_LCDC
tristate "SuperH Mobile LCDC framebuffer support"
depends on FB && HAVE_CLK && HAS_IOMEM
- depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
+ depends on SUPERH || COMPILE_TEST
depends on FB_DEVICE
select FB_BACKLIGHT
select FB_DEFERRED_IO
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index dca9c0325b3f0..082501feceb96 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -622,8 +622,13 @@ static int arkfb_set_par(struct fb_info *info)
info->tileops = NULL;
/* in 4bpp supports 8p wide tiles only, any tiles otherwise */
- info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
- info->pixmap.blit_y = ~(u32)0;
+ if (bpp == 4) {
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ } else {
+ bitmap_fill(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ }
+ bitmap_fill(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
offset_value = (info->var.xres_virtual * bpp) / 64;
screen_size = info->var.yres_virtual * info->fix.line_length;
@@ -635,8 +640,10 @@ static int arkfb_set_par(struct fb_info *info)
info->tileops = &arkfb_tile_ops;
/* supports 8x16 tiles only */
- info->pixmap.blit_x = 1 << (8 - 1);
- info->pixmap.blit_y = 1 << (16 - 1);
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ bitmap_zero(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
+ set_bit(16 - 1, info->pixmap.blit_y);
offset_value = info->var.xres_virtual / 16;
screen_size = (info->var.xres_virtual * info->var.yres_virtual) / 64;
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index 8587c9da06700..3ff1b2a8659e8 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -233,7 +233,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
}
}
-static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
int fg, int bg)
{
struct fb_cursor cursor;
@@ -348,16 +348,7 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
mask[i++] = msk;
}
- switch (mode) {
- case CM_ERASE:
- ops->cursor_state.enable = 0;
- break;
- case CM_DRAW:
- case CM_MOVE:
- default:
- ops->cursor_state.enable = (use_sw) ? 0 : 1;
- break;
- }
+ ops->cursor_state.enable = enable && !use_sw;
cursor.image.data = src;
cursor.image.fg_color = ops->cursor_state.image.fg_color;
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 46823c2e2ba12..fcabc668e9fbe 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -351,7 +351,7 @@ static void fb_flashcursor(struct work_struct *work)
struct fb_info *info;
struct vc_data *vc = NULL;
int c;
- int mode;
+ bool enable;
int ret;
/* FIXME: we should sort out the unbind locking instead */
@@ -375,9 +375,8 @@ static void fb_flashcursor(struct work_struct *work)
}
c = scr_readw((u16 *) vc->vc_pos);
- mode = (!ops->cursor_flash || ops->cursor_state.enable) ?
- CM_ERASE : CM_DRAW;
- ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
+ enable = ops->cursor_flash && !ops->cursor_state.enable;
+ ops->cursor(vc, info, enable, get_color(vc, info, c, 1),
get_color(vc, info, c, 0));
console_unlock();
@@ -920,7 +919,7 @@ static void display_to_var(struct fb_var_screeninfo *var,
static const char *fbcon_startup(void)
{
- const char *display_desc = "frame buffer device";
+ static const char display_desc[] = "frame buffer device";
struct fbcon_display *p = &fb_display[fg_console];
struct vc_data *vc = vc_cons[fg_console].d;
const struct font_desc *font = NULL;
@@ -987,7 +986,7 @@ static const char *fbcon_startup(void)
return display_desc;
}
-static void fbcon_init(struct vc_data *vc, int init)
+static void fbcon_init(struct vc_data *vc, bool init)
{
struct fb_info *info;
struct fbcon_ops *ops;
@@ -1234,8 +1233,8 @@ finished:
* restriction is simplicity & efficiency at the moment.
*/
-static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
- int width)
+static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
+ unsigned int height, unsigned int width)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
@@ -1272,8 +1271,14 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
ops->clear(vc, info, real_y(p, sy), sx, height, width);
}
-static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
- int count, int ypos, int xpos)
+static void fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
+ unsigned int width)
+{
+ __fbcon_clear(vc, sy, sx, 1, width);
+}
+
+static void fbcon_putcs(struct vc_data *vc, const u16 *s, unsigned int count,
+ unsigned int ypos, unsigned int xpos)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_display *p = &fb_display[vc->vc_num];
@@ -1285,14 +1290,6 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
get_color(vc, info, scr_readw(s), 0));
}
-static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
-{
- unsigned short chr;
-
- scr_writew(c, &chr);
- fbcon_putcs(vc, &chr, 1, ypos, xpos);
-}
-
static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
@@ -1302,7 +1299,7 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
ops->clear_margins(vc, info, margin_color, bottom_only);
}
-static void fbcon_cursor(struct vc_data *vc, int mode)
+static void fbcon_cursor(struct vc_data *vc, bool enable)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
@@ -1318,12 +1315,12 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
else
fbcon_add_cursor_work(info);
- ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
+ ops->cursor_flash = enable;
if (!ops->cursor)
return;
- ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
+ ops->cursor(vc, info, enable, get_color(vc, info, c, 1),
get_color(vc, info, c, 0));
}
@@ -1743,7 +1740,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
if (fbcon_is_inactive(vc, info))
return true;
- fbcon_cursor(vc, CM_ERASE);
+ fbcon_cursor(vc, false);
/*
* ++Geert: Only use ywrap/ypan if the console is in text mode
@@ -1759,7 +1756,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
case SCROLL_MOVE:
fbcon_redraw_blit(vc, info, p, t, b - t - count,
count);
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
scr_memsetw((unsigned short *) (vc->vc_origin +
vc->vc_size_row *
(b - count)),
@@ -1782,7 +1779,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
b - t - count, vc->vc_cols);
else
goto redraw_up;
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
break;
case SCROLL_PAN_REDRAW:
@@ -1800,7 +1797,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
vc->vc_rows - b, b);
} else
fbcon_redraw_move(vc, p, t + count, b - t - count, t);
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
break;
case SCROLL_PAN_MOVE:
@@ -1823,14 +1820,14 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
b - t - count, vc->vc_cols);
else
goto redraw_up;
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
break;
case SCROLL_REDRAW:
redraw_up:
fbcon_redraw(vc, t, b - t - count,
count * vc->vc_cols);
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
scr_memsetw((unsigned short *) (vc->vc_origin +
vc->vc_size_row *
(b - count)),
@@ -1847,7 +1844,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
case SCROLL_MOVE:
fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
-count);
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, t, 0, count, vc->vc_cols);
scr_memsetw((unsigned short *) (vc->vc_origin +
vc->vc_size_row *
t),
@@ -1870,7 +1867,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
b - t - count, vc->vc_cols);
else
goto redraw_down;
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, t, 0, count, vc->vc_cols);
break;
case SCROLL_PAN_MOVE:
@@ -1892,7 +1889,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
b - t - count, vc->vc_cols);
else
goto redraw_down;
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, t, 0, count, vc->vc_cols);
break;
case SCROLL_PAN_REDRAW:
@@ -1909,14 +1906,14 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
fbcon_redraw_move(vc, p, count, t, 0);
} else
fbcon_redraw_move(vc, p, t, b - t - count, t + count);
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, t, 0, count, vc->vc_cols);
break;
case SCROLL_REDRAW:
redraw_down:
fbcon_redraw(vc, b - 1, b - t - count,
-count * vc->vc_cols);
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, t, 0, count, vc->vc_cols);
scr_memsetw((unsigned short *) (vc->vc_origin +
vc->vc_size_row *
t),
@@ -1995,7 +1992,7 @@ static void updatescrollmode(struct fbcon_display *p,
#define CALC_FONTSZ(h, p, c) ((h) * (p) * (c)) /* size = height * pitch * charcount */
static int fbcon_resize(struct vc_data *vc, unsigned int width,
- unsigned int height, unsigned int user)
+ unsigned int height, bool from_user)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
@@ -2058,7 +2055,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
return 0;
}
-static int fbcon_switch(struct vc_data *vc)
+static bool fbcon_switch(struct vc_data *vc)
{
struct fb_info *info, *old_info = NULL;
struct fbcon_ops *ops;
@@ -2180,9 +2177,9 @@ static int fbcon_switch(struct vc_data *vc)
vc->vc_origin + vc->vc_size_row * vc->vc_top,
vc->vc_size_row * (vc->vc_bottom -
vc->vc_top) / 2);
- return 0;
+ return false;
}
- return 1;
+ return true;
}
static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info,
@@ -2195,12 +2192,13 @@ static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info,
oldc = vc->vc_video_erase_char;
vc->vc_video_erase_char &= charmask;
- fbcon_clear(vc, 0, 0, vc->vc_rows, vc->vc_cols);
+ __fbcon_clear(vc, 0, 0, vc->vc_rows, vc->vc_cols);
vc->vc_video_erase_char = oldc;
}
}
-static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
+static bool fbcon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
+ bool mode_switch)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
@@ -2222,7 +2220,7 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
if (!fbcon_is_inactive(vc, info)) {
if (ops->blank_state != blank) {
ops->blank_state = blank;
- fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
+ fbcon_cursor(vc, !blank);
ops->cursor_flash = (!blank);
if (fb_blank(info, blank))
@@ -2239,10 +2237,10 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
else
fbcon_add_cursor_work(info);
- return 0;
+ return false;
}
-static int fbcon_debug_enter(struct vc_data *vc)
+static void fbcon_debug_enter(struct vc_data *vc)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
@@ -2252,10 +2250,9 @@ static int fbcon_debug_enter(struct vc_data *vc)
if (info->fbops->fb_debug_enter)
info->fbops->fb_debug_enter(info);
fbcon_set_palette(vc, color_table);
- return 0;
}
-static int fbcon_debug_leave(struct vc_data *vc)
+static void fbcon_debug_leave(struct vc_data *vc)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
@@ -2263,7 +2260,6 @@ static int fbcon_debug_leave(struct vc_data *vc)
ops->graphics = ops->save_graphics;
if (info->fbops->fb_debug_leave)
info->fbops->fb_debug_leave(info);
- return 0;
}
static int fbcon_get_font(struct vc_data *vc, struct console_font *font, unsigned int vpitch)
@@ -2461,7 +2457,7 @@ err_out:
* but lets not assume that, since charcount of 512 is small for unicode support.
*/
-static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
+static int fbcon_set_font(struct vc_data *vc, const struct console_font *font,
unsigned int vpitch, unsigned int flags)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
@@ -2483,12 +2479,12 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
h > FBCON_SWAP(info->var.rotate, info->var.yres, info->var.xres))
return -EINVAL;
- if (font->width > 32 || font->height > 32)
+ if (font->width > FB_MAX_BLIT_WIDTH || font->height > FB_MAX_BLIT_HEIGHT)
return -EINVAL;
/* Make sure drawing engine can handle the font */
- if (!(info->pixmap.blit_x & BIT(font->width - 1)) ||
- !(info->pixmap.blit_y & BIT(font->height - 1)))
+ if (!test_bit(font->width - 1, info->pixmap.blit_x) ||
+ !test_bit(font->height - 1, info->pixmap.blit_y))
return -EINVAL;
/* Make sure driver can handle the font length */
@@ -2534,7 +2530,8 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
return fbcon_do_set_font(vc, font->width, font->height, charcount, new_data, 1);
}
-static int fbcon_set_def_font(struct vc_data *vc, struct console_font *font, char *name)
+static int fbcon_set_def_font(struct vc_data *vc, struct console_font *font,
+ const char *name)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
const struct font_desc *f;
@@ -2593,35 +2590,6 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
fb_set_cmap(&palette_cmap, info);
}
-static u16 *fbcon_screen_pos(const struct vc_data *vc, int offset)
-{
- return (u16 *) (vc->vc_origin + offset);
-}
-
-static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos,
- int *px, int *py)
-{
- unsigned long ret;
- int x, y;
-
- if (pos >= vc->vc_origin && pos < vc->vc_scr_end) {
- unsigned long offset = (pos - vc->vc_origin) / 2;
-
- x = offset % vc->vc_cols;
- y = offset / vc->vc_cols;
- ret = pos + (vc->vc_cols - x) * 2;
- } else {
- /* Should not happen */
- x = y = 0;
- ret = vc->vc_origin;
- }
- if (px)
- *px = x;
- if (py)
- *py = y;
- return ret;
-}
-
/* As we might be inside softback, we may work with a non-contiguous buffer;
   that's why we have to use a separate routine. */
static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt)
@@ -2650,7 +2618,7 @@ void fbcon_suspended(struct fb_info *info)
vc = vc_cons[ops->currcon].d;
/* Clear cursor, restore saved data */
- fbcon_cursor(vc, CM_ERASE);
+ fbcon_cursor(vc, false);
}
void fbcon_resumed(struct fb_info *info)
@@ -3082,8 +3050,8 @@ void fbcon_get_requirement(struct fb_info *info,
vc = vc_cons[i].d;
if (vc && vc->vc_mode == KD_TEXT &&
info->node == con2fb_map[i]) {
- caps->x |= 1 << (vc->vc_font.width - 1);
- caps->y |= 1 << (vc->vc_font.height - 1);
+ set_bit(vc->vc_font.width - 1, caps->x);
+ set_bit(vc->vc_font.height - 1, caps->y);
charcnt = vc->vc_font.charcount;
if (caps->len < charcnt)
caps->len = charcnt;
@@ -3094,8 +3062,10 @@ void fbcon_get_requirement(struct fb_info *info,
if (vc && vc->vc_mode == KD_TEXT &&
info->node == con2fb_map[fg_console]) {
- caps->x = 1 << (vc->vc_font.width - 1);
- caps->y = 1 << (vc->vc_font.height - 1);
+ bitmap_zero(caps->x, FB_MAX_BLIT_WIDTH);
+ set_bit(vc->vc_font.width - 1, caps->x);
+ bitmap_zero(caps->y, FB_MAX_BLIT_HEIGHT);
+ set_bit(vc->vc_font.height - 1, caps->y);
caps->len = vc->vc_font.charcount;
}
}
@@ -3152,7 +3122,6 @@ static const struct consw fb_con = {
.con_init = fbcon_init,
.con_deinit = fbcon_deinit,
.con_clear = fbcon_clear,
- .con_putc = fbcon_putc,
.con_putcs = fbcon_putcs,
.con_cursor = fbcon_cursor,
.con_scroll = fbcon_scroll,
@@ -3163,8 +3132,6 @@ static const struct consw fb_con = {
.con_font_default = fbcon_set_def_font,
.con_set_palette = fbcon_set_palette,
.con_invert_region = fbcon_invert_region,
- .con_screen_pos = fbcon_screen_pos,
- .con_getxy = fbcon_getxy,
.con_resize = fbcon_resize,
.con_debug_enter = fbcon_debug_enter,
.con_debug_leave = fbcon_debug_leave,
diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
index 0eaf54a211516..df70ea5ec5b37 100644
--- a/drivers/video/fbdev/core/fbcon.h
+++ b/drivers/video/fbdev/core/fbcon.h
@@ -61,8 +61,8 @@ struct fbcon_ops {
int fg, int bg);
void (*clear_margins)(struct vc_data *vc, struct fb_info *info,
int color, int bottom_only);
- void (*cursor)(struct vc_data *vc, struct fb_info *info, int mode,
- int fg, int bg);
+ void (*cursor)(struct vc_data *vc, struct fb_info *info,
+ bool enable, int fg, int bg);
int (*update_start)(struct fb_info *info);
int (*rotate_font)(struct fb_info *info, struct vc_data *vc);
struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */
diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c
index 2789ace796342..f9b794ff7d396 100644
--- a/drivers/video/fbdev/core/fbcon_ccw.c
+++ b/drivers/video/fbdev/core/fbcon_ccw.c
@@ -218,7 +218,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
}
}
-static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+static void ccw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
int fg, int bg)
{
struct fb_cursor cursor;
@@ -349,16 +349,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
kfree(tmp);
}
- switch (mode) {
- case CM_ERASE:
- ops->cursor_state.enable = 0;
- break;
- case CM_DRAW:
- case CM_MOVE:
- default:
- ops->cursor_state.enable = (use_sw) ? 0 : 1;
- break;
- }
+ ops->cursor_state.enable = enable && !use_sw;
cursor.image.data = src;
cursor.image.fg_color = ops->cursor_state.image.fg_color;
diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c
index 86a254c1b2b7b..903f6fc174e14 100644
--- a/drivers/video/fbdev/core/fbcon_cw.c
+++ b/drivers/video/fbdev/core/fbcon_cw.c
@@ -201,7 +201,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
}
}
-static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+static void cw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
int fg, int bg)
{
struct fb_cursor cursor;
@@ -332,16 +332,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
kfree(tmp);
}
- switch (mode) {
- case CM_ERASE:
- ops->cursor_state.enable = 0;
- break;
- case CM_DRAW:
- case CM_MOVE:
- default:
- ops->cursor_state.enable = (use_sw) ? 0 : 1;
- break;
- }
+ ops->cursor_state.enable = enable && !use_sw;
cursor.image.data = src;
cursor.image.fg_color = ops->cursor_state.image.fg_color;
diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c
index 23bc045769d08..594331936fd3c 100644
--- a/drivers/video/fbdev/core/fbcon_ud.c
+++ b/drivers/video/fbdev/core/fbcon_ud.c
@@ -248,7 +248,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
}
}
-static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+static void ud_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
int fg, int bg)
{
struct fb_cursor cursor;
@@ -372,16 +372,7 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
mask[i++] = ~msk;
}
- switch (mode) {
- case CM_ERASE:
- ops->cursor_state.enable = 0;
- break;
- case CM_DRAW:
- case CM_MOVE:
- default:
- ops->cursor_state.enable = (use_sw) ? 0 : 1;
- break;
- }
+ ops->cursor_state.enable = enable && !use_sw;
cursor.image.data = src;
cursor.image.fg_color = ops->cursor_state.image.fg_color;
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 48287366e0d4a..4c4ad0a86a504 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -212,8 +212,8 @@ static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
fbcon_get_requirement(info, &caps);
info->fbops->fb_get_caps(info, &fbcaps, var);
- if (((fbcaps.x ^ caps.x) & caps.x) ||
- ((fbcaps.y ^ caps.y) & caps.y) ||
+ if (!bitmap_subset(caps.x, fbcaps.x, FB_MAX_BLIT_WIDTH) ||
+ !bitmap_subset(caps.y, fbcaps.y, FB_MAX_BLIT_HEIGHT) ||
(fbcaps.len < caps.len))
err = -EINVAL;
@@ -420,11 +420,11 @@ static int do_register_framebuffer(struct fb_info *fb_info)
}
fb_info->pixmap.offset = 0;
- if (!fb_info->pixmap.blit_x)
- fb_info->pixmap.blit_x = ~(u32)0;
+ if (bitmap_empty(fb_info->pixmap.blit_x, FB_MAX_BLIT_WIDTH))
+ bitmap_fill(fb_info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
- if (!fb_info->pixmap.blit_y)
- fb_info->pixmap.blit_y = ~(u32)0;
+ if (bitmap_empty(fb_info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT))
+ bitmap_fill(fb_info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
if (!fb_info->modelist.prev || !fb_info->modelist.next)
INIT_LIST_HEAD(&fb_info->modelist);
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 79e5bfbdd34c2..0a26399dbc899 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -1311,7 +1311,7 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
int fb_videomode_from_videomode(const struct videomode *vm,
struct fb_videomode *fbmode)
{
- unsigned int htotal, vtotal;
+ unsigned int htotal, vtotal, total;
fbmode->xres = vm->hactive;
fbmode->left_margin = vm->hback_porch;
@@ -1344,8 +1344,9 @@ int fb_videomode_from_videomode(const struct videomode *vm,
vtotal = vm->vactive + vm->vfront_porch + vm->vback_porch +
vm->vsync_len;
/* prevent division by zero */
- if (htotal && vtotal) {
- fbmode->refresh = vm->pixelclock / (htotal * vtotal);
+ total = htotal * vtotal;
+ if (total) {
+ fbmode->refresh = vm->pixelclock / total;
/* a mode must have htotal and vtotal != 0 or it is invalid */
} else {
fbmode->refresh = 0;
diff --git a/drivers/video/fbdev/core/svgalib.c b/drivers/video/fbdev/core/svgalib.c
index 2cba158888ea4..821b89a0a6453 100644
--- a/drivers/video/fbdev/core/svgalib.c
+++ b/drivers/video/fbdev/core/svgalib.c
@@ -354,12 +354,19 @@ void svga_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
{
if (var->bits_per_pixel == 0) {
/* can only support 256 8x16 bitmap */
- caps->x = 1 << (8 - 1);
- caps->y = 1 << (16 - 1);
+ bitmap_zero(caps->x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, caps->x);
+ bitmap_zero(caps->y, FB_MAX_BLIT_HEIGHT);
+ set_bit(16 - 1, caps->y);
caps->len = 256;
} else {
- caps->x = (var->bits_per_pixel == 4) ? 1 << (8 - 1) : ~(u32)0;
- caps->y = ~(u32)0;
+ if (var->bits_per_pixel == 4) {
+ bitmap_zero(caps->x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, caps->x);
+ } else {
+ bitmap_fill(caps->x, FB_MAX_BLIT_WIDTH);
+ }
+ bitmap_fill(caps->y, FB_MAX_BLIT_HEIGHT);
caps->len = ~(u32)0;
}
}
diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c
index 2768eff247ba4..eff7ec4da1671 100644
--- a/drivers/video/fbdev/core/tileblit.c
+++ b/drivers/video/fbdev/core/tileblit.c
@@ -79,7 +79,7 @@ static void tile_clear_margins(struct vc_data *vc, struct fb_info *info,
return;
}
-static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+static void tile_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
int fg, int bg)
{
struct fb_tilecursor cursor;
@@ -87,7 +87,7 @@ static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode,
cursor.sx = vc->state.x;
cursor.sy = vc->state.y;
- cursor.mode = (mode == CM_ERASE || use_sw) ? 0 : 1;
+ cursor.mode = enable && !use_sw;
cursor.fg = fg;
cursor.bg = bg;
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index 264c8cedba159..c3bc5b78b749a 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -670,7 +670,7 @@ static void __exit hgafb_exit(void)
*
* ------------------------------------------------------------------------- */
-MODULE_AUTHOR("Ferenc Bakonyi (fero@drama.obuda.kando.hu)");
+MODULE_AUTHOR("Ferenc Bakonyi <fero@drama.obuda.kando.hu>");
MODULE_DESCRIPTION("FBDev driver for Hercules Graphics Adaptor");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index 7c402e9fd7a98..baec312d7b33b 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -32,15 +32,6 @@
#define CARMINE_MEM_SIZE 0x8000000
#define DRV_NAME "mb862xxfb"
-#if defined(CONFIG_SOCRATES)
-static struct mb862xx_gc_mode socrates_gc_mode = {
- /* Mode for Prime View PM070WL4 TFT LCD Panel */
- { "800x480", 45, 800, 480, 40000, 86, 42, 33, 10, 128, 2, 0, 0, 0 },
- /* 16 bits/pixel, 16MB, 133MHz, SDRAM memory mode value */
- 16, 0x1000000, GC_CCF_COT_133, 0x4157ba63
-};
-#endif
-
/* Helpers */
static inline int h_total(struct fb_var_screeninfo *var)
{
@@ -666,6 +657,15 @@ static int mb862xx_gdc_init(struct mb862xxfb_par *par)
return 0;
}
+#if defined(CONFIG_SOCRATES)
+static struct mb862xx_gc_mode socrates_gc_mode = {
+ /* Mode for Prime View PM070WL4 TFT LCD Panel */
+ { "800x480", 45, 800, 480, 40000, 86, 42, 33, 10, 128, 2, 0, 0, 0 },
+ /* 16 bits/pixel, 16MB, 133MHz, SDRAM memory mode value */
+ 16, 0x1000000, GC_CCF_COT_133, 0x4157ba63
+};
+#endif
+
static int of_platform_mb862xx_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
index 477789cff8e08..d487941853e64 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
@@ -225,17 +225,12 @@ static ssize_t tpo_td043_gamma_show(struct device *dev,
{
struct panel_drv_data *ddata = dev_get_drvdata(dev);
ssize_t len = 0;
- int ret;
int i;
- for (i = 0; i < ARRAY_SIZE(ddata->gamma); i++) {
- ret = snprintf(buf + len, PAGE_SIZE - len, "%u ",
- ddata->gamma[i]);
- if (ret < 0)
- return ret;
- len += ret;
- }
- buf[len - 1] = '\n';
+ for (i = 0; i < ARRAY_SIZE(ddata->gamma); i++)
+ len += sysfs_emit_at(buf, len, "%u ", ddata->gamma[i]);
+ if (len)
+ buf[len - 1] = '\n';
return len;
}
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index 07722a5ea8eff..ff84106ecf1c1 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -617,8 +617,13 @@ static int s3fb_set_par(struct fb_info *info)
info->tileops = NULL;
/* in 4bpp supports 8p wide tiles only, any tiles otherwise */
- info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
- info->pixmap.blit_y = ~(u32)0;
+ if (bpp == 4) {
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ } else {
+ bitmap_fill(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ }
+ bitmap_fill(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
offset_value = (info->var.xres_virtual * bpp) / 64;
screen_size = info->var.yres_virtual * info->fix.line_length;
@@ -630,8 +635,10 @@ static int s3fb_set_par(struct fb_info *info)
info->tileops = fasttext ? &s3fb_fast_tile_ops : &s3fb_tile_ops;
/* supports 8x16 tiles only */
- info->pixmap.blit_x = 1 << (8 - 1);
- info->pixmap.blit_y = 1 << (16 - 1);
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ bitmap_zero(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
+ set_bit(16 - 1, info->pixmap.blit_y);
offset_value = info->var.xres_virtual / 16;
screen_size = (info->var.xres_virtual * info->var.yres_virtual) / 64;
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index ca43774f3156e..dccfc38cfbd52 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -380,7 +380,7 @@ tgafb_set_par(struct fb_info *info)
BT463_LOAD_ADDR(par, 0x0000);
TGA_WRITE_REG(par, BT463_PALETTE << 2, TGA_RAMDAC_SETUP_REG);
-#ifdef CONFIG_HW_CONSOLE
+#ifdef CONFIG_VT
for (i = 0; i < 16; i++) {
int j = color_table[i];
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index e1f421e91b4fb..73f00c079a949 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -1546,7 +1546,7 @@ static ssize_t uvesafb_show_vbe_ver(struct device *dev,
struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
- return snprintf(buf, PAGE_SIZE, "%.4x\n", par->vbe_ib.vbe_version);
+ return sysfs_emit(buf, "%.4x\n", par->vbe_ib.vbe_version);
}
static DEVICE_ATTR(vbe_version, S_IRUGO, uvesafb_show_vbe_ver, NULL);
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index b485e91982013..a87bafbb119cc 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -1353,7 +1353,11 @@ static int vga16fb_probe(struct platform_device *dev)
info->var = vga16fb_defined;
info->fix = vga16fb_fix;
/* supports rectangles with widths of multiples of 8 */
- info->pixmap.blit_x = 1 << 7 | 1 << 15 | 1 << 23 | 1 << 31;
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ set_bit(16 - 1, info->pixmap.blit_x);
+ set_bit(24 - 1, info->pixmap.blit_x);
+ set_bit(32 - 1, info->pixmap.blit_x);
info->flags = FBINFO_HWACCEL_YPAN;
i = (info->var.bits_per_pixel == 8) ? 256 : 16;
diff --git a/drivers/video/fbdev/via/accel.c b/drivers/video/fbdev/via/accel.c
index 0a1bc7a4d7853..1e04026f08091 100644
--- a/drivers/video/fbdev/via/accel.c
+++ b/drivers/video/fbdev/via/accel.c
@@ -115,7 +115,7 @@ static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
if (op != VIA_BITBLT_FILL) {
tmp = src_mem ? 0 : src_addr;
- if (dst_addr & 0xE0000007) {
+ if (tmp & 0xE0000007) {
printk(KERN_WARNING "hw_bitblt_1: Unsupported source "
"address %X\n", tmp);
return -EINVAL;
@@ -260,7 +260,7 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
writel(tmp, engine + 0x18);
tmp = src_mem ? 0 : src_addr;
- if (dst_addr & 0xE0000007) {
+ if (tmp & 0xE0000007) {
printk(KERN_WARNING "hw_bitblt_2: Unsupported source "
"address %X\n", tmp);
return -EINVAL;
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index f8d022cb61e8d..df984f3a7ff64 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -390,8 +390,13 @@ static int vt8623fb_set_par(struct fb_info *info)
info->tileops = NULL;
/* in 4bpp supports 8p wide tiles only, any tiles otherwise */
- info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
- info->pixmap.blit_y = ~(u32)0;
+ if (bpp == 4) {
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ } else {
+ bitmap_fill(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ }
+ bitmap_fill(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
offset_value = (info->var.xres_virtual * bpp) / 64;
fetch_value = ((info->var.xres * bpp) / 128) + 4;
@@ -408,8 +413,10 @@ static int vt8623fb_set_par(struct fb_info *info)
info->tileops = &vt8623fb_tile_ops;
/* supports 8x16 tiles only */
- info->pixmap.blit_x = 1 << (8 - 1);
- info->pixmap.blit_y = 1 << (16 - 1);
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ bitmap_zero(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
+ set_bit(16 - 1, info->pixmap.blit_y);
offset_value = info->var.xres_virtual / 16;
fetch_value = (info->var.xres / 8) + 8;
diff --git a/drivers/video/sticore.c b/drivers/video/sticore.c
index 7115b325817f6..88a1758616e02 100644
--- a/drivers/video/sticore.c
+++ b/drivers/video/sticore.c
@@ -529,7 +529,7 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
if (fbfont_name && strlen(fbfont_name))
fbfont = find_font(fbfont_name);
if (!fbfont)
- fbfont = get_default_font(1024,768, ~(u32)0, ~(u32)0);
+ fbfont = get_default_font(1024, 768, NULL, NULL);
if (!fbfont)
return NULL;
diff --git a/drivers/virt/vmgenid.c b/drivers/virt/vmgenid.c
index b67a28da47026..a1c467a0e9f71 100644
--- a/drivers/virt/vmgenid.c
+++ b/drivers/virt/vmgenid.c
@@ -68,7 +68,6 @@ out:
static void vmgenid_notify(struct acpi_device *device, u32 event)
{
struct vmgenid_state *state = acpi_driver_data(device);
- char *envp[] = { "NEW_VMGENID=1", NULL };
u8 old_id[VMGENID_SIZE];
memcpy(old_id, state->this_id, sizeof(old_id));
@@ -76,7 +75,6 @@ static void vmgenid_notify(struct acpi_device *device, u32 event)
if (!memcmp(old_id, state->this_id, sizeof(old_id)))
return;
add_vmfork_randomness(state->this_id, sizeof(state->this_id));
- kobject_uevent_env(&device->dev.kobj, KOBJ_CHANGE, envp);
}
static const struct acpi_device_id vmgenid_ids[] = {
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index f4080692b3513..9510c551dce86 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -353,7 +353,7 @@ static void virtio_dev_remove(struct device *_d)
of_node_put(dev->dev.of_node);
}
-static struct bus_type virtio_bus = {
+static const struct bus_type virtio_bus = {
.name = "virtio",
.match = virtio_dev_match,
.dev_groups = virtio_dev_groups,
@@ -362,14 +362,16 @@ static struct bus_type virtio_bus = {
.remove = virtio_dev_remove,
};
-int register_virtio_driver(struct virtio_driver *driver)
+int __register_virtio_driver(struct virtio_driver *driver, struct module *owner)
{
/* Catch this early. */
BUG_ON(driver->feature_table_size && !driver->feature_table);
driver->driver.bus = &virtio_bus;
+ driver->driver.owner = owner;
+
return driver_register(&driver->driver);
}
-EXPORT_SYMBOL_GPL(register_virtio_driver);
+EXPORT_SYMBOL_GPL(__register_virtio_driver);
void unregister_virtio_driver(struct virtio_driver *driver)
{
@@ -510,8 +512,10 @@ int virtio_device_freeze(struct virtio_device *dev)
if (drv && drv->freeze) {
ret = drv->freeze(dev);
- if (ret)
+ if (ret) {
+ virtio_config_enable(dev);
return ret;
+ }
}
if (dev->config->destroy_avq)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 49299b1f9ec74..6f7e5010a6735 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1340,7 +1340,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
sizeof(struct vring_packed_desc));
vq->packed.vring.desc[head].id = cpu_to_le16(id);
- if (vq->do_unmap) {
+ if (vq->use_dma_api) {
vq->packed.desc_extra[id].addr = addr;
vq->packed.desc_extra[id].len = total_sg *
sizeof(struct vring_packed_desc);
@@ -1481,7 +1481,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
desc[i].len = cpu_to_le32(sg->length);
desc[i].id = cpu_to_le16(id);
- if (unlikely(vq->do_unmap)) {
+ if (unlikely(vq->use_dma_api)) {
vq->packed.desc_extra[curr].addr = addr;
vq->packed.desc_extra[curr].len = sg->length;
vq->packed.desc_extra[curr].flags =
@@ -1615,7 +1615,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
vq->free_head = id;
vq->vq.num_free += state->num;
- if (unlikely(vq->do_unmap)) {
+ if (unlikely(vq->use_dma_api)) {
curr = id;
for (i = 0; i < state->num; i++) {
vring_unmap_extra_packed(vq,
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 8d63e5923d245..e803db0da307a 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -183,8 +183,11 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return ERR_PTR(-ENOMEM);
+ if (ops->get_vq_size)
+ max_num = ops->get_vq_size(vdpa, index);
+ else
+ max_num = ops->get_vq_num_max(vdpa);
- max_num = ops->get_vq_num_max(vdpa);
if (max_num == 0) {
err = -ENOENT;
goto error_new_virtqueue;
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 513c0b114337c..e6049a75b35b8 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -78,5 +78,15 @@ config W1_MASTER_SGI
This support is also available as a module. If so, the module
will be called sgi_w1.
+config W1_MASTER_UART
+ tristate "UART 1-wire driver"
+ depends on SERIAL_DEV_BUS
+ help
+ Say Y here if you want to communicate with your 1-wire devices using
+ the UART interface.
+
+ This support is also available as a module. If so, the module
+ will be called w1-uart.
+
endmenu
diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile
index 6c5a21f9b88ce..227f80987e698 100644
--- a/drivers/w1/masters/Makefile
+++ b/drivers/w1/masters/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_W1_MASTER_MXC) += mxc_w1.o
obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o
obj-$(CONFIG_HDQ_MASTER_OMAP) += omap_hdq.o
obj-$(CONFIG_W1_MASTER_SGI) += sgi_w1.o
+obj-$(CONFIG_W1_MASTER_UART) += w1-uart.o
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 090cbbf9e1e22..ba1d0866d1c40 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -151,15 +151,13 @@ out_disable_clk:
/*
* disassociate the w1 device from the driver
*/
-static int mxc_w1_remove(struct platform_device *pdev)
+static void mxc_w1_remove(struct platform_device *pdev)
{
struct mxc_w1_device *mdev = platform_get_drvdata(pdev);
w1_remove_master_device(&mdev->bus_master);
clk_disable_unprepare(mdev->clk);
-
- return 0;
}
static const struct of_device_id mxc_w1_dt_ids[] = {
@@ -174,7 +172,7 @@ static struct platform_driver mxc_w1_driver = {
.of_match_table = mxc_w1_dt_ids,
},
.probe = mxc_w1_probe,
- .remove = mxc_w1_remove,
+ .remove_new = mxc_w1_remove,
};
module_platform_driver(mxc_w1_driver);
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 6a39b71eb7184..d1cb5190445a9 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -647,7 +647,7 @@ err_w1:
return ret;
}
-static int omap_hdq_remove(struct platform_device *pdev)
+static void omap_hdq_remove(struct platform_device *pdev)
{
int active;
@@ -661,8 +661,6 @@ static int omap_hdq_remove(struct platform_device *pdev)
if (active >= 0)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id omap_hdq_dt_ids[] = {
@@ -674,7 +672,7 @@ MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);
static struct platform_driver omap_hdq_driver = {
.probe = omap_hdq_probe,
- .remove = omap_hdq_remove,
+ .remove_new = omap_hdq_remove,
.driver = {
.name = "omap_hdq",
.of_match_table = omap_hdq_dt_ids,
diff --git a/drivers/w1/masters/sgi_w1.c b/drivers/w1/masters/sgi_w1.c
index d7fbc3c146e1c..7bb7876aa70e6 100644
--- a/drivers/w1/masters/sgi_w1.c
+++ b/drivers/w1/masters/sgi_w1.c
@@ -105,13 +105,11 @@ static int sgi_w1_probe(struct platform_device *pdev)
/*
* disassociate the w1 device from the driver
*/
-static int sgi_w1_remove(struct platform_device *pdev)
+static void sgi_w1_remove(struct platform_device *pdev)
{
struct sgi_w1_device *sdev = platform_get_drvdata(pdev);
w1_remove_master_device(&sdev->bus_master);
-
- return 0;
}
static struct platform_driver sgi_w1_driver = {
@@ -119,7 +117,7 @@ static struct platform_driver sgi_w1_driver = {
.name = "sgi_w1",
},
.probe = sgi_w1_probe,
- .remove = sgi_w1_remove,
+ .remove_new = sgi_w1_remove,
};
module_platform_driver(sgi_w1_driver);
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index 05c67038ed20c..34128e6bbbfaf 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -141,7 +141,7 @@ static int w1_gpio_probe(struct platform_device *pdev)
return 0;
}
-static int w1_gpio_remove(struct platform_device *pdev)
+static void w1_gpio_remove(struct platform_device *pdev)
{
struct w1_bus_master *master = platform_get_drvdata(pdev);
struct w1_gpio_ddata *ddata = master->data;
@@ -150,8 +150,6 @@ static int w1_gpio_remove(struct platform_device *pdev)
gpiod_set_value(ddata->pullup_gpiod, 0);
w1_remove_master_device(master);
-
- return 0;
}
static struct platform_driver w1_gpio_driver = {
@@ -160,7 +158,7 @@ static struct platform_driver w1_gpio_driver = {
.of_match_table = of_match_ptr(w1_gpio_dt_ids),
},
.probe = w1_gpio_probe,
- .remove = w1_gpio_remove,
+ .remove_new = w1_gpio_remove,
};
module_platform_driver(w1_gpio_driver);
diff --git a/drivers/w1/masters/w1-uart.c b/drivers/w1/masters/w1-uart.c
new file mode 100644
index 0000000000000..a31782e56ba75
--- /dev/null
+++ b/drivers/w1/masters/w1-uart.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * w1-uart - UART 1-Wire bus driver
+ *
+ * Uses the UART interface (via Serial Device Bus) to create the 1-Wire
+ * timing patterns. Implements the following 1-Wire master interface:
+ *
+ * - reset_bus: requests baud-rate 9600
+ *
+ * - touch_bit: requests baud-rate 115200
+ *
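+ * Each 1-Wire time slot is generated by transmitting a single byte at a
+ * suitable baud-rate; the byte read back shows whether a slave pulled
+ * the bus low.
+ *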
+ * Author: Christoph Winklhofer <cj.winklhofer@gmail.com>
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/serdev.h>
+#include <linux/w1.h>
+
+/* UART packet contains start and stop bit */
+#define W1_UART_BITS_PER_PACKET (BITS_PER_BYTE + 2)
+
+/* Timeout to wait for completion of serdev-receive */
+#define W1_UART_TIMEOUT msecs_to_jiffies(500)
+
+/**
+ * struct w1_uart_config - configuration for 1-Wire operation
+ * @baudrate: baud-rate returned from serdev
+ * @delay_us: delay to complete a 1-Wire cycle (in us)
+ * @tx_byte: byte to generate 1-Wire timing pattern
+ */
+struct w1_uart_config {
+ unsigned int baudrate;
+ unsigned int delay_us;
+ u8 tx_byte;
+};
+
+/**
+ * struct w1_uart_device - 1-Wire UART device structure
+ * @serdev: serial device
+ * @bus: w1-bus master
+ * @cfg_reset: config for 1-Wire reset
+ * @cfg_touch_0: config for 1-Wire write-0 cycle
+ * @cfg_touch_1: config for 1-Wire write-1 and read cycle
+ * @rx_byte_received: completion for serdev receive
+ * @rx_mutex: mutex to protect rx_err and rx_byte
+ * @rx_err: indicates an error in serdev-receive
+ * @rx_byte: result byte from serdev-receive
+ */
+struct w1_uart_device {
+ struct serdev_device *serdev;
+ struct w1_bus_master bus;
+
+ struct w1_uart_config cfg_reset;
+ struct w1_uart_config cfg_touch_0;
+ struct w1_uart_config cfg_touch_1;
+
+ struct completion rx_byte_received;
+ /*
+ * protect rx_err and rx_byte from concurrent access in
+ * w1-callbacks and serdev-receive.
+ */
+ struct mutex rx_mutex;
+ int rx_err;
+ u8 rx_byte;
+};
+
+/**
+ * struct w1_uart_limits - limits for 1-Wire operations
+ * @baudrate: Requested baud-rate to create 1-Wire timing pattern
+ * @bit_min_us: minimum time for a bit (in us)
+ * @bit_max_us: maximum time for a bit (in us)
+ * @sample_us: time span to sample the 1-Wire response (in us)
+ * @cycle_us: duration of the 1-Wire cycle (in us)
+ */
+struct w1_uart_limits {
+ unsigned int baudrate;
+ unsigned int bit_min_us;
+ unsigned int bit_max_us;
+ unsigned int sample_us;
+ unsigned int cycle_us;
+};
+
+static inline unsigned int baud_to_bit_ns(unsigned int baud)
+{
+ return NSEC_PER_SEC / baud;
+}
+
+static inline unsigned int to_ns(unsigned int us)
+{
+ return us * NSEC_PER_USEC;
+}
+
+/*
+ * Set baud-rate, delay and tx-byte to create a 1-Wire pulse and adapt
+ * the tx-byte according to the actual baud-rate.
+ *
+ * Reject when:
+ * - the time for a bit is outside the min/max range
+ * - a 1-Wire response is not detectable for the sent byte
+ */
+static int w1_uart_set_config(struct serdev_device *serdev,
+ const struct w1_uart_limits *limits,
+ struct w1_uart_config *w1cfg)
+{
+ unsigned int packet_ns;
+ unsigned int bits_low;
+ unsigned int bit_ns;
+ unsigned int low_ns;
+
+ w1cfg->baudrate = serdev_device_set_baudrate(serdev, limits->baudrate);
+ if (w1cfg->baudrate == 0)
+ return -EINVAL;
+
+ /* Compute in nanoseconds for accuracy */
+ bit_ns = baud_to_bit_ns(w1cfg->baudrate);
+ bits_low = to_ns(limits->bit_min_us) / bit_ns;
+ /* start bit is always low */
+ low_ns = bit_ns * (bits_low + 1);
+
+ if (low_ns < to_ns(limits->bit_min_us))
+ return -EINVAL;
+
+ if (low_ns > to_ns(limits->bit_max_us))
+ return -EINVAL;
+
+ /* the 1-Wire response must be detectable within the sent byte */
+ if (limits->sample_us > 0 &&
+ bit_ns * BITS_PER_BYTE < low_ns + to_ns(limits->sample_us))
+ return -EINVAL;
+
+ /* add a delay when the 1-Wire cycle takes longer than the UART packet */
+ packet_ns = bit_ns * W1_UART_BITS_PER_PACKET;
+ w1cfg->delay_us = 0;
+ if (to_ns(limits->cycle_us) > packet_ns)
+ w1cfg->delay_us =
+ (to_ns(limits->cycle_us) - packet_ns) / NSEC_PER_USEC;
+
+ /* byte to create 1-Wire pulse */
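+ /* UART sends the LSB first, so the cleared low bits extend the start-bit low pulse */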
+ w1cfg->tx_byte = 0xff << bits_low;
+
+ return 0;
+}
+
+/*
+ * Configuration for reset and presence detect
+ * - bit_min_us is 480us, add margin and use 485us
+ * - limits for sample time 60us-75us, use 65us
+ */
+static int w1_uart_set_config_reset(struct w1_uart_device *w1dev)
+{
+ struct serdev_device *serdev = w1dev->serdev;
+ struct device_node *np = serdev->dev.of_node;
+
+ struct w1_uart_limits limits = { .baudrate = 9600,
+ .bit_min_us = 485,
+ .bit_max_us = 640,
+ .sample_us = 65,
+ .cycle_us = 960 };
+
+ of_property_read_u32(np, "reset-bps", &limits.baudrate);
+
+ return w1_uart_set_config(serdev, &limits, &w1dev->cfg_reset);
+}
+
+/*
+ * Configuration for write-0 cycle (touch bit 0)
+ * - bit_min_us is 60us, add margin and use 65us
+ * - no sampling required, sample_us = 0
+ */
+static int w1_uart_set_config_touch_0(struct w1_uart_device *w1dev)
+{
+ struct serdev_device *serdev = w1dev->serdev;
+ struct device_node *np = serdev->dev.of_node;
+
+ struct w1_uart_limits limits = { .baudrate = 115200,
+ .bit_min_us = 65,
+ .bit_max_us = 120,
+ .sample_us = 0,
+ .cycle_us = 70 };
+
+ of_property_read_u32(np, "write-0-bps", &limits.baudrate);
+
+ return w1_uart_set_config(serdev, &limits, &w1dev->cfg_touch_0);
+}
+
+/*
+ * Configuration for write-1 and read cycle (touch bit 1)
+ * - bit_min_us is 5us, add margin and use 6us
+ * - limits for sample time 5us-15us, use 15us
+ */
+static int w1_uart_set_config_touch_1(struct w1_uart_device *w1dev)
+{
+ struct serdev_device *serdev = w1dev->serdev;
+ struct device_node *np = serdev->dev.of_node;
+
+ struct w1_uart_limits limits = { .baudrate = 115200,
+ .bit_min_us = 6,
+ .bit_max_us = 15,
+ .sample_us = 15,
+ .cycle_us = 70 };
+
+ of_property_read_u32(np, "write-1-bps", &limits.baudrate);
+
+ return w1_uart_set_config(serdev, &limits, &w1dev->cfg_touch_1);
+}
+
+/*
+ * Configure and open the serial device
+ */
+static int w1_uart_serdev_open(struct w1_uart_device *w1dev)
+{
+ struct serdev_device *serdev = w1dev->serdev;
+ struct device *dev = &serdev->dev;
+ int ret;
+
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret < 0)
+ return ret;
+
+ ret = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
+ if (ret < 0) {
+ dev_err(dev, "set parity failed\n");
+ return ret;
+ }
+
+ ret = w1_uart_set_config_reset(w1dev);
+ if (ret < 0) {
+ dev_err(dev, "config for reset failed\n");
+ return ret;
+ }
+
+ ret = w1_uart_set_config_touch_0(w1dev);
+ if (ret < 0) {
+ dev_err(dev, "config for touch-0 failed\n");
+ return ret;
+ }
+
+ ret = w1_uart_set_config_touch_1(w1dev);
+ if (ret < 0) {
+ dev_err(dev, "config for touch-1 failed\n");
+ return ret;
+ }
+
+ serdev_device_set_flow_control(serdev, false);
+
+ return 0;
+}
+
+/*
+ * Send one byte (tx_byte) and read one byte (rx_byte) via serdev.
+ */
+static int w1_uart_serdev_tx_rx(struct w1_uart_device *w1dev,
+ const struct w1_uart_config *w1cfg, u8 *rx_byte)
+{
+ struct serdev_device *serdev = w1dev->serdev;
+ int ret;
+
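+ /* each 1-Wire operation selects its own baud-rate before transmitting */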
+ serdev_device_write_flush(serdev);
+ serdev_device_set_baudrate(serdev, w1cfg->baudrate);
+
+ /* write and immediately read one byte */
+ reinit_completion(&w1dev->rx_byte_received);
+ ret = serdev_device_write_buf(serdev, &w1cfg->tx_byte, 1);
+ if (ret != 1)
+ return -EIO;
+ ret = wait_for_completion_interruptible_timeout(
+ &w1dev->rx_byte_received, W1_UART_TIMEOUT);
+ if (ret <= 0)
+ return -EIO;
+
+ /* locking could fail when serdev is unexpectedly receiving. */
+ if (!mutex_trylock(&w1dev->rx_mutex))
+ return -EIO;
+
+ ret = w1dev->rx_err;
+ if (ret == 0)
+ *rx_byte = w1dev->rx_byte;
+
+ mutex_unlock(&w1dev->rx_mutex);
+
+ if (w1cfg->delay_us > 0)
+ fsleep(w1cfg->delay_us);
+
+ return ret;
+}
+
+static size_t w1_uart_serdev_receive_buf(struct serdev_device *serdev,
+ const u8 *buf, size_t count)
+{
+ struct w1_uart_device *w1dev = serdev_device_get_drvdata(serdev);
+
+ mutex_lock(&w1dev->rx_mutex);
+
+ /* we sent a single byte and expect a single byte in response */
+ if (count == 1) {
+ w1dev->rx_byte = buf[0];
+ w1dev->rx_err = 0;
+ } else {
+ w1dev->rx_err = -EIO;
+ }
+
+ mutex_unlock(&w1dev->rx_mutex);
+ complete(&w1dev->rx_byte_received);
+
+ return count;
+}
+
+static const struct serdev_device_ops w1_uart_serdev_ops = {
+ .receive_buf = w1_uart_serdev_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+/*
+ * 1-Wire reset and presence detect: a present slave manipulates the
+ * received byte by pulling the 1-Wire bus low.
+ */
+static u8 w1_uart_reset_bus(void *data)
+{
+ struct w1_uart_device *w1dev = data;
+ const struct w1_uart_config *w1cfg = &w1dev->cfg_reset;
+ int ret;
+ u8 val;
+
+ ret = w1_uart_serdev_tx_rx(w1dev, w1cfg, &val);
+ if (ret < 0)
+ return -1;
+
+ /* Device present (0) or no device (1) */
+ return val != w1cfg->tx_byte ? 0 : 1;
+}
+
+/*
+ * 1-Wire read and write cycle: only a read-0 manipulates the
+ * received byte; all other cycles leave the line untouched.
+ */
+static u8 w1_uart_touch_bit(void *data, u8 bit)
+{
+ struct w1_uart_device *w1dev = data;
+ const struct w1_uart_config *w1cfg = bit ? &w1dev->cfg_touch_1 :
+ &w1dev->cfg_touch_0;
+ int ret;
+ u8 val;
+
+ ret = w1_uart_serdev_tx_rx(w1dev, w1cfg, &val);
+
+ /* return inactive bus state on error */
+ if (ret < 0)
+ return 1;
+
+ return val == w1cfg->tx_byte ? 1 : 0;
+}
+
+static int w1_uart_probe(struct serdev_device *serdev)
+{
+ struct device *dev = &serdev->dev;
+ struct w1_uart_device *w1dev;
+ int ret;
+
+ w1dev = devm_kzalloc(dev, sizeof(*w1dev), GFP_KERNEL);
+ if (!w1dev)
+ return -ENOMEM;
+ w1dev->bus.data = w1dev;
+ w1dev->bus.reset_bus = w1_uart_reset_bus;
+ w1dev->bus.touch_bit = w1_uart_touch_bit;
+ w1dev->serdev = serdev;
+
+ init_completion(&w1dev->rx_byte_received);
+ mutex_init(&w1dev->rx_mutex);
+
+ ret = w1_uart_serdev_open(w1dev);
+ if (ret < 0)
+ return ret;
+ serdev_device_set_drvdata(serdev, w1dev);
+ serdev_device_set_client_ops(serdev, &w1_uart_serdev_ops);
+
+ return w1_add_master_device(&w1dev->bus);
+}
+
+static void w1_uart_remove(struct serdev_device *serdev)
+{
+ struct w1_uart_device *w1dev = serdev_device_get_drvdata(serdev);
+
+ /*
+ * Wait until the w1-uart callbacks have finished; the serdev is
+ * closed and its device data released automatically by devres
+ * (which waits until serdev-receive has finished).
+ */
+ w1_remove_master_device(&w1dev->bus);
+}
+
+static const struct of_device_id w1_uart_of_match[] = {
+ { .compatible = "w1-uart" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, w1_uart_of_match);
+
+static struct serdev_device_driver w1_uart_driver = {
+ .driver = {
+ .name = "w1-uart",
+ .of_match_table = w1_uart_of_match,
+ },
+ .probe = w1_uart_probe,
+ .remove = w1_uart_remove,
+};
+
+module_serdev_device_driver(w1_uart_driver);
+
+MODULE_DESCRIPTION("UART w1 bus driver");
+MODULE_AUTHOR("Christoph Winklhofer <cj.winklhofer@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 5353cbd75126c..afb1cc4606c52 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -167,7 +167,7 @@ static struct w1_family w1_default_family = {
static int w1_uevent(const struct device *dev, struct kobj_uevent_env *env);
-static struct bus_type w1_bus_type = {
+static const struct bus_type w1_bus_type = {
.name = "w1",
.uevent = w1_uevent,
};
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index d78fe7137799a..6bee137cfbe00 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -181,6 +181,17 @@ config BD957XMUF_WATCHDOG
watchdog. Alternatively say M to compile the driver as a module,
which will be called bd9576_wdt.
+config CROS_EC_WATCHDOG
+ tristate "ChromeOS EC-based watchdog"
+ select WATCHDOG_CORE
+ depends on CROS_EC
+ help
+ Watchdog driver for Chromebook devices equipped with an embedded controller.
+ The trigger event is recorded in the EC and checked on the subsequent boot.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_wdt.
+
config DA9052_WATCHDOG
tristate "Dialog DA9052 Watchdog"
depends on PMIC_DA9052 || COMPILE_TEST
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 7cbc34514ec11..3710c218f05e4 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -217,6 +217,7 @@ obj-$(CONFIG_XEN_WDT) += xen_wdt.o
# Architecture Independent
obj-$(CONFIG_BD957XMUF_WATCHDOG) += bd9576_wdt.o
+obj-$(CONFIG_CROS_EC_WATCHDOG) += cros_ec_wdt.o
obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o
obj-$(CONFIG_DA9062_WATCHDOG) += da9062_wdt.o
diff --git a/drivers/watchdog/cros_ec_wdt.c b/drivers/watchdog/cros_ec_wdt.c
new file mode 100644
index 0000000000000..ba045e29f9a5e
--- /dev/null
+++ b/drivers/watchdog/cros_ec_wdt.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2024 Google LLC.
+ * Author: Lukasz Majczak <lma@chromium.com>
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+#define CROS_EC_WATCHDOG_DEFAULT_TIME 30 /* seconds */
+#define DRV_NAME "cros-ec-wdt"
+
+union cros_ec_wdt_data {
+ struct ec_params_hang_detect req;
+ struct ec_response_hang_detect resp;
+} __packed;
+
+static int cros_ec_wdt_send_cmd(struct cros_ec_device *cros_ec,
+ union cros_ec_wdt_data *arg)
+{
+ int ret;
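+ /*
+ * Host command header followed by the hang-detect payload; only the
+ * GET_STATUS sub-command returns data, so insize is zero otherwise.
+ */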
+ struct {
+ struct cros_ec_command msg;
+ union cros_ec_wdt_data data;
+ } __packed buf = {
+ .msg = {
+ .version = 0,
+ .command = EC_CMD_HANG_DETECT,
+ .insize = (arg->req.command == EC_HANG_DETECT_CMD_GET_STATUS) ?
+ sizeof(struct ec_response_hang_detect) :
+ 0,
+ .outsize = sizeof(struct ec_params_hang_detect),
+ },
+ .data.req = arg->req
+ };
+
+ ret = cros_ec_cmd_xfer_status(cros_ec, &buf.msg);
+ if (ret < 0)
+ return ret;
+
+ arg->resp = buf.data.resp;
+
+ return 0;
+}
+
+static int cros_ec_wdt_ping(struct watchdog_device *wdd)
+{
+ struct cros_ec_device *cros_ec = watchdog_get_drvdata(wdd);
+ union cros_ec_wdt_data arg;
+ int ret;
+
+ arg.req.command = EC_HANG_DETECT_CMD_RELOAD;
+ ret = cros_ec_wdt_send_cmd(cros_ec, &arg);
+ if (ret < 0)
+ dev_dbg(wdd->parent, "Failed to ping watchdog (%d)", ret);
+
+ return ret;
+}
+
+static int cros_ec_wdt_start(struct watchdog_device *wdd)
+{
+ struct cros_ec_device *cros_ec = watchdog_get_drvdata(wdd);
+ union cros_ec_wdt_data arg;
+ int ret;
+
+ /* Prepare watchdog on EC side */
+ arg.req.command = EC_HANG_DETECT_CMD_SET_TIMEOUT;
+ arg.req.reboot_timeout_sec = wdd->timeout;
+ ret = cros_ec_wdt_send_cmd(cros_ec, &arg);
+ if (ret < 0)
+ dev_dbg(wdd->parent, "Failed to start watchdog (%d)", ret);
+
+ return ret;
+}
+
+static int cros_ec_wdt_stop(struct watchdog_device *wdd)
+{
+ struct cros_ec_device *cros_ec = watchdog_get_drvdata(wdd);
+ union cros_ec_wdt_data arg;
+ int ret;
+
+ arg.req.command = EC_HANG_DETECT_CMD_CANCEL;
+ ret = cros_ec_wdt_send_cmd(cros_ec, &arg);
+ if (ret < 0)
+ dev_dbg(wdd->parent, "Failed to stop watchdog (%d)", ret);
+
+ return ret;
+}
+
+static int cros_ec_wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
+{
+ unsigned int old_timeout = wdd->timeout;
+ int ret;
+
+ wdd->timeout = t;
+ ret = cros_ec_wdt_start(wdd);
+ if (ret < 0)
+ wdd->timeout = old_timeout;
+
+ return ret;
+}
+
+static const struct watchdog_info cros_ec_wdt_ident = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .firmware_version = 0,
+ .identity = DRV_NAME,
+};
+
+static const struct watchdog_ops cros_ec_wdt_ops = {
+ .owner = THIS_MODULE,
+ .ping = cros_ec_wdt_ping,
+ .start = cros_ec_wdt_start,
+ .stop = cros_ec_wdt_stop,
+ .set_timeout = cros_ec_wdt_set_timeout,
+};
+
+static int cros_ec_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
+ struct cros_ec_device *cros_ec = ec_dev->ec_dev;
+ struct watchdog_device *wdd;
+ union cros_ec_wdt_data arg;
+ int ret = 0;
+
+ wdd = devm_kzalloc(&pdev->dev, sizeof(*wdd), GFP_KERNEL);
+ if (!wdd)
+ return -ENOMEM;
+
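+ /* check whether the EC watchdog caused the last reset, for bootstatus */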
+ arg.req.command = EC_HANG_DETECT_CMD_GET_STATUS;
+ ret = cros_ec_wdt_send_cmd(cros_ec, &arg);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get watchdog bootstatus");
+
+ wdd->parent = &pdev->dev;
+ wdd->info = &cros_ec_wdt_ident;
+ wdd->ops = &cros_ec_wdt_ops;
+ wdd->timeout = CROS_EC_WATCHDOG_DEFAULT_TIME;
+ wdd->min_timeout = EC_HANG_DETECT_MIN_TIMEOUT;
+ wdd->max_timeout = EC_HANG_DETECT_MAX_TIMEOUT;
+ if (arg.resp.status == EC_HANG_DETECT_AP_BOOT_EC_WDT)
+ wdd->bootstatus = WDIOF_CARDRESET;
+
+ arg.req.command = EC_HANG_DETECT_CMD_CLEAR_STATUS;
+ ret = cros_ec_wdt_send_cmd(cros_ec, &arg);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to clear watchdog bootstatus");
+
+ watchdog_stop_on_reboot(wdd);
+ watchdog_stop_on_unregister(wdd);
+ watchdog_set_drvdata(wdd, cros_ec);
+ platform_set_drvdata(pdev, wdd);
+
+ return devm_watchdog_register_device(dev, wdd);
+}
+
+static int __maybe_unused cros_ec_wdt_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct watchdog_device *wdd = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ if (watchdog_active(wdd))
+ ret = cros_ec_wdt_stop(wdd);
+
+ return ret;
+}
+
+static int __maybe_unused cros_ec_wdt_resume(struct platform_device *pdev)
+{
+ struct watchdog_device *wdd = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ if (watchdog_active(wdd))
+ ret = cros_ec_wdt_start(wdd);
+
+ return ret;
+}
+
+static const struct platform_device_id cros_ec_wdt_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+
+static struct platform_driver cros_ec_wdt_driver = {
+ .probe = cros_ec_wdt_probe,
+ .suspend = pm_ptr(cros_ec_wdt_suspend),
+ .resume = pm_ptr(cros_ec_wdt_resume),
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .id_table = cros_ec_wdt_id,
+};
+
+module_platform_driver(cros_ec_wdt_driver);
+
+MODULE_DEVICE_TABLE(platform, cros_ec_wdt_id);
+MODULE_DESCRIPTION("Cros EC Watchdog Device Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 138dc8d8ca3d8..ae30e394d176e 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -378,11 +378,36 @@ static void hpwdt_exit(struct pci_dev *dev)
pci_disable_device(dev);
}
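+/* stop the watchdog across system sleep and rearm it on resume if it was active */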
+static int hpwdt_suspend(struct device *dev)
+{
+ if (watchdog_active(&hpwdt_dev))
+ hpwdt_stop();
+
+ return 0;
+}
+
+static int hpwdt_resume(struct device *dev)
+{
+ if (watchdog_active(&hpwdt_dev))
+ hpwdt_start(&hpwdt_dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops hpwdt_pm_ops = {
+ LATE_SYSTEM_SLEEP_PM_OPS(hpwdt_suspend, hpwdt_resume)
+};
+
static struct pci_driver hpwdt_driver = {
.name = "hpwdt",
.id_table = hpwdt_devices,
.probe = hpwdt_init_one,
.remove = hpwdt_exit,
+
+ .driver = {
+ .name = "hpwdt",
+ .pm = &hpwdt_pm_ops,
+ }
};
MODULE_AUTHOR("Tom Mingarelli");
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
index fb7fae750181b..8d71f6a2236bf 100644
--- a/drivers/watchdog/intel-mid_wdt.c
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -9,15 +9,20 @@
* Contact: David Cohen <david.a.cohen@linux.intel.com>
*/
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/math.h>
#include <linux/module.h>
-#include <linux/nmi.h>
+#include <linux/panic.h>
#include <linux/platform_device.h>
+#include <linux/types.h>
#include <linux/watchdog.h>
+
#include <linux/platform_data/intel-mid_wdt.h>
#include <asm/intel_scu_ipc.h>
-#include <asm/intel-mid.h>
#define IPC_WATCHDOG 0xf8
@@ -122,7 +127,7 @@ static int mid_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct watchdog_device *wdt_dev;
- struct intel_mid_wdt_pdata *pdata = dev->platform_data;
+ struct intel_mid_wdt_pdata *pdata = dev_get_platdata(dev);
struct mid_wdt *mid;
int ret;
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index 9297a58919127..3e8c15138edda 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -213,12 +213,16 @@ static int wdt_stop(struct watchdog_device *wdd)
/**
* wdt_set_timeout - set a new timeout value with watchdog ioctl
+ * @wdd: pointer to the watchdog_device structure
* @t: timeout value in seconds
*
* The hardware device has a 8 or 16 bit watchdog timer (depends on
* chip version) that can be configured to count seconds or minutes.
*
* Used within WDIOC_SETTIMEOUT watchdog device ioctl.
+ *
+ * Return: 0 if the timeout was set successfully, or a negative error code on
+ * failure.
*/
static int wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index 9e790f0c2096c..006f9c61aa64f 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -41,6 +41,7 @@ static const u32 reg_offset_data_kpss[] = {
struct qcom_wdt_match_data {
const u32 *offset;
bool pretimeout;
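+ /* largest raw tick value the hardware counter can hold */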
+ u32 max_tick_count;
};
struct qcom_wdt {
@@ -177,11 +178,13 @@ static const struct watchdog_info qcom_wdt_pt_info = {
static const struct qcom_wdt_match_data match_data_apcs_tmr = {
.offset = reg_offset_data_apcs_tmr,
.pretimeout = false,
+ .max_tick_count = 0x10000000U,
};
static const struct qcom_wdt_match_data match_data_kpss = {
.offset = reg_offset_data_kpss,
.pretimeout = true,
+ .max_tick_count = 0xFFFFFU,
};
static int qcom_wdt_probe(struct platform_device *pdev)
@@ -236,7 +239,7 @@ static int qcom_wdt_probe(struct platform_device *pdev)
*/
wdt->rate = clk_get_rate(clk);
if (wdt->rate == 0 ||
- wdt->rate > 0x10000000U) {
+ wdt->rate > data->max_tick_count) {
dev_err(dev, "invalid clock rate\n");
return -EINVAL;
}
@@ -260,7 +263,7 @@ static int qcom_wdt_probe(struct platform_device *pdev)
wdt->wdd.ops = &qcom_wdt_ops;
wdt->wdd.min_timeout = 1;
- wdt->wdd.max_timeout = 0x10000000U / wdt->rate;
+ wdt->wdd.max_timeout = data->max_tick_count / wdt->rate;
wdt->wdd.parent = dev;
wdt->layout = data->offset;
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 2756ed54ca3d5..109e2e37e8f09 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -25,6 +25,7 @@
#include <linux/moduleparam.h>
#include <linux/pm.h>
#include <linux/property.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -232,6 +233,7 @@ static int
sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
{
struct sp805_wdt *wdt;
+ struct reset_control *rst;
u64 rate = 0;
int ret = 0;
@@ -264,6 +266,12 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
return -ENODEV;
}
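+ /* deassert the optional reset line so the watchdog is accessible */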
+ rst = devm_reset_control_get_optional_exclusive(&adev->dev, NULL);
+ if (IS_ERR(rst))
+ return dev_err_probe(&adev->dev, PTR_ERR(rst), "Can not get reset\n");
+
+ reset_control_deassert(rst);
+
wdt->adev = adev;
wdt->wdd.info = &wdt_info;
wdt->wdd.ops = &wdt_ops;
diff --git a/drivers/watchdog/starfive-wdt.c b/drivers/watchdog/starfive-wdt.c
index e28ead24c520e..b4b059883618b 100644
--- a/drivers/watchdog/starfive-wdt.c
+++ b/drivers/watchdog/starfive-wdt.c
@@ -494,8 +494,13 @@ static int starfive_wdt_probe(struct platform_device *pdev)
if (ret)
goto err_exit;
- if (!early_enable)
- pm_runtime_put_sync(&pdev->dev);
+ if (!early_enable) {
+ if (pm_runtime_enabled(&pdev->dev)) {
+ ret = pm_runtime_put_sync(&pdev->dev);
+ if (ret)
+ goto err_exit;
+ }
+ }
return 0;
@@ -554,7 +559,10 @@ static int starfive_wdt_resume(struct device *dev)
starfive_wdt_set_reload_count(wdt, wdt->reload);
starfive_wdt_lock(wdt);
- return starfive_wdt_start(wdt);
+ if (watchdog_active(&wdt->wdd))
+ return starfive_wdt_start(wdt);
+
+ return 0;
}
static int starfive_wdt_runtime_suspend(struct device *dev)
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
index d9fd50df9802c..5404e03876202 100644
--- a/drivers/watchdog/stm32_iwdg.c
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -20,6 +20,8 @@
#include <linux/platform_device.h>
#include <linux/watchdog.h>
+#define DEFAULT_TIMEOUT 10
+
/* IWDG registers */
#define IWDG_KR 0x00 /* Key register */
#define IWDG_PR 0x04 /* Prescaler Register */
@@ -248,6 +250,7 @@ static int stm32_iwdg_probe(struct platform_device *pdev)
wdd->parent = dev;
wdd->info = &stm32_iwdg_info;
wdd->ops = &stm32_iwdg_ops;
+ wdd->timeout = DEFAULT_TIMEOUT;
wdd->min_timeout = DIV_ROUND_UP((RLR_MIN + 1) * PR_MIN, wdt->rate);
wdd->max_hw_heartbeat_ms = ((RLR_MAX + 1) * wdt->data->max_prescaler *
1000) / wdt->rate;
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 5b55ccae06d4b..aff2c3912ead6 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -260,12 +260,12 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
if (wdd->parent) {
ret = of_alias_get_id(wdd->parent->of_node, "watchdog");
if (ret >= 0)
- id = ida_simple_get(&watchdog_ida, ret,
- ret + 1, GFP_KERNEL);
+ id = ida_alloc_range(&watchdog_ida, ret, ret,
+ GFP_KERNEL);
}
if (id < 0)
- id = ida_simple_get(&watchdog_ida, 0, MAX_DOGS, GFP_KERNEL);
+ id = ida_alloc_max(&watchdog_ida, MAX_DOGS - 1, GFP_KERNEL);
if (id < 0)
return id;
@@ -273,19 +273,20 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
ret = watchdog_dev_register(wdd);
if (ret) {
- ida_simple_remove(&watchdog_ida, id);
+ ida_free(&watchdog_ida, id);
if (!(id == 0 && ret == -EBUSY))
return ret;
/* Retry in case a legacy watchdog module exists */
- id = ida_simple_get(&watchdog_ida, 1, MAX_DOGS, GFP_KERNEL);
+ id = ida_alloc_range(&watchdog_ida, 1, MAX_DOGS - 1,
+ GFP_KERNEL);
if (id < 0)
return id;
wdd->id = id;
ret = watchdog_dev_register(wdd);
if (ret) {
- ida_simple_remove(&watchdog_ida, id);
+ ida_free(&watchdog_ida, id);
return ret;
}
}
@@ -309,7 +310,7 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
wdd->id, ret);
watchdog_dev_unregister(wdd);
- ida_simple_remove(&watchdog_ida, id);
+ ida_free(&watchdog_ida, id);
return ret;
}
}
@@ -382,7 +383,7 @@ static void __watchdog_unregister_device(struct watchdog_device *wdd)
unregister_reboot_notifier(&wdd->reboot_nb);
watchdog_dev_unregister(wdd);
- ida_simple_remove(&watchdog_ida, wdd->id);
+ ida_free(&watchdog_ida, wdd->id);
}
/**
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 976c6cdf9ee67..aaf2514fcfa46 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -672,7 +672,6 @@ EXPORT_SYMBOL(xen_free_ballooned_pages);
static void __init balloon_add_regions(void)
{
-#if defined(CONFIG_XEN_PV)
unsigned long start_pfn, pages;
unsigned long pfn, extra_pfn_end;
unsigned int i;
@@ -696,7 +695,6 @@ static void __init balloon_add_regions(void)
balloon_stats.total_pages += extra_pfn_end - start_pfn;
}
-#endif
}
static int __init balloon_init(void)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 2faa4bf78c7a9..81effbd53dc52 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1190,7 +1190,7 @@ int xen_pirq_from_irq(unsigned irq)
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
- struct xenbus_device *dev)
+ struct xenbus_device *dev, bool shared)
{
int ret = -ENOMEM;
struct irq_info *info;
@@ -1224,7 +1224,8 @@ static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
*/
bind_evtchn_to_cpu(info, 0, false);
} else if (!WARN_ON(info->type != IRQT_EVTCHN)) {
- info->refcnt++;
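+ /* take an extra reference only for bindings requested as shared */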
+ if (shared && !WARN_ON(info->refcnt < 0))
+ info->refcnt++;
}
ret = info->irq;
@@ -1237,13 +1238,13 @@ out:
int bind_evtchn_to_irq(evtchn_port_t evtchn)
{
- return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip, NULL);
+ return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip, NULL, false);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
{
- return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip, NULL);
+ return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip, NULL, false);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
@@ -1295,7 +1296,8 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
evtchn_port_t remote_port,
- struct irq_chip *chip)
+ struct irq_chip *chip,
+ bool shared)
{
struct evtchn_bind_interdomain bind_interdomain;
int err;
@@ -1307,14 +1309,14 @@ static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
&bind_interdomain);
return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
- chip, dev);
+ chip, dev, shared);
}
int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev,
evtchn_port_t remote_port)
{
return bind_interdomain_evtchn_to_irq_chip(dev, remote_port,
- &xen_lateeoi_chip);
+ &xen_lateeoi_chip, false);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
@@ -1430,7 +1432,8 @@ static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
{
int irq, retval;
- irq = bind_evtchn_to_irq_chip(evtchn, chip, NULL);
+ irq = bind_evtchn_to_irq_chip(evtchn, chip, NULL,
+ irqflags & IRQF_SHARED);
if (irq < 0)
return irq;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
@@ -1471,7 +1474,8 @@ static int bind_interdomain_evtchn_to_irqhandler_chip(
{
int irq, retval;
- irq = bind_interdomain_evtchn_to_irq_chip(dev, remote_port, chip);
+ irq = bind_interdomain_evtchn_to_irq_chip(dev, remote_port, chip,
+ irqflags & IRQF_SHARED);
if (irq < 0)
return irq;
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 59717628ca42b..f6a2216c2c870 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -85,6 +85,7 @@ struct user_evtchn {
struct per_user_data *user;
evtchn_port_t port;
bool enabled;
+ bool unbinding;
};
static void evtchn_free_ring(evtchn_port_t *ring)
@@ -164,6 +165,10 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
struct per_user_data *u = evtchn->user;
unsigned int prod, cons;
+ /* Handler might be called when tearing down the IRQ. */
+ if (evtchn->unbinding)
+ return IRQ_HANDLED;
+
WARN(!evtchn->enabled,
"Interrupt for port %u, but apparently not enabled; per-user %p\n",
evtchn->port, u);
@@ -421,6 +426,7 @@ static void evtchn_unbind_from_user(struct per_user_data *u,
BUG_ON(irq < 0);
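+ /* tell evtchn_interrupt() that this port is being torn down */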
+ evtchn->unbinding = true;
unbind_from_irqhandler(irq, evtchn);
del_evtchn(u, evtchn);
diff --git a/drivers/xen/grant-dma-iommu.c b/drivers/xen/grant-dma-iommu.c
index 6a9fe02c6bfcc..2ee750a03c2f7 100644
--- a/drivers/xen/grant-dma-iommu.c
+++ b/drivers/xen/grant-dma-iommu.c
@@ -51,14 +51,12 @@ static int grant_dma_iommu_probe(struct platform_device *pdev)
return 0;
}
-static int grant_dma_iommu_remove(struct platform_device *pdev)
+static void grant_dma_iommu_remove(struct platform_device *pdev)
{
struct grant_dma_iommu_device *mmu = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
iommu_device_unregister(&mmu->iommu);
-
- return 0;
}
static struct platform_driver grant_dma_iommu_driver = {
@@ -67,7 +65,7 @@ static struct platform_driver grant_dma_iommu_driver = {
.of_match_table = grant_dma_iommu_of_match,
},
.probe = grant_dma_iommu_probe,
- .remove = grant_dma_iommu_remove,
+ .remove_new = grant_dma_iommu_remove,
};
static int __init grant_dma_iommu_init(void)