-rw-r--r--  .mailmap | 3
-rw-r--r--  Documentation/arch/riscv/vm-layout.rst | 16
-rw-r--r--  Documentation/arch/x86/resctrl.rst | 8
-rw-r--r--  Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/i2c/st,nomadik-i2c.yaml | 49
-rw-r--r--  Documentation/devicetree/bindings/mtd/partitions/linux,ubi.yaml | 75
-rw-r--r--  Documentation/devicetree/bindings/mtd/partitions/ubi-volume.yaml | 40
-rw-r--r--  Documentation/devicetree/bindings/riscv/cpus.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/riscv/extensions.yaml | 7
-rw-r--r--  Documentation/devicetree/bindings/rtc/abracon,abx80x.txt | 31
-rw-r--r--  Documentation/devicetree/bindings/rtc/abracon,abx80x.yaml | 98
-rw-r--r--  Documentation/devicetree/bindings/rtc/atmel,at91sam9260-rtt.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/rtc/mediatek,mt2712-rtc.yaml | 39
-rw-r--r--  Documentation/devicetree/bindings/rtc/mediatek,mt7622-rtc.yaml | 52
-rw-r--r--  Documentation/devicetree/bindings/rtc/rtc-mt2712.txt | 14
-rw-r--r--  Documentation/devicetree/bindings/rtc/rtc-mt7622.txt | 21
-rw-r--r--  Documentation/devicetree/bindings/rtc/xlnx,zynqmp-rtc.yaml | 11
-rw-r--r--  Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml | 11
-rw-r--r--  Documentation/devicetree/bindings/timer/cdns,ttc.yaml | 22
-rw-r--r--  Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/timer/ralink,cevt-systick.yaml | 38
-rw-r--r--  Documentation/devicetree/bindings/timer/renesas,ostm.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/timer/renesas,tmu.yaml | 18
-rw-r--r--  Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml | 2
-rw-r--r--  Documentation/features/sched/membarrier-sync-core/arch-support.txt | 18
-rw-r--r--  Documentation/mm/page_cache.rst | 10
-rw-r--r--  Documentation/scheduler/index.rst | 1
-rw-r--r--  Documentation/scheduler/membarrier.rst | 39
-rw-r--r--  Documentation/scheduler/sched-design-CFS.rst | 3
-rw-r--r--  Documentation/spi/spi-lm70llp.rst | 4
-rw-r--r--  Documentation/spi/spidev.rst | 2
-rw-r--r--  MAINTAINERS | 13
-rw-r--r--  Makefile | 4
-rw-r--r--  arch/Kconfig | 2
-rw-r--r--  arch/arm/Kconfig | 4
-rw-r--r--  arch/arm/Kconfig.debug | 3
-rw-r--r--  arch/arm/include/asm/mman.h | 14
-rw-r--r--  arch/arm/include/asm/ptrace.h | 5
-rw-r--r--  arch/arm/kernel/Makefile | 2
-rw-r--r--  arch/arm/kernel/iwmmxt.S | 51
-rw-r--r--  arch/arm/kernel/pj4-cp0.c | 135
-rw-r--r--  arch/arm/kernel/traps.c | 2
-rw-r--r--  arch/arm/kernel/unwind.c | 2
-rw-r--r--  arch/arm/mm/fault.c | 8
-rw-r--r--  arch/arm/mm/flush.c | 3
-rw-r--r--  arch/arm/mm/init.c | 2
-rw-r--r--  arch/arm64/Kconfig | 3
-rw-r--r--  arch/hexagon/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/loongarch/Kconfig | 8
-rw-r--r--  arch/loongarch/Kconfig.debug | 11
-rw-r--r--  arch/loongarch/Makefile | 23
-rw-r--r--  arch/loongarch/crypto/crc32-loongarch.c | 2
-rw-r--r--  arch/loongarch/include/asm/Kbuild | 3
-rw-r--r--  arch/loongarch/include/asm/bug.h | 1
-rw-r--r--  arch/loongarch/include/asm/cacheflush.h | 3
-rw-r--r--  arch/loongarch/include/asm/exception.h | 2
-rw-r--r--  arch/loongarch/include/asm/io.h | 2
-rw-r--r--  arch/loongarch/include/asm/module.h | 7
-rw-r--r--  arch/loongarch/include/asm/orc_header.h | 18
-rw-r--r--  arch/loongarch/include/asm/orc_lookup.h | 31
-rw-r--r--  arch/loongarch/include/asm/orc_types.h | 58
-rw-r--r--  arch/loongarch/include/asm/page.h | 3
-rw-r--r--  arch/loongarch/include/asm/percpu.h | 7
-rw-r--r--  arch/loongarch/include/asm/pgtable.h | 3
-rw-r--r--  arch/loongarch/include/asm/qspinlock.h | 18
-rw-r--r--  arch/loongarch/include/asm/stackframe.h | 3
-rw-r--r--  arch/loongarch/include/asm/thread_info.h | 2
-rw-r--r--  arch/loongarch/include/asm/unwind.h | 20
-rw-r--r--  arch/loongarch/include/asm/unwind_hints.h | 28
-rw-r--r--  arch/loongarch/kernel/Makefile | 4
-rw-r--r--  arch/loongarch/kernel/entry.S | 5
-rw-r--r--  arch/loongarch/kernel/fpu.S | 7
-rw-r--r--  arch/loongarch/kernel/genex.S | 6
-rw-r--r--  arch/loongarch/kernel/lbt.S | 3
-rw-r--r--  arch/loongarch/kernel/mcount_dyn.S | 6
-rw-r--r--  arch/loongarch/kernel/module.c | 22
-rw-r--r--  arch/loongarch/kernel/relocate_kernel.S | 7
-rw-r--r--  arch/loongarch/kernel/rethook_trampoline.S | 1
-rw-r--r--  arch/loongarch/kernel/setup.c | 2
-rw-r--r--  arch/loongarch/kernel/stacktrace.c | 41
-rw-r--r--  arch/loongarch/kernel/traps.c | 42
-rw-r--r--  arch/loongarch/kernel/unwind_orc.c | 528
-rw-r--r--  arch/loongarch/kernel/vmlinux.lds.S | 3
-rw-r--r--  arch/loongarch/kvm/switch.S | 9
-rw-r--r--  arch/loongarch/lib/clear_user.S | 3
-rw-r--r--  arch/loongarch/lib/copy_user.S | 3
-rw-r--r--  arch/loongarch/lib/memcpy.S | 3
-rw-r--r--  arch/loongarch/lib/memset.S | 3
-rw-r--r--  arch/loongarch/mm/tlb.c | 27
-rw-r--r--  arch/loongarch/mm/tlbex.S | 9
-rw-r--r--  arch/loongarch/vdso/Makefile | 1
-rw-r--r--  arch/parisc/include/asm/mman.h | 14
-rw-r--r--  arch/powerpc/Kconfig | 9
-rw-r--r--  arch/powerpc/include/asm/kexec.h | 98
-rw-r--r--  arch/powerpc/kernel/prom.c | 2
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 2
-rw-r--r--  arch/powerpc/kernel/smp.c | 4
-rw-r--r--  arch/powerpc/kexec/Makefile | 3
-rw-r--r--  arch/powerpc/kexec/core.c | 4
-rw-r--r--  arch/powerpc/kexec/elf_64.c | 4
-rw-r--r--  arch/powerpc/kexec/file_load_64.c | 269
-rw-r--r--  arch/powerpc/mm/book3s32/mmu.c | 7
-rw-r--r--  arch/powerpc/mm/mmu_decl.h | 8
-rw-r--r--  arch/powerpc/mm/nohash/8xx.c | 33
-rw-r--r--  arch/powerpc/mm/nohash/e500.c | 10
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 38
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 2
-rw-r--r--  arch/riscv/Kbuild | 1
-rw-r--r--  arch/riscv/Kconfig | 80
-rw-r--r--  arch/riscv/Makefile | 5
-rw-r--r--  arch/riscv/boot/dts/renesas/r9a07g043f.dtsi | 4
-rw-r--r--  arch/riscv/configs/defconfig | 3
-rw-r--r--  arch/riscv/crypto/Kconfig | 93
-rw-r--r--  arch/riscv/crypto/Makefile | 23
-rw-r--r--  arch/riscv/crypto/aes-macros.S | 156
-rw-r--r--  arch/riscv/crypto/aes-riscv64-glue.c | 637
-rw-r--r--  arch/riscv/crypto/aes-riscv64-zvkned-zvbb-zvkg.S | 312
-rw-r--r--  arch/riscv/crypto/aes-riscv64-zvkned-zvkb.S | 146
-rw-r--r--  arch/riscv/crypto/aes-riscv64-zvkned.S | 339
-rw-r--r--  arch/riscv/crypto/chacha-riscv64-glue.c | 101
-rw-r--r--  arch/riscv/crypto/chacha-riscv64-zvkb.S | 294
-rw-r--r--  arch/riscv/crypto/ghash-riscv64-glue.c | 168
-rw-r--r--  arch/riscv/crypto/ghash-riscv64-zvkg.S | 72
-rw-r--r--  arch/riscv/crypto/sha256-riscv64-glue.c | 137
-rw-r--r--  arch/riscv/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S | 225
-rw-r--r--  arch/riscv/crypto/sha512-riscv64-glue.c | 133
-rw-r--r--  arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S | 203
-rw-r--r--  arch/riscv/crypto/sm3-riscv64-glue.c | 112
-rw-r--r--  arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S | 123
-rw-r--r--  arch/riscv/crypto/sm4-riscv64-glue.c | 107
-rw-r--r--  arch/riscv/crypto/sm4-riscv64-zvksed-zvkb.S | 117
-rw-r--r--  arch/riscv/errata/andes/errata.c | 10
-rw-r--r--  arch/riscv/include/asm/asm.h | 10
-rw-r--r--  arch/riscv/include/asm/atomic.h | 17
-rw-r--r--  arch/riscv/include/asm/barrier.h | 21
-rw-r--r--  arch/riscv/include/asm/bitops.h | 138
-rw-r--r--  arch/riscv/include/asm/cmpxchg.h | 5
-rw-r--r--  arch/riscv/include/asm/compat.h | 19
-rw-r--r--  arch/riscv/include/asm/cpufeature.h | 31
-rw-r--r--  arch/riscv/include/asm/elf.h | 11
-rw-r--r--  arch/riscv/include/asm/errata_list.h | 13
-rw-r--r--  arch/riscv/include/asm/fence.h | 10
-rw-r--r--  arch/riscv/include/asm/hwcap.h | 1
-rw-r--r--  arch/riscv/include/asm/io.h | 8
-rw-r--r--  arch/riscv/include/asm/membarrier.h | 50
-rw-r--r--  arch/riscv/include/asm/mmio.h | 5
-rw-r--r--  arch/riscv/include/asm/mmiowb.h | 2
-rw-r--r--  arch/riscv/include/asm/pgalloc.h | 67
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 32
-rw-r--r--  arch/riscv/include/asm/processor.h | 31
-rw-r--r--  arch/riscv/include/asm/simd.h | 4
-rw-r--r--  arch/riscv/include/asm/suspend.h | 3
-rw-r--r--  arch/riscv/include/asm/sync_core.h | 29
-rw-r--r--  arch/riscv/include/asm/syscall_wrapper.h | 53
-rw-r--r--  arch/riscv/include/asm/tlb.h | 18
-rw-r--r--  arch/riscv/include/asm/vector.h | 11
-rw-r--r--  arch/riscv/include/asm/vendorid_list.h | 2
-rw-r--r--  arch/riscv/kernel/Makefile | 4
-rw-r--r--  arch/riscv/kernel/alternative.c | 2
-rw-r--r--  arch/riscv/kernel/cpufeature.c | 256
-rw-r--r--  arch/riscv/kernel/entry.S | 3
-rw-r--r--  arch/riscv/kernel/pi/Makefile | 3
-rw-r--r--  arch/riscv/kernel/ptrace.c | 6
-rw-r--r--  arch/riscv/kernel/smpboot.c | 1
-rw-r--r--  arch/riscv/kernel/suspend.c | 49
-rw-r--r--  arch/riscv/kernel/sys_hwprobe.c | 13
-rw-r--r--  arch/riscv/kernel/traps.c | 17
-rw-r--r--  arch/riscv/kernel/traps_misaligned.c | 17
-rw-r--r--  arch/riscv/kernel/unaligned_access_speed.c | 281
-rw-r--r--  arch/riscv/lib/csum.c | 7
-rw-r--r--  arch/riscv/lib/uaccess_vector.S | 1
-rw-r--r--  arch/riscv/mm/cacheflush.c | 4
-rw-r--r--  arch/riscv/mm/context.c | 2
-rw-r--r--  arch/riscv/mm/init.c | 6
-rw-r--r--  arch/riscv/mm/pgtable.c | 2
-rw-r--r--  arch/x86/boot/compressed/efi_mixed.S | 9
-rw-r--r--  arch/x86/configs/tiny.config | 1
-rw-r--r--  arch/x86/include/asm/crash_reserve.h | 2
-rw-r--r--  arch/x86/include/asm/intel-family.h | 1
-rw-r--r--  arch/x86/include/asm/suspend_32.h | 10
-rw-r--r--  arch/x86/kernel/cpu/common.c | 9
-rw-r--r--  arch/x86/kernel/cpu/topology.c | 11
-rw-r--r--  arch/x86/kernel/cpu/topology_common.c | 12
-rw-r--r--  arch/x86/kernel/e820.c | 17
-rw-r--r--  arch/x86/kernel/fpu/xstate.c | 5
-rw-r--r--  arch/x86/kernel/fpu/xstate.h | 14
-rw-r--r--  arch/x86/kernel/head64.c | 18
-rw-r--r--  arch/x86/kernel/kprobes/core.c | 11
-rw-r--r--  arch/x86/kernel/mpparse.c | 10
-rw-r--r--  arch/x86/kernel/setup.c | 10
-rw-r--r--  arch/x86/kernel/smpboot.c | 32
-rw-r--r--  crypto/Kconfig | 3
-rw-r--r--  crypto/asymmetric_keys/mscode_parser.c | 3
-rw-r--r--  crypto/asymmetric_keys/pkcs7_parser.c | 4
-rw-r--r--  crypto/asymmetric_keys/public_key.c | 3
-rw-r--r--  crypto/asymmetric_keys/signature.c | 2
-rw-r--r--  crypto/asymmetric_keys/x509_cert_parser.c | 8
-rw-r--r--  crypto/testmgr.h | 80
-rw-r--r--  drivers/acpi/Kconfig | 2
-rw-r--r--  drivers/acpi/riscv/Makefile | 4
-rw-r--r--  drivers/acpi/riscv/cppc.c | 157
-rw-r--r--  drivers/acpi/riscv/cpuidle.c | 81
-rw-r--r--  drivers/block/floppy.c | 1
-rw-r--r--  drivers/clocksource/arm_global_timer.c | 35
-rw-r--r--  drivers/clocksource/timer-clint.c | 2
-rw-r--r--  drivers/clocksource/timer-imx-gpt.c | 3
-rw-r--r--  drivers/clocksource/timer-imx-sysctr.c | 117
-rw-r--r--  drivers/clocksource/timer-riscv.c | 5
-rw-r--r--  drivers/clocksource/timer-stm32.c | 4
-rw-r--r--  drivers/clocksource/timer-ti-32k.c | 2
-rw-r--r--  drivers/cpufreq/Kconfig | 29
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 26
-rw-r--r--  drivers/cpuidle/cpuidle-riscv-sbi.c | 49
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_main.c | 10
-rw-r--r--  drivers/firmware/efi/earlycon.c | 2
-rw-r--r--  drivers/firmware/efi/efi.c | 2
-rw-r--r--  drivers/firmware/efi/libstub/randomalloc.c | 2
-rw-r--r--  drivers/firmware/efi/libstub/x86-stub.c | 3
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v7_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 71
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_state.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/opp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/link.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 28
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 19
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 31
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 18
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 18
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt8912b.c | 16
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9611uxc.c | 2
-rw-r--r--  drivers/gpu/drm/drm_panel.c | 17
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 7
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp.c | 7
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/parallel-display.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 9
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_exec.c | 41
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_pagefault.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_trace.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 32
-rw-r--r--  drivers/gpu/drm/xe/xe_vm_types.h | 7
-rw-r--r--  drivers/gpu/drm/xe/xe_vram_freq.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-nomadik.c | 740
-rw-r--r--  drivers/i2c/muxes/i2c-mux-pca954x.c | 46
-rw-r--r--  drivers/iommu/dma-iommu.c | 9
-rw-r--r--  drivers/irqchip/irq-renesas-rzg2l.c | 72
-rw-r--r--  drivers/md/dm-integrity.c | 18
-rw-r--r--  drivers/md/dm-snap.c | 4
-rw-r--r--  drivers/misc/lkdtm/bugs.c | 2
-rw-r--r--  drivers/mtd/ubi/Kconfig | 13
-rw-r--r--  drivers/mtd/ubi/Makefile | 1
-rw-r--r--  drivers/mtd/ubi/block.c | 136
-rw-r--r--  drivers/mtd/ubi/build.c | 154
-rw-r--r--  drivers/mtd/ubi/eba.c | 7
-rw-r--r--  drivers/mtd/ubi/fastmap.c | 7
-rw-r--r--  drivers/mtd/ubi/kapi.c | 56
-rw-r--r--  drivers/mtd/ubi/nvmem.c | 191
-rw-r--r--  drivers/mtd/ubi/ubi.h | 3
-rw-r--r--  drivers/mtd/ubi/vmt.c | 75
-rw-r--r--  drivers/mtd/ubi/vtbl.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ddp.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 10
-rw-r--r--  drivers/nvme/host/apple.c | 6
-rw-r--r--  drivers/nvme/host/core.c | 11
-rw-r--r--  drivers/nvme/host/fabrics.h | 7
-rw-r--r--  drivers/nvme/host/pci.c | 3
-rw-r--r--  drivers/nvme/host/pr.c | 3
-rw-r--r--  drivers/nvme/host/sysfs.c | 3
-rw-r--r--  drivers/nvme/host/tcp.c | 21
-rw-r--r--  drivers/nvme/host/trace.c | 105
-rw-r--r--  drivers/nvme/target/rdma.c | 8
-rw-r--r--  drivers/nvme/target/tcp.c | 1
-rw-r--r--  drivers/nvme/target/trace.c | 98
-rw-r--r--  drivers/perf/Kconfig | 17
-rw-r--r--  drivers/perf/riscv_pmu_sbi.c | 37
-rw-r--r--  drivers/pwm/pwm-img.c | 4
-rw-r--r--  drivers/regulator/core.c | 11
-rw-r--r--  drivers/rtc/Kconfig | 3
-rw-r--r--  drivers/rtc/class.c | 21
-rw-r--r--  drivers/rtc/interface.c | 2
-rw-r--r--  drivers/rtc/rtc-ds1511.c | 340
-rw-r--r--  drivers/rtc/rtc-m41t80.c | 5
-rw-r--r--  drivers/rtc/rtc-max31335.c | 2
-rw-r--r--  drivers/rtc/rtc-nct3018y.c | 6
-rw-r--r--  drivers/rtc/rtc-pcf8523.c | 25
-rw-r--r--  drivers/scsi/Kconfig | 5
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/scsi.c | 2
-rw-r--r--  drivers/scsi/scsi_debug.c | 293
-rw-r--r--  drivers/scsi/scsi_proto_test.c | 56
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 10
-rw-r--r--  drivers/scsi/sd.c | 111
-rw-r--r--  drivers/scsi/sd.h | 3
-rw-r--r--  drivers/siox/siox-bus-gpio.c | 62
-rw-r--r--  drivers/siox/siox-core.c | 46
-rw-r--r--  drivers/siox/siox.h | 4
-rw-r--r--  drivers/spi/spi-fsl-lpspi.c | 8
-rw-r--r--  drivers/spi/spi-imx.c | 4
-rw-r--r--  drivers/spi/spi-lm70llp.c | 4
-rw-r--r--  drivers/spi/spi-mem.c | 2
-rw-r--r--  drivers/spi/spi-mt65xx.c | 22
-rw-r--r--  drivers/spi/spi.c | 24
-rw-r--r--  drivers/uio/uio.c | 2
-rw-r--r--  drivers/uio/uio_dmem_genirq.c | 4
-rw-r--r--  drivers/uio/uio_pruss.c | 2
-rw-r--r--  drivers/video/fbdev/Kconfig | 2
-rw-r--r--  drivers/video/fbdev/arkfb.c | 15
-rw-r--r--  drivers/video/fbdev/core/fbcon.c | 16
-rw-r--r--  drivers/video/fbdev/core/fbmem.c | 12
-rw-r--r--  drivers/video/fbdev/core/fbmon.c | 7
-rw-r--r--  drivers/video/fbdev/core/svgalib.c | 15
-rw-r--r--  drivers/video/fbdev/mb862xx/mb862xxfbdrv.c | 18
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c | 13
-rw-r--r--  drivers/video/fbdev/s3fb.c | 15
-rw-r--r--  drivers/video/fbdev/uvesafb.c | 2
-rw-r--r--  drivers/video/fbdev/vga16fb.c | 6
-rw-r--r--  drivers/video/fbdev/via/accel.c | 4
-rw-r--r--  drivers/video/fbdev/vt8623fb.c | 15
-rw-r--r--  drivers/video/sticore.c | 2
-rw-r--r--  fs/9p/vfs_inode.c | 16
-rw-r--r--  fs/9p/vfs_inode_dotl.c | 6
-rw-r--r--  fs/binfmt_elf_fdpic.c | 2
-rw-r--r--  fs/btrfs/block-group.c | 3
-rw-r--r--  fs/btrfs/extent_io.c | 13
-rw-r--r--  fs/btrfs/extent_map.c | 16
-rw-r--r--  fs/btrfs/scrub.c | 12
-rw-r--r--  fs/btrfs/volumes.c | 27
-rw-r--r--  fs/btrfs/zoned.c | 14
-rw-r--r--  fs/ceph/caps.c | 8
-rw-r--r--  fs/ceph/file.c | 31
-rw-r--r--  fs/erofs/super.c | 1
-rw-r--r--  fs/exec.c | 1
-rw-r--r--  fs/gfs2/bmap.c | 5
-rw-r--r--  fs/smb/client/cached_dir.c | 3
-rw-r--r--  fs/smb/client/cifsfs.c | 4
-rw-r--r--  fs/smb/client/cifsglob.h | 5
-rw-r--r--  fs/smb/client/connect.c | 9
-rw-r--r--  fs/smb/client/file.c | 20
-rw-r--r--  fs/smb/client/inode.c | 26
-rw-r--r--  fs/smb/client/misc.c | 3
-rw-r--r--  fs/smb/client/sess.c | 4
-rw-r--r--  fs/smb/client/smb2ops.c | 2
-rw-r--r--  fs/smb/client/smb2pdu.c | 5
-rw-r--r--  fs/smb/common/smb2pdu.h | 12
-rw-r--r--  fs/ubifs/debug.c | 9
-rw-r--r--  fs/ubifs/dir.c | 2
-rw-r--r--  fs/ubifs/file.c | 443
-rw-r--r--  fs/ubifs/find.c | 32
-rw-r--r--  fs/ubifs/journal.c | 171
-rw-r--r--  fs/ubifs/lprops.c | 6
-rw-r--r--  fs/ubifs/lpt_commit.c | 1
-rw-r--r--  fs/ubifs/super.c | 2
-rw-r--r--  fs/ubifs/tnc.c | 9
-rw-r--r--  fs/ubifs/tnc_misc.c | 22
-rw-r--r--  fs/ubifs/ubifs.h | 5
-rw-r--r--  fs/xfs/xfs_buf_mem.c | 4
-rw-r--r--  fs/xfs/xfs_dquot.c | 18
-rw-r--r--  fs/xfs/xfs_trace.h | 9
-rw-r--r--  include/asm-generic/bitops/__ffs.h | 8
-rw-r--r--  include/asm-generic/bitops/__fls.h | 8
-rw-r--r--  include/asm-generic/bitops/ffs.h | 8
-rw-r--r--  include/asm-generic/bitops/fls.h | 8
-rw-r--r--  include/drm/drm_bridge.h | 4
-rw-r--r--  include/drm/drm_fixed.h | 3
-rw-r--r--  include/drm/drm_modeset_helper_vtables.h | 3
-rw-r--r--  include/linux/compiler.h | 9
-rw-r--r--  include/linux/crash_core.h | 12
-rw-r--r--  include/linux/fb.h | 18
-rw-r--r--  include/linux/font.h | 3
-rw-r--r--  include/linux/mm.h | 2
-rw-r--r--  include/linux/mman.h | 8
-rw-r--r--  include/linux/mtd/ubi.h | 2
-rw-r--r--  include/linux/oid_registry.h | 4
-rw-r--r--  include/linux/overflow.h | 25
-rw-r--r--  include/linux/pagevec.h | 4
-rw-r--r--  include/linux/rtc.h | 2
-rw-r--r--  include/linux/sync_core.h | 16
-rw-r--r--  include/scsi/scsi_device.h | 1
-rw-r--r--  include/scsi/scsi_proto.h | 78
-rw-r--r--  init/Kconfig | 3
-rw-r--r--  init/initramfs.c | 2
-rw-r--r--  io_uring/futex.c | 1
-rw-r--r--  io_uring/io_uring.c | 63
-rw-r--r--  io_uring/kbuf.c | 20
-rw-r--r--  io_uring/kbuf.h | 2
-rw-r--r--  io_uring/net.c | 9
-rw-r--r--  io_uring/poll.c | 4
-rw-r--r--  io_uring/rw.c | 2
-rw-r--r--  io_uring/sqpoll.c | 6
-rw-r--r--  io_uring/waitid.c | 7
-rw-r--r--  kernel/crash_reserve.c | 7
-rw-r--r--  kernel/dma/swiotlb.c | 45
-rw-r--r--  kernel/entry/common.c | 8
-rw-r--r--  kernel/module/Kconfig | 5
-rw-r--r--  kernel/power/suspend_test.c | 2
-rw-r--r--  kernel/printk/printk.c | 6
-rw-r--r--  kernel/sched/core.c | 16
-rw-r--r--  kernel/sched/membarrier.c | 13
-rw-r--r--  kernel/sys.c | 7
-rw-r--r--  kernel/time/alarmtimer.c | 2
-rw-r--r--  kernel/time/posix-clock.c | 16
-rw-r--r--  kernel/time/timer.c | 12
-rw-r--r--  kernel/time/timer_migration.c | 11
-rw-r--r--  kernel/trace/trace_probe.c | 2
-rw-r--r--  lib/Kconfig.debug | 12
-rw-r--r--  lib/Kconfig.ubsan | 2
-rw-r--r--  lib/fonts/fonts.c | 15
-rw-r--r--  lib/memcpy_kunit.c | 3
-rw-r--r--  lib/overflow_kunit.c | 19
-rw-r--r--  mm/filemap.c | 16
-rw-r--r--  mm/memory.c | 4
-rw-r--r--  mm/mmap.c | 2
-rw-r--r--  mm/page_owner.c | 33
-rw-r--r--  mm/shmem_quota.c | 10
-rw-r--r--  mm/userfaultfd.c | 3
-rw-r--r--  mm/zswap.c | 45
-rw-r--r--  scripts/Makefile | 7
-rw-r--r--  sound/core/control.c | 4
-rw-r--r--  sound/pci/hda/patch_realtek.c | 68
-rw-r--r--  sound/soc/amd/yc/acp6x-mach.c | 14
-rw-r--r--  sound/soc/amd/yc/pci-acp6x.c | 1
-rw-r--r--  sound/soc/codecs/tlv320adc3xxx.c | 4
-rw-r--r--  sound/soc/rockchip/rockchip_i2s_tdm.c | 352
-rw-r--r--  sound/soc/soc-compress.c | 4
-rw-r--r--  sound/soc/soc-core.c | 3
-rw-r--r--  sound/soc/sof/amd/acp-loader.c | 2
-rw-r--r--  sound/soc/sof/amd/acp.c | 50
-rw-r--r--  sound/soc/sof/amd/acp.h | 7
-rw-r--r--  sound/soc/sof/amd/vangogh.c | 9
-rw-r--r--  tools/Makefile | 13
-rw-r--r--  tools/arch/loongarch/include/asm/inst.h | 161
-rw-r--r--  tools/arch/loongarch/include/asm/orc_types.h | 58
-rw-r--r--  tools/include/linux/bitops.h | 11
-rw-r--r--  tools/objtool/Makefile | 4
-rw-r--r--  tools/objtool/arch/loongarch/Build | 3
-rw-r--r--  tools/objtool/arch/loongarch/decode.c | 356
-rw-r--r--  tools/objtool/arch/loongarch/include/arch/cfi_regs.h | 22
-rw-r--r--  tools/objtool/arch/loongarch/include/arch/elf.h | 30
-rw-r--r--  tools/objtool/arch/loongarch/include/arch/special.h | 33
-rw-r--r--  tools/objtool/arch/loongarch/orc.c | 171
-rw-r--r--  tools/objtool/arch/loongarch/special.c | 15
-rw-r--r--  tools/objtool/arch/x86/Build | 1
-rw-r--r--  tools/objtool/arch/x86/orc.c | 188
-rw-r--r--  tools/objtool/check.c | 52
-rw-r--r--  tools/objtool/include/objtool/elf.h | 1
-rw-r--r--  tools/objtool/include/objtool/orc.h | 14
-rw-r--r--  tools/objtool/orc_dump.c | 69
-rw-r--r--  tools/objtool/orc_gen.c | 113
-rw-r--r--  tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json | 68
-rw-r--r--  tools/perf/pmu-events/arch/riscv/andes/ax45/instructions.json | 127
-rw-r--r--  tools/perf/pmu-events/arch/riscv/andes/ax45/memory.json | 57
-rw-r--r--  tools/perf/pmu-events/arch/riscv/andes/ax45/microarch.json | 77
-rw-r--r--  tools/perf/pmu-events/arch/riscv/mapfile.csv | 1
-rw-r--r--  tools/testing/selftests/exec/Makefile | 4
-rwxr-xr-x  tools/testing/selftests/exec/binfmt_script.py | 10
-rw-r--r--  tools/testing/selftests/exec/execveat.c | 12
-rw-r--r--  tools/testing/selftests/exec/load_address.c | 34
-rw-r--r--  tools/testing/selftests/exec/recursion-depth.c | 53
-rw-r--r--  tools/testing/selftests/mm/gup_test.c | 2
-rw-r--r--  tools/testing/selftests/mm/protection_keys.c | 6
-rw-r--r--  tools/testing/selftests/mm/soft-dirty.c | 2
-rw-r--r--  tools/testing/selftests/mm/split_huge_page_test.c | 2
-rw-r--r--  tools/testing/selftests/mm/uffd-common.c | 3
-rw-r--r--  tools/testing/selftests/mm/uffd-common.h | 2
-rw-r--r--  tools/testing/selftests/mm/uffd-unit-tests.c | 13
-rw-r--r--  tools/testing/selftests/riscv/mm/mmap_bottomup.c | 23
-rw-r--r--  tools/testing/selftests/riscv/mm/mmap_default.c | 23
-rw-r--r--  tools/testing/selftests/riscv/mm/mmap_test.h | 107
531 files changed, 12227 insertions, 3914 deletions
diff --git a/.mailmap b/.mailmap
index 02cd23b76eda45..59c9a841bf7149 100644
--- a/.mailmap
+++ b/.mailmap
@@ -340,7 +340,8 @@ Lee Jones <lee@kernel.org> <joneslee@google.com>
Lee Jones <lee@kernel.org> <lee.jones@canonical.com>
Lee Jones <lee@kernel.org> <lee.jones@linaro.org>
Lee Jones <lee@kernel.org> <lee@ubuntu.com>
-Leonard Crestez <leonard.crestez@nxp.com> Leonard Crestez <cdleonard@gmail.com>
+Leonard Crestez <cdleonard@gmail.com> <leonard.crestez@nxp.com>
+Leonard Crestez <cdleonard@gmail.com> <leonard.crestez@intel.com>
Leonardo Bras <leobras.c@gmail.com> <leonardo@linux.ibm.com>
Leonard Göhrs <l.goehrs@pengutronix.de>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
diff --git a/Documentation/arch/riscv/vm-layout.rst b/Documentation/arch/riscv/vm-layout.rst
index 69ff6da1dbf8f3..e476b4386bd9db 100644
--- a/Documentation/arch/riscv/vm-layout.rst
+++ b/Documentation/arch/riscv/vm-layout.rst
@@ -144,14 +144,8 @@ passing 0 into the hint address parameter of mmap. On CPUs with an address space
smaller than sv48, the CPU maximum supported address space will be the default.
Software can "opt-in" to receiving VAs from another VA space by providing
-a hint address to mmap. A hint address passed to mmap will cause the largest
-address space that fits entirely into the hint to be used, unless there is no
-space left in the address space. If there is no space available in the requested
-address space, an address in the next smallest available address space will be
-returned.
-
-For example, in order to obtain 48-bit VA space, a hint address greater than
-:code:`1 << 47` must be provided. Note that this is 47 due to sv48 userspace
-ending at :code:`1 << 47` and the addresses beyond this are reserved for the
-kernel. Similarly, to obtain 57-bit VA space addresses, a hint address greater
-than or equal to :code:`1 << 56` must be provided.
+a hint address to mmap. When a hint address is passed to mmap, the returned
+address will never use more bits than the hint address. For example, if a hint
+address of `1 << 40` is passed to mmap, a valid returned address will never use
+bits 41 through 63. If no mappable addresses are available in that range, mmap
+will return `MAP_FAILED`.
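A minimal userspace sketch of the hint semantics described above (illustrative only, not part of the patch; it assumes a 64-bit RISC-V system, where a hint of 1 << 40 caps the returned address at bit 40):

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/mman.h>

    int main(void)
    {
            /* Hint of 1 << 40: a valid returned address never uses bits 41-63. */
            void *hint = (void *)(uintptr_t)(1UL << 40);
            void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap"); /* no mappable address at or below the hint */
                    return 1;
            }
            printf("mapped at %p\n", p);
            munmap(p, 4096);
            return 0;
    }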
diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst
index a6279df64a9db8..3712d81cb50c67 100644
--- a/Documentation/arch/x86/resctrl.rst
+++ b/Documentation/arch/x86/resctrl.rst
@@ -45,7 +45,7 @@ mount options are:
Enable code/data prioritization in L2 cache allocations.
"mba_MBps":
Enable the MBA Software Controller(mba_sc) to specify MBA
- bandwidth in MBps
+ bandwidth in MiBps
"debug":
Make debug files accessible. Available debug files are annotated with
"Available only with debug option".
@@ -526,7 +526,7 @@ threads start using more cores in an rdtgroup, the actual bandwidth may
increase or vary although user specified bandwidth percentage is same.
In order to mitigate this and make the interface more user friendly,
-resctrl added support for specifying the bandwidth in MBps as well. The
+resctrl added support for specifying the bandwidth in MiBps as well. The
kernel underneath would use a software feedback mechanism or a "Software
Controller(mba_sc)" which reads the actual bandwidth using MBM counters
and adjust the memory bandwidth percentages to ensure::
@@ -573,13 +573,13 @@ Memory b/w domain is L3 cache.
MB:<cache_id0>=bandwidth0;<cache_id1>=bandwidth1;...
-Memory bandwidth Allocation specified in MBps
+Memory bandwidth Allocation specified in MiBps
---------------------------------------------
Memory bandwidth domain is L3 cache.
::
- MB:<cache_id0>=bw_MBps0;<cache_id1>=bw_MBps1;...
+ MB:<cache_id0>=bw_MiBps0;<cache_id1>=bw_MiBps1;...
Slow Memory Bandwidth Allocation (SMBA)
---------------------------------------
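The schemata line above can also be written from a program; a hedged C sketch, assuming resctrl is mounted at /sys/fs/resctrl with the mba_MBps mount option and that a group named "grp0" (a hypothetical name) already exists:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/fs/resctrl/grp0/schemata", "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* Same syntax as shown above: MB:<cache_id>=<bw_MiBps> */
            fprintf(f, "MB:0=2048\n");
            return fclose(f) != 0;
    }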
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
index 8386cfe21532e5..f0eabff863106a 100644
--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
@@ -270,7 +270,7 @@ examples:
port {
ov7251_ep: endpoint {
- data-lanes = <0 1>;
+ data-lanes = <0>;
link-frequencies = /bits/ 64 <240000000 319200000>;
remote-endpoint = <&csiphy3_ep>;
};
diff --git a/Documentation/devicetree/bindings/i2c/st,nomadik-i2c.yaml b/Documentation/devicetree/bindings/i2c/st,nomadik-i2c.yaml
index 16024415a4a740..44c54b162bb107 100644
--- a/Documentation/devicetree/bindings/i2c/st,nomadik-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/st,nomadik-i2c.yaml
@@ -14,9 +14,6 @@ description: The Nomadik I2C host controller began its life in the ST
maintainers:
- Linus Walleij <linus.walleij@linaro.org>
-allOf:
- - $ref: /schemas/i2c/i2c-controller.yaml#
-
# Need a custom select here or 'arm,primecell' will match on lots of nodes
select:
properties:
@@ -24,21 +21,23 @@ select:
contains:
enum:
- st,nomadik-i2c
+ - mobileye,eyeq5-i2c
required:
- compatible
properties:
compatible:
oneOf:
- # The variant found in STn8815
- items:
- const: st,nomadik-i2c
- const: arm,primecell
- # The variant found in DB8500
- items:
- const: stericsson,db8500-i2c
- const: st,nomadik-i2c
- const: arm,primecell
+ - items:
+ - const: mobileye,eyeq5-i2c
+ - const: arm,primecell
reg:
maxItems: 1
@@ -55,7 +54,7 @@ properties:
- items:
- const: mclk
- const: apb_pclk
- # Clock name in DB8500
+ # Clock name in DB8500 or EyeQ5
- items:
- const: i2cclk
- const: apb_pclk
@@ -70,6 +69,16 @@ properties:
minimum: 1
maximum: 400000
+ mobileye,olb:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: Phandle to OLB system controller node.
+ - description: Platform-wide controller ID (integer starting from zero).
+ description:
+ The phandle pointing to OLB system controller node, with the I2C
+ controller index.
+
required:
- compatible
- reg
@@ -79,6 +88,20 @@ required:
unevaluatedProperties: false
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: mobileye,eyeq5-i2c
+ then:
+ required:
+ - mobileye,olb
+ else:
+ properties:
+ mobileye,olb: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
@@ -111,5 +134,19 @@ examples:
clocks = <&i2c0clk>, <&pclki2c0>;
clock-names = "mclk", "apb_pclk";
};
+ - |
+ #include <dt-bindings/interrupt-controller/mips-gic.h>
+ i2c@300000 {
+ compatible = "mobileye,eyeq5-i2c", "arm,primecell";
+ reg = <0x300000 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 1 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <400000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&i2c_ser_clk>, <&i2c_clk>;
+ clock-names = "i2cclk", "apb_pclk";
+ mobileye,olb = <&olb 0>;
+ };
...
diff --git a/Documentation/devicetree/bindings/mtd/partitions/linux,ubi.yaml b/Documentation/devicetree/bindings/mtd/partitions/linux,ubi.yaml
new file mode 100644
index 00000000000000..27e1ac1f252e47
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partitions/linux,ubi.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/partitions/linux,ubi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Unsorted Block Images
+
+description: |
+ UBI ("Unsorted Block Images") is a volume management system for raw
+ flash devices which manages multiple logical volumes on a single
+ physical flash device and spreads the I/O load (i.e. wear-leveling)
+ across the whole flash chip.
+
+maintainers:
+ - Daniel Golle <daniel@makrotopia.org>
+
+allOf:
+ - $ref: partition.yaml#
+
+properties:
+ compatible:
+ const: linux,ubi
+
+ volumes:
+ type: object
+ description: UBI Volumes
+
+ patternProperties:
+ "^ubi-volume-.*$":
+ $ref: /schemas/mtd/partitions/ubi-volume.yaml#
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ reg = <0x0 0x100000>;
+ label = "bootloader";
+ read-only;
+ };
+
+ partition@100000 {
+ reg = <0x100000 0x1ff00000>;
+ label = "ubi";
+ compatible = "linux,ubi";
+
+ volumes {
+ ubi-volume-caldata {
+ volid = <2>;
+ volname = "rf";
+
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ eeprom@0 {
+ reg = <0x0 0x1000>;
+ };
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/partitions/ubi-volume.yaml b/Documentation/devicetree/bindings/mtd/partitions/ubi-volume.yaml
new file mode 100644
index 00000000000000..19736b26056b25
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partitions/ubi-volume.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/partitions/ubi-volume.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: UBI volume
+
+description: |
+ This binding describes a single UBI volume. Volumes can be matched either
+ by their ID or their name, or both.
+
+maintainers:
+ - Daniel Golle <daniel@makrotopia.org>
+
+properties:
+ volid:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Match UBI volume ID
+
+ volname:
+ $ref: /schemas/types.yaml#/definitions/string
+ description:
+ Match UBI volume name
+
+ nvmem-layout:
+ $ref: /schemas/nvmem/layouts/nvmem-layout.yaml#
+ description:
+ This container may reference an NVMEM layout parser.
+
+anyOf:
+ - required:
+ - volid
+
+ - required:
+ - volname
+
+# This is a generic file other bindings inherit from and extend
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml
index b252c3966b8bf0..d87dd50f1a4b57 100644
--- a/Documentation/devicetree/bindings/riscv/cpus.yaml
+++ b/Documentation/devicetree/bindings/riscv/cpus.yaml
@@ -110,7 +110,11 @@ properties:
const: 1
compatible:
- const: riscv,cpu-intc
+ oneOf:
+ - items:
+ - const: andestech,cpu-intc
+ - const: riscv,cpu-intc
+ - const: riscv,cpu-intc
interrupt-controller: true
diff --git a/Documentation/devicetree/bindings/riscv/extensions.yaml b/Documentation/devicetree/bindings/riscv/extensions.yaml
index 63d81dc895e5ce..468c646247aa5c 100644
--- a/Documentation/devicetree/bindings/riscv/extensions.yaml
+++ b/Documentation/devicetree/bindings/riscv/extensions.yaml
@@ -477,5 +477,12 @@ properties:
latency, as ratified in commit 56ed795 ("Update
riscv-crypto-spec-vector.adoc") of riscv-crypto.
+ - const: xandespmu
+ description:
+ The Andes Technology performance monitor extension for counter overflow
+ and privilege mode filtering. For more details, see Counter Related
+ Registers in the AX45MP datasheet.
+ https://www.andestech.com/wp-content/uploads/AX45MP-1C-Rev.-5.0.0-Datasheet.pdf
+
additionalProperties: true
...
diff --git a/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt b/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt
deleted file mode 100644
index 2405e35a1bc0f0..00000000000000
--- a/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-Abracon ABX80X I2C ultra low power RTC/Alarm chip
-
-The Abracon ABX80X family consist of the ab0801, ab0803, ab0804, ab0805, ab1801,
-ab1803, ab1804 and ab1805. The ab0805 is the superset of ab080x and the ab1805
-is the superset of ab180x.
-
-Required properties:
-
- - "compatible": should one of:
- "abracon,abx80x"
- "abracon,ab0801"
- "abracon,ab0803"
- "abracon,ab0804"
- "abracon,ab0805"
- "abracon,ab1801"
- "abracon,ab1803"
- "abracon,ab1804"
- "abracon,ab1805"
- "microcrystal,rv1805"
- Using "abracon,abx80x" will enable chip autodetection.
- - "reg": I2C bus address of the device
-
-Optional properties:
-
-The abx804 and abx805 have a trickle charger that is able to charge the
-connected battery or supercap. Both the following properties have to be defined
-and valid to enable charging:
-
- - "abracon,tc-diode": should be "standard" (0.6V) or "schottky" (0.3V)
- - "abracon,tc-resistor": should be <0>, <3>, <6> or <11>. 0 disables the output
- resistor, the other values are in kOhm.
diff --git a/Documentation/devicetree/bindings/rtc/abracon,abx80x.yaml b/Documentation/devicetree/bindings/rtc/abracon,abx80x.yaml
new file mode 100644
index 00000000000000..355b0598411a62
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/abracon,abx80x.yaml
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/abracon,abx80x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Abracon ABX80X I2C ultra low power RTC/Alarm chip
+
+maintainers:
+ - linux-rtc@vger.kernel.org
+
+properties:
+ compatible:
+ description:
+ The wildcard 'abracon,abx80x' may be used to support a mix
+ of different Abracon RTCs. In this case the driver
+ must perform auto-detection from the ID register.
+ enum:
+ - abracon,abx80x
+ - abracon,ab0801
+ - abracon,ab0803
+ - abracon,ab0804
+ - abracon,ab0805
+ - abracon,ab1801
+ - abracon,ab1803
+ - abracon,ab1804
+ - abracon,ab1805
+ - microcrystal,rv1805
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ abracon,tc-diode:
+ description:
+ Trickle-charge diode type.
+ Required to enable charging backup battery.
+
+ Supported are 'standard' diodes with a 0.6V drop
+ and 'schottky' diodes with a 0.3V drop.
+ $ref: /schemas/types.yaml#/definitions/string
+ enum:
+ - standard
+ - schottky
+
+ abracon,tc-resistor:
+ description:
+ Trickle-charge resistor value in kOhm.
+ Required to enable charging the backup battery.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 3, 6, 11]
+
+dependentRequired:
+ abracon,tc-diode: ["abracon,tc-resistor"]
+ abracon,tc-resistor: ["abracon,tc-diode"]
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: rtc.yaml#
+ - if:
+ properties:
+ compatible:
+ not:
+ contains:
+ enum:
+ - abracon,abx80x
+ - abracon,ab0804
+ - abracon,ab1804
+ - abracon,ab0805
+ - abracon,ab1805
+ then:
+ properties:
+ abracon,tc-diode: false
+ abracon,tc-resistor: false
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@69 {
+ compatible = "abracon,abx80x";
+ reg = <0x69>;
+ abracon,tc-diode = "schottky";
+ abracon,tc-resistor = <3>;
+ interrupts = <44 IRQ_TYPE_EDGE_FALLING>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91sam9260-rtt.yaml b/Documentation/devicetree/bindings/rtc/atmel,at91sam9260-rtt.yaml
index b80b85c394ac5c..a7f6c1d1a08ab9 100644
--- a/Documentation/devicetree/bindings/rtc/atmel,at91sam9260-rtt.yaml
+++ b/Documentation/devicetree/bindings/rtc/atmel,at91sam9260-rtt.yaml
@@ -19,7 +19,9 @@ properties:
- items:
- const: atmel,at91sam9260-rtt
- items:
- - const: microchip,sam9x60-rtt
+ - enum:
+ - microchip,sam9x60-rtt
+ - microchip,sam9x7-rtt
- const: atmel,at91sam9260-rtt
- items:
- const: microchip,sama7g5-rtt
diff --git a/Documentation/devicetree/bindings/rtc/mediatek,mt2712-rtc.yaml b/Documentation/devicetree/bindings/rtc/mediatek,mt2712-rtc.yaml
new file mode 100644
index 00000000000000..75624ddf6d4d66
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/mediatek,mt2712-rtc.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/mediatek,mt2712-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek MT2712 on-SoC RTC
+
+allOf:
+ - $ref: rtc.yaml#
+
+maintainers:
+ - Ran Bi <ran.bi@mediatek.com>
+
+properties:
+ compatible:
+ const: mediatek,mt2712-rtc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - reg
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ rtc@10011000 {
+ compatible = "mediatek,mt2712-rtc";
+ reg = <0x10011000 0x1000>;
+ interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_LOW>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/mediatek,mt7622-rtc.yaml b/Documentation/devicetree/bindings/rtc/mediatek,mt7622-rtc.yaml
new file mode 100644
index 00000000000000..e74dfc161cfc66
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/mediatek,mt7622-rtc.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/mediatek,mt7622-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek MT7622 on-SoC RTC
+
+allOf:
+ - $ref: rtc.yaml#
+
+maintainers:
+ - Sean Wang <sean.wang@mediatek.com>
+
+properties:
+ compatible:
+ items:
+ - const: mediatek,mt7622-rtc
+ - const: mediatek,soc-rtc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: rtc
+
+required:
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/mt7622-clk.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ rtc@10212800 {
+ compatible = "mediatek,mt7622-rtc", "mediatek,soc-rtc";
+ reg = <0x10212800 0x200>;
+ interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_RTC>;
+ clock-names = "rtc";
+ };
diff --git a/Documentation/devicetree/bindings/rtc/rtc-mt2712.txt b/Documentation/devicetree/bindings/rtc/rtc-mt2712.txt
deleted file mode 100644
index c33d87e5e753fd..00000000000000
--- a/Documentation/devicetree/bindings/rtc/rtc-mt2712.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-Device-Tree bindings for MediaTek SoC based RTC
-
-Required properties:
-- compatible : Should be "mediatek,mt2712-rtc" : for MT2712 SoC
-- reg : Specifies base physical address and size of the registers;
-- interrupts : Should contain the interrupt for RTC alarm;
-
-Example:
-
-rtc: rtc@10011000 {
- compatible = "mediatek,mt2712-rtc";
- reg = <0 0x10011000 0 0x1000>;
- interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_LOW>;
-};
diff --git a/Documentation/devicetree/bindings/rtc/rtc-mt7622.txt b/Documentation/devicetree/bindings/rtc/rtc-mt7622.txt
deleted file mode 100644
index 09fe8f51476f86..00000000000000
--- a/Documentation/devicetree/bindings/rtc/rtc-mt7622.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-Device-Tree bindings for MediaTek SoC based RTC
-
-Required properties:
-- compatible : Should be
- "mediatek,mt7622-rtc", "mediatek,soc-rtc" : for MT7622 SoC
-- reg : Specifies base physical address and size of the registers;
-- interrupts : Should contain the interrupt for RTC alarm;
-- clocks : Specifies list of clock specifiers, corresponding to
- entries in clock-names property;
-- clock-names : Should contain "rtc" entries
-
-Example:
-
-rtc: rtc@10212800 {
- compatible = "mediatek,mt7622-rtc",
- "mediatek,soc-rtc";
- reg = <0 0x10212800 0 0x200>;
- interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_LOW>;
- clocks = <&topckgen CLK_TOP_RTC>;
- clock-names = "rtc";
-};
diff --git a/Documentation/devicetree/bindings/rtc/xlnx,zynqmp-rtc.yaml b/Documentation/devicetree/bindings/rtc/xlnx,zynqmp-rtc.yaml
index d1f5eb996dba06..01cc90fee81e5e 100644
--- a/Documentation/devicetree/bindings/rtc/xlnx,zynqmp-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/xlnx,zynqmp-rtc.yaml
@@ -18,7 +18,13 @@ allOf:
properties:
compatible:
- const: xlnx,zynqmp-rtc
+ oneOf:
+ - const: xlnx,zynqmp-rtc
+ - items:
+ - enum:
+ - xlnx,versal-rtc
+ - xlnx,versal-net-rtc
+ - const: xlnx,zynqmp-rtc
reg:
maxItems: 1
@@ -48,6 +54,9 @@ properties:
default: 0x198233
deprecated: true
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml b/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml
index 7f9d8c7a635a6f..99a536601cc7e6 100644
--- a/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml
+++ b/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml
@@ -185,11 +185,12 @@ properties:
gpio-ranges:
items:
- - description: A phandle to the CODEC pinctrl node
- minimum: 0
- - const: 0
- - const: 0
- - const: 3
+ - items:
+ - description: A phandle to the CODEC pinctrl node
+ minimum: 0
+ - const: 0
+ - const: 0
+ - const: 3
patternProperties:
"-state$":
diff --git a/Documentation/devicetree/bindings/timer/cdns,ttc.yaml b/Documentation/devicetree/bindings/timer/cdns,ttc.yaml
index dbba780c9b0213..da342464d32ed2 100644
--- a/Documentation/devicetree/bindings/timer/cdns,ttc.yaml
+++ b/Documentation/devicetree/bindings/timer/cdns,ttc.yaml
@@ -32,12 +32,23 @@ properties:
description: |
Bit width of the timer, necessary if not 16.
+ "#pwm-cells":
+ const: 3
+
required:
- compatible
- reg
- - interrupts
- clocks
+allOf:
+ - if:
+ not:
+ required:
+ - "#pwm-cells"
+ then:
+ required:
+ - interrupts
+
additionalProperties: false
examples:
@@ -50,3 +61,12 @@ examples:
clocks = <&cpu_clk 3>;
timer-width = <32>;
};
+
+ - |
+ pwm: pwm@f8002000 {
+ compatible = "cdns,ttc";
+ reg = <0xf8002000 0x1000>;
+ clocks = <&cpu_clk 3>;
+ timer-width = <32>;
+ #pwm-cells = <3>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml b/Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml
index 2b9653dafab8f3..891cca00952815 100644
--- a/Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml
+++ b/Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml
@@ -18,7 +18,9 @@ description: |
properties:
compatible:
- const: nxp,sysctr-timer
+ enum:
+ - nxp,imx95-sysctr-timer
+ - nxp,sysctr-timer
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/timer/ralink,cevt-systick.yaml b/Documentation/devicetree/bindings/timer/ralink,cevt-systick.yaml
new file mode 100644
index 00000000000000..59d97feddf4e53
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/ralink,cevt-systick.yaml
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/ralink,cevt-systick.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: System tick counter present in Ralink family SoCs
+
+maintainers:
+ - Sergio Paracuellos <sergio.paracuellos@gmail.com>
+
+properties:
+ compatible:
+ const: ralink,cevt-systick
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ systick@d00 {
+ compatible = "ralink,cevt-systick";
+ reg = <0xd00 0x10>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <7>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/timer/renesas,ostm.yaml b/Documentation/devicetree/bindings/timer/renesas,ostm.yaml
index 7207929e5cd6a7..8b06a681764e3b 100644
--- a/Documentation/devicetree/bindings/timer/renesas,ostm.yaml
+++ b/Documentation/devicetree/bindings/timer/renesas,ostm.yaml
@@ -23,7 +23,7 @@ properties:
- enum:
- renesas,r7s72100-ostm # RZ/A1H
- renesas,r7s9210-ostm # RZ/A2M
- - renesas,r9a07g043-ostm # RZ/G2UL
+ - renesas,r9a07g043-ostm # RZ/G2UL and RZ/Five
- renesas,r9a07g044-ostm # RZ/G2{L,LC}
- renesas,r9a07g054-ostm # RZ/V2L
- const: renesas,ostm # Generic
diff --git a/Documentation/devicetree/bindings/timer/renesas,tmu.yaml b/Documentation/devicetree/bindings/timer/renesas,tmu.yaml
index a67e427a9e7e22..84bbe15028a1de 100644
--- a/Documentation/devicetree/bindings/timer/renesas,tmu.yaml
+++ b/Documentation/devicetree/bindings/timer/renesas,tmu.yaml
@@ -46,7 +46,19 @@ properties:
interrupts:
minItems: 2
- maxItems: 3
+ items:
+ - description: Underflow interrupt, channel 0
+ - description: Underflow interrupt, channel 1
+ - description: Underflow interrupt, channel 2
+ - description: Input capture interrupt, channel 2
+
+ interrupt-names:
+ minItems: 2
+ items:
+ - const: tuni0
+ - const: tuni1
+ - const: tuni2
+ - const: ticpi2
clocks:
maxItems: 1
@@ -100,7 +112,9 @@ examples:
reg = <0xffd80000 0x30>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
clocks = <&mstp0_clks R8A7779_CLK_TMU0>;
clock-names = "fck";
power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
diff --git a/Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml b/Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml
index 829bd2227f7c9c..774b7992a0cafc 100644
--- a/Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml
+++ b/Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml
@@ -26,6 +26,7 @@ properties:
- items:
- enum:
- axis,artpec8-mct
+ - google,gs101-mct
- samsung,exynos3250-mct
- samsung,exynos5250-mct
- samsung,exynos5260-mct
@@ -127,6 +128,7 @@ allOf:
contains:
enum:
- axis,artpec8-mct
+ - google,gs101-mct
- samsung,exynos5260-mct
- samsung,exynos5420-mct
- samsung,exynos5433-mct
diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
index d96b778b87ed8e..7425d2b994a399 100644
--- a/Documentation/features/sched/membarrier-sync-core/arch-support.txt
+++ b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
@@ -10,6 +10,22 @@
# Rely on implicit context synchronization as a result of exception return
# when returning from IPI handler, and when returning to user-space.
#
+# * riscv
+#
+# riscv uses xRET as return from interrupt and to return to user-space.
+#
+# Given that xRET is not core serializing, we rely on FENCE.I for providing
+# core serialization:
+#
+# - by calling sync_core_before_usermode() on return from interrupt (cf.
+# ipi_sync_core()),
+#
+# - via switch_mm() and sync_core_before_usermode() (respectively, for
+# uthread->uthread and kthread->uthread transitions) before returning
+# to user-space.
+#
+# The serialization in switch_mm() is activated by prepare_sync_core_cmd().
+#
# * x86
#
# x86-32 uses IRET as return from interrupt, which takes care of the IPI.
@@ -43,7 +59,7 @@
| openrisc: | TODO |
| parisc: | TODO |
| powerpc: | ok |
- | riscv: | TODO |
+ | riscv: | ok |
| s390: | ok |
| sh: | TODO |
| sparc: | TODO |
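The riscv scheme above boils down to issuing FENCE.I before returning to a user thread whose text may have changed; a simplified sketch of the primitive (an illustration, not the exact asm/sync_core.h added by this series):

    /* FENCE.I synchronizes the instruction stream with prior data stores. */
    static inline void riscv_sync_core(void)
    {
            __asm__ __volatile__ ("fence.i" : : : "memory");
    }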
diff --git a/Documentation/mm/page_cache.rst b/Documentation/mm/page_cache.rst
index 75eba7c431b2f1..138d61f869df09 100644
--- a/Documentation/mm/page_cache.rst
+++ b/Documentation/mm/page_cache.rst
@@ -3,3 +3,13 @@
==========
Page Cache
==========
+
+The page cache is the primary way that the user and the rest of the kernel
+interact with filesystems. It can be bypassed (e.g. with O_DIRECT),
+but normal reads, writes and mmaps go through the page cache.
+
+Folios
+======
+
+The folio is the unit of memory management within the page cache.
+Operations
diff --git a/Documentation/scheduler/index.rst b/Documentation/scheduler/index.rst
index 3170747226f6da..43bd8a145b7a9b 100644
--- a/Documentation/scheduler/index.rst
+++ b/Documentation/scheduler/index.rst
@@ -7,6 +7,7 @@ Scheduler
completion
+ membarrier
sched-arch
sched-bwc
sched-deadline
diff --git a/Documentation/scheduler/membarrier.rst b/Documentation/scheduler/membarrier.rst
new file mode 100644
index 00000000000000..2387804b1c6331
--- /dev/null
+++ b/Documentation/scheduler/membarrier.rst
@@ -0,0 +1,39 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+========================
+membarrier() System Call
+========================
+
+MEMBARRIER_CMD_{PRIVATE,GLOBAL}_EXPEDITED - Architecture requirements
+=====================================================================
+
+Memory barriers before updating rq->curr
+----------------------------------------
+
+The commands MEMBARRIER_CMD_PRIVATE_EXPEDITED and MEMBARRIER_CMD_GLOBAL_EXPEDITED
+require each architecture to have a full memory barrier after coming from
+user-space, before updating rq->curr. This barrier is implied by the sequence
+rq_lock(); smp_mb__after_spinlock() in __schedule(). The barrier matches a full
+barrier in the proximity of the membarrier system call exit, cf.
+membarrier_{private,global}_expedited().
+
+Memory barriers after updating rq->curr
+---------------------------------------
+
+The commands MEMBARRIER_CMD_PRIVATE_EXPEDITED and MEMBARRIER_CMD_GLOBAL_EXPEDITED
+require each architecture to have a full memory barrier after updating rq->curr,
+before returning to user-space. The schemes providing this barrier on the various
+architectures are as follows.
+
+ - alpha, arc, arm, hexagon, mips rely on the full barrier implied by
+ spin_unlock() in finish_lock_switch().
+
+ - arm64 relies on the full barrier implied by switch_to().
+
+ - powerpc, riscv, s390, sparc, x86 rely on the full barrier implied by
+ switch_mm(), if mm is not NULL; they rely on the full barrier implied
+ by mmdrop(), otherwise. On powerpc and riscv, switch_mm() relies on
+ membarrier_arch_switch_mm().
+
+The barrier matches a full barrier in the proximity of the membarrier system call
+entry, cf. membarrier_{private,global}_expedited().
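For reference, a hedged userspace sketch of the command pair these barriers serve, using the documented membarrier(2) interface (registration first, then the expedited barrier; error handling kept minimal):

    #include <linux/membarrier.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int membarrier(int cmd, unsigned int flags, int cpu_id)
    {
            return syscall(__NR_membarrier, cmd, flags, cpu_id);
    }

    int main(void)
    {
            if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0)) {
                    perror("register");
                    return 1;
            }
            /* On return, all running sibling threads have executed a full barrier. */
            if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0)) {
                    perror("membarrier");
                    return 1;
            }
            return 0;
    }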
diff --git a/Documentation/scheduler/sched-design-CFS.rst b/Documentation/scheduler/sched-design-CFS.rst
index 6cffffe265006e..e030876fbd6892 100644
--- a/Documentation/scheduler/sched-design-CFS.rst
+++ b/Documentation/scheduler/sched-design-CFS.rst
@@ -100,6 +100,9 @@ which can be used to tune the scheduler from "desktop" (i.e., low latencies) to
"server" (i.e., good batching) workloads. It defaults to a setting suitable
for desktop workloads. SCHED_BATCH is handled by the CFS scheduler module too.
+In case CONFIG_HZ results in base_slice_ns < TICK_NSEC, the value of
+base_slice_ns will have little to no impact on the workloads.
+
Due to its design, the CFS scheduler is not prone to any of the "attacks" that
exist today against the heuristics of the stock scheduler: fiftyp.c, thud.c,
chew.c, ring-test.c, massive_intr.c all work fine and do not impact
diff --git a/Documentation/spi/spi-lm70llp.rst b/Documentation/spi/spi-lm70llp.rst
index 2f20e5b405e66a..ff98ddc76a7478 100644
--- a/Documentation/spi/spi-lm70llp.rst
+++ b/Documentation/spi/spi-lm70llp.rst
@@ -6,7 +6,7 @@ Supported board/chip:
* National Semiconductor LM70 LLP evaluation board
- Datasheet: http://www.national.com/pf/LM/LM70.html
+ Datasheet: https://www.ti.com/lit/gpn/lm70
Author:
Kaiwan N Billimoria <kaiwan@designergraphix.com>
@@ -28,7 +28,7 @@ Hardware Interfacing
The schematic for this particular board (the LM70EVAL-LLP) is
available (on page 4) here:
- http://www.national.com/appinfo/tempsensors/files/LM70LLPEVALmanual.pdf
+ https://download.datasheets.com/pdfs/documentation/nat/kit&board/lm70llpevalmanual.pdf
The hardware interfacing on the LM70 LLP eval board is as follows:
diff --git a/Documentation/spi/spidev.rst b/Documentation/spi/spidev.rst
index 369c657ba4358b..e08b301ad24ac6 100644
--- a/Documentation/spi/spidev.rst
+++ b/Documentation/spi/spidev.rst
@@ -61,7 +61,7 @@ the spidev driver failing to probe.
Sysfs also supports userspace driven binding/unbinding of drivers to
devices that do not bind automatically using one of the tables above.
-To make the spidev driver bind to such a device, use the following:
+To make the spidev driver bind to such a device, use the following::
echo spidev > /sys/bus/spi/devices/spiB.C/driver_override
echo spiB.C > /sys/bus/spi/drivers/spidev/bind
diff --git a/MAINTAINERS b/MAINTAINERS
index debe23c287a060..6a233e1a3cf2e8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2377,8 +2377,8 @@ M: Sean Wang <sean.wang@mediatek.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: Documentation/devicetree/bindings/rtc/rtc-mt2712.txt
-F: Documentation/devicetree/bindings/rtc/rtc-mt7622.txt
+F: Documentation/devicetree/bindings/rtc/mediatek,mt2712-rtc.yaml
+F: Documentation/devicetree/bindings/rtc/mediatek,mt7622-rtc.yaml
F: drivers/rtc/rtc-mt2712.c
F: drivers/rtc/rtc-mt6397.c
F: drivers/rtc/rtc-mt7622.c
@@ -2617,6 +2617,7 @@ F: drivers/pci/controller/dwc/pcie-qcom.c
F: drivers/phy/qualcomm/
F: drivers/power/*/msm*
F: drivers/reset/reset-qcom-*
+F: drivers/rtc/rtc-pm8xxx.c
F: drivers/spi/spi-geni-qcom.c
F: drivers/spi/spi-qcom-qspi.c
F: drivers/spi/spi-qup.c
@@ -6171,7 +6172,6 @@ F: include/uapi/linux/dm-*.h
DEVICE-MAPPER VDO TARGET
M: Matthew Sakai <msakai@redhat.com>
-M: dm-devel@lists.linux.dev
L: dm-devel@lists.linux.dev
S: Maintained
F: Documentation/admin-guide/device-mapper/vdo*.rst
@@ -7939,6 +7939,7 @@ M: Gao Xiang <xiang@kernel.org>
M: Chao Yu <chao@kernel.org>
R: Yue Hu <huyue2@coolpad.com>
R: Jeffle Xu <jefflexu@linux.alibaba.com>
+R: Sandeep Dhavale <dhavale@google.com>
L: linux-erofs@lists.ozlabs.org
S: Maintained
W: https://erofs.docs.kernel.org
@@ -14133,7 +14134,9 @@ M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
M: "Paul E. McKenney" <paulmck@kernel.org>
L: linux-kernel@vger.kernel.org
S: Supported
-F: arch/powerpc/include/asm/membarrier.h
+F: Documentation/scheduler/membarrier.rst
+F: arch/*/include/asm/membarrier.h
+F: arch/*/include/asm/sync_core.h
F: include/uapi/linux/membarrier.h
F: kernel/sched/membarrier.c
@@ -22599,6 +22602,7 @@ F: include/uapi/misc/uacce/
UBI FILE SYSTEM (UBIFS)
M: Richard Weinberger <richard@nod.at>
+R: Zhihao Cheng <chengzhihao1@huawei.com>
L: linux-mtd@lists.infradead.org
S: Supported
W: http://www.linux-mtd.infradead.org/doc/ubifs.html
@@ -22744,6 +22748,7 @@ F: drivers/ufs/host/ufs-renesas.c
UNSORTED BLOCK IMAGES (UBI)
M: Richard Weinberger <richard@nod.at>
+R: Zhihao Cheng <chengzhihao1@huawei.com>
L: linux-mtd@lists.infradead.org
S: Supported
W: http://www.linux-mtd.infradead.org/
diff --git a/Makefile b/Makefile
index 5e09b53b485016..763b6792d3d513 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
-PATCHLEVEL = 8
+PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 59dee290d94b30..9f066785bb71d9 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -799,7 +799,7 @@ config CFI_CLANG
depends on ARCH_SUPPORTS_CFI_CLANG
depends on $(cc-option,-fsanitize=kcfi)
help
- This option enables Clang’s forward-edge Control Flow Integrity
+ This option enables Clang's forward-edge Control Flow Integrity
(CFI) checking, where the compiler injects a runtime check to each
indirect function call to ensure the target is a valid function with
the correct static type. This restricts possible call targets and
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 702d97a9c30471..b14aed3a17abb6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -505,8 +505,8 @@ source "arch/arm/mm/Kconfig"
config IWMMXT
bool "Enable iWMMXt support"
- depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 || CPU_PJ4B
- default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4 || CPU_PJ4B
+ depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK
+ default y if PXA27x || PXA3xx || ARCH_MMP
help
Enable support for iWMMXt context switching at run time if
running on a CPU that supports it.
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f1fc278081d035..7f47b4f335c3d2 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -90,9 +90,6 @@ config BACKTRACE_VERBOSE
In most cases, say N here, unless you are intending to debug the
kernel and have access to the kernel binary image.
-config FRAME_POINTER
- bool
-
config DEBUG_USER
bool "Verbose user fault messages"
help
diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h
new file mode 100644
index 00000000000000..2189e507c8e08b
--- /dev/null
+++ b/arch/arm/include/asm/mman.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMAN_H__
+#define __ASM_MMAN_H__
+
+#include <asm/system_info.h>
+#include <uapi/asm/mman.h>
+
+static inline bool arch_memory_deny_write_exec_supported(void)
+{
+ return cpu_architecture() >= CPU_ARCH_ARMv6;
+}
+#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
+
+#endif /* __ASM_MMAN_H__ */
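
The new hook feeds the prctl(PR_SET_MDWE) "memory-deny-write-execute" policy: on ARM cores older than v6 the policy cannot be enforced sensibly, so the kernel can now refuse to enable it there. Below is a hedged userspace sketch — not part of the patch — of what the hook gates; the PR_* fallback defines are assumptions for older headers.

	/*
	 * Hedged sketch: exercising the MDWE policy this hook gates.
	 * PR_SET_MDWE (65) and PR_MDWE_REFUSE_EXEC_GAIN come from recent
	 * <linux/prctl.h>; the fallback values below are assumptions.
	 */
	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <sys/prctl.h>

	#ifndef PR_SET_MDWE
	#define PR_SET_MDWE		65
	#define PR_MDWE_REFUSE_EXEC_GAIN	(1UL << 0)
	#endif

	int main(void)
	{
		if (prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0, 0, 0)) {
			/* On pre-v6 ARM the new hook makes this fail. */
			perror("PR_SET_MDWE");
			return 1;
		}

		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/* Gaining PROT_EXEC on a writable mapping is now denied. */
		if (mprotect(p, 4096, PROT_READ | PROT_EXEC))
			printf("mprotect denied as expected: %s\n", strerror(errno));
		return 0;
	}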
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 7f44e88d1f25bc..14a38cc67e0bc9 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -10,6 +10,7 @@
#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
+#include <linux/bitfield.h>
#include <linux/types.h>
struct pt_regs {
@@ -35,8 +36,8 @@ struct svc_pt_regs {
#ifndef CONFIG_CPU_V7M
#define isa_mode(regs) \
- ((((regs)->ARM_cpsr & PSR_J_BIT) >> (__ffs(PSR_J_BIT) - 1)) | \
- (((regs)->ARM_cpsr & PSR_T_BIT) >> (__ffs(PSR_T_BIT))))
+ (FIELD_GET(PSR_J_BIT, (regs)->ARM_cpsr) << 1 | \
+ FIELD_GET(PSR_T_BIT, (regs)->ARM_cpsr))
#else
#define isa_mode(regs) 1 /* Thumb */
#endif
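
FIELD_GET() from <linux/bitfield.h> extracts the field selected by a mask and right-justifies it, which lets the J and T bits compose into a two-bit ISA mode without hand-counted shifts (the old code's `__ffs(PSR_J_BIT) - 1` arithmetic was easy to get wrong). A minimal userspace sketch of the idiom, with a simplified FIELD_GET() stand-in — the real kernel macro adds compile-time type checking:

	#include <stdio.h>

	#define PSR_T_BIT	0x00000020u	/* values mirror the ARM UAPI headers */
	#define PSR_J_BIT	0x01000000u

	/* Simplified stand-in: (mask & -mask) is the mask's lowest set bit,
	 * so the division right-justifies any contiguous field. */
	#define FIELD_GET(mask, val)	(((val) & (mask)) / ((mask) & -(mask)))

	static unsigned int isa_mode(unsigned int cpsr)
	{
		/* 0 = ARM, 1 = Thumb, 2 = Jazelle: J and T as a 2-bit value */
		return FIELD_GET(PSR_J_BIT, cpsr) << 1 | FIELD_GET(PSR_T_BIT, cpsr);
	}

	int main(void)
	{
		printf("%u\n", isa_mode(PSR_T_BIT));	/* 1: Thumb */
		printf("%u\n", isa_mode(PSR_J_BIT));	/* 2: Jazelle */
		return 0;
	}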
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 6a9de826ffd3c0..89a77e3f51d20a 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -76,8 +76,6 @@ obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o
obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
-obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
-obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index a0218c4867b9b6..4a335d3c59690b 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -18,18 +18,6 @@
#include <asm/assembler.h>
#include "iwmmxt.h"
-#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
-#define PJ4(code...) code
-#define XSC(code...)
-#elif defined(CONFIG_CPU_MOHAWK) || \
- defined(CONFIG_CPU_XSC3) || \
- defined(CONFIG_CPU_XSCALE)
-#define PJ4(code...)
-#define XSC(code...) code
-#else
-#error "Unsupported iWMMXt architecture"
-#endif
-
#define MMX_WR0 (0x00)
#define MMX_WR1 (0x08)
#define MMX_WR2 (0x10)
@@ -81,17 +69,13 @@ ENDPROC(iwmmxt_undef_handler)
ENTRY(iwmmxt_task_enable)
inc_preempt_count r10, r3
- XSC(mrc p15, 0, r2, c15, c1, 0)
- PJ4(mrc p15, 0, r2, c1, c0, 2)
+ mrc p15, 0, r2, c15, c1, 0
@ CP0 and CP1 accessible?
- XSC(tst r2, #0x3)
- PJ4(tst r2, #0xf)
+ tst r2, #0x3
bne 4f @ if so no business here
@ enable access to CP0 and CP1
- XSC(orr r2, r2, #0x3)
- XSC(mcr p15, 0, r2, c15, c1, 0)
- PJ4(orr r2, r2, #0xf)
- PJ4(mcr p15, 0, r2, c1, c0, 2)
+ orr r2, r2, #0x3
+ mcr p15, 0, r2, c15, c1, 0
ldr r3, =concan_owner
ldr r2, [r0, #S_PC] @ current task pc value
@@ -218,12 +202,9 @@ ENTRY(iwmmxt_task_disable)
bne 1f @ no: quit
@ enable access to CP0 and CP1
- XSC(mrc p15, 0, r4, c15, c1, 0)
- XSC(orr r4, r4, #0x3)
- XSC(mcr p15, 0, r4, c15, c1, 0)
- PJ4(mrc p15, 0, r4, c1, c0, 2)
- PJ4(orr r4, r4, #0xf)
- PJ4(mcr p15, 0, r4, c1, c0, 2)
+ mrc p15, 0, r4, c15, c1, 0
+ orr r4, r4, #0x3
+ mcr p15, 0, r4, c15, c1, 0
mov r0, #0 @ nothing to load
str r0, [r3] @ no more current owner
@@ -232,10 +213,8 @@ ENTRY(iwmmxt_task_disable)
bl concan_save
@ disable access to CP0 and CP1
- XSC(bic r4, r4, #0x3)
- XSC(mcr p15, 0, r4, c15, c1, 0)
- PJ4(bic r4, r4, #0xf)
- PJ4(mcr p15, 0, r4, c1, c0, 2)
+ bic r4, r4, #0x3
+ mcr p15, 0, r4, c15, c1, 0
mrc p15, 0, r2, c2, c0, 0
mov r2, r2 @ cpwait
@@ -330,11 +309,9 @@ ENDPROC(iwmmxt_task_restore)
*/
ENTRY(iwmmxt_task_switch)
- XSC(mrc p15, 0, r1, c15, c1, 0)
- PJ4(mrc p15, 0, r1, c1, c0, 2)
+ mrc p15, 0, r1, c15, c1, 0
@ CP0 and CP1 accessible?
- XSC(tst r1, #0x3)
- PJ4(tst r1, #0xf)
+ tst r1, #0x3
bne 1f @ yes: block them for next task
ldr r2, =concan_owner
@@ -344,10 +321,8 @@ ENTRY(iwmmxt_task_switch)
retne lr @ no: leave Concan disabled
1: @ flip Concan access
- XSC(eor r1, r1, #0x3)
- XSC(mcr p15, 0, r1, c15, c1, 0)
- PJ4(eor r1, r1, #0xf)
- PJ4(mcr p15, 0, r1, c1, c0, 2)
+ eor r1, r1, #0x3
+ mcr p15, 0, r1, c15, c1, 0
mrc p15, 0, r1, c2, c0, 0
sub pc, lr, r1, lsr #32 @ cpwait and return
diff --git a/arch/arm/kernel/pj4-cp0.c b/arch/arm/kernel/pj4-cp0.c
deleted file mode 100644
index 4bca8098c4ff55..00000000000000
--- a/arch/arm/kernel/pj4-cp0.c
+++ /dev/null
@@ -1,135 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/arch/arm/kernel/pj4-cp0.c
- *
- * PJ4 iWMMXt coprocessor context switching and handling
- *
- * Copyright (c) 2010 Marvell International Inc.
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <asm/thread_notify.h>
-#include <asm/cputype.h>
-
-static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
-{
- struct thread_info *thread = t;
-
- switch (cmd) {
- case THREAD_NOTIFY_FLUSH:
- /*
- * flush_thread() zeroes thread->fpstate, so no need
- * to do anything here.
- *
- * FALLTHROUGH: Ensure we don't try to overwrite our newly
- * initialised state information on the first fault.
- */
-
- case THREAD_NOTIFY_EXIT:
- iwmmxt_task_release(thread);
- break;
-
- case THREAD_NOTIFY_SWITCH:
- iwmmxt_task_switch(thread);
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block __maybe_unused iwmmxt_notifier_block = {
- .notifier_call = iwmmxt_do,
-};
-
-
-static u32 __init pj4_cp_access_read(void)
-{
- u32 value;
-
- __asm__ __volatile__ (
- "mrc p15, 0, %0, c1, c0, 2\n\t"
- : "=r" (value));
- return value;
-}
-
-static void __init pj4_cp_access_write(u32 value)
-{
- u32 temp;
-
- __asm__ __volatile__ (
- "mcr p15, 0, %1, c1, c0, 2\n\t"
-#ifdef CONFIG_THUMB2_KERNEL
- "isb\n\t"
-#else
- "mrc p15, 0, %0, c1, c0, 2\n\t"
- "mov %0, %0\n\t"
- "sub pc, pc, #4\n\t"
-#endif
- : "=r" (temp) : "r" (value));
-}
-
-static int __init pj4_get_iwmmxt_version(void)
-{
- u32 cp_access, wcid;
-
- cp_access = pj4_cp_access_read();
- pj4_cp_access_write(cp_access | 0xf);
-
- /* check if coprocessor 0 and 1 are available */
- if ((pj4_cp_access_read() & 0xf) != 0xf) {
- pj4_cp_access_write(cp_access);
- return -ENODEV;
- }
-
- /* read iWMMXt coprocessor id register p1, c0 */
- __asm__ __volatile__ ("mrc p1, 0, %0, c0, c0, 0\n" : "=r" (wcid));
-
- pj4_cp_access_write(cp_access);
-
- /* iWMMXt v1 */
- if ((wcid & 0xffffff00) == 0x56051000)
- return 1;
- /* iWMMXt v2 */
- if ((wcid & 0xffffff00) == 0x56052000)
- return 2;
-
- return -EINVAL;
-}
-
-/*
- * Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
- * switch code handle iWMMXt context switching.
- */
-static int __init pj4_cp0_init(void)
-{
- u32 __maybe_unused cp_access;
- int vers;
-
- if (!cpu_is_pj4())
- return 0;
-
- vers = pj4_get_iwmmxt_version();
- if (vers < 0)
- return 0;
-
-#ifndef CONFIG_IWMMXT
- pr_info("PJ4 iWMMXt coprocessor detected, but kernel support is missing.\n");
-#else
- cp_access = pj4_cp_access_read() & ~0xf;
- pj4_cp_access_write(cp_access);
-
- pr_info("PJ4 iWMMXt v%d coprocessor enabled.\n", vers);
- elf_hwcap |= HWCAP_IWMMXT;
- thread_register_notifier(&iwmmxt_notifier_block);
- register_iwmmxt_undef_handler();
-#endif
-
- return 0;
-}
-
-late_initcall(pj4_cp0_init);
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 3bad79db5d6e80..72c82a4d63ac20 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -220,7 +220,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
unsigned int fp, mode;
int ok = 1;
- printk("%sBacktrace: ", loglvl);
+ printk("%sCall trace: ", loglvl);
if (!tsk)
tsk = current;
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 9d2192156087be..f60547dadc9398 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -524,6 +524,8 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
{
struct stackframe frame;
+ printk("%sCall trace: ", loglvl);
+
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
if (!tsk)
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 07565b593ed681..439dc6a26bb982 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -25,6 +25,13 @@
#include "fault.h"
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+{
+ unsigned long addr = (unsigned long)unsafe_src;
+
+ return addr >= TASK_SIZE && ULONG_MAX - addr >= size;
+}
+
#ifdef CONFIG_MMU
/*
@@ -588,6 +595,7 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
return;
+ pr_alert("8<--- cut here ---\n");
pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
inf->name, ifsr, addr);
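
In the copy_from_kernel_nofault_allowed() hunk above, `ULONG_MAX - addr >= size` is the overflow-safe spelling of "addr + size does not wrap": testing `addr + size` directly would itself overflow for hostile inputs. A minimal sketch of the check; the TASK_SIZE value here is illustrative, not taken from any particular config:

	#include <limits.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define TASK_SIZE	0xbf000000UL	/* illustrative 32-bit user/kernel split */

	static bool range_ok(unsigned long addr, unsigned long size)
	{
		/* kernel-side address, and addr + size must not wrap */
		return addr >= TASK_SIZE && ULONG_MAX - addr >= size;
	}

	int main(void)
	{
		printf("%d\n", range_ok(0xc0000000UL, 0x100));		/* 1: kernel range */
		printf("%d\n", range_ok(0x1000UL, 0x100));		/* 0: user address */
		printf("%d\n", range_ok(ULONG_MAX - 0xff, 0x1000));	/* 0: would wrap */
		return 0;
	}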
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index d19d140a10c7d5..0749cf8a66371b 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -296,6 +296,9 @@ void __sync_icache_dcache(pte_t pteval)
return;
folio = page_folio(pfn_to_page(pfn));
+ if (folio_test_reserved(folio))
+ return;
+
if (cache_is_vipt_aliasing())
mapping = folio_flush_mapping(folio);
else
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 4c3d78691279d3..e8c6f4be0ce198 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -418,7 +418,7 @@ static void set_section_perms(struct section_perm *perms, int n, bool set,
}
-/**
+/*
* update_sections_early intended to be called only through stop_machine
* framework and executed by only one CPU while all other CPUs will spin and
* wait, so no locking is required in this function.
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 77e05d4959f289..7b11c98b3e84bf 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -120,6 +120,7 @@ config ARM64
select CLONE_BACKWARDS
select COMMON_CLK
select CPU_PM if (SUSPEND || CPU_IDLE)
+ select CPUMASK_OFFSTACK if NR_CPUS > 256
select CRC32
select DCACHE_WORD_ACCESS
select DYNAMIC_FTRACE if FUNCTION_TRACER
@@ -1425,7 +1426,7 @@ config SCHED_SMT
config NR_CPUS
int "Maximum number of CPUs (2-4096)"
range 2 4096
- default "256"
+ default "512"
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
index 1140051a0c455d..1150b77fa281ce 100644
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -63,6 +63,7 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
ELF_DETAILS
+ .hexagon.attributes 0 : { *(.hexagon.attributes) }
DISCARDS
}
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index c139d0d728029e..a5f300ec6f2808 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -15,6 +15,7 @@ config LOONGARCH
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_CPU_FINALIZE_INIT
+ select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KCOV
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
@@ -104,6 +105,7 @@ config LOONGARCH
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD
select HAVE_ASM_MODVERSIONS
select HAVE_CONTEXT_TRACKING_USER
select HAVE_C_RECORDMCOUNT
@@ -133,20 +135,24 @@ config LOONGARCH
select HAVE_KPROBES
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
+ select HAVE_LIVEPATCH
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
+ select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_PREEMPT_DYNAMIC_KEY
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RELIABLE_STACKTRACE if UNWINDER_ORC
select HAVE_RETHOOK
select HAVE_RSEQ
select HAVE_RUST
select HAVE_SAMPLE_FTRACE_DIRECT
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
select HAVE_SETUP_PER_CPU_AREA if NUMA
+ select HAVE_STACK_VALIDATION if HAVE_OBJTOOL
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_TIF_NOHZ
@@ -624,6 +630,8 @@ config RANDOMIZE_BASE_MAX_OFFSET
This is limited by the size of the lower address memory, 256MB.
+source "kernel/livepatch/Kconfig"
+
endmenu
config ARCH_SELECT_MEMORY_MODEL
diff --git a/arch/loongarch/Kconfig.debug b/arch/loongarch/Kconfig.debug
index 8d36aab530083b..98d60630c3d4b4 100644
--- a/arch/loongarch/Kconfig.debug
+++ b/arch/loongarch/Kconfig.debug
@@ -26,4 +26,15 @@ config UNWINDER_PROLOGUE
Some of the addresses it reports may be incorrect (but better than the
Guess unwinder).
+config UNWINDER_ORC
+ bool "ORC unwinder"
+ select OBJTOOL
+ help
+ This option enables the ORC (Oops Rewind Capability) unwinder for
+ unwinding kernel stack traces. It uses a custom data format which is
+ a simplified version of the DWARF Call Frame Information standard.
+
+ Enabling this option will increase the kernel's runtime memory usage
+ by roughly 2-4MB, depending on your kernel config.
+
endchoice
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index fa4fb09909aeb1..df6caf79537a9d 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -26,6 +26,18 @@ endif
32bit-emul = elf32loongarch
64bit-emul = elf64loongarch
+ifdef CONFIG_UNWINDER_ORC
+orc_hash_h := arch/$(SRCARCH)/include/generated/asm/orc_hash.h
+orc_hash_sh := $(srctree)/scripts/orc_hash.sh
+targets += $(orc_hash_h)
+quiet_cmd_orc_hash = GEN $@
+ cmd_orc_hash = mkdir -p $(dir $@); \
+ $(CONFIG_SHELL) $(orc_hash_sh) < $< > $@
+$(orc_hash_h): $(srctree)/arch/loongarch/include/asm/orc_types.h $(orc_hash_sh) FORCE
+ $(call if_changed,orc_hash)
+archprepare: $(orc_hash_h)
+endif
+
ifdef CONFIG_DYNAMIC_FTRACE
KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
CC_FLAGS_FTRACE := -fpatchable-function-entry=2
@@ -72,8 +84,6 @@ KBUILD_CFLAGS_KERNEL += $(call cc-option,-mdirect-extern-access)
KBUILD_CFLAGS_KERNEL += $(call cc-option,-fdirect-access-external-data)
KBUILD_AFLAGS_MODULE += $(call cc-option,-fno-direct-access-external-data)
KBUILD_CFLAGS_MODULE += $(call cc-option,-fno-direct-access-external-data)
-KBUILD_AFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
-KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
else
cflags-y += $(call cc-option,-mno-explicit-relocs)
KBUILD_AFLAGS_KERNEL += -Wa,-mla-global-with-pcrel
@@ -82,6 +92,15 @@ KBUILD_AFLAGS_MODULE += -Wa,-mla-global-with-abs
KBUILD_CFLAGS_MODULE += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs
endif
+KBUILD_AFLAGS += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
+KBUILD_CFLAGS += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
+KBUILD_AFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)-mthin-add-sub)
+KBUILD_CFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)-mthin-add-sub)
+
+ifdef CONFIG_OBJTOOL
+KBUILD_CFLAGS += -fno-jump-tables
+endif
+
KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
KBUILD_RUSTFLAGS_MODULE += -Crelocation-model=pic
diff --git a/arch/loongarch/crypto/crc32-loongarch.c b/arch/loongarch/crypto/crc32-loongarch.c
index a49e507af38c0e..3eebea3a7b478d 100644
--- a/arch/loongarch/crypto/crc32-loongarch.c
+++ b/arch/loongarch/crypto/crc32-loongarch.c
@@ -44,7 +44,6 @@ static u32 crc32_loongarch_hw(u32 crc_, const u8 *p, unsigned int len)
CRC32(crc, value, w);
p += sizeof(u32);
- len -= sizeof(u32);
}
if (len & sizeof(u16)) {
@@ -80,7 +79,6 @@ static u32 crc32c_loongarch_hw(u32 crc_, const u8 *p, unsigned int len)
CRC32C(crc, value, w);
p += sizeof(u32);
- len -= sizeof(u32);
}
if (len & sizeof(u16)) {
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
index 93783fa24f6e9b..2dbec7853ae864 100644
--- a/arch/loongarch/include/asm/Kbuild
+++ b/arch/loongarch/include/asm/Kbuild
@@ -1,9 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
+generated-y += orc_hash.h
+
generic-y += dma-contiguous.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += early_ioremap.h
generic-y += qrwlock.h
+generic-y += qspinlock.h
generic-y += rwsem.h
generic-y += segment.h
generic-y += user.h
diff --git a/arch/loongarch/include/asm/bug.h b/arch/loongarch/include/asm/bug.h
index d4ca3ba2541885..08388876ade4ce 100644
--- a/arch/loongarch/include/asm/bug.h
+++ b/arch/loongarch/include/asm/bug.h
@@ -44,6 +44,7 @@
do { \
instrumentation_begin(); \
__BUG_FLAGS(BUGFLAG_WARNING|(flags)); \
+ annotate_reachable(); \
instrumentation_end(); \
} while (0)
diff --git a/arch/loongarch/include/asm/cacheflush.h b/arch/loongarch/include/asm/cacheflush.h
index 80bd74106985a9..f8754d08a31ab0 100644
--- a/arch/loongarch/include/asm/cacheflush.h
+++ b/arch/loongarch/include/asm/cacheflush.h
@@ -37,8 +37,6 @@ void local_flush_icache_range(unsigned long start, unsigned long end);
#define flush_icache_range local_flush_icache_range
#define flush_icache_user_range local_flush_icache_range
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
@@ -47,7 +45,6 @@ void local_flush_icache_range(unsigned long start, unsigned long end);
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define flush_icache_user_page(vma, page, addr, len) do { } while (0)
-#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
diff --git a/arch/loongarch/include/asm/exception.h b/arch/loongarch/include/asm/exception.h
index af74a3fdcad179..c6d20736fd9270 100644
--- a/arch/loongarch/include/asm/exception.h
+++ b/arch/loongarch/include/asm/exception.h
@@ -6,6 +6,8 @@
#include <asm/ptrace.h>
#include <linux/kprobes.h>
+extern void *exception_table[];
+
void show_registers(struct pt_regs *regs);
asmlinkage void cache_parity_error(void);
diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
index c486c2341b6623..4a8adcca329b81 100644
--- a/arch/loongarch/include/asm/io.h
+++ b/arch/loongarch/include/asm/io.h
@@ -71,6 +71,8 @@ extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t
#define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l))
#define memcpy_toio(c, a, l) __memcpy_toio((c), (a), (l))
+#define __io_aw() mmiowb()
+
#include <asm-generic/io.h>
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
diff --git a/arch/loongarch/include/asm/module.h b/arch/loongarch/include/asm/module.h
index 2ecd82bb64e132..f33f3fd32ecc2c 100644
--- a/arch/loongarch/include/asm/module.h
+++ b/arch/loongarch/include/asm/module.h
@@ -6,6 +6,7 @@
#define _ASM_MODULE_H
#include <asm/inst.h>
+#include <asm/orc_types.h>
#include <asm-generic/module.h>
#define RELA_STACK_DEPTH 16
@@ -21,6 +22,12 @@ struct mod_arch_specific {
struct mod_section plt;
struct mod_section plt_idx;
+#ifdef CONFIG_UNWINDER_ORC
+ unsigned int num_orcs;
+ int *orc_unwind_ip;
+ struct orc_entry *orc_unwind;
+#endif
+
/* For CONFIG_DYNAMIC_FTRACE */
struct plt_entry *ftrace_trampolines;
};
diff --git a/arch/loongarch/include/asm/orc_header.h b/arch/loongarch/include/asm/orc_header.h
new file mode 100644
index 00000000000000..f9d509c3fd7048
--- /dev/null
+++ b/arch/loongarch/include/asm/orc_header.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _ORC_HEADER_H
+#define _ORC_HEADER_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/orc_hash.h>
+
+/*
+ * The header is currently a 20-byte hash of the ORC entry definition; see
+ * scripts/orc_hash.sh.
+ */
+#define ORC_HEADER \
+ __used __section(".orc_header") __aligned(4) \
+ static const u8 orc_header[] = { ORC_HASH }
+
+#endif /* _ORC_HEADER_H */
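
The generated asm/orc_hash.h is not part of this patch; the point of the hash is that vmlinux and every module carry a fingerprint of the orc_entry layout, so mismatched ORC data can be detected. A hedged sketch of the expansion, assuming the script emits ORC_HASH as a comma-separated list of 20 bytes:

	/*
	 * Hedged sketch, not the generated file: scripts/orc_hash.sh hashes
	 * the orc_entry definition, and asm/orc_hash.h is assumed to define
	 * ORC_HASH as e.g. "#define ORC_HASH 0x01,0x23,...".  ORC_HEADER
	 * then expands to roughly:
	 */
	static const unsigned char orc_header[]
		__attribute__((used, section(".orc_header"), aligned(4))) =
		{ 0x01, 0x23 /* ..., 20 bytes in total */ };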
diff --git a/arch/loongarch/include/asm/orc_lookup.h b/arch/loongarch/include/asm/orc_lookup.h
new file mode 100644
index 00000000000000..b02e6357def484
--- /dev/null
+++ b/arch/loongarch/include/asm/orc_lookup.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ORC_LOOKUP_H
+#define _ORC_LOOKUP_H
+
+/*
+ * This is a lookup table for speeding up access to the .orc_unwind table.
+ * Given an input address offset, the corresponding lookup table entry
+ * specifies a subset of the .orc_unwind table to search.
+ *
+ * Each block represents the end of the previous range and the start of the
+ * next range. An extra block is added to give the last range an end.
+ *
+ * The block size should be a power of 2 to avoid a costly 'div' instruction.
+ *
+ * A block size of 256 was chosen because it roughly doubles unwinder
+ * performance while only adding ~5% to the ORC data footprint.
+ */
+#define LOOKUP_BLOCK_ORDER 8
+#define LOOKUP_BLOCK_SIZE (1 << LOOKUP_BLOCK_ORDER)
+
+#ifndef LINKER_SCRIPT
+
+extern unsigned int orc_lookup[];
+extern unsigned int orc_lookup_end[];
+
+#define LOOKUP_START_IP (unsigned long)_stext
+#define LOOKUP_STOP_IP (unsigned long)_etext
+
+#endif /* LINKER_SCRIPT */
+
+#endif /* _ORC_LOOKUP_H */
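
With power-of-two blocks, narrowing the binary search to one block's worth of entries costs a subtraction, a shift, and two table reads. A hedged sketch of the fast path this table enables; the names mirror the header above:

	#include <stddef.h>

	#define LOOKUP_BLOCK_ORDER	8
	#define LOOKUP_BLOCK_SIZE	(1 << LOOKUP_BLOCK_ORDER)

	struct orc_range { unsigned int start, stop; };

	/* orc_lookup[] maps each 256-byte text block to a starting index
	 * in the .orc_unwind table. */
	static struct orc_range lookup_range(const unsigned int *orc_lookup,
					     unsigned long start_ip, unsigned long ip)
	{
		/* power-of-two block size: the division compiles to a shift */
		unsigned int idx = (ip - start_ip) / LOOKUP_BLOCK_SIZE;

		/* entries idx and idx+1 bracket the slice of .orc_unwind to
		 * binary-search; +1 because adjacent blocks share entries */
		return (struct orc_range){ orc_lookup[idx], orc_lookup[idx + 1] + 1 };
	}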
diff --git a/arch/loongarch/include/asm/orc_types.h b/arch/loongarch/include/asm/orc_types.h
new file mode 100644
index 00000000000000..caf1f71a1057b6
--- /dev/null
+++ b/arch/loongarch/include/asm/orc_types.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ORC_TYPES_H
+#define _ORC_TYPES_H
+
+#include <linux/types.h>
+
+/*
+ * The ORC_REG_* registers are base registers which are used to find other
+ * registers on the stack.
+ *
+ * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the
+ * address of the previous frame: the caller's SP before it called the current
+ * function.
+ *
+ * ORC_REG_UNDEFINED means the corresponding register's value didn't change in
+ * the current frame.
+ *
+ * The most commonly used base registers are SP and FP -- which the previous SP
+ * is usually based on -- and PREV_SP and UNDEFINED -- which the previous FP is
+ * usually based on.
+ *
+ * The rest of the base registers are needed for special cases like entry code
+ * and GCC realigned stacks.
+ */
+#define ORC_REG_UNDEFINED 0
+#define ORC_REG_PREV_SP 1
+#define ORC_REG_SP 2
+#define ORC_REG_FP 3
+#define ORC_REG_MAX 4
+
+#define ORC_TYPE_UNDEFINED 0
+#define ORC_TYPE_END_OF_STACK 1
+#define ORC_TYPE_CALL 2
+#define ORC_TYPE_REGS 3
+#define ORC_TYPE_REGS_PARTIAL 4
+
+#ifndef __ASSEMBLY__
+/*
+ * This struct is more or less a vastly simplified version of the DWARF Call
+ * Frame Information standard. It contains only the necessary parts of DWARF
+ * CFI, simplified for ease of access by the in-kernel unwinder. It tells the
+ * unwinder how to find the previous SP and FP (and sometimes entry regs) on
+ * the stack for a given code address. Each instance of the struct corresponds
+ * to one or more code locations.
+ */
+struct orc_entry {
+ s16 sp_offset;
+ s16 fp_offset;
+ s16 ra_offset;
+ unsigned int sp_reg:4;
+ unsigned int fp_reg:4;
+ unsigned int ra_reg:4;
+ unsigned int type:3;
+ unsigned int signal:1;
+};
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ORC_TYPES_H */
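
To make the fields concrete, here is a hedged illustration (not from the patch) of one entry for a standard LoongArch prologue, assuming the header above is included; the same field values appear later in unwind_orc.c as the unwinder's frame-pointer fallback entry:

	/*
	 * For a prologue like:
	 *
	 *	addi.d	sp, sp, -16
	 *	st.d	ra, sp, 8
	 *	st.d	fp, sp, 0
	 *
	 * the caller's SP is the current SP + 16, and the saved ra/fp sit
	 * just below that address:
	 */
	static const struct orc_entry example_entry = {
		.sp_reg    = ORC_REG_SP,	/* prev SP = SP + sp_offset */
		.sp_offset = 16,
		.fp_reg    = ORC_REG_PREV_SP,	/* fp saved at prev SP - 16 */
		.fp_offset = -16,
		.ra_reg    = ORC_REG_PREV_SP,	/* ra saved at prev SP - 8 */
		.ra_offset = -8,
		.type      = ORC_TYPE_CALL,
	};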
diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h
index afb6fa16b82636..44027060c54a28 100644
--- a/arch/loongarch/include/asm/page.h
+++ b/arch/loongarch/include/asm/page.h
@@ -75,6 +75,9 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
+struct page *dmw_virt_to_page(unsigned long kaddr);
+struct page *tlb_virt_to_page(unsigned long kaddr);
+
#define virt_to_pfn(kaddr) PFN_DOWN(PHYSADDR(kaddr))
#define virt_to_page(kaddr) \
diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
index 9b36ac003f8907..8f290e5546cf72 100644
--- a/arch/loongarch/include/asm/percpu.h
+++ b/arch/loongarch/include/asm/percpu.h
@@ -29,7 +29,12 @@ static inline void set_my_cpu_offset(unsigned long off)
__my_cpu_offset = off;
csr_write64(off, PERCPU_BASE_KS);
}
-#define __my_cpu_offset __my_cpu_offset
+
+#define __my_cpu_offset \
+({ \
+ __asm__ __volatile__("":"+r"(__my_cpu_offset)); \
+ __my_cpu_offset; \
+})
#define PERCPU_OP(op, asm_op, c_op) \
static __always_inline unsigned long __percpu_##op(void *ptr, \
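
The empty asm with a "+r" operand is a targeted compiler barrier. __my_cpu_offset is a global register variable, which the compiler otherwise assumes never changes, so it may compute a per-CPU address once and reuse it across a point where the task migrates to another CPU. A hedged sketch of the idiom in isolation:

	/*
	 * Hedged sketch: the "+r" operand makes each use of the offset look
	 * like a freshly produced value, so the optimizer cannot CSE or
	 * hoist per-CPU address computations across this point.  The asm
	 * body is empty; no instruction is emitted.
	 */
	static inline unsigned long read_cpu_offset(unsigned long off)
	{
		__asm__ __volatile__("" : "+r"(off));
		return off;
	}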
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 8b5df1bbf9e9c2..af3acdf3481a6a 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -363,9 +363,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];
-struct page *dmw_virt_to_page(unsigned long kaddr);
-struct page *tlb_virt_to_page(unsigned long kaddr);
-
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h
deleted file mode 100644
index 34f43f8ad5912b..00000000000000
--- a/arch/loongarch/include/asm/qspinlock.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_QSPINLOCK_H
-#define _ASM_QSPINLOCK_H
-
-#include <asm-generic/qspinlock_types.h>
-
-#define queued_spin_unlock queued_spin_unlock
-
-static inline void queued_spin_unlock(struct qspinlock *lock)
-{
- compiletime_assert_atomic_type(lock->locked);
- c_sync();
- WRITE_ONCE(lock->locked, 0);
-}
-
-#include <asm-generic/qspinlock.h>
-
-#endif /* _ASM_QSPINLOCK_H */
diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
index 4fb1e6408b982a..45b507a7b06fca 100644
--- a/arch/loongarch/include/asm/stackframe.h
+++ b/arch/loongarch/include/asm/stackframe.h
@@ -13,6 +13,7 @@
#include <asm/asm-offsets.h>
#include <asm/loongarch.h>
#include <asm/thread_info.h>
+#include <asm/unwind_hints.h>
/* Make the addition of cfi info a little easier. */
.macro cfi_rel_offset reg offset=0 docfi=0
@@ -162,6 +163,7 @@
li.w t0, CSR_CRMD_WE
csrxchg t0, t0, LOONGARCH_CSR_CRMD
#endif
+ UNWIND_HINT_REGS
.endm
.macro SAVE_ALL docfi=0
@@ -219,6 +221,7 @@
.macro RESTORE_SP_AND_RET docfi=0
cfi_ld sp, PT_R3, \docfi
+ UNWIND_HINT_FUNC
ertn
.endm
diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h
index 8cb653d49a5434..8bf0e6f5154668 100644
--- a/arch/loongarch/include/asm/thread_info.h
+++ b/arch/loongarch/include/asm/thread_info.h
@@ -86,6 +86,7 @@ register unsigned long current_stack_pointer __asm__("$sp");
#define TIF_LASX_CTX_LIVE 18 /* LASX context must be preserved */
#define TIF_USEDLBT 19 /* LBT was used by this task this quantum (SMP) */
#define TIF_LBT_CTX_LIVE 20 /* LBT context must be preserved */
+#define TIF_PATCH_PENDING 21 /* pending live patching update */
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
@@ -105,6 +106,7 @@ register unsigned long current_stack_pointer __asm__("$sp");
#define _TIF_LASX_CTX_LIVE (1<<TIF_LASX_CTX_LIVE)
#define _TIF_USEDLBT (1<<TIF_USEDLBT)
#define _TIF_LBT_CTX_LIVE (1<<TIF_LBT_CTX_LIVE)
+#define _TIF_PATCH_PENDING (1<<TIF_PATCH_PENDING)
#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/loongarch/include/asm/unwind.h b/arch/loongarch/include/asm/unwind.h
index b9dce87afd2e05..40a6763c5aecd5 100644
--- a/arch/loongarch/include/asm/unwind.h
+++ b/arch/loongarch/include/asm/unwind.h
@@ -16,6 +16,7 @@
enum unwinder_type {
UNWINDER_GUESS,
UNWINDER_PROLOGUE,
+ UNWINDER_ORC,
};
struct unwind_state {
@@ -24,7 +25,7 @@ struct unwind_state {
struct task_struct *task;
bool first, error, reset;
int graph_idx;
- unsigned long sp, pc, ra;
+ unsigned long sp, fp, pc, ra;
};
bool default_next_frame(struct unwind_state *state);
@@ -61,14 +62,17 @@ static __always_inline void __unwind_start(struct unwind_state *state,
state->sp = regs->regs[3];
state->pc = regs->csr_era;
state->ra = regs->regs[1];
+ state->fp = regs->regs[22];
} else if (task && task != current) {
state->sp = thread_saved_fp(task);
state->pc = thread_saved_ra(task);
state->ra = 0;
+ state->fp = 0;
} else {
state->sp = (unsigned long)__builtin_frame_address(0);
state->pc = (unsigned long)__builtin_return_address(0);
state->ra = 0;
+ state->fp = 0;
}
state->task = task;
get_stack_info(state->sp, state->task, &state->stack_info);
@@ -77,6 +81,18 @@ static __always_inline void __unwind_start(struct unwind_state *state,
static __always_inline unsigned long __unwind_get_return_address(struct unwind_state *state)
{
- return unwind_done(state) ? 0 : state->pc;
+ if (unwind_done(state))
+ return 0;
+
+ return __kernel_text_address(state->pc) ? state->pc : 0;
}
+
+#ifdef CONFIG_UNWINDER_ORC
+void unwind_init(void);
+void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, void *orc, size_t orc_size);
+#else
+static inline void unwind_init(void) {}
+static inline void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, void *orc, size_t orc_size) {}
+#endif
+
#endif /* _ASM_UNWIND_H */
diff --git a/arch/loongarch/include/asm/unwind_hints.h b/arch/loongarch/include/asm/unwind_hints.h
new file mode 100644
index 00000000000000..a01086ad9ddea4
--- /dev/null
+++ b/arch/loongarch/include/asm/unwind_hints.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_LOONGARCH_UNWIND_HINTS_H
+#define _ASM_LOONGARCH_UNWIND_HINTS_H
+
+#include <linux/objtool.h>
+#include <asm/orc_types.h>
+
+#ifdef __ASSEMBLY__
+
+.macro UNWIND_HINT_UNDEFINED
+ UNWIND_HINT type=UNWIND_HINT_TYPE_UNDEFINED
+.endm
+
+.macro UNWIND_HINT_END_OF_STACK
+ UNWIND_HINT type=UNWIND_HINT_TYPE_END_OF_STACK
+.endm
+
+.macro UNWIND_HINT_REGS
+ UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_REGS
+.endm
+
+.macro UNWIND_HINT_FUNC
+ UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index 3c808c6803703c..3a7620b66bc6c0 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -3,6 +3,8 @@
# Makefile for the Linux/LoongArch kernel.
#
+OBJECT_FILES_NON_STANDARD_head.o := y
+
extra-y := vmlinux.lds
obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
@@ -21,6 +23,7 @@ obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
CFLAGS_module.o += $(call cc-option,-Wno-override-init,)
CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_traps.o += $(call cc-option,-Wno-override-init,)
CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,)
ifdef CONFIG_FUNCTION_TRACER
@@ -62,6 +65,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
+obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
index 1ec8e4c4cc2bd8..48e7e34e355e83 100644
--- a/arch/loongarch/kernel/entry.S
+++ b/arch/loongarch/kernel/entry.S
@@ -14,11 +14,13 @@
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>
+#include <asm/unwind_hints.h>
.text
.cfi_sections .debug_frame
.align 5
SYM_CODE_START(handle_syscall)
+ UNWIND_HINT_UNDEFINED
csrrd t0, PERCPU_BASE_KS
la.pcrel t1, kernelsp
add.d t1, t1, t0
@@ -57,6 +59,7 @@ SYM_CODE_START(handle_syscall)
cfi_st fp, PT_R22
SAVE_STATIC
+ UNWIND_HINT_REGS
#ifdef CONFIG_KGDB
li.w t1, CSR_CRMD_WE
@@ -75,6 +78,7 @@ SYM_CODE_END(handle_syscall)
_ASM_NOKPROBE(handle_syscall)
SYM_CODE_START(ret_from_fork)
+ UNWIND_HINT_REGS
bl schedule_tail # a0 = struct task_struct *prev
move a0, sp
bl syscall_exit_to_user_mode
@@ -84,6 +88,7 @@ SYM_CODE_START(ret_from_fork)
SYM_CODE_END(ret_from_fork)
SYM_CODE_START(ret_from_kernel_thread)
+ UNWIND_HINT_REGS
bl schedule_tail # a0 = struct task_struct *prev
move a0, s1
jirl ra, s0, 0
diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
index 4382e36ae3d444..69a85f2479fba1 100644
--- a/arch/loongarch/kernel/fpu.S
+++ b/arch/loongarch/kernel/fpu.S
@@ -15,6 +15,7 @@
#include <asm/fpregdef.h>
#include <asm/loongarch.h>
#include <asm/regdef.h>
+#include <asm/unwind_hints.h>
#define FPU_REG_WIDTH 8
#define LSX_REG_WIDTH 16
@@ -526,3 +527,9 @@ SYM_FUNC_END(_restore_lasx_context)
.L_fpu_fault:
li.w a0, -EFAULT # failure
jr ra
+
+#ifdef CONFIG_CPU_HAS_LBT
+STACK_FRAME_NON_STANDARD _restore_fp
+STACK_FRAME_NON_STANDARD _restore_lsx
+STACK_FRAME_NON_STANDARD _restore_lasx
+#endif
diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
index 2bb3aa2dcfcb2e..86d5d90ebefe5b 100644
--- a/arch/loongarch/kernel/genex.S
+++ b/arch/loongarch/kernel/genex.S
@@ -32,6 +32,7 @@ SYM_FUNC_START(__arch_cpu_idle)
SYM_FUNC_END(__arch_cpu_idle)
SYM_CODE_START(handle_vint)
+ UNWIND_HINT_UNDEFINED
BACKUP_T0T1
SAVE_ALL
la_abs t1, __arch_cpu_idle
@@ -49,6 +50,7 @@ SYM_CODE_START(handle_vint)
SYM_CODE_END(handle_vint)
SYM_CODE_START(except_vec_cex)
+ UNWIND_HINT_UNDEFINED
b cache_parity_error
SYM_CODE_END(except_vec_cex)
@@ -67,6 +69,7 @@ SYM_CODE_END(except_vec_cex)
.macro BUILD_HANDLER exception handler prep
.align 5
SYM_CODE_START(handle_\exception)
+ UNWIND_HINT_UNDEFINED
666:
BACKUP_T0T1
SAVE_ALL
@@ -77,7 +80,9 @@ SYM_CODE_END(except_vec_cex)
668:
RESTORE_ALL_AND_RET
SYM_CODE_END(handle_\exception)
+ .pushsection ".data", "aw", %progbits
SYM_DATA(unwind_hint_\exception, .word 668b - 666b)
+ .popsection
.endm
BUILD_HANDLER ade ade badv
@@ -94,6 +99,7 @@ SYM_CODE_END(except_vec_cex)
BUILD_HANDLER reserved reserved none /* others */
SYM_CODE_START(handle_sys)
+ UNWIND_HINT_UNDEFINED
la_abs t0, handle_syscall
jr t0
SYM_CODE_END(handle_sys)
diff --git a/arch/loongarch/kernel/lbt.S b/arch/loongarch/kernel/lbt.S
index 9c75120a26d836..001f061d226ab5 100644
--- a/arch/loongarch/kernel/lbt.S
+++ b/arch/loongarch/kernel/lbt.S
@@ -11,6 +11,7 @@
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/regdef.h>
+#include <asm/unwind_hints.h>
#define SCR_REG_WIDTH 8
@@ -153,3 +154,5 @@ SYM_FUNC_END(_restore_ftop_context)
.L_lbt_fault:
li.w a0, -EFAULT # failure
jr ra
+
+STACK_FRAME_NON_STANDARD _restore_ftop_context
diff --git a/arch/loongarch/kernel/mcount_dyn.S b/arch/loongarch/kernel/mcount_dyn.S
index 482aa553aa2d5e..0c65cf09110cd4 100644
--- a/arch/loongarch/kernel/mcount_dyn.S
+++ b/arch/loongarch/kernel/mcount_dyn.S
@@ -73,6 +73,7 @@ SYM_FUNC_START(ftrace_stub)
SYM_FUNC_END(ftrace_stub)
SYM_CODE_START(ftrace_common)
+ UNWIND_HINT_UNDEFINED
PTR_ADDI a0, ra, -8 /* arg0: ip */
move a1, t0 /* arg1: parent_ip */
la.pcrel t1, function_trace_op
@@ -113,12 +114,14 @@ ftrace_common_return:
SYM_CODE_END(ftrace_common)
SYM_CODE_START(ftrace_caller)
+ UNWIND_HINT_UNDEFINED
ftrace_regs_entry allregs=0
b ftrace_common
SYM_CODE_END(ftrace_caller)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
SYM_CODE_START(ftrace_regs_caller)
+ UNWIND_HINT_UNDEFINED
ftrace_regs_entry allregs=1
b ftrace_common
SYM_CODE_END(ftrace_regs_caller)
@@ -126,6 +129,7 @@ SYM_CODE_END(ftrace_regs_caller)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_CODE_START(ftrace_graph_caller)
+ UNWIND_HINT_UNDEFINED
PTR_L a0, sp, PT_ERA
PTR_ADDI a0, a0, -8 /* arg0: self_addr */
PTR_ADDI a1, sp, PT_R1 /* arg1: parent */
@@ -134,6 +138,7 @@ SYM_CODE_START(ftrace_graph_caller)
SYM_CODE_END(ftrace_graph_caller)
SYM_CODE_START(return_to_handler)
+ UNWIND_HINT_UNDEFINED
/* Save return value regs */
PTR_ADDI sp, sp, -FGRET_REGS_SIZE
PTR_S a0, sp, FGRET_REGS_A0
@@ -155,6 +160,7 @@ SYM_CODE_END(return_to_handler)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
SYM_CODE_START(ftrace_stub_direct_tramp)
+ UNWIND_HINT_UNDEFINED
jr t0
SYM_CODE_END(ftrace_stub_direct_tramp)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c
index b13b2858fe3923..c7d0338d12c15b 100644
--- a/arch/loongarch/kernel/module.c
+++ b/arch/loongarch/kernel/module.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <asm/alternative.h>
#include <asm/inst.h>
+#include <asm/unwind.h>
static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top)
{
@@ -515,15 +516,28 @@ static void module_init_ftrace_plt(const Elf_Ehdr *hdr,
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *mod)
{
- const Elf_Shdr *s, *se;
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ const Elf_Shdr *s, *alt = NULL, *orc = NULL, *orc_ip = NULL, *ftrace = NULL;
- for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
if (!strcmp(".altinstructions", secstrs + s->sh_name))
- apply_alternatives((void *)s->sh_addr, (void *)s->sh_addr + s->sh_size);
+ alt = s;
+ if (!strcmp(".orc_unwind", secstrs + s->sh_name))
+ orc = s;
+ if (!strcmp(".orc_unwind_ip", secstrs + s->sh_name))
+ orc_ip = s;
if (!strcmp(".ftrace_trampoline", secstrs + s->sh_name))
- module_init_ftrace_plt(hdr, s, mod);
+ ftrace = s;
}
+ if (alt)
+ apply_alternatives((void *)alt->sh_addr, (void *)alt->sh_addr + alt->sh_size);
+
+ if (orc && orc_ip)
+ unwind_module_init(mod, (void *)orc_ip->sh_addr, orc_ip->sh_size, (void *)orc->sh_addr, orc->sh_size);
+
+ if (ftrace)
+ module_init_ftrace_plt(hdr, ftrace, mod);
+
return 0;
}
diff --git a/arch/loongarch/kernel/relocate_kernel.S b/arch/loongarch/kernel/relocate_kernel.S
index f49f6b053763d1..84e6de2fd97354 100644
--- a/arch/loongarch/kernel/relocate_kernel.S
+++ b/arch/loongarch/kernel/relocate_kernel.S
@@ -15,6 +15,7 @@
#include <asm/addrspace.h>
SYM_CODE_START(relocate_new_kernel)
+ UNWIND_HINT_UNDEFINED
/*
* a0: EFI boot flag for the new kernel
* a1: Command line pointer for the new kernel
@@ -90,6 +91,7 @@ SYM_CODE_END(relocate_new_kernel)
* then start at the entry point from LOONGARCH_IOCSR_MBUF0.
*/
SYM_CODE_START(kexec_smp_wait)
+ UNWIND_HINT_UNDEFINED
1: li.w t0, 0x100 /* wait for init loop */
2: addi.w t0, t0, -1 /* limit mailbox access */
bnez t0, 2b
@@ -106,6 +108,5 @@ SYM_CODE_END(kexec_smp_wait)
relocate_new_kernel_end:
-SYM_DATA_START(relocate_new_kernel_size)
- PTR relocate_new_kernel_end - relocate_new_kernel
-SYM_DATA_END(relocate_new_kernel_size)
+ .section ".data"
+SYM_DATA(relocate_new_kernel_size, .long relocate_new_kernel_end - relocate_new_kernel)
diff --git a/arch/loongarch/kernel/rethook_trampoline.S b/arch/loongarch/kernel/rethook_trampoline.S
index bd5772c963382f..d4ceb2fa2a5ce4 100644
--- a/arch/loongarch/kernel/rethook_trampoline.S
+++ b/arch/loongarch/kernel/rethook_trampoline.S
@@ -76,6 +76,7 @@
.endm
SYM_CODE_START(arch_rethook_trampoline)
+ UNWIND_HINT_UNDEFINED
addi.d sp, sp, -PT_SIZE
save_all_base_regs
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 2b72eb326b4428..60e0fe97f61a31 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -47,6 +47,7 @@
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/time.h>
+#include <asm/unwind.h>
#define SMBIOS_BIOSSIZE_OFFSET 0x09
#define SMBIOS_BIOSEXTERN_OFFSET 0x13
@@ -587,6 +588,7 @@ static void __init prefill_possible_map(void)
void __init setup_arch(char **cmdline_p)
{
cpu_probe();
+ unwind_init();
init_environ();
efi_init();
diff --git a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c
index f623feb2129f12..9a038d1070d73b 100644
--- a/arch/loongarch/kernel/stacktrace.c
+++ b/arch/loongarch/kernel/stacktrace.c
@@ -29,6 +29,7 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
regs->csr_era = thread_saved_ra(task);
}
regs->regs[1] = 0;
+ regs->regs[22] = 0;
}
for (unwind_start(&state, task, regs);
@@ -39,6 +40,46 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
}
}
+int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task)
+{
+ unsigned long addr;
+ struct pt_regs dummyregs;
+ struct pt_regs *regs = &dummyregs;
+ struct unwind_state state;
+
+ if (task == current) {
+ regs->regs[3] = (unsigned long)__builtin_frame_address(0);
+ regs->csr_era = (unsigned long)__builtin_return_address(0);
+ } else {
+ regs->regs[3] = thread_saved_fp(task);
+ regs->csr_era = thread_saved_ra(task);
+ }
+ regs->regs[1] = 0;
+ regs->regs[22] = 0;
+
+ for (unwind_start(&state, task, regs);
+ !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
+ addr = unwind_get_return_address(&state);
+
+ /*
+ * A NULL or invalid return address probably means there's some
+ * generated code which __kernel_text_address() doesn't know about.
+ */
+ if (!addr)
+ return -EINVAL;
+
+ if (!consume_entry(cookie, addr))
+ return -EINVAL;
+ }
+
+ /* Check for stack corruption */
+ if (unwind_error(&state))
+ return -EINVAL;
+
+ return 0;
+}
+
static int
copy_stack_frame(unsigned long fp, struct stack_frame *frame)
{
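
arch_stack_walk_reliable() is what the HAVE_RELIABLE_STACKTRACE selection above plugs into: unlike the best-effort walker, it must either visit every return address or report failure, which is what livepatch's task-transition checks depend on. A hedged kernel-side sketch of a caller:

	#include <linux/sched.h>
	#include <linux/stacktrace.h>

	/* stack_trace_consume_fn: return false to stop the walk early */
	static bool count_entry(void *cookie, unsigned long addr)
	{
		unsigned int *count = cookie;

		(*count)++;
		return true;	/* keep walking */
	}

	/* returns 0 with a trustworthy depth, or -EINVAL if the stack
	 * could not be unwound reliably */
	static int task_stack_depth(struct task_struct *task, unsigned int *depth)
	{
		*depth = 0;
		return arch_stack_walk_reliable(count_entry, depth, task);
	}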
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index aebfc3733a7607..f9f4eb00c92ef5 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -53,6 +53,32 @@
#include "access-helper.h"
+void *exception_table[EXCCODE_INT_START] = {
+ [0 ... EXCCODE_INT_START - 1] = handle_reserved,
+
+ [EXCCODE_TLBI] = handle_tlb_load,
+ [EXCCODE_TLBL] = handle_tlb_load,
+ [EXCCODE_TLBS] = handle_tlb_store,
+ [EXCCODE_TLBM] = handle_tlb_modify,
+ [EXCCODE_TLBNR] = handle_tlb_protect,
+ [EXCCODE_TLBNX] = handle_tlb_protect,
+ [EXCCODE_TLBPE] = handle_tlb_protect,
+ [EXCCODE_ADE] = handle_ade,
+ [EXCCODE_ALE] = handle_ale,
+ [EXCCODE_BCE] = handle_bce,
+ [EXCCODE_SYS] = handle_sys,
+ [EXCCODE_BP] = handle_bp,
+ [EXCCODE_INE] = handle_ri,
+ [EXCCODE_IPE] = handle_ri,
+ [EXCCODE_FPDIS] = handle_fpu,
+ [EXCCODE_LSXDIS] = handle_lsx,
+ [EXCCODE_LASXDIS] = handle_lasx,
+ [EXCCODE_FPE] = handle_fpe,
+ [EXCCODE_WATCH] = handle_watch,
+ [EXCCODE_BTDIS] = handle_lbt,
+};
+EXPORT_SYMBOL_GPL(exception_table);
+
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
const char *loglvl, bool user)
{
@@ -1150,19 +1176,9 @@ void __init trap_init(void)
for (i = EXCCODE_INT_START; i <= EXCCODE_INT_END; i++)
set_handler(i * VECSIZE, handle_vint, VECSIZE);
- set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE);
- set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE);
- set_handler(EXCCODE_BCE * VECSIZE, handle_bce, VECSIZE);
- set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE);
- set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE);
- set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE);
- set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE);
- set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE);
- set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE);
- set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE);
- set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE);
- set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE);
- set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE);
+ /* Set exception vector handler */
+ for (i = EXCCODE_ADE; i <= EXCCODE_BTDIS; i++)
+ set_handler(i * VECSIZE, exception_table[i], VECSIZE);
cache_error_setup();
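
The `[0 ... EXCCODE_INT_START - 1] = handle_reserved` initializer uses GCC's range-designator extension: fill the whole array with a default first, then override individual slots, with later designators winning. A minimal userspace demo of the idiom (index 11 is illustrative):

	#include <stdio.h>

	static void handle_default(void) { puts("reserved"); }
	static void handle_sys(void)     { puts("syscall"); }

	static void (*table[64])(void) = {
		[0 ... 63] = handle_default,	/* GNU extension: range default */
		[11]       = handle_sys,	/* later designators override */
	};

	int main(void)
	{
		table[11]();	/* syscall */
		table[12]();	/* reserved */
		return 0;
	}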
diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c
new file mode 100644
index 00000000000000..b2572287633179
--- /dev/null
+++ b/arch/loongarch/kernel/unwind_orc.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/objtool.h>
+#include <linux/module.h>
+#include <linux/sort.h>
+#include <asm/exception.h>
+#include <asm/orc_header.h>
+#include <asm/orc_lookup.h>
+#include <asm/orc_types.h>
+#include <asm/ptrace.h>
+#include <asm/setup.h>
+#include <asm/stacktrace.h>
+#include <asm/tlb.h>
+#include <asm/unwind.h>
+
+ORC_HEADER;
+
+#define orc_warn(fmt, ...) \
+ printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)
+
+extern int __start_orc_unwind_ip[];
+extern int __stop_orc_unwind_ip[];
+extern struct orc_entry __start_orc_unwind[];
+extern struct orc_entry __stop_orc_unwind[];
+
+static bool orc_init __ro_after_init;
+static unsigned int lookup_num_blocks __ro_after_init;
+
+/* Fake frame pointer entry -- used as a fallback for generated code */
+static struct orc_entry orc_fp_entry = {
+ .sp_reg = ORC_REG_FP,
+ .sp_offset = 16,
+ .fp_reg = ORC_REG_PREV_SP,
+ .fp_offset = -16,
+ .ra_reg = ORC_REG_PREV_SP,
+ .ra_offset = -8,
+ .type = ORC_TYPE_CALL
+};
+
+/*
+ * If we crash with IP==0, the last successfully executed instruction
+ * was probably an indirect function call with a NULL function pointer,
+ * and we don't have unwind information for NULL.
+ * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
+ * pointer into its parent and then continue normally from there.
+ */
+static struct orc_entry orc_null_entry = {
+ .sp_reg = ORC_REG_SP,
+ .sp_offset = sizeof(long),
+ .fp_reg = ORC_REG_UNDEFINED,
+ .type = ORC_TYPE_CALL
+};
+
+static inline unsigned long orc_ip(const int *ip)
+{
+ return (unsigned long)ip + *ip;
+}
+
+static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
+ unsigned int num_entries, unsigned long ip)
+{
+ int *first = ip_table;
+ int *mid = first, *found = first;
+ int *last = ip_table + num_entries - 1;
+
+ if (!num_entries)
+ return NULL;
+
+ /*
+ * Do a binary range search to find the rightmost duplicate of a given
+ * starting address. Some entries are section terminators which are
+ * "weak" entries for ensuring there are no gaps. They should be
+ * ignored when they conflict with a real entry.
+ */
+ while (first <= last) {
+ mid = first + ((last - first) / 2);
+
+ if (orc_ip(mid) <= ip) {
+ found = mid;
+ first = mid + 1;
+ } else
+ last = mid - 1;
+ }
+
+ return u_table + (found - ip_table);
+}
+
+#ifdef CONFIG_MODULES
+static struct orc_entry *orc_module_find(unsigned long ip)
+{
+ struct module *mod;
+
+ mod = __module_address(ip);
+ if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
+ return NULL;
+
+ return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind, mod->arch.num_orcs, ip);
+}
+#else
+static struct orc_entry *orc_module_find(unsigned long ip)
+{
+ return NULL;
+}
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+static struct orc_entry *orc_find(unsigned long ip);
+
+/*
+ * Ftrace dynamic trampolines do not have orc entries of their own.
+ * But they are copies of the ftrace entries that are static and
+ * defined in ftrace_*.S, which do have orc entries.
+ *
+ * If the unwinder comes across an ftrace trampoline, find the ftrace
+ * function that was used to create it, and use that ftrace function's
+ * orc entry, as the placement of the return code on the stack will be
+ * identical.
+ */
+static struct orc_entry *orc_ftrace_find(unsigned long ip)
+{
+ struct ftrace_ops *ops;
+ unsigned long tramp_addr, offset;
+
+ ops = ftrace_ops_trampoline(ip);
+ if (!ops)
+ return NULL;
+
+ /* Set tramp_addr to the start of the code copied by the trampoline */
+ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
+ tramp_addr = (unsigned long)ftrace_regs_caller;
+ else
+ tramp_addr = (unsigned long)ftrace_caller;
+
+	/* Now advance tramp_addr to the location within the trampoline that ip is at */
+ offset = ip - ops->trampoline;
+ tramp_addr += offset;
+
+ /* Prevent unlikely recursion */
+ if (ip == tramp_addr)
+ return NULL;
+
+ return orc_find(tramp_addr);
+}
+#else
+static struct orc_entry *orc_ftrace_find(unsigned long ip)
+{
+ return NULL;
+}
+#endif
+
+static struct orc_entry *orc_find(unsigned long ip)
+{
+ static struct orc_entry *orc;
+
+ if (ip == 0)
+ return &orc_null_entry;
+
+ /* For non-init vmlinux addresses, use the fast lookup table: */
+ if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
+ unsigned int idx, start, stop;
+
+ idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;
+
+ if (unlikely((idx >= lookup_num_blocks-1))) {
+ orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n",
+ idx, lookup_num_blocks, (void *)ip);
+ return NULL;
+ }
+
+ start = orc_lookup[idx];
+ stop = orc_lookup[idx + 1] + 1;
+
+ if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
+ (__start_orc_unwind + stop > __stop_orc_unwind))) {
+ orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
+ idx, lookup_num_blocks, start, stop, (void *)ip);
+ return NULL;
+ }
+
+ return __orc_find(__start_orc_unwind_ip + start,
+ __start_orc_unwind + start, stop - start, ip);
+ }
+
+ /* vmlinux .init slow lookup: */
+ if (is_kernel_inittext(ip))
+ return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
+ __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);
+
+ /* Module lookup: */
+ orc = orc_module_find(ip);
+ if (orc)
+ return orc;
+
+ return orc_ftrace_find(ip);
+}
+
+#ifdef CONFIG_MODULES
+
+static DEFINE_MUTEX(sort_mutex);
+static int *cur_orc_ip_table = __start_orc_unwind_ip;
+static struct orc_entry *cur_orc_table = __start_orc_unwind;
+
+static void orc_sort_swap(void *_a, void *_b, int size)
+{
+ int delta = _b - _a;
+ int *a = _a, *b = _b, tmp;
+ struct orc_entry *orc_a, *orc_b;
+
+ /* Swap the .orc_unwind_ip entries: */
+ tmp = *a;
+ *a = *b + delta;
+ *b = tmp - delta;
+
+ /* Swap the corresponding .orc_unwind entries: */
+ orc_a = cur_orc_table + (a - cur_orc_ip_table);
+ orc_b = cur_orc_table + (b - cur_orc_ip_table);
+ swap(*orc_a, *orc_b);
+}
+
+static int orc_sort_cmp(const void *_a, const void *_b)
+{
+ const int *a = _a, *b = _b;
+ unsigned long a_val = orc_ip(a);
+ unsigned long b_val = orc_ip(b);
+ struct orc_entry *orc_a;
+
+ if (a_val > b_val)
+ return 1;
+ if (a_val < b_val)
+ return -1;
+
+ /*
+ * The "weak" section terminator entries need to always be first
+ * to ensure the lookup code skips them in favor of real entries.
+ * These terminator entries exist to handle any gaps created by
+ * whitelisted .o files which didn't get objtool generation.
+ */
+ orc_a = cur_orc_table + (a - cur_orc_ip_table);
+
+ return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
+}
+
+void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
+ void *_orc, size_t orc_size)
+{
+ int *orc_ip = _orc_ip;
+ struct orc_entry *orc = _orc;
+ unsigned int num_entries = orc_ip_size / sizeof(int);
+
+ WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
+ orc_size % sizeof(*orc) != 0 ||
+ num_entries != orc_size / sizeof(*orc));
+
+ /*
+ * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
+ * associate an .orc_unwind_ip table entry with its corresponding
+ * .orc_unwind entry so they can both be swapped.
+ */
+ mutex_lock(&sort_mutex);
+ cur_orc_ip_table = orc_ip;
+ cur_orc_table = orc;
+ sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
+ mutex_unlock(&sort_mutex);
+
+ mod->arch.orc_unwind_ip = orc_ip;
+ mod->arch.orc_unwind = orc;
+ mod->arch.num_orcs = num_entries;
+}
+#endif
+
+void __init unwind_init(void)
+{
+ int i;
+ size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
+ size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
+ size_t num_entries = orc_ip_size / sizeof(int);
+ struct orc_entry *orc;
+
+ if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
+ orc_size % sizeof(struct orc_entry) != 0 ||
+ num_entries != orc_size / sizeof(struct orc_entry)) {
+ orc_warn("WARNING: Bad or missing .orc_unwind table. Disabling unwinder.\n");
+ return;
+ }
+
+ /*
+ * Note, the orc_unwind and orc_unwind_ip tables were already
+ * sorted at build time via the 'sorttable' tool.
+ * It's ready for binary search straight away, no need to sort it.
+ */
+
+ /* Initialize the fast lookup table: */
+ lookup_num_blocks = orc_lookup_end - orc_lookup;
+ for (i = 0; i < lookup_num_blocks-1; i++) {
+ orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
+ num_entries, LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
+ if (!orc) {
+ orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n");
+ return;
+ }
+
+ orc_lookup[i] = orc - __start_orc_unwind;
+ }
+
+ /* Initialize the ending block: */
+ orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries, LOOKUP_STOP_IP);
+ if (!orc) {
+ orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n");
+ return;
+ }
+ orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;
+
+ orc_init = true;
+}
+
+static inline bool on_stack(struct stack_info *info, unsigned long addr, size_t len)
+{
+ unsigned long begin = info->begin;
+ unsigned long end = info->end;
+
+ return (info->type != STACK_TYPE_UNKNOWN &&
+ addr >= begin && addr < end && addr + len > begin && addr + len <= end);
+}
+
+static bool stack_access_ok(struct unwind_state *state, unsigned long addr, size_t len)
+{
+ struct stack_info *info = &state->stack_info;
+
+ if (on_stack(info, addr, len))
+ return true;
+
+ return !get_stack_info(addr, state->task, info) && on_stack(info, addr, len);
+}
+
+unsigned long unwind_get_return_address(struct unwind_state *state)
+{
+ return __unwind_get_return_address(state);
+}
+EXPORT_SYMBOL_GPL(unwind_get_return_address);
+
+void unwind_start(struct unwind_state *state, struct task_struct *task,
+ struct pt_regs *regs)
+{
+ __unwind_start(state, task, regs);
+ state->type = UNWINDER_ORC;
+ if (!unwind_done(state) && !__kernel_text_address(state->pc))
+ unwind_next_frame(state);
+}
+EXPORT_SYMBOL_GPL(unwind_start);
+
+static bool is_entry_func(unsigned long addr)
+{
+ extern u32 kernel_entry;
+ extern u32 kernel_entry_end;
+
+ return addr >= (unsigned long)&kernel_entry && addr < (unsigned long)&kernel_entry_end;
+}
+
+static inline unsigned long bt_address(unsigned long ra)
+{
+ extern unsigned long eentry;
+
+ if (__kernel_text_address(ra))
+ return ra;
+
+ if (__module_text_address(ra))
+ return ra;
+
+ if (ra >= eentry && ra < eentry + EXCCODE_INT_END * VECSIZE) {
+ unsigned long func;
+ unsigned long type = (ra - eentry) / VECSIZE;
+ unsigned long offset = (ra - eentry) % VECSIZE;
+
+ switch (type) {
+ case 0 ... EXCCODE_INT_START - 1:
+ func = (unsigned long)exception_table[type];
+ break;
+ case EXCCODE_INT_START ... EXCCODE_INT_END:
+ func = (unsigned long)handle_vint;
+ break;
+ default:
+ func = (unsigned long)handle_reserved;
+ break;
+ }
+
+ return func + offset;
+ }
+
+ return ra;
+}
+
+bool unwind_next_frame(struct unwind_state *state)
+{
+ unsigned long *p, pc;
+ struct pt_regs *regs;
+ struct orc_entry *orc;
+ struct stack_info *info = &state->stack_info;
+
+ if (unwind_done(state))
+ return false;
+
+ /* Don't let modules unload while we're reading their ORC data. */
+ preempt_disable();
+
+ if (is_entry_func(state->pc))
+ goto end;
+
+ orc = orc_find(state->pc);
+ if (!orc) {
+ /*
+ * As a fallback, try to assume this code uses a frame pointer.
+ * This is useful for generated code, like BPF, which ORC
+ * doesn't know about. This is just a guess, so the rest of
+ * the unwind is no longer considered reliable.
+ */
+ orc = &orc_fp_entry;
+ state->error = true;
+ } else {
+ if (orc->type == ORC_TYPE_UNDEFINED)
+ goto err;
+
+ if (orc->type == ORC_TYPE_END_OF_STACK)
+ goto end;
+ }
+
+ switch (orc->sp_reg) {
+ case ORC_REG_SP:
+ if (info->type == STACK_TYPE_IRQ && state->sp == info->end)
+ orc->type = ORC_TYPE_REGS;
+ else
+ state->sp = state->sp + orc->sp_offset;
+ break;
+ case ORC_REG_FP:
+ state->sp = state->fp;
+ break;
+ default:
+ orc_warn("unknown SP base reg %d at %pB\n", orc->sp_reg, (void *)state->pc);
+ goto err;
+ }
+
+ switch (orc->fp_reg) {
+ case ORC_REG_PREV_SP:
+ p = (unsigned long *)(state->sp + orc->fp_offset);
+ if (!stack_access_ok(state, (unsigned long)p, sizeof(unsigned long)))
+ goto err;
+
+ state->fp = *p;
+ break;
+ case ORC_REG_UNDEFINED:
+ /* Nothing. */
+ break;
+ default:
+ orc_warn("unknown FP base reg %d at %pB\n", orc->fp_reg, (void *)state->pc);
+ goto err;
+ }
+
+ switch (orc->type) {
+ case ORC_TYPE_CALL:
+ if (orc->ra_reg == ORC_REG_PREV_SP) {
+ p = (unsigned long *)(state->sp + orc->ra_offset);
+ if (!stack_access_ok(state, (unsigned long)p, sizeof(unsigned long)))
+ goto err;
+
+ pc = unwind_graph_addr(state, *p, state->sp);
+ pc -= LOONGARCH_INSN_SIZE;
+ } else if (orc->ra_reg == ORC_REG_UNDEFINED) {
+ if (!state->ra || state->ra == state->pc)
+ goto err;
+
+ pc = unwind_graph_addr(state, state->ra, state->sp);
+ pc -= LOONGARCH_INSN_SIZE;
+ state->ra = 0;
+ } else {
+ orc_warn("unknown ra base reg %d at %pB\n", orc->ra_reg, (void *)state->pc);
+ goto err;
+ }
+ break;
+ case ORC_TYPE_REGS:
+ if (info->type == STACK_TYPE_IRQ && state->sp == info->end)
+ regs = (struct pt_regs *)info->next_sp;
+ else
+ regs = (struct pt_regs *)state->sp;
+
+ if (!stack_access_ok(state, (unsigned long)regs, sizeof(*regs)))
+ goto err;
+
+ if ((info->end == (unsigned long)regs + sizeof(*regs)) &&
+ !regs->regs[3] && !regs->regs[1])
+ goto end;
+
+ if (user_mode(regs))
+ goto end;
+
+ pc = regs->csr_era;
+ if (!__kernel_text_address(pc))
+ goto err;
+
+ state->sp = regs->regs[3];
+ state->ra = regs->regs[1];
+ state->fp = regs->regs[22];
+ get_stack_info(state->sp, state->task, info);
+
+ break;
+ default:
+ orc_warn("unknown .orc_unwind entry type %d at %pB\n", orc->type, (void *)state->pc);
+ goto err;
+ }
+
+ state->pc = bt_address(pc);
+ if (!state->pc) {
+ pr_err("cannot find unwind pc at %pK\n", (void *)pc);
+ goto err;
+ }
+
+ if (!__kernel_text_address(state->pc))
+ goto err;
+
+ preempt_enable();
+ return true;
+
+err:
+ state->error = true;
+
+end:
+ preempt_enable();
+ state->stack_info.type = STACK_TYPE_UNKNOWN;
+ return false;
+}
+EXPORT_SYMBOL_GPL(unwind_next_frame);
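+
+/*
+ * For illustration -- a minimal sketch of the consumer side, assuming the
+ * usual arch_stack_walk() pattern: the entry points exported above are
+ * driven in a loop until the unwinder reports it is done.
+ */
+#if 0 /* sketch, not compiled */
+static void walk_stack_sketch(struct task_struct *task, struct pt_regs *regs)
+{
+ struct unwind_state state;
+ unsigned long pc;
+
+ for (unwind_start(&state, task, regs); !unwind_done(&state);
+ unwind_next_frame(&state)) {
+ pc = unwind_get_return_address(&state);
+ if (!pc)
+ break;
+ pr_info(" %pS\n", (void *)pc);
+ }
+}
+#endif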
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
index a5d0cd2035da04..e8e97dbf9ca40f 100644
--- a/arch/loongarch/kernel/vmlinux.lds.S
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -2,6 +2,7 @@
#include <linux/sizes.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
+#include <asm/orc_lookup.h>
#define PAGE_SIZE _PAGE_SIZE
#define RO_EXCEPTION_TABLE_ALIGN 4
@@ -122,6 +123,8 @@ SECTIONS
}
#endif
+ ORC_UNWIND_TABLE
+
.sdata : {
*(.sdata)
}
diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S
index 3634431db18a4a..80e988985a6adf 100644
--- a/arch/loongarch/kvm/switch.S
+++ b/arch/loongarch/kvm/switch.S
@@ -8,7 +8,7 @@
#include <asm/asmmacro.h>
#include <asm/loongarch.h>
#include <asm/regdef.h>
-#include <asm/stackframe.h>
+#include <asm/unwind_hints.h>
#define HGPR_OFFSET(x) (PT_R0 + 8*x)
#define GGPR_OFFSET(x) (KVM_ARCH_GGPR + 8*x)
@@ -112,6 +112,7 @@
.text
.cfi_sections .debug_frame
SYM_CODE_START(kvm_exc_entry)
+ UNWIND_HINT_UNDEFINED
csrwr a2, KVM_TEMP_KS
csrrd a2, KVM_VCPU_KS
addi.d a2, a2, KVM_VCPU_ARCH
@@ -273,3 +274,9 @@ SYM_FUNC_END(kvm_restore_lasx)
.section ".rodata"
SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
+
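+/*
+ * STACK_FRAME_NON_STANDARD tells objtool not to validate (or emit ORC
+ * data for) the LBT-dependent restore paths below.
+ */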
+#ifdef CONFIG_CPU_HAS_LBT
+STACK_FRAME_NON_STANDARD kvm_restore_fpu
+STACK_FRAME_NON_STANDARD kvm_restore_lsx
+STACK_FRAME_NON_STANDARD kvm_restore_lasx
+#endif
diff --git a/arch/loongarch/lib/clear_user.S b/arch/loongarch/lib/clear_user.S
index be741544e62bf6..7a0db643b2866c 100644
--- a/arch/loongarch/lib/clear_user.S
+++ b/arch/loongarch/lib/clear_user.S
@@ -10,6 +10,7 @@
#include <asm/asm-extable.h>
#include <asm/cpu.h>
#include <asm/regdef.h>
+#include <asm/unwind_hints.h>
SYM_FUNC_START(__clear_user)
/*
@@ -204,3 +205,5 @@ SYM_FUNC_START(__clear_user_fast)
_asm_extable 28b, .Lsmall_fixup
_asm_extable 29b, .Lexit
SYM_FUNC_END(__clear_user_fast)
+
+STACK_FRAME_NON_STANDARD __clear_user_fast
diff --git a/arch/loongarch/lib/copy_user.S b/arch/loongarch/lib/copy_user.S
index feec3d3628032f..095ce9181c6c04 100644
--- a/arch/loongarch/lib/copy_user.S
+++ b/arch/loongarch/lib/copy_user.S
@@ -10,6 +10,7 @@
#include <asm/asm-extable.h>
#include <asm/cpu.h>
#include <asm/regdef.h>
+#include <asm/unwind_hints.h>
SYM_FUNC_START(__copy_user)
/*
@@ -278,3 +279,5 @@ SYM_FUNC_START(__copy_user_fast)
_asm_extable 58b, .Lexit
_asm_extable 59b, .Lexit
SYM_FUNC_END(__copy_user_fast)
+
+STACK_FRAME_NON_STANDARD __copy_user_fast
diff --git a/arch/loongarch/lib/memcpy.S b/arch/loongarch/lib/memcpy.S
index fa1148878d2b9d..9517a2f961af3d 100644
--- a/arch/loongarch/lib/memcpy.S
+++ b/arch/loongarch/lib/memcpy.S
@@ -9,6 +9,7 @@
#include <asm/asmmacro.h>
#include <asm/cpu.h>
#include <asm/regdef.h>
+#include <asm/unwind_hints.h>
.section .noinstr.text, "ax"
@@ -197,3 +198,5 @@ SYM_FUNC_START(__memcpy_fast)
jr ra
SYM_FUNC_END(__memcpy_fast)
_ASM_NOKPROBE(__memcpy_fast)
+
+STACK_FRAME_NON_STANDARD __memcpy_small
diff --git a/arch/loongarch/lib/memset.S b/arch/loongarch/lib/memset.S
index 06d3ca54cbfe7d..df38466205531d 100644
--- a/arch/loongarch/lib/memset.S
+++ b/arch/loongarch/lib/memset.S
@@ -9,6 +9,7 @@
#include <asm/asmmacro.h>
#include <asm/cpu.h>
#include <asm/regdef.h>
+#include <asm/unwind_hints.h>
.macro fill_to_64 r0
bstrins.d \r0, \r0, 15, 8
@@ -166,3 +167,5 @@ SYM_FUNC_START(__memset_fast)
jr ra
SYM_FUNC_END(__memset_fast)
_ASM_NOKPROBE(__memset_fast)
+
+STACK_FRAME_NON_STANDARD __memset_fast
diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
index 0b95d32b30c947..5ac9beb5f0935e 100644
--- a/arch/loongarch/mm/tlb.c
+++ b/arch/loongarch/mm/tlb.c
@@ -9,8 +9,9 @@
#include <linux/hugetlb.h>
#include <linux/export.h>
-#include <asm/cpu.h>
#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/exception.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
@@ -266,24 +267,20 @@ static void setup_tlb_handler(int cpu)
setup_ptwalker();
local_flush_tlb_all();
+ if (cpu_has_ptw) {
+ exception_table[EXCCODE_TLBI] = handle_tlb_load_ptw;
+ exception_table[EXCCODE_TLBL] = handle_tlb_load_ptw;
+ exception_table[EXCCODE_TLBS] = handle_tlb_store_ptw;
+ exception_table[EXCCODE_TLBM] = handle_tlb_modify_ptw;
+ }
+
/* The tlb handlers are generated only once */
if (cpu == 0) {
memcpy((void *)tlbrentry, handle_tlb_refill, 0x80);
local_flush_icache_range(tlbrentry, tlbrentry + 0x80);
- if (!cpu_has_ptw) {
- set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load, VECSIZE);
- set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load, VECSIZE);
- set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store, VECSIZE);
- set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify, VECSIZE);
- } else {
- set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load_ptw, VECSIZE);
- set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load_ptw, VECSIZE);
- set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store_ptw, VECSIZE);
- set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify_ptw, VECSIZE);
- }
- set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE);
- set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE);
- set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE);
+
+ for (int i = EXCCODE_TLBL; i <= EXCCODE_TLBPE; i++)
+ set_handler(i * VECSIZE, exception_table[i], VECSIZE);
} else {
int vec_sz __maybe_unused;
void *addr __maybe_unused;
diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
index d5d682f3d29f3a..a44387b838af61 100644
--- a/arch/loongarch/mm/tlbex.S
+++ b/arch/loongarch/mm/tlbex.S
@@ -18,6 +18,7 @@
.macro tlb_do_page_fault, write
SYM_CODE_START(tlb_do_page_fault_\write)
+ UNWIND_HINT_UNDEFINED
SAVE_ALL
csrrd a2, LOONGARCH_CSR_BADV
move a0, sp
@@ -32,6 +33,7 @@
tlb_do_page_fault 1
SYM_CODE_START(handle_tlb_protect)
+ UNWIND_HINT_UNDEFINED
BACKUP_T0T1
SAVE_ALL
move a0, sp
@@ -44,6 +46,7 @@ SYM_CODE_START(handle_tlb_protect)
SYM_CODE_END(handle_tlb_protect)
SYM_CODE_START(handle_tlb_load)
+ UNWIND_HINT_UNDEFINED
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
csrwr ra, EXCEPTION_KS2
@@ -190,6 +193,7 @@ nopage_tlb_load:
SYM_CODE_END(handle_tlb_load)
SYM_CODE_START(handle_tlb_load_ptw)
+ UNWIND_HINT_UNDEFINED
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la_abs t0, tlb_do_page_fault_0
@@ -197,6 +201,7 @@ SYM_CODE_START(handle_tlb_load_ptw)
SYM_CODE_END(handle_tlb_load_ptw)
SYM_CODE_START(handle_tlb_store)
+ UNWIND_HINT_UNDEFINED
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
csrwr ra, EXCEPTION_KS2
@@ -346,6 +351,7 @@ nopage_tlb_store:
SYM_CODE_END(handle_tlb_store)
SYM_CODE_START(handle_tlb_store_ptw)
+ UNWIND_HINT_UNDEFINED
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la_abs t0, tlb_do_page_fault_1
@@ -353,6 +359,7 @@ SYM_CODE_START(handle_tlb_store_ptw)
SYM_CODE_END(handle_tlb_store_ptw)
SYM_CODE_START(handle_tlb_modify)
+ UNWIND_HINT_UNDEFINED
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
csrwr ra, EXCEPTION_KS2
@@ -500,6 +507,7 @@ nopage_tlb_modify:
SYM_CODE_END(handle_tlb_modify)
SYM_CODE_START(handle_tlb_modify_ptw)
+ UNWIND_HINT_UNDEFINED
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la_abs t0, tlb_do_page_fault_1
@@ -507,6 +515,7 @@ SYM_CODE_START(handle_tlb_modify_ptw)
SYM_CODE_END(handle_tlb_modify_ptw)
SYM_CODE_START(handle_tlb_refill)
+ UNWIND_HINT_UNDEFINED
csrwr t0, LOONGARCH_CSR_TLBRSAVE
csrrd t0, LOONGARCH_CSR_PGD
lddir t0, t0, 3
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index f597cd08a96be0..75c6726382c34f 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -4,6 +4,7 @@
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
KCOV_INSTRUMENT := n
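+# OBJECT_FILES_NON_STANDARD opts all objects in this directory out of
+# objtool processing; vDSO code runs in userspace and is not covered by
+# the kernel's ORC unwinder.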
+OBJECT_FILES_NON_STANDARD := y
# Include the generic Makefile to check the built vdso.
include $(srctree)/lib/vdso/Makefile
diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h
new file mode 100644
index 00000000000000..47c5a1991d1034
--- /dev/null
+++ b/arch/parisc/include/asm/mman.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMAN_H__
+#define __ASM_MMAN_H__
+
+#include <uapi/asm/mman.h>
+
+/* PA-RISC cannot allow MDWE (memory-deny-write-execute), as it needs writable stacks */
+static inline bool arch_memory_deny_write_exec_supported(void)
+{
+ return false;
+}
+#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
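+
+/*
+ * A sketch of the generic fallback this overrides: when an architecture
+ * does not define the macro above, include/linux/mman.h falls back to
+ *
+ *	static inline bool arch_memory_deny_write_exec_supported(void)
+ *	{
+ *		return true;
+ *	}
+ */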
+
+#endif /* __ASM_MMAN_H__ */
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a68b9e637eda04..1c4be337368604 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -607,11 +607,6 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
config ARCH_SUPPORTS_KEXEC
def_bool PPC_BOOK3S || PPC_E500 || (44x && !SMP)
-config ARCH_SELECTS_KEXEC
- def_bool y
- depends on KEXEC
- select CRASH_DUMP
-
config ARCH_SUPPORTS_KEXEC_FILE
def_bool PPC64
@@ -622,7 +617,6 @@ config ARCH_SELECTS_KEXEC_FILE
def_bool y
depends on KEXEC_FILE
select KEXEC_ELF
- select CRASH_DUMP
select HAVE_IMA_KEXEC if IMA
config PPC64_BIG_ENDIAN_ELF_ABI_V2
@@ -694,8 +688,7 @@ config ARCH_SELECTS_CRASH_DUMP
config FA_DUMP
bool "Firmware-assisted dump"
- depends on PPC64 && (PPC_RTAS || PPC_POWERNV)
- select CRASH_DUMP
+ depends on CRASH_DUMP && PPC64 && (PPC_RTAS || PPC_POWERNV)
help
A robust mechanism to get reliable kernel crash dump with
assistance from firmware. This approach does not use kexec,
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index e1b43aa1217535..fdb90e24dc74d4 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -55,59 +55,18 @@
typedef void (*crash_shutdown_t)(void);
#ifdef CONFIG_KEXEC_CORE
-
-/*
- * This function is responsible for capturing register states if coming
- * via panic or invoking dump using sysrq-trigger.
- */
-static inline void crash_setup_regs(struct pt_regs *newregs,
- struct pt_regs *oldregs)
-{
- if (oldregs)
- memcpy(newregs, oldregs, sizeof(*newregs));
- else
- ppc_save_regs(newregs);
-}
+struct kimage;
+struct pt_regs;
extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
master to copy new code to 0 */
-extern int crashing_cpu;
-extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
-extern void crash_ipi_callback(struct pt_regs *);
-extern int crash_wake_offline;
-
-struct kimage;
-struct pt_regs;
extern void default_machine_kexec(struct kimage *image);
-extern void default_machine_crash_shutdown(struct pt_regs *regs);
-extern int crash_shutdown_register(crash_shutdown_t handler);
-extern int crash_shutdown_unregister(crash_shutdown_t handler);
-
-extern void crash_kexec_prepare(void);
-extern void crash_kexec_secondary(struct pt_regs *regs);
-int __init overlaps_crashkernel(unsigned long start, unsigned long size);
-extern void reserve_crashkernel(void);
extern void machine_kexec_mask_interrupts(void);
-static inline bool kdump_in_progress(void)
-{
- return crashing_cpu >= 0;
-}
-
void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer,
unsigned long start_address) __noreturn;
-
void kexec_copy_flush(struct kimage *image);
-#if defined(CONFIG_CRASH_DUMP)
-bool is_kdump_kernel(void);
-#define is_kdump_kernel is_kdump_kernel
-#if defined(CONFIG_PPC_RTAS)
-void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
-#define crash_free_reserved_phys_range crash_free_reserved_phys_range
-#endif /* CONFIG_PPC_RTAS */
-#endif /* CONFIG_CRASH_DUMP */
-
#ifdef CONFIG_KEXEC_FILE
extern const struct kexec_file_ops kexec_elf64_ops;
@@ -152,15 +111,56 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
#endif /* CONFIG_KEXEC_FILE */
-#else /* !CONFIG_KEXEC_CORE */
-static inline void crash_kexec_secondary(struct pt_regs *regs) { }
+#endif /* CONFIG_KEXEC_CORE */
+
+#ifdef CONFIG_CRASH_RESERVE
+int __init overlaps_crashkernel(unsigned long start, unsigned long size);
+extern void reserve_crashkernel(void);
+#else
+static inline void reserve_crashkernel(void) {}
+static inline int overlaps_crashkernel(unsigned long start, unsigned long size) { return 0; }
+#endif
-static inline int overlaps_crashkernel(unsigned long start, unsigned long size)
+#if defined(CONFIG_CRASH_DUMP)
+/*
+ * This function is responsible for capturing register states when
+ * entering via panic or when a dump is invoked via sysrq-trigger.
+ */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs)
{
- return 0;
+ if (oldregs)
+ memcpy(newregs, oldregs, sizeof(*newregs));
+ else
+ ppc_save_regs(newregs);
+}
+
+extern int crashing_cpu;
+extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
+extern void crash_ipi_callback(struct pt_regs *regs);
+extern int crash_wake_offline;
+
+extern int crash_shutdown_register(crash_shutdown_t handler);
+extern int crash_shutdown_unregister(crash_shutdown_t handler);
+extern void default_machine_crash_shutdown(struct pt_regs *regs);
+
+extern void crash_kexec_prepare(void);
+extern void crash_kexec_secondary(struct pt_regs *regs);
+
+static inline bool kdump_in_progress(void)
+{
+ return crashing_cpu >= 0;
}
-static inline void reserve_crashkernel(void) { ; }
+bool is_kdump_kernel(void);
+#define is_kdump_kernel is_kdump_kernel
+#if defined(CONFIG_PPC_RTAS)
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
+#define crash_free_reserved_phys_range crash_free_reserved_phys_range
+#endif /* CONFIG_PPC_RTAS */
+
+#else /* !CONFIG_CRASH_DUMP */
+static inline void crash_kexec_secondary(struct pt_regs *regs) { }
static inline int crash_shutdown_register(crash_shutdown_t handler)
{
@@ -183,7 +183,7 @@ static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
}
-#endif /* CONFIG_KEXEC_CORE */
+#endif /* CONFIG_CRASH_DUMP */
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/kexec.h>
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 1dc32a05815612..cd8d8883de9040 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -475,7 +475,7 @@ static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
tce_alloc_end = *lprop;
#endif
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_RESERVE
lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
if (lprop)
crashk_res.start = *lprop;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 2add292da49432..01ed1263e1a989 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -110,7 +110,7 @@ int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif
-#ifdef CONFIG_VMCORE_INFO
+#ifdef CONFIG_CRASH_DUMP
/* This keeps a track of which one is the crashing cpu. */
int crashing_cpu = -1;
#endif
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index a60e4139214be5..12e53b3d792379 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -588,7 +588,7 @@ void smp_send_debugger_break(void)
}
#endif
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_DUMP
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
int cpu;
@@ -631,7 +631,7 @@ void crash_smp_send_stop(void)
stopped = true;
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_DUMP
if (kexec_crash_image) {
crash_kexec_prepare();
return;
diff --git a/arch/powerpc/kexec/Makefile b/arch/powerpc/kexec/Makefile
index 91e96f5168b753..8e469c4da3f882 100644
--- a/arch/powerpc/kexec/Makefile
+++ b/arch/powerpc/kexec/Makefile
@@ -3,12 +3,13 @@
# Makefile for the linux kernel.
#
-obj-y += core.o crash.o core_$(BITS).o
+obj-y += core.o core_$(BITS).o
obj-$(CONFIG_PPC32) += relocate_32.o
obj-$(CONFIG_KEXEC_FILE) += file_load.o ranges.o file_load_$(BITS).o elf_$(BITS).o
obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o
+obj-$(CONFIG_CRASH_DUMP) += crash.o
# Disable GCOV, KCOV & sanitizers in odd or sensitive code
GCOV_PROFILE_core_$(BITS).o := n
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index 3ff4411ed49671..b8333a49ea5daa 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -44,10 +44,12 @@ void machine_kexec_mask_interrupts(void) {
}
}
+#ifdef CONFIG_CRASH_DUMP
void machine_crash_shutdown(struct pt_regs *regs)
{
default_machine_crash_shutdown(regs);
}
+#endif
void machine_kexec_cleanup(struct kimage *image)
{
@@ -77,6 +79,7 @@ void machine_kexec(struct kimage *image)
for(;;);
}
+#ifdef CONFIG_CRASH_RESERVE
void __init reserve_crashkernel(void)
{
unsigned long long crash_size, crash_base, total_mem_sz;
@@ -251,3 +254,4 @@ static int __init kexec_setup(void)
return 0;
}
late_initcall(kexec_setup);
+#endif /* CONFIG_CRASH_RESERVE */
diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c
index 904016cf89ea46..6d8951e8e96663 100644
--- a/arch/powerpc/kexec/elf_64.c
+++ b/arch/powerpc/kexec/elf_64.c
@@ -47,7 +47,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
if (ret)
return ERR_PTR(ret);
- if (image->type == KEXEC_TYPE_CRASH) {
+ if (IS_ENABLED(CONFIG_CRASH_DUMP) && image->type == KEXEC_TYPE_CRASH) {
/* min & max buffer values for kdump case */
kbuf.buf_min = pbuf.buf_min = crashk_res.start;
kbuf.buf_max = pbuf.buf_max =
@@ -70,7 +70,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
kexec_dprintk("Loaded purgatory at 0x%lx\n", pbuf.mem);
/* Load additional segments needed for panic kernel */
- if (image->type == KEXEC_TYPE_CRASH) {
+ if (IS_ENABLED(CONFIG_CRASH_DUMP) && image->type == KEXEC_TYPE_CRASH) {
ret = load_crashdump_segments_ppc64(image, &kbuf);
if (ret) {
pr_err("Failed to load kdump kernel segments\n");
diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
index 5b4c5cb2335485..1bc65de6174f3e 100644
--- a/arch/powerpc/kexec/file_load_64.c
+++ b/arch/powerpc/kexec/file_load_64.c
@@ -97,119 +97,6 @@ out:
}
/**
- * get_usable_memory_ranges - Get usable memory ranges. This list includes
- * regions like crashkernel, opal/rtas & tce-table,
- * that kdump kernel could use.
- * @mem_ranges: Range list to add the memory ranges to.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int get_usable_memory_ranges(struct crash_mem **mem_ranges)
-{
- int ret;
-
- /*
- * Early boot failure observed on guests when low memory (first memory
- * block?) is not added to usable memory. So, add [0, crashk_res.end]
- * instead of [crashk_res.start, crashk_res.end] to workaround it.
- * Also, crashed kernel's memory must be added to reserve map to
- * avoid kdump kernel from using it.
- */
- ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);
- if (ret)
- goto out;
-
- ret = add_rtas_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_opal_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_tce_mem_ranges(mem_ranges);
-out:
- if (ret)
- pr_err("Failed to setup usable memory ranges\n");
- return ret;
-}
-
-/**
- * get_crash_memory_ranges - Get crash memory ranges. This list includes
- * first/crashing kernel's memory regions that
- * would be exported via an elfcore.
- * @mem_ranges: Range list to add the memory ranges to.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int get_crash_memory_ranges(struct crash_mem **mem_ranges)
-{
- phys_addr_t base, end;
- struct crash_mem *tmem;
- u64 i;
- int ret;
-
- for_each_mem_range(i, &base, &end) {
- u64 size = end - base;
-
- /* Skip backup memory region, which needs a separate entry */
- if (base == BACKUP_SRC_START) {
- if (size > BACKUP_SRC_SIZE) {
- base = BACKUP_SRC_END + 1;
- size -= BACKUP_SRC_SIZE;
- } else
- continue;
- }
-
- ret = add_mem_range(mem_ranges, base, size);
- if (ret)
- goto out;
-
- /* Try merging adjacent ranges before reallocation attempt */
- if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)
- sort_memory_ranges(*mem_ranges, true);
- }
-
- /* Reallocate memory ranges if there is no space to split ranges */
- tmem = *mem_ranges;
- if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
- tmem = realloc_mem_ranges(mem_ranges);
- if (!tmem)
- goto out;
- }
-
- /* Exclude crashkernel region */
- ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);
- if (ret)
- goto out;
-
- /*
- * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
- * regions are exported to save their context at the time of
- * crash, they should actually be backed up just like the
- * first 64K bytes of memory.
- */
- ret = add_rtas_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_opal_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- /* create a separate program header for the backup region */
- ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE);
- if (ret)
- goto out;
-
- sort_memory_ranges(*mem_ranges, false);
-out:
- if (ret)
- pr_err("Failed to setup crash memory ranges\n");
- return ret;
-}
-
-/**
* get_reserved_memory_ranges - Get reserve memory ranges. This list includes
* memory regions that should be added to the
* memory reserve map to ensure the region is
@@ -434,6 +321,120 @@ static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
return ret;
}
+#ifdef CONFIG_CRASH_DUMP
+/**
+ * get_usable_memory_ranges - Get usable memory ranges. This list includes
+ * regions like crashkernel, opal/rtas & tce-table,
+ * that kdump kernel could use.
+ * @mem_ranges: Range list to add the memory ranges to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int get_usable_memory_ranges(struct crash_mem **mem_ranges)
+{
+ int ret;
+
+ /*
+ * Early boot failures have been observed on guests when low memory (the
+ * first memory block?) is not added to usable memory. So, add
+ * [0, crashk_res.end] instead of [crashk_res.start, crashk_res.end] to
+ * work around it. Also, the crashed kernel's memory must be added to the
+ * reserve map to prevent the kdump kernel from using it.
+ */
+ ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);
+ if (ret)
+ goto out;
+
+ ret = add_rtas_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_opal_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_tce_mem_ranges(mem_ranges);
+out:
+ if (ret)
+ pr_err("Failed to setup usable memory ranges\n");
+ return ret;
+}
+
+/**
+ * get_crash_memory_ranges - Get crash memory ranges. This list includes
+ * first/crashing kernel's memory regions that
+ * would be exported via an elfcore.
+ * @mem_ranges: Range list to add the memory ranges to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int get_crash_memory_ranges(struct crash_mem **mem_ranges)
+{
+ phys_addr_t base, end;
+ struct crash_mem *tmem;
+ u64 i;
+ int ret;
+
+ for_each_mem_range(i, &base, &end) {
+ u64 size = end - base;
+
+ /* Skip backup memory region, which needs a separate entry */
+ if (base == BACKUP_SRC_START) {
+ if (size > BACKUP_SRC_SIZE) {
+ base = BACKUP_SRC_END + 1;
+ size -= BACKUP_SRC_SIZE;
+ } else
+ continue;
+ }
+
+ ret = add_mem_range(mem_ranges, base, size);
+ if (ret)
+ goto out;
+
+ /* Try merging adjacent ranges before reallocation attempt */
+ if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)
+ sort_memory_ranges(*mem_ranges, true);
+ }
+
+ /* Reallocate memory ranges if there is no space to split ranges */
+ tmem = *mem_ranges;
+ if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
+ tmem = realloc_mem_ranges(mem_ranges);
+ if (!tmem)
+ goto out;
+ }
+
+ /* Exclude crashkernel region */
+ ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);
+ if (ret)
+ goto out;
+
+ /*
+ * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
+ * regions are exported to save their context at the time of
+ * crash, they should actually be backed up just like the
+ * first 64K bytes of memory.
+ */
+ ret = add_rtas_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_opal_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ /* create a separate program header for the backup region */
+ ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE);
+ if (ret)
+ goto out;
+
+ sort_memory_ranges(*mem_ranges, false);
+out:
+ if (ret)
+ pr_err("Failed to setup crash memory ranges\n");
+ return ret;
+}
+
/**
* check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
* @um_info: Usable memory buffer and ranges info.
@@ -863,6 +864,7 @@ int load_crashdump_segments_ppc64(struct kimage *image,
return 0;
}
+#endif
/**
* setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global
@@ -972,26 +974,14 @@ static unsigned int cpu_node_size(void)
return size;
}
-/**
- * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to
- * setup FDT for kexec/kdump kernel.
- * @image: kexec image being loaded.
- *
- * Returns the estimated extra size needed for kexec/kdump kernel FDT.
- */
-unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
+static unsigned int kdump_extra_fdt_size_ppc64(struct kimage *image)
{
unsigned int cpu_nodes, extra_size = 0;
struct device_node *dn;
u64 usm_entries;
- // Budget some space for the password blob. There's already extra space
- // for the key name
- if (plpks_is_available())
- extra_size += (unsigned int)plpks_get_passwordlen();
-
- if (image->type != KEXEC_TYPE_CRASH)
- return extra_size;
+ if (!IS_ENABLED(CONFIG_CRASH_DUMP) || image->type != KEXEC_TYPE_CRASH)
+ return 0;
/*
* For kdump kernel, account for linux,usable-memory and
@@ -1020,6 +1010,25 @@ unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
}
/**
+ * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to
+ * setup FDT for kexec/kdump kernel.
+ * @image: kexec image being loaded.
+ *
+ * Returns the estimated extra size needed for kexec/kdump kernel FDT.
+ */
+unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
+{
+ unsigned int extra_size = 0;
+
+ // Budget some space for the password blob. There's already extra space
+ // for the key name
+ if (plpks_is_available())
+ extra_size += (unsigned int)plpks_get_passwordlen();
+
+ return extra_size + kdump_extra_fdt_size_ppc64(image);
+}
+
+/**
* add_node_props - Reads node properties from device node structure and add
* them to fdt.
* @fdt: Flattened device tree of the kernel
@@ -1171,6 +1180,7 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
struct crash_mem *umem = NULL, *rmem = NULL;
int i, nr_ranges, ret;
+#ifdef CONFIG_CRASH_DUMP
/*
* Restrict memory usage for kdump kernel by setting up
* usable memory ranges and memory reserve map.
@@ -1207,6 +1217,7 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
goto out;
}
}
+#endif
/* Update cpus nodes information to account hotplug CPUs. */
ret = update_cpus_node(fdt);
@@ -1278,7 +1289,7 @@ int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
buf_min = kbuf->buf_min;
buf_max = kbuf->buf_max;
/* Segments for kdump kernel should be within crashkernel region */
- if (kbuf->image->type == KEXEC_TYPE_CRASH) {
+ if (IS_ENABLED(CONFIG_CRASH_DUMP) && kbuf->image->type == KEXEC_TYPE_CRASH) {
buf_min = (buf_min < crashk_res.start ?
crashk_res.start : buf_min);
buf_max = (buf_max > crashk_res.end ?
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 5445587bfe8418..100f999871bc3b 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -193,7 +193,7 @@ static bool is_module_segment(unsigned long addr)
return true;
}
-void mmu_mark_initmem_nx(void)
+int mmu_mark_initmem_nx(void)
{
int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
int i;
@@ -230,9 +230,10 @@ void mmu_mark_initmem_nx(void)
mtsr(mfsr(i << 28) | 0x10000000, i << 28);
}
+ return 0;
}
-void mmu_mark_rodata_ro(void)
+int mmu_mark_rodata_ro(void)
{
int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
int i;
@@ -245,6 +246,8 @@ void mmu_mark_rodata_ro(void)
}
update_bats();
+
+ return 0;
}
/*
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 8e84bc214d133c..6949c2c937e72f 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -160,11 +160,11 @@ static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; }
#endif
#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_E500)
-void mmu_mark_initmem_nx(void);
-void mmu_mark_rodata_ro(void);
+int mmu_mark_initmem_nx(void);
+int mmu_mark_rodata_ro(void);
#else
-static inline void mmu_mark_initmem_nx(void) { }
-static inline void mmu_mark_rodata_ro(void) { }
+static inline int mmu_mark_initmem_nx(void) { return 0; }
+static inline int mmu_mark_rodata_ro(void) { return 0; }
#endif
#ifdef CONFIG_PPC_8xx
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 6be6421086ed99..43d4842bb1c7a7 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -119,23 +119,26 @@ void __init mmu_mapin_immr(void)
PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
}
-static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
- pgprot_t prot, bool new)
+static int mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
+ pgprot_t prot, bool new)
{
unsigned long v = PAGE_OFFSET + offset;
unsigned long p = offset;
+ int err = 0;
WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));
- for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
- __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
- for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
- __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
- for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
- __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+ for (; p < ALIGN(p, SZ_8M) && p < top && !err; p += SZ_512K, v += SZ_512K)
+ err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+ for (; p < ALIGN_DOWN(top, SZ_8M) && p < top && !err; p += SZ_8M, v += SZ_8M)
+ err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
+ for (; p < ALIGN_DOWN(top, SZ_512K) && p < top && !err; p += SZ_512K, v += SZ_512K)
+ err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
if (!new)
flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top);
+
+ return err;
}
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
@@ -166,27 +169,33 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
return top;
}
-void mmu_mark_initmem_nx(void)
+int mmu_mark_initmem_nx(void)
{
unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
unsigned long sinittext = __pa(_sinittext);
unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+ int err = 0;
if (!debug_pagealloc_enabled_or_kfence())
- mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
+ err = mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
mmu_pin_tlb(block_mapped_ram, false);
+
+ return err;
}
#ifdef CONFIG_STRICT_KERNEL_RWX
-void mmu_mark_rodata_ro(void)
+int mmu_mark_rodata_ro(void)
{
unsigned long sinittext = __pa(_sinittext);
+ int err;
- mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
+ err = mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
mmu_pin_tlb(block_mapped_ram, true);
+
+ return err;
}
#endif
diff --git a/arch/powerpc/mm/nohash/e500.c b/arch/powerpc/mm/nohash/e500.c
index 921c3521ec113e..266fb22131fc1e 100644
--- a/arch/powerpc/mm/nohash/e500.c
+++ b/arch/powerpc/mm/nohash/e500.c
@@ -285,19 +285,23 @@ void __init adjust_total_lowmem(void)
}
#ifdef CONFIG_STRICT_KERNEL_RWX
-void mmu_mark_rodata_ro(void)
+int mmu_mark_rodata_ro(void)
{
unsigned long remapped;
remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false);
- WARN_ON(__max_low_memory != remapped);
+ if (WARN_ON(__max_low_memory != remapped))
+ return -EINVAL;
+
+ return 0;
}
#endif
-void mmu_mark_initmem_nx(void)
+int mmu_mark_initmem_nx(void)
{
/* Everything is done in mmu_mark_rodata_ro() */
+ return 0;
}
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index face94977cb2fb..cfd622ebf774b0 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -130,31 +130,41 @@ void __init mapin_ram(void)
}
}
-void mark_initmem_nx(void)
+static int __mark_initmem_nx(void)
{
unsigned long numpages = PFN_UP((unsigned long)_einittext) -
PFN_DOWN((unsigned long)_sinittext);
+ int err;
- mmu_mark_initmem_nx();
+ err = mmu_mark_initmem_nx();
if (!v_block_mapped((unsigned long)_sinittext)) {
- set_memory_nx((unsigned long)_sinittext, numpages);
- set_memory_rw((unsigned long)_sinittext, numpages);
+ err = set_memory_nx((unsigned long)_sinittext, numpages);
+ if (err)
+ return err;
+ err = set_memory_rw((unsigned long)_sinittext, numpages);
}
+ return err;
+}
+
+void mark_initmem_nx(void)
+{
+ int err = __mark_initmem_nx();
+
+ if (err)
+ panic("%s() failed, err = %d\n", __func__, err);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
-void mark_rodata_ro(void)
+static int __mark_rodata_ro(void)
{
unsigned long numpages;
if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && mmu_has_feature(MMU_FTR_HPTE_TABLE))
pr_warn("This platform has HASH MMU, STRICT_MODULE_RWX won't work\n");
- if (v_block_mapped((unsigned long)_stext + 1)) {
- mmu_mark_rodata_ro();
- return;
- }
+ if (v_block_mapped((unsigned long)_stext + 1))
+ return mmu_mark_rodata_ro();
/*
* mark text and rodata as read only. __end_rodata is set by
@@ -164,6 +174,14 @@ void mark_rodata_ro(void)
numpages = PFN_UP((unsigned long)__end_rodata) -
PFN_DOWN((unsigned long)_stext);
- set_memory_ro((unsigned long)_stext, numpages);
+ return set_memory_ro((unsigned long)_stext, numpages);
+}
+
+void mark_rodata_ro(void)
+{
+ int err = __mark_rodata_ro();
+
+ if (err)
+ panic("%s() failed, err = %d\n", __func__, err);
}
#endif
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 9e1a25398f98cb..8f14f0581a21b1 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -434,7 +434,7 @@ void __init pnv_smp_init(void)
smp_ops = &pnv_smp_ops;
#ifdef CONFIG_HOTPLUG_CPU
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_DUMP
crash_wake_offline = 1;
#endif
#endif
diff --git a/arch/riscv/Kbuild b/arch/riscv/Kbuild
index d25ad1c19f881d..2c585f7a0b6ef3 100644
--- a/arch/riscv/Kbuild
+++ b/arch/riscv/Kbuild
@@ -2,6 +2,7 @@
obj-y += kernel/ mm/ net/
obj-$(CONFIG_BUILTIN_DTB) += boot/dts/
+obj-$(CONFIG_CRYPTO) += crypto/
obj-y += errata/
obj-$(CONFIG_KVM) += kvm/
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 92b1dbf55176fa..be09c8836d56be 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -27,14 +27,18 @@ config RISCV
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_KCOV
+ select ARCH_HAS_MEMBARRIER_CALLBACKS
+ select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_MMIOWB
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PMEM_API
+ select ARCH_HAS_PREPARE_SYNC_CORE_CMD
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SET_DIRECT_MAP if MMU
select ARCH_HAS_SET_MEMORY if MMU
select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL
+ select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN
@@ -47,6 +51,9 @@ config RISCV
select ARCH_SUPPORTS_CFI_CLANG
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
select ARCH_SUPPORTS_HUGETLBFS if MMU
+ # LLD >= 14: https://github.com/llvm/llvm-project/issues/50505
+ select ARCH_SUPPORTS_LTO_CLANG if LLD_VERSION >= 140000
+ select ARCH_SUPPORTS_LTO_CLANG_THIN if LLD_VERSION >= 140000
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
select ARCH_SUPPORTS_PER_VMA_LOCK if MMU
select ARCH_SUPPORTS_SHADOW_CALL_STACK if HAVE_SHADOW_CALL_STACK
@@ -106,6 +113,7 @@ config RISCV
select HAVE_ARCH_KGDB_QXFER_PKT
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+ select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
@@ -124,6 +132,7 @@ config RISCV
select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION
select HAVE_EBPF_JIT if MMU
+ select HAVE_FAST_GUP if MMU
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_GCC_PLUGINS
@@ -155,6 +164,7 @@ config RISCV
select IRQ_FORCED_THREADING
select KASAN_VMALLOC if KASAN
select LOCK_MM_AND_FIND_VMA
+ select MMU_GATHER_RCU_TABLE_FREE if SMP && MMU
select MODULES_USE_ELF_RELA if MODULES
select MODULE_SECTIONS if MODULES
select OF
@@ -576,6 +586,13 @@ config TOOLCHAIN_HAS_ZBB
depends on LLD_VERSION >= 150000 || LD_VERSION >= 23900
depends on AS_HAS_OPTION_ARCH
+# This symbol indicates that the toolchain supports all v1.0 vector crypto
+# extensions, including Zvk*, Zvbb, and Zvbc. LLVM added all of these at once.
+# binutils added all except Zvkb, then added Zvkb. So we just check for Zvkb.
+config TOOLCHAIN_HAS_VECTOR_CRYPTO
+ def_bool $(as-instr, .option arch$(comma) +v$(comma) +zvkb)
+ depends on AS_HAS_OPTION_ARCH
+
config RISCV_ISA_ZBB
bool "Zbb extension support for bit manipulation instructions"
depends on TOOLCHAIN_HAS_ZBB
@@ -686,27 +703,61 @@ config THREAD_SIZE_ORDER
affects irq stack size, which is equal to thread stack size.
config RISCV_MISALIGNED
- bool "Support misaligned load/store traps for kernel and userspace"
+ bool
select SYSCTL_ARCH_UNALIGN_ALLOW
- default y
help
- Say Y here if you want the kernel to embed support for misaligned
- load/store for both kernel and userspace. When disable, misaligned
- accesses will generate SIGBUS in userspace and panic in kernel.
+ Embed support for emulating misaligned loads and stores.
+
+choice
+ prompt "Unaligned Accesses Support"
+ default RISCV_PROBE_UNALIGNED_ACCESS
+ help
+ This determines the level of support for unaligned accesses. The
+ kernel uses this information to perform optimizations, and it is also
+ exposed to user space via the hwprobe syscall. By default, the
+ hardware is probed at boot.
+
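+# For illustration, userspace can read the chosen/probed behaviour through
+# the hwprobe syscall (a sketch; constants are from the hwprobe UAPI):
+#
+#	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_CPUPERF_0 };
+#	syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0);
+#	/* pair.value & RISCV_HWPROBE_MISALIGNED_MASK -> EMULATED/SLOW/FAST */
+#
+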
+config RISCV_PROBE_UNALIGNED_ACCESS
+ bool "Probe for hardware unaligned access support"
+ select RISCV_MISALIGNED
+ help
+ During boot, the kernel runs a series of tests to dynamically
+ determine the speed of unaligned accesses on the underlying system.
+ If unaligned memory accesses trap into the kernel because they are
+ not supported by the system, the kernel emulates them to preserve
+ the UABI.
+
+config RISCV_EMULATED_UNALIGNED_ACCESS
+ bool "Emulate unaligned access where system support is missing"
+ select RISCV_MISALIGNED
+ help
+ If unaligned memory accesses trap into the kernel because they are
+ not supported by the system, the kernel emulates them to preserve
+ the UABI. When the underlying system does support unaligned accesses,
+ they are assumed to be slow.
+
+config RISCV_SLOW_UNALIGNED_ACCESS
+ bool "Assume the system supports slow unaligned memory accesses"
+ depends on NONPORTABLE
+ help
+ Assume that the system supports slow unaligned memory accesses. The
+ kernel and userspace programs may not be able to run at all on systems
+ that do not support unaligned memory accesses.
config RISCV_EFFICIENT_UNALIGNED_ACCESS
- bool "Assume the CPU supports fast unaligned memory accesses"
+ bool "Assume the system supports fast unaligned memory accesses"
depends on NONPORTABLE
select DCACHE_WORD_ACCESS if MMU
select HAVE_EFFICIENT_UNALIGNED_ACCESS
help
- Say Y here if you want the kernel to assume that the CPU supports
- efficient unaligned memory accesses. When enabled, this option
- improves the performance of the kernel on such CPUs. However, the
- kernel will run much more slowly, or will not be able to run at all,
- on CPUs that do not support efficient unaligned memory accesses.
+ Assume that the system supports fast unaligned memory accesses. When
+ enabled, this option improves the performance of the kernel on such
+ systems. However, the kernel and userspace programs will run much more
+ slowly, or will not be able to run at all, on systems that do not
+ support efficient unaligned memory accesses.
- If unsure what to do here, say N.
+endchoice
endmenu # "Platform type"
@@ -1011,11 +1062,8 @@ menu "Power management options"
source "kernel/power/Kconfig"
-# Hibernation is only possible on systems where the SBI implementation has
-# marked its reserved memory as not accessible from, or does not run
-# from the same memory as, Linux
config ARCH_HIBERNATION_POSSIBLE
- def_bool NONPORTABLE
+ def_bool y
config ARCH_HIBERNATION_HEADER
def_bool HIBERNATION
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 0b7d109258e7d8..252d63942f34eb 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -50,6 +50,11 @@ ifndef CONFIG_AS_IS_LLVM
KBUILD_CFLAGS += -Wa,-mno-relax
KBUILD_AFLAGS += -Wa,-mno-relax
endif
+# LLVM has an issue with target-features and LTO: https://github.com/llvm/llvm-project/issues/59350
+# Ensure it is aware of linker relaxation with LTO, otherwise relocations may
+# be incorrect: https://github.com/llvm/llvm-project/issues/65090
+else ifeq ($(CONFIG_LTO_CLANG),y)
+ KBUILD_LDFLAGS += -mllvm -mattr=+c -mllvm -mattr=+relax
endif
ifeq ($(CONFIG_SHADOW_CALL_STACK),y)
diff --git a/arch/riscv/boot/dts/renesas/r9a07g043f.dtsi b/arch/riscv/boot/dts/renesas/r9a07g043f.dtsi
index 09ef10b39f46cf..f35324b9173cde 100644
--- a/arch/riscv/boot/dts/renesas/r9a07g043f.dtsi
+++ b/arch/riscv/boot/dts/renesas/r9a07g043f.dtsi
@@ -27,7 +27,7 @@
riscv,isa-base = "rv64i";
riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
"zicntr", "zicsr", "zifencei",
- "zihpm";
+ "zihpm", "xandespmu";
mmu-type = "riscv,sv39";
i-cache-size = <0x8000>;
i-cache-line-size = <0x40>;
@@ -39,7 +39,7 @@
cpu0_intc: interrupt-controller {
#interrupt-cells = <1>;
- compatible = "riscv,cpu-intc";
+ compatible = "andestech,cpu-intc", "riscv,cpu-intc";
interrupt-controller;
};
};
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index eaf34e871e308f..fc0ec2ee13bc22 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -44,6 +44,7 @@ CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
CONFIG_CPUFREQ_DT=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_ACPI=y
@@ -215,6 +216,7 @@ CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_CADENCE=y
+CONFIG_MMC_SDHCI_OF_DWCMSHC=y
CONFIG_MMC_SPI=y
CONFIG_MMC_DW=y
CONFIG_MMC_DW_STARFIVE=y
@@ -224,6 +226,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_SUN6I=y
CONFIG_DMADEVICES=y
CONFIG_DMA_SUN6I=m
+CONFIG_DW_AXI_DMAC=y
CONFIG_RZ_DMAC=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
diff --git a/arch/riscv/crypto/Kconfig b/arch/riscv/crypto/Kconfig
new file mode 100644
index 00000000000000..ad58dad9a58076
--- /dev/null
+++ b/arch/riscv/crypto/Kconfig
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Accelerated Cryptographic Algorithms for CPU (riscv)"
+
+config CRYPTO_AES_RISCV64
+ tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XTS"
+ depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ select CRYPTO_ALGAPI
+ select CRYPTO_LIB_AES
+ select CRYPTO_SKCIPHER
+ help
+ Block cipher: AES cipher algorithms
+ Length-preserving ciphers: AES with ECB, CBC, CTS, CTR, XTS
+
+ Architecture: riscv64 using:
+ - Zvkned vector crypto extension
+ - Zvbb vector extension (XTS)
+ - Zvkb vector crypto extension (CTR)
+ - Zvkg vector crypto extension (XTS)
+
+config CRYPTO_CHACHA_RISCV64
+ tristate "Ciphers: ChaCha"
+ depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ select CRYPTO_SKCIPHER
+ select CRYPTO_LIB_CHACHA_GENERIC
+ help
+ Length-preserving ciphers: ChaCha20 stream cipher algorithm
+
+ Architecture: riscv64 using:
+ - Zvkb vector crypto extension
+
+config CRYPTO_GHASH_RISCV64
+ tristate "Hash functions: GHASH"
+ depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ select CRYPTO_GCM
+ help
+ GCM GHASH function (NIST SP 800-38D)
+
+ Architecture: riscv64 using:
+ - Zvkg vector crypto extension
+
+config CRYPTO_SHA256_RISCV64
+ tristate "Hash functions: SHA-224 and SHA-256"
+ depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ select CRYPTO_SHA256
+ help
+ SHA-224 and SHA-256 secure hash algorithm (FIPS 180)
+
+ Architecture: riscv64 using:
+ - Zvknha or Zvknhb vector crypto extensions
+ - Zvkb vector crypto extension
+
+config CRYPTO_SHA512_RISCV64
+ tristate "Hash functions: SHA-384 and SHA-512"
+ depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ select CRYPTO_SHA512
+ help
+ SHA-384 and SHA-512 secure hash algorithm (FIPS 180)
+
+ Architecture: riscv64 using:
+ - Zvknhb vector crypto extension
+ - Zvkb vector crypto extension
+
+config CRYPTO_SM3_RISCV64
+ tristate "Hash functions: SM3 (ShangMi 3)"
+ depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ select CRYPTO_HASH
+ select CRYPTO_SM3
+ help
+ SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012)
+
+ Architecture: riscv64 using:
+ - Zvksh vector crypto extension
+ - Zvkb vector crypto extension
+
+config CRYPTO_SM4_RISCV64
+ tristate "Ciphers: SM4 (ShangMi 4)"
+ depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ select CRYPTO_ALGAPI
+ select CRYPTO_SM4
+ help
+ SM4 block cipher algorithm (OSCCA GB/T 32907-2016,
+ ISO/IEC 18033-3:2010/Amd 1:2021)
+
+ SM4 (GBT.32907-2016) is a cryptographic standard issued by the
+ Organization of State Commercial Administration of China (OSCCA)
+ as an authorized cryptographic algorithm for use within China.
+
+ Architecture: riscv64 using:
+ - Zvksed vector crypto extension
+ - Zvkb vector crypto extension
+
+endmenu
diff --git a/arch/riscv/crypto/Makefile b/arch/riscv/crypto/Makefile
new file mode 100644
index 00000000000000..247c7bc7288cec
--- /dev/null
+++ b/arch/riscv/crypto/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_CRYPTO_AES_RISCV64) += aes-riscv64.o
+aes-riscv64-y := aes-riscv64-glue.o aes-riscv64-zvkned.o \
+ aes-riscv64-zvkned-zvbb-zvkg.o aes-riscv64-zvkned-zvkb.o
+
+obj-$(CONFIG_CRYPTO_CHACHA_RISCV64) += chacha-riscv64.o
+chacha-riscv64-y := chacha-riscv64-glue.o chacha-riscv64-zvkb.o
+
+obj-$(CONFIG_CRYPTO_GHASH_RISCV64) += ghash-riscv64.o
+ghash-riscv64-y := ghash-riscv64-glue.o ghash-riscv64-zvkg.o
+
+obj-$(CONFIG_CRYPTO_SHA256_RISCV64) += sha256-riscv64.o
+sha256-riscv64-y := sha256-riscv64-glue.o sha256-riscv64-zvknha_or_zvknhb-zvkb.o
+
+obj-$(CONFIG_CRYPTO_SHA512_RISCV64) += sha512-riscv64.o
+sha512-riscv64-y := sha512-riscv64-glue.o sha512-riscv64-zvknhb-zvkb.o
+
+obj-$(CONFIG_CRYPTO_SM3_RISCV64) += sm3-riscv64.o
+sm3-riscv64-y := sm3-riscv64-glue.o sm3-riscv64-zvksh-zvkb.o
+
+obj-$(CONFIG_CRYPTO_SM4_RISCV64) += sm4-riscv64.o
+sm4-riscv64-y := sm4-riscv64-glue.o sm4-riscv64-zvksed-zvkb.o
diff --git a/arch/riscv/crypto/aes-macros.S b/arch/riscv/crypto/aes-macros.S
new file mode 100644
index 00000000000000..d1a258d04bc730
--- /dev/null
+++ b/arch/riscv/crypto/aes-macros.S
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
+//
+// This file is dual-licensed, meaning that you can use it under your
+// choice of either of the following two licenses:
+//
+// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the Apache License 2.0 (the "License"). You can obtain
+// a copy in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+//
+// or
+//
+// Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
+// Copyright (c) 2023, Phoebe Chen <phoebe.chen@sifive.com>
+// Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
+// Copyright 2024 Google LLC
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file contains macros that are shared by the other aes-*.S files. The
+// generated code of these macros depends on the following RISC-V extensions:
+// - RV64I
+// - RISC-V Vector ('V') with VLEN >= 128
+// - RISC-V Vector AES block cipher extension ('Zvkned')
+
+// Loads the AES round keys from \keyp into vector registers and jumps to code
+// specific to the length of the key. Specifically:
+// - If AES-128, loads round keys into v1-v11 and jumps to \label128.
+// - If AES-192, loads round keys into v1-v13 and jumps to \label192.
+// - If AES-256, loads round keys into v1-v15 and continues onwards.
+//
+// Also sets vl=4 and vtype=e32,m1,ta,ma. Clobbers t0 and t1.
+.macro aes_begin keyp, label128, label192
+ lwu t0, 480(\keyp) // t0 = key length in bytes
+ li t1, 24 // t1 = key length for AES-192
+ vsetivli zero, 4, e32, m1, ta, ma
+ vle32.v v1, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v2, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v3, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v4, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v5, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v6, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v7, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v8, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v9, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v10, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v11, (\keyp)
+ blt t0, t1, \label128 // If AES-128, goto label128.
+ addi \keyp, \keyp, 16
+ vle32.v v12, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v13, (\keyp)
+ beq t0, t1, \label192 // If AES-192, goto label192.
+ // Else, it's AES-256.
+ addi \keyp, \keyp, 16
+ vle32.v v14, (\keyp)
+ addi \keyp, \keyp, 16
+ vle32.v v15, (\keyp)
+.endm
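+
+// Typical pairing (a sketch; the label names are illustrative): a caller
+// issues "aes_begin KEYP, .Laes_128, .Laes_192", with the AES-256 path
+// falling through, and each of the three paths then invokes
+// aes_encrypt/aes_decrypt with the matching \keylen before a common join.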
+
+// Encrypts \data using zvkned instructions, using the round keys loaded into
+// v1-v11 (for AES-128), v1-v13 (for AES-192), or v1-v15 (for AES-256). \keylen
+// is the AES key length in bits. vl and vtype must already be set
+// appropriately. Note that if vl > 4, multiple blocks are encrypted.
+.macro aes_encrypt data, keylen
+ vaesz.vs \data, v1
+ vaesem.vs \data, v2
+ vaesem.vs \data, v3
+ vaesem.vs \data, v4
+ vaesem.vs \data, v5
+ vaesem.vs \data, v6
+ vaesem.vs \data, v7
+ vaesem.vs \data, v8
+ vaesem.vs \data, v9
+ vaesem.vs \data, v10
+.if \keylen == 128
+ vaesef.vs \data, v11
+.elseif \keylen == 192
+ vaesem.vs \data, v11
+ vaesem.vs \data, v12
+ vaesef.vs \data, v13
+.else
+ vaesem.vs \data, v11
+ vaesem.vs \data, v12
+ vaesem.vs \data, v13
+ vaesem.vs \data, v14
+ vaesef.vs \data, v15
+.endif
+.endm
+
+// Same as aes_encrypt, but decrypts instead of encrypts.
+.macro aes_decrypt data, keylen
+.if \keylen == 128
+ vaesz.vs \data, v11
+.elseif \keylen == 192
+ vaesz.vs \data, v13
+ vaesdm.vs \data, v12
+ vaesdm.vs \data, v11
+.else
+ vaesz.vs \data, v15
+ vaesdm.vs \data, v14
+ vaesdm.vs \data, v13
+ vaesdm.vs \data, v12
+ vaesdm.vs \data, v11
+.endif
+ vaesdm.vs \data, v10
+ vaesdm.vs \data, v9
+ vaesdm.vs \data, v8
+ vaesdm.vs \data, v7
+ vaesdm.vs \data, v6
+ vaesdm.vs \data, v5
+ vaesdm.vs \data, v4
+ vaesdm.vs \data, v3
+ vaesdm.vs \data, v2
+ vaesdf.vs \data, v1
+.endm
+
+// Expands to aes_encrypt or aes_decrypt according to \enc, which is 1 or 0.
+.macro aes_crypt data, enc, keylen
+.if \enc
+ aes_encrypt \data, \keylen
+.else
+ aes_decrypt \data, \keylen
+.endif
+.endm
diff --git a/arch/riscv/crypto/aes-riscv64-glue.c b/arch/riscv/crypto/aes-riscv64-glue.c
new file mode 100644
index 00000000000000..f814ee048555b1
--- /dev/null
+++ b/arch/riscv/crypto/aes-riscv64-glue.c
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AES using the RISC-V vector crypto extensions. Includes the bare block
+ * cipher and the ECB, CBC, CBC-CTS, CTR, and XTS modes.
+ *
+ * Copyright (C) 2023 VRULL GmbH
+ * Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
+ *
+ * Copyright (C) 2023 SiFive, Inc.
+ * Author: Jerry Shih <jerry.shih@sifive.com>
+ *
+ * Copyright 2024 Google LLC
+ */
+
+#include <asm/simd.h>
+#include <asm/vector.h>
+#include <crypto/aes.h>
+#include <crypto/internal/cipher.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/xts.h>
+#include <linux/linkage.h>
+#include <linux/module.h>
+
+asmlinkage void aes_encrypt_zvkned(const struct crypto_aes_ctx *key,
+ const u8 in[AES_BLOCK_SIZE],
+ u8 out[AES_BLOCK_SIZE]);
+asmlinkage void aes_decrypt_zvkned(const struct crypto_aes_ctx *key,
+ const u8 in[AES_BLOCK_SIZE],
+ u8 out[AES_BLOCK_SIZE]);
+
+asmlinkage void aes_ecb_encrypt_zvkned(const struct crypto_aes_ctx *key,
+ const u8 *in, u8 *out, size_t len);
+asmlinkage void aes_ecb_decrypt_zvkned(const struct crypto_aes_ctx *key,
+ const u8 *in, u8 *out, size_t len);
+
+asmlinkage void aes_cbc_encrypt_zvkned(const struct crypto_aes_ctx *key,
+ const u8 *in, u8 *out, size_t len,
+ u8 iv[AES_BLOCK_SIZE]);
+asmlinkage void aes_cbc_decrypt_zvkned(const struct crypto_aes_ctx *key,
+ const u8 *in, u8 *out, size_t len,
+ u8 iv[AES_BLOCK_SIZE]);
+
+asmlinkage void aes_cbc_cts_crypt_zvkned(const struct crypto_aes_ctx *key,
+ const u8 *in, u8 *out, size_t len,
+ const u8 iv[AES_BLOCK_SIZE], bool enc);
+
+asmlinkage void aes_ctr32_crypt_zvkned_zvkb(const struct crypto_aes_ctx *key,
+ const u8 *in, u8 *out, size_t len,
+ u8 iv[AES_BLOCK_SIZE]);
+
+asmlinkage void aes_xts_encrypt_zvkned_zvbb_zvkg(
+ const struct crypto_aes_ctx *key,
+ const u8 *in, u8 *out, size_t len,
+ u8 tweak[AES_BLOCK_SIZE]);
+
+asmlinkage void aes_xts_decrypt_zvkned_zvbb_zvkg(
+ const struct crypto_aes_ctx *key,
+ const u8 *in, u8 *out, size_t len,
+ u8 tweak[AES_BLOCK_SIZE]);
+
+static int riscv64_aes_setkey(struct crypto_aes_ctx *ctx,
+ const u8 *key, unsigned int keylen)
+{
+ /*
+ * For now we just use the generic key expansion, for these reasons:
+ *
+ * - zvkned's key expansion instructions don't support AES-192.
+ * So, non-zvkned fallback code would be needed anyway.
+ *
+ * - Users of AES in Linux usually don't change keys frequently.
+ * So, key expansion isn't performance-critical.
+ *
+ * - For single-block AES exposed as a "cipher" algorithm, it's
+ * necessary to use struct crypto_aes_ctx and initialize its 'key_dec'
+ * field with the round keys for the Equivalent Inverse Cipher. This
+ * is because with "cipher", decryption can be requested from a
+ * context where the vector unit isn't usable, necessitating a
+ * fallback to aes_decrypt(). But, zvkned can only generate and use
+ * the normal round keys. Of course, it's preferable to not have
+ * special code just for "cipher", as e.g. XTS also uses a
+ * single-block AES encryption. It's simplest to just use
+ * struct crypto_aes_ctx and aes_expandkey() everywhere.
+ */
+ return aes_expandkey(ctx, key, keylen);
+}
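+
+/*
+ * For reference, a minimal sketch of what this gives the caller: for
+ * AES-128, aes_expandkey() derives 11 round keys (44 words) into
+ * ctx->key_enc, the Equivalent Inverse Cipher round keys into
+ * ctx->key_dec, and sets ctx->key_length to 16:
+ *
+ *	struct crypto_aes_ctx ctx;
+ *	int err = aes_expandkey(&ctx, key, AES_KEYSIZE_128);
+ */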
+
+static int riscv64_aes_setkey_cipher(struct crypto_tfm *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return riscv64_aes_setkey(ctx, key, keylen);
+}
+
+static int riscv64_aes_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return riscv64_aes_setkey(ctx, key, keylen);
+}
+
+/* Bare AES, without a mode of operation */
+
+static void riscv64_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (crypto_simd_usable()) {
+ kernel_vector_begin();
+ aes_encrypt_zvkned(ctx, src, dst);
+ kernel_vector_end();
+ } else {
+ aes_encrypt(ctx, dst, src);
+ }
+}
+
+static void riscv64_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (crypto_simd_usable()) {
+ kernel_vector_begin();
+ aes_decrypt_zvkned(ctx, src, dst);
+ kernel_vector_end();
+ } else {
+ aes_decrypt(ctx, dst, src);
+ }
+}
+
+/* AES-ECB */
+
+static inline int riscv64_aes_ecb_crypt(struct skcipher_request *req, bool enc)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
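+	/*
+	 * Each walk step may end mid-block; masking with
+	 * ~(AES_BLOCK_SIZE - 1) rounds the length down to whole blocks, e.g.
+	 * a 100-byte step processes 96 bytes and leaves 4 for the next step.
+	 */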
+ while ((nbytes = walk.nbytes) != 0) {
+ kernel_vector_begin();
+ if (enc)
+ aes_ecb_encrypt_zvkned(ctx, walk.src.virt.addr,
+ walk.dst.virt.addr,
+ nbytes & ~(AES_BLOCK_SIZE - 1));
+ else
+ aes_ecb_decrypt_zvkned(ctx, walk.src.virt.addr,
+ walk.dst.virt.addr,
+ nbytes & ~(AES_BLOCK_SIZE - 1));
+ kernel_vector_end();
+ err = skcipher_walk_done(&walk, nbytes & (AES_BLOCK_SIZE - 1));
+ }
+
+ return err;
+}
+
+static int riscv64_aes_ecb_encrypt(struct skcipher_request *req)
+{
+ return riscv64_aes_ecb_crypt(req, true);
+}
+
+static int riscv64_aes_ecb_decrypt(struct skcipher_request *req)
+{
+ return riscv64_aes_ecb_crypt(req, false);
+}
+
+/* AES-CBC */
+
+static int riscv64_aes_cbc_crypt(struct skcipher_request *req, bool enc)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) != 0) {
+ kernel_vector_begin();
+ if (enc)
+ aes_cbc_encrypt_zvkned(ctx, walk.src.virt.addr,
+ walk.dst.virt.addr,
+ nbytes & ~(AES_BLOCK_SIZE - 1),
+ walk.iv);
+ else
+ aes_cbc_decrypt_zvkned(ctx, walk.src.virt.addr,
+ walk.dst.virt.addr,
+ nbytes & ~(AES_BLOCK_SIZE - 1),
+ walk.iv);
+ kernel_vector_end();
+ err = skcipher_walk_done(&walk, nbytes & (AES_BLOCK_SIZE - 1));
+ }
+
+ return err;
+}
+
+static int riscv64_aes_cbc_encrypt(struct skcipher_request *req)
+{
+ return riscv64_aes_cbc_crypt(req, true);
+}
+
+static int riscv64_aes_cbc_decrypt(struct skcipher_request *req)
+{
+ return riscv64_aes_cbc_crypt(req, false);
+}
+
+/* AES-CBC-CTS */
+
+static int riscv64_aes_cbc_cts_crypt(struct skcipher_request *req, bool enc)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct scatterlist sg_src[2], sg_dst[2];
+ struct skcipher_request subreq;
+ struct scatterlist *src, *dst;
+ struct skcipher_walk walk;
+ unsigned int cbc_len;
+ int err;
+
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ err = skcipher_walk_virt(&walk, req, false);
+ if (err)
+ return err;
+ /*
+	 * If the full message is available in one step, process it in one call
+ * to the CBC-CTS assembly function. This reduces overhead, especially
+ * on short messages. Otherwise, fall back to doing CBC up to the last
+ * two blocks, then invoke CTS just for the ciphertext stealing.
+ */
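+	/*
+	 * Example: cryptlen = 53 gives cbc_len = round_down(36, 16) = 32, so
+	 * the CBC pass covers two blocks and the CTS call below handles the
+	 * remaining 21 bytes (one full block plus a 5-byte partial block).
+	 */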
+ if (unlikely(walk.nbytes != req->cryptlen)) {
+ cbc_len = round_down(req->cryptlen - AES_BLOCK_SIZE - 1,
+ AES_BLOCK_SIZE);
+ skcipher_walk_abort(&walk);
+ skcipher_request_set_tfm(&subreq, tfm);
+ skcipher_request_set_callback(&subreq,
+ skcipher_request_flags(req),
+ NULL, NULL);
+ skcipher_request_set_crypt(&subreq, req->src, req->dst,
+ cbc_len, req->iv);
+ err = riscv64_aes_cbc_crypt(&subreq, enc);
+ if (err)
+ return err;
+ dst = src = scatterwalk_ffwd(sg_src, req->src, cbc_len);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(sg_dst, req->dst, cbc_len);
+ skcipher_request_set_crypt(&subreq, src, dst,
+ req->cryptlen - cbc_len, req->iv);
+ err = skcipher_walk_virt(&walk, &subreq, false);
+ if (err)
+ return err;
+ }
+ kernel_vector_begin();
+ aes_cbc_cts_crypt_zvkned(ctx, walk.src.virt.addr, walk.dst.virt.addr,
+ walk.nbytes, req->iv, enc);
+ kernel_vector_end();
+ return skcipher_walk_done(&walk, 0);
+}
+
+static int riscv64_aes_cbc_cts_encrypt(struct skcipher_request *req)
+{
+ return riscv64_aes_cbc_cts_crypt(req, true);
+}
+
+static int riscv64_aes_cbc_cts_decrypt(struct skcipher_request *req)
+{
+ return riscv64_aes_cbc_cts_crypt(req, false);
+}
+
+/* AES-CTR */
+
+static int riscv64_aes_ctr_crypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ unsigned int nbytes, p1_nbytes;
+ struct skcipher_walk walk;
+ u32 ctr32, nblocks;
+ int err;
+
+ /* Get the low 32-bit word of the 128-bit big endian counter. */
+ ctr32 = get_unaligned_be32(req->iv + 12);
+
+ err = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) != 0) {
+ if (nbytes < walk.total) {
+ /* Not the end yet, so keep the length block-aligned. */
+ nbytes = round_down(nbytes, AES_BLOCK_SIZE);
+ nblocks = nbytes / AES_BLOCK_SIZE;
+ } else {
+ /* It's the end, so include any final partial block. */
+ nblocks = DIV_ROUND_UP(nbytes, AES_BLOCK_SIZE);
+ }
+ ctr32 += nblocks;
+
+ kernel_vector_begin();
+ if (ctr32 >= nblocks) {
+ /* The low 32-bit word of the counter won't overflow. */
+ aes_ctr32_crypt_zvkned_zvkb(ctx, walk.src.virt.addr,
+ walk.dst.virt.addr, nbytes,
+ req->iv);
+ } else {
+ /*
+ * The low 32-bit word of the counter will overflow.
+ * The assembly doesn't handle this case, so split the
+ * operation into two at the point where the overflow
+ * will occur. After the first part, add the carry bit.
+ */
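+			/*
+			 * E.g. if ctr32 was 0xfffffffe and nblocks = 4, ctr32
+			 * is now 2: part one covers the two blocks up to the
+			 * wrap, then part two covers the final two blocks with
+			 * the upper 96 bits incremented via crypto_inc().
+			 */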
+ p1_nbytes = min_t(unsigned int, nbytes,
+ (nblocks - ctr32) * AES_BLOCK_SIZE);
+ aes_ctr32_crypt_zvkned_zvkb(ctx, walk.src.virt.addr,
+ walk.dst.virt.addr,
+ p1_nbytes, req->iv);
+ crypto_inc(req->iv, 12);
+
+ if (ctr32) {
+ aes_ctr32_crypt_zvkned_zvkb(
+ ctx,
+ walk.src.virt.addr + p1_nbytes,
+ walk.dst.virt.addr + p1_nbytes,
+ nbytes - p1_nbytes, req->iv);
+ }
+ }
+ kernel_vector_end();
+
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+
+ return err;
+}
+
+/* AES-XTS */
+
+struct riscv64_aes_xts_ctx {
+ struct crypto_aes_ctx ctx1;
+ struct crypto_aes_ctx ctx2;
+};
+
+static int riscv64_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct riscv64_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return xts_verify_key(tfm, key, keylen) ?:
+ riscv64_aes_setkey(&ctx->ctx1, key, keylen / 2) ?:
+ riscv64_aes_setkey(&ctx->ctx2, key + keylen / 2, keylen / 2);
+}
+
+static int riscv64_aes_xts_crypt(struct skcipher_request *req, bool enc)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct riscv64_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int tail = req->cryptlen % AES_BLOCK_SIZE;
+ struct scatterlist sg_src[2], sg_dst[2];
+ struct skcipher_request subreq;
+ struct scatterlist *src, *dst;
+ struct skcipher_walk walk;
+ int err;
+
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ /* Encrypt the IV with the tweak key to get the first tweak. */
+ kernel_vector_begin();
+ aes_encrypt_zvkned(&ctx->ctx2, req->iv, req->iv);
+ kernel_vector_end();
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ /*
+ * If the message length isn't divisible by the AES block size and the
+ * full message isn't available in one step of the scatterlist walk,
+ * then separate off the last full block and the partial block. This
+ * ensures that they are processed in the same call to the assembly
+ * function, which is required for ciphertext stealing.
+ */
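+	/*
+	 * Example: cryptlen = 100 (six full blocks plus 4 bytes): the
+	 * subrequest below covers 100 - 4 - 16 = 80 bytes, leaving the last
+	 * full block and the 4-byte tail for the ciphertext stealing step.
+	 */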
+ if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
+ skcipher_walk_abort(&walk);
+
+ skcipher_request_set_tfm(&subreq, tfm);
+ skcipher_request_set_callback(&subreq,
+ skcipher_request_flags(req),
+ NULL, NULL);
+ skcipher_request_set_crypt(&subreq, req->src, req->dst,
+ req->cryptlen - tail - AES_BLOCK_SIZE,
+ req->iv);
+ req = &subreq;
+ err = skcipher_walk_virt(&walk, req, false);
+ } else {
+ tail = 0;
+ }
+
+ while (walk.nbytes) {
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, AES_BLOCK_SIZE);
+
+ kernel_vector_begin();
+ if (enc)
+ aes_xts_encrypt_zvkned_zvbb_zvkg(
+ &ctx->ctx1, walk.src.virt.addr,
+ walk.dst.virt.addr, nbytes, req->iv);
+ else
+ aes_xts_decrypt_zvkned_zvbb_zvkg(
+ &ctx->ctx1, walk.src.virt.addr,
+ walk.dst.virt.addr, nbytes, req->iv);
+ kernel_vector_end();
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+
+ if (err || likely(!tail))
+ return err;
+
+ /* Do ciphertext stealing with the last full block and partial block. */
+
+ dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
+
+ skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
+ req->iv);
+
+ err = skcipher_walk_virt(&walk, req, false);
+ if (err)
+ return err;
+
+ kernel_vector_begin();
+ if (enc)
+ aes_xts_encrypt_zvkned_zvbb_zvkg(
+ &ctx->ctx1, walk.src.virt.addr,
+ walk.dst.virt.addr, walk.nbytes, req->iv);
+ else
+ aes_xts_decrypt_zvkned_zvbb_zvkg(
+ &ctx->ctx1, walk.src.virt.addr,
+ walk.dst.virt.addr, walk.nbytes, req->iv);
+ kernel_vector_end();
+
+ return skcipher_walk_done(&walk, 0);
+}
+
+static int riscv64_aes_xts_encrypt(struct skcipher_request *req)
+{
+ return riscv64_aes_xts_crypt(req, true);
+}
+
+static int riscv64_aes_xts_decrypt(struct skcipher_request *req)
+{
+ return riscv64_aes_xts_crypt(req, false);
+}
+
+/* Algorithm definitions */
+
+static struct crypto_alg riscv64_zvkned_aes_cipher_alg = {
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_priority = 300,
+ .cra_name = "aes",
+ .cra_driver_name = "aes-riscv64-zvkned",
+ .cra_cipher = {
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
+ .cia_setkey = riscv64_aes_setkey_cipher,
+ .cia_encrypt = riscv64_aes_encrypt,
+ .cia_decrypt = riscv64_aes_decrypt,
+ },
+ .cra_module = THIS_MODULE,
+};
+
+static struct skcipher_alg riscv64_zvkned_aes_skcipher_algs[] = {
+ {
+ .setkey = riscv64_aes_setkey_skcipher,
+ .encrypt = riscv64_aes_ecb_encrypt,
+ .decrypt = riscv64_aes_ecb_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .walksize = 8 * AES_BLOCK_SIZE, /* matches LMUL=8 */
+ .base = {
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_priority = 300,
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-riscv64-zvkned",
+ .cra_module = THIS_MODULE,
+ },
+ }, {
+ .setkey = riscv64_aes_setkey_skcipher,
+ .encrypt = riscv64_aes_cbc_encrypt,
+ .decrypt = riscv64_aes_cbc_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_priority = 300,
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-riscv64-zvkned",
+ .cra_module = THIS_MODULE,
+ },
+ }, {
+ .setkey = riscv64_aes_setkey_skcipher,
+ .encrypt = riscv64_aes_cbc_cts_encrypt,
+ .decrypt = riscv64_aes_cbc_cts_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .walksize = 4 * AES_BLOCK_SIZE, /* matches LMUL=4 */
+ .base = {
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_priority = 300,
+ .cra_name = "cts(cbc(aes))",
+ .cra_driver_name = "cts-cbc-aes-riscv64-zvkned",
+ .cra_module = THIS_MODULE,
+ },
+ }
+};
+
+static struct skcipher_alg riscv64_zvkned_zvkb_aes_skcipher_alg = {
+ .setkey = riscv64_aes_setkey_skcipher,
+ .encrypt = riscv64_aes_ctr_crypt,
+ .decrypt = riscv64_aes_ctr_crypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ .walksize = 4 * AES_BLOCK_SIZE, /* matches LMUL=4 */
+ .base = {
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_priority = 300,
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-riscv64-zvkned-zvkb",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static struct skcipher_alg riscv64_zvkned_zvbb_zvkg_aes_skcipher_alg = {
+ .setkey = riscv64_aes_xts_setkey,
+ .encrypt = riscv64_aes_xts_encrypt,
+ .decrypt = riscv64_aes_xts_decrypt,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ .walksize = 4 * AES_BLOCK_SIZE, /* matches LMUL=4 */
+ .base = {
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct riscv64_aes_xts_ctx),
+ .cra_priority = 300,
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-riscv64-zvkned-zvbb-zvkg",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static inline bool riscv64_aes_xts_supported(void)
+{
+ return riscv_isa_extension_available(NULL, ZVBB) &&
+ riscv_isa_extension_available(NULL, ZVKG) &&
+ riscv_vector_vlen() < 2048 /* Implementation limitation */;
+}
+
+static int __init riscv64_aes_mod_init(void)
+{
+ int err = -ENODEV;
+
+ if (riscv_isa_extension_available(NULL, ZVKNED) &&
+ riscv_vector_vlen() >= 128) {
+ err = crypto_register_alg(&riscv64_zvkned_aes_cipher_alg);
+ if (err)
+ return err;
+
+ err = crypto_register_skciphers(
+ riscv64_zvkned_aes_skcipher_algs,
+ ARRAY_SIZE(riscv64_zvkned_aes_skcipher_algs));
+ if (err)
+ goto unregister_zvkned_cipher_alg;
+
+ if (riscv_isa_extension_available(NULL, ZVKB)) {
+ err = crypto_register_skcipher(
+ &riscv64_zvkned_zvkb_aes_skcipher_alg);
+ if (err)
+ goto unregister_zvkned_skcipher_algs;
+ }
+
+ if (riscv64_aes_xts_supported()) {
+ err = crypto_register_skcipher(
+ &riscv64_zvkned_zvbb_zvkg_aes_skcipher_alg);
+ if (err)
+ goto unregister_zvkned_zvkb_skcipher_alg;
+ }
+ }
+
+ return err;
+
+unregister_zvkned_zvkb_skcipher_alg:
+ if (riscv_isa_extension_available(NULL, ZVKB))
+ crypto_unregister_skcipher(&riscv64_zvkned_zvkb_aes_skcipher_alg);
+unregister_zvkned_skcipher_algs:
+ crypto_unregister_skciphers(riscv64_zvkned_aes_skcipher_algs,
+ ARRAY_SIZE(riscv64_zvkned_aes_skcipher_algs));
+unregister_zvkned_cipher_alg:
+ crypto_unregister_alg(&riscv64_zvkned_aes_cipher_alg);
+ return err;
+}
+
+static void __exit riscv64_aes_mod_exit(void)
+{
+ if (riscv64_aes_xts_supported())
+ crypto_unregister_skcipher(&riscv64_zvkned_zvbb_zvkg_aes_skcipher_alg);
+ if (riscv_isa_extension_available(NULL, ZVKB))
+ crypto_unregister_skcipher(&riscv64_zvkned_zvkb_aes_skcipher_alg);
+ crypto_unregister_skciphers(riscv64_zvkned_aes_skcipher_algs,
+ ARRAY_SIZE(riscv64_zvkned_aes_skcipher_algs));
+ crypto_unregister_alg(&riscv64_zvkned_aes_cipher_alg);
+}
+
+module_init(riscv64_aes_mod_init);
+module_exit(riscv64_aes_mod_exit);
+
+MODULE_DESCRIPTION("AES-ECB/CBC/CTS/CTR/XTS (RISC-V accelerated)");
+MODULE_AUTHOR("Jerry Shih <jerry.shih@sifive.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("aes");
+MODULE_ALIAS_CRYPTO("ecb(aes)");
+MODULE_ALIAS_CRYPTO("cbc(aes)");
+MODULE_ALIAS_CRYPTO("cts(cbc(aes))");
+MODULE_ALIAS_CRYPTO("ctr(aes)");
+MODULE_ALIAS_CRYPTO("xts(aes)");
diff --git a/arch/riscv/crypto/aes-riscv64-zvkned-zvbb-zvkg.S b/arch/riscv/crypto/aes-riscv64-zvkned-zvbb-zvkg.S
new file mode 100644
index 00000000000000..146fc9cfb268d4
--- /dev/null
+++ b/arch/riscv/crypto/aes-riscv64-zvkned-zvbb-zvkg.S
@@ -0,0 +1,312 @@
+/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
+//
+// This file is dual-licensed, meaning that you can use it under your
+// choice of either of the following two licenses:
+//
+// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the Apache License 2.0 (the "License"). You can obtain
+// a copy in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+//
+// or
+//
+// Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
+// Copyright 2024 Google LLC
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The generated code of this file depends on the following RISC-V extensions:
+// - RV64I
+// - RISC-V Vector ('V') with VLEN >= 128 && VLEN < 2048
+// - RISC-V Vector AES block cipher extension ('Zvkned')
+// - RISC-V Vector Bit-manipulation extension ('Zvbb')
+// - RISC-V Vector GCM/GMAC extension ('Zvkg')
+
+#include <linux/linkage.h>
+
+.text
+.option arch, +zvkned, +zvbb, +zvkg
+
+#include "aes-macros.S"
+
+#define KEYP a0
+#define INP a1
+#define OUTP a2
+#define LEN a3
+#define TWEAKP a4
+
+#define LEN32 a5
+#define TAIL_LEN a6
+#define VL a7
+#define VLMAX t4
+
+// v1-v15 contain the AES round keys, but they are used as temporaries before
+// the AES round keys have been loaded.
+#define TWEAKS v16 // LMUL=4 (most of the time)
+#define TWEAKS_BREV v20 // LMUL=4 (most of the time)
+#define MULTS_BREV v24 // LMUL=4 (most of the time)
+#define TMP0 v28
+#define TMP1 v29
+#define TMP2 v30
+#define TMP3 v31
+
+// xts_init initializes the following values:
+//
+// TWEAKS: N 128-bit tweaks T*(x^i) for i in 0..(N - 1)
+// TWEAKS_BREV: same as TWEAKS, but bit-reversed
+// MULTS_BREV: N 128-bit values x^N, bit-reversed. Only if N > 1.
+//
+// N is the maximum number of blocks that will be processed per loop iteration,
+// computed using vsetvli.
+//
+// The field convention used by XTS is the same as that of GHASH, but with the
+// bits reversed within each byte. The zvkg extension provides the vgmul
+// instruction which does multiplication in this field. Therefore, for tweak
+// computation we use vgmul to do multiplications in parallel, instead of
+// serially multiplying by x using shifting+xoring. Note that for this to work,
+// the inputs and outputs to vgmul must be bit-reversed (we do it with vbrev8).
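+//
+// For example, with VLEN=256 and LMUL=4 a register group holds N=8 blocks,
+// so TWEAKS ends up containing T*(x^0)..T*(x^7) and MULTS_BREV eight copies
+// of x^8 (bit-reversed).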
+.macro xts_init
+
+ // Load the first tweak T.
+ vsetivli zero, 4, e32, m1, ta, ma
+ vle32.v TWEAKS, (TWEAKP)
+
+ // If there's only one block (or no blocks at all), then skip the tweak
+ // sequence computation because (at most) T itself is needed.
+ li t0, 16
+ ble LEN, t0, .Linit_single_block\@
+
+ // Save a copy of T bit-reversed in v12.
+ vbrev8.v v12, TWEAKS
+
+ //
+ // Generate x^i for i in 0..(N - 1), i.e. 128-bit values 1 << i assuming
+ // that N <= 128. Though, this code actually requires N < 64 (or
+ // equivalently VLEN < 2048) due to the use of 64-bit intermediate
+ // values here and in the x^N computation later.
+ //
+ vsetvli VL, LEN32, e32, m4, ta, ma
+ srli t0, VL, 2 // t0 = N (num blocks)
+ // Generate two sequences, each with N 32-bit values:
+ // v0=[1, 1, 1, ...] and v1=[0, 1, 2, ...].
+ vsetvli zero, t0, e32, m1, ta, ma
+ vmv.v.i v0, 1
+ vid.v v1
+ // Use vzext to zero-extend the sequences to 64 bits. Reinterpret them
+ // as two sequences, each with 2*N 32-bit values:
+ // v2=[1, 0, 1, 0, 1, 0, ...] and v4=[0, 0, 1, 0, 2, 0, ...].
+ vsetvli zero, t0, e64, m2, ta, ma
+ vzext.vf2 v2, v0
+ vzext.vf2 v4, v1
+ slli t1, t0, 1 // t1 = 2*N
+ vsetvli zero, t1, e32, m2, ta, ma
+ // Use vwsll to compute [1<<0, 0<<0, 1<<1, 0<<0, 1<<2, 0<<0, ...],
+ // widening to 64 bits per element. When reinterpreted as N 128-bit
+ // values, this is the needed sequence of 128-bit values 1 << i (x^i).
+ vwsll.vv v8, v2, v4
+
+ // Copy the bit-reversed T to all N elements of TWEAKS_BREV, then
+ // multiply by x^i. This gives the sequence T*(x^i), bit-reversed.
+ vsetvli zero, LEN32, e32, m4, ta, ma
+ vmv.v.i TWEAKS_BREV, 0
+ vaesz.vs TWEAKS_BREV, v12
+ vbrev8.v v8, v8
+ vgmul.vv TWEAKS_BREV, v8
+
+ // Save a copy of the sequence T*(x^i) with the bit reversal undone.
+ vbrev8.v TWEAKS, TWEAKS_BREV
+
+ // Generate N copies of x^N, i.e. 128-bit values 1 << N, bit-reversed.
+ li t1, 1
+ sll t1, t1, t0 // t1 = 1 << N
+ vsetivli zero, 2, e64, m1, ta, ma
+ vmv.v.i v0, 0
+ vsetivli zero, 1, e64, m1, tu, ma
+ vmv.v.x v0, t1
+ vbrev8.v v0, v0
+ vsetvli zero, LEN32, e32, m4, ta, ma
+ vmv.v.i MULTS_BREV, 0
+ vaesz.vs MULTS_BREV, v0
+
+ j .Linit_done\@
+
+.Linit_single_block\@:
+ vbrev8.v TWEAKS_BREV, TWEAKS
+.Linit_done\@:
+.endm
+
+// Set the first 128 bits of MULTS_BREV to 0x40, i.e. 'x' bit-reversed. This is
+// the multiplier required to advance the tweak by one.
+.macro load_x
+ li t0, 0x40
+ vsetivli zero, 4, e32, m1, ta, ma
+ vmv.v.i MULTS_BREV, 0
+ vsetivli zero, 1, e8, m1, tu, ma
+ vmv.v.x MULTS_BREV, t0
+.endm
+
+.macro __aes_xts_crypt enc, keylen
+ // With 16 < len <= 31, there's no main loop, just ciphertext stealing.
+ beqz LEN32, .Lcts_without_main_loop\@
+
+ vsetvli VLMAX, zero, e32, m4, ta, ma
+1:
+ vsetvli VL, LEN32, e32, m4, ta, ma
+2:
+ // Encrypt or decrypt VL/4 blocks.
+ vle32.v TMP0, (INP)
+ vxor.vv TMP0, TMP0, TWEAKS
+ aes_crypt TMP0, \enc, \keylen
+ vxor.vv TMP0, TMP0, TWEAKS
+ vse32.v TMP0, (OUTP)
+
+ // Update the pointers and the remaining length.
+ slli t0, VL, 2
+ add INP, INP, t0
+ add OUTP, OUTP, t0
+ sub LEN32, LEN32, VL
+
+ // Check whether more blocks remain.
+ beqz LEN32, .Lmain_loop_done\@
+
+ // Compute the next sequence of tweaks by multiplying the previous
+ // sequence by x^N. Store the result in both bit-reversed order and
+ // regular order (i.e. with the bit reversal undone).
+ vgmul.vv TWEAKS_BREV, MULTS_BREV
+ vbrev8.v TWEAKS, TWEAKS_BREV
+
+ // Since we compute the tweak multipliers x^N in advance, we require
+ // that each iteration process the same length except possibly the last.
+	// This conflicts slightly with the behavior allowed by the RISC-V Vector
+ // Extension, where CPUs can select a lower length for both of the last
+ // two iterations. E.g., vl might take the sequence of values
+ // [16, 16, 16, 12, 12], whereas we need [16, 16, 16, 16, 8] so that we
+ // can use x^4 again instead of computing x^3. Therefore, we explicitly
+ // keep the vl at VLMAX if there is at least VLMAX remaining.
+ bge LEN32, VLMAX, 2b
+ j 1b
+
+.Lmain_loop_done\@:
+ load_x
+
+ // Compute the next tweak.
+ addi t0, VL, -4
+ vsetivli zero, 4, e32, m4, ta, ma
+ vslidedown.vx TWEAKS_BREV, TWEAKS_BREV, t0 // Extract last tweak
+ vsetivli zero, 4, e32, m1, ta, ma
+ vgmul.vv TWEAKS_BREV, MULTS_BREV // Advance to next tweak
+
+ bnez TAIL_LEN, .Lcts\@
+
+ // Update *TWEAKP to contain the next tweak.
+ vbrev8.v TWEAKS, TWEAKS_BREV
+ vse32.v TWEAKS, (TWEAKP)
+ ret
+
+.Lcts_without_main_loop\@:
+ load_x
+.Lcts\@:
+ // TWEAKS_BREV now contains the next tweak. Compute the one after that.
+ vsetivli zero, 4, e32, m1, ta, ma
+ vmv.v.v TMP0, TWEAKS_BREV
+ vgmul.vv TMP0, MULTS_BREV
+ // Undo the bit reversal of the next two tweaks and store them in TMP1
+ // and TMP2, such that TMP1 is the first needed and TMP2 the second.
+.if \enc
+ vbrev8.v TMP1, TWEAKS_BREV
+ vbrev8.v TMP2, TMP0
+.else
+ vbrev8.v TMP1, TMP0
+ vbrev8.v TMP2, TWEAKS_BREV
+.endif
+
+ // Encrypt/decrypt the last full block.
+ vle32.v TMP0, (INP)
+ vxor.vv TMP0, TMP0, TMP1
+ aes_crypt TMP0, \enc, \keylen
+ vxor.vv TMP0, TMP0, TMP1
+
+ // Swap the first TAIL_LEN bytes of the above result with the tail.
+ // Note that to support in-place encryption/decryption, the load from
+ // the input tail must happen before the store to the output tail.
+ addi t0, INP, 16
+ addi t1, OUTP, 16
+ vmv.v.v TMP3, TMP0
+ vsetvli zero, TAIL_LEN, e8, m1, tu, ma
+ vle8.v TMP0, (t0)
+ vse8.v TMP3, (t1)
+
+ // Encrypt/decrypt again and store the last full block.
+ vsetivli zero, 4, e32, m1, ta, ma
+ vxor.vv TMP0, TMP0, TMP2
+ aes_crypt TMP0, \enc, \keylen
+ vxor.vv TMP0, TMP0, TMP2
+ vse32.v TMP0, (OUTP)
+
+ ret
+.endm
+
+.macro aes_xts_crypt enc
+
+ // Check whether the length is a multiple of the AES block size.
+ andi TAIL_LEN, LEN, 15
+ beqz TAIL_LEN, 1f
+
+ // The length isn't a multiple of the AES block size, so ciphertext
+ // stealing will be required. Ciphertext stealing involves special
+ // handling of the partial block and the last full block, so subtract
+ // the length of both from the length to be processed in the main loop.
+ sub LEN, LEN, TAIL_LEN
+ addi LEN, LEN, -16
+1:
+ srli LEN32, LEN, 2
+ // LEN and LEN32 now contain the total length of the blocks that will be
+ // processed in the main loop, in bytes and 32-bit words respectively.
+
+ xts_init
+ aes_begin KEYP, 128f, 192f
+ __aes_xts_crypt \enc, 256
+128:
+ __aes_xts_crypt \enc, 128
+192:
+ __aes_xts_crypt \enc, 192
+.endm
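+
+// Note that each expansion of __aes_xts_crypt ends in ret, so the 128: and
+// 192: labels are reached only via the branches taken in aes_begin.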
+
+// void aes_xts_encrypt_zvkned_zvbb_zvkg(const struct crypto_aes_ctx *key,
+// const u8 *in, u8 *out, size_t len,
+// u8 tweak[16]);
+//
+// |key| is the data key. |tweak| contains the next tweak; the encryption of
+// the original IV with the tweak key was already done. This function supports
+// incremental computation, but |len| must always be >= 16 (AES_BLOCK_SIZE), and
+// |len| must be a multiple of 16 except on the last call. If |len| is a
+// multiple of 16, then this function updates |tweak| to contain the next tweak.
+SYM_FUNC_START(aes_xts_encrypt_zvkned_zvbb_zvkg)
+ aes_xts_crypt 1
+SYM_FUNC_END(aes_xts_encrypt_zvkned_zvbb_zvkg)
+
+// Same prototype and calling convention as the encryption function
+SYM_FUNC_START(aes_xts_decrypt_zvkned_zvbb_zvkg)
+ aes_xts_crypt 0
+SYM_FUNC_END(aes_xts_decrypt_zvkned_zvbb_zvkg)
diff --git a/arch/riscv/crypto/aes-riscv64-zvkned-zvkb.S b/arch/riscv/crypto/aes-riscv64-zvkned-zvkb.S
new file mode 100644
index 00000000000000..9962d45005870c
--- /dev/null
+++ b/arch/riscv/crypto/aes-riscv64-zvkned-zvkb.S
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
+//
+// This file is dual-licensed, meaning that you can use it under your
+// choice of either of the following two licenses:
+//
+// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the Apache License 2.0 (the "License"). You can obtain
+// a copy in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+//
+// or
+//
+// Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
+// Copyright 2024 Google LLC
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The generated code of this file depends on the following RISC-V extensions:
+// - RV64I
+// - RISC-V Vector ('V') with VLEN >= 128
+// - RISC-V Vector AES block cipher extension ('Zvkned')
+// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
+
+#include <linux/linkage.h>
+
+.text
+.option arch, +zvkned, +zvkb
+
+#include "aes-macros.S"
+
+#define KEYP a0
+#define INP a1
+#define OUTP a2
+#define LEN a3
+#define IVP a4
+
+#define LEN32 a5
+#define VL_E32 a6
+#define VL_BLOCKS a7
+
+.macro aes_ctr32_crypt keylen
+ // LEN32 = number of blocks, rounded up, in 32-bit words.
+ addi t0, LEN, 15
+ srli t0, t0, 4
+ slli LEN32, t0, 2
+
+ // Create a mask that selects the last 32-bit word of each 128-bit
+ // block. This is the word that contains the (big-endian) counter.
+ li t0, 0x88
+ vsetvli t1, zero, e8, m1, ta, ma
+ vmv.v.x v0, t0
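+	// (0x88 repeated across the mask register sets mask bits 3, 7, 11, ...,
+	// i.e. with 32-bit elements exactly the fourth word of every block.)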
+
+ // Load the IV into v31. The last 32-bit word contains the counter.
+ vsetivli zero, 4, e32, m1, ta, ma
+ vle32.v v31, (IVP)
+
+ // Convert the big-endian counter into little-endian.
+ vsetivli zero, 4, e32, m1, ta, mu
+ vrev8.v v31, v31, v0.t
+
+ // Splat the IV to v16 (with LMUL=4). The number of copies is the
+ // maximum number of blocks that will be processed per iteration.
+ vsetvli zero, LEN32, e32, m4, ta, ma
+ vmv.v.i v16, 0
+ vaesz.vs v16, v31
+
+ // v20 = [x, x, x, 0, x, x, x, 1, ...]
+ viota.m v20, v0, v0.t
+ // v16 = [IV0, IV1, IV2, counter+0, IV0, IV1, IV2, counter+1, ...]
+ vsetvli VL_E32, LEN32, e32, m4, ta, mu
+ vadd.vv v16, v16, v20, v0.t
+
+ j 2f
+1:
+ // Set the number of blocks to process in this iteration. vl=VL_E32 is
+ // the length in 32-bit words, i.e. 4 times the number of blocks.
+ vsetvli VL_E32, LEN32, e32, m4, ta, mu
+
+ // Increment the counters by the number of blocks processed in the
+ // previous iteration.
+ vadd.vx v16, v16, VL_BLOCKS, v0.t
+2:
+ // Prepare the AES inputs into v24.
+ vmv.v.v v24, v16
+ vrev8.v v24, v24, v0.t // Convert counters back to big-endian.
+
+ // Encrypt the AES inputs to create the next portion of the keystream.
+ aes_encrypt v24, \keylen
+
+ // XOR the data with the keystream.
+ vsetvli t0, LEN, e8, m4, ta, ma
+ vle8.v v20, (INP)
+ vxor.vv v20, v20, v24
+ vse8.v v20, (OUTP)
+
+ // Advance the pointers and update the remaining length.
+ add INP, INP, t0
+ add OUTP, OUTP, t0
+ sub LEN, LEN, t0
+ sub LEN32, LEN32, VL_E32
+ srli VL_BLOCKS, VL_E32, 2
+
+ // Repeat if more data remains.
+ bnez LEN, 1b
+
+ // Update *IVP to contain the next counter.
+ vsetivli zero, 4, e32, m1, ta, mu
+ vadd.vx v16, v16, VL_BLOCKS, v0.t
+ vrev8.v v16, v16, v0.t // Convert counters back to big-endian.
+ vse32.v v16, (IVP)
+
+ ret
+.endm
+
+// void aes_ctr32_crypt_zvkned_zvkb(const struct crypto_aes_ctx *key,
+// const u8 *in, u8 *out, size_t len,
+// u8 iv[16]);
+SYM_FUNC_START(aes_ctr32_crypt_zvkned_zvkb)
+ aes_begin KEYP, 128f, 192f
+ aes_ctr32_crypt 256
+128:
+ aes_ctr32_crypt 128
+192:
+ aes_ctr32_crypt 192
+SYM_FUNC_END(aes_ctr32_crypt_zvkned_zvkb)
diff --git a/arch/riscv/crypto/aes-riscv64-zvkned.S b/arch/riscv/crypto/aes-riscv64-zvkned.S
new file mode 100644
index 00000000000000..23d063f94ce61d
--- /dev/null
+++ b/arch/riscv/crypto/aes-riscv64-zvkned.S
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
+//
+// This file is dual-licensed, meaning that you can use it under your
+// choice of either of the following two licenses:
+//
+// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the Apache License 2.0 (the "License"). You can obtain
+// a copy in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+//
+// or
+//
+// Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
+// Copyright (c) 2023, Phoebe Chen <phoebe.chen@sifive.com>
+// Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
+// Copyright 2024 Google LLC
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The generated code of this file depends on the following RISC-V extensions:
+// - RV64I
+// - RISC-V Vector ('V') with VLEN >= 128
+// - RISC-V Vector AES block cipher extension ('Zvkned')
+
+#include <linux/linkage.h>
+
+.text
+.option arch, +zvkned
+
+#include "aes-macros.S"
+
+#define KEYP a0
+#define INP a1
+#define OUTP a2
+#define LEN a3
+#define IVP a4
+
+.macro __aes_crypt_zvkned enc, keylen
+ vle32.v v16, (INP)
+ aes_crypt v16, \enc, \keylen
+ vse32.v v16, (OUTP)
+ ret
+.endm
+
+.macro aes_crypt_zvkned enc
+ aes_begin KEYP, 128f, 192f
+ __aes_crypt_zvkned \enc, 256
+128:
+ __aes_crypt_zvkned \enc, 128
+192:
+ __aes_crypt_zvkned \enc, 192
+.endm
+
+// void aes_encrypt_zvkned(const struct crypto_aes_ctx *key,
+// const u8 in[16], u8 out[16]);
+SYM_FUNC_START(aes_encrypt_zvkned)
+ aes_crypt_zvkned 1
+SYM_FUNC_END(aes_encrypt_zvkned)
+
+// Same prototype and calling convention as the encryption function
+SYM_FUNC_START(aes_decrypt_zvkned)
+ aes_crypt_zvkned 0
+SYM_FUNC_END(aes_decrypt_zvkned)
+
+.macro __aes_ecb_crypt enc, keylen
+ srli t0, LEN, 2
+ // t0 is the remaining length in 32-bit words. It's a multiple of 4.
+1:
+ vsetvli t1, t0, e32, m8, ta, ma
+ sub t0, t0, t1 // Subtract number of words processed
+ slli t1, t1, 2 // Words to bytes
+ vle32.v v16, (INP)
+ aes_crypt v16, \enc, \keylen
+ vse32.v v16, (OUTP)
+ add INP, INP, t1
+ add OUTP, OUTP, t1
+ bnez t0, 1b
+
+ ret
+.endm
+
+.macro aes_ecb_crypt enc
+ aes_begin KEYP, 128f, 192f
+ __aes_ecb_crypt \enc, 256
+128:
+ __aes_ecb_crypt \enc, 128
+192:
+ __aes_ecb_crypt \enc, 192
+.endm
+
+// void aes_ecb_encrypt_zvkned(const struct crypto_aes_ctx *key,
+// const u8 *in, u8 *out, size_t len);
+//
+// |len| must be nonzero and a multiple of 16 (AES_BLOCK_SIZE).
+SYM_FUNC_START(aes_ecb_encrypt_zvkned)
+ aes_ecb_crypt 1
+SYM_FUNC_END(aes_ecb_encrypt_zvkned)
+
+// Same prototype and calling convention as the encryption function
+SYM_FUNC_START(aes_ecb_decrypt_zvkned)
+ aes_ecb_crypt 0
+SYM_FUNC_END(aes_ecb_decrypt_zvkned)
+
+.macro aes_cbc_encrypt keylen
+ vle32.v v16, (IVP) // Load IV
+1:
+ vle32.v v17, (INP) // Load plaintext block
+ vxor.vv v16, v16, v17 // XOR with IV or prev ciphertext block
+ aes_encrypt v16, \keylen // Encrypt
+ vse32.v v16, (OUTP) // Store ciphertext block
+ addi INP, INP, 16
+ addi OUTP, OUTP, 16
+ addi LEN, LEN, -16
+ bnez LEN, 1b
+
+ vse32.v v16, (IVP) // Store next IV
+ ret
+.endm
+
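+// CBC decryption works on several blocks at a time (LMUL=4): the XOR inputs
+// (each block's predecessor) are formed by sliding the loaded ciphertext up
+// one block and filling the first slot with the IV or the last ciphertext
+// block of the previous set, rather than reloading them from memory.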
+.macro aes_cbc_decrypt keylen
+ srli LEN, LEN, 2 // Convert LEN from bytes to words
+ vle32.v v16, (IVP) // Load IV
+1:
+ vsetvli t0, LEN, e32, m4, ta, ma
+ vle32.v v20, (INP) // Load ciphertext blocks
+ vslideup.vi v16, v20, 4 // Setup prev ciphertext blocks
+ addi t1, t0, -4
+ vslidedown.vx v24, v20, t1 // Save last ciphertext block
+ aes_decrypt v20, \keylen // Decrypt the blocks
+ vxor.vv v20, v20, v16 // XOR with prev ciphertext blocks
+ vse32.v v20, (OUTP) // Store plaintext blocks
+ vmv.v.v v16, v24 // Next "IV" is last ciphertext block
+ slli t1, t0, 2 // Words to bytes
+ add INP, INP, t1
+ add OUTP, OUTP, t1
+ sub LEN, LEN, t0
+ bnez LEN, 1b
+
+ vsetivli zero, 4, e32, m1, ta, ma
+ vse32.v v16, (IVP) // Store next IV
+ ret
+.endm
+
+// void aes_cbc_encrypt_zvkned(const struct crypto_aes_ctx *key,
+// const u8 *in, u8 *out, size_t len, u8 iv[16]);
+//
+// |len| must be nonzero and a multiple of 16 (AES_BLOCK_SIZE).
+SYM_FUNC_START(aes_cbc_encrypt_zvkned)
+ aes_begin KEYP, 128f, 192f
+ aes_cbc_encrypt 256
+128:
+ aes_cbc_encrypt 128
+192:
+ aes_cbc_encrypt 192
+SYM_FUNC_END(aes_cbc_encrypt_zvkned)
+
+// Same prototype and calling convention as the encryption function
+SYM_FUNC_START(aes_cbc_decrypt_zvkned)
+ aes_begin KEYP, 128f, 192f
+ aes_cbc_decrypt 256
+128:
+ aes_cbc_decrypt 128
+192:
+ aes_cbc_decrypt 192
+SYM_FUNC_END(aes_cbc_decrypt_zvkned)
+
+.macro aes_cbc_cts_encrypt keylen
+
+ // CBC-encrypt all blocks except the last. But don't store the
+ // second-to-last block to the output buffer yet, since it will be
+ // handled specially in the ciphertext stealing step. Exception: if the
+ // message is single-block, still encrypt the last (and only) block.
+ li t0, 16
+ j 2f
+1:
+ vse32.v v16, (OUTP) // Store ciphertext block
+ addi OUTP, OUTP, 16
+2:
+ vle32.v v17, (INP) // Load plaintext block
+ vxor.vv v16, v16, v17 // XOR with IV or prev ciphertext block
+ aes_encrypt v16, \keylen // Encrypt
+ addi INP, INP, 16
+ addi LEN, LEN, -16
+ bgt LEN, t0, 1b // Repeat if more than one block remains
+
+ // Special case: if the message is a single block, just do CBC.
+ beqz LEN, .Lcts_encrypt_done\@
+
+ // Encrypt the last two blocks using ciphertext stealing as follows:
+ // C[n-1] = Encrypt(Encrypt(P[n-1] ^ C[n-2]) ^ P[n])
+ // C[n] = Encrypt(P[n-1] ^ C[n-2])[0..LEN]
+ //
+ // C[i] denotes the i'th ciphertext block, and likewise P[i] the i'th
+ // plaintext block. Block n, the last block, may be partial; its length
+ // is 1 <= LEN <= 16. If there are only 2 blocks, C[n-2] means the IV.
+ //
+ // v16 already contains Encrypt(P[n-1] ^ C[n-2]).
+ // INP points to P[n]. OUTP points to where C[n-1] should go.
+ // To support in-place encryption, load P[n] before storing C[n].
+ addi t0, OUTP, 16 // Get pointer to where C[n] should go
+ vsetvli zero, LEN, e8, m1, tu, ma
+ vle8.v v17, (INP) // Load P[n]
+ vse8.v v16, (t0) // Store C[n]
+ vxor.vv v16, v16, v17 // v16 = Encrypt(P[n-1] ^ C[n-2]) ^ P[n]
+ vsetivli zero, 4, e32, m1, ta, ma
+ aes_encrypt v16, \keylen
+.Lcts_encrypt_done\@:
+ vse32.v v16, (OUTP) // Store C[n-1] (or C[n] in single-block case)
+ ret
+.endm
+
+#define LEN32 t4 // Length of remaining full blocks in 32-bit words
+#define LEN_MOD16 t5 // Length of message in bytes mod 16
+
+.macro aes_cbc_cts_decrypt keylen
+ andi LEN32, LEN, ~15
+ srli LEN32, LEN32, 2
+ andi LEN_MOD16, LEN, 15
+
+ // Save C[n-2] in v28 so that it's available later during the ciphertext
+ // stealing step. If there are fewer than three blocks, C[n-2] means
+ // the IV, otherwise it means the third-to-last ciphertext block.
+ vmv.v.v v28, v16 // IV
+	addi t0, LEN, -33
+ bltz t0, .Lcts_decrypt_loop\@
+ andi t0, t0, ~15
+ add t0, t0, INP
+ vle32.v v28, (t0)
+
+ // CBC-decrypt all full blocks. For the last full block, or the last 2
+ // full blocks if the message is block-aligned, this doesn't write the
+ // correct output blocks (unless the message is only a single block),
+ // because it XORs the wrong values with the raw AES plaintexts. But we
+ // fix this after this loop without redoing the AES decryptions. This
+ // approach allows more of the AES decryptions to be parallelized.
+.Lcts_decrypt_loop\@:
+ vsetvli t0, LEN32, e32, m4, ta, ma
+ addi t1, t0, -4
+ vle32.v v20, (INP) // Load next set of ciphertext blocks
+ vmv.v.v v24, v16 // Get IV or last ciphertext block of prev set
+ vslideup.vi v24, v20, 4 // Setup prev ciphertext blocks
+ vslidedown.vx v16, v20, t1 // Save last ciphertext block of this set
+ aes_decrypt v20, \keylen // Decrypt this set of blocks
+ vxor.vv v24, v24, v20 // XOR prev ciphertext blocks with decrypted blocks
+ vse32.v v24, (OUTP) // Store this set of plaintext blocks
+ sub LEN32, LEN32, t0
+ slli t0, t0, 2 // Words to bytes
+ add INP, INP, t0
+ add OUTP, OUTP, t0
+ bnez LEN32, .Lcts_decrypt_loop\@
+
+ vsetivli zero, 4, e32, m4, ta, ma
+ vslidedown.vx v20, v20, t1 // Extract raw plaintext of last full block
+ addi t0, OUTP, -16 // Get pointer to last full plaintext block
+ bnez LEN_MOD16, .Lcts_decrypt_non_block_aligned\@
+
+ // Special case: if the message is a single block, just do CBC.
+ li t1, 16
+ beq LEN, t1, .Lcts_decrypt_done\@
+
+ // Block-aligned message. Just fix up the last 2 blocks. We need:
+ //
+ // P[n-1] = Decrypt(C[n]) ^ C[n-2]
+ // P[n] = Decrypt(C[n-1]) ^ C[n]
+ //
+ // We have C[n] in v16, Decrypt(C[n]) in v20, and C[n-2] in v28.
+ // Together with Decrypt(C[n-1]) ^ C[n-2] from the output buffer, this
+ // is everything needed to fix the output without re-decrypting blocks.
+ addi t1, OUTP, -32 // Get pointer to where P[n-1] should go
+ vxor.vv v20, v20, v28 // Decrypt(C[n]) ^ C[n-2] == P[n-1]
+ vle32.v v24, (t1) // Decrypt(C[n-1]) ^ C[n-2]
+ vse32.v v20, (t1) // Store P[n-1]
+ vxor.vv v20, v24, v16 // Decrypt(C[n-1]) ^ C[n-2] ^ C[n] == P[n] ^ C[n-2]
+ j .Lcts_decrypt_finish\@
+
+.Lcts_decrypt_non_block_aligned\@:
+ // Decrypt the last two blocks using ciphertext stealing as follows:
+ //
+ // P[n-1] = Decrypt(C[n] || Decrypt(C[n-1])[LEN_MOD16..16]) ^ C[n-2]
+ // P[n] = (Decrypt(C[n-1]) ^ C[n])[0..LEN_MOD16]
+ //
+ // We already have Decrypt(C[n-1]) in v20 and C[n-2] in v28.
+ vmv.v.v v16, v20 // v16 = Decrypt(C[n-1])
+ vsetvli zero, LEN_MOD16, e8, m1, tu, ma
+ vle8.v v20, (INP) // v20 = C[n] || Decrypt(C[n-1])[LEN_MOD16..16]
+ vxor.vv v16, v16, v20 // v16 = Decrypt(C[n-1]) ^ C[n]
+ vse8.v v16, (OUTP) // Store P[n]
+ vsetivli zero, 4, e32, m1, ta, ma
+ aes_decrypt v20, \keylen // v20 = Decrypt(C[n] || Decrypt(C[n-1])[LEN_MOD16..16])
+.Lcts_decrypt_finish\@:
+ vxor.vv v20, v20, v28 // XOR with C[n-2]
+ vse32.v v20, (t0) // Store last full plaintext block
+.Lcts_decrypt_done\@:
+ ret
+.endm
+
+.macro aes_cbc_cts_crypt keylen
+ vle32.v v16, (IVP) // Load IV
+ beqz a5, .Lcts_decrypt\@
+ aes_cbc_cts_encrypt \keylen
+.Lcts_decrypt\@:
+ aes_cbc_cts_decrypt \keylen
+.endm
+
+// void aes_cbc_cts_crypt_zvkned(const struct crypto_aes_ctx *key,
+// const u8 *in, u8 *out, size_t len,
+// const u8 iv[16], bool enc);
+//
+// Encrypts or decrypts a message with the CS3 variant of AES-CBC-CTS.
+// This is the variant that unconditionally swaps the last two blocks.
+SYM_FUNC_START(aes_cbc_cts_crypt_zvkned)
+ aes_begin KEYP, 128f, 192f
+ aes_cbc_cts_crypt 256
+128:
+ aes_cbc_cts_crypt 128
+192:
+ aes_cbc_cts_crypt 192
+SYM_FUNC_END(aes_cbc_cts_crypt_zvkned)
diff --git a/arch/riscv/crypto/chacha-riscv64-glue.c b/arch/riscv/crypto/chacha-riscv64-glue.c
new file mode 100644
index 00000000000000..10b46f36375aff
--- /dev/null
+++ b/arch/riscv/crypto/chacha-riscv64-glue.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ChaCha20 using the RISC-V vector crypto extensions
+ *
+ * Copyright (C) 2023 SiFive, Inc.
+ * Author: Jerry Shih <jerry.shih@sifive.com>
+ */
+
+#include <asm/simd.h>
+#include <asm/vector.h>
+#include <crypto/internal/chacha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/linkage.h>
+#include <linux/module.h>
+
+asmlinkage void chacha20_zvkb(const u32 key[8], const u8 *in, u8 *out,
+ size_t len, const u32 iv[4]);
+
+static int riscv64_chacha20_crypt(struct skcipher_request *req)
+{
+ u32 iv[CHACHA_IV_SIZE / sizeof(u32)];
+ u8 block_buffer[CHACHA_BLOCK_SIZE];
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ unsigned int tail_bytes;
+ int err;
+
+ iv[0] = get_unaligned_le32(req->iv);
+ iv[1] = get_unaligned_le32(req->iv + 4);
+ iv[2] = get_unaligned_le32(req->iv + 8);
+ iv[3] = get_unaligned_le32(req->iv + 12);
+
+ err = skcipher_walk_virt(&walk, req, false);
+ while (walk.nbytes) {
+ nbytes = walk.nbytes & ~(CHACHA_BLOCK_SIZE - 1);
+ tail_bytes = walk.nbytes & (CHACHA_BLOCK_SIZE - 1);
+ kernel_vector_begin();
+ if (nbytes) {
+ chacha20_zvkb(ctx->key, walk.src.virt.addr,
+ walk.dst.virt.addr, nbytes, iv);
+ iv[0] += nbytes / CHACHA_BLOCK_SIZE;
+ }
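+		/*
+		 * A final partial block is bounced through a full-size
+		 * buffer, since the assembly only handles whole 64-byte
+		 * blocks; the surplus keystream bytes are simply discarded.
+		 */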
+ if (walk.nbytes == walk.total && tail_bytes > 0) {
+ memcpy(block_buffer, walk.src.virt.addr + nbytes,
+ tail_bytes);
+ chacha20_zvkb(ctx->key, block_buffer, block_buffer,
+ CHACHA_BLOCK_SIZE, iv);
+ memcpy(walk.dst.virt.addr + nbytes, block_buffer,
+ tail_bytes);
+ tail_bytes = 0;
+ }
+ kernel_vector_end();
+
+ err = skcipher_walk_done(&walk, tail_bytes);
+ }
+
+ return err;
+}
+
+static struct skcipher_alg riscv64_chacha_alg = {
+ .setkey = chacha20_setkey,
+ .encrypt = riscv64_chacha20_crypt,
+ .decrypt = riscv64_chacha20_crypt,
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .walksize = 4 * CHACHA_BLOCK_SIZE,
+ .base = {
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chacha_ctx),
+ .cra_priority = 300,
+ .cra_name = "chacha20",
+ .cra_driver_name = "chacha20-riscv64-zvkb",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static int __init riscv64_chacha_mod_init(void)
+{
+ if (riscv_isa_extension_available(NULL, ZVKB) &&
+ riscv_vector_vlen() >= 128)
+ return crypto_register_skcipher(&riscv64_chacha_alg);
+
+ return -ENODEV;
+}
+
+static void __exit riscv64_chacha_mod_exit(void)
+{
+ crypto_unregister_skcipher(&riscv64_chacha_alg);
+}
+
+module_init(riscv64_chacha_mod_init);
+module_exit(riscv64_chacha_mod_exit);
+
+MODULE_DESCRIPTION("ChaCha20 (RISC-V accelerated)");
+MODULE_AUTHOR("Jerry Shih <jerry.shih@sifive.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("chacha20");
diff --git a/arch/riscv/crypto/chacha-riscv64-zvkb.S b/arch/riscv/crypto/chacha-riscv64-zvkb.S
new file mode 100644
index 00000000000000..bf057737ac6935
--- /dev/null
+++ b/arch/riscv/crypto/chacha-riscv64-zvkb.S
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
+//
+// This file is dual-licensed, meaning that you can use it under your
+// choice of either of the following two licenses:
+//
+// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the Apache License 2.0 (the "License"). You can obtain
+// a copy in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+//
+// or
+//
+// Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
+// Copyright 2024 Google LLC
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The generated code of this file depends on the following RISC-V extensions:
+// - RV64I
+// - RISC-V Vector ('V') with VLEN >= 128
+// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
+
+#include <linux/linkage.h>
+
+.text
+.option arch, +zvkb
+
+#define KEYP a0
+#define INP a1
+#define OUTP a2
+#define LEN a3
+#define IVP a4
+
+#define CONSTS0 a5
+#define CONSTS1 a6
+#define CONSTS2 a7
+#define CONSTS3 t0
+#define TMP t1
+#define VL t2
+#define STRIDE t3
+#define NROUNDS t4
+#define KEY0 s0
+#define KEY1 s1
+#define KEY2 s2
+#define KEY3 s3
+#define KEY4 s4
+#define KEY5 s5
+#define KEY6 s6
+#define KEY7 s7
+#define COUNTER s8
+#define NONCE0 s9
+#define NONCE1 s10
+#define NONCE2 s11
+
+.macro chacha_round a0, b0, c0, d0, a1, b1, c1, d1, \
+ a2, b2, c2, d2, a3, b3, c3, d3
+ // a += b; d ^= a; d = rol(d, 16);
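+	// (ChaCha rotates left; vror.vi rotates right, hence the 32 - k
+	// rotate counts used throughout this macro.)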
+ vadd.vv \a0, \a0, \b0
+ vadd.vv \a1, \a1, \b1
+ vadd.vv \a2, \a2, \b2
+ vadd.vv \a3, \a3, \b3
+ vxor.vv \d0, \d0, \a0
+ vxor.vv \d1, \d1, \a1
+ vxor.vv \d2, \d2, \a2
+ vxor.vv \d3, \d3, \a3
+ vror.vi \d0, \d0, 32 - 16
+ vror.vi \d1, \d1, 32 - 16
+ vror.vi \d2, \d2, 32 - 16
+ vror.vi \d3, \d3, 32 - 16
+
+ // c += d; b ^= c; b = rol(b, 12);
+ vadd.vv \c0, \c0, \d0
+ vadd.vv \c1, \c1, \d1
+ vadd.vv \c2, \c2, \d2
+ vadd.vv \c3, \c3, \d3
+ vxor.vv \b0, \b0, \c0
+ vxor.vv \b1, \b1, \c1
+ vxor.vv \b2, \b2, \c2
+ vxor.vv \b3, \b3, \c3
+ vror.vi \b0, \b0, 32 - 12
+ vror.vi \b1, \b1, 32 - 12
+ vror.vi \b2, \b2, 32 - 12
+ vror.vi \b3, \b3, 32 - 12
+
+ // a += b; d ^= a; d = rol(d, 8);
+ vadd.vv \a0, \a0, \b0
+ vadd.vv \a1, \a1, \b1
+ vadd.vv \a2, \a2, \b2
+ vadd.vv \a3, \a3, \b3
+ vxor.vv \d0, \d0, \a0
+ vxor.vv \d1, \d1, \a1
+ vxor.vv \d2, \d2, \a2
+ vxor.vv \d3, \d3, \a3
+ vror.vi \d0, \d0, 32 - 8
+ vror.vi \d1, \d1, 32 - 8
+ vror.vi \d2, \d2, 32 - 8
+ vror.vi \d3, \d3, 32 - 8
+
+ // c += d; b ^= c; b = rol(b, 7);
+ vadd.vv \c0, \c0, \d0
+ vadd.vv \c1, \c1, \d1
+ vadd.vv \c2, \c2, \d2
+ vadd.vv \c3, \c3, \d3
+ vxor.vv \b0, \b0, \c0
+ vxor.vv \b1, \b1, \c1
+ vxor.vv \b2, \b2, \c2
+ vxor.vv \b3, \b3, \c3
+ vror.vi \b0, \b0, 32 - 7
+ vror.vi \b1, \b1, 32 - 7
+ vror.vi \b2, \b2, 32 - 7
+ vror.vi \b3, \b3, 32 - 7
+.endm
+
+// void chacha20_zvkb(const u32 key[8], const u8 *in, u8 *out, size_t len,
+// const u32 iv[4]);
+//
+// |len| must be nonzero and a multiple of 64 (CHACHA_BLOCK_SIZE).
+// The counter is treated as 32-bit, following the RFC 7539 convention.
+SYM_FUNC_START(chacha20_zvkb)
+ srli LEN, LEN, 6 // Bytes to blocks
+
+ addi sp, sp, -96
+ sd s0, 0(sp)
+ sd s1, 8(sp)
+ sd s2, 16(sp)
+ sd s3, 24(sp)
+ sd s4, 32(sp)
+ sd s5, 40(sp)
+ sd s6, 48(sp)
+ sd s7, 56(sp)
+ sd s8, 64(sp)
+ sd s9, 72(sp)
+ sd s10, 80(sp)
+ sd s11, 88(sp)
+
+ li STRIDE, 64
+
+ // Set up the initial state matrix in scalar registers.
+ li CONSTS0, 0x61707865 // "expa" little endian
+ li CONSTS1, 0x3320646e // "nd 3" little endian
+ li CONSTS2, 0x79622d32 // "2-by" little endian
+ li CONSTS3, 0x6b206574 // "te k" little endian
+ lw KEY0, 0(KEYP)
+ lw KEY1, 4(KEYP)
+ lw KEY2, 8(KEYP)
+ lw KEY3, 12(KEYP)
+ lw KEY4, 16(KEYP)
+ lw KEY5, 20(KEYP)
+ lw KEY6, 24(KEYP)
+ lw KEY7, 28(KEYP)
+ lw COUNTER, 0(IVP)
+ lw NONCE0, 4(IVP)
+ lw NONCE1, 8(IVP)
+ lw NONCE2, 12(IVP)
+
+.Lblock_loop:
+ // Set vl to the number of blocks to process in this iteration.
+ vsetvli VL, LEN, e32, m1, ta, ma
+
+ // Set up the initial state matrix for the next VL blocks in v0-v15.
+ // v{i} holds the i'th 32-bit word of the state matrix for all blocks.
+ // Note that only the counter word, at index 12, differs across blocks.
+ vmv.v.x v0, CONSTS0
+ vmv.v.x v1, CONSTS1
+ vmv.v.x v2, CONSTS2
+ vmv.v.x v3, CONSTS3
+ vmv.v.x v4, KEY0
+ vmv.v.x v5, KEY1
+ vmv.v.x v6, KEY2
+ vmv.v.x v7, KEY3
+ vmv.v.x v8, KEY4
+ vmv.v.x v9, KEY5
+ vmv.v.x v10, KEY6
+ vmv.v.x v11, KEY7
+ vid.v v12
+ vadd.vx v12, v12, COUNTER
+ vmv.v.x v13, NONCE0
+ vmv.v.x v14, NONCE1
+ vmv.v.x v15, NONCE2
+
+ // Load the first half of the input data for each block into v16-v23.
+ // v{16+i} holds the i'th 32-bit word for all blocks.
+ vlsseg8e32.v v16, (INP), STRIDE
+
+ li NROUNDS, 20
+.Lnext_doubleround:
+ addi NROUNDS, NROUNDS, -2
+ // column round
+ chacha_round v0, v4, v8, v12, v1, v5, v9, v13, \
+ v2, v6, v10, v14, v3, v7, v11, v15
+ // diagonal round
+ chacha_round v0, v5, v10, v15, v1, v6, v11, v12, \
+ v2, v7, v8, v13, v3, v4, v9, v14
+ bnez NROUNDS, .Lnext_doubleround
+
+ // Load the second half of the input data for each block into v24-v31.
+ // v{24+i} holds the {8+i}'th 32-bit word for all blocks.
+ addi TMP, INP, 32
+ vlsseg8e32.v v24, (TMP), STRIDE
+
+ // Finalize the first half of the keystream for each block.
+ vadd.vx v0, v0, CONSTS0
+ vadd.vx v1, v1, CONSTS1
+ vadd.vx v2, v2, CONSTS2
+ vadd.vx v3, v3, CONSTS3
+ vadd.vx v4, v4, KEY0
+ vadd.vx v5, v5, KEY1
+ vadd.vx v6, v6, KEY2
+ vadd.vx v7, v7, KEY3
+
+ // Encrypt/decrypt the first half of the data for each block.
+ vxor.vv v16, v16, v0
+ vxor.vv v17, v17, v1
+ vxor.vv v18, v18, v2
+ vxor.vv v19, v19, v3
+ vxor.vv v20, v20, v4
+ vxor.vv v21, v21, v5
+ vxor.vv v22, v22, v6
+ vxor.vv v23, v23, v7
+
+ // Store the first half of the output data for each block.
+ vssseg8e32.v v16, (OUTP), STRIDE
+
+ // Finalize the second half of the keystream for each block.
+ vadd.vx v8, v8, KEY4
+ vadd.vx v9, v9, KEY5
+ vadd.vx v10, v10, KEY6
+ vadd.vx v11, v11, KEY7
+ vid.v v0
+ vadd.vx v12, v12, COUNTER
+ vadd.vx v13, v13, NONCE0
+ vadd.vx v14, v14, NONCE1
+ vadd.vx v15, v15, NONCE2
+ vadd.vv v12, v12, v0
+
+ // Encrypt/decrypt the second half of the data for each block.
+ vxor.vv v24, v24, v8
+ vxor.vv v25, v25, v9
+ vxor.vv v26, v26, v10
+ vxor.vv v27, v27, v11
+ vxor.vv v28, v28, v12
+ vxor.vv v29, v29, v13
+ vxor.vv v30, v30, v14
+ vxor.vv v31, v31, v15
+
+ // Store the second half of the output data for each block.
+ addi TMP, OUTP, 32
+ vssseg8e32.v v24, (TMP), STRIDE
+
+ // Update the counter, the remaining number of blocks, and the input and
+ // output pointers according to the number of blocks processed (VL).
+ add COUNTER, COUNTER, VL
+ sub LEN, LEN, VL
+ slli TMP, VL, 6
+ add OUTP, OUTP, TMP
+ add INP, INP, TMP
+ bnez LEN, .Lblock_loop
+
+ ld s0, 0(sp)
+ ld s1, 8(sp)
+ ld s2, 16(sp)
+ ld s3, 24(sp)
+ ld s4, 32(sp)
+ ld s5, 40(sp)
+ ld s6, 48(sp)
+ ld s7, 56(sp)
+ ld s8, 64(sp)
+ ld s9, 72(sp)
+ ld s10, 80(sp)
+ ld s11, 88(sp)
+ addi sp, sp, 96
+ ret
+SYM_FUNC_END(chacha20_zvkb)
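
A hypothetical caller sketch showing the contract documented above (the real glue code lives elsewhere in this series; the surrounding names are assumptions):

        /* Hypothetical usage sketch -- not the actual glue code. */
        if (crypto_simd_usable()) {
                kernel_vector_begin();
                chacha20_zvkb(key, src, dst, len, iv); /* len % 64 == 0, len != 0 */
                kernel_vector_end();
        }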
diff --git a/arch/riscv/crypto/ghash-riscv64-glue.c b/arch/riscv/crypto/ghash-riscv64-glue.c
new file mode 100644
index 00000000000000..312e7891fd0a36
--- /dev/null
+++ b/arch/riscv/crypto/ghash-riscv64-glue.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GHASH using the RISC-V vector crypto extensions
+ *
+ * Copyright (C) 2023 VRULL GmbH
+ * Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
+ *
+ * Copyright (C) 2023 SiFive, Inc.
+ * Author: Jerry Shih <jerry.shih@sifive.com>
+ */
+
+#include <asm/simd.h>
+#include <asm/vector.h>
+#include <crypto/ghash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
+#include <linux/linkage.h>
+#include <linux/module.h>
+
+asmlinkage void ghash_zvkg(be128 *accumulator, const be128 *key, const u8 *data,
+ size_t len);
+
+struct riscv64_ghash_tfm_ctx {
+ be128 key;
+};
+
+struct riscv64_ghash_desc_ctx {
+ be128 accumulator;
+ u8 buffer[GHASH_BLOCK_SIZE];
+ u32 bytes;
+};
+
+static int riscv64_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct riscv64_ghash_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+
+ if (keylen != GHASH_BLOCK_SIZE)
+ return -EINVAL;
+
+ memcpy(&tctx->key, key, GHASH_BLOCK_SIZE);
+
+ return 0;
+}
+
+static int riscv64_ghash_init(struct shash_desc *desc)
+{
+ struct riscv64_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ *dctx = (struct riscv64_ghash_desc_ctx){};
+
+ return 0;
+}
+
+static inline void
+riscv64_ghash_blocks(const struct riscv64_ghash_tfm_ctx *tctx,
+ struct riscv64_ghash_desc_ctx *dctx,
+ const u8 *src, size_t srclen)
+{
+ /* srclen is nonzero and a multiple of GHASH_BLOCK_SIZE (16) here. */
+ if (crypto_simd_usable()) {
+ kernel_vector_begin();
+ ghash_zvkg(&dctx->accumulator, &tctx->key, src, srclen);
+ kernel_vector_end();
+ } else {
+ do {
+ crypto_xor((u8 *)&dctx->accumulator, src,
+ GHASH_BLOCK_SIZE);
+ gf128mul_lle(&dctx->accumulator, &tctx->key);
+ src += GHASH_BLOCK_SIZE;
+ srclen -= GHASH_BLOCK_SIZE;
+ } while (srclen);
+ }
+}
+
+static int riscv64_ghash_update(struct shash_desc *desc, const u8 *src,
+ unsigned int srclen)
+{
+ const struct riscv64_ghash_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct riscv64_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ unsigned int len;
+
+ if (dctx->bytes) {
+ if (dctx->bytes + srclen < GHASH_BLOCK_SIZE) {
+ memcpy(dctx->buffer + dctx->bytes, src, srclen);
+ dctx->bytes += srclen;
+ return 0;
+ }
+ memcpy(dctx->buffer + dctx->bytes, src,
+ GHASH_BLOCK_SIZE - dctx->bytes);
+ riscv64_ghash_blocks(tctx, dctx, dctx->buffer,
+ GHASH_BLOCK_SIZE);
+ src += GHASH_BLOCK_SIZE - dctx->bytes;
+ srclen -= GHASH_BLOCK_SIZE - dctx->bytes;
+ dctx->bytes = 0;
+ }
+
+ len = round_down(srclen, GHASH_BLOCK_SIZE);
+ if (len) {
+ riscv64_ghash_blocks(tctx, dctx, src, len);
+ src += len;
+ srclen -= len;
+ }
+
+ if (srclen) {
+ memcpy(dctx->buffer, src, srclen);
+ dctx->bytes = srclen;
+ }
+
+ return 0;
+}
+
+static int riscv64_ghash_final(struct shash_desc *desc, u8 *out)
+{
+ const struct riscv64_ghash_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct riscv64_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ int i;
+
+ if (dctx->bytes) {
+ for (i = dctx->bytes; i < GHASH_BLOCK_SIZE; i++)
+ dctx->buffer[i] = 0;
+
+ riscv64_ghash_blocks(tctx, dctx, dctx->buffer,
+ GHASH_BLOCK_SIZE);
+ }
+
+ memcpy(out, &dctx->accumulator, GHASH_DIGEST_SIZE);
+ return 0;
+}
+
+static struct shash_alg riscv64_ghash_alg = {
+ .init = riscv64_ghash_init,
+ .update = riscv64_ghash_update,
+ .final = riscv64_ghash_final,
+ .setkey = riscv64_ghash_setkey,
+ .descsize = sizeof(struct riscv64_ghash_desc_ctx),
+ .digestsize = GHASH_DIGEST_SIZE,
+ .base = {
+ .cra_blocksize = GHASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct riscv64_ghash_tfm_ctx),
+ .cra_priority = 300,
+ .cra_name = "ghash",
+ .cra_driver_name = "ghash-riscv64-zvkg",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static int __init riscv64_ghash_mod_init(void)
+{
+ if (riscv_isa_extension_available(NULL, ZVKG) &&
+ riscv_vector_vlen() >= 128)
+ return crypto_register_shash(&riscv64_ghash_alg);
+
+ return -ENODEV;
+}
+
+static void __exit riscv64_ghash_mod_exit(void)
+{
+ crypto_unregister_shash(&riscv64_ghash_alg);
+}
+
+module_init(riscv64_ghash_mod_init);
+module_exit(riscv64_ghash_mod_exit);
+
+MODULE_DESCRIPTION("GHASH (RISC-V accelerated)");
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@vrull.eu>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("ghash");
diff --git a/arch/riscv/crypto/ghash-riscv64-zvkg.S b/arch/riscv/crypto/ghash-riscv64-zvkg.S
new file mode 100644
index 00000000000000..f2b43fb4d434fc
--- /dev/null
+++ b/arch/riscv/crypto/ghash-riscv64-zvkg.S
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
+//
+// This file is dual-licensed, meaning that you can use it under your
+// choice of either of the following two licenses:
+//
+// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the Apache License 2.0 (the "License"). You can obtain
+// a copy in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+//
+// or
+//
+// Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
+// Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
+// Copyright 2024 Google LLC
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The generated code of this file depends on the following RISC-V extensions:
+// - RV64I
+// - RISC-V Vector ('V') with VLEN >= 128
+// - RISC-V Vector GCM/GMAC extension ('Zvkg')
+
+#include <linux/linkage.h>
+
+.text
+.option arch, +zvkg
+
+#define ACCUMULATOR a0
+#define KEY a1
+#define DATA a2
+#define LEN a3
+
+// void ghash_zvkg(be128 *accumulator, const be128 *key, const u8 *data,
+// size_t len);
+//
+// |len| must be nonzero and a multiple of 16 (GHASH_BLOCK_SIZE).
+SYM_FUNC_START(ghash_zvkg)
+ vsetivli zero, 4, e32, m1, ta, ma
+ vle32.v v1, (ACCUMULATOR)
+ vle32.v v2, (KEY)
+.Lnext_block:
+ vle32.v v3, (DATA)
+ vghsh.vv v1, v2, v3
+ addi DATA, DATA, 16
+ addi LEN, LEN, -16
+ bnez LEN, .Lnext_block
+
+ vse32.v v1, (ACCUMULATOR)
+ ret
+SYM_FUNC_END(ghash_zvkg)
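
Each vghsh.vv iteration computes one step of the GHASH recurrence Y_i = (Y_{i-1} xor X_i) * H over GF(2^128). A scalar C equivalent, mirroring the fallback path in the glue code above:

        /* Scalar equivalent of the Zvkg loop (mirrors the glue fallback). */
        do {
                crypto_xor((u8 *)accumulator, data, GHASH_BLOCK_SIZE); /* Y ^= X */
                gf128mul_lle(accumulator, key);                        /* Y = Y*H */
                data += GHASH_BLOCK_SIZE;
                len -= GHASH_BLOCK_SIZE;
        } while (len);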
diff --git a/arch/riscv/crypto/sha256-riscv64-glue.c b/arch/riscv/crypto/sha256-riscv64-glue.c
new file mode 100644
index 00000000000000..71e051e40a64f4
--- /dev/null
+++ b/arch/riscv/crypto/sha256-riscv64-glue.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SHA-256 and SHA-224 using the RISC-V vector crypto extensions
+ *
+ * Copyright (C) 2022 VRULL GmbH
+ * Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
+ *
+ * Copyright (C) 2023 SiFive, Inc.
+ * Author: Jerry Shih <jerry.shih@sifive.com>
+ */
+
+#include <asm/simd.h>
+#include <asm/vector.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
+#include <crypto/sha256_base.h>
+#include <linux/linkage.h>
+#include <linux/module.h>
+
+/*
+ * Note: the asm function only uses the 'state' field of struct sha256_state.
+ * It is assumed to be the first field.
+ */
+asmlinkage void sha256_transform_zvknha_or_zvknhb_zvkb(
+ struct sha256_state *state, const u8 *data, int num_blocks);
+
+static int riscv64_sha256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ /*
+ * Ensure struct sha256_state begins directly with the SHA-256
+ * 256-bit internal state, as this is what the asm function expects.
+ */
+ BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
+
+ if (crypto_simd_usable()) {
+ kernel_vector_begin();
+ sha256_base_do_update(desc, data, len,
+ sha256_transform_zvknha_or_zvknhb_zvkb);
+ kernel_vector_end();
+ } else {
+ crypto_sha256_update(desc, data, len);
+ }
+ return 0;
+}
+
+static int riscv64_sha256_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ if (crypto_simd_usable()) {
+ kernel_vector_begin();
+ if (len)
+ sha256_base_do_update(
+ desc, data, len,
+ sha256_transform_zvknha_or_zvknhb_zvkb);
+ sha256_base_do_finalize(
+ desc, sha256_transform_zvknha_or_zvknhb_zvkb);
+ kernel_vector_end();
+
+ return sha256_base_finish(desc, out);
+ }
+
+ return crypto_sha256_finup(desc, data, len, out);
+}
+
+static int riscv64_sha256_final(struct shash_desc *desc, u8 *out)
+{
+ return riscv64_sha256_finup(desc, NULL, 0, out);
+}
+
+static int riscv64_sha256_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ return sha256_base_init(desc) ?:
+ riscv64_sha256_finup(desc, data, len, out);
+}
+
+static struct shash_alg riscv64_sha256_algs[] = {
+ {
+ .init = sha256_base_init,
+ .update = riscv64_sha256_update,
+ .final = riscv64_sha256_final,
+ .finup = riscv64_sha256_finup,
+ .digest = riscv64_sha256_digest,
+ .descsize = sizeof(struct sha256_state),
+ .digestsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_priority = 300,
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-riscv64-zvknha_or_zvknhb-zvkb",
+ .cra_module = THIS_MODULE,
+ },
+ }, {
+ .init = sha224_base_init,
+ .update = riscv64_sha256_update,
+ .final = riscv64_sha256_final,
+ .finup = riscv64_sha256_finup,
+ .descsize = sizeof(struct sha256_state),
+ .digestsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_priority = 300,
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-riscv64-zvknha_or_zvknhb-zvkb",
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int __init riscv64_sha256_mod_init(void)
+{
+ /* Both Zvknha and Zvknhb provide the SHA-256 instructions. */
+ if ((riscv_isa_extension_available(NULL, ZVKNHA) ||
+ riscv_isa_extension_available(NULL, ZVKNHB)) &&
+ riscv_isa_extension_available(NULL, ZVKB) &&
+ riscv_vector_vlen() >= 128)
+ return crypto_register_shashes(riscv64_sha256_algs,
+ ARRAY_SIZE(riscv64_sha256_algs));
+
+ return -ENODEV;
+}
+
+static void __exit riscv64_sha256_mod_exit(void)
+{
+ crypto_unregister_shashes(riscv64_sha256_algs,
+ ARRAY_SIZE(riscv64_sha256_algs));
+}
+
+module_init(riscv64_sha256_mod_init);
+module_exit(riscv64_sha256_mod_exit);
+
+MODULE_DESCRIPTION("SHA-256 (RISC-V accelerated)");
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@vrull.eu>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("sha256");
+MODULE_ALIAS_CRYPTO("sha224");
diff --git a/arch/riscv/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S b/arch/riscv/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S
new file mode 100644
index 00000000000000..8ebcc17de4dce3
--- /dev/null
+++ b/arch/riscv/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
+//
+// This file is dual-licensed, meaning that you can use it under your
+// choice of either of the following two licenses:
+//
+// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the Apache License 2.0 (the "License"). You can obtain
+// a copy in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+//
+// or
+//
+// Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
+// Copyright (c) 2023, Phoebe Chen <phoebe.chen@sifive.com>
+// Copyright 2024 Google LLC
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The generated code of this file depends on the following RISC-V extensions:
+// - RV64I
+// - RISC-V Vector ('V') with VLEN >= 128
+// - RISC-V Vector SHA-2 Secure Hash extension ('Zvknha' or 'Zvknhb')
+// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
+
+#include <linux/cfi_types.h>
+
+.text
+.option arch, +zvknha, +zvkb
+
+#define STATEP a0
+#define DATA a1
+#define NUM_BLOCKS a2
+
+#define STATEP_C a3
+
+#define MASK v0
+#define INDICES v1
+#define W0 v2
+#define W1 v3
+#define W2 v4
+#define W3 v5
+#define VTMP v6
+#define FEBA v7
+#define HGDC v8
+#define K0 v10
+#define K1 v11
+#define K2 v12
+#define K3 v13
+#define K4 v14
+#define K5 v15
+#define K6 v16
+#define K7 v17
+#define K8 v18
+#define K9 v19
+#define K10 v20
+#define K11 v21
+#define K12 v22
+#define K13 v23
+#define K14 v24
+#define K15 v25
+#define PREV_FEBA v26
+#define PREV_HGDC v27
+
+// Do 4 rounds of SHA-256. w0 contains the current 4 message schedule words.
+//
+// If not all the message schedule words have been computed yet, then this also
+// computes 4 more message schedule words. w1-w3 contain the next 3 groups of 4
+// message schedule words; this macro computes the group after w3 and writes it
+// to w0. This means that the next (w0, w1, w2, w3) is the current (w1, w2, w3,
+// w0), so the caller must cycle through the registers accordingly.
+.macro sha256_4rounds last, k, w0, w1, w2, w3
+ vadd.vv VTMP, \k, \w0
+ vsha2cl.vv HGDC, FEBA, VTMP
+ vsha2ch.vv FEBA, HGDC, VTMP
+.if !\last
+ vmerge.vvm VTMP, \w2, \w1, MASK
+ vsha2ms.vv \w0, VTMP, \w3
+.endif
+.endm
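
For reference, vsha2ms.vv evaluates the standard SHA-256 message-schedule recurrence four words at a time; a plain C sketch of that recurrence (illustrative, not part of this patch, using the kernel's ror32()):

        /* W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16] */
        static inline u32 sha256_sigma0(u32 x)
        {
                return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3);
        }

        static inline u32 sha256_sigma1(u32 x)
        {
                return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10);
        }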
+
+.macro sha256_16rounds last, k0, k1, k2, k3
+ sha256_4rounds \last, \k0, W0, W1, W2, W3
+ sha256_4rounds \last, \k1, W1, W2, W3, W0
+ sha256_4rounds \last, \k2, W2, W3, W0, W1
+ sha256_4rounds \last, \k3, W3, W0, W1, W2
+.endm
+
+// void sha256_transform_zvknha_or_zvknhb_zvkb(u32 state[8], const u8 *data,
+// int num_blocks);
+SYM_TYPED_FUNC_START(sha256_transform_zvknha_or_zvknhb_zvkb)
+
+ // Load the round constants into K0-K15.
+ vsetivli zero, 4, e32, m1, ta, ma
+ la t0, K256
+ vle32.v K0, (t0)
+ addi t0, t0, 16
+ vle32.v K1, (t0)
+ addi t0, t0, 16
+ vle32.v K2, (t0)
+ addi t0, t0, 16
+ vle32.v K3, (t0)
+ addi t0, t0, 16
+ vle32.v K4, (t0)
+ addi t0, t0, 16
+ vle32.v K5, (t0)
+ addi t0, t0, 16
+ vle32.v K6, (t0)
+ addi t0, t0, 16
+ vle32.v K7, (t0)
+ addi t0, t0, 16
+ vle32.v K8, (t0)
+ addi t0, t0, 16
+ vle32.v K9, (t0)
+ addi t0, t0, 16
+ vle32.v K10, (t0)
+ addi t0, t0, 16
+ vle32.v K11, (t0)
+ addi t0, t0, 16
+ vle32.v K12, (t0)
+ addi t0, t0, 16
+ vle32.v K13, (t0)
+ addi t0, t0, 16
+ vle32.v K14, (t0)
+ addi t0, t0, 16
+ vle32.v K15, (t0)
+
+ // Set up the mask for the vmerge that replaces the first word (idx==0)
+ // during message scheduling. There are 4 words, so an 8-bit mask suffices.
+ vsetivli zero, 1, e8, m1, ta, ma
+ vmv.v.i MASK, 0x01
+
+ // Load the state. The state is stored as {a,b,c,d,e,f,g,h}, but we
+ // need {f,e,b,a},{h,g,d,c}. The dst vtype is e32m1 and the index vtype
+ // is e8mf4. We use index-load with the i8 indices {20, 16, 4, 0},
+ // loaded using the 32-bit little endian value 0x00041014.
+ li t0, 0x00041014
+ vsetivli zero, 1, e32, m1, ta, ma
+ vmv.v.x INDICES, t0
+ addi STATEP_C, STATEP, 8
+ vsetivli zero, 4, e32, m1, ta, ma
+ vluxei8.v FEBA, (STATEP), INDICES
+ vluxei8.v HGDC, (STATEP_C), INDICES
+
+.Lnext_block:
+ addi NUM_BLOCKS, NUM_BLOCKS, -1
+
+ // Save the previous state, as it's needed later.
+ vmv.v.v PREV_FEBA, FEBA
+ vmv.v.v PREV_HGDC, HGDC
+
+ // Load the next 512-bit message block and endian-swap each 32-bit word.
+ vle32.v W0, (DATA)
+ vrev8.v W0, W0
+ addi DATA, DATA, 16
+ vle32.v W1, (DATA)
+ vrev8.v W1, W1
+ addi DATA, DATA, 16
+ vle32.v W2, (DATA)
+ vrev8.v W2, W2
+ addi DATA, DATA, 16
+ vle32.v W3, (DATA)
+ vrev8.v W3, W3
+ addi DATA, DATA, 16
+
+ // Do the 64 rounds of SHA-256.
+ sha256_16rounds 0, K0, K1, K2, K3
+ sha256_16rounds 0, K4, K5, K6, K7
+ sha256_16rounds 0, K8, K9, K10, K11
+ sha256_16rounds 1, K12, K13, K14, K15
+
+ // Add the previous state.
+ vadd.vv FEBA, FEBA, PREV_FEBA
+ vadd.vv HGDC, HGDC, PREV_HGDC
+
+ // Repeat if more blocks remain.
+ bnez NUM_BLOCKS, .Lnext_block
+
+ // Store the new state and return.
+ vsuxei8.v FEBA, (STATEP), INDICES
+ vsuxei8.v HGDC, (STATEP_C), INDICES
+ ret
+SYM_FUNC_END(sha256_transform_zvknha_or_zvknhb_zvkb)
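
In C terms, the indexed loads amount to this permutation of the u32 state array: the i8 indices are the little-endian bytes of 0x00041014, i.e. byte offsets {20, 16, 4, 0} (illustrative model; the SHA-512 code below plays the same trick with u64 words and offsets {40, 32, 8, 0}):

        /* C model of the vluxei8 state load (illustrative only). */
        u32 feba[4] = { state[5], state[4], state[1], state[0] }; /* {f,e,b,a} */
        u32 hgdc[4] = { state[7], state[6], state[3], state[2] }; /* {h,g,d,c} */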
+
+.section ".rodata"
+.p2align 2
+.type K256, @object
+K256:
+ .word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
+ .word 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
+ .word 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
+ .word 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
+ .word 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
+ .word 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
+ .word 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
+ .word 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
+ .word 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
+ .word 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
+ .word 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
+ .word 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
+ .word 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
+ .word 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
+ .word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
+ .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+.size K256, . - K256
diff --git a/arch/riscv/crypto/sha512-riscv64-glue.c b/arch/riscv/crypto/sha512-riscv64-glue.c
new file mode 100644
index 00000000000000..43b56a08aeb51a
--- /dev/null
+++ b/arch/riscv/crypto/sha512-riscv64-glue.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SHA-512 and SHA-384 using the RISC-V vector crypto extensions
+ *
+ * Copyright (C) 2023 VRULL GmbH
+ * Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
+ *
+ * Copyright (C) 2023 SiFive, Inc.
+ * Author: Jerry Shih <jerry.shih@sifive.com>
+ */
+
+#include <asm/simd.h>
+#include <asm/vector.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
+#include <crypto/sha512_base.h>
+#include <linux/linkage.h>
+#include <linux/module.h>
+
+/*
+ * Note: the asm function only uses the 'state' field of struct sha512_state.
+ * It is assumed to be the first field.
+ */
+asmlinkage void sha512_transform_zvknhb_zvkb(
+ struct sha512_state *state, const u8 *data, int num_blocks);
+
+static int riscv64_sha512_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ /*
+ * Ensure struct sha512_state begins directly with the SHA-512
+ * 512-bit internal state, as this is what the asm function expects.
+ */
+ BUILD_BUG_ON(offsetof(struct sha512_state, state) != 0);
+
+ if (crypto_simd_usable()) {
+ kernel_vector_begin();
+ sha512_base_do_update(desc, data, len,
+ sha512_transform_zvknhb_zvkb);
+ kernel_vector_end();
+ } else {
+ crypto_sha512_update(desc, data, len);
+ }
+ return 0;
+}
+
+static int riscv64_sha512_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ if (crypto_simd_usable()) {
+ kernel_vector_begin();
+ if (len)
+ sha512_base_do_update(desc, data, len,
+ sha512_transform_zvknhb_zvkb);
+ sha512_base_do_finalize(desc, sha512_transform_zvknhb_zvkb);
+ kernel_vector_end();
+
+ return sha512_base_finish(desc, out);
+ }
+
+ return crypto_sha512_finup(desc, data, len, out);
+}
+
+static int riscv64_sha512_final(struct shash_desc *desc, u8 *out)
+{
+ return riscv64_sha512_finup(desc, NULL, 0, out);
+}
+
+static int riscv64_sha512_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ return sha512_base_init(desc) ?:
+ riscv64_sha512_finup(desc, data, len, out);
+}
+
+static struct shash_alg riscv64_sha512_algs[] = {
+ {
+ .init = sha512_base_init,
+ .update = riscv64_sha512_update,
+ .final = riscv64_sha512_final,
+ .finup = riscv64_sha512_finup,
+ .digest = riscv64_sha512_digest,
+ .descsize = sizeof(struct sha512_state),
+ .digestsize = SHA512_DIGEST_SIZE,
+ .base = {
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_priority = 300,
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-riscv64-zvknhb-zvkb",
+ .cra_module = THIS_MODULE,
+ },
+ }, {
+ .init = sha384_base_init,
+ .update = riscv64_sha512_update,
+ .final = riscv64_sha512_final,
+ .finup = riscv64_sha512_finup,
+ .descsize = sizeof(struct sha512_state),
+ .digestsize = SHA384_DIGEST_SIZE,
+ .base = {
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_priority = 300,
+ .cra_name = "sha384",
+ .cra_driver_name = "sha384-riscv64-zvknhb-zvkb",
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int __init riscv64_sha512_mod_init(void)
+{
+ if (riscv_isa_extension_available(NULL, ZVKNHB) &&
+ riscv_isa_extension_available(NULL, ZVKB) &&
+ riscv_vector_vlen() >= 128)
+ return crypto_register_shashes(riscv64_sha512_algs,
+ ARRAY_SIZE(riscv64_sha512_algs));
+
+ return -ENODEV;
+}
+
+static void __exit riscv64_sha512_mod_exit(void)
+{
+ crypto_unregister_shashes(riscv64_sha512_algs,
+ ARRAY_SIZE(riscv64_sha512_algs));
+}
+
+module_init(riscv64_sha512_mod_init);
+module_exit(riscv64_sha512_mod_exit);
+
+MODULE_DESCRIPTION("SHA-512 (RISC-V accelerated)");
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@vrull.eu>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("sha512");
+MODULE_ALIAS_CRYPTO("sha384");
diff --git a/arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S b/arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S
new file mode 100644
index 00000000000000..3a9ae210f91586
--- /dev/null
+++ b/arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
+//
+// This file is dual-licensed, meaning that you can use it under your
+// choice of either of the following two licenses:
+//
+// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the Apache License 2.0 (the "License"). You can obtain
+// a copy in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+//
+// or
+//
+// Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
+// Copyright (c) 2023, Phoebe Chen <phoebe.chen@sifive.com>
+// Copyright 2024 Google LLC
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The generated code of this file depends on the following RISC-V extensions:
+// - RV64I
+// - RISC-V Vector ('V') with VLEN >= 128
+// - RISC-V Vector SHA-2 Secure Hash extension ('Zvknhb')
+// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
+
+#include <linux/cfi_types.h>
+
+.text
+.option arch, +zvknhb, +zvkb
+
+#define STATEP a0
+#define DATA a1
+#define NUM_BLOCKS a2
+
+#define STATEP_C a3
+#define K a4
+
+#define MASK v0
+#define INDICES v1
+#define W0 v10 // LMUL=2
+#define W1 v12 // LMUL=2
+#define W2 v14 // LMUL=2
+#define W3 v16 // LMUL=2
+#define VTMP v20 // LMUL=2
+#define FEBA v22 // LMUL=2
+#define HGDC v24 // LMUL=2
+#define PREV_FEBA v26 // LMUL=2
+#define PREV_HGDC v28 // LMUL=2
+
+// Do 4 rounds of SHA-512. w0 contains the current 4 message schedule words.
+//
+// If not all the message schedule words have been computed yet, then this also
+// computes 4 more message schedule words. w1-w3 contain the next 3 groups of 4
+// message schedule words; this macro computes the group after w3 and writes it
+// to w0. This means that the next (w0, w1, w2, w3) is the current (w1, w2, w3,
+// w0), so the caller must cycle through the registers accordingly.
+.macro sha512_4rounds last, w0, w1, w2, w3
+ vle64.v VTMP, (K)
+ addi K, K, 32
+ vadd.vv VTMP, VTMP, \w0
+ vsha2cl.vv HGDC, FEBA, VTMP
+ vsha2ch.vv FEBA, HGDC, VTMP
+.if !\last
+ vmerge.vvm VTMP, \w2, \w1, MASK
+ vsha2ms.vv \w0, VTMP, \w3
+.endif
+.endm
+
+.macro sha512_16rounds last
+ sha512_4rounds \last, W0, W1, W2, W3
+ sha512_4rounds \last, W1, W2, W3, W0
+ sha512_4rounds \last, W2, W3, W0, W1
+ sha512_4rounds \last, W3, W0, W1, W2
+.endm
+
+// void sha512_transform_zvknhb_zvkb(u64 state[8], const u8 *data,
+// int num_blocks);
+SYM_TYPED_FUNC_START(sha512_transform_zvknhb_zvkb)
+
+ // Set up the mask for the vmerge that replaces the first word (idx==0)
+ // during message scheduling. There are 4 words, so an 8-bit mask suffices.
+ vsetivli zero, 1, e8, m1, ta, ma
+ vmv.v.i MASK, 0x01
+
+ // Load the state. The state is stored as {a,b,c,d,e,f,g,h}, but we
+ // need {f,e,b,a},{h,g,d,c}. The dst vtype is e64m2 and the index vtype
+ // is e8mf4. We use index-load with the i8 indices {40, 32, 8, 0},
+ // loaded using the 32-bit little endian value 0x00082028.
+ li t0, 0x00082028
+ vsetivli zero, 1, e32, m1, ta, ma
+ vmv.v.x INDICES, t0
+ addi STATEP_C, STATEP, 16
+ vsetivli zero, 4, e64, m2, ta, ma
+ vluxei8.v FEBA, (STATEP), INDICES
+ vluxei8.v HGDC, (STATEP_C), INDICES
+
+.Lnext_block:
+ la K, K512
+ addi NUM_BLOCKS, NUM_BLOCKS, -1
+
+ // Save the previous state, as it's needed later.
+ vmv.v.v PREV_FEBA, FEBA
+ vmv.v.v PREV_HGDC, HGDC
+
+ // Load the next 1024-bit message block and endian-swap each 64-bit word.
+ vle64.v W0, (DATA)
+ vrev8.v W0, W0
+ addi DATA, DATA, 32
+ vle64.v W1, (DATA)
+ vrev8.v W1, W1
+ addi DATA, DATA, 32
+ vle64.v W2, (DATA)
+ vrev8.v W2, W2
+ addi DATA, DATA, 32
+ vle64.v W3, (DATA)
+ vrev8.v W3, W3
+ addi DATA, DATA, 32
+
+ // Do the 80 rounds of SHA-512.
+ sha512_16rounds 0
+ sha512_16rounds 0
+ sha512_16rounds 0
+ sha512_16rounds 0
+ sha512_16rounds 1
+
+ // Add the previous state.
+ vadd.vv FEBA, FEBA, PREV_FEBA
+ vadd.vv HGDC, HGDC, PREV_HGDC
+
+ // Repeat if more blocks remain.
+ bnez NUM_BLOCKS, .Lnext_block
+
+ // Store the new state and return.
+ vsuxei8.v FEBA, (STATEP), INDICES
+ vsuxei8.v HGDC, (STATEP_C), INDICES
+ ret
+SYM_FUNC_END(sha512_transform_zvknhb_zvkb)
+
+.section ".rodata"
+.p2align 3
+.type K512, @object
+K512:
+ .dword 0x428a2f98d728ae22, 0x7137449123ef65cd
+ .dword 0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc
+ .dword 0x3956c25bf348b538, 0x59f111f1b605d019
+ .dword 0x923f82a4af194f9b, 0xab1c5ed5da6d8118
+ .dword 0xd807aa98a3030242, 0x12835b0145706fbe
+ .dword 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2
+ .dword 0x72be5d74f27b896f, 0x80deb1fe3b1696b1
+ .dword 0x9bdc06a725c71235, 0xc19bf174cf692694
+ .dword 0xe49b69c19ef14ad2, 0xefbe4786384f25e3
+ .dword 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65
+ .dword 0x2de92c6f592b0275, 0x4a7484aa6ea6e483
+ .dword 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5
+ .dword 0x983e5152ee66dfab, 0xa831c66d2db43210
+ .dword 0xb00327c898fb213f, 0xbf597fc7beef0ee4
+ .dword 0xc6e00bf33da88fc2, 0xd5a79147930aa725
+ .dword 0x06ca6351e003826f, 0x142929670a0e6e70
+ .dword 0x27b70a8546d22ffc, 0x2e1b21385c26c926
+ .dword 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df
+ .dword 0x650a73548baf63de, 0x766a0abb3c77b2a8
+ .dword 0x81c2c92e47edaee6, 0x92722c851482353b
+ .dword 0xa2bfe8a14cf10364, 0xa81a664bbc423001
+ .dword 0xc24b8b70d0f89791, 0xc76c51a30654be30
+ .dword 0xd192e819d6ef5218, 0xd69906245565a910
+ .dword 0xf40e35855771202a, 0x106aa07032bbd1b8
+ .dword 0x19a4c116b8d2d0c8, 0x1e376c085141ab53
+ .dword 0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8
+ .dword 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb
+ .dword 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3
+ .dword 0x748f82ee5defb2fc, 0x78a5636f43172f60
+ .dword 0x84c87814a1f0ab72, 0x8cc702081a6439ec
+ .dword 0x90befffa23631e28, 0xa4506cebde82bde9
+ .dword 0xbef9a3f7b2c67915, 0xc67178f2e372532b
+ .dword 0xca273eceea26619c, 0xd186b8c721c0c207
+ .dword 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178
+ .dword 0x06f067aa72176fba, 0x0a637dc5a2c898a6
+ .dword 0x113f9804bef90dae, 0x1b710b35131c471b
+ .dword 0x28db77f523047d84, 0x32caab7b40c72493
+ .dword 0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c
+ .dword 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a
+ .dword 0x5fcb6fab3ad6faec, 0x6c44198c4a475817
+.size K512, . - K512
diff --git a/arch/riscv/crypto/sm3-riscv64-glue.c b/arch/riscv/crypto/sm3-riscv64-glue.c
new file mode 100644
index 00000000000000..e1737a970c7c91
--- /dev/null
+++ b/arch/riscv/crypto/sm3-riscv64-glue.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SM3 using the RISC-V vector crypto extensions
+ *
+ * Copyright (C) 2023 VRULL GmbH
+ * Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
+ *
+ * Copyright (C) 2023 SiFive, Inc.
+ * Author: Jerry Shih <jerry.shih@sifive.com>
+ */
+
+#include <asm/simd.h>
+#include <asm/vector.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
+#include <crypto/sm3_base.h>
+#include <linux/linkage.h>
+#include <linux/module.h>
+
+/*
+ * Note: the asm function only uses the 'state' field of struct sm3_state.
+ * It is assumed to be the first field.
+ */
+asmlinkage void sm3_transform_zvksh_zvkb(
+ struct sm3_state *state, const u8 *data, int num_blocks);
+
+static int riscv64_sm3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ /*
+ * Ensure struct sm3_state begins directly with the SM3
+ * 256-bit internal state, as this is what the asm function expects.
+ */
+ BUILD_BUG_ON(offsetof(struct sm3_state, state) != 0);
+
+ if (crypto_simd_usable()) {
+ kernel_vector_begin();
+ sm3_base_do_update(desc, data, len, sm3_transform_zvksh_zvkb);
+ kernel_vector_end();
+ } else {
+ sm3_update(shash_desc_ctx(desc), data, len);
+ }
+ return 0;
+}
+
+static int riscv64_sm3_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ struct sm3_state *ctx;
+
+ if (crypto_simd_usable()) {
+ kernel_vector_begin();
+ if (len)
+ sm3_base_do_update(desc, data, len,
+ sm3_transform_zvksh_zvkb);
+ sm3_base_do_finalize(desc, sm3_transform_zvksh_zvkb);
+ kernel_vector_end();
+
+ return sm3_base_finish(desc, out);
+ }
+
+ ctx = shash_desc_ctx(desc);
+ if (len)
+ sm3_update(ctx, data, len);
+ sm3_final(ctx, out);
+
+ return 0;
+}
+
+static int riscv64_sm3_final(struct shash_desc *desc, u8 *out)
+{
+ return riscv64_sm3_finup(desc, NULL, 0, out);
+}
+
+static struct shash_alg riscv64_sm3_alg = {
+ .init = sm3_base_init,
+ .update = riscv64_sm3_update,
+ .final = riscv64_sm3_final,
+ .finup = riscv64_sm3_finup,
+ .descsize = sizeof(struct sm3_state),
+ .digestsize = SM3_DIGEST_SIZE,
+ .base = {
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_priority = 300,
+ .cra_name = "sm3",
+ .cra_driver_name = "sm3-riscv64-zvksh-zvkb",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static int __init riscv64_sm3_mod_init(void)
+{
+ if (riscv_isa_extension_available(NULL, ZVKSH) &&
+ riscv_isa_extension_available(NULL, ZVKB) &&
+ riscv_vector_vlen() >= 128)
+ return crypto_register_shash(&riscv64_sm3_alg);
+
+ return -ENODEV;
+}
+
+static void __exit riscv64_sm3_mod_exit(void)
+{
+ crypto_unregister_shash(&riscv64_sm3_alg);
+}
+
+module_init(riscv64_sm3_mod_init);
+module_exit(riscv64_sm3_mod_exit);
+
+MODULE_DESCRIPTION("SM3 (RISC-V accelerated)");
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@vrull.eu>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("sm3");
diff --git a/arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S b/arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S
new file mode 100644
index 00000000000000..a2b65d961c04a2
--- /dev/null
+++ b/arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
+//
+// This file is dual-licensed, meaning that you can use it under your
+// choice of either of the following two licenses:
+//
+// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the Apache License 2.0 (the "License"). You can obtain
+// a copy in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+//
+// or
+//
+// Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
+// Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
+// Copyright 2024 Google LLC
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The generated code of this file depends on the following RISC-V extensions:
+// - RV64I
+// - RISC-V Vector ('V') with VLEN >= 128
+// - RISC-V Vector SM3 Secure Hash extension ('Zvksh')
+// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
+
+#include <linux/cfi_types.h>
+
+.text
+.option arch, +zvksh, +zvkb
+
+#define STATEP a0
+#define DATA a1
+#define NUM_BLOCKS a2
+
+#define STATE v0 // LMUL=2
+#define PREV_STATE v2 // LMUL=2
+#define W0 v4 // LMUL=2
+#define W1 v6 // LMUL=2
+#define VTMP v8 // LMUL=2
+
+.macro sm3_8rounds i, w0, w1
+ // Do 4 rounds using W_{0+i}..W_{7+i}.
+ vsm3c.vi STATE, \w0, \i + 0
+ vslidedown.vi VTMP, \w0, 2
+ vsm3c.vi STATE, VTMP, \i + 1
+
+ // Compute W_{4+i}..W_{11+i}.
+ vslidedown.vi VTMP, \w0, 4
+ vslideup.vi VTMP, \w1, 4
+
+ // Do 4 rounds using W_{4+i}..W_{11+i}.
+ vsm3c.vi STATE, VTMP, \i + 2
+ vslidedown.vi VTMP, VTMP, 2
+ vsm3c.vi STATE, VTMP, \i + 3
+
+.if \i < 28
+ // Compute W_{16+i}..W_{23+i}.
+ vsm3me.vv \w0, \w1, \w0
+.endif
+ // For the next 8 rounds, w0 and w1 are swapped.
+.endm
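
The vslidedown/vslideup pair in the middle of the macro concatenates the tail of w0 with the head of w1; a C model of that step (illustrative; w0 and w1 each hold eight 32-bit message words at LMUL=2):

        /* VTMP = { w0[4..7], w1[0..3] }, i.e. W[4+i]..W[11+i] (illustrative). */
        u32 vtmp[8];
        memcpy(&vtmp[0], &w0[4], 4 * sizeof(u32));
        memcpy(&vtmp[4], &w1[0], 4 * sizeof(u32));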
+
+// void sm3_transform_zvksh_zvkb(u32 state[8], const u8 *data, int num_blocks);
+SYM_TYPED_FUNC_START(sm3_transform_zvksh_zvkb)
+
+ // Load the state and endian-swap each 32-bit word.
+ vsetivli zero, 8, e32, m2, ta, ma
+ vle32.v STATE, (STATEP)
+ vrev8.v STATE, STATE
+
+.Lnext_block:
+ addi NUM_BLOCKS, NUM_BLOCKS, -1
+
+ // Save the previous state, as it's needed later.
+ vmv.v.v PREV_STATE, STATE
+
+ // Load the next 512-bit message block into W0-W1.
+ vle32.v W0, (DATA)
+ addi DATA, DATA, 32
+ vle32.v W1, (DATA)
+ addi DATA, DATA, 32
+
+ // Do the 64 rounds of SM3.
+ sm3_8rounds 0, W0, W1
+ sm3_8rounds 4, W1, W0
+ sm3_8rounds 8, W0, W1
+ sm3_8rounds 12, W1, W0
+ sm3_8rounds 16, W0, W1
+ sm3_8rounds 20, W1, W0
+ sm3_8rounds 24, W0, W1
+ sm3_8rounds 28, W1, W0
+
+ // XOR in the previous state.
+ vxor.vv STATE, STATE, PREV_STATE
+
+ // Repeat if more blocks remain.
+ bnez NUM_BLOCKS, .Lnext_block
+
+ // Store the new state and return.
+ vrev8.v STATE, STATE
+ vse32.v STATE, (STATEP)
+ ret
+SYM_FUNC_END(sm3_transform_zvksh_zvkb)
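
Note that SM3's feed-forward XORs the previous state into the new one, where SHA-2 uses modular addition; in C terms (illustrative scalar model):

        /* V[i+1] = CF(V[i], B[i]) ^ V[i] -- illustrative scalar model. */
        for (i = 0; i < 8; i++)
                state[i] = new_state[i] ^ prev_state[i];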
diff --git a/arch/riscv/crypto/sm4-riscv64-glue.c b/arch/riscv/crypto/sm4-riscv64-glue.c
new file mode 100644
index 00000000000000..47fb84ebe577d5
--- /dev/null
+++ b/arch/riscv/crypto/sm4-riscv64-glue.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SM4 using the RISC-V vector crypto extensions
+ *
+ * Copyright (C) 2023 VRULL GmbH
+ * Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
+ *
+ * Copyright (C) 2023 SiFive, Inc.
+ * Author: Jerry Shih <jerry.shih@sifive.com>
+ */
+
+#include <asm/simd.h>
+#include <asm/vector.h>
+#include <crypto/internal/cipher.h>
+#include <crypto/internal/simd.h>
+#include <crypto/sm4.h>
+#include <linux/linkage.h>
+#include <linux/module.h>
+
+asmlinkage void sm4_expandkey_zvksed_zvkb(const u8 user_key[SM4_KEY_SIZE],
+ u32 rkey_enc[SM4_RKEY_WORDS],
+ u32 rkey_dec[SM4_RKEY_WORDS]);
+asmlinkage void sm4_crypt_zvksed_zvkb(const u32 rkey[SM4_RKEY_WORDS],
+ const u8 in[SM4_BLOCK_SIZE],
+ u8 out[SM4_BLOCK_SIZE]);
+
+static int riscv64_sm4_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (crypto_simd_usable()) {
+ if (keylen != SM4_KEY_SIZE)
+ return -EINVAL;
+ kernel_vector_begin();
+ sm4_expandkey_zvksed_zvkb(key, ctx->rkey_enc, ctx->rkey_dec);
+ kernel_vector_end();
+ return 0;
+ }
+ return sm4_expandkey(ctx, key, keylen);
+}
+
+static void riscv64_sm4_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (crypto_simd_usable()) {
+ kernel_vector_begin();
+ sm4_crypt_zvksed_zvkb(ctx->rkey_enc, src, dst);
+ kernel_vector_end();
+ } else {
+ sm4_crypt_block(ctx->rkey_enc, dst, src);
+ }
+}
+
+static void riscv64_sm4_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (crypto_simd_usable()) {
+ kernel_vector_begin();
+ sm4_crypt_zvksed_zvkb(ctx->rkey_dec, src, dst);
+ kernel_vector_end();
+ } else {
+ sm4_crypt_block(ctx->rkey_dec, dst, src);
+ }
+}
+
+static struct crypto_alg riscv64_sm4_alg = {
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sm4_ctx),
+ .cra_priority = 300,
+ .cra_name = "sm4",
+ .cra_driver_name = "sm4-riscv64-zvksed-zvkb",
+ .cra_cipher = {
+ .cia_min_keysize = SM4_KEY_SIZE,
+ .cia_max_keysize = SM4_KEY_SIZE,
+ .cia_setkey = riscv64_sm4_setkey,
+ .cia_encrypt = riscv64_sm4_encrypt,
+ .cia_decrypt = riscv64_sm4_decrypt,
+ },
+ .cra_module = THIS_MODULE,
+};
+
+static int __init riscv64_sm4_mod_init(void)
+{
+ if (riscv_isa_extension_available(NULL, ZVKSED) &&
+ riscv_isa_extension_available(NULL, ZVKB) &&
+ riscv_vector_vlen() >= 128)
+ return crypto_register_alg(&riscv64_sm4_alg);
+
+ return -ENODEV;
+}
+
+static void __exit riscv64_sm4_mod_exit(void)
+{
+ crypto_unregister_alg(&riscv64_sm4_alg);
+}
+
+module_init(riscv64_sm4_mod_init);
+module_exit(riscv64_sm4_mod_exit);
+
+MODULE_DESCRIPTION("SM4 (RISC-V accelerated)");
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@vrull.eu>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("sm4");
diff --git a/arch/riscv/crypto/sm4-riscv64-zvksed-zvkb.S b/arch/riscv/crypto/sm4-riscv64-zvksed-zvkb.S
new file mode 100644
index 00000000000000..fae62179a4a3d0
--- /dev/null
+++ b/arch/riscv/crypto/sm4-riscv64-zvksed-zvkb.S
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
+//
+// This file is dual-licensed, meaning that you can use it under your
+// choice of either of the following two licenses:
+//
+// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the Apache License 2.0 (the "License"). You can obtain
+// a copy in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+//
+// or
+//
+// Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
+// Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
+// Copyright 2024 Google LLC
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The generated code of this file depends on the following RISC-V extensions:
+// - RV64I
+// - RISC-V Vector ('V') with VLEN >= 128
+// - RISC-V Vector SM4 Block Cipher extension ('Zvksed')
+// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
+
+#include <linux/linkage.h>
+
+.text
+.option arch, +zvksed, +zvkb
+
+// void sm4_expandkey_zvksed_zvkb(const u8 user_key[16], u32 rkey_enc[32],
+// u32 rkey_dec[32]);
+SYM_FUNC_START(sm4_expandkey_zvksed_zvkb)
+ vsetivli zero, 4, e32, m1, ta, ma
+
+ // Load the user key.
+ vle32.v v1, (a0)
+ vrev8.v v1, v1
+
+ // XOR the user key with the family key.
+ la t0, FAMILY_KEY
+ vle32.v v2, (t0)
+ vxor.vv v1, v1, v2
+
+ // Compute the round keys. Store them in forwards order in rkey_enc
+ // and in reverse order in rkey_dec.
+ addi a2, a2, 31*4
+ li t0, -4
+ .set i, 0
+.rept 8
+ vsm4k.vi v1, v1, i
+ vse32.v v1, (a1) // Store to rkey_enc.
+ vsse32.v v1, (a2), t0 // Store to rkey_dec.
+.if i < 7
+ addi a1, a1, 16
+ addi a2, a2, -16
+.endif
+ .set i, i + 1
+.endr
+
+ ret
+SYM_FUNC_END(sm4_expandkey_zvksed_zvkb)
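
SM4 decryption uses the round keys in reverse order, which is what the negative-stride vsse32.v store implements; an equivalent C sketch:

        /* Equivalent C for the rkey_dec store (illustrative only). */
        for (i = 0; i < SM4_RKEY_WORDS; i++)    /* SM4_RKEY_WORDS == 32 */
                rkey_dec[i] = rkey_enc[SM4_RKEY_WORDS - 1 - i];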
+
+// void sm4_crypt_zvksed_zvkb(const u32 rkey[32], const u8 in[16], u8 out[16]);
+SYM_FUNC_START(sm4_crypt_zvksed_zvkb)
+ vsetivli zero, 4, e32, m1, ta, ma
+
+ // Load the input data.
+ vle32.v v1, (a1)
+ vrev8.v v1, v1
+
+ // Do the 32 rounds of SM4, 4 at a time.
+ .set i, 0
+.rept 8
+ vle32.v v2, (a0)
+ vsm4r.vs v1, v2
+.if i < 7
+ addi a0, a0, 16
+.endif
+ .set i, i + 1
+.endr
+
+ // Store the output data (in reverse element order).
+ vrev8.v v1, v1
+ li t0, -4
+ addi a2, a2, 12
+ vsse32.v v1, (a2), t0
+
+ ret
+SYM_FUNC_END(sm4_crypt_zvksed_zvkb)
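
The final vrev8.v plus negative-stride store emits the ciphertext as the last four round states in reverse order, each as a big-endian word; an equivalent C sketch (illustrative; x[] holds the four final states):

        /* Illustrative C model of the output store. */
        for (i = 0; i < 4; i++)
                put_unaligned_be32(x[3 - i], out + 4 * i);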
+
+.section ".rodata"
+.p2align 2
+.type FAMILY_KEY, @object
+FAMILY_KEY:
+ .word 0xA3B1BAC6, 0x56AA3350, 0x677D9197, 0xB27022DC
+.size FAMILY_KEY, . - FAMILY_KEY
diff --git a/arch/riscv/errata/andes/errata.c b/arch/riscv/errata/andes/errata.c
index 17a90486972468..f2708a9494a10f 100644
--- a/arch/riscv/errata/andes/errata.c
+++ b/arch/riscv/errata/andes/errata.c
@@ -18,9 +18,9 @@
#include <asm/sbi.h>
#include <asm/vendorid_list.h>
-#define ANDESTECH_AX45MP_MARCHID 0x8000000000008a45UL
-#define ANDESTECH_AX45MP_MIMPID 0x500UL
-#define ANDESTECH_SBI_EXT_ANDES 0x0900031E
+#define ANDES_AX45MP_MARCHID 0x8000000000008a45UL
+#define ANDES_AX45MP_MIMPID 0x500UL
+#define ANDES_SBI_EXT_ANDES 0x0900031E
#define ANDES_SBI_EXT_IOCP_SW_WORKAROUND 1
@@ -32,7 +32,7 @@ static long ax45mp_iocp_sw_workaround(void)
* The ANDES_SBI_EXT_IOCP_SW_WORKAROUND SBI extension checks whether the IOCP
* is missing and the cache is controllable; only then is CMO applied to the
* platform.
*/
- ret = sbi_ecall(ANDESTECH_SBI_EXT_ANDES, ANDES_SBI_EXT_IOCP_SW_WORKAROUND,
+ ret = sbi_ecall(ANDES_SBI_EXT_ANDES, ANDES_SBI_EXT_IOCP_SW_WORKAROUND,
0, 0, 0, 0, 0, 0);
return ret.error ? 0 : ret.value;
@@ -50,7 +50,7 @@ static void errata_probe_iocp(unsigned int stage, unsigned long arch_id, unsigne
done = true;
- if (arch_id != ANDESTECH_AX45MP_MARCHID || impid != ANDESTECH_AX45MP_MIMPID)
+ if (arch_id != ANDES_AX45MP_MARCHID || impid != ANDES_AX45MP_MIMPID)
return;
if (!ax45mp_iocp_sw_workaround())
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index b0487b39e6747a..776354895b81e7 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -183,6 +183,16 @@
REG_L x31, PT_T6(sp)
.endm
+/* Annotate a function as being unsuitable for kprobes. */
+#ifdef CONFIG_KPROBES
+#define ASM_NOKPROBE(name) \
+ .pushsection "_kprobe_blacklist", "aw"; \
+ RISCV_PTR name; \
+ .popsection
+#else
+#define ASM_NOKPROBE(name)
+#endif
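
ASM_NOKPROBE() is the assembly counterpart of the existing C-side NOKPROBE_SYMBOL() annotation; both record the symbol's address in the _kprobe_blacklist section. A C-side sketch for comparison (hypothetical function name):

        /* Existing C-side equivalent (hypothetical call site). */
        static void some_trap_handler(void) { /* ... */ }
        NOKPROBE_SYMBOL(some_trap_handler);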
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_ASM_H */
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index f5dfef6c2153f1..0e0522e588ca6e 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -17,7 +17,6 @@
#endif
#include <asm/cmpxchg.h>
-#include <asm/barrier.h>
#define __atomic_acquire_fence() \
__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
@@ -207,7 +206,7 @@ static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int
" add %[rc], %[p], %[a]\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [a]"r" (a), [u]"r" (u)
@@ -228,7 +227,7 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a,
" add %[rc], %[p], %[a]\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [a]"r" (a), [u]"r" (u)
@@ -248,7 +247,7 @@ static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
" addi %[rc], %[p], 1\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -268,7 +267,7 @@ static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
" addi %[rc], %[p], -1\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -288,7 +287,7 @@ static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
" bltz %[rc], 1f\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -310,7 +309,7 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
" addi %[rc], %[p], 1\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -331,7 +330,7 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
" addi %[rc], %[p], -1\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -352,7 +351,7 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
" bltz %[rc], 1f\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index 110752594228e2..880b56d8480d19 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -11,28 +11,27 @@
#define _ASM_RISCV_BARRIER_H
#ifndef __ASSEMBLY__
+#include <asm/fence.h>
#define nop() __asm__ __volatile__ ("nop")
#define __nops(n) ".rept " #n "\nnop\n.endr\n"
#define nops(n) __asm__ __volatile__ (__nops(n))
-#define RISCV_FENCE(p, s) \
- __asm__ __volatile__ ("fence " #p "," #s : : : "memory")
/* These barriers need to enforce ordering on both devices or memory. */
-#define mb() RISCV_FENCE(iorw,iorw)
-#define rmb() RISCV_FENCE(ir,ir)
-#define wmb() RISCV_FENCE(ow,ow)
+#define __mb() RISCV_FENCE(iorw, iorw)
+#define __rmb() RISCV_FENCE(ir, ir)
+#define __wmb() RISCV_FENCE(ow, ow)
/* These barriers do not need to enforce ordering on devices, just memory. */
-#define __smp_mb() RISCV_FENCE(rw,rw)
-#define __smp_rmb() RISCV_FENCE(r,r)
-#define __smp_wmb() RISCV_FENCE(w,w)
+#define __smp_mb() RISCV_FENCE(rw, rw)
+#define __smp_rmb() RISCV_FENCE(r, r)
+#define __smp_wmb() RISCV_FENCE(w, w)
#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
- RISCV_FENCE(rw,w); \
+ RISCV_FENCE(rw, w); \
WRITE_ONCE(*p, v); \
} while (0)
@@ -40,7 +39,7 @@ do { \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
- RISCV_FENCE(r,rw); \
+ RISCV_FENCE(r, rw); \
___p1; \
})
@@ -69,7 +68,7 @@ do { \
* instances the scheduler pairs this with an mb(), so nothing is necessary on
* the new hart.
*/
-#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw)
+#define smp_mb__after_spinlock() RISCV_FENCE(iorw, iorw)
#include <asm-generic/barrier.h>
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index 329d8244a9b3fd..880606b0469a83 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -22,6 +22,16 @@
#include <asm-generic/bitops/fls.h>
#else
+#define __HAVE_ARCH___FFS
+#define __HAVE_ARCH___FLS
+#define __HAVE_ARCH_FFS
+#define __HAVE_ARCH_FLS
+
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/fls.h>
+
#include <asm/alternative-macros.h>
#include <asm/hwcap.h>
@@ -37,8 +47,6 @@
static __always_inline unsigned long variable__ffs(unsigned long word)
{
- int num;
-
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@@ -52,32 +60,7 @@ static __always_inline unsigned long variable__ffs(unsigned long word)
return word;
legacy:
- num = 0;
-#if BITS_PER_LONG == 64
- if ((word & 0xffffffff) == 0) {
- num += 32;
- word >>= 32;
- }
-#endif
- if ((word & 0xffff) == 0) {
- num += 16;
- word >>= 16;
- }
- if ((word & 0xff) == 0) {
- num += 8;
- word >>= 8;
- }
- if ((word & 0xf) == 0) {
- num += 4;
- word >>= 4;
- }
- if ((word & 0x3) == 0) {
- num += 2;
- word >>= 2;
- }
- if ((word & 0x1) == 0)
- num += 1;
- return num;
+ return generic___ffs(word);
}
/**
@@ -93,8 +76,6 @@ legacy:
static __always_inline unsigned long variable__fls(unsigned long word)
{
- int num;
-
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@@ -108,32 +89,7 @@ static __always_inline unsigned long variable__fls(unsigned long word)
return BITS_PER_LONG - 1 - word;
legacy:
- num = BITS_PER_LONG - 1;
-#if BITS_PER_LONG == 64
- if (!(word & (~0ul << 32))) {
- num -= 32;
- word <<= 32;
- }
-#endif
- if (!(word & (~0ul << (BITS_PER_LONG - 16)))) {
- num -= 16;
- word <<= 16;
- }
- if (!(word & (~0ul << (BITS_PER_LONG - 8)))) {
- num -= 8;
- word <<= 8;
- }
- if (!(word & (~0ul << (BITS_PER_LONG - 4)))) {
- num -= 4;
- word <<= 4;
- }
- if (!(word & (~0ul << (BITS_PER_LONG - 2)))) {
- num -= 2;
- word <<= 2;
- }
- if (!(word & (~0ul << (BITS_PER_LONG - 1))))
- num -= 1;
- return num;
+ return generic___fls(word);
}
/**
@@ -149,46 +105,23 @@ legacy:
static __always_inline int variable_ffs(int x)
{
- int r;
-
- if (!x)
- return 0;
-
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
+ if (!x)
+ return 0;
+
asm volatile (".option push\n"
".option arch,+zbb\n"
CTZW "%0, %1\n"
".option pop\n"
- : "=r" (r) : "r" (x) :);
+ : "=r" (x) : "r" (x) :);
- return r + 1;
+ return x + 1;
legacy:
- r = 1;
- if (!(x & 0xffff)) {
- x >>= 16;
- r += 16;
- }
- if (!(x & 0xff)) {
- x >>= 8;
- r += 8;
- }
- if (!(x & 0xf)) {
- x >>= 4;
- r += 4;
- }
- if (!(x & 3)) {
- x >>= 2;
- r += 2;
- }
- if (!(x & 1)) {
- x >>= 1;
- r += 1;
- }
- return r;
+ return generic_ffs(x);
}
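
Both the Zbb and generic paths must agree on the usual bit-scan semantics; a few illustrative values:

        /* Illustrative examples of the agreed-upon semantics. */
        WARN_ON(ffs(0x08) != 4);        /* ffs() is 1-based; ffs(0) == 0 */
        WARN_ON(__ffs(0x08) != 3);      /* __ffs() is 0-based */
        WARN_ON(fls(0x08) != 4);        /* fls() is 1-based; fls(0) == 0 */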
/**
@@ -204,46 +137,23 @@ legacy:
static __always_inline int variable_fls(unsigned int x)
{
- int r;
-
- if (!x)
- return 0;
-
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
+ if (!x)
+ return 0;
+
asm volatile (".option push\n"
".option arch,+zbb\n"
CLZW "%0, %1\n"
".option pop\n"
- : "=r" (r) : "r" (x) :);
+ : "=r" (x) : "r" (x) :);
- return 32 - r;
+ return 32 - x;
legacy:
- r = 32;
- if (!(x & 0xffff0000u)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xff000000u)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xf0000000u)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xc0000000u)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000u)) {
- x <<= 1;
- r -= 1;
- }
- return r;
+ return generic_fls(x);
}
/**
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 2f4726d3cfcc25..2fee65cc844324 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -8,7 +8,6 @@
#include <linux/bug.h>
-#include <asm/barrier.h>
#include <asm/fence.h>
#define __xchg_relaxed(ptr, new, size) \
@@ -313,7 +312,7 @@
" bne %0, %z3, 1f\n" \
" sc.w.rl %1, %z4, %2\n" \
" bnez %1, 0b\n" \
- " fence rw, rw\n" \
+ RISCV_FULL_BARRIER \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" ((long)__old), "rJ" (__new) \
@@ -325,7 +324,7 @@
" bne %0, %z3, 1f\n" \
" sc.d.rl %1, %z4, %2\n" \
" bnez %1, 0b\n" \
- " fence rw, rw\n" \
+ RISCV_FULL_BARRIER \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h
index 2ac955b51148f4..aa103530a5c83a 100644
--- a/arch/riscv/include/asm/compat.h
+++ b/arch/riscv/include/asm/compat.h
@@ -14,9 +14,28 @@
static inline int is_compat_task(void)
{
+ if (!IS_ENABLED(CONFIG_COMPAT))
+ return 0;
+
return test_thread_flag(TIF_32BIT);
}
+static inline int is_compat_thread(struct thread_info *thread)
+{
+ if (!IS_ENABLED(CONFIG_COMPAT))
+ return 0;
+
+ return test_ti_thread_flag(thread, TIF_32BIT);
+}
+
+static inline void set_compat_task(bool is_compat)
+{
+ if (is_compat)
+ set_thread_flag(TIF_32BIT);
+ else
+ clear_thread_flag(TIF_32BIT);
+}
+
struct compat_user_regs_struct {
compat_ulong_t pc;
compat_ulong_t ra;
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index 0bd11862b7607b..3478054461510e 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright 2022-2023 Rivos, Inc
+ * Copyright 2022-2024 Rivos, Inc
*/
#ifndef _ASM_CPUFEATURE_H
@@ -28,29 +28,38 @@ struct riscv_isainfo {
DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
-DECLARE_PER_CPU(long, misaligned_access_speed);
-
/* Per-cpu ISA extensions. */
extern struct riscv_isainfo hart_isa[NR_CPUS];
void riscv_user_isa_enable(void);
-#ifdef CONFIG_RISCV_MISALIGNED
-bool unaligned_ctl_available(void);
-bool check_unaligned_access_emulated(int cpu);
+#if defined(CONFIG_RISCV_MISALIGNED)
+bool check_unaligned_access_emulated_all_cpus(void);
void unaligned_emulation_finish(void);
+bool unaligned_ctl_available(void);
+DECLARE_PER_CPU(long, misaligned_access_speed);
#else
static inline bool unaligned_ctl_available(void)
{
return false;
}
+#endif
+
+#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
+DECLARE_STATIC_KEY_FALSE(fast_unaligned_access_speed_key);
-static inline bool check_unaligned_access_emulated(int cpu)
+static __always_inline bool has_fast_unaligned_accesses(void)
{
- return false;
+ return static_branch_likely(&fast_unaligned_access_speed_key);
+}
+#else
+static __always_inline bool has_fast_unaligned_accesses(void)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+ return true;
+ else
+ return false;
}
-
-static inline void unaligned_emulation_finish(void) {}
#endif
unsigned long riscv_get_elf_hwcap(void);
@@ -135,6 +144,4 @@ static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsi
return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
}
-DECLARE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
-
#endif
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index 06c236bfab53b3..c7aea7886d22aa 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -53,13 +53,9 @@ extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
#define ELF_ET_DYN_BASE ((DEFAULT_MAP_WINDOW / 3) * 2)
#ifdef CONFIG_64BIT
-#ifdef CONFIG_COMPAT
-#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
+#define STACK_RND_MASK (is_compat_task() ? \
0x7ff >> (PAGE_SHIFT - 12) : \
0x3ffff >> (PAGE_SHIFT - 12))
-#else
-#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
-#endif
#endif
/*
@@ -139,10 +135,7 @@ do { \
#ifdef CONFIG_COMPAT
#define SET_PERSONALITY(ex) \
-do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
- set_thread_flag(TIF_32BIT); \
- else \
- clear_thread_flag(TIF_32BIT); \
+do { set_compat_task((ex).e_ident[EI_CLASS] == ELFCLASS32); \
if (personality(current->personality) != PER_LINUX32) \
set_personality(PER_LINUX | \
(current->personality & (~PER_MASK))); \
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index ea33288f8a25b4..1f2dbfb8a8bfc8 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -12,8 +12,8 @@
#include <asm/vendorid_list.h>
#ifdef CONFIG_ERRATA_ANDES
-#define ERRATA_ANDESTECH_NO_IOCP 0
-#define ERRATA_ANDESTECH_NUMBER 1
+#define ERRATA_ANDES_NO_IOCP 0
+#define ERRATA_ANDES_NUMBER 1
#endif
#ifdef CONFIG_ERRATA_SIFIVE
@@ -112,15 +112,6 @@ asm volatile(ALTERNATIVE( \
#define THEAD_C9XX_RV_IRQ_PMU 17
#define THEAD_C9XX_CSR_SCOUNTEROF 0x5c5
-#define ALT_SBI_PMU_OVERFLOW(__ovl) \
-asm volatile(ALTERNATIVE( \
- "csrr %0, " __stringify(CSR_SSCOUNTOVF), \
- "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \
- THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \
- CONFIG_ERRATA_THEAD_PMU) \
- : "=r" (__ovl) : \
- : "memory")
-
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
index 2b443a3a487f3c..6bcd80325dfc17 100644
--- a/arch/riscv/include/asm/fence.h
+++ b/arch/riscv/include/asm/fence.h
@@ -1,12 +1,18 @@
#ifndef _ASM_RISCV_FENCE_H
#define _ASM_RISCV_FENCE_H
+#define RISCV_FENCE_ASM(p, s) "\tfence " #p "," #s "\n"
+#define RISCV_FENCE(p, s) \
+ ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })
+
#ifdef CONFIG_SMP
-#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n"
-#define RISCV_RELEASE_BARRIER "\tfence rw, w\n"
+#define RISCV_ACQUIRE_BARRIER RISCV_FENCE_ASM(r, rw)
+#define RISCV_RELEASE_BARRIER RISCV_FENCE_ASM(rw, w)
+#define RISCV_FULL_BARRIER RISCV_FENCE_ASM(rw, rw)
#else
#define RISCV_ACQUIRE_BARRIER
#define RISCV_RELEASE_BARRIER
+#define RISCV_FULL_BARRIER
#endif
#endif /* _ASM_RISCV_FENCE_H */
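As a worked expansion of the new helpers: RISCV_FENCE(p, s) emits a standalone fence, while the *_BARRIER strings remain plain asm fragments for embedding in larger sequences (a sketch, assuming the definitions above):

	/* RISCV_FENCE(w, o) expands to: */
	({ __asm__ __volatile__ ("\tfence w,o\n" : : : "memory"); })

	/* RISCV_ACQUIRE_BARRIER is now the string "\tfence r,rw\n", ready
	 * for concatenation into lr/sc loops, as the RISCV_FULL_BARRIER
	 * use in the cmpxchg.h hunks above shows. */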
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 1f2d2599c655d2..e17d0078a65116 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -80,6 +80,7 @@
#define RISCV_ISA_EXT_ZFA 71
#define RISCV_ISA_EXT_ZTSO 72
#define RISCV_ISA_EXT_ZACAS 73
+#define RISCV_ISA_EXT_XANDESPMU 74
#define RISCV_ISA_EXT_XLINUXENVCFG 127
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 42497d487a1746..1c5c641075d2fd 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -47,10 +47,10 @@
* sufficient to ensure this works sanely on controllers that support I/O
* writes.
*/
-#define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory");
-#define __io_par(v) __asm__ __volatile__ ("fence i,ior" : : : "memory");
-#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory");
-#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory");
+#define __io_pbr() RISCV_FENCE(io, i)
+#define __io_par(v) RISCV_FENCE(i, ior)
+#define __io_pbw() RISCV_FENCE(iow, o)
+#define __io_paw() RISCV_FENCE(o, io)
/*
* Accesses from a single hart to a single I/O address must be ordered. This
diff --git a/arch/riscv/include/asm/membarrier.h b/arch/riscv/include/asm/membarrier.h
new file mode 100644
index 00000000000000..47b240d0d596a0
--- /dev/null
+++ b/arch/riscv/include/asm/membarrier.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_RISCV_MEMBARRIER_H
+#define _ASM_RISCV_MEMBARRIER_H
+
+static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ /*
+ * Only need the full barrier when switching between processes.
+ * Barrier when switching from kernel to userspace is not
+ * required here, given that it is implied by mmdrop(). Barrier
+ * when switching from userspace to kernel is not needed after
+ * store to rq->curr.
+ */
+ if (IS_ENABLED(CONFIG_SMP) &&
+ likely(!(atomic_read(&next->membarrier_state) &
+ (MEMBARRIER_STATE_PRIVATE_EXPEDITED |
+ MEMBARRIER_STATE_GLOBAL_EXPEDITED)) || !prev))
+ return;
+
+ /*
+ * The membarrier system call requires a full memory barrier
+ * after storing to rq->curr, before going back to user-space.
+ *
+ * This barrier is also needed for the SYNC_CORE command when
+ * switching between processes; in particular, on a transition
+ * from a thread belonging to another mm to a thread belonging
+ * to the mm for which a membarrier SYNC_CORE is done on CPU0:
+ *
+ * - [CPU0] sets all bits in the mm icache_stale_mask (in
+ * prepare_sync_core_cmd());
+ *
+ * - [CPU1] stores to rq->curr (by the scheduler);
+ *
+ * - [CPU0] loads rq->curr within membarrier and observes
+ * cpu_rq(1)->curr->mm != mm, so the IPI is skipped on
+ * CPU1; this means membarrier relies on switch_mm() to
+ * issue the sync-core;
+ *
+ * - [CPU1] switch_mm() loads icache_stale_mask; if the bit
+ * is zero, switch_mm() may incorrectly skip the sync-core.
+ *
+ * Matches a full barrier in the proximity of the membarrier
+ * system call entry.
+ */
+ smp_mb();
+}
+
+#endif /* _ASM_RISCV_MEMBARRIER_H */
diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
index 4c58ee7f95ecfa..06cadfd7a237ac 100644
--- a/arch/riscv/include/asm/mmio.h
+++ b/arch/riscv/include/asm/mmio.h
@@ -12,6 +12,7 @@
#define _ASM_RISCV_MMIO_H
#include <linux/types.h>
+#include <asm/fence.h>
#include <asm/mmiowb.h>
/* Generic IO read/write. These perform native-endian accesses. */
@@ -131,8 +132,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* doesn't define any ordering between the memory space and the I/O space.
*/
#define __io_br() do {} while (0)
-#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
-#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
+#define __io_ar(v) RISCV_FENCE(i, ir)
+#define __io_bw() RISCV_FENCE(w, o)
#define __io_aw() mmiowb_set_pending()
#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
index 0b2333e71fdc5d..52ce4a399d9b2b 100644
--- a/arch/riscv/include/asm/mmiowb.h
+++ b/arch/riscv/include/asm/mmiowb.h
@@ -7,7 +7,7 @@
* "o,w" is sufficient to ensure that all writes to the device have completed
* before the write to the spinlock is allowed to commit.
*/
-#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
+#define mmiowb() RISCV_FENCE(o, w)
#include <linux/smp.h>
#include <asm-generic/mmiowb.h>
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index c80bb9990d32ef..deaf971253a201 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -95,13 +95,19 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
__pud_free(mm, pud);
}
-#define __pud_free_tlb(tlb, pud, addr) \
-do { \
- if (pgtable_l4_enabled) { \
- pagetable_pud_dtor(virt_to_ptdesc(pud)); \
- tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pud)); \
- } \
-} while (0)
+static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+ unsigned long addr)
+{
+ if (pgtable_l4_enabled) {
+ struct ptdesc *ptdesc = virt_to_ptdesc(pud);
+
+ pagetable_pud_dtor(ptdesc);
+ if (riscv_use_ipi_for_rfence())
+ tlb_remove_page_ptdesc(tlb, ptdesc);
+ else
+ tlb_remove_ptdesc(tlb, ptdesc);
+ }
+}
#define p4d_alloc_one p4d_alloc_one
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -130,11 +136,16 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
__p4d_free(mm, p4d);
}
-#define __p4d_free_tlb(tlb, p4d, addr) \
-do { \
- if (pgtable_l5_enabled) \
- tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(p4d)); \
-} while (0)
+static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
+ unsigned long addr)
+{
+ if (pgtable_l5_enabled) {
+ if (riscv_use_ipi_for_rfence())
+ tlb_remove_page_ptdesc(tlb, virt_to_ptdesc(p4d));
+ else
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
+ }
+}
#endif /* __PAGETABLE_PMD_FOLDED */
static inline void sync_kernel_mappings(pgd_t *pgd)
@@ -159,19 +170,31 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
#ifndef __PAGETABLE_PMD_FOLDED
-#define __pmd_free_tlb(tlb, pmd, addr) \
-do { \
- pagetable_pmd_dtor(virt_to_ptdesc(pmd)); \
- tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pmd)); \
-} while (0)
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+ unsigned long addr)
+{
+ struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
+
+ pagetable_pmd_dtor(ptdesc);
+ if (riscv_use_ipi_for_rfence())
+ tlb_remove_page_ptdesc(tlb, ptdesc);
+ else
+ tlb_remove_ptdesc(tlb, ptdesc);
+}
#endif /* __PAGETABLE_PMD_FOLDED */
-#define __pte_free_tlb(tlb, pte, buf) \
-do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), page_ptdesc(pte));\
-} while (0)
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+ unsigned long addr)
+{
+ struct ptdesc *ptdesc = page_ptdesc(pte);
+
+ pagetable_pte_dtor(ptdesc);
+ if (riscv_use_ipi_for_rfence())
+ tlb_remove_page_ptdesc(tlb, ptdesc);
+ else
+ tlb_remove_ptdesc(tlb, ptdesc);
+}
#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_PGALLOC_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 20242402fc11ba..97fcde30e2477d 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -127,17 +127,11 @@
#define VA_USER_SV48 (UL(1) << (VA_BITS_SV48 - 1))
#define VA_USER_SV57 (UL(1) << (VA_BITS_SV57 - 1))
-#ifdef CONFIG_COMPAT
#define MMAP_VA_BITS_64 ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
#define MMAP_MIN_VA_BITS_64 (VA_BITS_SV39)
#define MMAP_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
#define MMAP_MIN_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
#else
-#define MMAP_VA_BITS ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
-#define MMAP_MIN_VA_BITS (VA_BITS_SV39)
-#endif /* CONFIG_COMPAT */
-
-#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */
@@ -439,9 +433,11 @@ static inline pte_t pte_mkhuge(pte_t pte)
return pte;
}
+#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define pte_leaf_size(pte) (pte_napot(pte) ? \
napot_cont_size(napot_cont_order(pte)) :\
PAGE_SIZE)
+#endif
#ifdef CONFIG_NUMA_BALANCING
/*
@@ -517,12 +513,12 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
WRITE_ONCE(*ptep, pteval);
}
-void flush_icache_pte(pte_t pte);
+void flush_icache_pte(struct mm_struct *mm, pte_t pte);
-static inline void __set_pte_at(pte_t *ptep, pte_t pteval)
+static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval)
{
if (pte_present(pteval) && pte_exec(pteval))
- flush_icache_pte(pteval);
+ flush_icache_pte(mm, pteval);
set_pte(ptep, pteval);
}
@@ -535,7 +531,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
page_table_check_ptes_set(mm, ptep, pteval, nr);
for (;;) {
- __set_pte_at(ptep, pteval);
+ __set_pte_at(mm, ptep, pteval);
if (--nr == 0)
break;
ptep++;
@@ -547,7 +543,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
static inline void pte_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- __set_pte_at(ptep, __pte(0));
+ __set_pte_at(mm, ptep, __pte(0));
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS /* defined in mm/pgtable.c */
@@ -662,6 +658,12 @@ static inline int pmd_write(pmd_t pmd)
return pte_write(pmd_pte(pmd));
}
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+ return pte_write(pud_pte(pud));
+}
+
#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
@@ -713,14 +715,14 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
page_table_check_pmd_set(mm, pmdp, pmd);
- return __set_pte_at((pte_t *)pmdp, pmd_pte(pmd));
+ return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd));
}
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud)
{
page_table_check_pud_set(mm, pudp, pud);
- return __set_pte_at((pte_t *)pudp, pud_pte(pud));
+ return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud));
}
#ifdef CONFIG_PAGE_TABLE_CHECK
@@ -871,8 +873,8 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
#ifdef CONFIG_COMPAT
-#define TASK_SIZE_32 (_AC(0x80000000, UL))
-#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE)
+#define TASK_SIZE (is_compat_task() ? \
TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE TASK_SIZE_64
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index a8509cc31ab25a..0faf5f161f1e49 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -14,22 +14,21 @@
#include <asm/ptrace.h>
-#ifdef CONFIG_64BIT
-#define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1))
-#define STACK_TOP_MAX TASK_SIZE
-
+/*
+ * addr is a hint to the maximum userspace address that mmap should provide, so
+ * this macro needs to return the largest address space available such that
+ * addr < mmap_end, where mmap_end is the top of that address space.
+ * See Documentation/arch/riscv/vm-layout.rst for more details.
+ */
#define arch_get_mmap_end(addr, len, flags) \
({ \
unsigned long mmap_end; \
typeof(addr) _addr = (addr); \
- if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
- mmap_end = STACK_TOP_MAX; \
- else if ((_addr) >= VA_USER_SV57) \
+ if ((_addr) == 0 || is_compat_task() || \
+ ((_addr + len) > BIT(VA_BITS - 1))) \
mmap_end = STACK_TOP_MAX; \
- else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
- mmap_end = VA_USER_SV48; \
else \
- mmap_end = VA_USER_SV39; \
+ mmap_end = (_addr + len); \
mmap_end; \
})
@@ -39,17 +38,17 @@
typeof(addr) _addr = (addr); \
typeof(base) _base = (base); \
unsigned long rnd_gap = DEFAULT_MAP_WINDOW - (_base); \
- if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
+ if ((_addr) == 0 || is_compat_task() || \
+ ((_addr + len) > BIT(VA_BITS - 1))) \
mmap_base = (_base); \
- else if (((_addr) >= VA_USER_SV57) && (VA_BITS >= VA_BITS_SV57)) \
- mmap_base = VA_USER_SV57 - rnd_gap; \
- else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
- mmap_base = VA_USER_SV48 - rnd_gap; \
else \
- mmap_base = VA_USER_SV39 - rnd_gap; \
+ mmap_base = (_addr + len) - rnd_gap; \
mmap_base; \
})
+#ifdef CONFIG_64BIT
+#define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1))
+#define STACK_TOP_MAX TASK_SIZE_64
#else
#define DEFAULT_MAP_WINDOW TASK_SIZE
#define STACK_TOP_MAX TASK_SIZE
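In practice, userspace opts into an address space larger than the default sv48 window by passing a hint address above it (see Documentation/arch/riscv/vm-layout.rst). A sketch with hypothetical addresses, assuming sv57-capable hardware:

	#include <sys/mman.h>

	/* NULL hint: allocation stays inside the default window. */
	void *lo = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* Hint above the default sv48 window but below BIT(VA_BITS - 1):
	 * mmap_end becomes addr + len, so an sv57 kernel may return an
	 * address just under the hint. */
	void *hi = mmap((void *)(1UL << 55), 4096, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);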
diff --git a/arch/riscv/include/asm/simd.h b/arch/riscv/include/asm/simd.h
index 54efbf523d49c6..adb50f3ec2057b 100644
--- a/arch/riscv/include/asm/simd.h
+++ b/arch/riscv/include/asm/simd.h
@@ -34,9 +34,9 @@ static __must_check inline bool may_use_simd(void)
return false;
/*
- * Nesting is acheived in preempt_v by spreading the control for
+ * Nesting is achieved in preempt_v by spreading the control for
* preemptible and non-preemptible kernel-mode Vector into two fields.
- * Always try to match with prempt_v if kernel V-context exists. Then,
+ * Always try to match with preempt_v if kernel V-context exists. Then,
* fallback to check non preempt_v if nesting happens, or if the config
* is not set.
*/
diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h
index 491296a335d0ce..4718096fa5e3fc 100644
--- a/arch/riscv/include/asm/suspend.h
+++ b/arch/riscv/include/asm/suspend.h
@@ -56,4 +56,7 @@ int hibernate_resume_nonboot_cpu_disable(void);
asmlinkage void hibernate_restore_image(unsigned long resume_satp, unsigned long satp_temp,
unsigned long cpu_resume);
asmlinkage int hibernate_core_restore_code(void);
+bool riscv_sbi_hsm_is_supported(void);
+bool riscv_sbi_suspend_state_is_valid(u32 state);
+int riscv_sbi_hart_suspend(u32 state);
#endif
diff --git a/arch/riscv/include/asm/sync_core.h b/arch/riscv/include/asm/sync_core.h
new file mode 100644
index 00000000000000..9153016da8f14b
--- /dev/null
+++ b/arch/riscv/include/asm/sync_core.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_SYNC_CORE_H
+#define _ASM_RISCV_SYNC_CORE_H
+
+/*
+ * RISC-V implements return to user-space through an xRET instruction,
+ * which is not core serializing.
+ */
+static inline void sync_core_before_usermode(void)
+{
+ asm volatile ("fence.i" ::: "memory");
+}
+
+#ifdef CONFIG_SMP
+/*
+ * Ensure the next switch_mm() on every CPU issues a core serializing
+ * instruction for the given @mm.
+ */
+static inline void prepare_sync_core_cmd(struct mm_struct *mm)
+{
+ cpumask_setall(&mm->context.icache_stale_mask);
+}
+#else
+static inline void prepare_sync_core_cmd(struct mm_struct *mm)
+{
+}
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_RISCV_SYNC_CORE_H */
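The fence.i here is what gives the membarrier() SYNC_CORE commands meaning on RISC-V. A hedged sketch of the userspace pattern this supports (a JIT, say; write_new_instructions() is hypothetical):

	#include <linux/membarrier.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* once at startup */
	syscall(__NR_membarrier,
		MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0);

	/* after rewriting code that another thread may execute */
	write_new_instructions();
	syscall(__NR_membarrier,
		MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0);
	/* every thread of this mm now issues fence.i before returning to
	 * userspace, so stale icache contents cannot be executed */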
diff --git a/arch/riscv/include/asm/syscall_wrapper.h b/arch/riscv/include/asm/syscall_wrapper.h
index eeec04b7dae67b..980094c2e9761d 100644
--- a/arch/riscv/include/asm/syscall_wrapper.h
+++ b/arch/riscv/include/asm/syscall_wrapper.h
@@ -12,25 +12,51 @@
asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
-#define SC_RISCV_REGS_TO_ARGS(x, ...) \
- __MAP(x,__SC_ARGS \
- ,,regs->orig_a0,,regs->a1,,regs->a2 \
+#ifdef CONFIG_64BIT
+
+#define __SYSCALL_SE_DEFINEx(x, prefix, name, ...) \
+ static long __se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static long __se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__))
+
+#define SC_RISCV_REGS_TO_ARGS(x, ...) \
+ __MAP(x,__SC_ARGS \
+ ,,regs->orig_a0,,regs->a1,,regs->a2 \
,,regs->a3,,regs->a4,,regs->a5,,regs->a6)
+#else
+/*
+ * Use type aliasing to ensure registers a0-a6 are correctly passed to the syscall
+ * implementation when >word-size arguments are used.
+ */
+#define __SYSCALL_SE_DEFINEx(x, prefix, name, ...) \
+ __diag_push(); \
+ __diag_ignore(GCC, 8, "-Wattribute-alias", \
+ "Type aliasing is used to sanitize syscall arguments"); \
+ static long __se_##prefix##name(ulong, ulong, ulong, ulong, ulong, ulong, \
+ ulong) \
+ __attribute__((alias(__stringify(___se_##prefix##name)))); \
+ __diag_pop(); \
+ static long noinline ___se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static long ___se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__))
+
+#define SC_RISCV_REGS_TO_ARGS(x, ...) \
+ regs->orig_a0,regs->a1,regs->a2,regs->a3,regs->a4,regs->a5,regs->a6
+
+#endif /* CONFIG_64BIT */
+
#ifdef CONFIG_COMPAT
#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs); \
ALLOW_ERROR_INJECTION(__riscv_compat_sys##name, ERRNO); \
- static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
- asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs) \
+ __SYSCALL_SE_DEFINEx(x, compat_sys, name, __VA_ARGS__) \
{ \
- return __se_compat_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
} \
- static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs) \
{ \
- return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
+ return __se_compat_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \
} \
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
@@ -51,19 +77,18 @@ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
#define __SYSCALL_DEFINEx(x, name, ...) \
asmlinkage long __riscv_sys##name(const struct pt_regs *regs); \
ALLOW_ERROR_INJECTION(__riscv_sys##name, ERRNO); \
- static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
- asmlinkage long __riscv_sys##name(const struct pt_regs *regs) \
- { \
- return __se_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \
- } \
- static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ __SYSCALL_SE_DEFINEx(x, sys, name, __VA_ARGS__) \
{ \
long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
__MAP(x,__SC_TEST,__VA_ARGS__); \
__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
return ret; \
} \
+ asmlinkage long __riscv_sys##name(const struct pt_regs *regs) \
+ { \
+ return __se_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ } \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#define SYSCALL_DEFINE0(sname) \
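A rough rv32 expansion sketch (assuming the macros above, eliding the error-injection and __SC_TEST plumbing) for SYSCALL_DEFINE2(example, u64, a, int, b):

	static long __se_sys_example(ulong, ulong, ulong, ulong, ulong,
				     ulong, ulong)
		__attribute__((alias("___se_sys_example")));

	static long noinline ___se_sys_example(long long a, long b)
	{
		return __do_sys_example((u64)a, (int)b);
	}

	asmlinkage long __riscv_sys_example(const struct pt_regs *regs)
	{
		/* all seven registers are passed; the alias lets the real
		 * signature reassemble the u64 from the correct register
		 * pair instead of guessing at the register layout */
		return __se_sys_example(regs->orig_a0, regs->a1, regs->a2,
					regs->a3, regs->a4, regs->a5,
					regs->a6);
	}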
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index 50b63b5c15bd8b..1f6c38420d8e07 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -10,6 +10,24 @@ struct mmu_gather;
static void tlb_flush(struct mmu_gather *tlb);
+#ifdef CONFIG_MMU
+#include <linux/swap.h>
+
+/*
+ * While riscv platforms with riscv_ipi_for_rfence as true require an IPI to
+ * perform TLB shootdown, some platforms with riscv_ipi_for_rfence as false use
+ * SBI to perform TLB shootdown. To keep software pagetable walkers safe in this
+ * case we switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the
+ * comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
+ * for more details.
+ */
+static inline void __tlb_remove_table(void *table)
+{
+ free_page_and_swap_cache(table);
+}
+
+#endif /* CONFIG_MMU */
+
#define tlb_flush tlb_flush
#include <asm-generic/tlb.h>
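The __p*_free_tlb() conversions in pgalloc.h above all reduce to the pattern this comment motivates (distilled sketch):

	if (riscv_use_ipi_for_rfence())
		tlb_remove_page_ptdesc(tlb, ptdesc);	/* IPI shootdown: immediate free is safe */
	else
		tlb_remove_ptdesc(tlb, ptdesc);		/* SBI shootdown: free deferred via RCU */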
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index 0cd6f0a027d1f7..731dcd0ed4de92 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -284,4 +284,15 @@ static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
#endif /* CONFIG_RISCV_ISA_V */
+/*
+ * Return the implementation's vlen value.
+ *
+ * riscv_v_vsize holds the total size of the 32 vector registers (32 * vlenb
+ * bytes), so rebuild the vlen value in bits from it.
+ */
+static inline int riscv_vector_vlen(void)
+{
+ return riscv_v_vsize / 32 * 8;
+}
+
#endif /* ! __ASM_RISCV_VECTOR_H */
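A worked example of the arithmetic (hypothetical hart with vlenb = 16 bytes):

	/* riscv_v_vsize = 32 * vlenb = 32 * 16 = 512 bytes
	 * riscv_vector_vlen() = 512 / 32 * 8 = 128, i.e. VLEN = 128 bits */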
diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h
index e55407ace0c361..2f2bb0c84f9a71 100644
--- a/arch/riscv/include/asm/vendorid_list.h
+++ b/arch/riscv/include/asm/vendorid_list.h
@@ -5,7 +5,7 @@
#ifndef ASM_VENDOR_LIST_H
#define ASM_VENDOR_LIST_H
-#define ANDESTECH_VENDOR_ID 0x31e
+#define ANDES_VENDOR_ID 0x31e
#define SIFIVE_VENDOR_ID 0x489
#define THEAD_VENDOR_ID 0x5b7
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 5e591f831638f2..81d94a8ee10f29 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -39,7 +39,6 @@ extra-y += vmlinux.lds
obj-y += head.o
obj-y += soc.o
obj-$(CONFIG_RISCV_ALTERNATIVE) += alternative.o
-obj-y += copy-unaligned.o
obj-y += cpu.o
obj-y += cpufeature.o
obj-y += entry.o
@@ -64,6 +63,9 @@ obj-y += tests/
obj-$(CONFIG_MMU) += vdso.o vdso/
obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o
+obj-$(CONFIG_RISCV_MISALIGNED) += unaligned_access_speed.o
+obj-$(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) += copy-unaligned.o
+
obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_RISCV_ISA_V) += vector.o
obj-$(CONFIG_RISCV_ISA_V) += kernel_mode_vector.o
diff --git a/arch/riscv/kernel/alternative.c b/arch/riscv/kernel/alternative.c
index 319a1da0358b49..0128b161bfdab2 100644
--- a/arch/riscv/kernel/alternative.c
+++ b/arch/riscv/kernel/alternative.c
@@ -43,7 +43,7 @@ static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info
switch (cpu_mfr_info->vendor_id) {
#ifdef CONFIG_ERRATA_ANDES
- case ANDESTECH_VENDOR_ID:
+ case ANDES_VENDOR_ID:
cpu_mfr_info->patch_func = andes_errata_patch_func;
break;
#endif
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 79a5a35fab964d..3ed2359eae353f 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -11,7 +11,6 @@
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/ctype.h>
-#include <linux/jump_label.h>
#include <linux/log2.h>
#include <linux/memory.h>
#include <linux/module.h>
@@ -21,21 +20,13 @@
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwcap.h>
-#include <asm/hwprobe.h>
#include <asm/patch.h>
#include <asm/processor.h>
#include <asm/sbi.h>
#include <asm/vector.h>
-#include "copy-unaligned.h"
-
#define NUM_ALPHA_EXTS ('z' - 'a' + 1)
-#define MISALIGNED_ACCESS_JIFFIES_LG2 1
-#define MISALIGNED_BUFFER_SIZE 0x4000
-#define MISALIGNED_BUFFER_ORDER get_order(MISALIGNED_BUFFER_SIZE)
-#define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
-
unsigned long elf_hwcap __read_mostly;
/* Host ISA bitmap */
@@ -44,11 +35,6 @@ static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
/* Per-cpu ISA extensions. */
struct riscv_isainfo hart_isa[NR_CPUS];
-/* Performance information */
-DEFINE_PER_CPU(long, misaligned_access_speed);
-
-static cpumask_t fast_misaligned_access;
-
/**
* riscv_isa_extension_base() - Get base extension word
*
@@ -318,6 +304,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
__RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
__RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
+ __RISCV_ISA_EXT_DATA(xandespmu, RISCV_ISA_EXT_XANDESPMU),
};
const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
@@ -731,247 +718,6 @@ unsigned long riscv_get_elf_hwcap(void)
return hwcap;
}
-static int check_unaligned_access(void *param)
-{
- int cpu = smp_processor_id();
- u64 start_cycles, end_cycles;
- u64 word_cycles;
- u64 byte_cycles;
- int ratio;
- unsigned long start_jiffies, now;
- struct page *page = param;
- void *dst;
- void *src;
- long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
-
- if (check_unaligned_access_emulated(cpu))
- return 0;
-
- /* Make an unaligned destination buffer. */
- dst = (void *)((unsigned long)page_address(page) | 0x1);
- /* Unalign src as well, but differently (off by 1 + 2 = 3). */
- src = dst + (MISALIGNED_BUFFER_SIZE / 2);
- src += 2;
- word_cycles = -1ULL;
- /* Do a warmup. */
- __riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
- preempt_disable();
- start_jiffies = jiffies;
- while ((now = jiffies) == start_jiffies)
- cpu_relax();
-
- /*
- * For a fixed amount of time, repeatedly try the function, and take
- * the best time in cycles as the measurement.
- */
- while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
- start_cycles = get_cycles64();
- /* Ensure the CSR read can't reorder WRT to the copy. */
- mb();
- __riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
- /* Ensure the copy ends before the end time is snapped. */
- mb();
- end_cycles = get_cycles64();
- if ((end_cycles - start_cycles) < word_cycles)
- word_cycles = end_cycles - start_cycles;
- }
-
- byte_cycles = -1ULL;
- __riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
- start_jiffies = jiffies;
- while ((now = jiffies) == start_jiffies)
- cpu_relax();
-
- while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
- start_cycles = get_cycles64();
- mb();
- __riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
- mb();
- end_cycles = get_cycles64();
- if ((end_cycles - start_cycles) < byte_cycles)
- byte_cycles = end_cycles - start_cycles;
- }
-
- preempt_enable();
-
- /* Don't divide by zero. */
- if (!word_cycles || !byte_cycles) {
- pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
- cpu);
-
- return 0;
- }
-
- if (word_cycles < byte_cycles)
- speed = RISCV_HWPROBE_MISALIGNED_FAST;
-
- ratio = div_u64((byte_cycles * 100), word_cycles);
- pr_info("cpu%d: Ratio of byte access time to unaligned word access is %d.%02d, unaligned accesses are %s\n",
- cpu,
- ratio / 100,
- ratio % 100,
- (speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
-
- per_cpu(misaligned_access_speed, cpu) = speed;
-
- /*
- * Set the value of fast_misaligned_access of a CPU. These operations
- * are atomic to avoid race conditions.
- */
- if (speed == RISCV_HWPROBE_MISALIGNED_FAST)
- cpumask_set_cpu(cpu, &fast_misaligned_access);
- else
- cpumask_clear_cpu(cpu, &fast_misaligned_access);
-
- return 0;
-}
-
-static void check_unaligned_access_nonboot_cpu(void *param)
-{
- unsigned int cpu = smp_processor_id();
- struct page **pages = param;
-
- if (smp_processor_id() != 0)
- check_unaligned_access(pages[cpu]);
-}
-
-DEFINE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
-
-static void modify_unaligned_access_branches(cpumask_t *mask, int weight)
-{
- if (cpumask_weight(mask) == weight)
- static_branch_enable_cpuslocked(&fast_misaligned_access_speed_key);
- else
- static_branch_disable_cpuslocked(&fast_misaligned_access_speed_key);
-}
-
-static void set_unaligned_access_static_branches_except_cpu(int cpu)
-{
- /*
- * Same as set_unaligned_access_static_branches, except excludes the
- * given CPU from the result. When a CPU is hotplugged into an offline
- * state, this function is called before the CPU is set to offline in
- * the cpumask, and thus the CPU needs to be explicitly excluded.
- */
-
- cpumask_t fast_except_me;
-
- cpumask_and(&fast_except_me, &fast_misaligned_access, cpu_online_mask);
- cpumask_clear_cpu(cpu, &fast_except_me);
-
- modify_unaligned_access_branches(&fast_except_me, num_online_cpus() - 1);
-}
-
-static void set_unaligned_access_static_branches(void)
-{
- /*
- * This will be called after check_unaligned_access_all_cpus so the
- * result of unaligned access speed for all CPUs will be available.
- *
- * To avoid the number of online cpus changing between reading
- * cpu_online_mask and calling num_online_cpus, cpus_read_lock must be
- * held before calling this function.
- */
-
- cpumask_t fast_and_online;
-
- cpumask_and(&fast_and_online, &fast_misaligned_access, cpu_online_mask);
-
- modify_unaligned_access_branches(&fast_and_online, num_online_cpus());
-}
-
-static int lock_and_set_unaligned_access_static_branch(void)
-{
- cpus_read_lock();
- set_unaligned_access_static_branches();
- cpus_read_unlock();
-
- return 0;
-}
-
-arch_initcall_sync(lock_and_set_unaligned_access_static_branch);
-
-static int riscv_online_cpu(unsigned int cpu)
-{
- static struct page *buf;
-
- /* We are already set since the last check */
- if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
- goto exit;
-
- buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
- if (!buf) {
- pr_warn("Allocation failure, not measuring misaligned performance\n");
- return -ENOMEM;
- }
-
- check_unaligned_access(buf);
- __free_pages(buf, MISALIGNED_BUFFER_ORDER);
-
-exit:
- set_unaligned_access_static_branches();
-
- return 0;
-}
-
-static int riscv_offline_cpu(unsigned int cpu)
-{
- set_unaligned_access_static_branches_except_cpu(cpu);
-
- return 0;
-}
-
-/* Measure unaligned access on all CPUs present at boot in parallel. */
-static int check_unaligned_access_all_cpus(void)
-{
- unsigned int cpu;
- unsigned int cpu_count = num_possible_cpus();
- struct page **bufs = kzalloc(cpu_count * sizeof(struct page *),
- GFP_KERNEL);
-
- if (!bufs) {
- pr_warn("Allocation failure, not measuring misaligned performance\n");
- return 0;
- }
-
- /*
- * Allocate separate buffers for each CPU so there's no fighting over
- * cache lines.
- */
- for_each_cpu(cpu, cpu_online_mask) {
- bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
- if (!bufs[cpu]) {
- pr_warn("Allocation failure, not measuring misaligned performance\n");
- goto out;
- }
- }
-
- /* Check everybody except 0, who stays behind to tend jiffies. */
- on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);
-
- /* Check core 0. */
- smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
-
- /*
- * Setup hotplug callbacks for any new CPUs that come online or go
- * offline.
- */
- cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
- riscv_online_cpu, riscv_offline_cpu);
-
-out:
- unaligned_emulation_finish();
- for_each_cpu(cpu, cpu_online_mask) {
- if (bufs[cpu])
- __free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
- }
-
- kfree(bufs);
- return 0;
-}
-
-arch_initcall(check_unaligned_access_all_cpus);
-
void riscv_user_isa_enable(void)
{
if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_ZICBOZ))
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 9d1a305d55087b..68a24cf9481afe 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -111,6 +111,7 @@ SYM_CODE_START(handle_exception)
1:
tail do_trap_unknown
SYM_CODE_END(handle_exception)
+ASM_NOKPROBE(handle_exception)
/*
* ret_from_exception must be called with interrupts disabled. Here is the
@@ -184,6 +185,7 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
sret
#endif
SYM_CODE_END(ret_from_exception)
+ASM_NOKPROBE(ret_from_exception)
#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
@@ -219,6 +221,7 @@ SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
move a0, sp
tail handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
+ASM_NOKPROBE(handle_kernel_stack_overflow)
#endif
SYM_CODE_START(ret_from_fork)
diff --git a/arch/riscv/kernel/pi/Makefile b/arch/riscv/kernel/pi/Makefile
index 07915dc9279e92..b75f150b923d68 100644
--- a/arch/riscv/kernel/pi/Makefile
+++ b/arch/riscv/kernel/pi/Makefile
@@ -9,6 +9,9 @@ KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) -fpie \
-fno-asynchronous-unwind-tables -fno-unwind-tables \
$(call cc-option,-fno-addrsig)
+# Disable LTO
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS))
+
KBUILD_CFLAGS += -mcmodel=medany
CFLAGS_cmdline_early.o += -D__NO_FORTIFY
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index e8515aa9d80bf8..92731ff8c79ad0 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -377,14 +377,14 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
return ret;
}
+#else
+static const struct user_regset_view compat_riscv_user_native_view = {};
#endif /* CONFIG_COMPAT */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
-#ifdef CONFIG_COMPAT
- if (test_tsk_thread_flag(task, TIF_32BIT))
+ if (is_compat_thread(&task->thread_info))
return &compat_riscv_user_native_view;
else
-#endif
return &riscv_user_native_view;
}
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index c4ed7d977f57b2..d41090fc320353 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -28,7 +28,6 @@
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
-#include <asm/cpufeature.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c
index 299795341e8a22..8a327b485b90e7 100644
--- a/arch/riscv/kernel/suspend.c
+++ b/arch/riscv/kernel/suspend.c
@@ -132,4 +132,53 @@ static int __init sbi_system_suspend_init(void)
}
arch_initcall(sbi_system_suspend_init);
+
+static int sbi_suspend_finisher(unsigned long suspend_type,
+ unsigned long resume_addr,
+ unsigned long opaque)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
+ suspend_type, resume_addr, opaque, 0, 0, 0);
+
+ return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
+}
+
+int riscv_sbi_hart_suspend(u32 state)
+{
+ if (state & SBI_HSM_SUSP_NON_RET_BIT)
+ return cpu_suspend(state, sbi_suspend_finisher);
+ else
+ return sbi_suspend_finisher(state, 0, 0);
+}
+
+bool riscv_sbi_suspend_state_is_valid(u32 state)
+{
+ if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
+ state < SBI_HSM_SUSPEND_RET_PLATFORM)
+ return false;
+
+ if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
+ state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
+ return false;
+
+ return true;
+}
+
+bool riscv_sbi_hsm_is_supported(void)
+{
+ /*
+ * The SBI HSM suspend function is only available when:
+ * 1) SBI version is 0.3 or higher
+ * 2) SBI HSM extension is available
+ */
+ if (sbi_spec_version < sbi_mk_version(0, 3) ||
+ !sbi_probe_extension(SBI_EXT_HSM)) {
+ pr_info("HSM suspend not available\n");
+ return false;
+ }
+
+ return true;
+}
#endif /* CONFIG_RISCV_SBI */
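These helpers let cpuidle code share the HSM plumbing instead of open-coding the ecall. A hedged sketch of the intended call pattern (example_enter_idle() is hypothetical):

	static int example_enter_idle(u32 state)
	{
		if (!riscv_sbi_hsm_is_supported())
			return -ENODEV;

		if (!riscv_sbi_suspend_state_is_valid(state))
			return -EINVAL;

		/* non-retentive states go through cpu_suspend() automatically */
		return riscv_sbi_hart_suspend(state);
	}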
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
index a7c56b41efd24d..8cae41a502dd4a 100644
--- a/arch/riscv/kernel/sys_hwprobe.c
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -147,6 +147,7 @@ static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
return (pair.value & ext);
}
+#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
int cpu;
@@ -169,6 +170,18 @@ static u64 hwprobe_misaligned(const struct cpumask *cpus)
return perf;
}
+#else
+static u64 hwprobe_misaligned(const struct cpumask *cpus)
+{
+ if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
+ return RISCV_HWPROBE_MISALIGNED_FAST;
+
+ if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
+ return RISCV_HWPROBE_MISALIGNED_EMULATED;
+
+ return RISCV_HWPROBE_MISALIGNED_SLOW;
+}
+#endif
static void hwprobe_one_pair(struct riscv_hwprobe *pair,
const struct cpumask *cpus)
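Either way, the result reaches userspace through the same hwprobe key. A sketch using the documented syscall interface (use_unaligned_fast_path() is a hypothetical caller):

	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_CPUPERF_0 };

	/* cpusetsize = 0, cpus = NULL: query the calling thread's CPUs */
	syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0);

	if ((pair.value & RISCV_HWPROBE_MISALIGNED_MASK) ==
	    RISCV_HWPROBE_MISALIGNED_FAST)
		use_unaligned_fast_path();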
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index a1b9be3c4332d9..868d6280cf667e 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -6,6 +6,7 @@
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/randomize_kstack.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
@@ -310,7 +311,8 @@ asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
}
}
-asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
+asmlinkage __visible __trap_section __no_stack_protector
+void do_trap_ecall_u(struct pt_regs *regs)
{
if (user_mode(regs)) {
long syscall = regs->a7;
@@ -322,10 +324,23 @@ asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
syscall = syscall_enter_from_user_mode(regs, syscall);
+ add_random_kstack_offset();
+
if (syscall >= 0 && syscall < NR_syscalls)
syscall_handler(regs, syscall);
else if (syscall != -1)
regs->a0 = -ENOSYS;
+ /*
+ * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
+ * so the maximum stack offset is 1k bytes (10 bits).
+ *
+ * The actual entropy will be further reduced by the compiler when
+ * applying stack alignment constraints: 16-byte (i.e. 4-bit) aligned
+ * for RV32I or RV64I.
+ *
+ * The resulting 6 bits of entropy are seen in SP[9:4].
+ */
+ choose_random_kstack_offset(get_random_u16());
syscall_exit_to_user_mode(regs);
} else {
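Concretely: choose_random_kstack_offset() stores 16 random bits, KSTACK_OFFSET_MAX() later masks them down to 10 bits, and the 16-byte stack alignment clears bits [3:0], leaving the 6 bits in SP[9:4]. A worked example:

	/* get_random_u16()            -> 0xBEEF
	 * masked by KSTACK_OFFSET_MAX:  0xBEEF & 0x3FF = 0x2EF  (10 bits)
	 * after 16-byte alignment:      0x2E0, i.e. SP[9:4] = 0x2E (6 bits) */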
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 8ded225e8c5b13..2adb7c3e4dd5bf 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -413,7 +413,9 @@ int handle_misaligned_load(struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
+#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
*this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_EMULATED;
+#endif
if (!unaligned_enabled)
return -1;
@@ -596,7 +598,7 @@ int handle_misaligned_store(struct pt_regs *regs)
return 0;
}
-bool check_unaligned_access_emulated(int cpu)
+static bool check_unaligned_access_emulated(int cpu)
{
long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
unsigned long tmp_var, tmp_val;
@@ -623,7 +625,7 @@ bool check_unaligned_access_emulated(int cpu)
return misaligned_emu_detected;
}
-void unaligned_emulation_finish(void)
+bool check_unaligned_access_emulated_all_cpus(void)
{
int cpu;
@@ -632,13 +634,12 @@ void unaligned_emulation_finish(void)
* accesses emulated since tasks requesting such control can run on any
* CPU.
*/
- for_each_present_cpu(cpu) {
- if (per_cpu(misaligned_access_speed, cpu) !=
- RISCV_HWPROBE_MISALIGNED_EMULATED) {
- return;
- }
- }
+ for_each_online_cpu(cpu)
+ if (!check_unaligned_access_emulated(cpu))
+ return false;
+
unaligned_ctl = true;
+ return true;
}
bool unaligned_ctl_available(void)
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
new file mode 100644
index 00000000000000..a9a6bcb02acf11
--- /dev/null
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2024 Rivos Inc.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/jump_label.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <asm/cpufeature.h>
+#include <asm/hwprobe.h>
+
+#include "copy-unaligned.h"
+
+#define MISALIGNED_ACCESS_JIFFIES_LG2 1
+#define MISALIGNED_BUFFER_SIZE 0x4000
+#define MISALIGNED_BUFFER_ORDER get_order(MISALIGNED_BUFFER_SIZE)
+#define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
+
+DEFINE_PER_CPU(long, misaligned_access_speed);
+
+#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
+static cpumask_t fast_misaligned_access;
+static int check_unaligned_access(void *param)
+{
+ int cpu = smp_processor_id();
+ u64 start_cycles, end_cycles;
+ u64 word_cycles;
+ u64 byte_cycles;
+ int ratio;
+ unsigned long start_jiffies, now;
+ struct page *page = param;
+ void *dst;
+ void *src;
+ long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
+
+ if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
+ return 0;
+
+ /* Make an unaligned destination buffer. */
+ dst = (void *)((unsigned long)page_address(page) | 0x1);
+ /* Unalign src as well, but differently (off by 1 + 2 = 3). */
+ src = dst + (MISALIGNED_BUFFER_SIZE / 2);
+ src += 2;
+ word_cycles = -1ULL;
+ /* Do a warmup. */
+ __riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+ preempt_disable();
+ start_jiffies = jiffies;
+ while ((now = jiffies) == start_jiffies)
+ cpu_relax();
+
+ /*
+ * For a fixed amount of time, repeatedly try the function, and take
+ * the best time in cycles as the measurement.
+ */
+ while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
+ start_cycles = get_cycles64();
+ /* Ensure the CSR read can't reorder WRT to the copy. */
+ mb();
+ __riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+ /* Ensure the copy ends before the end time is snapped. */
+ mb();
+ end_cycles = get_cycles64();
+ if ((end_cycles - start_cycles) < word_cycles)
+ word_cycles = end_cycles - start_cycles;
+ }
+
+ byte_cycles = -1ULL;
+ __riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+ start_jiffies = jiffies;
+ while ((now = jiffies) == start_jiffies)
+ cpu_relax();
+
+ while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
+ start_cycles = get_cycles64();
+ mb();
+ __riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+ mb();
+ end_cycles = get_cycles64();
+ if ((end_cycles - start_cycles) < byte_cycles)
+ byte_cycles = end_cycles - start_cycles;
+ }
+
+ preempt_enable();
+
+ /* Don't divide by zero. */
+ if (!word_cycles || !byte_cycles) {
+ pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
+ cpu);
+
+ return 0;
+ }
+
+ if (word_cycles < byte_cycles)
+ speed = RISCV_HWPROBE_MISALIGNED_FAST;
+
+ ratio = div_u64((byte_cycles * 100), word_cycles);
+ pr_info("cpu%d: Ratio of byte access time to unaligned word access is %d.%02d, unaligned accesses are %s\n",
+ cpu,
+ ratio / 100,
+ ratio % 100,
+ (speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
+
+ per_cpu(misaligned_access_speed, cpu) = speed;
+
+ /*
+	 * Set this CPU's bit in fast_misaligned_access. These cpumask
+	 * operations are atomic, avoiding races with other CPUs.
+ */
+ if (speed == RISCV_HWPROBE_MISALIGNED_FAST)
+ cpumask_set_cpu(cpu, &fast_misaligned_access);
+ else
+ cpumask_clear_cpu(cpu, &fast_misaligned_access);
+
+ return 0;
+}
+
+static void check_unaligned_access_nonboot_cpu(void *param)
+{
+ unsigned int cpu = smp_processor_id();
+ struct page **pages = param;
+
+ if (smp_processor_id() != 0)
+ check_unaligned_access(pages[cpu]);
+}
+
+DEFINE_STATIC_KEY_FALSE(fast_unaligned_access_speed_key);
+
+static void modify_unaligned_access_branches(cpumask_t *mask, int weight)
+{
+ if (cpumask_weight(mask) == weight)
+ static_branch_enable_cpuslocked(&fast_unaligned_access_speed_key);
+ else
+ static_branch_disable_cpuslocked(&fast_unaligned_access_speed_key);
+}
+
+static void set_unaligned_access_static_branches_except_cpu(int cpu)
+{
+ /*
+ * Same as set_unaligned_access_static_branches, except excludes the
+ * given CPU from the result. When a CPU is hotplugged into an offline
+ * state, this function is called before the CPU is set to offline in
+ * the cpumask, and thus the CPU needs to be explicitly excluded.
+ */
+
+ cpumask_t fast_except_me;
+
+ cpumask_and(&fast_except_me, &fast_misaligned_access, cpu_online_mask);
+ cpumask_clear_cpu(cpu, &fast_except_me);
+
+ modify_unaligned_access_branches(&fast_except_me, num_online_cpus() - 1);
+}
+
+static void set_unaligned_access_static_branches(void)
+{
+ /*
+ * This will be called after check_unaligned_access_all_cpus so the
+ * result of unaligned access speed for all CPUs will be available.
+ *
+ * To avoid the number of online cpus changing between reading
+ * cpu_online_mask and calling num_online_cpus, cpus_read_lock must be
+ * held before calling this function.
+ */
+
+ cpumask_t fast_and_online;
+
+ cpumask_and(&fast_and_online, &fast_misaligned_access, cpu_online_mask);
+
+ modify_unaligned_access_branches(&fast_and_online, num_online_cpus());
+}
+
+static int lock_and_set_unaligned_access_static_branch(void)
+{
+ cpus_read_lock();
+ set_unaligned_access_static_branches();
+ cpus_read_unlock();
+
+ return 0;
+}
+
+arch_initcall_sync(lock_and_set_unaligned_access_static_branch);
+
+static int riscv_online_cpu(unsigned int cpu)
+{
+ static struct page *buf;
+
+ /* We are already set since the last check */
+ if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
+ goto exit;
+
+ buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+ if (!buf) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ return -ENOMEM;
+ }
+
+ check_unaligned_access(buf);
+ __free_pages(buf, MISALIGNED_BUFFER_ORDER);
+
+exit:
+ set_unaligned_access_static_branches();
+
+ return 0;
+}
+
+static int riscv_offline_cpu(unsigned int cpu)
+{
+ set_unaligned_access_static_branches_except_cpu(cpu);
+
+ return 0;
+}
+
+/* Measure unaligned access speed on all CPUs present at boot in parallel. */
+static int check_unaligned_access_speed_all_cpus(void)
+{
+ unsigned int cpu;
+ unsigned int cpu_count = num_possible_cpus();
+ struct page **bufs = kcalloc(cpu_count, sizeof(*bufs), GFP_KERNEL);
+
+ if (!bufs) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ return 0;
+ }
+
+ /*
+ * Allocate separate buffers for each CPU so there's no fighting over
+ * cache lines.
+ */
+ for_each_cpu(cpu, cpu_online_mask) {
+ bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+ if (!bufs[cpu]) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ goto out;
+ }
+ }
+
+ /* Check everybody except 0, who stays behind to tend jiffies. */
+ on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);
+
+ /* Check core 0. */
+ smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
+
+ /*
+ * Setup hotplug callbacks for any new CPUs that come online or go
+ * offline.
+ */
+ cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
+ riscv_online_cpu, riscv_offline_cpu);
+
+out:
+ for_each_cpu(cpu, cpu_online_mask) {
+ if (bufs[cpu])
+ __free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
+ }
+
+ kfree(bufs);
+ return 0;
+}
+
+static int check_unaligned_access_all_cpus(void)
+{
+ bool all_cpus_emulated = check_unaligned_access_emulated_all_cpus();
+
+ if (!all_cpus_emulated)
+ return check_unaligned_access_speed_all_cpus();
+
+ return 0;
+}
+#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */
+static int check_unaligned_access_all_cpus(void)
+{
+ check_unaligned_access_emulated_all_cpus();
+
+ return 0;
+}
+#endif
+
+arch_initcall(check_unaligned_access_all_cpus);
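The boot-time flow of the relocated probe, distilled:

	/* arch_initcall(check_unaligned_access_all_cpus)
	 *   -> check_unaligned_access_emulated_all_cpus()
	 *        all CPUs emulated? -> done (speed = EMULATED everywhere)
	 *        otherwise          -> check_unaligned_access_speed_all_cpus()
	 *             per-CPU word-vs-byte copy timing, then cpumask and
	 *             static-key updates kept current via cpuhp callbacks */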
diff --git a/arch/riscv/lib/csum.c b/arch/riscv/lib/csum.c
index 74af3ab520b6d4..7fb12c59e57191 100644
--- a/arch/riscv/lib/csum.c
+++ b/arch/riscv/lib/csum.c
@@ -3,7 +3,7 @@
* Checksum library
*
* Influenced by arch/arm64/lib/csum.c
- * Copyright (C) 2023 Rivos Inc.
+ * Copyright (C) 2023-2024 Rivos Inc.
*/
#include <linux/bitops.h>
#include <linux/compiler.h>
@@ -318,10 +318,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
* branches. The largest chunk of overlap was delegated into the
* do_csum_common function.
*/
- if (static_branch_likely(&fast_misaligned_access_speed_key))
- return do_csum_no_alignment(buff, len);
-
- if (((unsigned long)buff & OFFSET_MASK) == 0)
+ if (has_fast_unaligned_accesses() || (((unsigned long)buff & OFFSET_MASK) == 0))
return do_csum_no_alignment(buff, len);
return do_csum_with_alignment(buff, len);
diff --git a/arch/riscv/lib/uaccess_vector.S b/arch/riscv/lib/uaccess_vector.S
index 51ab5588e9ff36..7c45f26de4f79b 100644
--- a/arch/riscv/lib/uaccess_vector.S
+++ b/arch/riscv/lib/uaccess_vector.S
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <linux/linkage.h>
-#include <asm-generic/export.h>
#include <asm/asm.h>
#include <asm/asm-extable.h>
#include <asm/csr.h>
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 55a34f2020a85a..bc61ee5975e412 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -82,12 +82,12 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
#endif /* CONFIG_SMP */
#ifdef CONFIG_MMU
-void flush_icache_pte(pte_t pte)
+void flush_icache_pte(struct mm_struct *mm, pte_t pte)
{
struct folio *folio = page_folio(pte_page(pte));
if (!test_bit(PG_dcache_clean, &folio->flags)) {
- flush_icache_all();
+ flush_icache_mm(mm, false);
set_bit(PG_dcache_clean, &folio->flags);
}
}
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 217fd4de613422..ba8eb3944687cf 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -323,6 +323,8 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (unlikely(prev == next))
return;
+ membarrier_arch_switch_mm(prev, next, task);
+
/*
* Mark the current MM context as inactive, and the next as
* active. This is at least used by the icache flushing
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index b5ffb2ef54ad22..fe8e159394d8ee 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -764,6 +764,11 @@ static int __init print_no5lvl(char *p)
}
early_param("no5lvl", print_no5lvl);
+static void __init set_mmap_rnd_bits_max(void)
+{
+ mmap_rnd_bits_max = MMAP_VA_BITS - PAGE_SHIFT - 3;
+}
+
/*
* There is a simple way to determine if 4-level is supported by the
* underlying hardware: establish 1:1 mapping in 4-level page table mode
@@ -1078,6 +1083,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
set_satp_mode(dtb_pa);
+ set_mmap_rnd_bits_max();
#endif
/*
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
index ef887efcb67900..533ec9055fa0da 100644
--- a/arch/riscv/mm/pgtable.c
+++ b/arch/riscv/mm/pgtable.c
@@ -10,7 +10,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
pte_t entry, int dirty)
{
if (!pte_same(ptep_get(ptep), entry))
- __set_pte_at(ptep, entry);
+ __set_pte_at(vma->vm_mm, ptep, entry);
/*
* update_mmu_cache will unconditionally execute, handling both
* the case that the PTE changed and the spurious fault case.
diff --git a/arch/x86/boot/compressed/efi_mixed.S b/arch/x86/boot/compressed/efi_mixed.S
index f4e22ef774ab6b..719e939050cbfa 100644
--- a/arch/x86/boot/compressed/efi_mixed.S
+++ b/arch/x86/boot/compressed/efi_mixed.S
@@ -49,6 +49,11 @@ SYM_FUNC_START(startup_64_mixed_mode)
lea efi32_boot_args(%rip), %rdx
mov 0(%rdx), %edi
mov 4(%rdx), %esi
+
+ /* Switch to the firmware's stack */
+ movl efi32_boot_sp(%rip), %esp
+ andl $~7, %esp
+
#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
mov 8(%rdx), %edx // saved bootparams pointer
test %edx, %edx
@@ -254,6 +259,9 @@ SYM_FUNC_START_LOCAL(efi32_entry)
/* Store firmware IDT descriptor */
sidtl (efi32_boot_idt - 1b)(%ebx)
+ /* Store firmware stack pointer */
+ movl %esp, (efi32_boot_sp - 1b)(%ebx)
+
/* Store boot arguments */
leal (efi32_boot_args - 1b)(%ebx), %ebx
movl %ecx, 0(%ebx)
@@ -318,5 +326,6 @@ SYM_DATA_END(efi32_boot_idt)
SYM_DATA_LOCAL(efi32_boot_cs, .word 0)
SYM_DATA_LOCAL(efi32_boot_ds, .word 0)
+SYM_DATA_LOCAL(efi32_boot_sp, .long 0)
SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
SYM_DATA(efi_is64, .byte 1)
diff --git a/arch/x86/configs/tiny.config b/arch/x86/configs/tiny.config
index 66c9e2aab16cc3..be3ee4294903bd 100644
--- a/arch/x86/configs/tiny.config
+++ b/arch/x86/configs/tiny.config
@@ -1,5 +1,6 @@
CONFIG_NOHIGHMEM=y
# CONFIG_HIGHMEM4G is not set
# CONFIG_HIGHMEM64G is not set
+# CONFIG_UNWINDER_ORC is not set
CONFIG_UNWINDER_GUESS=y
# CONFIG_UNWINDER_FRAME_POINTER is not set
diff --git a/arch/x86/include/asm/crash_reserve.h b/arch/x86/include/asm/crash_reserve.h
index 152239f9554195..7835b2cdff04a7 100644
--- a/arch/x86/include/asm/crash_reserve.h
+++ b/arch/x86/include/asm/crash_reserve.h
@@ -39,4 +39,6 @@ static inline unsigned long crash_low_size_default(void)
#endif
}
+#define HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
+
#endif /* _X86_CRASH_RESERVE_H */
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index b65e9c46b92210..d0941f4c272495 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -127,6 +127,7 @@
#define INTEL_FAM6_ARROWLAKE_H 0xC5
#define INTEL_FAM6_ARROWLAKE 0xC6
+#define INTEL_FAM6_ARROWLAKE_U 0xB5
#define INTEL_FAM6_LUNARLAKE_M 0xBD
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index a800abb1a99255..d8416b3bf832e4 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -12,11 +12,6 @@
/* image of the saved processor state */
struct saved_context {
- /*
- * On x86_32, all segment registers except gs are saved at kernel
- * entry in pt_regs.
- */
- u16 gs;
unsigned long cr0, cr2, cr3, cr4;
u64 misc_enable;
struct saved_msrs saved_msrs;
@@ -27,6 +22,11 @@ struct saved_context {
unsigned long tr;
unsigned long safety;
unsigned long return_address;
+ /*
+ * On x86_32, all segment registers except gs are saved at kernel
+ * entry in pt_regs.
+ */
+ u16 gs;
bool misc_enable_saved;
} __attribute__((packed));
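
The hunk above only relocates the gs slot inside the packed structure. Since packed structs have no padding, member order alone fixes every offset; a tiny sketch (field names abbreviated, purely illustrative):

/* Sketch: in a __attribute__((packed)) struct there is no padding, so
 * member order alone determines every offset. Moving a u16 from the
 * front to the back shifts all following members by two bytes.
 */
#include <stddef.h>
#include <stdio.h>

struct before {
        unsigned short gs;
        unsigned long cr0;
} __attribute__((packed));

struct after {
        unsigned long cr0;
        unsigned short gs;
} __attribute__((packed));

int main(void)
{
        printf("before: cr0 at %zu\n", offsetof(struct before, cr0));
        printf("after:  cr0 at %zu\n", offsetof(struct after, cr0));
        return 0;
}
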
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index ba8cf5e9ce5632..5c1e6d6be267af 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2307,6 +2307,8 @@ void arch_smt_update(void)
void __init arch_cpu_finalize_init(void)
{
+ struct cpuinfo_x86 *c = this_cpu_ptr(&cpu_info);
+
identify_boot_cpu();
select_idle_routine();
@@ -2345,6 +2347,13 @@ void __init arch_cpu_finalize_init(void)
fpu__init_system();
fpu__init_cpu();
+ /*
+ * Ensure that accesses to the per-CPU representation of the boot CPU
+ * see the initial boot CPU configuration.
+ */
+ *c = boot_cpu_data;
+ c->initialized = true;
+
alternative_instructions();
if (IS_ENABLED(CONFIG_X86_64)) {
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index 3259b1d4fefe3c..aaca8d235dc2bb 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -415,6 +415,17 @@ void __init topology_init_possible_cpus(void)
unsigned int total = assigned + disabled;
u32 apicid, firstid;
+ /*
+ * If there was no APIC registered, then fake one so that the
+ * topology bitmap is populated. That ensures that the code below
+ * is valid and the various query interfaces can be used
+ * unconditionally. This does not affect the actual APIC code in
+ * any way because either the local APIC address has not been
+ * registered or the local APIC was disabled on the command line.
+ */
+ if (topo_info.boot_cpu_apic_id == BAD_APICID)
+ topology_register_boot_apic(0);
+
if (!restrict_to_up()) {
if (WARN_ON_ONCE(assigned > nr_cpu_ids)) {
disabled += assigned - nr_cpu_ids;
diff --git a/arch/x86/kernel/cpu/topology_common.c b/arch/x86/kernel/cpu/topology_common.c
index a50ae8d63d1c87..9a6069e7133c9c 100644
--- a/arch/x86/kernel/cpu/topology_common.c
+++ b/arch/x86/kernel/cpu/topology_common.c
@@ -140,7 +140,7 @@ static void parse_topology(struct topo_scan *tscan, bool early)
}
}
-static void topo_set_ids(struct topo_scan *tscan)
+static void topo_set_ids(struct topo_scan *tscan, bool early)
{
struct cpuinfo_x86 *c = tscan->c;
u32 apicid = c->topo.apicid;
@@ -148,8 +148,10 @@ static void topo_set_ids(struct topo_scan *tscan)
c->topo.pkg_id = topo_shift_apicid(apicid, TOPO_PKG_DOMAIN);
c->topo.die_id = topo_shift_apicid(apicid, TOPO_DIE_DOMAIN);
- c->topo.logical_pkg_id = topology_get_logical_id(apicid, TOPO_PKG_DOMAIN);
- c->topo.logical_die_id = topology_get_logical_id(apicid, TOPO_DIE_DOMAIN);
+ if (!early) {
+ c->topo.logical_pkg_id = topology_get_logical_id(apicid, TOPO_PKG_DOMAIN);
+ c->topo.logical_die_id = topology_get_logical_id(apicid, TOPO_DIE_DOMAIN);
+ }
/* Package relative core ID */
c->topo.core_id = (apicid & topo_domain_mask(TOPO_PKG_DOMAIN)) >>
@@ -187,7 +189,7 @@ void cpu_parse_topology(struct cpuinfo_x86 *c)
tscan.dom_shifts[dom], x86_topo_system.dom_shifts[dom]);
}
- topo_set_ids(&tscan);
+ topo_set_ids(&tscan, false);
}
void __init cpu_init_topology(struct cpuinfo_x86 *c)
@@ -208,7 +210,7 @@ void __init cpu_init_topology(struct cpuinfo_x86 *c)
x86_topo_system.dom_size[dom] = 1U << sft;
}
- topo_set_ids(&tscan);
+ topo_set_ids(&tscan, true);
/*
* AMD systems have Nodes per package which cannot be mapped to
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index b66f540de054a7..6f1b379e3b3851 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1016,17 +1016,6 @@ void __init e820__reserve_setup_data(void)
e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
- /*
- * SETUP_EFI, SETUP_IMA and SETUP_RNG_SEED are supplied by
- * kexec and do not need to be reserved.
- */
- if (data->type != SETUP_EFI &&
- data->type != SETUP_IMA &&
- data->type != SETUP_RNG_SEED)
- e820__range_update_kexec(pa_data,
- sizeof(*data) + data->len,
- E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
-
if (data->type == SETUP_INDIRECT) {
len += data->len;
early_memunmap(data, sizeof(*data));
@@ -1038,12 +1027,9 @@ void __init e820__reserve_setup_data(void)
indirect = (struct setup_indirect *)data->data;
- if (indirect->type != SETUP_INDIRECT) {
+ if (indirect->type != SETUP_INDIRECT)
e820__range_update(indirect->addr, indirect->len,
E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
- e820__range_update_kexec(indirect->addr, indirect->len,
- E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
- }
}
pa_data = pa_next;
@@ -1051,7 +1037,6 @@ void __init e820__reserve_setup_data(void)
}
e820__update_table(e820_table);
- e820__update_table(e820_table_kexec);
pr_info("extended physical RAM map:\n");
e820__print_table("reserve setup_data");
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 117e74c44e756c..33a214b1a4cec1 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -178,10 +178,11 @@ void fpu__init_cpu_xstate(void)
* Must happen after CR4 setup and before xsetbv() to allow KVM
* lazy passthrough. Write independent of the dynamic state static
* key as that does not work on the boot CPU. This also ensures
- * that any stale state is wiped out from XFD.
+ * that any stale state is wiped out from XFD. Reset the per CPU
+ * xfd cache too.
*/
if (cpu_feature_enabled(X86_FEATURE_XFD))
- wrmsrl(MSR_IA32_XFD, init_fpstate.xfd);
+ xfd_set_state(init_fpstate.xfd);
/*
* XCR_XFEATURE_ENABLED_MASK (aka. XCR0) sets user features
diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
index 3518fb26d06b02..19ca623ffa2ac7 100644
--- a/arch/x86/kernel/fpu/xstate.h
+++ b/arch/x86/kernel/fpu/xstate.h
@@ -148,20 +148,26 @@ static inline void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rs
#endif
#ifdef CONFIG_X86_64
+static inline void xfd_set_state(u64 xfd)
+{
+ wrmsrl(MSR_IA32_XFD, xfd);
+ __this_cpu_write(xfd_state, xfd);
+}
+
static inline void xfd_update_state(struct fpstate *fpstate)
{
if (fpu_state_size_dynamic()) {
u64 xfd = fpstate->xfd;
- if (__this_cpu_read(xfd_state) != xfd) {
- wrmsrl(MSR_IA32_XFD, xfd);
- __this_cpu_write(xfd_state, xfd);
- }
+ if (__this_cpu_read(xfd_state) != xfd)
+ xfd_set_state(xfd);
}
}
extern int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu);
#else
+static inline void xfd_set_state(u64 xfd) { }
+
static inline void xfd_update_state(struct fpstate *fpstate) { }
static inline int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu) {
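
The refactor above splits the unconditional MSR write plus cache refresh (xfd_set_state()) out of the caching fast path (xfd_update_state()). A generic sketch of this write-through cache pattern, where hw_write() stands in for wrmsrl():

/* Sketch: cache the last value written to an expensive register and
 * skip the write when it would be a no-op. set_state() is the
 * unconditional setter; update_state() is the caching fast path.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t cached_state;

static void hw_write(uint64_t v)        /* stands in for wrmsrl() */
{
        printf("hw write: %#llx\n", (unsigned long long)v);
}

static void set_state(uint64_t v)       /* always writes, refreshes cache */
{
        hw_write(v);
        cached_state = v;
}

static void update_state(uint64_t v)    /* writes only on change */
{
        if (cached_state != v)
                set_state(v);
}

int main(void)
{
        set_state(0);           /* boot-time reset: write even if "same" */
        update_state(0);        /* skipped: cache already holds 0 */
        update_state(1);        /* written */
        return 0;
}
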
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 212e8e06aeba23..a817ed0724d1e7 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -81,6 +81,13 @@ static inline bool check_la57_support(void)
if (!(native_read_cr4() & X86_CR4_LA57))
return false;
+ RIP_REL_REF(__pgtable_l5_enabled) = 1;
+ RIP_REL_REF(pgdir_shift) = 48;
+ RIP_REL_REF(ptrs_per_p4d) = 512;
+ RIP_REL_REF(page_offset_base) = __PAGE_OFFSET_BASE_L5;
+ RIP_REL_REF(vmalloc_base) = __VMALLOC_BASE_L5;
+ RIP_REL_REF(vmemmap_base) = __VMEMMAP_BASE_L5;
+
return true;
}
@@ -175,7 +182,7 @@ unsigned long __head __startup_64(unsigned long physaddr,
p4d = (p4dval_t *)&RIP_REL_REF(level4_kernel_pgt);
p4d[MAX_PTRS_PER_P4D - 1] += load_delta;
- pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE_NOENC;
+ pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE;
}
RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 2].pud += load_delta;
@@ -431,15 +438,6 @@ asmlinkage __visible void __init __noreturn x86_64_start_kernel(char * real_mode
(__START_KERNEL & PGDIR_MASK)));
BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);
- if (check_la57_support()) {
- __pgtable_l5_enabled = 1;
- pgdir_shift = 48;
- ptrs_per_p4d = 512;
- page_offset_base = __PAGE_OFFSET_BASE_L5;
- vmalloc_base = __VMALLOC_BASE_L5;
- vmemmap_base = __VMEMMAP_BASE_L5;
- }
-
cr4_init_shadow();
/* Kill off the identity-map trampoline */
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 091b3ab76a1801..d0e49bd7c6f3fa 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -373,7 +373,16 @@ out:
kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
bool *on_func_entry)
{
- if (is_endbr(*(u32 *)addr)) {
+ u32 insn;
+
+ /*
+ * Since 'addr' is not guaranteed to be safe to access, use
+ * copy_from_kernel_nofault() to read the instruction:
+ */
+ if (copy_from_kernel_nofault(&insn, (void *)addr, sizeof(u32)))
+ return NULL;
+
+ if (is_endbr(insn)) {
*on_func_entry = !offset || offset == 4;
if (*on_func_entry)
offset = 4;
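
The fix above decodes the instruction from a local copy obtained with copy_from_kernel_nofault() instead of dereferencing an unvalidated address. A sketch of the same pattern, with safe_read32() standing in for the nofault copy:

/* Sketch of the pattern above: never dereference an unvalidated address
 * directly; go through a fallible copy that reports failure instead of
 * faulting. safe_read32() stands in for copy_from_kernel_nofault().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int safe_read32(uint32_t *dst, const void *src, size_t avail)
{
        if (avail < sizeof(*dst))
                return -1;              /* would fault / out of range */
        memcpy(dst, src, sizeof(*dst));
        return 0;
}

int main(void)
{
        uint8_t text[8] = { 0xf3, 0x0f, 0x1e, 0xfa };   /* endbr64 bytes */
        uint32_t insn;

        if (safe_read32(&insn, text, sizeof(text)) == 0)
                printf("insn word: %#x\n", insn);
        return 0;
}
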
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 1ccd30c8246faa..e89171b0347a6b 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -197,12 +197,12 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
if (!smp_check_mpc(mpc, oem, str))
return 0;
- /* Initialize the lapic mapping */
- if (!acpi_lapic)
- register_lapic_address(mpc->lapic);
-
- if (early)
+ if (early) {
+ /* Initialize the lapic mapping */
+ if (!acpi_lapic)
+ register_lapic_address(mpc->lapic);
return 1;
+ }
/* Now process the configuration blocks. */
while (count < mpc->length) {
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 3e1e96efadfe7e..ef206500ed6f22 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1206,16 +1206,6 @@ void __init i386_reserve_resources(void)
#endif /* CONFIG_X86_32 */
-#ifndef CONFIG_SMP
-void __init smp_prepare_boot_cpu(void)
-{
- struct cpuinfo_x86 *c = &cpu_data(0);
-
- *c = boot_cpu_data;
- c->initialized = true;
-}
-#endif
-
static struct notifier_block kernel_offset_notifier = {
.notifier_call = dump_kernel_offset
};
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index fe355c89f6c112..76bb65045c649a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -313,14 +313,6 @@ static void notrace start_secondary(void *unused)
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
-static void __init smp_store_boot_cpu_info(void)
-{
- struct cpuinfo_x86 *c = &cpu_data(0);
-
- *c = boot_cpu_data;
- c->initialized = true;
-}
-
/*
* The bootstrap kernel entry code has set these up. Save them for
* a given CPU
@@ -1039,29 +1031,15 @@ static __init void disable_smp(void)
cpumask_set_cpu(0, topology_die_cpumask(0));
}
-static void __init smp_cpu_index_default(void)
-{
- int i;
- struct cpuinfo_x86 *c;
-
- for_each_possible_cpu(i) {
- c = &cpu_data(i);
- /* mark all to hotplug */
- c->cpu_index = nr_cpu_ids;
- }
-}
-
void __init smp_prepare_cpus_common(void)
{
unsigned int i;
- smp_cpu_index_default();
-
- /*
- * Setup boot CPU information
- */
- smp_store_boot_cpu_info(); /* Final full version of the data */
- mb();
+ /* Mark all except the boot CPU as hotpluggable */
+ for_each_possible_cpu(i) {
+ if (i)
+ per_cpu(cpu_info.cpu_index, i) = nr_cpu_ids;
+ }
for_each_possible_cpu(i) {
zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 44661c2e30ca5d..2903ce19f15cfa 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1497,6 +1497,9 @@ endif
if PPC
source "arch/powerpc/crypto/Kconfig"
endif
+if RISCV
+source "arch/riscv/crypto/Kconfig"
+endif
if S390
source "arch/s390/crypto/Kconfig"
endif
diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c
index 05402ef8964ed4..8aecbe4637f36e 100644
--- a/crypto/asymmetric_keys/mscode_parser.c
+++ b/crypto/asymmetric_keys/mscode_parser.c
@@ -75,6 +75,9 @@ int mscode_note_digest_algo(void *context, size_t hdrlen,
oid = look_up_OID(value, vlen);
switch (oid) {
+ case OID_sha1:
+ ctx->digest_algo = "sha1";
+ break;
case OID_sha256:
ctx->digest_algo = "sha256";
break;
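
The added case maps the SHA-1 OID to its algorithm name, and the same additions recur in the pkcs7 and x509 parsers below. A table-style sketch of the mapping (the enum values are local placeholders, not the kernel's oid_registry.h constants):

/* Sketch: map a digest-algorithm OID to its name, as the switch above
 * does. The enum values are illustrative; the kernel's OID registry
 * lives in include/linux/oid_registry.h.
 */
#include <stdio.h>

enum oid { OID_sha1, OID_sha256, OID_sha384, OID_sha512, OID_unknown };

static const char *digest_name(enum oid oid)
{
        switch (oid) {
        case OID_sha1:   return "sha1";
        case OID_sha256: return "sha256";
        case OID_sha384: return "sha384";
        case OID_sha512: return "sha512";
        default:         return NULL;   /* -ENOPKG in the kernel */
        }
}

int main(void)
{
        printf("%s\n", digest_name(OID_sha1));
        return 0;
}
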
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index 5b08c50722d0f5..231ad7b3789d5e 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -227,6 +227,9 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen,
struct pkcs7_parse_context *ctx = context;
switch (ctx->last_oid) {
+ case OID_sha1:
+ ctx->sinfo->sig->hash_algo = "sha1";
+ break;
case OID_sha256:
ctx->sinfo->sig->hash_algo = "sha256";
break;
@@ -278,6 +281,7 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen,
ctx->sinfo->sig->pkey_algo = "rsa";
ctx->sinfo->sig->encoding = "pkcs1";
break;
+ case OID_id_ecdsa_with_sha1:
case OID_id_ecdsa_with_sha224:
case OID_id_ecdsa_with_sha256:
case OID_id_ecdsa_with_sha384:
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index e5f22691febd59..e314fd57e6f88a 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -115,7 +115,8 @@ software_key_determine_akcipher(const struct public_key *pkey,
*/
if (!hash_algo)
return -EINVAL;
- if (strcmp(hash_algo, "sha224") != 0 &&
+ if (strcmp(hash_algo, "sha1") != 0 &&
+ strcmp(hash_algo, "sha224") != 0 &&
strcmp(hash_algo, "sha256") != 0 &&
strcmp(hash_algo, "sha384") != 0 &&
strcmp(hash_algo, "sha512") != 0 &&
diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c
index 398983be77e8bc..2deff81f8af50b 100644
--- a/crypto/asymmetric_keys/signature.c
+++ b/crypto/asymmetric_keys/signature.c
@@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(decrypt_blob);
* Sign the specified data blob using the private key specified by params->key.
* The signature is wrapped in an encoding if params->encoding is specified
* (eg. "pkcs1"). If the encoding needs to know the digest type, this can be
- * passed through params->hash_algo (eg. "sha512").
+ * passed through params->hash_algo (eg. "sha1").
*
* Returns the length of the data placed in the signature buffer or an error.
*/
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 487204d394266e..bb0bffa271b53c 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -198,6 +198,10 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
default:
return -ENOPKG; /* Unsupported combination */
+ case OID_sha1WithRSAEncryption:
+ ctx->cert->sig->hash_algo = "sha1";
+ goto rsa_pkcs1;
+
case OID_sha256WithRSAEncryption:
ctx->cert->sig->hash_algo = "sha256";
goto rsa_pkcs1;
@@ -214,6 +218,10 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
ctx->cert->sig->hash_algo = "sha224";
goto rsa_pkcs1;
+ case OID_id_ecdsa_with_sha1:
+ ctx->cert->sig->hash_algo = "sha1";
+ goto ecdsa;
+
case OID_id_rsassa_pkcs1_v1_5_with_sha3_256:
ctx->cert->sig->hash_algo = "sha3-256";
goto rsa_pkcs1;
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 986f331a5fc247..12e1c892f36661 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -653,6 +653,30 @@ static const struct akcipher_testvec rsa_tv_template[] = {
static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
{
.key =
+ "\x04\xf7\x46\xf8\x2f\x15\xf6\x22\x8e\xd7\x57\x4f\xcc\xe7\xbb\xc1"
+ "\xd4\x09\x73\xcf\xea\xd0\x15\x07\x3d\xa5\x8a\x8a\x95\x43\xe4\x68"
+ "\xea\xc6\x25\xc1\xc1\x01\x25\x4c\x7e\xc3\x3c\xa6\x04\x0a\xe7\x08"
+ "\x98",
+ .key_len = 49,
+ .params =
+ "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+ "\xce\x3d\x03\x01\x01",
+ .param_len = 21,
+ .m =
+ "\xcd\xb9\xd2\x1c\xb7\x6f\xcd\x44\xb3\xfd\x63\xea\xa3\x66\x7f\xae"
+ "\x63\x85\xe7\x82",
+ .m_size = 20,
+ .algo = OID_id_ecdsa_with_sha1,
+ .c =
+ "\x30\x35\x02\x19\x00\xba\xe5\x93\x83\x6e\xb6\x3b\x63\xa0\x27\x91"
+ "\xc6\xf6\x7f\xc3\x09\xad\x59\xad\x88\x27\xd6\x92\x6b\x02\x18\x10"
+ "\x68\x01\x9d\xba\xce\x83\x08\xef\x95\x52\x7b\xa0\x0f\xe4\x18\x86"
+ "\x80\x6f\xa5\x79\x77\xda\xd0",
+ .c_size = 55,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key =
"\x04\xb6\x4b\xb1\xd1\xac\xba\x24\x8f\x65\xb2\x60\x00\x90\xbf\xbd"
"\x78\x05\x73\xe9\x79\x1d\x6f\x7c\x0b\xd2\xc3\x93\xa7\x28\xe1\x75"
"\xf7\xd5\x95\x1d\x28\x10\xc0\x75\x50\x5c\x1a\x4f\x3f\x8f\xa5\xee"
@@ -756,6 +780,32 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
{
.key =
+ "\x04\xb9\x7b\xbb\xd7\x17\x64\xd2\x7e\xfc\x81\x5d\x87\x06\x83\x41"
+ "\x22\xd6\x9a\xaa\x87\x17\xec\x4f\x63\x55\x2f\x94\xba\xdd\x83\xe9"
+ "\x34\x4b\xf3\xe9\x91\x13\x50\xb6\xcb\xca\x62\x08\xe7\x3b\x09\xdc"
+ "\xc3\x63\x4b\x2d\xb9\x73\x53\xe4\x45\xe6\x7c\xad\xe7\x6b\xb0\xe8"
+ "\xaf",
+ .key_len = 65,
+ .params =
+ "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+ "\xce\x3d\x03\x01\x07",
+ .param_len = 21,
+ .m =
+ "\xc2\x2b\x5f\x91\x78\x34\x26\x09\x42\x8d\x6f\x51\xb2\xc5\xaf\x4c"
+ "\x0b\xde\x6a\x42",
+ .m_size = 20,
+ .algo = OID_id_ecdsa_with_sha1,
+ .c =
+ "\x30\x46\x02\x21\x00\xf9\x25\xce\x9f\x3a\xa6\x35\x81\xcf\xd4\xe7"
+ "\xb7\xf0\x82\x56\x41\xf7\xd4\xad\x8d\x94\x5a\x69\x89\xee\xca\x6a"
+ "\x52\x0e\x48\x4d\xcc\x02\x21\x00\xd7\xe4\xef\x52\x66\xd3\x5b\x9d"
+ "\x8a\xfa\x54\x93\x29\xa7\x70\x86\xf1\x03\x03\xf3\x3b\xe2\x73\xf7"
+ "\xfb\x9d\x8b\xde\xd4\x8d\x6f\xad",
+ .c_size = 72,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key =
"\x04\x8b\x6d\xc0\x33\x8e\x2d\x8b\x67\xf5\xeb\xc4\x7f\xa0\xf5\xd9"
"\x7b\x03\xa5\x78\x9a\xb5\xea\x14\xe4\x23\xd0\xaf\xd7\x0e\x2e\xa0"
"\xc9\x8b\xdb\x95\xf8\xb3\xaf\xac\x00\x2c\x2c\x1f\x7a\xfd\x95\x88"
@@ -866,6 +916,36 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
{
+ .key = /* secp384r1(sha1) */
+ "\x04\x89\x25\xf3\x97\x88\xcb\xb0\x78\xc5\x72\x9a\x14\x6e\x7a\xb1"
+ "\x5a\xa5\x24\xf1\x95\x06\x9e\x28\xfb\xc4\xb9\xbe\x5a\x0d\xd9\x9f"
+ "\xf3\xd1\x4d\x2d\x07\x99\xbd\xda\xa7\x66\xec\xbb\xea\xba\x79\x42"
+ "\xc9\x34\x89\x6a\xe7\x0b\xc3\xf2\xfe\x32\x30\xbe\xba\xf9\xdf\x7e"
+ "\x4b\x6a\x07\x8e\x26\x66\x3f\x1d\xec\xa2\x57\x91\x51\xdd\x17\x0e"
+ "\x0b\x25\xd6\x80\x5c\x3b\xe6\x1a\x98\x48\x91\x45\x7a\x73\xb0\xc3"
+ "\xf1",
+ .key_len = 97,
+ .params =
+ "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
+ "\x00\x22",
+ .param_len = 18,
+ .m =
+ "\x12\x55\x28\xf0\x77\xd5\xb6\x21\x71\x32\x48\xcd\x28\xa8\x25\x22"
+ "\x3a\x69\xc1\x93",
+ .m_size = 20,
+ .algo = OID_id_ecdsa_with_sha1,
+ .c =
+ "\x30\x66\x02\x31\x00\xf5\x0f\x24\x4c\x07\x93\x6f\x21\x57\x55\x07"
+ "\x20\x43\x30\xde\xa0\x8d\x26\x8e\xae\x63\x3f\xbc\x20\x3a\xc6\xf1"
+ "\x32\x3c\xce\x70\x2b\x78\xf1\x4c\x26\xe6\x5b\x86\xcf\xec\x7c\x7e"
+ "\xd0\x87\xd7\xd7\x6e\x02\x31\x00\xcd\xbb\x7e\x81\x5d\x8f\x63\xc0"
+ "\x5f\x63\xb1\xbe\x5e\x4c\x0e\xa1\xdf\x28\x8c\x1b\xfa\xf9\x95\x88"
+ "\x74\xa0\x0f\xbf\xaf\xc3\x36\x76\x4a\xa1\x59\xf1\x1c\xa4\x58\x26"
+ "\x79\x12\x2a\xb7\xc5\x15\x92\xc5",
+ .c_size = 104,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
.key = /* secp384r1(sha224) */
"\x04\x69\x6c\xcf\x62\xee\xd0\x0d\xe5\xb5\x2f\x70\x54\xcf\x26\xa0"
"\xd9\x98\x8d\x92\x2a\xab\x9b\x11\xcb\x48\x18\xa1\xa9\x0d\xd5\x18"
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 5d1fb83b2d32ec..ff1689bb3124dd 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -286,7 +286,7 @@ config ACPI_CPPC_LIB
config ACPI_PROCESSOR
tristate "Processor"
- depends on X86 || ARM64 || LOONGARCH
+ depends on X86 || ARM64 || LOONGARCH || RISCV
select ACPI_PROCESSOR_IDLE
select ACPI_CPU_FREQ_PSS if X86 || LOONGARCH
select THERMAL
diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile
index 8b3b126e0b940b..86b0925f612d98 100644
--- a/drivers/acpi/riscv/Makefile
+++ b/drivers/acpi/riscv/Makefile
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += rhct.o
+obj-y += rhct.o
+obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o
+obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o
diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c
new file mode 100644
index 00000000000000..4cdff387deff6c
--- /dev/null
+++ b/drivers/acpi/riscv/cppc.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implement CPPC FFH helper routines for RISC-V.
+ *
+ * Copyright (C) 2024 Ventana Micro Systems Inc.
+ */
+
+#include <acpi/cppc_acpi.h>
+#include <asm/csr.h>
+#include <asm/sbi.h>
+
+#define SBI_EXT_CPPC 0x43505043
+
+/* CPPC interfaces defined in SBI spec */
+#define SBI_CPPC_PROBE 0x0
+#define SBI_CPPC_READ 0x1
+#define SBI_CPPC_READ_HI 0x2
+#define SBI_CPPC_WRITE 0x3
+
+/* RISC-V FFH definitions from RISC-V FFH spec */
+#define FFH_CPPC_TYPE(r) (((r) & GENMASK_ULL(63, 60)) >> 60)
+#define FFH_CPPC_SBI_REG(r) ((r) & GENMASK(31, 0))
+#define FFH_CPPC_CSR_NUM(r) ((r) & GENMASK(11, 0))
+
+#define FFH_CPPC_SBI 0x1
+#define FFH_CPPC_CSR 0x2
+
+struct sbi_cppc_data {
+ u64 val;
+ u32 reg;
+ struct sbiret ret;
+};
+
+static bool cppc_ext_present;
+
+static int __init sbi_cppc_init(void)
+{
+ if (sbi_spec_version >= sbi_mk_version(2, 0) &&
+ sbi_probe_extension(SBI_EXT_CPPC) > 0) {
+ pr_info("SBI CPPC extension detected\n");
+ cppc_ext_present = true;
+ } else {
+ pr_info("SBI CPPC extension NOT detected!!\n");
+ cppc_ext_present = false;
+ }
+
+ return 0;
+}
+device_initcall(sbi_cppc_init);
+
+static void sbi_cppc_read(void *read_data)
+{
+ struct sbi_cppc_data *data = (struct sbi_cppc_data *)read_data;
+
+ data->ret = sbi_ecall(SBI_EXT_CPPC, SBI_CPPC_READ,
+ data->reg, 0, 0, 0, 0, 0);
+}
+
+static void sbi_cppc_write(void *write_data)
+{
+ struct sbi_cppc_data *data = (struct sbi_cppc_data *)write_data;
+
+ data->ret = sbi_ecall(SBI_EXT_CPPC, SBI_CPPC_WRITE,
+ data->reg, data->val, 0, 0, 0, 0);
+}
+
+static void cppc_ffh_csr_read(void *read_data)
+{
+ struct sbi_cppc_data *data = (struct sbi_cppc_data *)read_data;
+
+ switch (data->reg) {
+ /* Support only TIME CSR for now */
+ case CSR_TIME:
+ data->ret.value = csr_read(CSR_TIME);
+ data->ret.error = 0;
+ break;
+ default:
+ data->ret.error = -EINVAL;
+ break;
+ }
+}
+
+static void cppc_ffh_csr_write(void *write_data)
+{
+ struct sbi_cppc_data *data = (struct sbi_cppc_data *)write_data;
+
+ data->ret.error = -EINVAL;
+}
+
+/*
+ * Refer to drivers/acpi/cppc_acpi.c for the description of the functions
+ * below.
+ */
+bool cpc_ffh_supported(void)
+{
+ return true;
+}
+
+int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
+{
+ struct sbi_cppc_data data;
+
+ if (WARN_ON_ONCE(irqs_disabled()))
+ return -EPERM;
+
+ if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_SBI) {
+ if (!cppc_ext_present)
+ return -EINVAL;
+
+ data.reg = FFH_CPPC_SBI_REG(reg->address);
+
+ smp_call_function_single(cpu, sbi_cppc_read, &data, 1);
+
+ *val = data.ret.value;
+
+ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ } else if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_CSR) {
+ data.reg = FFH_CPPC_CSR_NUM(reg->address);
+
+ smp_call_function_single(cpu, cppc_ffh_csr_read, &data, 1);
+
+ *val = data.ret.value;
+
+ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ }
+
+ return -EINVAL;
+}
+
+int cpc_write_ffh(int cpu, struct cpc_reg *reg, u64 val)
+{
+ struct sbi_cppc_data data;
+
+ if (WARN_ON_ONCE(irqs_disabled()))
+ return -EPERM;
+
+ if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_SBI) {
+ if (!cppc_ext_present)
+ return -EINVAL;
+
+ data.reg = FFH_CPPC_SBI_REG(reg->address);
+ data.val = val;
+
+ smp_call_function_single(cpu, sbi_cppc_write, &data, 1);
+
+ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ } else if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_CSR) {
+ data.reg = FFH_CPPC_CSR_NUM(reg->address);
+ data.val = val;
+
+ smp_call_function_single(cpu, cppc_ffh_csr_write, &data, 1);
+
+ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ }
+
+ return -EINVAL;
+}
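
The FFH_CPPC_* macros above split a 64-bit ACPI FFH register address into a type nibble (bits 63:60) and a register payload. A self-contained sketch of the decode, with GENMASK_ULL() redefined locally so it builds outside the kernel:

/* Sketch: decode the RISC-V CPPC FFH address layout used above.
 * bits[63:60] select the access type (1 = SBI, 2 = CSR); the low bits
 * carry the SBI register id or CSR number.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

#define FFH_CPPC_TYPE(r)    (((r) & GENMASK_ULL(63, 60)) >> 60)
#define FFH_CPPC_SBI_REG(r) ((r) & GENMASK_ULL(31, 0))

int main(void)
{
        uint64_t addr = (1ULL << 60) | 0x2;     /* illustrative: SBI reg 2 */

        printf("type=%llu reg=%llu\n",
               (unsigned long long)FFH_CPPC_TYPE(addr),
               (unsigned long long)FFH_CPPC_SBI_REG(addr));
        return 0;
}
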
diff --git a/drivers/acpi/riscv/cpuidle.c b/drivers/acpi/riscv/cpuidle.c
new file mode 100644
index 00000000000000..624f9bbdb58c47
--- /dev/null
+++ b/drivers/acpi/riscv/cpuidle.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024, Ventana Micro Systems Inc
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ *
+ */
+
+#include <linux/acpi.h>
+#include <acpi/processor.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpuidle.h>
+#include <linux/suspend.h>
+#include <asm/cpuidle.h>
+#include <asm/sbi.h>
+#include <asm/suspend.h>
+
+#define RISCV_FFH_LPI_TYPE_MASK GENMASK_ULL(63, 60)
+#define RISCV_FFH_LPI_RSVD_MASK GENMASK_ULL(59, 32)
+
+#define RISCV_FFH_LPI_TYPE_SBI BIT_ULL(60)
+
+static int acpi_cpu_init_idle(unsigned int cpu)
+{
+ int i;
+ struct acpi_lpi_state *lpi;
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+
+ if (unlikely(!pr || !pr->flags.has_lpi))
+ return -EINVAL;
+
+ if (!riscv_sbi_hsm_is_supported())
+ return -ENODEV;
+
+ if (pr->power.count <= 1)
+ return -ENODEV;
+
+ for (i = 1; i < pr->power.count; i++) {
+ u32 state;
+
+ lpi = &pr->power.lpi_states[i];
+
+ /*
+ * Validate the entry method as per the FFH spec:
+ * bits[63:60] should be 0x1
+ * bits[59:32] should be 0x0
+ * bits[31:0] represent an SBI power_state
+ */
+ if (((lpi->address & RISCV_FFH_LPI_TYPE_MASK) != RISCV_FFH_LPI_TYPE_SBI) ||
+ (lpi->address & RISCV_FFH_LPI_RSVD_MASK)) {
+ pr_warn("Invalid LPI entry method %#llx\n", lpi->address);
+ return -EINVAL;
+ }
+
+ state = lpi->address;
+ if (!riscv_sbi_suspend_state_is_valid(state)) {
+ pr_warn("Invalid SBI power state %#x\n", state);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int acpi_processor_ffh_lpi_probe(unsigned int cpu)
+{
+ return acpi_cpu_init_idle(cpu);
+}
+
+int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
+{
+ u32 state = lpi->address;
+
+ if (state & SBI_HSM_SUSP_NON_RET_BIT)
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend,
+ lpi->index,
+ state);
+ else
+ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend,
+ lpi->index,
+ state);
+}
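
acpi_cpu_init_idle() above accepts an LPI entry method only when bits[63:60] are 0x1 and the reserved bits[59:32] are clear. A sketch of that validation with locally defined masks:

/* Sketch: validate the LPI entry-method layout checked above.
 * A valid word has type nibble 0x1, clear reserved bits, and the SBI
 * power state in the low 32 bits.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LPI_TYPE_MASK  (0xFULL << 60)
#define LPI_RSVD_MASK  (0x0FFFFFFFULL << 32)
#define LPI_TYPE_SBI   (1ULL << 60)

static bool lpi_address_valid(uint64_t addr)
{
        if ((addr & LPI_TYPE_MASK) != LPI_TYPE_SBI)
                return false;
        if (addr & LPI_RSVD_MASK)
                return false;
        return true;
}

int main(void)
{
        printf("%d\n", lpi_address_valid((1ULL << 60) | 0x80000000ULL));
        printf("%d\n", lpi_address_valid(2ULL << 60));      /* bad type */
        return 0;
}
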
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 1b399ec8c07d1e..25c9d85667f1a2 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2787,7 +2787,6 @@ do_request:
pending = set_next_request();
spin_unlock_irq(&floppy_lock);
if (!pending) {
- do_floppy = NULL;
unlock_fdc();
return;
}
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 44a61dc6f93208..ab1c8c2b66b887 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/bitfield.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
@@ -31,10 +32,7 @@
#define GT_CONTROL_COMP_ENABLE BIT(1) /* banked */
#define GT_CONTROL_IRQ_ENABLE BIT(2) /* banked */
#define GT_CONTROL_AUTO_INC BIT(3) /* banked */
-#define GT_CONTROL_PRESCALER_SHIFT 8
-#define GT_CONTROL_PRESCALER_MAX 0xF
-#define GT_CONTROL_PRESCALER_MASK (GT_CONTROL_PRESCALER_MAX << \
- GT_CONTROL_PRESCALER_SHIFT)
+#define GT_CONTROL_PRESCALER_MASK GENMASK(15, 8)
#define GT_INT_STATUS 0x0c
#define GT_INT_STATUS_EVENT_FLAG BIT(0)
@@ -52,7 +50,8 @@
*/
static void __iomem *gt_base;
static struct notifier_block gt_clk_rate_change_nb;
-static u32 gt_psv_new, gt_psv_bck, gt_target_rate;
+static u32 gt_psv_new, gt_psv_bck;
+static unsigned long gt_target_rate;
static int gt_ppi;
static struct clock_event_device __percpu *gt_evt;
@@ -88,7 +87,7 @@ static u64 gt_counter_read(void)
return _gt_counter_read();
}
-/**
+/*
* To ensure that updates to comparator value register do not set the
* Interrupt Status Register proceed as follows:
* 1. Clear the Comp Enable bit in the Timer Control Register.
@@ -247,7 +246,7 @@ static void gt_write_presc(u32 psv)
reg = readl(gt_base + GT_CONTROL);
reg &= ~GT_CONTROL_PRESCALER_MASK;
- reg |= psv << GT_CONTROL_PRESCALER_SHIFT;
+ reg |= FIELD_PREP(GT_CONTROL_PRESCALER_MASK, psv);
writel(reg, gt_base + GT_CONTROL);
}
@@ -256,8 +255,7 @@ static u32 gt_read_presc(void)
u32 reg;
reg = readl(gt_base + GT_CONTROL);
- reg &= GT_CONTROL_PRESCALER_MASK;
- return reg >> GT_CONTROL_PRESCALER_SHIFT;
+ return FIELD_GET(GT_CONTROL_PRESCALER_MASK, reg);
}
static void __init gt_delay_timer_init(void)
@@ -272,9 +270,9 @@ static int __init gt_clocksource_init(void)
writel(0, gt_base + GT_COUNTER0);
writel(0, gt_base + GT_COUNTER1);
/* set prescaler and enable timer on all the cores */
- writel(((CONFIG_ARM_GT_INITIAL_PRESCALER_VAL - 1) <<
- GT_CONTROL_PRESCALER_SHIFT)
- | GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
+ writel(FIELD_PREP(GT_CONTROL_PRESCALER_MASK,
+ CONFIG_ARM_GT_INITIAL_PRESCALER_VAL - 1) |
+ GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
sched_clock_register(gt_sched_clock_read, 64, gt_target_rate);
@@ -290,18 +288,17 @@ static int gt_clk_rate_change_cb(struct notifier_block *nb,
switch (event) {
case PRE_RATE_CHANGE:
{
- int psv;
-
- psv = DIV_ROUND_CLOSEST(ndata->new_rate,
- gt_target_rate);
+ unsigned long psv;
- if (abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
+ psv = DIV_ROUND_CLOSEST(ndata->new_rate, gt_target_rate);
+ if (!psv ||
+ abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
return NOTIFY_BAD;
psv--;
/* prescaler within legal range? */
- if (psv < 0 || psv > GT_CONTROL_PRESCALER_MAX)
+ if (!FIELD_FIT(GT_CONTROL_PRESCALER_MASK, psv))
return NOTIFY_BAD;
/*
@@ -411,7 +408,7 @@ static int __init global_timer_of_register(struct device_node *np)
err = gt_clocksource_init();
if (err)
goto out_irq;
-
+
err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
"clockevents/arm/global_timer:starting",
gt_starting_cpu, gt_dying_cpu);
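
Two things happen in the timer conversion above: open-coded shift/mask pairs become GENMASK()/FIELD_PREP()/FIELD_GET(), and the rate-change notifier now rejects a zero prescaler before dividing by it. A sketch of both, with simplified local stand-ins for the kernel macros:

/* Sketch: GENMASK(15, 8) describes the prescaler field once; FIELD_PREP
 * and FIELD_GET derive the shift from the mask. DIV_ROUND_CLOSEST
 * mirrors the prescaler computation in gt_clk_rate_change_cb().
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)     (((~0U) << (l)) & (~0U >> (31 - (h))))
#define FIELD_PREP(m, v)  (((v) << __builtin_ctz(m)) & (m))
#define FIELD_GET(m, r)   (((r) & (m)) >> __builtin_ctz(m))
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

#define PRESC_MASK GENMASK(15, 8)

int main(void)
{
        uint32_t reg = FIELD_PREP(PRESC_MASK, 4);
        unsigned long psv = DIV_ROUND_CLOSEST(500000000UL, 100000000UL);

        printf("reg=%#x presc=%u psv=%lu\n",
               reg, FIELD_GET(PRESC_MASK, reg), psv);
        return 0;
}
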
diff --git a/drivers/clocksource/timer-clint.c b/drivers/clocksource/timer-clint.c
index 9a55e733ae995d..09fd292eb83df0 100644
--- a/drivers/clocksource/timer-clint.c
+++ b/drivers/clocksource/timer-clint.c
@@ -131,7 +131,7 @@ static int clint_timer_starting_cpu(unsigned int cpu)
struct clock_event_device *ce = per_cpu_ptr(&clint_clock_event, cpu);
ce->cpumask = cpumask_of(cpu);
- clockevents_config_and_register(ce, clint_timer_freq, 100, 0x7fffffff);
+ clockevents_config_and_register(ce, clint_timer_freq, 100, ULONG_MAX);
enable_percpu_irq(clint_timer_irq,
irq_get_trigger_type(clint_timer_irq));
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 6a878d227a13b5..489e69169ed4e3 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -258,9 +258,8 @@ static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *ced = dev_id;
struct imx_timer *imxtm = to_imx_timer(ced);
- uint32_t tstat;
- tstat = readl_relaxed(imxtm->base + imxtm->gpt->reg_tstat);
+ readl_relaxed(imxtm->base + imxtm->gpt->reg_tstat);
imxtm->gpt->gpt_irq_acknowledge(imxtm);
diff --git a/drivers/clocksource/timer-imx-sysctr.c b/drivers/clocksource/timer-imx-sysctr.c
index 5a7a951c4efcdf..44525813be1e28 100644
--- a/drivers/clocksource/timer-imx-sysctr.c
+++ b/drivers/clocksource/timer-imx-sysctr.c
@@ -4,48 +4,62 @@
#include <linux/interrupt.h>
#include <linux/clockchips.h>
+#include <linux/slab.h>
#include "timer-of.h"
#define CMP_OFFSET 0x10000
+#define RD_OFFSET 0x20000
#define CNTCV_LO 0x8
#define CNTCV_HI 0xc
#define CMPCV_LO (CMP_OFFSET + 0x20)
#define CMPCV_HI (CMP_OFFSET + 0x24)
#define CMPCR (CMP_OFFSET + 0x2c)
+#define CNTCV_LO_IMX95 (RD_OFFSET + 0x8)
+#define CNTCV_HI_IMX95 (RD_OFFSET + 0xc)
#define SYS_CTR_EN 0x1
#define SYS_CTR_IRQ_MASK 0x2
#define SYS_CTR_CLK_DIV 0x3
-static void __iomem *sys_ctr_base __ro_after_init;
-static u32 cmpcr __ro_after_init;
+struct sysctr_private {
+ u32 cmpcr;
+ u32 lo_off;
+ u32 hi_off;
+};
-static void sysctr_timer_enable(bool enable)
+static void sysctr_timer_enable(struct clock_event_device *evt, bool enable)
{
- writel(enable ? cmpcr | SYS_CTR_EN : cmpcr, sys_ctr_base + CMPCR);
+ struct timer_of *to = to_timer_of(evt);
+ struct sysctr_private *priv = to->private_data;
+ void __iomem *base = timer_of_base(to);
+
+ writel(enable ? priv->cmpcr | SYS_CTR_EN : priv->cmpcr, base + CMPCR);
}
-static void sysctr_irq_acknowledge(void)
+static void sysctr_irq_acknowledge(struct clock_event_device *evt)
{
/*
* Clearing the enable bit (EN = 0) will clear
* the status bit (ISTAT = 0), and the interrupt
* signal will then be negated (acknowledged).
*/
- sysctr_timer_enable(false);
+ sysctr_timer_enable(evt, false);
}
-static inline u64 sysctr_read_counter(void)
+static inline u64 sysctr_read_counter(struct clock_event_device *evt)
{
+ struct timer_of *to = to_timer_of(evt);
+ struct sysctr_private *priv = to->private_data;
+ void __iomem *base = timer_of_base(to);
u32 cnt_hi, tmp_hi, cnt_lo;
do {
- cnt_hi = readl_relaxed(sys_ctr_base + CNTCV_HI);
- cnt_lo = readl_relaxed(sys_ctr_base + CNTCV_LO);
- tmp_hi = readl_relaxed(sys_ctr_base + CNTCV_HI);
+ cnt_hi = readl_relaxed(base + priv->hi_off);
+ cnt_lo = readl_relaxed(base + priv->lo_off);
+ tmp_hi = readl_relaxed(base + priv->hi_off);
} while (tmp_hi != cnt_hi);
return ((u64) cnt_hi << 32) | cnt_lo;
@@ -54,22 +68,24 @@ static inline u64 sysctr_read_counter(void)
static int sysctr_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
+ struct timer_of *to = to_timer_of(evt);
+ void __iomem *base = timer_of_base(to);
u32 cmp_hi, cmp_lo;
u64 next;
- sysctr_timer_enable(false);
+ sysctr_timer_enable(evt, false);
- next = sysctr_read_counter();
+ next = sysctr_read_counter(evt);
next += delta;
cmp_hi = (next >> 32) & 0x00fffff;
cmp_lo = next & 0xffffffff;
- writel_relaxed(cmp_hi, sys_ctr_base + CMPCV_HI);
- writel_relaxed(cmp_lo, sys_ctr_base + CMPCV_LO);
+ writel_relaxed(cmp_hi, base + CMPCV_HI);
+ writel_relaxed(cmp_lo, base + CMPCV_LO);
- sysctr_timer_enable(true);
+ sysctr_timer_enable(evt, true);
return 0;
}
@@ -81,7 +97,7 @@ static int sysctr_set_state_oneshot(struct clock_event_device *evt)
static int sysctr_set_state_shutdown(struct clock_event_device *evt)
{
- sysctr_timer_enable(false);
+ sysctr_timer_enable(evt, false);
return 0;
}
@@ -90,7 +106,7 @@ static irqreturn_t sysctr_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
- sysctr_irq_acknowledge();
+ sysctr_irq_acknowledge(evt);
evt->event_handler(evt);
@@ -117,34 +133,75 @@ static struct timer_of to_sysctr = {
},
};
-static void __init sysctr_clockevent_init(void)
+static int __init __sysctr_timer_init(struct device_node *np)
{
+ struct sysctr_private *priv;
+ void __iomem *base;
+ int ret;
+
+ priv = kzalloc(sizeof(struct sysctr_private), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = timer_of_init(np, &to_sysctr);
+ if (ret) {
+ kfree(priv);
+ return ret;
+ }
+
+ if (!of_property_read_bool(np, "nxp,no-divider")) {
+ /* system counter clock is divided by 3 internally */
+ to_sysctr.of_clk.rate /= SYS_CTR_CLK_DIV;
+ }
+
to_sysctr.clkevt.cpumask = cpu_possible_mask;
+ to_sysctr.private_data = priv;
+
+ base = timer_of_base(&to_sysctr);
+ priv->cmpcr = readl(base + CMPCR) & ~SYS_CTR_EN;
+
+ return 0;
+}
+
+static int __init sysctr_timer_init(struct device_node *np)
+{
+ struct sysctr_private *priv;
+ int ret;
+
+ ret = __sysctr_timer_init(np);
+ if (ret)
+ return ret;
+
+ priv = to_sysctr.private_data;
+ priv->lo_off = CNTCV_LO;
+ priv->hi_off = CNTCV_HI;
clockevents_config_and_register(&to_sysctr.clkevt,
timer_of_rate(&to_sysctr),
0xff, 0x7fffffff);
+
+ return 0;
}
-static int __init sysctr_timer_init(struct device_node *np)
+static int __init sysctr_timer_imx95_init(struct device_node *np)
{
- int ret = 0;
+ struct sysctr_private *priv;
+ int ret;
- ret = timer_of_init(np, &to_sysctr);
+ ret = __sysctr_timer_init(np);
if (ret)
return ret;
- if (!of_property_read_bool(np, "nxp,no-divider")) {
- /* system counter clock is divided by 3 internally */
- to_sysctr.of_clk.rate /= SYS_CTR_CLK_DIV;
- }
-
- sys_ctr_base = timer_of_base(&to_sysctr);
- cmpcr = readl(sys_ctr_base + CMPCR);
- cmpcr &= ~SYS_CTR_EN;
+ priv = to_sysctr.private_data;
+ priv->lo_off = CNTCV_LO_IMX95;
+ priv->hi_off = CNTCV_HI_IMX95;
- sysctr_clockevent_init();
+ clockevents_config_and_register(&to_sysctr.clkevt,
+ timer_of_rate(&to_sysctr),
+ 0xff, 0x7fffffff);
return 0;
}
+
TIMER_OF_DECLARE(sysctr_timer, "nxp,sysctr-timer", sysctr_timer_init);
+TIMER_OF_DECLARE(sysctr_timer_imx95, "nxp,imx95-sysctr-timer", sysctr_timer_imx95_init);
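
sysctr_read_counter() keeps the classic hi/lo/hi sequence for assembling a 64-bit value from two 32-bit registers. A sketch of the retry loop against a fake counter that ticks between reads (the accessors stand in for the readl_relaxed() calls):

/* Sketch: read a 64-bit counter exposed as two 32-bit registers.
 * Re-read the high half until it is stable so a carry between the two
 * reads cannot produce a torn value. fake_cnt stands in for the MMIO
 * counter; read_lo() deliberately ticks it to simulate time passing.
 */
#include <stdint.h>
#include <stdio.h>

static volatile uint64_t fake_cnt = 0x00000001ffffffffULL;

static uint32_t read_hi(void) { return fake_cnt >> 32; }
static uint32_t read_lo(void) { fake_cnt++; return (uint32_t)fake_cnt; }

static uint64_t read_counter64(void)
{
        uint32_t hi, lo, tmp;

        do {
                hi = read_hi();
                lo = read_lo();
                tmp = read_hi();
        } while (tmp != hi);            /* carry happened: retry */

        return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
        printf("%#llx\n", (unsigned long long)read_counter64());
        return 0;
}
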
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index e66dcbd6656658..48ce50c5f5e68e 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -108,13 +108,16 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);
+ /* Clear timer interrupt */
+ riscv_clock_event_stop();
+
ce->cpumask = cpumask_of(cpu);
ce->irq = riscv_clock_event_irq;
if (riscv_timer_cannot_wake_cpu)
ce->features |= CLOCK_EVT_FEAT_C3STOP;
if (static_branch_likely(&riscv_sstc_available))
ce->rating = 450;
- clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
+ clockevents_config_and_register(ce, riscv_timebase, 100, ULONG_MAX);
enable_percpu_irq(riscv_clock_event_irq,
irq_get_trigger_type(riscv_clock_event_irq));
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index c9a753f96ba12d..0a4ea3288bfbea 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -73,7 +73,7 @@ static void stm32_timer_of_bits_set(struct timer_of *to, int bits)
* Accessor helper to get the number of bits in the timer-of private
* structure.
*
- * Returns an integer corresponding to the number of bits.
+ * Returns: an integer corresponding to the number of bits.
*/
static int stm32_timer_of_bits_get(struct timer_of *to)
{
@@ -177,7 +177,7 @@ static irqreturn_t stm32_clock_event_handler(int irq, void *dev_id)
}
/**
- * stm32_timer_width - Sort out the timer width (32/16)
+ * stm32_timer_set_width - Sort out the timer width (32/16)
* @to: a pointer to a timer-of structure
*
* Write the 32-bit max value and read/return the result. If the timer
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 59b0be482f32c6..a86529a707370d 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* timer-ti-32k.c - OMAP2 32k Timer Support
*
* Copyright (C) 2009 Nokia Corporation
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 35efb53d5492a3..94e55c40970a6c 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -302,4 +302,33 @@ config QORIQ_CPUFREQ
which are capable of changing the CPU's frequency dynamically.
endif
+
+config ACPI_CPPC_CPUFREQ
+ tristate "CPUFreq driver based on the ACPI CPPC spec"
+ depends on ACPI_PROCESSOR
+ depends on ARM || ARM64 || RISCV
+ select ACPI_CPPC_LIB
+ help
+ This adds a CPUFreq driver which uses CPPC methods
+ as described in the ACPIv5.1 spec. CPPC stands for
+ Collaborative Processor Performance Controls. It
+ is based on an abstract continuous scale of CPU
+ performance values which allows the remote power
+ processor to flexibly optimize for power and
+ performance. CPPC relies on power management firmware
+ support for its operation.
+
+ If in doubt, say N.
+
+config ACPI_CPPC_CPUFREQ_FIE
+ bool "Frequency Invariance support for CPPC cpufreq driver"
+ depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
+ depends on ARM || ARM64 || RISCV
+ default y
+ help
+ This extends frequency invariance support in the CPPC cpufreq driver,
+ by using CPPC delivered and reference performance counters.
+
+ If in doubt, say N.
+
endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a0ebad77666e30..96b404ce829f31 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -3,32 +3,6 @@
# ARM CPU Frequency scaling drivers
#
-config ACPI_CPPC_CPUFREQ
- tristate "CPUFreq driver based on the ACPI CPPC spec"
- depends on ACPI_PROCESSOR
- select ACPI_CPPC_LIB
- help
- This adds a CPUFreq driver which uses CPPC methods
- as described in the ACPIv5.1 spec. CPPC stands for
- Collaborative Processor Performance Controls. It
- is based on an abstract continuous scale of CPU
- performance values which allows the remote power
- processor to flexibly optimize for power and
- performance. CPPC relies on power management firmware
- support for its operation.
-
- If in doubt, say N.
-
-config ACPI_CPPC_CPUFREQ_FIE
- bool "Frequency Invariance support for CPPC cpufreq driver"
- depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
- default y
- help
- This extends frequency invariance support in the CPPC cpufreq driver,
- by using CPPC delivered and reference performance counters.
-
- If in doubt, say N.
-
config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
tristate "Allwinner nvmem based SUN50I CPUFreq driver"
depends on ARCH_SUNXI
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index e8094fc92491eb..a6e123dfe394d8 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -73,26 +73,6 @@ static inline bool sbi_is_domain_state_available(void)
return data->available;
}
-static int sbi_suspend_finisher(unsigned long suspend_type,
- unsigned long resume_addr,
- unsigned long opaque)
-{
- struct sbiret ret;
-
- ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
- suspend_type, resume_addr, opaque, 0, 0, 0);
-
- return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
-}
-
-static int sbi_suspend(u32 state)
-{
- if (state & SBI_HSM_SUSP_NON_RET_BIT)
- return cpu_suspend(state, sbi_suspend_finisher);
- else
- return sbi_suspend_finisher(state, 0, 0);
-}
-
static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx)
{
@@ -100,9 +80,9 @@ static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
u32 state = states[idx];
if (state & SBI_HSM_SUSP_NON_RET_BIT)
- return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, state);
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend, idx, state);
else
- return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(sbi_suspend,
+ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend,
idx, state);
}
@@ -133,7 +113,7 @@ static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
else
state = states[idx];
- ret = sbi_suspend(state) ? -1 : idx;
+ ret = riscv_sbi_hart_suspend(state) ? -1 : idx;
ct_cpuidle_exit();
@@ -206,17 +186,6 @@ static const struct of_device_id sbi_cpuidle_state_match[] = {
{ },
};
-static bool sbi_suspend_state_is_valid(u32 state)
-{
- if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
- state < SBI_HSM_SUSPEND_RET_PLATFORM)
- return false;
- if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
- state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
- return false;
- return true;
-}
-
static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
{
int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);
@@ -226,7 +195,7 @@ static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
return err;
}
- if (!sbi_suspend_state_is_valid(*state)) {
+ if (!riscv_sbi_suspend_state_is_valid(*state)) {
pr_warn("Invalid SBI suspend state %#x\n", *state);
return -EINVAL;
}
@@ -607,16 +576,8 @@ static int __init sbi_cpuidle_init(void)
int ret;
struct platform_device *pdev;
- /*
- * The SBI HSM suspend function is only available when:
- * 1) SBI version is 0.3 or higher
- * 2) SBI HSM extension is available
- */
- if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
- !sbi_probe_extension(SBI_EXT_HSM)) {
- pr_info("HSM suspend not available\n");
+ if (!riscv_sbi_hsm_is_supported())
return 0;
- }
ret = platform_driver_register(&sbi_cpuidle_driver);
if (ret)
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 1cd304de538815..b2191ade9011c6 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -806,6 +806,8 @@ static int save_iaa_wq(struct idxd_wq *wq)
return -EINVAL;
cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
+ if (!cpus_per_iaa)
+ cpus_per_iaa = 1;
out:
return 0;
}
@@ -821,10 +823,12 @@ static void remove_iaa_wq(struct idxd_wq *wq)
}
}
- if (nr_iaa)
+ if (nr_iaa) {
cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
- else
- cpus_per_iaa = 0;
+ if (!cpus_per_iaa)
+ cpus_per_iaa = 1;
+ } else {
+ cpus_per_iaa = 1;
+ }
}
static int wq_table_add_wqs(int iaa, int cpu)
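
Both hunks above clamp the CPUs-per-device ratio to at least one, so later arithmetic that divides or takes a modulo by cpus_per_iaa cannot hit zero when IAA devices outnumber CPUs. The guard in isolation:

/* Sketch: integer ratio clamped to a minimum of 1, as save_iaa_wq()
 * and remove_iaa_wq() now do. With more IAA devices than CPUs the raw
 * quotient is 0, which would trip any later modulo by this value.
 */
#include <stdio.h>

static int cpus_per_dev(int nr_cpus, int nr_devs)
{
        int n = nr_devs ? nr_cpus / nr_devs : 1;

        return n ? n : 1;
}

int main(void)
{
        printf("%d\n", cpus_per_dev(8, 2));     /* 4 */
        printf("%d\n", cpus_per_dev(2, 8));     /* clamped to 1 */
        return 0;
}
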
diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c
index f80a9af3d16e94..d18a1a5de14495 100644
--- a/drivers/firmware/efi/earlycon.c
+++ b/drivers/firmware/efi/earlycon.c
@@ -252,7 +252,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device,
if (si->lfb_depth != 32)
return -ENODEV;
- font = get_default_font(xres, yres, -1, -1);
+ font = get_default_font(xres, yres, NULL, NULL);
if (!font)
return -ENODEV;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 8859fb0b006d3b..fdf07dd6f4591d 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -203,6 +203,8 @@ static bool generic_ops_supported(void)
name_size = sizeof(name);
+ if (!efi.get_next_variable)
+ return false;
status = efi.get_next_variable(&name_size, &name, &guid);
if (status == EFI_UNSUPPORTED)
return false;
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 4e96a855fdf47b..7e185285955021 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -120,7 +120,7 @@ efi_status_t efi_random_alloc(unsigned long size,
continue;
}
- target = round_up(md->phys_addr, align) + target_slot * align;
+ target = round_up(max(md->phys_addr, alloc_min), align) + target_slot * align;
pages = size / EFI_PAGE_SIZE;
status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
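
The one-line randomalloc fix makes the randomized target honour alloc_min even when a memory descriptor starts below it. A sketch of the target computation, with local round_up/max helpers and illustrative numbers:

/* Sketch: pick a randomized allocation target inside a region while
 * honouring a minimum address, as the fixed line above does. Without
 * the max() the target could land below alloc_min for descriptors
 * that start beneath it. align must be a power of two.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t round_up_p2(uint64_t x, uint64_t align)
{
        return (x + align - 1) & ~(align - 1);
}

static uint64_t target(uint64_t phys, uint64_t alloc_min,
                       uint64_t align, uint64_t slot)
{
        uint64_t base = phys > alloc_min ? phys : alloc_min;

        return round_up_p2(base, align) + slot * align;
}

int main(void)
{
        /* descriptor at 1 MiB, minimum 16 MiB, 2 MiB alignment, slot 3 */
        printf("%#llx\n", (unsigned long long)
               target(0x100000, 0x1000000, 0x200000, 3));
        return 0;
}
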
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 57888614e90f16..6a6ffc6707bd0e 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -476,7 +476,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
efi_status_t status;
char *cmdline_ptr;
- memset(_bss, 0, _ebss - _bss);
+ if (efi_is_native())
+ memset(_bss, 0, _ebss - _bss);
efi_system_table = sys_table_arg;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 182ed8f6785003..5a0c476361c300 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -68,6 +68,7 @@ config DRM_USE_DYNAMIC_DEBUG
config DRM_KUNIT_TEST_HELPERS
tristate
depends on DRM && KUNIT
+ select DRM_KMS_HELPER
help
KUnit Helpers for KMS drivers.
@@ -80,7 +81,6 @@ config DRM_KUNIT_TEST
select DRM_EXEC
select DRM_EXPORT_FOR_TESTS if m
select DRM_GEM_SHMEM_HELPER
- select DRM_KMS_HELPER
select DRM_KUNIT_TEST_HELPERS
select DRM_LIB_RANDOM
select PRIME_NUMBERS
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index f5f2945711be0c..35dd6effa9a34a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -146,7 +146,7 @@ int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev)
{
int ret;
- if (!adev->kfd.init_complete)
+ if (!adev->kfd.init_complete || adev->kfd.client.dev)
return 0;
ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 14dc9d2d8d53ad..df58a6a1a67ec5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2869,14 +2869,16 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
mutex_lock(&process_info->lock);
- drm_exec_init(&exec, 0, 0);
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
drm_exec_retry_on_contention(&exec);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ pr_err("Locking VM PD failed, ret: %d\n", ret);
goto ttm_reserve_fail;
+ }
}
/* Reserve all BOs and page tables/directory. Add all BOs from
@@ -2889,8 +2891,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
gobj = &mem->bo->tbo.base;
ret = drm_exec_prepare_obj(&exec, gobj, 1);
drm_exec_retry_on_contention(&exec);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ pr_err("drm_exec_prepare_obj failed, ret: %d\n", ret);
goto ttm_reserve_fail;
+ }
}
}
@@ -2950,8 +2954,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
* validations above would invalidate DMABuf imports again.
*/
ret = process_validate_vms(process_info, &exec.ticket);
- if (ret)
+ if (ret) {
+ pr_debug("Validating VMs failed, ret: %d\n", ret);
goto validate_map_fail;
+ }
/* Update mappings not managed by KFD */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1e9454e6e4cb4e..5dc24c971b41f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4040,10 +4040,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
* early on during init and before calling to RREG32.
*/
adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
- if (!adev->reset_domain) {
- r = -ENOMEM;
- goto unmap_memory;
- }
+ if (!adev->reset_domain)
+ return -ENOMEM;
/* detect hw virtualization here */
amdgpu_detect_virtualization(adev);
@@ -4053,7 +4051,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_device_get_job_timeout_settings(adev);
if (r) {
dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
- goto unmap_memory;
+ return r;
}
amdgpu_device_set_mcbp(adev);
@@ -4061,12 +4059,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* early init functions */
r = amdgpu_device_ip_early_init(adev);
if (r)
- goto unmap_memory;
+ return r;
/* Get rid of things like offb */
r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
if (r)
- goto unmap_memory;
+ return r;
/* Enable TMZ based on IP_VERSION */
amdgpu_gmc_tmz_set(adev);
@@ -4076,7 +4074,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (adev->gmc.xgmi.supported) {
r = adev->gfxhub.funcs->get_xgmi_info(adev);
if (r)
- goto unmap_memory;
+ return r;
}
/* enable PCIE atomic ops */
@@ -4345,8 +4343,6 @@ release_ras_con:
failed:
amdgpu_vf_error_trans_all(adev);
-unmap_memory:
- iounmap(adev->rmmio);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 15b188aaf68180..80b9642f2bc4f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2479,8 +2479,11 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
}
for (i = 0; i < mgpu_info.num_dgpu; i++) {
adev = mgpu_info.gpu_ins[i].adev;
- if (!adev->kfd.init_complete)
+ if (!adev->kfd.init_complete) {
+ kgd2kfd_init_zone_device(adev);
amdgpu_amdkfd_device_init(adev);
+ amdgpu_amdkfd_drm_client_create(adev);
+ }
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index f8b48fd93108ce..55d5508987ffe5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -687,7 +687,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
- DRM_ERROR("KCQ enable failed\n");
+ DRM_ERROR("KGQ enable failed\n");
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index 55b65fc04b651e..431ec72655ec80 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -129,13 +129,25 @@ static const struct mmu_interval_notifier_ops amdgpu_hmm_hsa_ops = {
*/
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
{
+ int r;
+
if (bo->kfd_bo)
- return mmu_interval_notifier_insert(&bo->notifier, current->mm,
+ r = mmu_interval_notifier_insert(&bo->notifier, current->mm,
addr, amdgpu_bo_size(bo),
&amdgpu_hmm_hsa_ops);
- return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
- amdgpu_bo_size(bo),
- &amdgpu_hmm_gfx_ops);
+ else
+ r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+ amdgpu_bo_size(bo),
+ &amdgpu_hmm_gfx_ops);
+ if (r)
+ /*
+ * Make sure amdgpu_hmm_unregister() doesn't call
+ * mmu_interval_notifier_remove() when the notifier isn't properly
+ * initialized.
+ */
+ bo->notifier.mm = NULL;
+
+ return r;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 3c2b1413058bb7..94b310fdb719d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -1830,6 +1830,10 @@ static int psp_hdcp_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ /* bypass hdcp initialization if dmu is harvested */
+ if (!amdgpu_device_has_display_hardware(psp->adev))
+ return 0;
+
if (!psp->hdcp_context.context.bin_desc.size_bytes ||
!psp->hdcp_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
@@ -1862,6 +1866,9 @@ int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ if (!psp->hdcp_context.context.initialized)
+ return 0;
+
return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
}
@@ -1897,6 +1904,10 @@ static int psp_dtm_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ /* bypass dtm initialization if dmu is harvested */
+ if (!amdgpu_device_has_display_hardware(psp->adev))
+ return 0;
+
if (!psp->dtm_context.context.bin_desc.size_bytes ||
!psp->dtm_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
@@ -1929,6 +1940,9 @@ int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ if (!psp->dtm_context.context.initialized)
+ return 0;
+
return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
}
@@ -2063,6 +2077,10 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ /* bypass securedisplay initialization if dmu is harvested */
+ if (!amdgpu_device_has_display_hardware(psp->adev))
+ return 0;
+
if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
!psp->securedisplay_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8722beba494e56..fc418e670fdae2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -864,6 +864,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
gtt->ttm.dma_address, flags);
}
+ gtt->bound = true;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index b2535023764f49..9c514a606a2f4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -60,6 +60,7 @@
#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
#define FIRMWARE_VCN4_0_5 "amdgpu/vcn_4_0_5.bin"
#define FIRMWARE_VCN4_0_6 "amdgpu/vcn_4_0_6.bin"
+#define FIRMWARE_VCN4_0_6_1 "amdgpu/vcn_4_0_6_1.bin"
#define FIRMWARE_VCN5_0_0 "amdgpu/vcn_5_0_0.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
@@ -85,6 +86,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
@@ -93,14 +95,22 @@ int amdgpu_vcn_early_init(struct amdgpu_device *adev)
{
char ucode_prefix[30];
char fw_name[40];
- int r;
+ int r, i;
- amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
- r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name);
- if (r)
- amdgpu_ucode_release(&adev->vcn.fw);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+ if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6) &&
+ i == 1) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%d.bin", ucode_prefix, i);
+ }
+ r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], fw_name);
+ if (r) {
+ amdgpu_ucode_release(&adev->vcn.fw[i]);
+ return r;
+ }
+ }
return r;
}
@@ -141,7 +151,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
}
}
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
/* Bit 20-23 is the encode major version, non-zero for the new naming convention.
@@ -256,9 +266,10 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
+
+ amdgpu_ucode_release(&adev->vcn.fw[j]);
}
- amdgpu_ucode_release(&adev->vcn.fw);
mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
mutex_destroy(&adev->vcn.vcn_pg_lock);
@@ -354,11 +365,12 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
const struct common_firmware_header *hdr;
unsigned int offset;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
+ memcpy_toio(adev->vcn.inst[i].cpu_addr,
+ adev->vcn.fw[i]->data + offset,
le32_to_cpu(hdr->ucode_size_bytes));
drm_dev_exit(idx);
}
@@ -1043,11 +1055,11 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
-
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
continue;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
/* currently only 2 FW instances are supported */
if (i >= 2) {
dev_info(adev->dev, "More then 2 VCN FW instances!\n");
@@ -1055,7 +1067,7 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
}
idx = AMDGPU_UCODE_ID_VCN + i;
adev->firmware.ucode[idx].ucode_id = idx;
- adev->firmware.ucode[idx].fw = adev->vcn.fw;
+ adev->firmware.ucode[idx].fw = adev->vcn.fw[i];
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 1985f71b4373b8..a418393d89ec91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -306,7 +306,7 @@ struct amdgpu_vcn_ras {
struct amdgpu_vcn {
unsigned fw_version;
struct delayed_work idle_work;
- const struct firmware *fw; /* VCN firmware */
+ const struct firmware *fw[AMDGPU_MAX_VCN_INSTANCES]; /* VCN firmware */
unsigned num_enc_rings;
enum amd_powergating_state cur_state;
bool indirect_sram;
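The rework above turns the single VCN firmware pointer into a per-instance array and requests one image per instance, with instance 1 of VCN 4.0.6 loading a dedicated vcn_4_0_6_1.bin. A tiny sketch of just the naming logic; the hard-coded prefix stands in for what amdgpu_ucode_ip_version_decode() would produce:

#include <stdio.h>

int main(void)
{
	char fw_name[40];
	int i;

	for (i = 0; i < 2; i++) {
		/* stand-in for the IP_VERSION(4, 0, 6) && i == 1 special case */
		if (i == 1)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/vcn_4_0_6_%d.bin", i);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/vcn_4_0_6.bin");
		printf("instance %d -> %s\n", i, fw_name);
	}
	return 0;
}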
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 70c5cc80ecdc00..7a65a2b128ec43 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -575,9 +575,6 @@ static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring,
{
unsigned int ret;
- if (ring->adev->vpe.collaborate_mode)
- return ~0;
-
amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0));
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 904b9ff5ead2f5..f90905ef32c76d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3657,6 +3657,9 @@ static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
+ if (amdgpu_sriov_vf(adev))
+ return;
+
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(10, 1, 10):
soc15_program_register_sequence(adev,
@@ -4982,7 +4985,8 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
u32 tmp;
int i;
- WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
gfx_v10_0_setup_rb(adev);
gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
@@ -7163,7 +7167,7 @@ static int gfx_v10_0_hw_init(void *handle)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))
gfx_v10_3_program_pbb_mode(adev);
- if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0) && !amdgpu_sriov_vf(adev))
gfx_v10_3_set_power_brake_sequence(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index cd0e8a321e4604..17509f32f61a4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -155,6 +155,9 @@ static void gfxhub_v2_1_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* Program the AGP BAR */
WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index 16fe428c0722d2..7aed96fa10a9d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -418,6 +418,12 @@ static u32 ih_v7_0_get_wptr(struct amdgpu_device *adev,
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}
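The added write pairs with the existing one to form a set-then-clear toggle: writing WPTR_OVERFLOW_CLEAR=1 acknowledges the current overflow, and writing 0 immediately afterwards re-arms the hardware so the next overflow remains detectable. A stand-alone sketch of the toggle, using an invented bit position and a plain variable in place of the MMIO register:

#include <stdint.h>
#include <stdio.h>

#define WPTR_OVERFLOW_CLEAR (1u << 5) /* hypothetical bit, not the real layout */

static uint32_t ih_rb_cntl; /* simulated IH_RB_CNTL register */

static void ih_ack_and_rearm_overflow(void)
{
	uint32_t tmp = ih_rb_cntl;	/* RREG32 */

	tmp |= WPTR_OVERFLOW_CLEAR;	/* acknowledge the overflow */
	ih_rb_cntl = tmp;		/* WREG32 */

	tmp &= ~WPTR_OVERFLOW_CLEAR;	/* unset immediately to re-arm detection */
	ih_rb_cntl = tmp;		/* WREG32 */
}

int main(void)
{
	ih_ack_and_rearm_overflow();
	printf("CLEAR bit left at %u\n", ih_rb_cntl & WPTR_OVERFLOW_CLEAR);
	return 0;
}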
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
index b3961968c10c4c..238ea40c245002 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
@@ -99,16 +99,15 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 3, 0):
case IP_VERSION(3, 3, 1):
- mmhub_cid = mmhub_client_ids_v3_3[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ?
+ mmhub_client_ids_v3_3[cid][rw] :
+ cid == 0x140 ? "UMSCH" : NULL;
break;
default:
mmhub_cid = NULL;
break;
}
- if (!mmhub_cid && cid == 0x140)
- mmhub_cid = "UMSCH";
-
dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
mmhub_cid ? mmhub_cid : "unknown", cid);
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 2d904ee72701af..34237a1b1f2e45 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -431,16 +431,11 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 doorbell_offset, doorbell;
u32 rb_cntl, ib_cntl;
- int i, unset = 0;
+ int i;
for_each_inst(i, inst_mask) {
sdma[i] = &adev->sdma.instance[i].ring;
- if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = 1;
- }
-
rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
@@ -487,20 +482,10 @@ static void sdma_v4_4_2_inst_rlc_stop(struct amdgpu_device *adev,
static void sdma_v4_4_2_inst_page_stop(struct amdgpu_device *adev,
uint32_t inst_mask)
{
- struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
int i;
- bool unset = false;
for_each_inst(i, inst_mask) {
- sdma[i] = &adev->sdma.instance[i].page;
-
- if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
- (!unset)) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = true;
- }
-
rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
RB_ENABLE, 0);
@@ -950,13 +935,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
r = amdgpu_ring_test_helper(page);
if (r)
return r;
-
- if (adev->mman.buffer_funcs_ring == page)
- amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
-
- if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 25ba27151ac0f2..aaceecd558cf96 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -304,7 +304,7 @@ static int vcn_v1_0_resume(void *handle)
*/
static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -371,7 +371,7 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
uint32_t offset;
/* cache window 0: fw */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 18794394c5a052..e357d8cf0c0154 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -330,7 +330,7 @@ static int vcn_v2_0_resume(void *handle)
*/
static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
uint32_t offset;
if (amdgpu_sriov_vf(adev))
@@ -386,7 +386,7 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -1878,7 +1878,7 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
init_table += header->vcn_table_offset;
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index aba403d7180657..1cd8a94b0fbc23 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -414,13 +414,15 @@ static int vcn_v2_5_resume(void *handle)
*/
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size;
uint32_t offset;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
+
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
@@ -469,7 +471,7 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -1240,7 +1242,7 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
/* mc resume*/
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V1_0_INSERT_DIRECT_WT(
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index e02af4de521c6f..8f82fb887e9c20 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -449,7 +449,7 @@ static int vcn_v3_0_resume(void *handle)
*/
static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst]->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -499,7 +499,7 @@ static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -1332,7 +1332,7 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 8ab01ae919d2e3..832d15f7b5f61c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -382,7 +382,7 @@ static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -442,7 +442,7 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
{
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -1289,7 +1289,7 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
regUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 810bbfccd6f2ea..203fa988322bdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -332,7 +332,7 @@ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
uint32_t offset, size, vcn_inst;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
vcn_inst = GET_INST(VCN, inst_idx);
@@ -407,7 +407,7 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -894,7 +894,7 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 0468955338b755..501e53e69f2a0c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -45,7 +45,7 @@
#define mmUVD_DPG_LMA_DATA_BASE_IDX regUVD_DPG_LMA_DATA_BASE_IDX
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
-#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN1_VID_SOC_ADDRESS_3_0 (0x48300 + 0x38000)
#define VCN_HARVEST_MMSCH 0
@@ -329,7 +329,7 @@ static void vcn_v4_0_5_mc_resume(struct amdgpu_device *adev, int inst)
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -390,7 +390,7 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -486,7 +486,8 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
- VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+ adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
@@ -911,7 +912,6 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
VCN, inst_idx, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
-
if (indirect)
amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index d6ee9958ba5fcc..bc60c554eb3296 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -290,7 +290,7 @@ static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -351,7 +351,7 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1c9c6096e28fb3..2851719d712161 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1767,6 +1767,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
adev->dm.dc->debug.force_subvp_mclk_switch = true;
+ if (amdgpu_dc_debug_mask & DC_ENABLE_DML2)
+ adev->dm.dc->debug.using_dml2 = true;
+
adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
@@ -11271,18 +11274,24 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (!adev->dm.freesync_module)
goto update;
- if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
- || sink->sink_signal == SIGNAL_TYPE_EDP) {
+ if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ sink->sink_signal == SIGNAL_TYPE_EDP)) {
bool edid_check_required = false;
- if (edid) {
- edid_check_required = is_dp_capable_without_timing_msa(
- adev->dm.dc,
- amdgpu_dm_connector);
+ if (is_dp_capable_without_timing_msa(adev->dm.dc,
+ amdgpu_dm_connector)) {
+ if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
+ freesync_capable = true;
+ amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+ } else {
+ edid_check_required = edid->version > 1 ||
+ (edid->version == 1 &&
+ edid->revision > 1);
+ }
}
- if (edid_check_required == true && (edid->version > 1 ||
- (edid->version == 1 && edid->revision > 1))) {
+ if (edid_check_required) {
for (i = 0; i < 4; i++) {
timing = &edid->detailed_timings[i];
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 668f05c8654ef0..bec252e1dd27a9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -216,6 +216,16 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950)
clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950;
+ /* DPPCLK */
+ dcn32_init_single_clock(clk_mgr, PPCLK_DPPCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
+ &num_entries_per_clk->num_dppclk_levels);
+ num_levels = num_entries_per_clk->num_dppclk_levels;
+ clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DPPCLK);
+ //HW recommends limit of 1950 MHz in DPP clock for all DCN3.2.x
+ if (clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz > 1950)
+ clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = 1950;
+
if (num_entries_per_clk->num_dcfclk_levels &&
num_entries_per_clk->num_dtbclk_levels &&
num_entries_per_clk->num_dispclk_levels)
@@ -240,6 +250,10 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
= khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz);
}
+ for (i = 0; i < num_levels; i++)
+ if (clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz > 1950)
+ clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz = 1950;
+
/* Get UCLK, update bounding box */
clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5211c1c0f3c0cf..e7dc128f6284b4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1302,6 +1302,54 @@ static void disable_vbios_mode_if_required(
}
}
+/**
+ * wait_for_blank_complete - wait for all active OPPs to finish pending blank
+ * pattern updates
+ *
+ * @dc: [in] dc reference
+ * @context: [in] hardware context in use
+ */
+static void wait_for_blank_complete(struct dc *dc,
+ struct dc_state *context)
+{
+ struct pipe_ctx *opp_head;
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ if (!hws->funcs.wait_for_blank_complete)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ opp_head = &context->res_ctx.pipe_ctx[i];
+
+ if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
+ dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
+ continue;
+
+ hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
+ }
+}
+
+static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
+{
+ struct pipe_ctx *otg_master;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ otg_master = &context->res_ctx.pipe_ctx[i];
+ if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
+ dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
+ continue;
+ tg = otg_master->stream_res.tg;
+ if (tg->funcs->wait_odm_doublebuffer_pending_clear)
+ tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+ }
+
+ /* ODM update may require reprogramming the blank pattern for each OPP */
+ wait_for_blank_complete(dc, context);
+}
+
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
@@ -1993,6 +2041,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize bw */
wait_for_no_pipes_pending(dc, context);
+ /*
+ * The optimized dispclk depends on the ODM setup, so wait for
+ * pending ODM updates to complete before optimizing bandwidth.
+ */
+ wait_for_odm_update_pending_complete(dc, context);
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
/* Need to do otg sync again as otg could be out of sync due to otg
@@ -3270,6 +3323,9 @@ static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_s
if (stream->link->replay_settings.config.replay_supported)
return true;
+ if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
+ return true;
+
return false;
}
@@ -3493,7 +3549,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
top_pipe_to_program->stream->update_flags.raw = 0;
}
-static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
+static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
{
/*
* This function calls HWSS to wait for any potentially double buffered
@@ -3531,6 +3587,7 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state
}
}
}
+ wait_for_odm_update_pending_complete(dc, dc_context);
}
static void commit_planes_for_stream(struct dc *dc,
@@ -4844,22 +4901,16 @@ void dc_exit_ips_for_hw_access(struct dc *dc)
bool dc_dmub_is_ips_idle_state(struct dc *dc)
{
- uint32_t idle_state = 0;
-
if (dc->debug.disable_idle_power_optimizations)
return false;
if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
return false;
- if (dc->hwss.get_idle_state)
- idle_state = dc->hwss.get_idle_state(dc);
-
- if (!(idle_state & DMUB_IPS1_ALLOW_MASK) ||
- !(idle_state & DMUB_IPS2_ALLOW_MASK))
- return true;
+ if (!dc->ctx->dmub_srv)
+ return false;
- return false;
+ return dc->ctx->dmub_srv->idle_allowed;
}
/* set min and max memory clock to lowest and highest DPM level, respectively */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 180ac47868c22a..5cc7f8da209c59 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -334,7 +334,8 @@ static void dc_state_free(struct kref *kref)
void dc_state_release(struct dc_state *state)
{
- kref_put(&state->refcount, dc_state_free);
+ if (state != NULL)
+ kref_put(&state->refcount, dc_state_free);
}
/*
* dc_state_add_stream() - Add a new dc_stream_state to a dc_state.
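With the NULL check added, dc_state_release() becomes safe to call unconditionally on error paths that may hold no state. A toy refcount illustrating the NULL-tolerant release, with invented names:

#include <stdio.h>

/* Toy refcounted state; release tolerates NULL just like the hunk above. */
struct toy_state {
	int refcount;
};

static void toy_state_release(struct toy_state *state)
{
	if (state != NULL && --state->refcount == 0)
		printf("state freed\n");
}

int main(void)
{
	struct toy_state s = { .refcount = 1 };

	toy_state_release(NULL);	/* harmless no-op on error paths */
	toy_state_release(&s);		/* drops the last reference */
	return 0;
}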
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 9900dda2eef5cd..be2ac5c442a480 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1085,9 +1085,9 @@ struct replay_settings {
/* SMU optimization is enabled */
bool replay_smu_opt_enable;
/* Current Coasting vtotal */
- uint16_t coasting_vtotal;
+ uint32_t coasting_vtotal;
/* Coasting vtotal table */
- uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
+ uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
/* Maximum link off frame count */
enum replay_link_off_frame_count_level link_off_frame_count_level;
/* Replay pseudo vtotal for abm + ips on full screen video which can improve ips residency */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 48a40dcc7050bd..5838a11efd00c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -384,6 +384,7 @@ static const struct opp_funcs dcn10_opp_funcs = {
.opp_set_disp_pattern_generator = NULL,
.opp_program_dpg_dimensions = NULL,
.dpg_is_blanked = NULL,
+ .dpg_is_pending = NULL,
.opp_destroy = opp1_destroy
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
index 0784d01986610d..fbf1b6370eb23a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
@@ -337,6 +337,19 @@ bool opp2_dpg_is_blanked(struct output_pixel_processor *opp)
(double_buffer_pending == 0);
}
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+ uint32_t double_buffer_pending;
+ uint32_t dpg_en;
+
+ REG_GET(DPG_CONTROL, DPG_EN, &dpg_en);
+
+ REG_GET(DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending);
+
+ return (dpg_en == 1 && double_buffer_pending == 1);
+}
+
void opp2_program_left_edge_extra_pixel (
struct output_pixel_processor *opp,
bool count)
@@ -363,6 +376,7 @@ static struct opp_funcs dcn20_opp_funcs = {
.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
.dpg_is_blanked = opp2_dpg_is_blanked,
+ .dpg_is_pending = opp2_dpg_is_pending,
.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
index 3ab221bdd27dd2..8f186abd558db4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
@@ -159,6 +159,8 @@ void opp2_program_dpg_dimensions(
bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp);
+
void opp2_dpg_set_blank_color(
struct output_pixel_processor *opp,
const struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
index 8e77db46a40901..6a71ba3dfc6327 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
@@ -50,6 +50,7 @@ static struct opp_funcs dcn201_opp_funcs = {
.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
.dpg_is_blanked = opp2_dpg_is_blanked,
+ .dpg_is_pending = opp2_dpg_is_pending,
.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 87760600e154da..f98def6c8c2d23 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -782,3 +782,9 @@ void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc
pipe_cnt++;
}
}
+
+void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context)
+{
+ if (dcn32_subvp_in_use(dc, context) && context->bw_ctx.bw.dcn.clk.dcfclk_khz <= MIN_SUBVP_DCFCLK_KHZ)
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = MIN_SUBVP_DCFCLK_KHZ;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index b49e1dc9d8ba51..a0a65e0991041d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -623,6 +623,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* - Not TMZ surface
*/
if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
+ !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
(refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 1ba6933d2b3617..17a58f41fc6a85 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -824,13 +824,25 @@ static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state
static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in)
{
+ dml_uint_t width, height;
+
+ if (in->timing.h_addressable > 3840)
+ width = 3840;
+ else
+ width = in->timing.h_addressable; // 4K max
+
+ if (in->timing.v_addressable > 2160)
+ height = 2160;
+ else
+ height = in->timing.v_addressable; // 4K max
+
out->CursorBPP[location] = dml_cur_32bit;
out->CursorWidth[location] = 256;
out->GPUVMMinPageSizeKBytes[location] = 256;
- out->ViewportWidth[location] = in->timing.h_addressable;
- out->ViewportHeight[location] = in->timing.v_addressable;
+ out->ViewportWidth[location] = width;
+ out->ViewportHeight[location] = height;
out->ViewportStationary[location] = false;
out->ViewportWidthChroma[location] = 0;
out->ViewportHeightChroma[location] = 0;
@@ -849,7 +861,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned
out->HTapsChroma[location] = 0;
out->VTapsChroma[location] = 0;
out->SourceScan[location] = dml_rotation_0;
- out->ScalerRecoutWidth[location] = in->timing.h_addressable;
+ out->ScalerRecoutWidth[location] = width;
out->LBBitPerPixel[location] = 57;
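The dummy plane configuration above clamps the viewport and scaler recout to a 4K ceiling instead of passing through arbitrary addressable sizes. The same clamp as a tiny helper, with the bounds taken from the hunk:

#include <stdio.h>

/* 4K ceiling from the hunk above, expressed as a small clamp helper. */
static unsigned int clamp_dim(unsigned int v, unsigned int max)
{
	return v > max ? max : v;
}

int main(void)
{
	printf("%u x %u\n", clamp_dim(7680, 3840), clamp_dim(4320, 2160));
	return 0;
}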
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 2a58a7687bdb57..72cca367062e16 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -703,13 +703,8 @@ static inline struct dml2_context *dml2_allocate_memory(void)
return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
}
-bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
- // Allocate Mode Lib Ctx
- *dml2 = dml2_allocate_memory();
-
- if (!(*dml2))
- return false;
// Store config options
(*dml2)->config = *config;
@@ -737,9 +732,18 @@ bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options
initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);
initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
+}
+
+bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+{
+ // Allocate Mode Lib Ctx
+ *dml2 = dml2_allocate_memory();
+
+ if (!(*dml2))
+ return false;
+
+ dml2_init(in_dc, config, dml2);
- /*Initialize DML20 instance which calls dml2_core_create, and core_dcn3_populate_informative*/
- //dml2_initialize_instance(&(*dml_ctx)->v20.dml_init);
return true;
}
@@ -779,3 +783,11 @@ bool dml2_create_copy(struct dml2_context **dst_dml2,
return true;
}
+
+void dml2_reinit(const struct dc *in_dc,
+ const struct dml2_configuration_options *config,
+ struct dml2_context **dml2)
+{
+
+ dml2_init(in_dc, config, dml2);
+}
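The refactor above separates one-time allocation (dml2_create()) from parameter-dependent setup (dml2_init()), which lets the new dml2_reinit() re-derive state on an existing context after the bounding box changes. A schematic sketch of that split with placeholder types:

#include <stdbool.h>
#include <stdlib.h>

/* Placeholder context; only the allocate/init/reinit shape matters here. */
struct toy_ctx {
	int soc_bbox;
};

static void toy_init(struct toy_ctx **ctx, int bbox)
{
	(*ctx)->soc_bbox = bbox; /* (re)derive state from current parameters */
}

static bool toy_create(struct toy_ctx **ctx, int bbox)
{
	*ctx = calloc(1, sizeof(**ctx)); /* allocation happens exactly once */
	if (!*ctx)
		return false;
	toy_init(ctx, bbox);
	return true;
}

static void toy_reinit(struct toy_ctx **ctx, int bbox)
{
	toy_init(ctx, bbox); /* no reallocation, just reprogram */
}

int main(void)
{
	struct toy_ctx *ctx;

	if (!toy_create(&ctx, 1))
		return 1;
	toy_reinit(&ctx, 2); /* e.g. after update_bw_bounding_box */
	free(ctx);
	return 0;
}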
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index ee0eb184eb6d7e..cc662d682fd4de 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -214,6 +214,9 @@ void dml2_copy(struct dml2_context *dst_dml2,
struct dml2_context *src_dml2);
bool dml2_create_copy(struct dml2_context **dst_dml2,
struct dml2_context *src_dml2);
+void dml2_reinit(const struct dc *in_dc,
+ const struct dml2_configuration_options *config,
+ struct dml2_context **dml2);
/*
* dml2_validate - Determines if a display configuration is supported or not.
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index c55d5155ecb9c0..8b3536c380b8de 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -1498,6 +1498,11 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
return;
}
+ if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
+ resource_is_odm_topology_changed(new_pipe, old_pipe))
+ /* Detect odm changes */
+ new_pipe->update_flags.bits.odm = 1;
+
/* Exit on unchanged, unused pipe */
if (!old_pipe->plane_state && !new_pipe->plane_state)
return;
@@ -1551,10 +1556,6 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
/* Detect top pipe only changes */
if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
- /* Detect odm changes */
- if (resource_is_odm_topology_changed(new_pipe, old_pipe))
- new_pipe->update_flags.bits.odm = 1;
-
/* Detect global sync changes */
if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
|| old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
@@ -1999,19 +2000,20 @@ void dcn20_program_front_end_for_ctx(
DC_LOGGER_INIT(dc->ctx->logger);
unsigned int prev_hubp_count = 0;
unsigned int hubp_count = 0;
+ struct pipe_ctx *pipe;
if (resource_is_pipe_topology_changed(dc->current_state, context))
resource_log_pipe_topology_update(dc, context);
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
- ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+ ASSERT(!pipe->plane_state->triplebuffer_flips);
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
- dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ dc, pipe, pipe->plane_state->triplebuffer_flips);
}
}
}
@@ -2085,12 +2087,22 @@ void dcn20_program_front_end_for_ctx(
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
+ /* update ODM for blanked OTG master pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+ !resource_is_pipe_type(pipe, DPP_PIPE) &&
+ pipe->update_flags.bits.odm &&
+ hws->funcs.update_odm)
+ hws->funcs.update_odm(dc, context, pipe);
+ }
+
/*
* Program all updated pipes, order matters for mpcc setup. Start with
* top pipe and program all pipes that follow in order
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->plane_state && !pipe->top_pipe) {
while (pipe) {
@@ -2129,17 +2141,6 @@ void dcn20_program_front_end_for_ctx(
context->stream_status[0].plane_count > 1) {
pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
}
-
- /* when dynamic ODM is active, pipes must be reconfigured when all planes are
- * disabled, as some transitions will leave software and hardware state
- * mismatched.
- */
- if (dc->debug.enable_single_display_2to1_odm_policy &&
- pipe->stream &&
- pipe->update_flags.bits.disable &&
- !pipe->prev_odm_pipe &&
- hws->funcs.update_odm)
- hws->funcs.update_odm(dc, context, pipe);
}
}
@@ -2451,7 +2452,7 @@ bool dcn20_wait_for_blank_complete(
int counter;
for (counter = 0; counter < 1000; counter++) {
- if (opp->funcs->dpg_is_blanked(opp))
+ if (!opp->funcs->dpg_is_pending(opp))
break;
udelay(100);
@@ -2462,7 +2463,7 @@ bool dcn20_wait_for_blank_complete(
return false;
}
- return true;
+ return opp->funcs->dpg_is_blanked(opp);
}
bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
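dcn20_wait_for_blank_complete() now polls until no double-buffered update is pending and only then reports whether the generator actually ended up blanked, instead of treating an already-blanked read as completion. The reworked wait reduced to a runnable toy, with a counter standing in for hardware progress:

#include <stdbool.h>
#include <stdio.h>

static int pending_updates = 3;	/* fake double-buffer pending state */
static bool blanked = true;	/* fake DPG blank state */

static bool dpg_is_pending(void) { return pending_updates > 0; }
static bool dpg_is_blanked(void) { return blanked; }

static bool wait_for_blank_complete(void)
{
	int counter;

	for (counter = 0; counter < 1000; counter++) {
		if (!dpg_is_pending())
			break;
		pending_updates--;	/* stands in for udelay(100) + HW progress */
	}
	if (counter == 1000)
		return false;		/* timed out with an update still pending */

	return dpg_is_blanked();	/* only now check the final blank state */
}

int main(void)
{
	printf("blank complete: %d\n", wait_for_blank_complete());
	return 0;
}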
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index 7e6b7f2a6dc9ea..8bc3d01537bbd4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -812,10 +812,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
if (pipe_ctx == NULL)
return;
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
pipe_ctx->stream_res.stream_enc,
enable);
+
+ /* Wait for two frames to make sure AV mute is sent out */
+ if (enable) {
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ }
+ }
}
void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index aa36d7a56ca8c3..c0b526cf178654 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -1156,6 +1156,13 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
dsc->funcs->dsc_disconnect(dsc);
}
}
+
+ if (!resource_is_pipe_type(pipe_ctx, DPP_PIPE))
+ /*
+ * the blank pattern is generated by the OPP; reprogram it
+ * because the OPP count changed
+ */
+ dc->hwseq->funcs.blank_pixel_data(dc, pipe_ctx, true);
}
unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
@@ -1778,3 +1785,26 @@ void dcn32_prepare_bandwidth(struct dc *dc,
context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
}
}
+
+void dcn32_interdependent_update_lock(struct dc *dc,
+ struct dc_state *context, bool lock)
+{
+ unsigned int i;
+ struct pipe_ctx *pipe;
+ struct timing_generator *tg;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
+
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !tg->funcs->is_tg_enabled(tg) ||
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
+ continue;
+
+ if (lock)
+ dc->hwss.pipe_control_lock(dc, pipe, true);
+ else
+ dc->hwss.pipe_control_lock(dc, pipe, false);
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
index 069e20bc87c0a7..f55c11fc56ec7a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
@@ -129,4 +129,6 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
void dcn32_prepare_bandwidth(struct dc *dc,
struct dc_state *context);
+void dcn32_interdependent_update_lock(struct dc *dc,
+ struct dc_state *context, bool lock);
#endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index 2b073123d3ede2..67d661dbd5b7c7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -58,7 +58,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.disable_plane = dcn20_disable_plane,
.disable_pixel_data = dcn20_disable_pixel_data,
.pipe_control_lock = dcn20_pipe_control_lock,
- .interdependent_update_lock = dcn10_lock_all_pipes,
+ .interdependent_update_lock = dcn32_interdependent_update_lock,
.cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn32_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index aee5372e292c5a..d89c92370d5b3a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -337,6 +337,9 @@ struct opp_funcs {
bool (*dpg_is_blanked)(
struct output_pixel_processor *opp);
+ bool (*dpg_is_pending)(struct output_pixel_processor *opp);
+
+
void (*opp_dpg_set_blank_color)(
struct output_pixel_processor *opp,
const struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index d98d72f35be5bd..ffad8fe16c54dc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -331,6 +331,7 @@ struct timing_generator_funcs {
void (*init_odm)(struct timing_generator *tg);
void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
+ void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index 26fe81f213da55..bf29fc58ea6a62 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -285,12 +285,12 @@ struct link_service {
enum replay_FW_Message_type msg,
union dmub_replay_cmd_set *cmd_data);
bool (*edp_set_coasting_vtotal)(
- struct dc_link *link, uint16_t coasting_vtotal);
+ struct dc_link *link, uint32_t coasting_vtotal);
bool (*edp_replay_residency)(const struct dc_link *link,
unsigned int *residency, const bool is_start,
const bool is_alpm);
bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link,
- const unsigned int *power_opts, uint16_t coasting_vtotal);
+ const unsigned int *power_opts, uint32_t coasting_vtotal);
bool (*edp_wait_for_t12)(struct dc_link *link);
bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index acfbbc638cc647..3baa2bdd6dd652 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -1034,7 +1034,7 @@ bool edp_send_replay_cmd(struct dc_link *link,
return true;
}
-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal)
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
@@ -1073,7 +1073,7 @@ bool edp_replay_residency(const struct dc_link *link,
}
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
- const unsigned int *power_opts, uint16_t coasting_vtotal)
+ const unsigned int *power_opts, uint32_t coasting_vtotal)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index 34e521af7bb482..a158c6234d4225 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -59,12 +59,12 @@ bool edp_setup_replay(struct dc_link *link,
bool edp_send_replay_cmd(struct dc_link *link,
enum replay_FW_Message_type msg,
union dmub_replay_cmd_set *cmd_data);
-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal);
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal);
bool edp_replay_residency(const struct dc_link *link,
unsigned int *residency, const bool is_start, const bool is_alpm);
bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
- const unsigned int *power_opts, uint16_t coasting_vtotal);
+ const unsigned int *power_opts, uint32_t coasting_vtotal);
bool edp_wait_for_t12(struct dc_link *link);
bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index ab81594a7fadcc..6c2e84d3967fc5 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -557,7 +557,8 @@ struct dcn_optc_registers {
type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
type OTG_CRC_DATA_FORMAT;\
type OTG_V_TOTAL_LAST_USED_BY_DRR;\
- type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;
+ type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;\
+ type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING;
#define TG_REG_FIELD_LIST_DCN3_2(type) \
type OTG_H_TIMING_DIV_MODE_MANUAL;
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index 82349354332548..f07a4c7e48bc23 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -122,6 +122,13 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi
}
}
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(tg);
+
+ REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING, 0, 2, 50000);
+}
+
void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -345,6 +352,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.set_odm_bypass = optc32_set_odm_bypass,
.set_odm_combine = optc32_set_odm_combine,
.get_odm_combine_segments = optc32_get_odm_combine_segments,
+ .wait_odm_doublebuffer_pending_clear = optc32_wait_odm_doublebuffer_pending_clear,
.set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode,
.get_optc_source = optc2_get_optc_source,
.set_out_mux = optc3_set_out_mux,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
index 8ce3b178cab065..0c2c1469556196 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
@@ -183,5 +183,6 @@ void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool man
void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments);
void optc32_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg);
#endif /* __DC_OPTC_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 3f3951f3ba9834..ce1754cc1f4631 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1771,6 +1771,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
dcn32_override_min_req_memclk(dc, context);
+ dcn32_override_min_req_dcfclk(dc, context);
BW_VAL_TRACE_END_WATERMARKS();
@@ -1930,6 +1931,8 @@ static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw
{
DC_FP_START();
dcn32_update_bw_bounding_box_fpu(dc, bw_params);
+ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
DC_FP_END();
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 0c87b0fabba7d9..2258c5c7212d86 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -42,6 +42,7 @@
#define SUBVP_ACTIVE_MARGIN_LIST_LEN 2
#define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800
#define DCN3_2_VMIN_DISPCLK_HZ 717000000
+#define MIN_SUBVP_DCFCLK_KHZ 400000
#define TO_DCN32_RES_POOL(pool)\
container_of(pool, struct dcn32_resource_pool, base)
@@ -181,6 +182,8 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);
+void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index b356fed1726d92..296a0a8e71459f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1581,6 +1581,8 @@ static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
{
DC_FP_START();
dcn321_update_bw_bounding_box_fpu(dc, bw_params);
+ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
DC_FP_END();
}
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index a529e369b2ace9..af3fe8bb0728b1 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -3238,6 +3238,14 @@ struct dmub_cmd_replay_set_coasting_vtotal_data {
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
+ /**
+ * 16-bit value dictated by the driver that carries the high 16 bits of the coasting vtotal.
+ */
+ uint16_t coasting_vtotal_high;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[2];
};
/**
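Since the DMUB command field stays 16 bits wide, the widened 32-bit coasting vtotal presumably travels as a low/high pair, with the new coasting_vtotal_high carrying the upper half. A sketch of that packing; the exact driver-side split is an assumption:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t coasting_vtotal = 0x0002abcd;

	/* low half fits the existing 16-bit command field ... */
	uint16_t coasting_vtotal_low  = (uint16_t)(coasting_vtotal & 0xffff);
	/* ... and the new field carries the upper 16 bits */
	uint16_t coasting_vtotal_high = (uint16_t)(coasting_vtotal >> 16);

	printf("low=0x%04x high=0x%04x\n", coasting_vtotal_low,
	       coasting_vtotal_high);
	return 0;
}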
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 8c137d7c032e1f..7c9805705fd380 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -513,6 +513,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+ if (!display)
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index e304e8435fb8f1..2a3698fd2dc242 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -975,7 +975,7 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
void set_replay_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
- uint16_t vtotal)
+ uint32_t vtotal)
{
link->replay_settings.coasting_vtotal_table[type] = vtotal;
}
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index bef4815e1703d7..ff7e6f3cd6be23 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -56,7 +56,7 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
void set_replay_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
- uint16_t vtotal);
+ uint32_t vtotal);
void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
void calculate_replay_link_off_frame_count(struct dc_link *link,
uint16_t vtotal, uint16_t htotal);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 1d96eb274d72d4..0c2d04f978ac92 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1283,10 +1283,8 @@ static int arcturus_get_power_limit(struct smu_context *smu,
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
- struct smu_11_0_powerplay_table *powerplay_table =
- (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit;
if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
/* the last hope to figure out the ppt limit */
@@ -1302,26 +1300,10 @@ static int arcturus_get_power_limit(struct smu_context *smu,
*current_power_limit = power_limit;
if (default_power_limit)
*default_power_limit = power_limit;
-
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-
- dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
- od_percent_upper, od_percent_lower, power_limit);
-
- if (max_power_limit) {
- *max_power_limit = power_limit * (100 + od_percent_upper);
- *max_power_limit /= 100;
- }
-
- if (min_power_limit) {
- *min_power_limit = power_limit * (100 - od_percent_lower);
- *min_power_limit /= 100;
- }
+ if (max_power_limit)
+ *max_power_limit = power_limit;
+ if (min_power_limit)
+ *min_power_limit = power_limit;
return 0;
}
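For Arcturus the overdrive scaling is dropped entirely and the stock limit is reported for both bounds. The SMU generations below keep the computation the removed lines performed; as a standalone sketch of that formula (the function name is hypothetical, the arithmetic is taken from the deleted code):

        static void demo_od_scale_limits(uint32_t power_limit,
                                         uint32_t od_percent_upper,
                                         uint32_t od_percent_lower,
                                         uint32_t *max_power_limit,
                                         uint32_t *min_power_limit)
        {
                /* widen the stock limit by the OD percentages */
                if (max_power_limit)
                        *max_power_limit = power_limit * (100 + od_percent_upper) / 100;
                if (min_power_limit)
                        *min_power_limit = power_limit * (100 - od_percent_lower) / 100;
        }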
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index ed189a3878ebe7..836b1df7992862 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2339,7 +2339,7 @@ static int navi10_get_power_limit(struct smu_context *smu,
(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
/* the last hope to figure out the ppt limit */
@@ -2356,13 +2356,16 @@ static int navi10_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled &&
- navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT))
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ } else if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index e2ad2b972ab0b3..1f18b61884f3f2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -617,6 +617,12 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
return throttler_status;
}
+static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
+ enum SMU_11_0_7_ODFEATURE_CAP cap)
+{
+ return od_table->cap[cap];
+}
+
static int sienna_cichlid_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
@@ -625,7 +631,8 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
{
struct smu_11_0_7_powerplay_table *powerplay_table =
(struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint16_t *table_member;
GET_PPTABLE_MEMBER(SocketPowerLimitAc, &table_member);
@@ -640,12 +647,16 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT)) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ } else if ((sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT))) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
@@ -1250,12 +1261,6 @@ static bool sienna_cichlid_is_support_fine_grained_dpm(struct smu_context *smu,
return dpm_desc->SnapToDiscrete == 0;
}
-static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
- enum SMU_11_0_7_ODFEATURE_CAP cap)
-{
- return od_table->cap[cap];
-}
-
static void sienna_cichlid_get_od_setting_range(struct smu_11_0_7_overdrive_table *od_table,
enum SMU_11_0_7_ODSETTING_ID setting,
uint32_t *min, uint32_t *max)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 9b80f18ea6c359..9c03296f92cdd4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2356,7 +2356,7 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
(struct smu_13_0_0_powerplay_table *)table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
@@ -2369,12 +2369,16 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ } else if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 3dc7b60cb0754d..7318964f1f148f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2320,7 +2320,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
(struct smu_13_0_7_powerplay_table *)table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
@@ -2333,12 +2333,16 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT))) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ } else if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index e7c4bef74aa46a..4b2ae27f0a57f2 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -441,23 +441,21 @@ lt8912_connector_mode_valid(struct drm_connector *connector,
static int lt8912_connector_get_modes(struct drm_connector *connector)
{
const struct drm_edid *drm_edid;
- int ret = -1;
- int num = 0;
struct lt8912 *lt = connector_to_lt8912(connector);
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ int ret, num;
drm_edid = drm_bridge_edid_read(lt->hdmi_port, connector);
drm_edid_connector_update(connector, drm_edid);
- if (drm_edid) {
- num = drm_edid_connector_add_modes(connector);
- } else {
- return ret;
- }
+ if (!drm_edid)
+ return 0;
+
+ num = drm_edid_connector_add_modes(connector);
ret = drm_display_info_set_bus_formats(&connector->display_info,
&bus_format, 1);
- if (ret)
- num = ret;
+ if (ret < 0)
+ num = 0;
drm_edid_free(drm_edid);
return num;
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index bcf8bccd86d6cc..f4f593ad8f7957 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -294,8 +294,8 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
static int lt9611uxc_connector_get_modes(struct drm_connector *connector)
{
struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector);
- unsigned int count;
const struct drm_edid *drm_edid;
+ int count;
drm_edid = drm_bridge_edid_read(&lt9611uxc->bridge, connector);
drm_edid_connector_update(connector, drm_edid);
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index e814020bbcd3b3..cfbe020de54e01 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -274,19 +274,24 @@ EXPORT_SYMBOL(drm_panel_disable);
* The modes probed from the panel are automatically added to the connector
* that the panel is attached to.
*
- * Return: The number of modes available from the panel on success or a
- * negative error code on failure.
+ * Return: The number of modes available from the panel on success, or 0 on
+ * failure (no modes).
*/
int drm_panel_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
if (!panel)
- return -EINVAL;
+ return 0;
- if (panel->funcs && panel->funcs->get_modes)
- return panel->funcs->get_modes(panel, connector);
+ if (panel->funcs && panel->funcs->get_modes) {
+ int num;
- return -EOPNOTSUPP;
+ num = panel->funcs->get_modes(panel, connector);
+ if (num > 0)
+ return num;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(drm_panel_get_modes);
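Together with the probe-helper clamp below, this settles the contract that mode-probing paths return a non-negative count. A minimal conforming .get_modes() callback might look like this (a sketch with hypothetical names, mirroring the lt8912b rework above):

        static int demo_connector_get_modes(struct drm_connector *connector)
        {
                const struct drm_edid *drm_edid = drm_edid_read(connector);
                int num;

                drm_edid_connector_update(connector, drm_edid);
                if (!drm_edid)
                        return 0;       /* no EDID: zero modes, not -ENODEV */

                num = drm_edid_connector_add_modes(connector);
                drm_edid_free(drm_edid);
                return num;
        }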
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 4d60cc810b5773..bf2dd1f46b6c4f 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -422,6 +422,13 @@ static int drm_helper_probe_get_modes(struct drm_connector *connector)
count = connector_funcs->get_modes(connector);
+ /* The .get_modes() callback should not return negative values. */
+ if (count < 0) {
+ drm_err(connector->dev, ".get_modes() returned %pe\n",
+ ERR_PTR(count));
+ count = 0;
+ }
+
/*
* Fallback for when DDC probe failed in drm_get_edid() and thus skipped
* override/firmware EDID.
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index ca31bad6c5760d..f48c4343f4690f 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -74,16 +74,15 @@ static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
{
struct exynos_dp_device *dp = to_dp(plat_data);
struct drm_display_mode *mode;
- int num_modes = 0;
if (dp->plat_data.panel)
- return num_modes;
+ return 0;
mode = drm_mode_create(connector->dev);
if (!mode) {
DRM_DEV_ERROR(dp->dev,
"failed to create a new display mode.\n");
- return num_modes;
+ return 0;
}
drm_display_mode_from_videomode(&dp->vm, mode);
@@ -94,7 +93,7 @@ static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
- return num_modes + 1;
+ return 1;
}
static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 00382f28748ac0..f5bbba9ad22526 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -316,14 +316,14 @@ static int vidi_get_modes(struct drm_connector *connector)
*/
if (!ctx->raw_edid) {
DRM_DEV_DEBUG_KMS(ctx->dev, "raw_edid is null.\n");
- return -EFAULT;
+ return 0;
}
edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
if (!edid) {
DRM_DEV_DEBUG_KMS(ctx->dev, "failed to allocate edid\n");
- return -ENOMEM;
+ return 0;
}
drm_connector_update_edid_property(connector, edid);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 43bed6cbaaea07..b1d02dec3774d0 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
int ret;
if (!hdata->ddc_adpt)
- return -ENODEV;
+ return 0;
edid = drm_get_edid(connector, hdata->ddc_adpt);
if (!edid)
- return -ENODEV;
+ return 0;
hdata->dvi_mode = !connector->display_info.is_hdmi;
DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 70349739dd89be..55dedd73f528c8 100644
--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
@@ -72,14 +72,14 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
int ret;
if (!mode)
- return -EINVAL;
+ return 0;
ret = of_get_drm_display_mode(np, &imxpd->mode,
&imxpd->bus_flags,
OF_USE_NATIVE_MODE);
if (ret) {
drm_mode_destroy(connector->dev, mode);
- return ret;
+ return 0;
}
drm_mode_copy(mode, &imxpd->mode);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 56dcd25db1ce2c..db8cbf6151129d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1256,6 +1256,8 @@ out:
drm_vma_node_unmap(&nvbo->bo.base.vma_node,
bdev->dev_mapping);
nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
+ nvbo->bo.resource->bus.offset = 0;
+ nvbo->bo.resource->bus.addr = NULL;
goto retry;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
index 666eb93b1742ca..11b4c9c274a1a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
@@ -41,7 +41,6 @@ r535_devinit_new(const struct nvkm_devinit_func *hw,
rm->dtor = r535_devinit_dtor;
rm->post = hw->post;
- rm->disable = hw->disable;
ret = nv50_devinit_new_(rm, device, type, inst, pdevinit);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index a73a5b58979045..9994cbd6f1c40c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -1430,6 +1430,10 @@ r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc)
/**
* r535_gsp_msg_run_cpu_sequencer() -- process I/O commands from the GSP
+ * @priv: gsp pointer
+ * @fn: function number (ignored)
+ * @repv: pointer to libos print RPC
+ * @repc: message size
*
* The GSP sequencer is a list of I/O commands that the GSP can send to
* the driver to perform for various purposes. The most common usage is to
@@ -1781,6 +1785,7 @@ static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
/**
* r535_gsp_libos_init() -- create the libos arguments structure
+ * @gsp: gsp pointer
*
* The logging buffers are byte queues that contain encoded printf-like
* messages from GSP-RM. They need to be decoded by a special application
@@ -1920,6 +1925,10 @@ nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
/**
* nvkm_gsp_radix3_sg - build a radix3 table from a S/G list
+ * @gsp: gsp pointer
+ * @sgt: S/G list to traverse
+ * @size: size of the image, in bytes
+ * @rx3: radix3 array to update
*
* The GSP uses a three-level page table, called radix3, to map the firmware.
* Each 64-bit "pointer" in the table is either the bus address of an entry in
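These hunks only fill in the parameter descriptions that kernel-doc requires: every argument of a documented function needs an @name: line, or scripts/kernel-doc emits a warning. The expected shape, shown on a made-up function:

        /**
         * demo_frob() - frobnicate a demo object
         * @obj: object to frobnicate
         * @flags: DEMO_* behaviour flags
         *
         * Free-form description follows the parameter block.
         *
         * Return: 0 on success, negative errno on failure.
         */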
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 69001a3dc0df23..2d1880c61b50d1 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -166,7 +166,7 @@ sun4i_hdmi_connector_clock_valid(const struct drm_connector *connector,
unsigned long long clock)
{
const struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
- unsigned long diff = clock / 200; /* +-0.5% allowed by HDMI spec */
+ unsigned long diff = div_u64(clock, 200); /* +-0.5% allowed by HDMI spec */
long rounded_rate;
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
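The connector clock here is an unsigned long long, and dividing a 64-bit value with a plain "/" breaks 32-bit builds: the compiler emits a call to the libgcc helper __udivdi3, which the kernel does not provide. div_u64() from <linux/math64.h> is the portable idiom, as in this reduced sketch (function name hypothetical):

        #include <linux/math64.h>

        static unsigned long demo_hdmi_clock_tolerance(unsigned long long clock)
        {
                /* +-0.5% allowed by the HDMI spec, computed without
                 * a raw 64-bit division */
                return div_u64(clock, 200);
        }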
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 34f807ed1c315e..d8751ea2030329 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -509,7 +509,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, vc4_hdmi->ddc);
cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
if (!edid)
- return -ENODEV;
+ return 0;
drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 952496c6260dfb..826c8b38967250 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -235,6 +235,29 @@ retry:
goto err_unlock_list;
}
+ if (!args->num_batch_buffer) {
+ err = xe_vm_lock(vm, true);
+ if (err)
+ goto err_unlock_list;
+
+ if (!xe_vm_in_lr_mode(vm)) {
+ struct dma_fence *fence;
+
+ fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto err_unlock_list;
+ }
+ for (i = 0; i < num_syncs; i++)
+ xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_exec_queue_last_fence_set(q, vm, fence);
+ dma_fence_put(fence);
+ }
+
+ xe_vm_unlock(vm);
+ goto err_unlock_list;
+ }
+
vm_exec.vm = &vm->gpuvm;
vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
if (xe_vm_in_lr_mode(vm)) {
@@ -254,24 +277,6 @@ retry:
goto err_exec;
}
- if (!args->num_batch_buffer) {
- if (!xe_vm_in_lr_mode(vm)) {
- struct dma_fence *fence;
-
- fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
- if (IS_ERR(fence)) {
- err = PTR_ERR(fence);
- goto err_exec;
- }
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, fence);
- xe_exec_queue_last_fence_set(q, vm, fence);
- dma_fence_put(fence);
- }
-
- goto err_exec;
- }
-
if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
err = -EWOULDBLOCK; /* Aliased to -EAGAIN */
skip_retry = true;
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 73c535193a984b..241c294270d916 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -69,7 +69,7 @@ static bool access_is_atomic(enum access_type access_type)
static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
{
return BIT(tile->id) & vma->tile_present &&
- !(BIT(tile->id) & vma->usm.tile_invalidated);
+ !(BIT(tile->id) & vma->tile_invalidated);
}
static bool vma_matches(struct xe_vma *vma, u64 page_addr)
@@ -226,7 +226,7 @@ retry_userptr:
if (xe_vma_is_userptr(vma))
ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
- vma->usm.tile_invalidated &= ~BIT(tile->id);
+ vma->tile_invalidated &= ~BIT(tile->id);
unlock_dma_resv:
drm_exec_fini(&exec);
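After the rename, both per-tile masks live directly in struct xe_vma, one bit per tile. The validity test from the hunk above reduces to plain bitmask logic, sketched here as a standalone helper with the fields passed in:

        #include <linux/bits.h>

        static bool demo_vma_is_valid(u8 tile_id, u8 tile_present,
                                      u8 tile_invalidated)
        {
                return (tile_present & BIT(tile_id)) &&
                       !(tile_invalidated & BIT(tile_id));
        }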
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 4ddc55527f9ab3..846f14507d5ff2 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -468,7 +468,7 @@ DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
TP_ARGS(vma)
);
-DEFINE_EVENT(xe_vma, xe_vma_usm_invalidate,
+DEFINE_EVENT(xe_vma, xe_vma_invalidate,
TP_PROTO(struct xe_vma *vma),
TP_ARGS(vma)
);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index d28260351af2e3..f88faef4142bde 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -708,6 +708,7 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
int err = 0;
LIST_HEAD(tmp_evict);
+ xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
lockdep_assert_held_write(&vm->lock);
/* Collect invalidated userptrs */
@@ -724,11 +725,27 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
userptr.repin_link) {
err = xe_vma_userptr_pin_pages(uvma);
- if (err < 0)
- return err;
+ if (err == -EFAULT) {
+ list_del_init(&uvma->userptr.repin_link);
- list_del_init(&uvma->userptr.repin_link);
- list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
+ /* Wait for pending binds */
+ xe_vm_lock(vm, false);
+ dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+
+ err = xe_vm_invalidate_vma(&uvma->vma);
+ xe_vm_unlock(vm);
+ if (err)
+ return err;
+ } else {
+ if (err < 0)
+ return err;
+
+ list_del_init(&uvma->userptr.repin_link);
+ list_move_tail(&uvma->vma.combined_links.rebind,
+ &vm->rebind_list);
+ }
}
return 0;
@@ -2024,7 +2041,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
return err;
}
- if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
+ if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
true, first_op, last_op);
} else {
@@ -3214,9 +3231,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
u8 id;
int ret;
- xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
xe_assert(xe, !xe_vma_is_null(vma));
- trace_xe_vma_usm_invalidate(vma);
+ trace_xe_vma_invalidate(vma);
/* Check that we don't race with page-table updates */
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
@@ -3254,7 +3270,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
}
}
- vma->usm.tile_invalidated = vma->tile_mask;
+ vma->tile_invalidated = vma->tile_mask;
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 79b5cab5771199..ae5fb565f6bf48 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -84,11 +84,8 @@ struct xe_vma {
struct work_struct destroy_work;
};
- /** @usm: unified shared memory state */
- struct {
- /** @tile_invalidated: VMA has been invalidated */
- u8 tile_invalidated;
- } usm;
+ /** @tile_invalidated: VMA has been invalidated */
+ u8 tile_invalidated;
/** @tile_mask: Tile mask of where to create binding for this VMA */
u8 tile_mask;
diff --git a/drivers/gpu/drm/xe/xe_vram_freq.c b/drivers/gpu/drm/xe/xe_vram_freq.c
index 079cc283a18667..c5f6b5a5d1176e 100644
--- a/drivers/gpu/drm/xe/xe_vram_freq.c
+++ b/drivers/gpu/drm/xe/xe_vram_freq.c
@@ -111,8 +111,10 @@ void xe_vram_freq_sysfs_init(struct xe_tile *tile)
return;
kobj = kobject_create_and_add("memory", tile->sysfs);
- if (!kobj)
+ if (!kobj) {
drm_warn(&xe->drm, "failed to add memory directory, err: %d\n", -ENOMEM);
+ return;
+ }
err = sysfs_create_group(kobj, &freq_group_attrs);
if (err) {
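The fix adds the early return that was missing after a failed kobject_create_and_add(), so a NULL kobject can no longer reach sysfs_create_group(). A hypothetical reduction of the corrected pattern (assuming the file's freq_group_attrs attribute group; the failure handling at the end is this sketch's simplification, not the patch's):

        #include <linux/kobject.h>
        #include <linux/sysfs.h>

        static void demo_sysfs_init(struct kobject *parent)
        {
                struct kobject *kobj = kobject_create_and_add("memory", parent);

                if (!kobj)
                        return;         /* the early return this hunk adds */

                if (sysfs_create_group(kobj, &freq_group_attrs))
                        kobject_put(kobj);      /* error handling simplified */
        }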
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index b10574d42b7ac9..4f41a3c7824d06 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -6,21 +6,30 @@
* I2C master mode controller driver, used in Nomadik 8815
* and Ux500 platforms.
*
+ * The Mobileye EyeQ5 platform is also supported; it uses
+ * the same Ux500/DB8500 IP block with two quirks:
+ * - The memory bus only supports 32-bit accesses.
+ * - A register must be configured for the I2C speed mode;
+ * it is located in a shared register region called OLB.
+ *
* Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
* Author: Sachin Verma <sachin.verma@st.com>
*/
-#include <linux/init.h>
-#include <linux/module.h>
#include <linux/amba/bus.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/i2c.h>
-#include <linux/err.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/pm_runtime.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
#define DRIVER_NAME "nmk-i2c"
@@ -42,61 +51,63 @@
#define I2C_ICR (0x038)
/* Control registers */
-#define I2C_CR_PE (0x1 << 0) /* Peripheral Enable */
-#define I2C_CR_OM (0x3 << 1) /* Operating mode */
-#define I2C_CR_SAM (0x1 << 3) /* Slave addressing mode */
-#define I2C_CR_SM (0x3 << 4) /* Speed mode */
-#define I2C_CR_SGCM (0x1 << 6) /* Slave general call mode */
-#define I2C_CR_FTX (0x1 << 7) /* Flush Transmit */
-#define I2C_CR_FRX (0x1 << 8) /* Flush Receive */
-#define I2C_CR_DMA_TX_EN (0x1 << 9) /* DMA Tx enable */
-#define I2C_CR_DMA_RX_EN (0x1 << 10) /* DMA Rx Enable */
-#define I2C_CR_DMA_SLE (0x1 << 11) /* DMA sync. logic enable */
-#define I2C_CR_LM (0x1 << 12) /* Loopback mode */
-#define I2C_CR_FON (0x3 << 13) /* Filtering on */
-#define I2C_CR_FS (0x3 << 15) /* Force stop enable */
+#define I2C_CR_PE BIT(0) /* Peripheral Enable */
+#define I2C_CR_OM GENMASK(2, 1) /* Operating mode */
+#define I2C_CR_SAM BIT(3) /* Slave addressing mode */
+#define I2C_CR_SM GENMASK(5, 4) /* Speed mode */
+#define I2C_CR_SGCM BIT(6) /* Slave general call mode */
+#define I2C_CR_FTX BIT(7) /* Flush Transmit */
+#define I2C_CR_FRX BIT(8) /* Flush Receive */
+#define I2C_CR_DMA_TX_EN BIT(9) /* DMA Tx enable */
+#define I2C_CR_DMA_RX_EN BIT(10) /* DMA Rx Enable */
+#define I2C_CR_DMA_SLE BIT(11) /* DMA sync. logic enable */
+#define I2C_CR_LM BIT(12) /* Loopback mode */
+#define I2C_CR_FON GENMASK(14, 13) /* Filtering on */
+#define I2C_CR_FS GENMASK(16, 15) /* Force stop enable */
+
+/* Slave control register (SCR) */
+#define I2C_SCR_SLSU GENMASK(31, 16) /* Slave data setup time */
/* Master controller (MCR) register */
-#define I2C_MCR_OP (0x1 << 0) /* Operation */
-#define I2C_MCR_A7 (0x7f << 1) /* 7-bit address */
-#define I2C_MCR_EA10 (0x7 << 8) /* 10-bit Extended address */
-#define I2C_MCR_SB (0x1 << 11) /* Extended address */
-#define I2C_MCR_AM (0x3 << 12) /* Address type */
-#define I2C_MCR_STOP (0x1 << 14) /* Stop condition */
-#define I2C_MCR_LENGTH (0x7ff << 15) /* Transaction length */
+#define I2C_MCR_OP BIT(0) /* Operation */
+#define I2C_MCR_A7 GENMASK(7, 1) /* 7-bit address */
+#define I2C_MCR_EA10 GENMASK(10, 8) /* 10-bit Extended address */
+#define I2C_MCR_SB BIT(11) /* Extended address */
+#define I2C_MCR_AM GENMASK(13, 12) /* Address type */
+#define I2C_MCR_STOP BIT(14) /* Stop condition */
+#define I2C_MCR_LENGTH GENMASK(25, 15) /* Transaction length */
/* Status register (SR) */
-#define I2C_SR_OP (0x3 << 0) /* Operation */
-#define I2C_SR_STATUS (0x3 << 2) /* controller status */
-#define I2C_SR_CAUSE (0x7 << 4) /* Abort cause */
-#define I2C_SR_TYPE (0x3 << 7) /* Receive type */
-#define I2C_SR_LENGTH (0x7ff << 9) /* Transfer length */
+#define I2C_SR_OP GENMASK(1, 0) /* Operation */
+#define I2C_SR_STATUS GENMASK(3, 2) /* controller status */
+#define I2C_SR_CAUSE GENMASK(6, 4) /* Abort cause */
+#define I2C_SR_TYPE GENMASK(8, 7) /* Receive type */
+#define I2C_SR_LENGTH GENMASK(19, 9) /* Transfer length */
+
+/* Baud-rate counter register (BRCR) */
+#define I2C_BRCR_BRCNT1 GENMASK(31, 16) /* Baud-rate counter 1 */
+#define I2C_BRCR_BRCNT2 GENMASK(15, 0) /* Baud-rate counter 2 */
/* Interrupt mask set/clear (IMSCR) bits */
-#define I2C_IT_TXFE (0x1 << 0)
-#define I2C_IT_TXFNE (0x1 << 1)
-#define I2C_IT_TXFF (0x1 << 2)
-#define I2C_IT_TXFOVR (0x1 << 3)
-#define I2C_IT_RXFE (0x1 << 4)
-#define I2C_IT_RXFNF (0x1 << 5)
-#define I2C_IT_RXFF (0x1 << 6)
-#define I2C_IT_RFSR (0x1 << 16)
-#define I2C_IT_RFSE (0x1 << 17)
-#define I2C_IT_WTSR (0x1 << 18)
-#define I2C_IT_MTD (0x1 << 19)
-#define I2C_IT_STD (0x1 << 20)
-#define I2C_IT_MAL (0x1 << 24)
-#define I2C_IT_BERR (0x1 << 25)
-#define I2C_IT_MTDWS (0x1 << 28)
-
-#define GEN_MASK(val, mask, sb) (((val) << (sb)) & (mask))
+#define I2C_IT_TXFE BIT(0)
+#define I2C_IT_TXFNE BIT(1)
+#define I2C_IT_TXFF BIT(2)
+#define I2C_IT_TXFOVR BIT(3)
+#define I2C_IT_RXFE BIT(4)
+#define I2C_IT_RXFNF BIT(5)
+#define I2C_IT_RXFF BIT(6)
+#define I2C_IT_RFSR BIT(16)
+#define I2C_IT_RFSE BIT(17)
+#define I2C_IT_WTSR BIT(18)
+#define I2C_IT_MTD BIT(19)
+#define I2C_IT_STD BIT(20)
+#define I2C_IT_MAL BIT(24)
+#define I2C_IT_BERR BIT(25)
+#define I2C_IT_MTDWS BIT(28)
/* some bits in ICR are reserved */
#define I2C_CLEAR_ALL_INTS 0x131f007f
-/* first three msb bits are reserved */
-#define IRQ_MASK(mask) (mask & 0x1fffffff)
-
/* maximum threshold value */
#define MAX_I2C_FIFO_THRESHOLD 15
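The register conversion above swaps the hand-rolled shift-and-mask macros for GENMASK()/BIT() plus the FIELD_PREP()/FIELD_GET() accessors from <linux/bitfield.h>, which derive the shift from the mask at compile time. A small sketch of the idiom on one of the fields (demo names, same mask as I2C_MCR_A7):

        #include <linux/bitfield.h>
        #include <linux/bits.h>
        #include <linux/types.h>

        #define DEMO_MCR_A7     GENMASK(7, 1)   /* 7-bit slave address field */

        /* FIELD_PREP() shifts the value into position; the old
         * GEN_MASK(val, mask, sb) macro open-coded (val << sb) & mask. */
        static inline u32 demo_encode_addr7(u8 addr)
        {
                return FIELD_PREP(DEMO_MCR_A7, addr);
        }

        /* FIELD_GET() is the inverse: mask out and right-align a field. */
        static inline u8 demo_decode_addr7(u32 mcr)
        {
                return FIELD_GET(DEMO_MCR_A7, mcr);
        }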
@@ -107,6 +118,15 @@ enum i2c_freq_mode {
I2C_FREQ_MODE_FAST_PLUS, /* up to 1 Mb/s */
};
+/* Mobileye EyeQ5 offset into a shared register region (called OLB) */
+#define NMK_I2C_EYEQ5_OLB_IOCR2 0x0B8
+
+enum i2c_eyeq5_speed {
+ I2C_EYEQ5_SPEED_FAST,
+ I2C_EYEQ5_SPEED_FAST_PLUS,
+ I2C_EYEQ5_SPEED_HIGH_SPEED,
+};
+
/**
* struct i2c_vendor_data - per-vendor variations
* @has_mtdws: variant has the MTDWS bit
@@ -131,6 +151,12 @@ enum i2c_operation {
I2C_READ = 0x01
};
+enum i2c_operating_mode {
+ I2C_OM_SLAVE,
+ I2C_OM_MASTER,
+ I2C_OM_MASTER_OR_SLAVE,
+};
+
/**
* struct i2c_nmk_client - client specific data
* @slave_adr: 7-bit slave address
@@ -159,11 +185,13 @@ struct i2c_nmk_client {
* @clk_freq: clock frequency for the operation mode
* @tft: Tx FIFO Threshold in bytes
* @rft: Rx FIFO Threshold in bytes
- * @timeout: Slave response timeout (ms)
+ * @timeout_usecs: Slave response timeout
* @sm: speed mode
* @stop: stop condition.
- * @xfer_complete: acknowledge completion for a I2C message.
+ * @xfer_wq: xfer done wait queue.
+ * @xfer_done: xfer done boolean.
* @result: controller propagated result.
+ * @has_32b_bus: controller is on a bus that only supports 32-bit accesses.
*/
struct nmk_i2c_dev {
struct i2c_vendor_data *vendor;
@@ -176,11 +204,13 @@ struct nmk_i2c_dev {
u32 clk_freq;
unsigned char tft;
unsigned char rft;
- int timeout;
+ u32 timeout_usecs;
enum i2c_freq_mode sm;
int stop;
- struct completion xfer_complete;
+ struct wait_queue_head xfer_wq;
+ bool xfer_done;
int result;
+ bool has_32b_bus;
};
/* controller's abort causes */
@@ -204,18 +234,36 @@ static inline void i2c_clr_bit(void __iomem *reg, u32 mask)
writel(readl(reg) & ~mask, reg);
}
+static inline u8 nmk_i2c_readb(const struct nmk_i2c_dev *priv,
+ unsigned long reg)
+{
+ if (priv->has_32b_bus)
+ return readl(priv->virtbase + reg);
+ else
+ return readb(priv->virtbase + reg);
+}
+
+static inline void nmk_i2c_writeb(const struct nmk_i2c_dev *priv, u32 val,
+ unsigned long reg)
+{
+ if (priv->has_32b_bus)
+ writel(val, priv->virtbase + reg);
+ else
+ writeb(val, priv->virtbase + reg);
+}
+
/**
* flush_i2c_fifo() - This function flushes the I2C FIFO
- * @dev: private data of I2C Driver
+ * @priv: private data of I2C Driver
*
* This function flushes the I2C Tx and Rx FIFOs. It returns
* 0 on successful flushing of FIFO
*/
-static int flush_i2c_fifo(struct nmk_i2c_dev *dev)
+static int flush_i2c_fifo(struct nmk_i2c_dev *priv)
{
#define LOOP_ATTEMPTS 10
+ ktime_t timeout;
int i;
- unsigned long timeout;
/*
* flush the transmit and receive FIFO. The flushing
@@ -224,19 +272,19 @@ static int flush_i2c_fifo(struct nmk_i2c_dev *dev)
* bits, until then no one must access Tx, Rx FIFO and
* should poll on these bits waiting for the completion.
*/
- writel((I2C_CR_FTX | I2C_CR_FRX), dev->virtbase + I2C_CR);
+ writel((I2C_CR_FTX | I2C_CR_FRX), priv->virtbase + I2C_CR);
for (i = 0; i < LOOP_ATTEMPTS; i++) {
- timeout = jiffies + dev->adap.timeout;
+ timeout = ktime_add_us(ktime_get(), priv->timeout_usecs);
- while (!time_after(jiffies, timeout)) {
- if ((readl(dev->virtbase + I2C_CR) &
+ while (ktime_after(timeout, ktime_get())) {
+ if ((readl(priv->virtbase + I2C_CR) &
(I2C_CR_FTX | I2C_CR_FRX)) == 0)
- return 0;
+ return 0;
}
}
- dev_err(&dev->adev->dev,
+ dev_err(&priv->adev->dev,
"flushing operation timed out giving up after %d attempts",
LOOP_ATTEMPTS);
@@ -245,120 +293,121 @@ static int flush_i2c_fifo(struct nmk_i2c_dev *dev)
/**
* disable_all_interrupts() - Disable all interrupts of this I2c Bus
- * @dev: private data of I2C Driver
+ * @priv: private data of I2C Driver
*/
-static void disable_all_interrupts(struct nmk_i2c_dev *dev)
+static void disable_all_interrupts(struct nmk_i2c_dev *priv)
{
- u32 mask = IRQ_MASK(0);
- writel(mask, dev->virtbase + I2C_IMSCR);
+ writel(0, priv->virtbase + I2C_IMSCR);
}
/**
* clear_all_interrupts() - Clear all interrupts of I2C Controller
- * @dev: private data of I2C Driver
+ * @priv: private data of I2C Driver
*/
-static void clear_all_interrupts(struct nmk_i2c_dev *dev)
+static void clear_all_interrupts(struct nmk_i2c_dev *priv)
{
- u32 mask;
- mask = IRQ_MASK(I2C_CLEAR_ALL_INTS);
- writel(mask, dev->virtbase + I2C_ICR);
+ writel(I2C_CLEAR_ALL_INTS, priv->virtbase + I2C_ICR);
}
/**
* init_hw() - initialize the I2C hardware
- * @dev: private data of I2C Driver
+ * @priv: private data of I2C Driver
*/
-static int init_hw(struct nmk_i2c_dev *dev)
+static int init_hw(struct nmk_i2c_dev *priv)
{
int stat;
- stat = flush_i2c_fifo(dev);
+ stat = flush_i2c_fifo(priv);
if (stat)
goto exit;
/* disable the controller */
- i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
+ i2c_clr_bit(priv->virtbase + I2C_CR, I2C_CR_PE);
- disable_all_interrupts(dev);
+ disable_all_interrupts(priv);
- clear_all_interrupts(dev);
+ clear_all_interrupts(priv);
- dev->cli.operation = I2C_NO_OPERATION;
+ priv->cli.operation = I2C_NO_OPERATION;
exit:
return stat;
}
/* enable peripheral, master mode operation */
-#define DEFAULT_I2C_REG_CR ((1 << 1) | I2C_CR_PE)
+#define DEFAULT_I2C_REG_CR (FIELD_PREP(I2C_CR_OM, I2C_OM_MASTER) | I2C_CR_PE)
+
+/* grab top three bits from extended I2C addresses */
+#define ADR_3MSB_BITS GENMASK(9, 7)
/**
* load_i2c_mcr_reg() - load the MCR register
- * @dev: private data of controller
+ * @priv: private data of controller
* @flags: message flags
*/
-static u32 load_i2c_mcr_reg(struct nmk_i2c_dev *dev, u16 flags)
+static u32 load_i2c_mcr_reg(struct nmk_i2c_dev *priv, u16 flags)
{
u32 mcr = 0;
unsigned short slave_adr_3msb_bits;
- mcr |= GEN_MASK(dev->cli.slave_adr, I2C_MCR_A7, 1);
+ mcr |= FIELD_PREP(I2C_MCR_A7, priv->cli.slave_adr);
if (unlikely(flags & I2C_M_TEN)) {
/* 10-bit address transaction */
- mcr |= GEN_MASK(2, I2C_MCR_AM, 12);
+ mcr |= FIELD_PREP(I2C_MCR_AM, 2);
/*
* Get the top 3 bits.
* EA10 represents extended address in MCR. This includes
* the extension (MSB bits) of the 7 bit address loaded
* in A7
*/
- slave_adr_3msb_bits = (dev->cli.slave_adr >> 7) & 0x7;
+ slave_adr_3msb_bits = FIELD_GET(ADR_3MSB_BITS,
+ priv->cli.slave_adr);
- mcr |= GEN_MASK(slave_adr_3msb_bits, I2C_MCR_EA10, 8);
+ mcr |= FIELD_PREP(I2C_MCR_EA10, slave_adr_3msb_bits);
} else {
/* 7-bit address transaction */
- mcr |= GEN_MASK(1, I2C_MCR_AM, 12);
+ mcr |= FIELD_PREP(I2C_MCR_AM, 1);
}
/* start byte procedure not applied */
- mcr |= GEN_MASK(0, I2C_MCR_SB, 11);
+ mcr |= FIELD_PREP(I2C_MCR_SB, 0);
/* check the operation, master read/write? */
- if (dev->cli.operation == I2C_WRITE)
- mcr |= GEN_MASK(I2C_WRITE, I2C_MCR_OP, 0);
+ if (priv->cli.operation == I2C_WRITE)
+ mcr |= FIELD_PREP(I2C_MCR_OP, I2C_WRITE);
else
- mcr |= GEN_MASK(I2C_READ, I2C_MCR_OP, 0);
+ mcr |= FIELD_PREP(I2C_MCR_OP, I2C_READ);
/* stop or repeated start? */
- if (dev->stop)
- mcr |= GEN_MASK(1, I2C_MCR_STOP, 14);
+ if (priv->stop)
+ mcr |= FIELD_PREP(I2C_MCR_STOP, 1);
else
- mcr &= ~(GEN_MASK(1, I2C_MCR_STOP, 14));
+ mcr &= ~FIELD_PREP(I2C_MCR_STOP, 1);
- mcr |= GEN_MASK(dev->cli.count, I2C_MCR_LENGTH, 15);
+ mcr |= FIELD_PREP(I2C_MCR_LENGTH, priv->cli.count);
return mcr;
}
/**
* setup_i2c_controller() - setup the controller
- * @dev: private data of controller
+ * @priv: private data of controller
*/
-static void setup_i2c_controller(struct nmk_i2c_dev *dev)
+static void setup_i2c_controller(struct nmk_i2c_dev *priv)
{
u32 brcr1, brcr2;
u32 i2c_clk, div;
u32 ns;
u16 slsu;
- writel(0x0, dev->virtbase + I2C_CR);
- writel(0x0, dev->virtbase + I2C_HSMCR);
- writel(0x0, dev->virtbase + I2C_TFTR);
- writel(0x0, dev->virtbase + I2C_RFTR);
- writel(0x0, dev->virtbase + I2C_DMAR);
+ writel(0x0, priv->virtbase + I2C_CR);
+ writel(0x0, priv->virtbase + I2C_HSMCR);
+ writel(0x0, priv->virtbase + I2C_TFTR);
+ writel(0x0, priv->virtbase + I2C_RFTR);
+ writel(0x0, priv->virtbase + I2C_DMAR);
- i2c_clk = clk_get_rate(dev->clk);
+ i2c_clk = clk_get_rate(priv->clk);
/*
* set the slsu:
@@ -373,7 +422,7 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
* slsu = cycles / (1000000000 / f) + 1
*/
ns = DIV_ROUND_UP_ULL(1000000000ULL, i2c_clk);
- switch (dev->sm) {
+ switch (priv->sm) {
case I2C_FREQ_MODE_FAST:
case I2C_FREQ_MODE_FAST_PLUS:
slsu = DIV_ROUND_UP(100, ns); /* Fast */
@@ -388,15 +437,15 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
}
slsu += 1;
- dev_dbg(&dev->adev->dev, "calculated SLSU = %04x\n", slsu);
- writel(slsu << 16, dev->virtbase + I2C_SCR);
+ dev_dbg(&priv->adev->dev, "calculated SLSU = %04x\n", slsu);
+ writel(FIELD_PREP(I2C_SCR_SLSU, slsu), priv->virtbase + I2C_SCR);
/*
* The spec says, in case of std. mode the divider is
* 2 whereas it is 3 for fast and fastplus mode of
* operation. TODO - high speed support.
*/
- div = (dev->clk_freq > I2C_MAX_STANDARD_MODE_FREQ) ? 3 : 2;
+ div = (priv->clk_freq > I2C_MAX_STANDARD_MODE_FREQ) ? 3 : 2;
/*
* generate the mask for baud rate counters. The controller
@@ -405,11 +454,11 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
* plus operation. Currently we do not support high speed mode
* so set brcr1 to 0.
*/
- brcr1 = 0 << 16;
- brcr2 = (i2c_clk/(dev->clk_freq * div)) & 0xffff;
+ brcr1 = FIELD_PREP(I2C_BRCR_BRCNT1, 0);
+ brcr2 = FIELD_PREP(I2C_BRCR_BRCNT2, i2c_clk / (priv->clk_freq * div));
/* set the baud rate counter register */
- writel((brcr1 | brcr2), dev->virtbase + I2C_BRCR);
+ writel((brcr1 | brcr2), priv->virtbase + I2C_BRCR);
/*
* set the speed mode. Currently we support
@@ -417,125 +466,142 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
* TODO - support for fast mode plus (up to 1Mb/s)
* and high speed (up to 3.4 Mb/s)
*/
- if (dev->sm > I2C_FREQ_MODE_FAST) {
- dev_err(&dev->adev->dev,
+ if (priv->sm > I2C_FREQ_MODE_FAST) {
+ dev_err(&priv->adev->dev,
"do not support this mode defaulting to std. mode\n");
- brcr2 = i2c_clk / (I2C_MAX_STANDARD_MODE_FREQ * 2) & 0xffff;
- writel((brcr1 | brcr2), dev->virtbase + I2C_BRCR);
- writel(I2C_FREQ_MODE_STANDARD << 4,
- dev->virtbase + I2C_CR);
+ brcr2 = FIELD_PREP(I2C_BRCR_BRCNT2,
+ i2c_clk / (I2C_MAX_STANDARD_MODE_FREQ * 2));
+ writel((brcr1 | brcr2), priv->virtbase + I2C_BRCR);
+ writel(FIELD_PREP(I2C_CR_SM, I2C_FREQ_MODE_STANDARD),
+ priv->virtbase + I2C_CR);
}
- writel(dev->sm << 4, dev->virtbase + I2C_CR);
+ writel(FIELD_PREP(I2C_CR_SM, priv->sm), priv->virtbase + I2C_CR);
/* set the Tx and Rx FIFO threshold */
- writel(dev->tft, dev->virtbase + I2C_TFTR);
- writel(dev->rft, dev->virtbase + I2C_RFTR);
+ writel(priv->tft, priv->virtbase + I2C_TFTR);
+ writel(priv->rft, priv->virtbase + I2C_RFTR);
+}
+
+static bool nmk_i2c_wait_xfer_done(struct nmk_i2c_dev *priv)
+{
+ if (priv->timeout_usecs < jiffies_to_usecs(1)) {
+ unsigned long timeout_usecs = priv->timeout_usecs;
+ ktime_t timeout = ktime_set(0, timeout_usecs * NSEC_PER_USEC);
+
+ wait_event_hrtimeout(priv->xfer_wq, priv->xfer_done, timeout);
+ } else {
+ unsigned long timeout = usecs_to_jiffies(priv->timeout_usecs);
+
+ wait_event_timeout(priv->xfer_wq, priv->xfer_done, timeout);
+ }
+
+ return priv->xfer_done;
}
/**
* read_i2c() - Read from I2C client device
- * @dev: private data of I2C Driver
+ * @priv: private data of I2C Driver
* @flags: message flags
*
* This function reads from i2c client device when controller is in
* master mode. There is a completion timeout. If there is no transfer
* before timeout error is returned.
*/
-static int read_i2c(struct nmk_i2c_dev *dev, u16 flags)
+static int read_i2c(struct nmk_i2c_dev *priv, u16 flags)
{
- int status = 0;
u32 mcr, irq_mask;
- unsigned long timeout;
+ int status = 0;
+ bool xfer_done;
- mcr = load_i2c_mcr_reg(dev, flags);
- writel(mcr, dev->virtbase + I2C_MCR);
+ mcr = load_i2c_mcr_reg(priv, flags);
+ writel(mcr, priv->virtbase + I2C_MCR);
/* load the current CR value */
- writel(readl(dev->virtbase + I2C_CR) | DEFAULT_I2C_REG_CR,
- dev->virtbase + I2C_CR);
+ writel(readl(priv->virtbase + I2C_CR) | DEFAULT_I2C_REG_CR,
+ priv->virtbase + I2C_CR);
/* enable the controller */
- i2c_set_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
+ i2c_set_bit(priv->virtbase + I2C_CR, I2C_CR_PE);
- init_completion(&dev->xfer_complete);
+ init_waitqueue_head(&priv->xfer_wq);
+ priv->xfer_done = false;
/* enable interrupts by setting the mask */
irq_mask = (I2C_IT_RXFNF | I2C_IT_RXFF |
I2C_IT_MAL | I2C_IT_BERR);
- if (dev->stop || !dev->vendor->has_mtdws)
+ if (priv->stop || !priv->vendor->has_mtdws)
irq_mask |= I2C_IT_MTD;
else
irq_mask |= I2C_IT_MTDWS;
- irq_mask = I2C_CLEAR_ALL_INTS & IRQ_MASK(irq_mask);
+ irq_mask &= I2C_CLEAR_ALL_INTS;
- writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask,
- dev->virtbase + I2C_IMSCR);
+ writel(readl(priv->virtbase + I2C_IMSCR) | irq_mask,
+ priv->virtbase + I2C_IMSCR);
- timeout = wait_for_completion_timeout(
- &dev->xfer_complete, dev->adap.timeout);
+ xfer_done = nmk_i2c_wait_xfer_done(priv);
- if (timeout == 0) {
+ if (!xfer_done) {
/* Controller timed out */
- dev_err(&dev->adev->dev, "read from slave 0x%x timed out\n",
- dev->cli.slave_adr);
+ dev_err(&priv->adev->dev, "read from slave 0x%x timed out\n",
+ priv->cli.slave_adr);
status = -ETIMEDOUT;
}
return status;
}
-static void fill_tx_fifo(struct nmk_i2c_dev *dev, int no_bytes)
+static void fill_tx_fifo(struct nmk_i2c_dev *priv, int no_bytes)
{
int count;
for (count = (no_bytes - 2);
(count > 0) &&
- (dev->cli.count != 0);
+ (priv->cli.count != 0);
count--) {
/* write to the Tx FIFO */
- writeb(*dev->cli.buffer,
- dev->virtbase + I2C_TFR);
- dev->cli.buffer++;
- dev->cli.count--;
- dev->cli.xfer_bytes++;
+ nmk_i2c_writeb(priv, *priv->cli.buffer, I2C_TFR);
+ priv->cli.buffer++;
+ priv->cli.count--;
+ priv->cli.xfer_bytes++;
}
}
/**
* write_i2c() - Write data to I2C client.
- * @dev: private data of I2C Driver
+ * @priv: private data of I2C Driver
* @flags: message flags
*
* This function writes data to I2C client
*/
-static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
+static int write_i2c(struct nmk_i2c_dev *priv, u16 flags)
{
- u32 status = 0;
u32 mcr, irq_mask;
- unsigned long timeout;
+ u32 status = 0;
+ bool xfer_done;
- mcr = load_i2c_mcr_reg(dev, flags);
+ mcr = load_i2c_mcr_reg(priv, flags);
- writel(mcr, dev->virtbase + I2C_MCR);
+ writel(mcr, priv->virtbase + I2C_MCR);
/* load the current CR value */
- writel(readl(dev->virtbase + I2C_CR) | DEFAULT_I2C_REG_CR,
- dev->virtbase + I2C_CR);
+ writel(readl(priv->virtbase + I2C_CR) | DEFAULT_I2C_REG_CR,
+ priv->virtbase + I2C_CR);
/* enable the controller */
- i2c_set_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
+ i2c_set_bit(priv->virtbase + I2C_CR, I2C_CR_PE);
- init_completion(&dev->xfer_complete);
+ init_waitqueue_head(&priv->xfer_wq);
+ priv->xfer_done = false;
/* enable interrupts by settings the masks */
irq_mask = (I2C_IT_TXFOVR | I2C_IT_MAL | I2C_IT_BERR);
/* Fill the TX FIFO with transmit data */
- fill_tx_fifo(dev, MAX_I2C_FIFO_THRESHOLD);
+ fill_tx_fifo(priv, MAX_I2C_FIFO_THRESHOLD);
- if (dev->cli.count != 0)
+ if (priv->cli.count != 0)
irq_mask |= I2C_IT_TXFNE;
/*
@@ -543,23 +609,22 @@ static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
* set the MTDWS bit (Master Transaction Done Without Stop)
* to start repeated start operation
*/
- if (dev->stop || !dev->vendor->has_mtdws)
+ if (priv->stop || !priv->vendor->has_mtdws)
irq_mask |= I2C_IT_MTD;
else
irq_mask |= I2C_IT_MTDWS;
- irq_mask = I2C_CLEAR_ALL_INTS & IRQ_MASK(irq_mask);
+ irq_mask &= I2C_CLEAR_ALL_INTS;
- writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask,
- dev->virtbase + I2C_IMSCR);
+ writel(readl(priv->virtbase + I2C_IMSCR) | irq_mask,
+ priv->virtbase + I2C_IMSCR);
- timeout = wait_for_completion_timeout(
- &dev->xfer_complete, dev->adap.timeout);
+ xfer_done = nmk_i2c_wait_xfer_done(priv);
- if (timeout == 0) {
+ if (!xfer_done) {
/* Controller timed out */
- dev_err(&dev->adev->dev, "write to slave 0x%x timed out\n",
- dev->cli.slave_adr);
+ dev_err(&priv->adev->dev, "write to slave 0x%x timed out\n",
+ priv->cli.slave_adr);
status = -ETIMEDOUT;
}
@@ -568,44 +633,39 @@ static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
/**
* nmk_i2c_xfer_one() - transmit a single I2C message
- * @dev: device with a message encoded into it
+ * @priv: device with a message encoded into it
* @flags: message flags
*/
-static int nmk_i2c_xfer_one(struct nmk_i2c_dev *dev, u16 flags)
+static int nmk_i2c_xfer_one(struct nmk_i2c_dev *priv, u16 flags)
{
int status;
if (flags & I2C_M_RD) {
/* read operation */
- dev->cli.operation = I2C_READ;
- status = read_i2c(dev, flags);
+ priv->cli.operation = I2C_READ;
+ status = read_i2c(priv, flags);
} else {
/* write operation */
- dev->cli.operation = I2C_WRITE;
- status = write_i2c(dev, flags);
+ priv->cli.operation = I2C_WRITE;
+ status = write_i2c(priv, flags);
}
- if (status || (dev->result)) {
+ if (status || priv->result) {
u32 i2c_sr;
u32 cause;
- i2c_sr = readl(dev->virtbase + I2C_SR);
- /*
- * Check if the controller I2C operation status
- * is set to ABORT(11b).
- */
- if (((i2c_sr >> 2) & 0x3) == 0x3) {
- /* get the abort cause */
- cause = (i2c_sr >> 4) & 0x7;
- dev_err(&dev->adev->dev, "%s\n",
+ i2c_sr = readl(priv->virtbase + I2C_SR);
+ if (FIELD_GET(I2C_SR_STATUS, i2c_sr) == I2C_ABORT) {
+ cause = FIELD_GET(I2C_SR_CAUSE, i2c_sr);
+ dev_err(&priv->adev->dev, "%s\n",
cause >= ARRAY_SIZE(abort_causes) ?
"unknown reason" :
abort_causes[cause]);
}
- (void) init_hw(dev);
+ init_hw(priv);
- status = status ? status : dev->result;
+ status = status ? status : priv->result;
}
return status;
@@ -663,24 +723,24 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
{
int status = 0;
int i;
- struct nmk_i2c_dev *dev = i2c_get_adapdata(i2c_adap);
+ struct nmk_i2c_dev *priv = i2c_get_adapdata(i2c_adap);
int j;
- pm_runtime_get_sync(&dev->adev->dev);
+ pm_runtime_get_sync(&priv->adev->dev);
/* Attempt three times to send the message queue */
for (j = 0; j < 3; j++) {
/* setup the i2c controller */
- setup_i2c_controller(dev);
+ setup_i2c_controller(priv);
for (i = 0; i < num_msgs; i++) {
- dev->cli.slave_adr = msgs[i].addr;
- dev->cli.buffer = msgs[i].buf;
- dev->cli.count = msgs[i].len;
- dev->stop = (i < (num_msgs - 1)) ? 0 : 1;
- dev->result = 0;
+ priv->cli.slave_adr = msgs[i].addr;
+ priv->cli.buffer = msgs[i].buf;
+ priv->cli.count = msgs[i].len;
+ priv->stop = (i < (num_msgs - 1)) ? 0 : 1;
+ priv->result = 0;
- status = nmk_i2c_xfer_one(dev, msgs[i].flags);
+ status = nmk_i2c_xfer_one(priv, msgs[i].flags);
if (status != 0)
break;
}
@@ -688,7 +748,7 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
break;
}
- pm_runtime_put_sync(&dev->adev->dev);
+ pm_runtime_put_sync(&priv->adev->dev);
/* return the no. of messages processed */
if (status)
@@ -699,14 +759,14 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
/**
* disable_interrupts() - disable the interrupts
- * @dev: private data of controller
+ * @priv: private data of controller
* @irq: interrupt number
*/
-static int disable_interrupts(struct nmk_i2c_dev *dev, u32 irq)
+static int disable_interrupts(struct nmk_i2c_dev *priv, u32 irq)
{
- irq = IRQ_MASK(irq);
- writel(readl(dev->virtbase + I2C_IMSCR) & ~(I2C_CLEAR_ALL_INTS & irq),
- dev->virtbase + I2C_IMSCR);
+ irq &= I2C_CLEAR_ALL_INTS;
+ writel(readl(priv->virtbase + I2C_IMSCR) & ~irq,
+ priv->virtbase + I2C_IMSCR);
return 0;
}
@@ -723,38 +783,39 @@ static int disable_interrupts(struct nmk_i2c_dev *dev, u32 irq)
*/
static irqreturn_t i2c_irq_handler(int irq, void *arg)
{
- struct nmk_i2c_dev *dev = arg;
+ struct nmk_i2c_dev *priv = arg;
+ struct device *dev = &priv->adev->dev;
u32 tft, rft;
u32 count;
u32 misr, src;
/* load Tx FIFO and Rx FIFO threshold values */
- tft = readl(dev->virtbase + I2C_TFTR);
- rft = readl(dev->virtbase + I2C_RFTR);
+ tft = readl(priv->virtbase + I2C_TFTR);
+ rft = readl(priv->virtbase + I2C_RFTR);
/* read interrupt status register */
- misr = readl(dev->virtbase + I2C_MISR);
+ misr = readl(priv->virtbase + I2C_MISR);
src = __ffs(misr);
- switch ((1 << src)) {
+ switch (BIT(src)) {
/* Transmit FIFO nearly empty interrupt */
case I2C_IT_TXFNE:
{
- if (dev->cli.operation == I2C_READ) {
+ if (priv->cli.operation == I2C_READ) {
/*
* in a read operation we do not care about writing,
* so disable the Transmit FIFO interrupt
*/
- disable_interrupts(dev, I2C_IT_TXFNE);
+ disable_interrupts(priv, I2C_IT_TXFNE);
} else {
- fill_tx_fifo(dev, (MAX_I2C_FIFO_THRESHOLD - tft));
+ fill_tx_fifo(priv, (MAX_I2C_FIFO_THRESHOLD - tft));
/*
* if done, close the transfer by disabling the
* corresponding TXFNE interrupt
*/
- if (dev->cli.count == 0)
- disable_interrupts(dev, I2C_IT_TXFNE);
+ if (priv->cli.count == 0)
+ disable_interrupts(priv, I2C_IT_TXFNE);
}
}
break;
@@ -768,60 +829,63 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
case I2C_IT_RXFNF:
for (count = rft; count > 0; count--) {
/* Read the Rx FIFO */
- *dev->cli.buffer = readb(dev->virtbase + I2C_RFR);
- dev->cli.buffer++;
+ *priv->cli.buffer = nmk_i2c_readb(priv, I2C_RFR);
+ priv->cli.buffer++;
}
- dev->cli.count -= rft;
- dev->cli.xfer_bytes += rft;
+ priv->cli.count -= rft;
+ priv->cli.xfer_bytes += rft;
break;
/* Rx FIFO full */
case I2C_IT_RXFF:
for (count = MAX_I2C_FIFO_THRESHOLD; count > 0; count--) {
- *dev->cli.buffer = readb(dev->virtbase + I2C_RFR);
- dev->cli.buffer++;
+ *priv->cli.buffer = nmk_i2c_readb(priv, I2C_RFR);
+ priv->cli.buffer++;
}
- dev->cli.count -= MAX_I2C_FIFO_THRESHOLD;
- dev->cli.xfer_bytes += MAX_I2C_FIFO_THRESHOLD;
+ priv->cli.count -= MAX_I2C_FIFO_THRESHOLD;
+ priv->cli.xfer_bytes += MAX_I2C_FIFO_THRESHOLD;
break;
/* Master Transaction Done with/without stop */
case I2C_IT_MTD:
case I2C_IT_MTDWS:
- if (dev->cli.operation == I2C_READ) {
- while (!(readl(dev->virtbase + I2C_RISR)
+ if (priv->cli.operation == I2C_READ) {
+ while (!(readl(priv->virtbase + I2C_RISR)
& I2C_IT_RXFE)) {
- if (dev->cli.count == 0)
+ if (priv->cli.count == 0)
break;
- *dev->cli.buffer =
- readb(dev->virtbase + I2C_RFR);
- dev->cli.buffer++;
- dev->cli.count--;
- dev->cli.xfer_bytes++;
+ *priv->cli.buffer =
+ nmk_i2c_readb(priv, I2C_RFR);
+ priv->cli.buffer++;
+ priv->cli.count--;
+ priv->cli.xfer_bytes++;
}
}
- disable_all_interrupts(dev);
- clear_all_interrupts(dev);
+ disable_all_interrupts(priv);
+ clear_all_interrupts(priv);
- if (dev->cli.count) {
- dev->result = -EIO;
- dev_err(&dev->adev->dev,
- "%lu bytes still remain to be xfered\n",
- dev->cli.count);
- (void) init_hw(dev);
+ if (priv->cli.count) {
+ priv->result = -EIO;
+ dev_err(dev, "%lu bytes still remain to be xfered\n",
+ priv->cli.count);
+ init_hw(priv);
}
- complete(&dev->xfer_complete);
+ priv->xfer_done = true;
+ wake_up(&priv->xfer_wq);
+
break;
/* Master Arbitration lost interrupt */
case I2C_IT_MAL:
- dev->result = -EIO;
- (void) init_hw(dev);
+ priv->result = -EIO;
+ init_hw(priv);
+
+ i2c_set_bit(priv->virtbase + I2C_ICR, I2C_IT_MAL);
+ priv->xfer_done = true;
+ wake_up(&priv->xfer_wq);
- i2c_set_bit(dev->virtbase + I2C_ICR, I2C_IT_MAL);
- complete(&dev->xfer_complete);
break;
@@ -831,15 +895,20 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
* during the transaction.
*/
case I2C_IT_BERR:
- dev->result = -EIO;
- /* get the status */
- if (((readl(dev->virtbase + I2C_SR) >> 2) & 0x3) == I2C_ABORT)
- (void) init_hw(dev);
+ {
+ u32 sr;
- i2c_set_bit(dev->virtbase + I2C_ICR, I2C_IT_BERR);
- complete(&dev->xfer_complete);
+ sr = readl(priv->virtbase + I2C_SR);
+ priv->result = -EIO;
+ if (FIELD_GET(I2C_SR_STATUS, sr) == I2C_ABORT)
+ init_hw(priv);
- break;
+ i2c_set_bit(priv->virtbase + I2C_ICR, I2C_IT_BERR);
+ priv->xfer_done = true;
+ wake_up(&priv->xfer_wq);
+
+ }
+ break;
/*
* Tx FIFO overrun interrupt.
@@ -847,11 +916,13 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
* the Tx FIFO is full.
*/
case I2C_IT_TXFOVR:
- dev->result = -EIO;
- (void) init_hw(dev);
+ priv->result = -EIO;
+ init_hw(priv);
+
+ dev_err(dev, "Tx Fifo Over run\n");
+ priv->xfer_done = true;
+ wake_up(&priv->xfer_wq);
- dev_err(&dev->adev->dev, "Tx Fifo Over run\n");
- complete(&dev->xfer_complete);
break;
@@ -863,10 +934,10 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
case I2C_IT_RFSE:
case I2C_IT_WTSR:
case I2C_IT_STD:
- dev_err(&dev->adev->dev, "unhandled Interrupt\n");
+ dev_err(dev, "unhandled Interrupt\n");
break;
default:
- dev_err(&dev->adev->dev, "spurious Interrupt..\n");
+ dev_err(dev, "spurious Interrupt..\n");
break;
}
@@ -893,9 +964,9 @@ static int nmk_i2c_resume_early(struct device *dev)
static int nmk_i2c_runtime_suspend(struct device *dev)
{
struct amba_device *adev = to_amba_device(dev);
- struct nmk_i2c_dev *nmk_i2c = amba_get_drvdata(adev);
+ struct nmk_i2c_dev *priv = amba_get_drvdata(adev);
- clk_disable_unprepare(nmk_i2c->clk);
+ clk_disable_unprepare(priv->clk);
pinctrl_pm_select_idle_state(dev);
return 0;
}
@@ -903,10 +974,10 @@ static int nmk_i2c_runtime_suspend(struct device *dev)
static int nmk_i2c_runtime_resume(struct device *dev)
{
struct amba_device *adev = to_amba_device(dev);
- struct nmk_i2c_dev *nmk_i2c = amba_get_drvdata(adev);
+ struct nmk_i2c_dev *priv = amba_get_drvdata(adev);
int ret;
- ret = clk_prepare_enable(nmk_i2c->clk);
+ ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(dev, "can't prepare_enable clock\n");
return ret;
@@ -914,9 +985,9 @@ static int nmk_i2c_runtime_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
- ret = init_hw(nmk_i2c);
+ ret = init_hw(priv);
if (ret) {
- clk_disable_unprepare(nmk_i2c->clk);
+ clk_disable_unprepare(priv->clk);
pinctrl_pm_select_idle_state(dev);
}
@@ -939,107 +1010,160 @@ static const struct i2c_algorithm nmk_i2c_algo = {
};
static void nmk_i2c_of_probe(struct device_node *np,
- struct nmk_i2c_dev *nmk)
+ struct nmk_i2c_dev *priv)
{
+ u32 timeout_usecs;
+
/* Default to 100 kHz if no frequency is given in the node */
- if (of_property_read_u32(np, "clock-frequency", &nmk->clk_freq))
- nmk->clk_freq = I2C_MAX_STANDARD_MODE_FREQ;
+ if (of_property_read_u32(np, "clock-frequency", &priv->clk_freq))
+ priv->clk_freq = I2C_MAX_STANDARD_MODE_FREQ;
/* This driver only supports 'standard' and 'fast' modes of operation. */
- if (nmk->clk_freq <= I2C_MAX_STANDARD_MODE_FREQ)
- nmk->sm = I2C_FREQ_MODE_STANDARD;
+ if (priv->clk_freq <= I2C_MAX_STANDARD_MODE_FREQ)
+ priv->sm = I2C_FREQ_MODE_STANDARD;
+ else
+ priv->sm = I2C_FREQ_MODE_FAST;
+ priv->tft = 1; /* Tx FIFO threshold */
+ priv->rft = 8; /* Rx FIFO threshold */
+
+ /* Slave response timeout */
+ if (!of_property_read_u32(np, "i2c-transfer-timeout-us", &timeout_usecs))
+ priv->timeout_usecs = timeout_usecs;
+ else
+ priv->timeout_usecs = 200 * USEC_PER_MSEC;
+}
+
+static const unsigned int nmk_i2c_eyeq5_masks[] = {
+ GENMASK(5, 4),
+ GENMASK(7, 6),
+ GENMASK(9, 8),
+ GENMASK(11, 10),
+ GENMASK(13, 12),
+};
+
+static int nmk_i2c_eyeq5_probe(struct nmk_i2c_dev *priv)
+{
+ struct device *dev = &priv->adev->dev;
+ struct device_node *np = dev->of_node;
+ unsigned int mask, speed_mode;
+ struct regmap *olb;
+ unsigned int id;
+
+ priv->has_32b_bus = true;
+
+ olb = syscon_regmap_lookup_by_phandle_args(np, "mobileye,olb", 1, &id);
+ if (IS_ERR(olb))
+ return PTR_ERR(olb);
+ if (id >= ARRAY_SIZE(nmk_i2c_eyeq5_masks))
+ return -ENOENT;
+
+ if (priv->clk_freq <= 400000)
+ speed_mode = I2C_EYEQ5_SPEED_FAST;
+ else if (priv->clk_freq <= 1000000)
+ speed_mode = I2C_EYEQ5_SPEED_FAST_PLUS;
else
- nmk->sm = I2C_FREQ_MODE_FAST;
- nmk->tft = 1; /* Tx FIFO threshold */
- nmk->rft = 8; /* Rx FIFO threshold */
- nmk->timeout = 200; /* Slave response timeout(ms) */
+ speed_mode = I2C_EYEQ5_SPEED_HIGH_SPEED;
+
+ mask = nmk_i2c_eyeq5_masks[id];
+ regmap_update_bits(olb, NMK_I2C_EYEQ5_OLB_IOCR2,
+ mask, speed_mode << __fls(mask));
+
+ return 0;
}
static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
+ struct nmk_i2c_dev *priv;
struct device_node *np = adev->dev.of_node;
- struct nmk_i2c_dev *dev;
+ struct device *dev = &adev->dev;
struct i2c_adapter *adap;
struct i2c_vendor_data *vendor = id->data;
u32 max_fifo_threshold = (vendor->fifodepth / 2) - 1;
- dev = devm_kzalloc(&adev->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- dev->vendor = vendor;
- dev->adev = adev;
- nmk_i2c_of_probe(np, dev);
+ priv->vendor = vendor;
+ priv->adev = adev;
+ priv->has_32b_bus = false;
+ nmk_i2c_of_probe(np, priv);
+
+ if (of_device_is_compatible(np, "mobileye,eyeq5-i2c")) {
+ ret = nmk_i2c_eyeq5_probe(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed OLB lookup\n");
+ }
- if (dev->tft > max_fifo_threshold) {
- dev_warn(&adev->dev, "requested TX FIFO threshold %u, adjusted down to %u\n",
- dev->tft, max_fifo_threshold);
- dev->tft = max_fifo_threshold;
+ if (priv->tft > max_fifo_threshold) {
+ dev_warn(dev, "requested TX FIFO threshold %u, adjusted down to %u\n",
+ priv->tft, max_fifo_threshold);
+ priv->tft = max_fifo_threshold;
}
- if (dev->rft > max_fifo_threshold) {
- dev_warn(&adev->dev, "requested RX FIFO threshold %u, adjusted down to %u\n",
- dev->rft, max_fifo_threshold);
- dev->rft = max_fifo_threshold;
+ if (priv->rft > max_fifo_threshold) {
+ dev_warn(dev, "requested RX FIFO threshold %u, adjusted down to %u\n",
+ priv->rft, max_fifo_threshold);
+ priv->rft = max_fifo_threshold;
}
- amba_set_drvdata(adev, dev);
+ amba_set_drvdata(adev, priv);
- dev->virtbase = devm_ioremap(&adev->dev, adev->res.start,
- resource_size(&adev->res));
- if (!dev->virtbase)
+ priv->virtbase = devm_ioremap(dev, adev->res.start,
+ resource_size(&adev->res));
+ if (!priv->virtbase)
return -ENOMEM;
- dev->irq = adev->irq[0];
- ret = devm_request_irq(&adev->dev, dev->irq, i2c_irq_handler, 0,
- DRIVER_NAME, dev);
+ priv->irq = adev->irq[0];
+ ret = devm_request_irq(dev, priv->irq, i2c_irq_handler, 0,
+ DRIVER_NAME, priv);
if (ret)
- return dev_err_probe(&adev->dev, ret,
- "cannot claim the irq %d\n", dev->irq);
+ return dev_err_probe(dev, ret,
+ "cannot claim the irq %d\n", priv->irq);
- dev->clk = devm_clk_get_enabled(&adev->dev, NULL);
- if (IS_ERR(dev->clk))
- return dev_err_probe(&adev->dev, PTR_ERR(dev->clk),
+ priv->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(priv->clk))
+		return dev_err_probe(dev, PTR_ERR(priv->clk),
				     "could not enable i2c clock\n");
- init_hw(dev);
+ init_hw(priv);
- adap = &dev->adap;
+ adap = &priv->adap;
adap->dev.of_node = np;
- adap->dev.parent = &adev->dev;
+ adap->dev.parent = dev;
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DEPRECATED;
adap->algo = &nmk_i2c_algo;
- adap->timeout = msecs_to_jiffies(dev->timeout);
+ adap->timeout = usecs_to_jiffies(priv->timeout_usecs);
snprintf(adap->name, sizeof(adap->name),
"Nomadik I2C at %pR", &adev->res);
- i2c_set_adapdata(adap, dev);
+ i2c_set_adapdata(adap, priv);
- dev_info(&adev->dev,
+ dev_info(dev,
"initialize %s on virtual base %p\n",
- adap->name, dev->virtbase);
+ adap->name, priv->virtbase);
ret = i2c_add_adapter(adap);
if (ret)
return ret;
- pm_runtime_put(&adev->dev);
+ pm_runtime_put(dev);
return 0;
}
static void nmk_i2c_remove(struct amba_device *adev)
{
- struct nmk_i2c_dev *dev = amba_get_drvdata(adev);
+ struct nmk_i2c_dev *priv = amba_get_drvdata(adev);
- i2c_del_adapter(&dev->adap);
- flush_i2c_fifo(dev);
- disable_all_interrupts(dev);
- clear_all_interrupts(dev);
+ i2c_del_adapter(&priv->adap);
+ flush_i2c_fifo(priv);
+ disable_all_interrupts(priv);
+ clear_all_interrupts(priv);
/* disable the controller */
- i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
+ i2c_clr_bit(priv->virtbase + I2C_CR, I2C_CR_PE);
}
static struct i2c_vendor_data vendor_stn8815 = {
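The hunks above replace the driver's struct completion with an xfer_done flag and a waitqueue so the transfer path can honor the new i2c-transfer-timeout-us property. A minimal sketch of the waiting side that pairs with these IRQ handler changes (the helper name is illustrative, not part of the patch):

	/* Sketch: block until the IRQ handler signals end of transfer. */
	static int nmk_i2c_wait_xfer(struct nmk_i2c_dev *priv)
	{
		long tmo = usecs_to_jiffies(priv->timeout_usecs);

		/*
		 * wait_event_timeout() returns 0 on timeout; the IRQ
		 * handler sets priv->xfer_done and wakes priv->xfer_wq.
		 */
		if (!wait_event_timeout(priv->xfer_wq, priv->xfer_done, tmo))
			return -ETIMEDOUT;

		return priv->result;
	}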
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index f5dfc33b97c0ab..c3f4ff08ac3851 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -49,6 +49,7 @@
#include <linux/pm.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <dt-bindings/mux/mux.h>
@@ -116,6 +117,9 @@ struct pca954x {
unsigned int irq_mask;
raw_spinlock_t lock;
struct regulator *supply;
+
+ struct gpio_desc *reset_gpio;
+ struct reset_control *reset_cont;
};
/* Provide specs for the MAX735x, PCA954x and PCA984x types we know about */
@@ -518,6 +522,35 @@ static int pca954x_init(struct i2c_client *client, struct pca954x *data)
return ret;
}
+static int pca954x_get_reset(struct device *dev, struct pca954x *data)
+{
+ data->reset_cont = devm_reset_control_get_optional_shared(dev, NULL);
+ if (IS_ERR(data->reset_cont))
+ return dev_err_probe(dev, PTR_ERR(data->reset_cont),
+ "Failed to get reset\n");
+	if (data->reset_cont)
+ return 0;
+
+	/*
+	 * Fall back to the legacy reset-gpios property.
+	 */
+ data->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(data->reset_gpio)) {
+ return dev_err_probe(dev, PTR_ERR(data->reset_gpio),
+				     "Failed to get reset gpio\n");
+ }
+
+ return 0;
+}
+
+static void pca954x_reset_deassert(struct pca954x *data)
+{
+ if (data->reset_cont)
+ reset_control_deassert(data->reset_cont);
+ else
+ gpiod_set_value_cansleep(data->reset_gpio, 0);
+}
+
/*
* I2C init/probing/exit functions
*/
@@ -526,7 +559,6 @@ static int pca954x_probe(struct i2c_client *client)
const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct i2c_adapter *adap = client->adapter;
struct device *dev = &client->dev;
- struct gpio_desc *gpio;
struct i2c_mux_core *muxc;
struct pca954x *data;
int num;
@@ -554,15 +586,13 @@ static int pca954x_probe(struct i2c_client *client)
return dev_err_probe(dev, ret,
"Failed to enable vdd supply\n");
- /* Reset the mux if a reset GPIO is specified. */
- gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(gpio)) {
- ret = PTR_ERR(gpio);
+ ret = pca954x_get_reset(dev, data);
+ if (ret)
goto fail_cleanup;
- }
- if (gpio) {
+
+ if (data->reset_cont || data->reset_gpio) {
udelay(1);
- gpiod_set_value_cansleep(gpio, 0);
+ pca954x_reset_deassert(data);
/* Give the chip some time to recover. */
udelay(1);
}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index b58f5a3311c3a8..e4cb26f6a9434e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1711,6 +1711,14 @@ static size_t iommu_dma_opt_mapping_size(void)
return iova_rcache_range();
}
+static size_t iommu_dma_max_mapping_size(struct device *dev)
+{
+ if (dev_is_untrusted(dev))
+ return swiotlb_max_mapping_size(dev);
+
+ return SIZE_MAX;
+}
+
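
iommu_dma_max_mapping_size() is surfaced through dma_max_mapping_size(): an untrusted device has all of its DMA bounced through swiotlb, so a single mapping must not exceed what swiotlb can bounce. A hedged consumer-side sketch (the function name is illustrative):

	#include <linux/dma-mapping.h>

	/* Cap a request so a single dma_map_* call cannot fail on size. */
	static size_t foo_cap_request(struct device *dev, size_t want)
	{
		return min_t(size_t, want, dma_max_mapping_size(dev));
	}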
static const struct dma_map_ops iommu_dma_ops = {
.flags = DMA_F_PCI_P2PDMA_SUPPORTED,
.alloc = iommu_dma_alloc,
@@ -1733,6 +1741,7 @@ static const struct dma_map_ops iommu_dma_ops = {
.unmap_resource = iommu_dma_unmap_resource,
.get_merge_boundary = iommu_dma_get_merge_boundary,
.opt_mapping_size = iommu_dma_opt_mapping_size,
+ .max_mapping_size = iommu_dma_max_mapping_size,
};
/*
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index 9494fc26259c35..ae67fec2ab468f 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -85,10 +85,9 @@ static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
return data->domain->host_data;
}
-static void rzg2l_irq_eoi(struct irq_data *d)
+static void rzg2l_clear_irq_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
{
- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
- struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hw_irq = hwirq - IRQC_IRQ_START;
u32 bit = BIT(hw_irq);
u32 iitsr, iscr;
@@ -99,20 +98,30 @@ static void rzg2l_irq_eoi(struct irq_data *d)
* ISCR can only be cleared if the type is falling-edge, rising-edge or
* falling/rising-edge.
*/
- if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq)))
+ if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq))) {
writel_relaxed(iscr & ~bit, priv->base + ISCR);
+ /*
+		 * Make sure the posted write is flushed, otherwise the
+		 * interrupt that was just handled may be raised again.
+ */
+ readl_relaxed(priv->base + ISCR);
+ }
}
-static void rzg2l_tint_eoi(struct irq_data *d)
+static void rzg2l_clear_tint_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
{
- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_TINT_START;
- struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
- u32 bit = BIT(hw_irq);
+ u32 bit = BIT(hwirq - IRQC_TINT_START);
u32 reg;
reg = readl_relaxed(priv->base + TSCR);
- if (reg & bit)
+ if (reg & bit) {
writel_relaxed(reg & ~bit, priv->base + TSCR);
+ /*
+		 * Make sure the posted write is flushed, otherwise the
+		 * interrupt that was just handled may be raised again.
+ */
+ readl_relaxed(priv->base + TSCR);
+ }
}
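
Both clear helpers rely on the same MMIO idiom: the relaxed write that clears the status bit is posted, so it is read back before the EOI completes; otherwise the stale bit can immediately re-raise the interrupt. The pattern in isolation (names are illustrative):

	/* Clear a status bit and make sure the write reached the device. */
	static void clear_and_flush(void __iomem *reg, u32 bit)
	{
		u32 val = readl_relaxed(reg);

		if (val & bit) {
			writel_relaxed(val & ~bit, reg);
			readl_relaxed(reg);	/* flush the posted write */
		}
	}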
static void rzg2l_irqc_eoi(struct irq_data *d)
@@ -122,9 +131,9 @@ static void rzg2l_irqc_eoi(struct irq_data *d)
raw_spin_lock(&priv->lock);
if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
- rzg2l_irq_eoi(d);
+ rzg2l_clear_irq_int(priv, hw_irq);
else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
- rzg2l_tint_eoi(d);
+ rzg2l_clear_tint_int(priv, hw_irq);
raw_spin_unlock(&priv->lock);
irq_chip_eoi_parent(d);
}
@@ -142,7 +151,7 @@ static void rzg2l_irqc_irq_disable(struct irq_data *d)
raw_spin_lock(&priv->lock);
reg = readl_relaxed(priv->base + TSSR(tssr_index));
- reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
+ reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
writel_relaxed(reg, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
}
@@ -154,7 +163,6 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
unsigned int hw_irq = irqd_to_hwirq(d);
if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
- unsigned long tint = (uintptr_t)irq_data_get_irq_chip_data(d);
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
u32 offset = hw_irq - IRQC_TINT_START;
u32 tssr_offset = TSSR_OFFSET(offset);
@@ -163,7 +171,7 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
raw_spin_lock(&priv->lock);
reg = readl_relaxed(priv->base + TSSR(tssr_index));
- reg |= (TIEN | tint) << TSSEL_SHIFT(tssr_offset);
+ reg |= TIEN << TSSEL_SHIFT(tssr_offset);
writel_relaxed(reg, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
}
@@ -172,8 +180,10 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
{
- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+ u32 iitseln = hwirq - IRQC_IRQ_START;
+ bool clear_irq_int = false;
u16 sense, tmp;
switch (type & IRQ_TYPE_SENSE_MASK) {
@@ -183,14 +193,17 @@ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
case IRQ_TYPE_EDGE_FALLING:
sense = IITSR_IITSEL_EDGE_FALLING;
+ clear_irq_int = true;
break;
case IRQ_TYPE_EDGE_RISING:
sense = IITSR_IITSEL_EDGE_RISING;
+ clear_irq_int = true;
break;
case IRQ_TYPE_EDGE_BOTH:
sense = IITSR_IITSEL_EDGE_BOTH;
+ clear_irq_int = true;
break;
default:
@@ -199,21 +212,40 @@ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
raw_spin_lock(&priv->lock);
tmp = readl_relaxed(priv->base + IITSR);
- tmp &= ~IITSR_IITSEL_MASK(hw_irq);
- tmp |= IITSR_IITSEL(hw_irq, sense);
+ tmp &= ~IITSR_IITSEL_MASK(iitseln);
+ tmp |= IITSR_IITSEL(iitseln, sense);
+ if (clear_irq_int)
+ rzg2l_clear_irq_int(priv, hwirq);
writel_relaxed(tmp, priv->base + IITSR);
raw_spin_unlock(&priv->lock);
return 0;
}
+static u32 rzg2l_disable_tint_and_set_tint_source(struct irq_data *d, struct rzg2l_irqc_priv *priv,
+ u32 reg, u32 tssr_offset, u8 tssr_index)
+{
+ u32 tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
+ u32 tien = reg & (TIEN << TSSEL_SHIFT(tssr_offset));
+
+ /* Clear the relevant byte in reg */
+ reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
+ /* Set TINT and leave TIEN clear */
+ reg |= tint << TSSEL_SHIFT(tssr_offset);
+ writel_relaxed(reg, priv->base + TSSR(tssr_index));
+
+ return reg | tien;
+}
+
static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
{
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
unsigned int hwirq = irqd_to_hwirq(d);
u32 titseln = hwirq - IRQC_TINT_START;
+ u32 tssr_offset = TSSR_OFFSET(titseln);
+ u8 tssr_index = TSSR_INDEX(titseln);
u8 index, sense;
- u32 reg;
+ u32 reg, tssr;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_RISING:
@@ -235,10 +267,14 @@ static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
}
raw_spin_lock(&priv->lock);
+ tssr = readl_relaxed(priv->base + TSSR(tssr_index));
+ tssr = rzg2l_disable_tint_and_set_tint_source(d, priv, tssr, tssr_offset, tssr_index);
reg = readl_relaxed(priv->base + TITSR(index));
reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH));
reg |= sense << (titseln * TITSEL_WIDTH);
writel_relaxed(reg, priv->base + TITSR(index));
+ rzg2l_clear_tint_int(priv, hwirq);
+ writel_relaxed(tssr, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
return 0;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index d822ab2f739b07..37b9f8f1ae1a27 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1699,7 +1699,6 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
struct bio_vec bv;
sector_t sector, logical_sector, area, offset;
struct page *page;
- void *buffer;
get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
@@ -1708,13 +1707,14 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
logical_sector = dio->range.logical_sector;
page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
- buffer = page_to_virt(page);
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
unsigned pos = 0;
do {
+ sector_t alignment;
char *mem;
+ char *buffer = page_to_virt(page);
int r;
struct dm_io_request io_req;
struct dm_io_region io_loc;
@@ -1727,6 +1727,14 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
io_loc.sector = sector;
io_loc.count = ic->sectors_per_block;
+ /* Align the bio to logical block size */
+ alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);
+ alignment &= -alignment;
+ io_loc.sector = round_down(io_loc.sector, alignment);
+ io_loc.count += sector - io_loc.sector;
+ buffer += (sector - io_loc.sector) << SECTOR_SHIFT;
+ io_loc.count = round_up(io_loc.count, alignment);
+
r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r)) {
dio->bi_status = errno_to_blk_status(r);
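
The alignment computation uses the lowest-set-bit trick: in two's complement, x & -x isolates the least significant set bit, so OR-ing the start sector, the bio size and the sectors per page first yields the largest power-of-two granularity that divides all three. A worked example, assuming 4 KiB pages (8 sectors per page):

	/*
	 * logical_sector = 12 (0b1100), bio_sectors(bio) = 4, 8 sectors/page:
	 *   12 | 4 | 8 = 12 (0b1100)
	 *   12 & -12   =  4 (0b0100)
	 * so the recheck I/O is rounded to a 4-sector (2 KiB) boundary.
	 */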
@@ -1848,12 +1856,12 @@ again:
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
if (unlikely(r)) {
+ if (likely(checksums != checksums_onstack))
+ kfree(checksums);
if (r > 0) {
- integrity_recheck(dio, checksums);
+ integrity_recheck(dio, checksums_onstack);
goto skip_io;
}
- if (likely(checksums != checksums_onstack))
- kfree(checksums);
goto error;
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index bf7a574499a34d..0ace06d1bee384 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -684,8 +684,10 @@ static void dm_exception_table_exit(struct dm_exception_table *et,
for (i = 0; i < size; i++) {
slot = et->table + i;
- hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
+ hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list) {
kmem_cache_free(mem, ex);
+ cond_resched();
+ }
}
kvfree(et->table);
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index b92767d6bdd244..5178c02b21eba9 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -417,7 +417,7 @@ static void lkdtm_FAM_BOUNDS(void)
pr_err("FAIL: survived access of invalid flexible array member index!\n");
if (!__has_attribute(__counted_by__))
- pr_warn("This is expected since this %s was built a compiler supporting __counted_by\n",
+ pr_warn("This is expected since this %s was built with a compiler that does not support __counted_by\n",
lkdtm_kernel_info);
else if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
pr_expected_config(CONFIG_UBSAN_TRAP);
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 7499a540121e8b..e28a3af83c0eb8 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -113,4 +113,17 @@ config MTD_UBI_FAULT_INJECTION
testing purposes.
If in doubt, say "N".
+
+config MTD_UBI_NVMEM
+ tristate "UBI virtual NVMEM"
+ default n
+ depends on NVMEM
+ help
+	  This option enables an additional driver exposing UBI volumes as NVMEM
+	  providers, intended for platforms where UBI is part of the firmware
+	  specification and also used to store e.g. MAC addresses or
+	  board-specific Wi-Fi calibration data.
+
+ If in doubt, say "N".
+
endif # MTD_UBI
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index 543673605ca729..4b51aaf00d1a2a 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -7,3 +7,4 @@ ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
ubi-$(CONFIG_MTD_UBI_BLOCK) += block.o
obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
+obj-$(CONFIG_MTD_UBI_NVMEM) += nvmem.o
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 5c8fdcc088a0df..f82e3423acb9fb 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -65,10 +65,10 @@ struct ubiblock_pdu {
};
/* Numbers of elements set in the @ubiblock_param array */
-static int ubiblock_devs __initdata;
+static int ubiblock_devs;
/* MTD devices specification parameters */
-static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;
+static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES];
struct ubiblock {
struct ubi_volume_desc *desc;
@@ -536,6 +536,70 @@ static int ubiblock_resize(struct ubi_volume_info *vi)
return 0;
}
+static bool
+match_volume_desc(struct ubi_volume_info *vi, const char *name, int ubi_num, int vol_id)
+{
+ int err, len, cur_ubi_num, cur_vol_id;
+
+ if (ubi_num == -1) {
+ /* No ubi num, name must be a vol device path */
+ err = ubi_get_num_by_path(name, &cur_ubi_num, &cur_vol_id);
+ if (err || vi->ubi_num != cur_ubi_num || vi->vol_id != cur_vol_id)
+ return false;
+
+ return true;
+ }
+
+ if (vol_id == -1) {
+ /* Got ubi_num, but no vol_id, name must be volume name */
+ if (vi->ubi_num != ubi_num)
+ return false;
+
+ len = strnlen(name, UBI_VOL_NAME_MAX + 1);
+ if (len < 1 || vi->name_len != len)
+ return false;
+
+ if (strcmp(name, vi->name))
+ return false;
+
+ return true;
+ }
+
+ if (vi->ubi_num != ubi_num)
+ return false;
+
+ if (vi->vol_id != vol_id)
+ return false;
+
+ return true;
+}
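
match_volume_desc() accepts the same three spellings as the existing ubiblock 'block=' module parameter; doing the match from the volume-added notifier means volumes that appear after init, including those attached through the new MTD notifier below, get their block device as well. For reference, the documented parameter forms look roughly like:

	ubi.block=0,0           UBI device number and volume id
	ubi.block=0,rootfs      UBI device number and volume name
	ubi.block=/dev/ubi0_0   volume character device path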
+
+static void
+ubiblock_create_from_param(struct ubi_volume_info *vi)
+{
+ int i, ret = 0;
+ struct ubiblock_param *p;
+
+ /*
+ * Iterate over ubiblock cmdline parameters. If a parameter matches the
+	 * newly added volume, create the ubiblock device for it.
+ */
+ for (i = 0; i < ubiblock_devs; i++) {
+ p = &ubiblock_param[i];
+
+ if (!match_volume_desc(vi, p->name, p->ubi_num, p->vol_id))
+ continue;
+
+ ret = ubiblock_create(vi);
+ if (ret) {
+ pr_err(
+ "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
+ vi->name, p->ubi_num, p->vol_id, ret);
+ }
+ break;
+ }
+}
+
static int ubiblock_notify(struct notifier_block *nb,
unsigned long notification_type, void *ns_ptr)
{
@@ -543,10 +607,7 @@ static int ubiblock_notify(struct notifier_block *nb,
switch (notification_type) {
case UBI_VOLUME_ADDED:
- /*
- * We want to enforce explicit block device creation for
- * volumes, so when a volume is added we do nothing.
- */
+ ubiblock_create_from_param(&nt->vi);
break;
case UBI_VOLUME_REMOVED:
ubiblock_remove(&nt->vi);
@@ -572,56 +633,6 @@ static struct notifier_block ubiblock_notifier = {
.notifier_call = ubiblock_notify,
};
-static struct ubi_volume_desc * __init
-open_volume_desc(const char *name, int ubi_num, int vol_id)
-{
- if (ubi_num == -1)
- /* No ubi num, name must be a vol device path */
- return ubi_open_volume_path(name, UBI_READONLY);
- else if (vol_id == -1)
- /* No vol_id, must be vol_name */
- return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
- else
- return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
-}
-
-static void __init ubiblock_create_from_param(void)
-{
- int i, ret = 0;
- struct ubiblock_param *p;
- struct ubi_volume_desc *desc;
- struct ubi_volume_info vi;
-
- /*
- * If there is an error creating one of the ubiblocks, continue on to
- * create the following ubiblocks. This helps in a circumstance where
- * the kernel command-line specifies multiple block devices and some
- * may be broken, but we still want the working ones to come up.
- */
- for (i = 0; i < ubiblock_devs; i++) {
- p = &ubiblock_param[i];
-
- desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
- if (IS_ERR(desc)) {
- pr_err(
- "UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
- p->ubi_num, p->vol_id, PTR_ERR(desc));
- continue;
- }
-
- ubi_get_volume_info(desc, &vi);
- ubi_close_volume(desc);
-
- ret = ubiblock_create(&vi);
- if (ret) {
- pr_err(
- "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
- vi.name, p->ubi_num, p->vol_id, ret);
- continue;
- }
- }
-}
-
static void ubiblock_remove_all(void)
{
struct ubiblock *next;
@@ -647,18 +658,7 @@ int __init ubiblock_init(void)
if (ubiblock_major < 0)
return ubiblock_major;
- /*
- * Attach block devices from 'block=' module param.
- * Even if one block device in the param list fails to come up,
- * still allow the module to load and leave any others up.
- */
- ubiblock_create_from_param();
-
- /*
- * Block devices are only created upon user requests, so we ignore
- * existing volumes.
- */
- ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
+ ret = ubi_register_volume_notifier(&ubiblock_notifier, 0);
if (ret)
goto err_unreg;
return 0;
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 7d4ff1193db6f0..a7e3a6246c0e93 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -27,6 +27,7 @@
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/slab.h>
#include <linux/major.h>
#include "ubi.h"
@@ -92,7 +93,7 @@ static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);
-/* Protects @ubi_devices and @ubi->ref_count */
+/* Protects @ubi_devices, @ubi->ref_count and @ubi->is_dead */
static DEFINE_SPINLOCK(ubi_devices_lock);
/* "Show" method for files in '/<sysfs>/class/ubi/' */
@@ -260,6 +261,9 @@ struct ubi_device *ubi_get_device(int ubi_num)
spin_lock(&ubi_devices_lock);
ubi = ubi_devices[ubi_num];
+ if (ubi && ubi->is_dead)
+ ubi = NULL;
+
if (ubi) {
ubi_assert(ubi->ref_count >= 0);
ubi->ref_count += 1;
@@ -297,7 +301,7 @@ struct ubi_device *ubi_get_by_major(int major)
spin_lock(&ubi_devices_lock);
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
- if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ if (ubi && !ubi->is_dead && MAJOR(ubi->cdev.dev) == major) {
ubi_assert(ubi->ref_count >= 0);
ubi->ref_count += 1;
get_device(&ubi->dev);
@@ -326,7 +330,7 @@ int ubi_major2num(int major)
for (i = 0; i < UBI_MAX_DEVICES; i++) {
struct ubi_device *ubi = ubi_devices[i];
- if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ if (ubi && !ubi->is_dead && MAJOR(ubi->cdev.dev) == major) {
ubi_num = ubi->ubi_num;
break;
}
@@ -513,7 +517,7 @@ static void ubi_free_volumes_from(struct ubi_device *ubi, int from)
int i;
for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
- if (!ubi->volumes[i])
+ if (!ubi->volumes[i] || ubi->volumes[i]->is_dead)
continue;
ubi_eba_replace_table(ubi->volumes[i], NULL);
ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
@@ -1098,7 +1102,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
return -EINVAL;
spin_lock(&ubi_devices_lock);
- put_device(&ubi->dev);
ubi->ref_count -= 1;
if (ubi->ref_count) {
if (!anyway) {
@@ -1109,6 +1112,13 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_err(ubi, "%s reference count %d, destroy anyway",
ubi->ubi_name, ubi->ref_count);
}
+ ubi->is_dead = true;
+ spin_unlock(&ubi_devices_lock);
+
+ ubi_notify_all(ubi, UBI_VOLUME_SHUTDOWN, NULL);
+
+ spin_lock(&ubi_devices_lock);
+ put_device(&ubi->dev);
ubi_devices[ubi_num] = NULL;
spin_unlock(&ubi_devices_lock);
@@ -1219,43 +1229,43 @@ static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
return mtd;
}
-static int __init ubi_init(void)
+static void ubi_notify_add(struct mtd_info *mtd)
{
- int err, i, k;
+ struct device_node *np = mtd_get_of_node(mtd);
+ int err;
- /* Ensure that EC and VID headers have correct size */
- BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
- BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
+ if (!of_device_is_compatible(np, "linux,ubi"))
+ return;
- if (mtd_devs > UBI_MAX_DEVICES) {
- pr_err("UBI error: too many MTD devices, maximum is %d\n",
- UBI_MAX_DEVICES);
- return -EINVAL;
- }
+ /*
+	 * We are already holding &mtd_table_mutex, but still need to
+	 * bump the MTD device's refcount.
+ */
+ err = __get_mtd_device(mtd);
+ if (err)
+ return;
- /* Create base sysfs directory and sysfs files */
- err = class_register(&ubi_class);
+ /* called while holding mtd_table_mutex */
+ mutex_lock_nested(&ubi_devices_mutex, SINGLE_DEPTH_NESTING);
+ err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0, false, false);
+ mutex_unlock(&ubi_devices_mutex);
if (err < 0)
- return err;
-
- err = misc_register(&ubi_ctrl_cdev);
- if (err) {
- pr_err("UBI error: cannot register device\n");
- goto out;
- }
+ __put_mtd_device(mtd);
+}
- ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
- sizeof(struct ubi_wl_entry),
- 0, 0, NULL);
- if (!ubi_wl_entry_slab) {
- err = -ENOMEM;
- goto out_dev_unreg;
- }
+static void ubi_notify_remove(struct mtd_info *mtd)
+{
+ /* do nothing for now */
+}
- err = ubi_debugfs_init();
- if (err)
- goto out_slab;
+static struct mtd_notifier ubi_mtd_notifier = {
+ .add = ubi_notify_add,
+ .remove = ubi_notify_remove,
+};
+static int __init ubi_init_attach(void)
+{
+ int err, i, k;
/* Attach MTD devices */
for (i = 0; i < mtd_devs; i++) {
@@ -1304,25 +1314,79 @@ static int __init ubi_init(void)
}
}
+ return 0;
+
+out_detach:
+ for (k = 0; k < i; k++)
+ if (ubi_devices[k]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+ return err;
+}
+#ifndef CONFIG_MTD_UBI_MODULE
+late_initcall(ubi_init_attach);
+#endif
+
+static int __init ubi_init(void)
+{
+ int err;
+
+ /* Ensure that EC and VID headers have correct size */
+ BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
+ BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
+
+ if (mtd_devs > UBI_MAX_DEVICES) {
+ pr_err("UBI error: too many MTD devices, maximum is %d\n",
+ UBI_MAX_DEVICES);
+ return -EINVAL;
+ }
+
+ /* Create base sysfs directory and sysfs files */
+ err = class_register(&ubi_class);
+ if (err < 0)
+ return err;
+
+ err = misc_register(&ubi_ctrl_cdev);
+ if (err) {
+ pr_err("UBI error: cannot register device\n");
+ goto out;
+ }
+
+ ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
+ sizeof(struct ubi_wl_entry),
+ 0, 0, NULL);
+ if (!ubi_wl_entry_slab) {
+ err = -ENOMEM;
+ goto out_dev_unreg;
+ }
+
+ err = ubi_debugfs_init();
+ if (err)
+ goto out_slab;
+
err = ubiblock_init();
if (err) {
pr_err("UBI error: block: cannot initialize, error %d\n", err);
		/* See comment above regarding ubi_is_module(). */
if (ubi_is_module())
- goto out_detach;
+ goto out_slab;
+ }
+
+ register_mtd_user(&ubi_mtd_notifier);
+
+ if (ubi_is_module()) {
+ err = ubi_init_attach();
+ if (err)
+ goto out_mtd_notifier;
}
return 0;
-out_detach:
- for (k = 0; k < i; k++)
- if (ubi_devices[k]) {
- mutex_lock(&ubi_devices_mutex);
- ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
- mutex_unlock(&ubi_devices_mutex);
- }
- ubi_debugfs_exit();
+out_mtd_notifier:
+ unregister_mtd_user(&ubi_mtd_notifier);
out_slab:
kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
@@ -1332,13 +1396,15 @@ out:
pr_err("UBI error: cannot initialize UBI, error %d\n", err);
return err;
}
-late_initcall(ubi_init);
+device_initcall(ubi_init);
+
static void __exit ubi_exit(void)
{
int i;
ubiblock_exit();
+ unregister_mtd_user(&ubi_mtd_notifier);
for (i = 0; i < UBI_MAX_DEVICES; i++)
if (ubi_devices[i]) {
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 8d1f0e05892c1f..e5ac3cd0bbae62 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -1456,7 +1456,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
}
ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
+
+	/*
+	 * The volumes_lock is needed here to prevent the stale old eba_tbl
+	 * from being updated while ubi_resize_volume() copies it.
+	 */
+ spin_lock(&ubi->volumes_lock);
vol->eba_tbl->entries[lnum].pnum = to;
+ spin_unlock(&ubi->volumes_lock);
out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 2a728c31e6b854..9a4940874be5ba 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -85,9 +85,10 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi)
sizeof(struct ubi_fm_scan_pool) +
sizeof(struct ubi_fm_scan_pool) +
(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
- (sizeof(struct ubi_fm_eba) +
- (ubi->peb_count * sizeof(__be32))) +
- sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
+ ((sizeof(struct ubi_fm_eba) +
+ sizeof(struct ubi_fm_volhdr)) *
+ (UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT)) +
+ (ubi->peb_count * sizeof(__be32));
return roundup(size, ubi->leb_size);
}
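
The corrected worst case pairs one ubi_fm_volhdr with one ubi_fm_eba header per possible volume, internal volumes included, while the per-PEB __be32 EBA entries are still counted once against peb_count. With UBI_MAX_VOLUMES = 128 and UBI_INT_VOL_COUNT = 1 this reserves 129 header pairs; the old expression reserved 128 volume headers but only a single EBA header, and could therefore under-size the fastmap buffer.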
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 5db653eacbd451..f1ea8677467fbf 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -152,7 +152,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
spin_lock(&ubi->volumes_lock);
vol = ubi->volumes[vol_id];
- if (!vol)
+ if (!vol || vol->is_dead)
goto out_unlock;
err = -EBUSY;
@@ -280,6 +280,41 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
/**
+ * ubi_get_num_by_path - get UBI device and volume number from device path
+ * @pathname: volume character device node path
+ * @ubi_num: pointer to UBI device number to be set
+ * @vol_id: pointer to UBI volume ID to be set
+ *
+ * Returns 0 on success and sets ubi_num and vol_id, returns error otherwise.
+ */
+int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id)
+{
+ int error;
+ struct path path;
+ struct kstat stat;
+
+ error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+ if (error)
+ return error;
+
+ error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
+ path_put(&path);
+ if (error)
+ return error;
+
+ if (!S_ISCHR(stat.mode))
+ return -EINVAL;
+
+ *ubi_num = ubi_major2num(MAJOR(stat.rdev));
+ *vol_id = MINOR(stat.rdev) - 1;
+
+ if (*vol_id < 0 || *ubi_num < 0)
+ return -ENODEV;
+
+ return 0;
+}
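
A hedged usage sketch; it relies on the fixed UBI character device numbering where volume N is minor N + 1 on the owning UBI device's major (the path is illustrative):

	int ubi_num, vol_id, err;

	err = ubi_get_num_by_path("/dev/ubi0_0", &ubi_num, &vol_id);
	if (!err)
		pr_info("resolved to ubi%d, volume %d\n", ubi_num, vol_id);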
+
+/**
* ubi_open_volume_path - open UBI volume by its character device node path.
* @pathname: volume character device node path
* @mode: open mode
@@ -290,32 +325,17 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
{
int error, ubi_num, vol_id;
- struct path path;
- struct kstat stat;
dbg_gen("open volume %s, mode %d", pathname, mode);
if (!pathname || !*pathname)
return ERR_PTR(-EINVAL);
- error = kern_path(pathname, LOOKUP_FOLLOW, &path);
- if (error)
- return ERR_PTR(error);
-
- error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
- path_put(&path);
+ error = ubi_get_num_by_path(pathname, &ubi_num, &vol_id);
if (error)
return ERR_PTR(error);
- if (!S_ISCHR(stat.mode))
- return ERR_PTR(-EINVAL);
-
- ubi_num = ubi_major2num(MAJOR(stat.rdev));
- vol_id = MINOR(stat.rdev) - 1;
-
- if (vol_id >= 0 && ubi_num >= 0)
- return ubi_open_volume(ubi_num, vol_id, mode);
- return ERR_PTR(-ENODEV);
+ return ubi_open_volume(ubi_num, vol_id, mode);
}
EXPORT_SYMBOL_GPL(ubi_open_volume_path);
diff --git a/drivers/mtd/ubi/nvmem.c b/drivers/mtd/ubi/nvmem.c
new file mode 100644
index 00000000000000..8aeb9c428e5101
--- /dev/null
+++ b/drivers/mtd/ubi/nvmem.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 Daniel Golle <daniel@makrotopia.org>
+ */
+
+/* UBI NVMEM provider */
+#include "ubi.h"
+#include <linux/nvmem-provider.h>
+#include <asm/div64.h>
+
+/* List of all NVMEM devices */
+static LIST_HEAD(nvmem_devices);
+static DEFINE_MUTEX(devices_mutex);
+
+struct ubi_nvmem {
+ struct nvmem_device *nvmem;
+ int ubi_num;
+ int vol_id;
+ int usable_leb_size;
+ struct list_head list;
+};
+
+static int ubi_nvmem_reg_read(void *priv, unsigned int from,
+ void *val, size_t bytes)
+{
+ size_t to_read, bytes_left = bytes;
+ struct ubi_nvmem *unv = priv;
+ struct ubi_volume_desc *desc;
+ uint32_t offs;
+ uint64_t lnum = from;
+ int err = 0;
+
+ desc = ubi_open_volume(unv->ubi_num, unv->vol_id, UBI_READONLY);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ offs = do_div(lnum, unv->usable_leb_size);
+ while (bytes_left) {
+ to_read = unv->usable_leb_size - offs;
+
+ if (to_read > bytes_left)
+ to_read = bytes_left;
+
+ err = ubi_read(desc, lnum, val, offs, to_read);
+ if (err)
+ break;
+
+ lnum += 1;
+ offs = 0;
+ bytes_left -= to_read;
+ val += to_read;
+ }
+ ubi_close_volume(desc);
+
+ if (err)
+ return err;
+
+ return bytes_left == 0 ? 0 : -EIO;
+}
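
The read loop treats the volume as a flat byte array: do_div() divides the u64 offset in place, leaving the LEB number in lnum, and returns the remainder as the offset into that LEB. A worked example, assuming usable_leb_size = 126976 (typical for 128 KiB PEBs):

	uint64_t lnum = 300000;			/* NVMEM offset 'from' */
	uint32_t offs = do_div(lnum, 126976);	/* lnum = 2, offs = 46048 */
	/* the first ubi_read() starts 46048 bytes into LEB 2 */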
+
+static int ubi_nvmem_add(struct ubi_volume_info *vi)
+{
+ struct device_node *np = dev_of_node(vi->dev);
+ struct nvmem_config config = {};
+ struct ubi_nvmem *unv;
+ int ret;
+
+ if (!np)
+ return 0;
+
+ if (!of_get_child_by_name(np, "nvmem-layout"))
+ return 0;
+
+ if (WARN_ON_ONCE(vi->usable_leb_size <= 0) ||
+ WARN_ON_ONCE(vi->size <= 0))
+ return -EINVAL;
+
+ unv = kzalloc(sizeof(struct ubi_nvmem), GFP_KERNEL);
+ if (!unv)
+ return -ENOMEM;
+
+ config.id = NVMEM_DEVID_NONE;
+ config.dev = vi->dev;
+ config.name = dev_name(vi->dev);
+ config.owner = THIS_MODULE;
+ config.priv = unv;
+ config.reg_read = ubi_nvmem_reg_read;
+ config.size = vi->usable_leb_size * vi->size;
+ config.word_size = 1;
+ config.stride = 1;
+ config.read_only = true;
+ config.root_only = true;
+ config.ignore_wp = true;
+ config.of_node = np;
+
+ unv->ubi_num = vi->ubi_num;
+ unv->vol_id = vi->vol_id;
+ unv->usable_leb_size = vi->usable_leb_size;
+ unv->nvmem = nvmem_register(&config);
+ if (IS_ERR(unv->nvmem)) {
+ ret = dev_err_probe(vi->dev, PTR_ERR(unv->nvmem),
+ "Failed to register NVMEM device\n");
+ kfree(unv);
+ return ret;
+ }
+
+ mutex_lock(&devices_mutex);
+ list_add_tail(&unv->list, &nvmem_devices);
+ mutex_unlock(&devices_mutex);
+
+ return 0;
+}
+
+static void ubi_nvmem_remove(struct ubi_volume_info *vi)
+{
+ struct ubi_nvmem *unv_c, *unv = NULL;
+
+ mutex_lock(&devices_mutex);
+ list_for_each_entry(unv_c, &nvmem_devices, list)
+ if (unv_c->ubi_num == vi->ubi_num && unv_c->vol_id == vi->vol_id) {
+ unv = unv_c;
+ break;
+ }
+
+ if (!unv) {
+ mutex_unlock(&devices_mutex);
+ return;
+ }
+
+ list_del(&unv->list);
+ mutex_unlock(&devices_mutex);
+ nvmem_unregister(unv->nvmem);
+ kfree(unv);
+}
+
+/**
+ * nvmem_notify - UBI notification handler.
+ * @nb: registered notifier block
+ * @l: notification type
+ * @ns_ptr: pointer to the &struct ubi_notification object
+ */
+static int nvmem_notify(struct notifier_block *nb, unsigned long l,
+ void *ns_ptr)
+{
+ struct ubi_notification *nt = ns_ptr;
+
+ switch (l) {
+ case UBI_VOLUME_RESIZED:
+ ubi_nvmem_remove(&nt->vi);
+ fallthrough;
+ case UBI_VOLUME_ADDED:
+ ubi_nvmem_add(&nt->vi);
+ break;
+ case UBI_VOLUME_SHUTDOWN:
+ ubi_nvmem_remove(&nt->vi);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block nvmem_notifier = {
+ .notifier_call = nvmem_notify,
+};
+
+static int __init ubi_nvmem_init(void)
+{
+ return ubi_register_volume_notifier(&nvmem_notifier, 0);
+}
+
+static void __exit ubi_nvmem_exit(void)
+{
+ struct ubi_nvmem *unv, *tmp;
+
+ mutex_lock(&devices_mutex);
+ list_for_each_entry_safe(unv, tmp, &nvmem_devices, list) {
+ nvmem_unregister(unv->nvmem);
+ list_del(&unv->list);
+ kfree(unv);
+ }
+ mutex_unlock(&devices_mutex);
+
+ ubi_unregister_volume_notifier(&nvmem_notifier);
+}
+
+module_init(ubi_nvmem_init);
+module_exit(ubi_nvmem_exit);
+MODULE_DESCRIPTION("NVMEM layer over UBI volumes");
+MODULE_AUTHOR("Daniel Golle");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 0b42bb45dd840b..32009a24869e7e 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -337,6 +337,7 @@ struct ubi_volume {
int writers;
int exclusive;
int metaonly;
+ bool is_dead;
int reserved_pebs;
int vol_type;
@@ -561,6 +562,7 @@ struct ubi_device {
spinlock_t volumes_lock;
int ref_count;
int image_seq;
+ bool is_dead;
int rsvd_pebs;
int avail_pebs;
@@ -955,6 +957,7 @@ void ubi_free_internal_volumes(struct ubi_device *ubi);
void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
struct ubi_volume_info *vi);
+int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id);
/* scan.c */
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr);
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 2c867d16f89f7d..5a3558bbb90356 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -59,7 +59,7 @@ static ssize_t vol_attribute_show(struct device *dev,
struct ubi_device *ubi = vol->ubi;
spin_lock(&ubi->volumes_lock);
- if (!ubi->volumes[vol->vol_id]) {
+ if (!ubi->volumes[vol->vol_id] || ubi->volumes[vol->vol_id]->is_dead) {
spin_unlock(&ubi->volumes_lock);
return -ENODEV;
}
@@ -124,6 +124,31 @@ static void vol_release(struct device *dev)
kfree(vol);
}
+static struct fwnode_handle *find_volume_fwnode(struct ubi_volume *vol)
+{
+ struct fwnode_handle *fw_vols, *fw_vol;
+ const char *volname;
+ u32 volid;
+
+ fw_vols = device_get_named_child_node(vol->dev.parent->parent, "volumes");
+ if (!fw_vols)
+ return NULL;
+
+ fwnode_for_each_child_node(fw_vols, fw_vol) {
+ if (!fwnode_property_read_string(fw_vol, "volname", &volname) &&
+ strncmp(volname, vol->name, vol->name_len))
+ continue;
+
+ if (!fwnode_property_read_u32(fw_vol, "volid", &volid) &&
+ vol->vol_id != volid)
+ continue;
+
+ return fw_vol;
+ }
+
+ return NULL;
+}
+
/**
* ubi_create_volume - create volume.
* @ubi: UBI device description object
@@ -189,7 +214,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Ensure that the name is unique */
for (i = 0; i < ubi->vtbl_slots; i++)
- if (ubi->volumes[i] &&
+ if (ubi->volumes[i] && !ubi->volumes[i]->is_dead &&
ubi->volumes[i]->name_len == req->name_len &&
!strcmp(ubi->volumes[i]->name, req->name)) {
ubi_err(ubi, "volume \"%s\" exists (ID %d)",
@@ -223,6 +248,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->name_len = req->name_len;
memcpy(vol->name, req->name, vol->name_len);
vol->ubi = ubi;
+ device_set_node(&vol->dev, find_volume_fwnode(vol));
/*
* Finish all pending erases because there may be some LEBs belonging
@@ -352,6 +378,19 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
err = -EBUSY;
goto out_unlock;
}
+
+	/*
+	 * Mark the volume as dead at this point so that nobody can take a
+	 * new reference to it from now on. This is necessary because we
+	 * have to release the spinlock before calling ubi_volume_notify().
+	 */
+ vol->is_dead = true;
+ spin_unlock(&ubi->volumes_lock);
+
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_SHUTDOWN);
+
+ spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = NULL;
spin_unlock(&ubi->volumes_lock);
@@ -408,6 +447,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
struct ubi_device *ubi = vol->ubi;
struct ubi_vtbl_record vtbl_rec;
struct ubi_eba_table *new_eba_tbl = NULL;
+ struct ubi_eba_table *old_eba_tbl = NULL;
int vol_id = vol->vol_id;
if (ubi->ro_mode)
@@ -453,10 +493,13 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
err = -ENOSPC;
goto out_free;
}
+
ubi->avail_pebs -= pebs;
ubi->rsvd_pebs += pebs;
ubi_eba_copy_table(vol, new_eba_tbl, vol->reserved_pebs);
- ubi_eba_replace_table(vol, new_eba_tbl);
+ old_eba_tbl = vol->eba_tbl;
+ vol->eba_tbl = new_eba_tbl;
+ vol->reserved_pebs = reserved_pebs;
spin_unlock(&ubi->volumes_lock);
}
@@ -471,7 +514,9 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
ubi->avail_pebs -= pebs;
ubi_update_reserved(ubi);
ubi_eba_copy_table(vol, new_eba_tbl, reserved_pebs);
- ubi_eba_replace_table(vol, new_eba_tbl);
+ old_eba_tbl = vol->eba_tbl;
+ vol->eba_tbl = new_eba_tbl;
+ vol->reserved_pebs = reserved_pebs;
spin_unlock(&ubi->volumes_lock);
}
@@ -493,7 +538,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (err)
goto out_acc;
- vol->reserved_pebs = reserved_pebs;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = reserved_pebs;
vol->last_eb_bytes = vol->usable_leb_size;
@@ -501,19 +545,23 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
(long long)vol->used_ebs * vol->usable_leb_size;
}
+ /* destroy old table */
+ ubi_eba_destroy_table(old_eba_tbl);
ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
self_check_volumes(ubi);
return err;
out_acc:
- if (pebs > 0) {
- spin_lock(&ubi->volumes_lock);
- ubi->rsvd_pebs -= pebs;
- ubi->avail_pebs += pebs;
- spin_unlock(&ubi->volumes_lock);
- }
- return err;
-
+ spin_lock(&ubi->volumes_lock);
+ vol->reserved_pebs = reserved_pebs - pebs;
+ ubi->rsvd_pebs -= pebs;
+ ubi->avail_pebs += pebs;
+ if (pebs > 0)
+ ubi_eba_copy_table(vol, old_eba_tbl, vol->reserved_pebs);
+ else
+ ubi_eba_copy_table(vol, old_eba_tbl, reserved_pebs);
+ vol->eba_tbl = old_eba_tbl;
+ spin_unlock(&ubi->volumes_lock);
out_free:
ubi_eba_destroy_table(new_eba_tbl);
return err;
@@ -592,6 +640,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
vol->dev.class = &ubi_class;
vol->dev.groups = volume_dev_groups;
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
+ device_set_node(&vol->dev, find_volume_fwnode(vol));
err = device_register(&vol->dev);
if (err) {
cdev_del(&vol->cdev);
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index f700f0e4f2ec4d..6e5489e233dd2a 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -791,6 +791,12 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
* The number of supported volumes is limited by the eraseblock size
* and by the UBI_MAX_VOLUMES constant.
*/
+
+ if (ubi->leb_size < UBI_VTBL_RECORD_SIZE) {
+ ubi_err(ubi, "LEB size too small for a volume record");
+ return -EINVAL;
+ }
+
ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
ubi->vtbl_slots = UBI_MAX_VOLUMES;
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index d2fd315556a39d..a545a7917e4fc1 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -956,7 +956,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
u16 q_idx)
{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
return -EINVAL;
@@ -978,7 +978,7 @@ int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
int err = 0;
u16 q_idx;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 4d8111aeb0ff0c..db4b2844e1f718 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -4695,7 +4695,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
- DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
u16 i, buf_size = __struct_size(qg_list);
struct ice_q_ctx *q_ctx;
int status = -ENOENT;
@@ -4917,7 +4917,7 @@ int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id)
{
- DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
u16 qg_size = __struct_size(qg_list);
struct ice_hw *hw;
int status = 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 7532d11ad7f337..fc91c4d411863e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1938,8 +1938,8 @@ static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
*/
static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
{
- DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
- ICE_PKG_CNT);
+ DEFINE_RAW_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
+ ICE_PKG_CNT);
u16 size = __struct_size(pkg_info);
u32 i;
@@ -1990,8 +1990,8 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
struct ice_pkg_hdr *ospkg,
struct ice_seg **seg)
{
- DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
- ICE_PKG_CNT);
+ DEFINE_RAW_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
+ ICE_PKG_CNT);
u16 size = __struct_size(pkg);
enum ice_ddp_state state;
u32 i;
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index a7a342809935f7..f0e76f0a6d6031 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -491,7 +491,7 @@ static void
ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
u16 vsi_num, u8 tc)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
@@ -849,7 +849,7 @@ static void
ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
u8 tc)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
@@ -1873,7 +1873,7 @@ static void
ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
u16 vsi_num, u8 tc)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index d174a4eeb899cc..a1525992d14bc7 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -237,7 +237,7 @@ static int
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
u32 node_teid)
{
- DEFINE_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
u16 buf_size = __struct_size(buf);
u16 num_groups_removed = 0;
int status;
@@ -2219,7 +2219,7 @@ int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
u16 buf_len = __struct_size(buf);
struct ice_sched_node *node;
u16 i, grps_movd = 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index ba0ef91e4c19c1..b4ea935e830054 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1812,7 +1812,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
enum ice_sw_lkup_type lkup_type,
enum ice_adminq_opc opc)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
u16 buf_len = __struct_size(sw_buf);
struct ice_aqc_res_elem *vsi_ele;
int status;
@@ -2081,7 +2081,7 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
*/
int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
u16 buf_len = __struct_size(sw_buf);
int status;
@@ -4420,7 +4420,7 @@ int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 *counter_id)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
int status;
@@ -4448,7 +4448,7 @@ int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
int status;
@@ -4478,7 +4478,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
*/
int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
u16 res_type;
int status;
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index a480cdeac2883c..dd6ec0865141a9 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1532,7 +1532,7 @@ put_dev:
return ret;
}
-static int apple_nvme_remove(struct platform_device *pdev)
+static void apple_nvme_remove(struct platform_device *pdev)
{
struct apple_nvme *anv = platform_get_drvdata(pdev);
@@ -1547,8 +1547,6 @@ static int apple_nvme_remove(struct platform_device *pdev)
apple_rtkit_shutdown(anv->rtk);
apple_nvme_detach_genpd(anv);
-
- return 0;
}
static void apple_nvme_shutdown(struct platform_device *pdev)
@@ -1598,7 +1596,7 @@ static struct platform_driver apple_nvme_driver = {
.pm = pm_sleep_ptr(&apple_nvme_pm_ops),
},
.probe = apple_nvme_probe,
- .remove = apple_nvme_remove,
+ .remove_new = apple_nvme_remove,
.shutdown = apple_nvme_shutdown,
};
module_platform_driver(apple_nvme_driver);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 00864a63447099..943d72bdd794ca 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1807,9 +1807,6 @@ static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)
{
struct nvme_ctrl *ctrl = ns->ctrl;
- BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
- NVME_DSM_MAX_RANGES);
-
if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
lim->max_hw_discard_sectors =
nvme_lba_to_sect(ns->head, ctrl->dmrsl);
@@ -3237,7 +3234,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
if (ctrl->shutdown_timeout != shutdown_timeout)
dev_info(ctrl->device,
- "Shutdown timeout set to %u seconds\n",
+ "D3 entry latency set to %u seconds\n",
ctrl->shutdown_timeout);
} else
ctrl->shutdown_timeout = shutdown_timeout;
@@ -4391,7 +4388,8 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
set->ops = ops;
set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
if (ctrl->ops->flags & NVME_F_FABRICS)
- set->reserved_tags = NVMF_RESERVED_TAGS;
+ /* Reserved for fabric connect and keep alive */
+ set->reserved_tags = 2;
set->numa_node = ctrl->numa_node;
set->flags = BLK_MQ_F_NO_SCHED;
if (ctrl->ops->flags & NVME_F_BLOCKING)
@@ -4460,7 +4458,8 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS)
set->reserved_tags = NVME_AQ_DEPTH;
else if (ctrl->ops->flags & NVME_F_FABRICS)
- set->reserved_tags = NVMF_RESERVED_TAGS;
+ /* Reserved for fabric connect */
+ set->reserved_tags = 1;
set->numa_node = ctrl->numa_node;
set->flags = BLK_MQ_F_SHOULD_MERGE;
if (ctrl->ops->flags & NVME_F_BLOCKING)
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 06cc54851b1be3..37c974c38dcb07 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -19,13 +19,6 @@
#define NVMF_DEF_FAIL_FAST_TMO -1
/*
- * Reserved one command for internal usage. This command is used for sending
- * the connect command, as well as for the keep alive command on the admin
- * queue once live.
- */
-#define NVMF_RESERVED_TAGS 1
-
-/*
* Define a host as seen by the target. We allocate one at boot, but also
* allow the override it when creating controllers. This is both to provide
* persistence of the Host NQN over multiple boots, and to allow using
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e6267a6aa3801e..8e0bb9692685d4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3363,6 +3363,9 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_BOGUS_NID, },
{ PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
NVME_QUIRK_BOGUS_NID, },
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
index fc3eed00f9ff11..e05571b2a1b0c9 100644
--- a/drivers/nvme/host/pr.c
+++ b/drivers/nvme/host/pr.c
@@ -97,8 +97,7 @@ static int nvme_sc_to_pr_err(int nvme_sc)
static int nvme_send_pr_command(struct block_device *bdev,
struct nvme_command *c, void *data, unsigned int data_len)
{
- if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
- nvme_disk_is_ns_head(bdev->bd_disk))
+ if (nvme_disk_is_ns_head(bdev->bd_disk))
return nvme_send_ns_head_pr_command(bdev, c, data, data_len);
return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data,
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index 09fcaa519e5bc2..3c55f7edd18193 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -236,8 +236,7 @@ static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
struct block_device *bdev = disk->part0;
int ret;
- if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
- bdev->bd_disk->fops == &nvme_ns_head_ops)
+ if (nvme_disk_is_ns_head(bdev->bd_disk))
ret = ns_head_update_nuse(head);
else
ret = ns_update_nuse(bdev->bd_disk->private_data);
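Both the pr.c and sysfs.c call sites now lean on nvme_disk_is_ns_head() instead of open-coding the IS_ENABLED(CONFIG_NVME_MULTIPATH) test. A hedged sketch of what that helper plausibly looks like in nvme.h — the config check folds into a stub returning false when multipath is compiled out:

	static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
	{
		return disk->fops == &nvme_ns_head_ops;
	}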
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 3692b56cb58dba..fdbcdcedcee99f 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -37,6 +37,14 @@ module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimization priority");
/*
+ * Use an unbound workqueue for nvme_tcp_wq so that the CPU affinity can be
+ * set from sysfs.
+ */
+static bool wq_unbound;
+module_param(wq_unbound, bool, 0644);
+MODULE_PARM_DESC(wq_unbound, "Use unbound workqueue for nvme-tcp IO context (default false)");
+
+/*
* TLS handshake timeout
*/
static int tls_handshake_timeout = 10;
@@ -1546,7 +1554,10 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
else if (nvme_tcp_poll_queue(queue))
n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
ctrl->io_queues[HCTX_TYPE_READ] - 1;
- queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+ if (wq_unbound)
+ queue->io_cpu = WORK_CPU_UNBOUND;
+ else
+ queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
}
static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
@@ -2785,6 +2796,8 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
static int __init nvme_tcp_init_module(void)
{
+ unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS;
+
BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
@@ -2794,8 +2807,10 @@ static int __init nvme_tcp_init_module(void)
BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);
- nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
- WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (wq_unbound)
+ wq_flags |= WQ_UNBOUND;
+
+ nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq", wq_flags, 0);
if (!nvme_tcp_wq)
return -ENOMEM;
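With wq_unbound=1, queue work items are no longer pinned to a per-queue CPU: io_cpu becomes WORK_CPU_UNBOUND, and the WQ_UNBOUND | WQ_SYSFS combination is what makes the placement administrator-tunable. A minimal sketch of the resulting setup (error handling trimmed):

	static struct workqueue_struct *wq;

	static int __init example_init(void)
	{
		wq = alloc_workqueue("nvme_tcp_wq",
				     WQ_MEM_RECLAIM | WQ_HIGHPRI |
				     WQ_SYSFS | WQ_UNBOUND, 0);
		if (!wq)
			return -ENOMEM;
		/* affinity is now writable at
		 * /sys/devices/virtual/workqueue/nvme_tcp_wq/cpumask */
		return 0;
	}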
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 1c36fcedea2008..0288315f005028 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -119,7 +119,10 @@ static const char *nvme_trace_get_lba_status(struct trace_seq *p,
static const char *nvme_trace_admin_format_nvm(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
- u8 lbaf = cdw10[0] & 0xF;
+ /*
+ * lbafu (bits 13:12 of cdw10) already sits in the upper bits of the 6-bit
+ * LBAF value (byte 1, mask 0x30); lbafl occupies bits 03:00.
+ */
+ u8 lbaf = (cdw10[1] & 0x30) | (cdw10[0] & 0xF);
u8 mset = (cdw10[0] >> 4) & 0x1;
u8 pi = (cdw10[0] >> 5) & 0x7;
u8 pil = cdw10[1] & 0x1;
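To see why the extra mask matters, take a Format NVM command selecting LBA format 19 (0x13); the old expression silently dropped the upper two bits. Worked example:

	u8 cdw10[2] = { 0x03, 0x10 };	/* lbafl = 0x3, lbafu = 0x1 */
	u8 lbaf_old = cdw10[0] & 0xF;				/* 0x03: truncated */
	u8 lbaf_new = (cdw10[1] & 0x30) | (cdw10[0] & 0xF);	/* 0x13: full LBAF */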
@@ -164,12 +167,27 @@ static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
{
+ static const char * const zsa_strs[] = {
+ [0x01] = "close zone",
+ [0x02] = "finish zone",
+ [0x03] = "open zone",
+ [0x04] = "reset zone",
+ [0x05] = "offline zone",
+ [0x10] = "set zone descriptor extension"
+ };
const char *ret = trace_seq_buffer_ptr(p);
u64 slba = get_unaligned_le64(cdw10);
+ const char *zsa_str;
u8 zsa = cdw10[12];
u8 all = cdw10[13];
- trace_seq_printf(p, "slba=%llu, zsa=%u, all=%u", slba, zsa, all);
+ if (zsa < ARRAY_SIZE(zsa_strs) && zsa_strs[zsa])
+ zsa_str = zsa_strs[zsa];
+ else
+ zsa_str = "reserved";
+
+ trace_seq_printf(p, "slba=%llu, zsa=%u:%s, all=%u",
+ slba, zsa, zsa_str, all);
trace_seq_putc(p, 0);
return ret;
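This sparse-table lookup, guarded by both the array bound and a NULL check (the designated initializers leave holes such as 0x06..0x0f unset), recurs in every decoder below and again in the target tracer. A hedged sketch of the shared idiom, factored into a hypothetical helper:

	static const char *trace_str_or_reserved(const char * const *tbl,
						 size_t n, u8 v)
	{
		return (v < n && tbl[v]) ? tbl[v] : "reserved";
	}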
@@ -177,15 +195,86 @@ static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
{
+ static const char * const zrasf_strs[] = {
+ [0x00] = "list all zones",
+ [0x01] = "list the zones in the ZSE: Empty state",
+ [0x02] = "list the zones in the ZSIO: Implicitly Opened state",
+ [0x03] = "list the zones in the ZSEO: Explicitly Opened state",
+ [0x04] = "list the zones in the ZSC: Closed state",
+ [0x05] = "list the zones in the ZSF: Full state",
+ [0x06] = "list the zones in the ZSRO: Read Only state",
+ [0x07] = "list the zones in the ZSO: Offline state",
+ [0x09] = "list the zones that have the zone attribute"
+ };
const char *ret = trace_seq_buffer_ptr(p);
u64 slba = get_unaligned_le64(cdw10);
u32 numd = get_unaligned_le32(cdw10 + 8);
u8 zra = cdw10[12];
u8 zrasf = cdw10[13];
+ const char *zrasf_str;
u8 pr = cdw10[14];
- trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u, pr=%u",
- slba, numd, zra, zrasf, pr);
+ if (zrasf < ARRAY_SIZE(zrasf_strs) && zrasf_strs[zrasf])
+ zrasf_str = zrasf_strs[zrasf];
+ else
+ zrasf_str = "reserved";
+
+ trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u:%s, pr=%u",
+ slba, numd, zra, zrasf, zrasf_str, pr);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_resv_reg(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 rrega = cdw10[0] & 0x7;
+ u8 iekey = (cdw10[0] >> 3) & 0x1;
+ u8 ptpl = (cdw10[3] >> 6) & 0x3;
+
+ trace_seq_printf(p, "rrega=%u, iekey=%u, ptpl=%u",
+ rrega, iekey, ptpl);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_resv_acq(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 racqa = cdw10[0] & 0x7;
+ u8 iekey = (cdw10[0] >> 3) & 0x1;
+ u8 rtype = cdw10[1];
+
+ trace_seq_printf(p, "racqa=%u, iekey=%u, rtype=%u",
+ racqa, iekey, rtype);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_resv_rel(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 rrela = cdw10[0] & 0x7;
+ u8 iekey = (cdw10[0] >> 3) & 0x1;
+ u8 rtype = cdw10[1];
+
+ trace_seq_printf(p, "rrela=%u, iekey=%u, rtype=%u",
+ rrela, iekey, rtype);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_resv_report(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u32 numd = get_unaligned_le32(cdw10);
+ u8 eds = cdw10[4] & 0x1;
+
+ trace_seq_printf(p, "numd=%u, eds=%u", numd, eds);
trace_seq_putc(p, 0);
return ret;
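As a worked decode of the new reservation helpers (values illustrative): for Reservation Register with cdw10 = 0x40000009, the fields fall out as:

	u8 b[4] = { 0x09, 0x00, 0x00, 0x40 };	/* cdw10, little-endian bytes */
	u8 rrega = b[0] & 0x7;			/* 1: unregister */
	u8 iekey = (b[0] >> 3) & 0x1;		/* 1: ignore existing key */
	u8 ptpl  = (b[3] >> 6) & 0x3;		/* 1 */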
@@ -243,6 +332,14 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
return nvme_trace_zone_mgmt_send(p, cdw10);
case nvme_cmd_zone_mgmt_recv:
return nvme_trace_zone_mgmt_recv(p, cdw10);
+ case nvme_cmd_resv_register:
+ return nvme_trace_resv_reg(p, cdw10);
+ case nvme_cmd_resv_acquire:
+ return nvme_trace_resv_acq(p, cdw10);
+ case nvme_cmd_resv_release:
+ return nvme_trace_resv_rel(p, cdw10);
+ case nvme_cmd_resv_report:
+ return nvme_trace_resv_report(p, cdw10);
default:
return nvme_trace_common(p, cdw10);
}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index f2bb9d95ecf4bc..5b8c63e74639d7 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -53,7 +53,6 @@ struct nvmet_rdma_cmd {
enum {
NVMET_RDMA_REQ_INLINE_DATA = (1 << 0),
- NVMET_RDMA_REQ_INVALIDATE_RKEY = (1 << 1),
};
struct nvmet_rdma_rsp {
@@ -722,7 +721,7 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
struct rdma_cm_id *cm_id = rsp->queue->cm_id;
struct ib_send_wr *first_wr;
- if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
+ if (rsp->invalidate_rkey) {
rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
} else {
@@ -905,10 +904,8 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
goto error_out;
rsp->n_rdma += ret;
- if (invalidate) {
+ if (invalidate)
rsp->invalidate_rkey = key;
- rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
- }
return 0;
@@ -1047,6 +1044,7 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
rsp->req.cmd = cmd->nvme_cmd;
rsp->req.port = queue->port;
rsp->n_rdma = 0;
+ rsp->invalidate_rkey = 0;
if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
unsigned long flags;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 2aa5762e9f50d0..a5422e2c979add 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -898,6 +898,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
pr_err("bad nvme-tcp pdu length (%d)\n",
le32_to_cpu(icreq->hdr.plen));
nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
}
if (icreq->pfv != NVME_TCP_PFV_1_0) {
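nvmet_tcp_fatal_error() only schedules the teardown; without the added return, the parser kept consuming a PDU it had just declared invalid. The shape of the fix, as a sketch:

	if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
		nvmet_tcp_fatal_error(queue);	/* queues the error path... */
		return -EPROTO;			/* ...and stops parsing now */
	}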
diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
index 6ee1f3db81d040..8d1806a828879a 100644
--- a/drivers/nvme/target/trace.c
+++ b/drivers/nvme/target/trace.c
@@ -119,6 +119,67 @@ const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p,
}
}
+static const char *nvmet_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
+{
+ static const char * const zsa_strs[] = {
+ [0x01] = "close zone",
+ [0x02] = "finish zone",
+ [0x03] = "open zone",
+ [0x04] = "reset zone",
+ [0x05] = "offline zone",
+ [0x10] = "set zone descriptor extension"
+ };
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ const char *zsa_str;
+ u8 zsa = cdw10[12];
+ u8 all = cdw10[13];
+
+ if (zsa < ARRAY_SIZE(zsa_strs) && zsa_strs[zsa])
+ zsa_str = zsa_strs[zsa];
+ else
+ zsa_str = "reserved";
+
+ trace_seq_printf(p, "slba=%llu, zsa=%u:%s, all=%u",
+ slba, zsa, zsa_str, all);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvmet_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
+{
+ static const char * const zrasf_strs[] = {
+ [0x00] = "list all zones",
+ [0x01] = "list the zones in the ZSE: Empty state",
+ [0x02] = "list the zones in the ZSIO: Implicitly Opened state",
+ [0x03] = "list the zones in the ZSEO: Explicitly Opened state",
+ [0x04] = "list the zones in the ZSC: Closed state",
+ [0x05] = "list the zones in the ZSF: Full state",
+ [0x06] = "list the zones in the ZSRO: Read Only state",
+ [0x07] = "list the zones in the ZSO: Offline state",
+ [0x09] = "list the zones that have the zone attribute"
+ };
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u32 numd = get_unaligned_le32(&cdw10[8]);
+ u8 zra = cdw10[12];
+ u8 zrasf = cdw10[13];
+ const char *zrasf_str;
+ u8 pr = cdw10[14];
+
+ if (zrasf < ARRAY_SIZE(zrasf_strs) && zrasf_strs[zrasf])
+ zrasf_str = zrasf_strs[zrasf];
+ else
+ zrasf_str = "reserved";
+
+ trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u:%s, pr=%u",
+ slba, numd, zra, zrasf, zrasf_str, pr);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
u8 opcode, u8 *cdw10)
{
@@ -126,9 +187,14 @@ const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
case nvme_cmd_read:
case nvme_cmd_write:
case nvme_cmd_write_zeroes:
+ case nvme_cmd_zone_append:
return nvmet_trace_read_write(p, cdw10);
case nvme_cmd_dsm:
return nvmet_trace_dsm(p, cdw10);
+ case nvme_cmd_zone_mgmt_send:
+ return nvmet_trace_zone_mgmt_send(p, cdw10);
+ case nvme_cmd_zone_mgmt_recv:
+ return nvmet_trace_zone_mgmt_recv(p, cdw10);
default:
return nvmet_trace_common(p, cdw10);
}
@@ -176,6 +242,34 @@ static const char *nvmet_trace_fabrics_property_get(struct trace_seq *p,
return ret;
}
+static const char *nvmet_trace_fabrics_auth_send(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 tl = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u",
+ spsp0, spsp1, secp, tl);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvmet_trace_fabrics_auth_receive(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 al = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, al=%u",
+ spsp0, spsp1, secp, al);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
static const char *nvmet_trace_fabrics_common(struct trace_seq *p, u8 *spc)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -195,6 +289,10 @@ const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p,
return nvmet_trace_fabrics_connect(p, spc);
case nvme_fabrics_type_property_get:
return nvmet_trace_fabrics_property_get(p, spc);
+ case nvme_fabrics_type_auth_send:
+ return nvmet_trace_fabrics_auth_send(p, spc);
+ case nvme_fabrics_type_auth_receive:
+ return nvmet_trace_fabrics_auth_receive(p, spc);
default:
return nvmet_trace_fabrics_common(p, spc);
}
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 004d86230aa630..7526a9e714fa98 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -87,7 +87,8 @@ config RISCV_PMU_SBI
filtering, counter configuration.
config STARFIVE_STARLINK_PMU
- depends on ARCH_STARFIVE || (COMPILE_TEST && 64BIT)
+ depends on ARCH_STARFIVE || COMPILE_TEST
+ depends on 64BIT
bool "StarFive StarLink PMU"
help
Provide support for StarLink Performance Monitor Unit.
@@ -95,6 +96,20 @@ config STARFIVE_STARLINK_PMU
an L3 memory system. The L3 cache events are added into the perf event
subsystem, allowing monitoring of various L3 cache perf events.
+config ANDES_CUSTOM_PMU
+ bool "Andes custom PMU support"
+ depends on ARCH_RENESAS && RISCV_ALTERNATIVE && RISCV_PMU_SBI
+ default y
+ help
+ The Andes cores implement the PMU overflow extension very
+	  similarly to the standard Sscofpmf and Smcntrpmf extensions.
+
+ This will patch the overflow and pending CSRs and handle the
+ non-standard behaviour via the regular SBI PMU driver and
+ interface.
+
+ If you don't know what to do here, say "Y".
+
config ARM_PMU_ACPI
depends on ARM_PMU && ACPI
def_bool y
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 452aab49db1e8c..8cbe6e5f9c39a6 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -19,11 +19,33 @@
#include <linux/of.h>
#include <linux/cpu_pm.h>
#include <linux/sched/clock.h>
+#include <linux/soc/andes/irq.h>
#include <asm/errata_list.h>
#include <asm/sbi.h>
#include <asm/cpufeature.h>
+#define ALT_SBI_PMU_OVERFLOW(__ovl) \
+asm volatile(ALTERNATIVE_2( \
+ "csrr %0, " __stringify(CSR_SSCOUNTOVF), \
+ "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \
+ THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \
+ CONFIG_ERRATA_THEAD_PMU, \
+ "csrr %0, " __stringify(ANDES_CSR_SCOUNTEROF), \
+ 0, RISCV_ISA_EXT_XANDESPMU, \
+ CONFIG_ANDES_CUSTOM_PMU) \
+ : "=r" (__ovl) : \
+ : "memory")
+
+#define ALT_SBI_PMU_OVF_CLEAR_PENDING(__irq_mask) \
+asm volatile(ALTERNATIVE( \
+ "csrc " __stringify(CSR_IP) ", %0\n\t", \
+ "csrc " __stringify(ANDES_CSR_SLIP) ", %0\n\t", \
+ 0, RISCV_ISA_EXT_XANDESPMU, \
+ CONFIG_ANDES_CUSTOM_PMU) \
+ : : "r"(__irq_mask) \
+ : "memory")
+
#define SYSCTL_NO_USER_ACCESS 0
#define SYSCTL_USER_ACCESS 1
#define SYSCTL_LEGACY 2
@@ -61,6 +83,7 @@ static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS;
static union sbi_pmu_ctr_info *pmu_ctr_list;
static bool riscv_pmu_use_irq;
static unsigned int riscv_pmu_irq_num;
+static unsigned int riscv_pmu_irq_mask;
static unsigned int riscv_pmu_irq;
/* Cache the available counters in a bitmask */
@@ -694,7 +717,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
event = cpu_hw_evt->events[fidx];
if (!event) {
- csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
return IRQ_NONE;
}
@@ -708,7 +731,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
* Overflow interrupt pending bit should only be cleared after stopping
* all the counters to avoid any race condition.
*/
- csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
/* No overflow bit is set */
if (!overflow)
@@ -780,8 +803,7 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
if (riscv_pmu_use_irq) {
cpu_hw_evt->irq = riscv_pmu_irq;
- csr_clear(CSR_IP, BIT(riscv_pmu_irq_num));
- csr_set(CSR_IE, BIT(riscv_pmu_irq_num));
+ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
}
@@ -792,7 +814,6 @@ static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
{
if (riscv_pmu_use_irq) {
disable_percpu_irq(riscv_pmu_irq);
- csr_clear(CSR_IE, BIT(riscv_pmu_irq_num));
}
/* Disable all counters access for user mode now */
@@ -816,8 +837,14 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
riscv_cached_mimpid(0) == 0) {
riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU;
riscv_pmu_use_irq = true;
+ } else if (riscv_isa_extension_available(NULL, XANDESPMU) &&
+ IS_ENABLED(CONFIG_ANDES_CUSTOM_PMU)) {
+ riscv_pmu_irq_num = ANDES_SLI_CAUSE_BASE + ANDES_RV_IRQ_PMOVI;
+ riscv_pmu_use_irq = true;
}
+ riscv_pmu_irq_mask = BIT(riscv_pmu_irq_num % BITS_PER_LONG);
+
if (!riscv_pmu_use_irq)
return -EOPNOTSUPP;
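The ALTERNATIVE/ALTERNATIVE_2 wrappers patch the CSR access once at boot, so the overflow interrupt path stays branch-free whether the core is T-Head, Andes, or a standard Sscofpmf implementation. A hedged usage sketch (handle_overflow() is a hypothetical per-counter handler):

	unsigned int idx;
	unsigned long ovf;

	ALT_SBI_PMU_OVERFLOW(ovf);		/* CSR selected by alternatives */
	for_each_set_bit(idx, &ovf, RISCV_MAX_COUNTERS)
		handle_overflow(idx);		/* hypothetical */
	ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);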
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index d79a96679a26c9..d6596583ed4e78 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -284,9 +284,9 @@ static int img_pwm_probe(struct platform_device *pdev)
return PTR_ERR(imgchip->sys_clk);
}
- imgchip->pwm_clk = devm_clk_get(&pdev->dev, "imgchip");
+ imgchip->pwm_clk = devm_clk_get(&pdev->dev, "pwm");
if (IS_ERR(imgchip->pwm_clk)) {
- dev_err(&pdev->dev, "failed to get imgchip clock\n");
+ dev_err(&pdev->dev, "failed to get pwm clock\n");
return PTR_ERR(imgchip->pwm_clk);
}
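devm_clk_get() matches its con_id argument against the clock-names property in the device tree, and the img-pwm binding names its clocks "sys" and "pwm" — there is no "imgchip" entry, so the old lookup could not have matched by name. A sketch of the pairing; the DT fragment is illustrative, not quoted from the binding:

	/*
	 *   clocks = <&cr_periph SYS_PWM>, <&clk_periph PERIPH_PWM>;
	 *   clock-names = "sys", "pwm";
	 */
	imgchip->sys_clk = devm_clk_get(&pdev->dev, "sys");
	imgchip->pwm_clk = devm_clk_get(&pdev->dev, "pwm");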
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d019ca6dee9bff..dabac9772741fa 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2274,6 +2274,17 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
if (ret > 0) {
rdev->use_count = 1;
regulator->enable_count = 1;
+
+ /* Propagate the regulator state to its supply */
+ if (rdev->supply) {
+ ret = regulator_enable(rdev->supply);
+ if (ret < 0) {
+ destroy_regulator(regulator);
+ module_put(rdev->owner);
+ put_device(&rdev->dev);
+ return ERR_PTR(ret);
+ }
+ }
} else {
rdev->use_count = 0;
regulator->enable_count = 0;
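When a rail is found already enabled by firmware, the core now takes a matching enable reference on its parent supply, so later disables propagate down the chain instead of leaving the parent stranded; note the error path unwinds in the same order as the tail of _regulator_get(). A hedged consumer-side sketch, assuming an exclusive get of a boot-enabled rail:

	struct regulator *reg = regulator_get_exclusive(dev, "vbus");

	/* enable_count arrived as 1 and the supply holds its own
	 * reference, so one disable now walks the chain down cleanly. */
	if (!IS_ERR(reg))
		regulator_disable(reg);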
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e37a4341f442d8..c63e32d012f23c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1858,7 +1858,8 @@ config RTC_DRV_MT2712
config RTC_DRV_MT6397
tristate "MediaTek PMIC based RTC"
- depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN)
+ depends on MFD_MT6397 || COMPILE_TEST
+ select IRQ_DOMAIN
help
This selects the MediaTek(R) RTC driver. RTC is part of MediaTek
MT6397 PMIC. You should enable MT6397 PMIC MFD before select
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 921ee182797439..e31fa0ad127e95 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -21,7 +21,6 @@
#include "rtc-core.h"
static DEFINE_IDA(rtc_ida);
-struct class *rtc_class;
static void rtc_device_release(struct device *dev)
{
@@ -199,6 +198,11 @@ static SIMPLE_DEV_PM_OPS(rtc_class_dev_pm_ops, rtc_suspend, rtc_resume);
#define RTC_CLASS_DEV_PM_OPS NULL
#endif
+const struct class rtc_class = {
+ .name = "rtc",
+ .pm = RTC_CLASS_DEV_PM_OPS,
+};
+
/* Ensure the caller will set the id before releasing the device */
static struct rtc_device *rtc_allocate_device(void)
{
@@ -220,7 +224,7 @@ static struct rtc_device *rtc_allocate_device(void)
rtc->irq_freq = 1;
rtc->max_user_freq = 64;
- rtc->dev.class = rtc_class;
+ rtc->dev.class = &rtc_class;
rtc->dev.groups = rtc_get_dev_attribute_groups();
rtc->dev.release = rtc_device_release;
@@ -475,13 +479,14 @@ EXPORT_SYMBOL_GPL(devm_rtc_device_register);
static int __init rtc_init(void)
{
- rtc_class = class_create("rtc");
- if (IS_ERR(rtc_class)) {
- pr_err("couldn't create class\n");
- return PTR_ERR(rtc_class);
- }
- rtc_class->pm = RTC_CLASS_DEV_PM_OPS;
+ int err;
+
+ err = class_register(&rtc_class);
+ if (err)
+ return err;
+
rtc_dev_init();
+
return 0;
}
subsys_initcall(rtc_init);
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 1b63111cdda2e9..5faafb4aa55cc3 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -696,7 +696,7 @@ struct rtc_device *rtc_class_open(const char *name)
struct device *dev;
struct rtc_device *rtc = NULL;
- dev = class_find_device_by_name(rtc_class, name);
+ dev = class_find_device_by_name(&rtc_class, name);
if (dev)
rtc = to_rtc_device(dev);
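This is the now-standard conversion from a class_create()-allocated class to a statically defined struct class registered by reference, which lets the definition live in rodata and removes a runtime allocation that could fail. The pattern in isolation:

	static const struct class example_class = {
		.name = "example",
	};

	static int __init example_init(void)
	{
		return class_register(&example_class);	/* was class_create() */
	}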
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 1109cad8383849..8b087d9556bee8 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -22,26 +22,24 @@
#include <linux/io.h>
#include <linux/module.h>
-enum ds1511reg {
- DS1511_SEC = 0x0,
- DS1511_MIN = 0x1,
- DS1511_HOUR = 0x2,
- DS1511_DOW = 0x3,
- DS1511_DOM = 0x4,
- DS1511_MONTH = 0x5,
- DS1511_YEAR = 0x6,
- DS1511_CENTURY = 0x7,
- DS1511_AM1_SEC = 0x8,
- DS1511_AM2_MIN = 0x9,
- DS1511_AM3_HOUR = 0xa,
- DS1511_AM4_DATE = 0xb,
- DS1511_WD_MSEC = 0xc,
- DS1511_WD_SEC = 0xd,
- DS1511_CONTROL_A = 0xe,
- DS1511_CONTROL_B = 0xf,
- DS1511_RAMADDR_LSB = 0x10,
- DS1511_RAMDATA = 0x13
-};
+#define DS1511_SEC 0x0
+#define DS1511_MIN 0x1
+#define DS1511_HOUR 0x2
+#define DS1511_DOW 0x3
+#define DS1511_DOM 0x4
+#define DS1511_MONTH 0x5
+#define DS1511_YEAR 0x6
+#define DS1511_CENTURY 0x7
+#define DS1511_AM1_SEC 0x8
+#define DS1511_AM2_MIN 0x9
+#define DS1511_AM3_HOUR 0xa
+#define DS1511_AM4_DATE 0xb
+#define DS1511_WD_MSEC 0xc
+#define DS1511_WD_SEC 0xd
+#define DS1511_CONTROL_A 0xe
+#define DS1511_CONTROL_B 0xf
+#define DS1511_RAMADDR_LSB 0x10
+#define DS1511_RAMDATA 0x13
#define DS1511_BLF1 0x80
#define DS1511_BLF2 0x40
@@ -61,35 +59,10 @@ enum ds1511reg {
#define DS1511_WDS 0x01
#define DS1511_RAM_MAX 0x100
-#define RTC_CMD DS1511_CONTROL_B
-#define RTC_CMD1 DS1511_CONTROL_A
-
-#define RTC_ALARM_SEC DS1511_AM1_SEC
-#define RTC_ALARM_MIN DS1511_AM2_MIN
-#define RTC_ALARM_HOUR DS1511_AM3_HOUR
-#define RTC_ALARM_DATE DS1511_AM4_DATE
-
-#define RTC_SEC DS1511_SEC
-#define RTC_MIN DS1511_MIN
-#define RTC_HOUR DS1511_HOUR
-#define RTC_DOW DS1511_DOW
-#define RTC_DOM DS1511_DOM
-#define RTC_MON DS1511_MONTH
-#define RTC_YEAR DS1511_YEAR
-#define RTC_CENTURY DS1511_CENTURY
-
-#define RTC_TIE DS1511_TIE
-#define RTC_TE DS1511_TE
-
-struct rtc_plat_data {
+struct ds1511_data {
struct rtc_device *rtc;
void __iomem *ioaddr; /* virtual base address */
int irq;
- unsigned int irqen;
- int alrm_sec;
- int alrm_min;
- int alrm_hour;
- int alrm_mday;
spinlock_t lock;
};
@@ -98,95 +71,33 @@ static DEFINE_SPINLOCK(ds1511_lock);
static __iomem char *ds1511_base;
static u32 reg_spacing = 1;
-static noinline void
-rtc_write(uint8_t val, uint32_t reg)
+static void rtc_write(uint8_t val, uint32_t reg)
{
writeb(val, ds1511_base + (reg * reg_spacing));
}
-static noinline uint8_t
-rtc_read(enum ds1511reg reg)
+static uint8_t rtc_read(uint32_t reg)
{
return readb(ds1511_base + (reg * reg_spacing));
}
-static inline void
-rtc_disable_update(void)
+static void rtc_disable_update(void)
{
- rtc_write((rtc_read(RTC_CMD) & ~RTC_TE), RTC_CMD);
+ rtc_write((rtc_read(DS1511_CONTROL_B) & ~DS1511_TE), DS1511_CONTROL_B);
}
-static void
-rtc_enable_update(void)
+static void rtc_enable_update(void)
{
- rtc_write((rtc_read(RTC_CMD) | RTC_TE), RTC_CMD);
-}
-
-/*
- * #define DS1511_WDOG_RESET_SUPPORT
- *
- * Uncomment this if you want to use these routines in
- * some platform code.
- */
-#ifdef DS1511_WDOG_RESET_SUPPORT
-/*
- * just enough code to set the watchdog timer so that it
- * will reboot the system
- */
-void
-ds1511_wdog_set(unsigned long deciseconds)
-{
- /*
- * the wdog timer can take 99.99 seconds
- */
- deciseconds %= 10000;
- /*
- * set the wdog values in the wdog registers
- */
- rtc_write(bin2bcd(deciseconds % 100), DS1511_WD_MSEC);
- rtc_write(bin2bcd(deciseconds / 100), DS1511_WD_SEC);
- /*
- * set wdog enable and wdog 'steering' bit to issue a reset
- */
- rtc_write(rtc_read(RTC_CMD) | DS1511_WDE | DS1511_WDS, RTC_CMD);
-}
-
-void
-ds1511_wdog_disable(void)
-{
- /*
- * clear wdog enable and wdog 'steering' bits
- */
- rtc_write(rtc_read(RTC_CMD) & ~(DS1511_WDE | DS1511_WDS), RTC_CMD);
- /*
- * clear the wdog counter
- */
- rtc_write(0, DS1511_WD_MSEC);
- rtc_write(0, DS1511_WD_SEC);
+ rtc_write((rtc_read(DS1511_CONTROL_B) | DS1511_TE), DS1511_CONTROL_B);
}
-#endif
-/*
- * set the rtc chip's idea of the time.
- * stupidly, some callers call with year unmolested;
- * and some call with year = year - 1900. thanks.
- */
static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
{
u8 mon, day, dow, hrs, min, sec, yrs, cen;
unsigned long flags;
- /*
- * won't have to change this for a while
- */
- if (rtc_tm->tm_year < 1900)
- rtc_tm->tm_year += 1900;
-
- if (rtc_tm->tm_year < 1970)
- return -EINVAL;
-
yrs = rtc_tm->tm_year % 100;
- cen = rtc_tm->tm_year / 100;
+ cen = 19 + rtc_tm->tm_year / 100;
mon = rtc_tm->tm_mon + 1; /* tm_mon starts at zero */
day = rtc_tm->tm_mday;
dow = rtc_tm->tm_wday & 0x7; /* automatic BCD */
@@ -194,15 +105,6 @@ static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
min = rtc_tm->tm_min;
sec = rtc_tm->tm_sec;
- if ((mon > 12) || (day == 0))
- return -EINVAL;
-
- if (day > rtc_month_days(rtc_tm->tm_mon, rtc_tm->tm_year))
- return -EINVAL;
-
- if ((hrs >= 24) || (min >= 60) || (sec >= 60))
- return -EINVAL;
-
/*
* each register is a different number of valid bits
*/
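With range_max enforced by the core (set to RTC_TIMESTAMP_END_2099 in probe further down), the driver may assume a 1970–2099 window, and the century math collapses to a fixed 19xx/20xx split. Worked example:

	/* tm_year counts from 1900; April 2024 => tm_year == 124 */
	u8 yrs = 124 % 100;		/* 24 */
	u8 cen = 19 + 124 / 100;	/* 20: device stores BCD "20" "24" */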
@@ -216,14 +118,14 @@ static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
spin_lock_irqsave(&ds1511_lock, flags);
rtc_disable_update();
- rtc_write(cen, RTC_CENTURY);
- rtc_write(yrs, RTC_YEAR);
- rtc_write((rtc_read(RTC_MON) & 0xe0) | mon, RTC_MON);
- rtc_write(day, RTC_DOM);
- rtc_write(hrs, RTC_HOUR);
- rtc_write(min, RTC_MIN);
- rtc_write(sec, RTC_SEC);
- rtc_write(dow, RTC_DOW);
+ rtc_write(cen, DS1511_CENTURY);
+ rtc_write(yrs, DS1511_YEAR);
+ rtc_write((rtc_read(DS1511_MONTH) & 0xe0) | mon, DS1511_MONTH);
+ rtc_write(day, DS1511_DOM);
+ rtc_write(hrs, DS1511_HOUR);
+ rtc_write(min, DS1511_MIN);
+ rtc_write(sec, DS1511_SEC);
+ rtc_write(dow, DS1511_DOW);
rtc_enable_update();
spin_unlock_irqrestore(&ds1511_lock, flags);
@@ -238,14 +140,14 @@ static int ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
spin_lock_irqsave(&ds1511_lock, flags);
rtc_disable_update();
- rtc_tm->tm_sec = rtc_read(RTC_SEC) & 0x7f;
- rtc_tm->tm_min = rtc_read(RTC_MIN) & 0x7f;
- rtc_tm->tm_hour = rtc_read(RTC_HOUR) & 0x3f;
- rtc_tm->tm_mday = rtc_read(RTC_DOM) & 0x3f;
- rtc_tm->tm_wday = rtc_read(RTC_DOW) & 0x7;
- rtc_tm->tm_mon = rtc_read(RTC_MON) & 0x1f;
- rtc_tm->tm_year = rtc_read(RTC_YEAR) & 0x7f;
- century = rtc_read(RTC_CENTURY);
+ rtc_tm->tm_sec = rtc_read(DS1511_SEC) & 0x7f;
+ rtc_tm->tm_min = rtc_read(DS1511_MIN) & 0x7f;
+ rtc_tm->tm_hour = rtc_read(DS1511_HOUR) & 0x3f;
+ rtc_tm->tm_mday = rtc_read(DS1511_DOM) & 0x3f;
+ rtc_tm->tm_wday = rtc_read(DS1511_DOW) & 0x7;
+ rtc_tm->tm_mon = rtc_read(DS1511_MONTH) & 0x1f;
+ rtc_tm->tm_year = rtc_read(DS1511_YEAR) & 0x7f;
+ century = rtc_read(DS1511_CENTURY);
rtc_enable_update();
spin_unlock_irqrestore(&ds1511_lock, flags);
@@ -271,106 +173,67 @@ static int ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
return 0;
}
-/*
- * write the alarm register settings
- *
- * we only have the use to interrupt every second, otherwise
- * known as the update interrupt, or the interrupt if the whole
- * date/hours/mins/secs matches. the ds1511 has many more
- * permutations, but the kernel doesn't.
- */
-static void
-ds1511_rtc_update_alarm(struct rtc_plat_data *pdata)
+static void ds1511_rtc_alarm_enable(unsigned int enabled)
{
- unsigned long flags;
-
- spin_lock_irqsave(&pdata->lock, flags);
- rtc_write(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
- 0x80 : bin2bcd(pdata->alrm_mday) & 0x3f,
- RTC_ALARM_DATE);
- rtc_write(pdata->alrm_hour < 0 || (pdata->irqen & RTC_UF) ?
- 0x80 : bin2bcd(pdata->alrm_hour) & 0x3f,
- RTC_ALARM_HOUR);
- rtc_write(pdata->alrm_min < 0 || (pdata->irqen & RTC_UF) ?
- 0x80 : bin2bcd(pdata->alrm_min) & 0x7f,
- RTC_ALARM_MIN);
- rtc_write(pdata->alrm_sec < 0 || (pdata->irqen & RTC_UF) ?
- 0x80 : bin2bcd(pdata->alrm_sec) & 0x7f,
- RTC_ALARM_SEC);
- rtc_write(rtc_read(RTC_CMD) | (pdata->irqen ? RTC_TIE : 0), RTC_CMD);
- rtc_read(RTC_CMD1); /* clear interrupts */
- spin_unlock_irqrestore(&pdata->lock, flags);
+ rtc_write(rtc_read(DS1511_CONTROL_B) | (enabled ? DS1511_TIE : 0), DS1511_CONTROL_B);
}
-static int
-ds1511_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+static int ds1511_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct rtc_plat_data *pdata = dev_get_drvdata(dev);
+ struct ds1511_data *ds1511 = dev_get_drvdata(dev);
+ unsigned long flags;
- if (pdata->irq <= 0)
- return -EINVAL;
+ spin_lock_irqsave(&ds1511->lock, flags);
+ rtc_write(bin2bcd(alrm->time.tm_mday) & 0x3f, DS1511_AM4_DATE);
+ rtc_write(bin2bcd(alrm->time.tm_hour) & 0x3f, DS1511_AM3_HOUR);
+ rtc_write(bin2bcd(alrm->time.tm_min) & 0x7f, DS1511_AM2_MIN);
+ rtc_write(bin2bcd(alrm->time.tm_sec) & 0x7f, DS1511_AM1_SEC);
+ ds1511_rtc_alarm_enable(alrm->enabled);
- pdata->alrm_mday = alrm->time.tm_mday;
- pdata->alrm_hour = alrm->time.tm_hour;
- pdata->alrm_min = alrm->time.tm_min;
- pdata->alrm_sec = alrm->time.tm_sec;
- if (alrm->enabled)
- pdata->irqen |= RTC_AF;
+ rtc_read(DS1511_CONTROL_A); /* clear interrupts */
+ spin_unlock_irqrestore(&ds1511->lock, flags);
- ds1511_rtc_update_alarm(pdata);
return 0;
}
-static int
-ds1511_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+static int ds1511_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct rtc_plat_data *pdata = dev_get_drvdata(dev);
-
- if (pdata->irq <= 0)
- return -EINVAL;
+ alrm->time.tm_mday = bcd2bin(rtc_read(DS1511_AM4_DATE) & 0x3f);
+ alrm->time.tm_hour = bcd2bin(rtc_read(DS1511_AM3_HOUR) & 0x3f);
+ alrm->time.tm_min = bcd2bin(rtc_read(DS1511_AM2_MIN) & 0x7f);
+ alrm->time.tm_sec = bcd2bin(rtc_read(DS1511_AM1_SEC) & 0x7f);
+ alrm->enabled = !!(rtc_read(DS1511_CONTROL_B) & DS1511_TIE);
- alrm->time.tm_mday = pdata->alrm_mday < 0 ? 0 : pdata->alrm_mday;
- alrm->time.tm_hour = pdata->alrm_hour < 0 ? 0 : pdata->alrm_hour;
- alrm->time.tm_min = pdata->alrm_min < 0 ? 0 : pdata->alrm_min;
- alrm->time.tm_sec = pdata->alrm_sec < 0 ? 0 : pdata->alrm_sec;
- alrm->enabled = (pdata->irqen & RTC_AF) ? 1 : 0;
return 0;
}
-static irqreturn_t
-ds1511_interrupt(int irq, void *dev_id)
+static irqreturn_t ds1511_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct ds1511_data *ds1511 = platform_get_drvdata(pdev);
unsigned long events = 0;
- spin_lock(&pdata->lock);
+ spin_lock(&ds1511->lock);
/*
* read and clear interrupt
*/
- if (rtc_read(RTC_CMD1) & DS1511_IRQF) {
- events = RTC_IRQF;
- if (rtc_read(RTC_ALARM_SEC) & 0x80)
- events |= RTC_UF;
- else
- events |= RTC_AF;
- rtc_update_irq(pdata->rtc, 1, events);
+ if (rtc_read(DS1511_CONTROL_A) & DS1511_IRQF) {
+ events = RTC_IRQF | RTC_AF;
+ rtc_update_irq(ds1511->rtc, 1, events);
}
- spin_unlock(&pdata->lock);
+ spin_unlock(&ds1511->lock);
return events ? IRQ_HANDLED : IRQ_NONE;
}
static int ds1511_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
- struct rtc_plat_data *pdata = dev_get_drvdata(dev);
-
- if (pdata->irq <= 0)
- return -EINVAL;
- if (enabled)
- pdata->irqen |= RTC_AF;
- else
- pdata->irqen &= ~RTC_AF;
- ds1511_rtc_update_alarm(pdata);
+ struct ds1511_data *ds1511 = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ds1511->lock, flags);
+ ds1511_rtc_alarm_enable(enabled);
+ spin_unlock_irqrestore(&ds1511->lock, flags);
+
return 0;
}
@@ -408,7 +271,7 @@ static int ds1511_nvram_write(void *priv, unsigned int pos, void *buf,
static int ds1511_rtc_probe(struct platform_device *pdev)
{
- struct rtc_plat_data *pdata;
+ struct ds1511_data *ds1511;
int ret = 0;
struct nvmem_config ds1511_nvmem_cfg = {
.name = "ds1511_nvram",
@@ -420,21 +283,21 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
.priv = &pdev->dev,
};
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
+ ds1511 = devm_kzalloc(&pdev->dev, sizeof(*ds1511), GFP_KERNEL);
+ if (!ds1511)
return -ENOMEM;
ds1511_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ds1511_base))
return PTR_ERR(ds1511_base);
- pdata->ioaddr = ds1511_base;
- pdata->irq = platform_get_irq(pdev, 0);
+ ds1511->ioaddr = ds1511_base;
+ ds1511->irq = platform_get_irq(pdev, 0);
/*
* turn on the clock and the crystal, etc.
*/
- rtc_write(DS1511_BME, RTC_CMD);
- rtc_write(0, RTC_CMD1);
+ rtc_write(DS1511_BME, DS1511_CONTROL_B);
+ rtc_write(0, DS1511_CONTROL_A);
/*
* clear the wdog counter
*/
@@ -448,38 +311,43 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
/*
* check for a dying battery
*/
- if (rtc_read(RTC_CMD1) & DS1511_BLF1)
+ if (rtc_read(DS1511_CONTROL_A) & DS1511_BLF1)
dev_warn(&pdev->dev, "voltage-low detected.\n");
- spin_lock_init(&pdata->lock);
- platform_set_drvdata(pdev, pdata);
-
- pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(pdata->rtc))
- return PTR_ERR(pdata->rtc);
-
- pdata->rtc->ops = &ds1511_rtc_ops;
+ spin_lock_init(&ds1511->lock);
+ platform_set_drvdata(pdev, ds1511);
- ret = devm_rtc_register_device(pdata->rtc);
- if (ret)
- return ret;
+ ds1511->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(ds1511->rtc))
+ return PTR_ERR(ds1511->rtc);
- devm_rtc_nvmem_register(pdata->rtc, &ds1511_nvmem_cfg);
+ ds1511->rtc->ops = &ds1511_rtc_ops;
+ ds1511->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ ds1511->rtc->alarm_offset_max = 28 * 24 * 60 * 60 - 1;
/*
* if the platform has an interrupt in mind for this device,
* then by all means, set it
*/
- if (pdata->irq > 0) {
- rtc_read(RTC_CMD1);
- if (devm_request_irq(&pdev->dev, pdata->irq, ds1511_interrupt,
+ if (ds1511->irq > 0) {
+ rtc_read(DS1511_CONTROL_A);
+ if (devm_request_irq(&pdev->dev, ds1511->irq, ds1511_interrupt,
IRQF_SHARED, pdev->name, pdev) < 0) {
dev_warn(&pdev->dev, "interrupt not available.\n");
- pdata->irq = 0;
+ ds1511->irq = 0;
}
}
+ if (ds1511->irq == 0)
+ clear_bit(RTC_FEATURE_ALARM, ds1511->rtc->features);
+
+ ret = devm_rtc_register_device(ds1511->rtc);
+ if (ret)
+ return ret;
+
+ devm_rtc_nvmem_register(ds1511->rtc, &ds1511_nvmem_cfg);
+
return 0;
}
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 866489ad56d67d..0013bff0447d5b 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -909,10 +909,7 @@ static int m41t80_probe(struct i2c_client *client)
if (IS_ERR(m41t80_data->rtc))
return PTR_ERR(m41t80_data->rtc);
-#ifdef CONFIG_OF
- wakeup_source = of_property_read_bool(client->dev.of_node,
- "wakeup-source");
-#endif
+ wakeup_source = device_property_read_bool(&client->dev, "wakeup-source");
if (client->irq > 0) {
unsigned long irqflags = IRQF_TRIGGER_LOW;
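device_property_read_bool() resolves against whatever firmware node backs the device — OF or ACPI — which is what allows the CONFIG_OF guard to go away. A sketch; the follow-up wakeup call is illustrative, not taken from this driver:

	bool wakeup = device_property_read_bool(&client->dev, "wakeup-source");

	if (wakeup)
		device_init_wakeup(&client->dev, true);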
diff --git a/drivers/rtc/rtc-max31335.c b/drivers/rtc/rtc-max31335.c
index 402fda8fd54884..a2441e5c2c74d6 100644
--- a/drivers/rtc/rtc-max31335.c
+++ b/drivers/rtc/rtc-max31335.c
@@ -204,7 +204,7 @@ static bool max31335_volatile_reg(struct device *dev, unsigned int reg)
return true;
/* interrupt status register */
- if (reg == MAX31335_INT_EN1_A1IE)
+ if (reg == MAX31335_STATUS1)
return true;
/* temperature registers */
diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c
index f488a189a4651f..076d8b99f91314 100644
--- a/drivers/rtc/rtc-nct3018y.c
+++ b/drivers/rtc/rtc-nct3018y.c
@@ -102,6 +102,8 @@ static int nct3018y_get_alarm_mode(struct i2c_client *client, unsigned char *ala
if (flags < 0)
return flags;
*alarm_enable = flags & NCT3018Y_BIT_AIE;
+ dev_dbg(&client->dev, "%s:alarm_enable:%x\n", __func__, *alarm_enable);
+
}
if (alarm_flag) {
@@ -110,11 +112,9 @@ static int nct3018y_get_alarm_mode(struct i2c_client *client, unsigned char *ala
if (flags < 0)
return flags;
*alarm_flag = flags & NCT3018Y_BIT_AF;
+ dev_dbg(&client->dev, "%s:alarm_flag:%x\n", __func__, *alarm_flag);
}
- dev_dbg(&client->dev, "%s:alarm_enable:%x alarm_flag:%x\n",
- __func__, *alarm_enable, *alarm_flag);
-
return 0;
}
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index d1efde3e7a8094..98b77f790b0c5a 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -370,6 +370,30 @@ static int pcf8523_rtc_set_offset(struct device *dev, long offset)
return regmap_write(pcf8523->regmap, PCF8523_REG_OFFSET, value);
}
+#ifdef CONFIG_PM_SLEEP
+static int pcf8523_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq > 0 && device_may_wakeup(dev))
+ enable_irq_wake(client->irq);
+
+ return 0;
+}
+
+static int pcf8523_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq > 0 && device_may_wakeup(dev))
+ disable_irq_wake(client->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(pcf8523_pm, pcf8523_suspend, pcf8523_resume);
+
static const struct rtc_class_ops pcf8523_rtc_ops = {
.read_time = pcf8523_rtc_read_time,
.set_time = pcf8523_rtc_set_time,
@@ -487,6 +511,7 @@ static struct i2c_driver pcf8523_driver = {
.driver = {
.name = "rtc-pcf8523",
.of_match_table = pcf8523_of_match,
+ .pm = &pcf8523_pm,
},
.probe = pcf8523_probe,
.id_table = pcf8523_id,
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8b40f75fc9d7c6..634f2f501c6c6a 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -241,6 +241,11 @@ config SCSI_SCAN_ASYNC
Note that this setting also affects whether resuming from
system suspend will be performed asynchronously.
+config SCSI_PROTO_TEST
+ tristate "scsi_proto.h unit tests" if !KUNIT_ALL_TESTS
+ depends on SCSI && KUNIT
+ default KUNIT_ALL_TESTS
+
menu "SCSI Transports"
depends on SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index f055bfd54a6832..1313ddf2fd1a1e 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -24,6 +24,8 @@ obj-$(CONFIG_SCSI_COMMON) += scsi_common.o
obj-$(CONFIG_RAID_ATTRS) += raid_class.o
+obj-$(CONFIG_SCSI_PROTO_TEST) += scsi_proto_test.o
+
# --- NOTE ORDERING HERE ---
# For kernel non-modular link, transport attributes need to
# be initialised before drivers
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 8cad9792a56275..3e0c0381277acd 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -517,6 +517,8 @@ void scsi_attach_vpd(struct scsi_device *sdev)
scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1);
if (vpd_buf->data[i] == 0xb2)
scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2);
+ if (vpd_buf->data[i] == 0xb7)
+ scsi_update_vpd_page(sdev, 0xb7, &sdev->vpd_pgb7);
}
kfree(vpd_buf);
}
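Caching page 0xb7 (Block Limits Extension) follows the same discipline as the other VPD pages: updated under inquiry_mutex, published to readers via RCU. A hedged sketch of a consumer; the RSCS bit offset follows SBC-4 as I read it:

	struct scsi_vpd *vpd;
	bool rscs = false;

	rcu_read_lock();
	vpd = rcu_dereference(sdev->vpd_pgb7);
	if (vpd && vpd->len >= 6)
		rscs = vpd->data[5] & 1;	/* Reduced Stream Control Supported */
	rcu_read_unlock();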
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 914d9c12e7412d..acf0592d63dae4 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -43,6 +43,7 @@
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/async.h>
+#include <linux/cleanup.h>
#include <net/checksum.h>
@@ -532,6 +533,8 @@ static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_get_stream_status(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
@@ -606,6 +609,9 @@ static const struct opcode_info_t sa_in_16_iarr[] = {
{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
{16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
+ {0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
+ {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
+ 0, 0} }, /* GET STREAM STATUS */
};
static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
@@ -896,6 +902,8 @@ static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
static int poll_queues; /* iouring iopoll interface.*/
+static atomic_long_t writes_by_group_number[64];
+
static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;
@@ -1867,6 +1875,19 @@ static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
return 0x3c;
}
+#define SDEBUG_BLE_LEN_AFTER_B4 28 /* thus vpage 32 bytes long */
+
+enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };
+
+/* Block limits extension VPD page (SBC-4) */
+static int inquiry_vpd_b7(unsigned char *arrb4)
+{
+ memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
+ arrb4[1] = 1; /* Reduced stream control support (RSCS) */
+ put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
+ return SDEBUG_BLE_LEN_AFTER_B4;
+}
+
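For reference, the page that resp_inquiry() returns for 0xb7 lays out as follows (worked from the code above; byte 1 is filled in by the caller):

	/*
	 * arr[1]    = 0xb7	page code (set by resp_inquiry)
	 * arr[3]    = 28	page length = SDEBUG_BLE_LEN_AFTER_B4
	 * arr[5]    = 0x01	RSCS
	 * arr[6..7] = 0, 6	MAXIMUM NUMBER OF STREAMS (big endian)
	 */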
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584
@@ -1903,7 +1924,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u32 len;
char lu_id_str[6];
int host_no = devip->sdbg_host->shost->host_no;
-
+
+ arr[1] = cmd[2];
port_group_id = (((host_no + 1) & 0x7f) << 8) +
(devip->channel & 0x7f);
if (sdebug_vpd_use_hostno == 0)
@@ -1914,7 +1936,6 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
(devip->target * 1000) - 3;
len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
if (0 == cmd[2]) { /* supported vital product data pages */
- arr[1] = cmd[2]; /*sanity */
n = 4;
arr[n++] = 0x0; /* this page */
arr[n++] = 0x80; /* unit serial number */
@@ -1932,26 +1953,22 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
arr[n++] = 0xb2; /* LB Provisioning */
if (is_zbc)
arr[n++] = 0xb6; /* ZB dev. char. */
+ arr[n++] = 0xb7; /* Block limits extension */
}
arr[3] = n - 4; /* number of supported VPD pages */
} else if (0x80 == cmd[2]) { /* unit serial number */
- arr[1] = cmd[2]; /*sanity */
arr[3] = len;
memcpy(&arr[4], lu_id_str, len);
} else if (0x83 == cmd[2]) { /* device identification */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
target_dev_id, lu_id_num,
lu_id_str, len,
&devip->lu_name);
} else if (0x84 == cmd[2]) { /* Software interface ident. */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_84(&arr[4]);
} else if (0x85 == cmd[2]) { /* Management network addresses */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_85(&arr[4]);
} else if (0x86 == cmd[2]) { /* extended inquiry */
- arr[1] = cmd[2]; /*sanity */
arr[3] = 0x3c; /* number of following entries */
if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
arr[4] = 0x4; /* SPT: GRD_CHK:1 */
@@ -1959,33 +1976,32 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
else
arr[4] = 0x0; /* no protection stuff */
- arr[5] = 0x7; /* head of q, ordered + simple q's */
+ /*
+ * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
+ * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
+ */
+ arr[5] = 0x17;
} else if (0x87 == cmd[2]) { /* mode page policy */
- arr[1] = cmd[2]; /*sanity */
arr[3] = 0x8; /* number of following entries */
arr[4] = 0x2; /* disconnect-reconnect mp */
arr[6] = 0x80; /* mlus, shared */
arr[8] = 0x18; /* protocol specific lu */
arr[10] = 0x82; /* mlus, per initiator port */
} else if (0x88 == cmd[2]) { /* SCSI Ports */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
- arr[1] = cmd[2]; /*sanity */
n = inquiry_vpd_89(&arr[4]);
put_unaligned_be16(n, arr + 2);
} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b0(&arr[4]);
} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b1(devip, &arr[4]);
} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b2(&arr[4]);
} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b6(devip, &arr[4]);
+ } else if (cmd[2] == 0xb7) { /* block limits extension page */
+ arr[3] = inquiry_vpd_b7(&arr[4]);
} else {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
kfree(arr);
@@ -2554,6 +2570,40 @@ static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
return sizeof(ctrl_m_pg);
}
+/* IO Advice Hints Grouping mode page */
+static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
+{
+ /* IO Advice Hints Grouping mode page */
+ struct grouping_m_pg {
+ u8 page_code; /* OR 0x40 when subpage_code > 0 */
+ u8 subpage_code;
+ __be16 page_length;
+ u8 reserved[12];
+ struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
+ };
+ static const struct grouping_m_pg gr_m_pg = {
+ .page_code = 0xa | 0x40,
+ .subpage_code = 5,
+ .page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
+ .descr = {
+ { .st_enble = 1 },
+ { .st_enble = 1 },
+ { .st_enble = 1 },
+ { .st_enble = 1 },
+ { .st_enble = 1 },
+ { .st_enble = 0 },
+ }
+ };
+
+ BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
+ 16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
+ memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
+ if (1 == pcontrol) {
+ /* There are no changeable values so clear from byte 4 on. */
+ memset(p + 4, 0, sizeof(gr_m_pg) - 4);
+ }
+ return sizeof(gr_m_pg);
+}
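An initiator reaches this page through MODE SENSE with page code 0x0a and subpage 0x05. A worked CDB (illustrative, 256-byte allocation length):

	/* MODE SENSE(10): PC=0 (current), page 0x0a, subpage 0x05 */
	u8 cdb[10] = { 0x5a, 0x00, 0x0a, 0x05, 0, 0, 0, 0x01, 0x00, 0x00 };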
static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{ /* Informational Exceptions control mode page for mode_sense */
@@ -2627,7 +2677,8 @@ static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
return sizeof(sas_sha_m_pg);
}
-#define SDEBUG_MAX_MSENSE_SZ 256
+/* PAGE_SIZE is more than necessary but provides room for future expansion. */
+#define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
static int resp_mode_sense(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
@@ -2638,10 +2689,13 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
int target_dev_id;
int target = scp->device->id;
unsigned char *ap;
- unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
+ unsigned char *arr __free(kfree);
unsigned char *cmd = scp->cmnd;
- bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
+ bool dbd, llbaa, msense_6, is_disk, is_zbc;
+ arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
+ if (!arr)
+ return -ENOMEM;
dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
pcontrol = (cmd[2] & 0xc0) >> 6;
pcode = cmd[2] & 0x3f;
@@ -2699,45 +2753,63 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
ap = arr + offset;
}
- if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
- /* TODO: Control Extension page */
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
- return check_condition_result;
- }
- bad_pcode = false;
-
+ /*
+ * N.B. If len > 0 before a resp_*_pg() call, that call should take the form:
+ * len += resp_*_pg(ap + len, pcontrol, target);
+ */
switch (pcode) {
case 0x1: /* Read-Write error recovery page, direct access */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
len = resp_err_recov_pg(ap, pcontrol, target);
offset += len;
break;
case 0x2: /* Disconnect-Reconnect page, all devices */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
len = resp_disconnect_pg(ap, pcontrol, target);
offset += len;
break;
case 0x3: /* Format device page, direct access */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
if (is_disk) {
len = resp_format_pg(ap, pcontrol, target);
offset += len;
- } else
- bad_pcode = true;
+ } else {
+ goto bad_pcode;
+ }
break;
case 0x8: /* Caching page, direct access */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
if (is_disk || is_zbc) {
len = resp_caching_pg(ap, pcontrol, target);
offset += len;
- } else
- bad_pcode = true;
+ } else {
+ goto bad_pcode;
+ }
break;
case 0xa: /* Control Mode page, all devices */
- len = resp_ctrl_m_pg(ap, pcontrol, target);
+ switch (subpcode) {
+ case 0:
+ len = resp_ctrl_m_pg(ap, pcontrol, target);
+ break;
+ case 0x05:
+ len = resp_grouping_m_pg(ap, pcontrol, target);
+ break;
+ case 0xff:
+ len = resp_ctrl_m_pg(ap, pcontrol, target);
+ len += resp_grouping_m_pg(ap + len, pcontrol, target);
+ break;
+ default:
+ goto bad_subpcode;
+ }
offset += len;
break;
case 0x19: /* if spc==1 then sas phy, control+discover */
- if ((subpcode > 0x2) && (subpcode < 0xff)) {
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
- return check_condition_result;
- }
+ if (subpcode > 0x2 && subpcode < 0xff)
+ goto bad_subpcode;
len = 0;
if ((0x0 == subpcode) || (0xff == subpcode))
len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
@@ -2749,49 +2821,50 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
offset += len;
break;
case 0x1c: /* Informational Exceptions Mode page, all devices */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
len = resp_iec_m_pg(ap, pcontrol, target);
offset += len;
break;
case 0x3f: /* Read all Mode pages */
- if ((0 == subpcode) || (0xff == subpcode)) {
- len = resp_err_recov_pg(ap, pcontrol, target);
- len += resp_disconnect_pg(ap + len, pcontrol, target);
- if (is_disk) {
- len += resp_format_pg(ap + len, pcontrol,
- target);
- len += resp_caching_pg(ap + len, pcontrol,
- target);
- } else if (is_zbc) {
- len += resp_caching_pg(ap + len, pcontrol,
- target);
- }
- len += resp_ctrl_m_pg(ap + len, pcontrol, target);
- len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
- if (0xff == subpcode) {
- len += resp_sas_pcd_m_spg(ap + len, pcontrol,
- target, target_dev_id);
- len += resp_sas_sha_m_spg(ap + len, pcontrol);
- }
- len += resp_iec_m_pg(ap + len, pcontrol, target);
- offset += len;
- } else {
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
- return check_condition_result;
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
+ len = resp_err_recov_pg(ap, pcontrol, target);
+ len += resp_disconnect_pg(ap + len, pcontrol, target);
+ if (is_disk) {
+ len += resp_format_pg(ap + len, pcontrol, target);
+ len += resp_caching_pg(ap + len, pcontrol, target);
+ } else if (is_zbc) {
+ len += resp_caching_pg(ap + len, pcontrol, target);
}
+ len += resp_ctrl_m_pg(ap + len, pcontrol, target);
+ if (0xff == subpcode)
+ len += resp_grouping_m_pg(ap + len, pcontrol, target);
+ len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
+ if (0xff == subpcode) {
+ len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
+ target_dev_id);
+ len += resp_sas_sha_m_spg(ap + len, pcontrol);
+ }
+ len += resp_iec_m_pg(ap + len, pcontrol, target);
+ offset += len;
break;
default:
- bad_pcode = true;
- break;
- }
- if (bad_pcode) {
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
- return check_condition_result;
+ goto bad_pcode;
}
if (msense_6)
arr[0] = offset - 1;
else
put_unaligned_be16((offset - 2), arr + 0);
return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
+
+bad_pcode:
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
+ return check_condition_result;
+
+bad_subpcode:
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
+ return check_condition_result;
}
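The arr buffer above is the notable part of this rework: __free(kfree) from the newly included <linux/cleanup.h> attaches a scope-exit destructor, which is what lets the new bad_pcode/bad_subpcode paths return directly without leaking. The idiom in isolation:

	static int example(void)
	{
		unsigned char *arr __free(kfree) = kzalloc(256, GFP_ATOMIC);

		if (!arr)
			return -ENOMEM;
		/* every return past this point frees arr automatically */
		return 0;
	}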
#define SDEBUG_MAX_MSELECT_SZ 512
@@ -3306,7 +3379,8 @@ static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
- u32 sg_skip, u64 lba, u32 num, bool do_write)
+ u32 sg_skip, u64 lba, u32 num, bool do_write,
+ u8 group_number)
{
int ret;
u64 block, rest = 0;
@@ -3325,6 +3399,10 @@ static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
return 0;
if (scp->sc_data_direction != dir)
return -1;
+
+ if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
+ atomic_long_inc(&writes_by_group_number[group_number]);
+
fsp = sip->storep;
block = do_div(lba, sdebug_store_sectors);
@@ -3698,7 +3776,7 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(sip, scp, 0, lba, num, false);
+ ret = do_device_access(sip, scp, 0, lba, num, false, 0);
sdeb_read_unlock(sip);
if (unlikely(ret == -1))
return DID_ERROR << 16;
@@ -3883,6 +3961,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
bool check_prot;
u32 num;
+ u8 group = 0;
u32 ei_lba;
int ret;
u64 lba;
@@ -3894,11 +3973,13 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
ei_lba = 0;
lba = get_unaligned_be64(cmd + 2);
num = get_unaligned_be32(cmd + 10);
+ group = cmd[14] & 0x3f;
check_prot = true;
break;
case WRITE_10:
ei_lba = 0;
lba = get_unaligned_be32(cmd + 2);
+ group = cmd[6] & 0x3f;
num = get_unaligned_be16(cmd + 7);
check_prot = true;
break;
@@ -3913,15 +3994,18 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
ei_lba = 0;
lba = get_unaligned_be32(cmd + 2);
num = get_unaligned_be32(cmd + 6);
+ group = cmd[6] & 0x3f;
check_prot = true;
break;
case 0x53: /* XDWRITEREAD(10) */
ei_lba = 0;
lba = get_unaligned_be32(cmd + 2);
+ group = cmd[6] & 0x1f;
num = get_unaligned_be16(cmd + 7);
check_prot = false;
break;
default: /* assume WRITE(32) */
+ group = cmd[6] & 0x3f;
lba = get_unaligned_be64(cmd + 12);
ei_lba = get_unaligned_be32(cmd + 20);
num = get_unaligned_be32(cmd + 28);
@@ -3976,7 +4060,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(sip, scp, 0, lba, num, true);
+ ret = do_device_access(sip, scp, 0, lba, num, true, group);
if (unlikely(scsi_debug_lbp()))
map_region(sip, lba, num);
/* If ZBC zone then bump its write pointer */
@@ -4028,12 +4112,14 @@ static int resp_write_scat(struct scsi_cmnd *scp,
u32 lb_size = sdebug_sector_size;
u32 ei_lba;
u64 lba;
+ u8 group;
int ret, res;
bool is_16;
static const u32 lrd_size = 32; /* + parameter list header size */
if (cmd[0] == VARIABLE_LENGTH_CMD) {
is_16 = false;
+ group = cmd[6] & 0x3f;
wrprotect = (cmd[10] >> 5) & 0x7;
lbdof = get_unaligned_be16(cmd + 12);
num_lrd = get_unaligned_be16(cmd + 16);
@@ -4044,6 +4130,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
lbdof = get_unaligned_be16(cmd + 4);
num_lrd = get_unaligned_be16(cmd + 8);
bt_len = get_unaligned_be32(cmd + 10);
+ group = cmd[14] & 0x3f;
if (unlikely(have_dif_prot)) {
if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
wrprotect) {
@@ -4132,7 +4219,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
}
}
- ret = do_device_access(sip, scp, sg_off, lba, num, true);
+ ret = do_device_access(sip, scp, sg_off, lba, num, true, group);
/* If ZBC zone then bump its write pointer */
if (sdebug_dev_is_zoned(devip))
zbc_inc_wp(devip, lba, num);
@@ -4507,6 +4594,51 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
}
+static int resp_get_stream_status(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ u16 starting_stream_id, stream_id;
+ const u8 *cmd = scp->cmnd;
+ u32 alloc_len, offset;
+ u8 arr[256] = {};
+ struct scsi_stream_status_header *h = (void *)arr;
+
+ starting_stream_id = get_unaligned_be16(cmd + 4);
+ alloc_len = get_unaligned_be32(cmd + 10);
+
+ if (alloc_len < 8) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
+ return check_condition_result;
+ }
+
+ if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
+ return check_condition_result;
+ }
+
+ /*
+ * The GET STREAM STATUS command only reports status information
+ * about open streams. Treat the non-permanent stream as open.
+ */
+ put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
+ &h->number_of_open_streams);
+
+ for (offset = 8, stream_id = starting_stream_id;
+ offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
+ stream_id < MAXIMUM_NUMBER_OF_STREAMS;
+ offset += 8, stream_id++) {
+ struct scsi_stream_status *stream_status = (void *)arr + offset;
+
+ stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
+ put_unaligned_be16(stream_id,
+ &stream_status->stream_identifier);
+ stream_status->rel_lifetime = stream_id + 1;
+ }
+ put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */
+
+ return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
+}
+
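Worked result for starting_stream_id = 0 and alloc_len = 64, following the loop bounds above:

	/*
	 * Six descriptors fit (offset runs 8, 16, ..., ending at 56):
	 *   header: number_of_open_streams = 6, len = 48
	 *   descriptor i: stream id = i, perm = (i < 5), rel_lifetime = i + 1
	 * fill_from_dev_buffer() then copies min(56, 64) = 56 bytes.
	 */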
static int resp_sync_cache(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
@@ -7182,6 +7314,30 @@ static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR_RO(tur_ms_to_ready);
+static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
+{
+ char *p = buf, *end = buf + PAGE_SIZE;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
+ p += scnprintf(p, end - p, "%d %ld\n", i,
+ atomic_long_read(&writes_by_group_number[i]));
+
+ return p - buf;
+}
+
+static ssize_t group_number_stats_store(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
+ atomic_long_set(&writes_by_group_number[i], 0);
+
+ return count;
+}
+static DRIVER_ATTR_RW(group_number_stats);
+
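The attribute is read-to-dump, write-to-reset (the written bytes are ignored); usage from userspace, shown as a comment since the examples in this document are C:

	/*
	 *   cat /sys/bus/pseudo/drivers/scsi_debug/group_number_stats
	 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/group_number_stats
	 */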
/* Note: The following array creates attribute files in the
/sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
files (over those found in the /sys/module/scsi_debug/parameters
@@ -7228,6 +7384,7 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_cdb_len.attr,
&driver_attr_tur_ms_to_ready.attr,
&driver_attr_zbc.attr,
+ &driver_attr_group_number_stats.attr,
NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
diff --git a/drivers/scsi/scsi_proto_test.c b/drivers/scsi/scsi_proto_test.c
new file mode 100644
index 00000000000000..7fa0a78a2ad168
--- /dev/null
+++ b/drivers/scsi/scsi_proto_test.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Google LLC
+ */
+#include <kunit/test.h>
+#include <asm-generic/unaligned.h>
+#include <scsi/scsi_proto.h>
+
+static void test_scsi_proto(struct kunit *test)
+{
+ static const union {
+ struct scsi_io_group_descriptor desc;
+ u8 arr[sizeof(struct scsi_io_group_descriptor)];
+ } d = { .arr = { 0x45, 0, 0, 0, 0xb0, 0xe4, 0xe3 } };
+ KUNIT_EXPECT_EQ(test, d.desc.io_advice_hints_mode + 0, 1);
+ KUNIT_EXPECT_EQ(test, d.desc.st_enble + 0, 1);
+ KUNIT_EXPECT_EQ(test, d.desc.cs_enble + 0, 0);
+ KUNIT_EXPECT_EQ(test, d.desc.ic_enable + 0, 1);
+ KUNIT_EXPECT_EQ(test, d.desc.acdlu + 0, 1);
+ KUNIT_EXPECT_EQ(test, d.desc.rlbsr + 0, 3);
+ KUNIT_EXPECT_EQ(test, d.desc.lbm_descriptor_type + 0, 0);
+ KUNIT_EXPECT_EQ(test, d.desc.params[0] + 0, 0xe4);
+ KUNIT_EXPECT_EQ(test, d.desc.params[1] + 0, 0xe3);
+
+ static const union {
+ struct scsi_stream_status s;
+ u8 arr[sizeof(struct scsi_stream_status)];
+ } ss = { .arr = { 0x80, 0, 0x12, 0x34, 0x3f } };
+ KUNIT_EXPECT_EQ(test, ss.s.perm + 0, 1);
+ KUNIT_EXPECT_EQ(test, get_unaligned_be16(&ss.s.stream_identifier),
+ 0x1234);
+ KUNIT_EXPECT_EQ(test, ss.s.rel_lifetime + 0, 0x3f);
+
+ static const union {
+ struct scsi_stream_status_header h;
+ u8 arr[sizeof(struct scsi_stream_status_header)];
+ } sh = { .arr = { 1, 2, 3, 4, 0, 0, 5, 6 } };
+ KUNIT_EXPECT_EQ(test, get_unaligned_be32(&sh.h.len), 0x1020304);
+ KUNIT_EXPECT_EQ(test, get_unaligned_be16(&sh.h.number_of_open_streams),
+ 0x506);
+}
+
+static struct kunit_case scsi_proto_test_cases[] = {
+ KUNIT_CASE(test_scsi_proto),
+ {}
+};
+
+static struct kunit_suite scsi_proto_test_suite = {
+ .name = "scsi_proto",
+ .test_cases = scsi_proto_test_cases,
+};
+kunit_test_suite(scsi_proto_test_suite);
+
+MODULE_DESCRIPTION("<scsi/scsi_proto.h> unit tests");
+MODULE_AUTHOR("Bart Van Assche");
+MODULE_LICENSE("GPL");
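The assertions above lean on a const union that overlays a raw byte array on the structure under test, so a single initializer pins down both the compiler's bitfield allocation and the wire offsets. A standalone sketch of the same trick for a hypothetical one-byte format (little-endian bitfield allocation assumed, as GCC/Clang produce on x86):

  #include <assert.h>
  #include <stdint.h>

  /* Hypothetical wire byte: FLAG in bit 7, TYPE in bits 3:0. */
  struct wire_byte {
          uint8_t type : 4;
          uint8_t reserved : 3;
          uint8_t flag : 1;
  };

  int main(void)
  {
          const union {
                  struct wire_byte w;
                  uint8_t raw;
          } u = { .raw = 0x85 };  /* 1000 0101b: flag=1, type=5 */

          assert(u.w.flag == 1);
          assert(u.w.type == 5);
          return 0;
  }

The suite itself runs like any other KUnit test, e.g. through tools/testing/kunit/kunit.py with the test's config symbol enabled (presumably CONFIG_SCSI_PROTO_TEST; the symbol name is an assumption here).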
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 49dd34426d5e06..775df00021e4d5 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -449,6 +449,7 @@ static void scsi_device_dev_release(struct device *dev)
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
+ struct scsi_vpd *vpd_pgb7 = NULL;
unsigned long flags;
might_sleep();
@@ -494,6 +495,8 @@ static void scsi_device_dev_release(struct device *dev)
lockdep_is_held(&sdev->inquiry_mutex));
vpd_pgb2 = rcu_replace_pointer(sdev->vpd_pgb2, vpd_pgb2,
lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_pgb7 = rcu_replace_pointer(sdev->vpd_pgb7, vpd_pgb7,
+ lockdep_is_held(&sdev->inquiry_mutex));
mutex_unlock(&sdev->inquiry_mutex);
if (vpd_pg0)
@@ -510,6 +513,8 @@ static void scsi_device_dev_release(struct device *dev)
kfree_rcu(vpd_pgb1, rcu);
if (vpd_pgb2)
kfree_rcu(vpd_pgb2, rcu);
+ if (vpd_pgb7)
+ kfree_rcu(vpd_pgb7, rcu);
kfree(sdev->inquiry);
kfree(sdev);
@@ -921,6 +926,7 @@ sdev_vpd_pg_attr(pg89);
sdev_vpd_pg_attr(pgb0);
sdev_vpd_pg_attr(pgb1);
sdev_vpd_pg_attr(pgb2);
+sdev_vpd_pg_attr(pgb7);
sdev_vpd_pg_attr(pg0);
static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
@@ -1295,6 +1301,9 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
if (attr == &dev_attr_vpd_pgb2 && !sdev->vpd_pgb2)
return 0;
+ if (attr == &dev_attr_vpd_pgb7 && !sdev->vpd_pgb7)
+ return 0;
+
return S_IRUGO;
}
@@ -1347,6 +1356,7 @@ static struct bin_attribute *scsi_sdev_bin_attrs[] = {
&dev_attr_vpd_pgb0,
&dev_attr_vpd_pgb1,
&dev_attr_vpd_pgb2,
+ &dev_attr_vpd_pgb7,
&dev_attr_inquiry,
NULL
};
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2cc73c650ca60a..ccff8f2e2e75bd 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -47,6 +47,7 @@
#include <linux/blkpg.h>
#include <linux/blk-pm.h>
#include <linux/delay.h>
+#include <linux/rw_hint.h>
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
@@ -1080,12 +1081,38 @@ static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
return BLK_STS_OK;
}
+/**
+ * sd_group_number() - Compute the GROUP NUMBER field
+ * @cmd: SCSI command for which to compute the value of the six-bit GROUP NUMBER
+ * field.
+ *
+ * From SBC-5 r05 (https://www.t10.org/cgi-bin/ac.pl?t=f&f=sbc5r05.pdf):
+ * 0: no relative lifetime.
+ * 1: shortest relative lifetime.
+ * 2: second shortest relative lifetime.
+ * 3 - 0x3d: intermediate relative lifetimes.
+ * 0x3e: second longest relative lifetime.
+ * 0x3f: longest relative lifetime.
+ */
+static u8 sd_group_number(struct scsi_cmnd *cmd)
+{
+ const struct request *rq = scsi_cmd_to_rq(cmd);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
+
+ if (!sdkp->rscs)
+ return 0;
+
+ return min3((u32)rq->write_hint, (u32)sdkp->permanent_stream_count,
+ 0x3fu);
+}
+
static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
sector_t lba, unsigned int nr_blocks,
unsigned char flags, unsigned int dld)
{
cmd->cmd_len = SD_EXT_CDB_SIZE;
cmd->cmnd[0] = VARIABLE_LENGTH_CMD;
+ cmd->cmnd[6] = sd_group_number(cmd);
cmd->cmnd[7] = 0x18; /* Additional CDB len */
cmd->cmnd[9] = write ? WRITE_32 : READ_32;
cmd->cmnd[10] = flags;
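sd_group_number() maps the block layer write hint straight onto GROUP NUMBER, clamped to the device's permanent stream count and to the six-bit field width, and yields 0 whenever the device did not advertise RSCS. A plain-C restatement of the min3() clamp:

  /* Mirrors min3(write_hint, permanent_stream_count, 0x3f). */
  static unsigned int group_number(unsigned int write_hint,
                                   unsigned int perm_streams)
  {
          unsigned int g = write_hint < perm_streams ? write_hint : perm_streams;

          return g < 0x3f ? g : 0x3f;
  }

With four permanent streams, hints 0..4 map to themselves, anything larger clamps to 4, and no value can exceed the 6-bit maximum of 0x3f.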
@@ -1104,7 +1131,7 @@ static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
cmd->cmd_len = 16;
cmd->cmnd[0] = write ? WRITE_16 : READ_16;
cmd->cmnd[1] = flags | ((dld >> 2) & 0x01);
- cmd->cmnd[14] = (dld & 0x03) << 6;
+ cmd->cmnd[14] = ((dld & 0x03) << 6) | sd_group_number(cmd);
cmd->cmnd[15] = 0;
put_unaligned_be64(lba, &cmd->cmnd[2]);
put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
@@ -1119,7 +1146,7 @@ static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
cmd->cmd_len = 10;
cmd->cmnd[0] = write ? WRITE_10 : READ_10;
cmd->cmnd[1] = flags;
- cmd->cmnd[6] = 0;
+ cmd->cmnd[6] = sd_group_number(cmd);
cmd->cmnd[9] = 0;
put_unaligned_be32(lba, &cmd->cmnd[2]);
put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
@@ -1256,7 +1283,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
protect | fua, dld);
} else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
- sdp->use_10_for_rw || protect) {
+ sdp->use_10_for_rw || protect || rq->write_hint) {
ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
protect | fua);
} else {
@@ -3059,6 +3086,70 @@ defaults:
sdkp->DPOFUA = 0;
}
+static bool sd_is_perm_stream(struct scsi_disk *sdkp, unsigned int stream_id)
+{
+ u8 cdb[16] = { SERVICE_ACTION_IN_16, SAI_GET_STREAM_STATUS };
+ struct {
+ struct scsi_stream_status_header h;
+ struct scsi_stream_status s;
+ } buf;
+ struct scsi_device *sdev = sdkp->device;
+ struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
+ int res;
+
+ put_unaligned_be16(stream_id, &cdb[4]);
+ put_unaligned_be32(sizeof(buf), &cdb[10]);
+
+ res = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, &buf, sizeof(buf),
+ SD_TIMEOUT, sdkp->max_retries, &exec_args);
+ if (res < 0)
+ return false;
+ if (scsi_status_is_check_condition(res) && scsi_sense_valid(&sshdr))
+ sd_print_sense_hdr(sdkp, &sshdr);
+ if (res)
+ return false;
+ if (get_unaligned_be32(&buf.h.len) < sizeof(struct scsi_stream_status))
+ return false;
+ return buf.h.stream_status[0].perm;
+}
+
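For reference, the 16-byte CDB that sd_is_perm_stream() builds, written out as a plain array. Offsets follow the put_unaligned_*() calls above; the opcode and service-action values are quoted from memory of scsi_proto.h (0x9e for SERVICE ACTION IN(16), service action 0x16 for GET STREAM STATUS in SBC) and should be treated as an assumption:

  #include <stdint.h>

  /* GET STREAM STATUS for stream id 1, allocation length 16 bytes
   * (8-byte header plus one 8-byte descriptor). */
  static const uint8_t cdb[16] = {
          [0]  = 0x9e,    /* SERVICE ACTION IN(16) */
          [1]  = 0x16,    /* GET STREAM STATUS */
          [5]  = 0x01,    /* STARTING STREAM IDENTIFIER, be16 at 4..5 */
          [13] = 0x10,    /* ALLOCATION LENGTH, be32 at 10..13 */
  };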
+static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+ struct scsi_device *sdp = sdkp->device;
+ const struct scsi_io_group_descriptor *desc, *start, *end;
+ struct scsi_sense_hdr sshdr;
+ struct scsi_mode_data data;
+ int res;
+
+ res = scsi_mode_sense(sdp, /*dbd=*/0x8, /*modepage=*/0x0a,
+ /*subpage=*/0x05, buffer, SD_BUF_SIZE, SD_TIMEOUT,
+ sdkp->max_retries, &data, &sshdr);
+ if (res < 0)
+ return;
+ start = (void *)buffer + data.header_length + 16;
+ end = (void *)buffer + ALIGN_DOWN(data.header_length + data.length,
+ sizeof(*end));
+ /*
+ * From "SBC-5 Constrained Streams with Data Lifetimes": Device severs
+ * should assign the lowest numbered stream identifiers to permanent
+ * streams.
+ */
+ for (desc = start; desc < end; desc++)
+ if (!desc->st_enble || !sd_is_perm_stream(sdkp, desc - start))
+ break;
+ sdkp->permanent_stream_count = desc - start;
+ if (sdkp->rscs && sdkp->permanent_stream_count < 2)
+ sd_printk(KERN_INFO, sdkp,
+ "Unexpected: RSCS has been set and the permanent stream count is %u\n",
+ sdkp->permanent_stream_count);
+ else if (sdkp->permanent_stream_count)
+ sd_printk(KERN_INFO, sdkp, "permanent stream count = %d\n",
+ sdkp->permanent_stream_count);
+}
+
/*
* The ATO bit indicates whether the DIF application tag is available
* for use by the operating system.
@@ -3166,6 +3257,18 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
rcu_read_unlock();
}
+/* Parse the Block Limits Extension VPD page (0xb7) */
+static void sd_read_block_limits_ext(struct scsi_disk *sdkp)
+{
+ struct scsi_vpd *vpd;
+
+ rcu_read_lock();
+ vpd = rcu_dereference(sdkp->device->vpd_pgb7);
+ if (vpd && vpd->len >= 6)
+ sdkp->rscs = vpd->data[5] & 1;
+ rcu_read_unlock();
+}
+
/**
* sd_read_block_characteristics - Query block dev. characteristics
* @sdkp: disk to query
@@ -3541,6 +3644,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (scsi_device_supports_vpd(sdp)) {
sd_read_block_provisioning(sdkp);
sd_read_block_limits(sdkp);
+ sd_read_block_limits_ext(sdkp);
sd_read_block_characteristics(sdkp);
sd_zbc_read_zones(sdkp, buffer);
sd_read_cpr(sdkp);
@@ -3550,6 +3654,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_write_protect_flag(sdkp, buffer);
sd_read_cache_type(sdkp, buffer);
+ sd_read_io_hints(sdkp, buffer);
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
sd_read_security(sdkp, buffer);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 409dda5350d10f..5c4285a582b220 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -125,6 +125,8 @@ struct scsi_disk {
unsigned int physical_block_size;
unsigned int max_medium_access_timeouts;
unsigned int medium_access_timed_out;
+ /* number of permanent streams */
+ u16 permanent_stream_count;
u8 media_present;
u8 write_prot;
u8 protection_type;/* Data Integrity Field */
@@ -151,6 +153,7 @@ struct scsi_disk {
unsigned urswrz : 1;
unsigned security : 1;
unsigned ignore_medium_access_errors : 1;
+ unsigned rscs : 1; /* reduced stream control support */
};
#define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev)
diff --git a/drivers/siox/siox-bus-gpio.c b/drivers/siox/siox-bus-gpio.c
index aeefeb7255247d..9e01642e72de31 100644
--- a/drivers/siox/siox-bus-gpio.c
+++ b/drivers/siox/siox-bus-gpio.c
@@ -91,63 +91,42 @@ static int siox_gpio_probe(struct platform_device *pdev)
int ret;
struct siox_master *smaster;
- smaster = siox_master_alloc(&pdev->dev, sizeof(*ddata));
- if (!smaster) {
- dev_err(dev, "failed to allocate siox master\n");
- return -ENOMEM;
- }
+ smaster = devm_siox_master_alloc(dev, sizeof(*ddata));
+ if (!smaster)
+ return dev_err_probe(dev, -ENOMEM,
+ "failed to allocate siox master\n");
platform_set_drvdata(pdev, smaster);
ddata = siox_master_get_devdata(smaster);
ddata->din = devm_gpiod_get(dev, "din", GPIOD_IN);
- if (IS_ERR(ddata->din)) {
- ret = dev_err_probe(dev, PTR_ERR(ddata->din),
- "Failed to get din GPIO\n");
- goto err;
- }
+ if (IS_ERR(ddata->din))
+ return dev_err_probe(dev, PTR_ERR(ddata->din),
+ "Failed to get din GPIO\n");
ddata->dout = devm_gpiod_get(dev, "dout", GPIOD_OUT_LOW);
- if (IS_ERR(ddata->dout)) {
- ret = dev_err_probe(dev, PTR_ERR(ddata->dout),
- "Failed to get dout GPIO\n");
- goto err;
- }
+ if (IS_ERR(ddata->dout))
+ return dev_err_probe(dev, PTR_ERR(ddata->dout),
+ "Failed to get dout GPIO\n");
ddata->dclk = devm_gpiod_get(dev, "dclk", GPIOD_OUT_LOW);
- if (IS_ERR(ddata->dclk)) {
- ret = dev_err_probe(dev, PTR_ERR(ddata->dclk),
- "Failed to get dclk GPIO\n");
- goto err;
- }
+ if (IS_ERR(ddata->dclk))
+ return dev_err_probe(dev, PTR_ERR(ddata->dclk),
+ "Failed to get dclk GPIO\n");
ddata->dld = devm_gpiod_get(dev, "dld", GPIOD_OUT_LOW);
- if (IS_ERR(ddata->dld)) {
- ret = dev_err_probe(dev, PTR_ERR(ddata->dld),
- "Failed to get dld GPIO\n");
- goto err;
- }
+ if (IS_ERR(ddata->dld))
+ return dev_err_probe(dev, PTR_ERR(ddata->dld),
+ "Failed to get dld GPIO\n");
smaster->pushpull = siox_gpio_pushpull;
/* XXX: determine automatically like spi does */
smaster->busno = 0;
- ret = siox_master_register(smaster);
- if (ret) {
- dev_err_probe(dev, ret,
- "Failed to register siox master\n");
-err:
- siox_master_put(smaster);
- }
-
- return ret;
-}
-
-static int siox_gpio_remove(struct platform_device *pdev)
-{
- struct siox_master *master = platform_get_drvdata(pdev);
-
- siox_master_unregister(master);
+ ret = devm_siox_master_register(dev, smaster);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register siox master\n");
return 0;
}
@@ -160,7 +139,6 @@ MODULE_DEVICE_TABLE(of, siox_gpio_dt_ids);
static struct platform_driver siox_gpio_driver = {
.probe = siox_gpio_probe,
- .remove = siox_gpio_remove,
.driver = {
.name = DRIVER_NAME,
diff --git a/drivers/siox/siox-core.c b/drivers/siox/siox-core.c
index f937cbc8c5aa18..24a45920a240dd 100644
--- a/drivers/siox/siox-core.c
+++ b/drivers/siox/siox-core.c
@@ -707,6 +707,31 @@ struct siox_master *siox_master_alloc(struct device *dev,
}
EXPORT_SYMBOL_GPL(siox_master_alloc);
+static void devm_siox_master_put(void *data)
+{
+ struct siox_master *smaster = data;
+
+ siox_master_put(smaster);
+}
+
+struct siox_master *devm_siox_master_alloc(struct device *dev,
+ size_t size)
+{
+ struct siox_master *smaster;
+ int ret;
+
+ smaster = siox_master_alloc(dev, size);
+ if (!smaster)
+ return NULL;
+
+ ret = devm_add_action_or_reset(dev, devm_siox_master_put, smaster);
+ if (ret)
+ return NULL;
+
+ return smaster;
+}
+EXPORT_SYMBOL_GPL(devm_siox_master_alloc);
+
int siox_master_register(struct siox_master *smaster)
{
int ret;
@@ -717,6 +742,8 @@ int siox_master_register(struct siox_master *smaster)
if (!smaster->pushpull)
return -EINVAL;
+ get_device(&smaster->dev);
+
dev_set_name(&smaster->dev, "siox-%d", smaster->busno);
mutex_init(&smaster->lock);
@@ -768,6 +795,25 @@ void siox_master_unregister(struct siox_master *smaster)
}
EXPORT_SYMBOL_GPL(siox_master_unregister);
+static void devm_siox_master_unregister(void *data)
+{
+ struct siox_master *smaster = data;
+
+ siox_master_unregister(smaster);
+}
+
+int devm_siox_master_register(struct device *dev, struct siox_master *smaster)
+{
+ int ret;
+
+ ret = siox_master_register(smaster);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_siox_master_unregister, smaster);
+}
+EXPORT_SYMBOL_GPL(devm_siox_master_register);
+
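Both devm helpers follow the usual devres shape: the action registered with devm_add_action_or_reset() runs on driver unbind, and when registration of the action itself fails it runs immediately (the "or reset" part), so callers never need manual cleanup on the error path. The same pattern for a hypothetical resource (foo_alloc()/foo_put() are placeholders, not real kernel APIs):

  static void devm_foo_release(void *data)
  {
          foo_put(data);          /* drop the last reference */
  }

  struct foo *devm_foo_alloc(struct device *dev)
  {
          struct foo *f = foo_alloc(dev);

          if (!f)
                  return NULL;
          /* On failure, devm_foo_release(f) has already been called. */
          if (devm_add_action_or_reset(dev, devm_foo_release, f))
                  return NULL;
          return f;
  }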
static struct siox_device *siox_device_add(struct siox_master *smaster,
const char *type, size_t inbytes,
size_t outbytes, u8 statustype)
diff --git a/drivers/siox/siox.h b/drivers/siox/siox.h
index f08b43b713c5cd..513f2c8312f7d6 100644
--- a/drivers/siox/siox.h
+++ b/drivers/siox/siox.h
@@ -45,5 +45,9 @@ static inline void siox_master_put(struct siox_master *smaster)
put_device(&smaster->dev);
}
+struct siox_master *devm_siox_master_alloc(struct device *dev, size_t size);
+
int siox_master_register(struct siox_master *smaster);
void siox_master_unregister(struct siox_master *smaster);
+
+int devm_siox_master_register(struct device *dev, struct siox_master *smaster);
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 11991eb1263644..079035db7dd859 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -830,11 +830,11 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
if (is_target)
- controller = spi_alloc_target(&pdev->dev,
- sizeof(struct fsl_lpspi_data));
+ controller = devm_spi_alloc_target(&pdev->dev,
+ sizeof(struct fsl_lpspi_data));
else
- controller = spi_alloc_host(&pdev->dev,
- sizeof(struct fsl_lpspi_data));
+ controller = devm_spi_alloc_host(&pdev->dev,
+ sizeof(struct fsl_lpspi_data));
if (!controller)
return -ENOMEM;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 833a1bb7a91438..c3e5cee18bea73 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -668,8 +668,8 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
else
- ctrl |= spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word,
- BITS_PER_BYTE) * spi_imx->bits_per_word
+ ctrl |= (spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word,
+ BITS_PER_BYTE) * spi_imx->bits_per_word - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
}
}
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index f982bdebd02838..3c0c24ed1f3dbe 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -29,10 +29,10 @@
*
* Datasheet and Schematic:
* The LM70 is a temperature sensor chip from National Semiconductor; its
- * datasheet is available at http://www.national.com/pf/LM/LM70.html
+ * datasheet is available at https://www.ti.com/lit/gpn/lm70
* The schematic for this particular board (the LM70EVAL-LLP) is
* available (on page 4) here:
- * http://www.national.com/appinfo/tempsensors/files/LM70LLPEVALmanual.pdf
+ * https://download.datasheets.com/pdfs/documentation/nat/kit&board/lm70llpevalmanual.pdf
*
* Also see Documentation/spi/spi-lm70llp.rst. The SPI<->parport code here is
* (heavily) based on spi-butterfly by David Brownell.
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index c9d6d42a88f55b..17b8baf749e6aa 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -382,7 +382,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
* read path) and expect the core to use the regular SPI
* interface in other cases.
*/
- if (!ret || ret != -ENOTSUPP || ret != -EOPNOTSUPP) {
+ if (!ret || (ret != -ENOTSUPP && ret != -EOPNOTSUPP)) {
spi_mem_add_op_stats(ctlr->pcpu_statistics, op, ret);
spi_mem_add_op_stats(mem->spi->pcpu_statistics, op, ret);
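The predicate being repaired is a classic De Morgan slip: "ret != -ENOTSUPP || ret != -EOPNOTSUPP" holds for every possible ret, since no value equals both constants at once, so statistics were recorded even on the not-supported fallback path. A small standalone check (errno values assumed: ENOTSUPP is the kernel-internal 524, EOPNOTSUPP is 95):

  #include <assert.h>

  int main(void)
  {
          const int enotsupp = 524, eopnotsupp = 95;
          int ret;

          for (ret = -1000; ret <= 0; ret++)
                  assert(ret != -enotsupp || ret != -eopnotsupp); /* always true */

          ret = -enotsupp;        /* corrected form rejects both error codes */
          assert(!(ret != -enotsupp && ret != -eopnotsupp));
          return 0;
  }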
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 8d4633b353eef6..e4cb22fe007523 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -788,17 +788,19 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
mtk_spi_setup_packet(host);
- cnt = mdata->xfer_len / 4;
- iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
- trans->tx_buf + mdata->num_xfered, cnt);
+ if (trans->tx_buf) {
+ cnt = mdata->xfer_len / 4;
+ iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
+ trans->tx_buf + mdata->num_xfered, cnt);
- remainder = mdata->xfer_len % 4;
- if (remainder > 0) {
- reg_val = 0;
- memcpy(&reg_val,
- trans->tx_buf + (cnt * 4) + mdata->num_xfered,
- remainder);
- writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+ remainder = mdata->xfer_len % 4;
+ if (remainder > 0) {
+ reg_val = 0;
+ memcpy(&reg_val,
+ trans->tx_buf + (cnt * 4) + mdata->num_xfered,
+ remainder);
+ writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+ }
}
mtk_spi_enable_transfer(host);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index f18738ae95f8f9..ff75838c1b5dfa 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1063,10 +1063,14 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
- if (spi_is_csgpiod(spi)) {
- if (!spi->controller->set_cs_timing && !activate)
- spi_delay_exec(&spi->cs_hold, NULL);
+ /*
+ * Handle chip select delays for GPIO based CS or controllers without
+ * programmable chip select timing.
+ */
+ if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
+ spi_delay_exec(&spi->cs_hold, NULL);
+ if (spi_is_csgpiod(spi)) {
if (!(spi->mode & SPI_NO_CS)) {
/*
* Historically ACPI has no means of the GPIO polarity and
@@ -1099,16 +1103,16 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
spi->controller->set_cs)
spi->controller->set_cs(spi, !enable);
-
- if (!spi->controller->set_cs_timing) {
- if (activate)
- spi_delay_exec(&spi->cs_setup, NULL);
- else
- spi_delay_exec(&spi->cs_inactive, NULL);
- }
} else if (spi->controller->set_cs) {
spi->controller->set_cs(spi, !enable);
}
+
+ if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
+ if (activate)
+ spi_delay_exec(&spi->cs_setup, NULL);
+ else
+ spi_delay_exec(&spi->cs_inactive, NULL);
+ }
}
#ifdef CONFIG_HAS_DMA
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index bb77de6fa067ef..009158fef2a8f1 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -792,7 +792,7 @@ static int uio_mmap_dma_coherent(struct vm_area_struct *vma)
*/
vma->vm_pgoff = 0;
- addr = (void *)mem->addr;
+ addr = (void *)(uintptr_t)mem->addr;
ret = dma_mmap_coherent(mem->dma_device,
vma,
addr,
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index d5f9384df1255f..13cc35ab5d29a7 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -60,7 +60,7 @@ static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
addr = dma_alloc_coherent(&priv->pdev->dev, uiomem->size,
&uiomem->dma_addr, GFP_KERNEL);
- uiomem->addr = addr ? (phys_addr_t) addr : DMEM_MAP_ERROR;
+ uiomem->addr = addr ? (uintptr_t) addr : DMEM_MAP_ERROR;
++uiomem;
}
priv->refcnt++;
@@ -89,7 +89,7 @@ static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode)
break;
if (uiomem->addr) {
dma_free_coherent(uiomem->dma_device, uiomem->size,
- (void *) uiomem->addr,
+ (void *) (uintptr_t) uiomem->addr,
uiomem->dma_addr);
}
uiomem->addr = DMEM_MAP_ERROR;
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index 72b33f7d4c40fc..f67881cba645ba 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -191,7 +191,7 @@ static int pruss_probe(struct platform_device *pdev)
p->mem[1].size = sram_pool_sz;
p->mem[1].memtype = UIO_MEM_PHYS;
- p->mem[2].addr = (phys_addr_t) gdev->ddr_vaddr;
+ p->mem[2].addr = (uintptr_t) gdev->ddr_vaddr;
p->mem[2].dma_addr = gdev->ddr_paddr;
p->mem[2].size = extram_pool_sz;
p->mem[2].memtype = UIO_MEM_DMA_COHERENT;
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index a61b8260b8f36c..e3179e987cdb32 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1523,7 +1523,7 @@ config FB_FSL_DIU
config FB_SH_MOBILE_LCDC
tristate "SuperH Mobile LCDC framebuffer support"
depends on FB && HAVE_CLK && HAS_IOMEM
- depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
+ depends on SUPERH || COMPILE_TEST
depends on FB_DEVICE
select FB_BACKLIGHT
select FB_DEFERRED_IO
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index dca9c0325b3f0a..082501feceb96a 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -622,8 +622,13 @@ static int arkfb_set_par(struct fb_info *info)
info->tileops = NULL;
/* in 4bpp supports 8p wide tiles only, any tiles otherwise */
- info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
- info->pixmap.blit_y = ~(u32)0;
+ if (bpp == 4) {
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ } else {
+ bitmap_fill(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ }
+ bitmap_fill(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
offset_value = (info->var.xres_virtual * bpp) / 64;
screen_size = info->var.yres_virtual * info->fix.line_length;
@@ -635,8 +640,10 @@ static int arkfb_set_par(struct fb_info *info)
info->tileops = &arkfb_tile_ops;
/* supports 8x16 tiles only */
- info->pixmap.blit_x = 1 << (8 - 1);
- info->pixmap.blit_y = 1 << (16 - 1);
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ bitmap_zero(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
+ set_bit(16 - 1, info->pixmap.blit_y);
offset_value = info->var.xres_virtual / 16;
screen_size = (info->var.xres_virtual * info->var.yres_virtual) / 64;
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 98d0e2dbcd2f36..fcabc668e9fbe7 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -2479,12 +2479,12 @@ static int fbcon_set_font(struct vc_data *vc, const struct console_font *font,
h > FBCON_SWAP(info->var.rotate, info->var.yres, info->var.xres))
return -EINVAL;
- if (font->width > 32 || font->height > 32)
+ if (font->width > FB_MAX_BLIT_WIDTH || font->height > FB_MAX_BLIT_HEIGHT)
return -EINVAL;
/* Make sure drawing engine can handle the font */
- if (!(info->pixmap.blit_x & BIT(font->width - 1)) ||
- !(info->pixmap.blit_y & BIT(font->height - 1)))
+ if (!test_bit(font->width - 1, info->pixmap.blit_x) ||
+ !test_bit(font->height - 1, info->pixmap.blit_y))
return -EINVAL;
/* Make sure driver can handle the font length */
@@ -3050,8 +3050,8 @@ void fbcon_get_requirement(struct fb_info *info,
vc = vc_cons[i].d;
if (vc && vc->vc_mode == KD_TEXT &&
info->node == con2fb_map[i]) {
- caps->x |= 1 << (vc->vc_font.width - 1);
- caps->y |= 1 << (vc->vc_font.height - 1);
+ set_bit(vc->vc_font.width - 1, caps->x);
+ set_bit(vc->vc_font.height - 1, caps->y);
charcnt = vc->vc_font.charcount;
if (caps->len < charcnt)
caps->len = charcnt;
@@ -3062,8 +3062,10 @@ void fbcon_get_requirement(struct fb_info *info,
if (vc && vc->vc_mode == KD_TEXT &&
info->node == con2fb_map[fg_console]) {
- caps->x = 1 << (vc->vc_font.width - 1);
- caps->y = 1 << (vc->vc_font.height - 1);
+ bitmap_zero(caps->x, FB_MAX_BLIT_WIDTH);
+ set_bit(vc->vc_font.width - 1, caps->x);
+ bitmap_zero(caps->y, FB_MAX_BLIT_HEIGHT);
+ set_bit(vc->vc_font.height - 1, caps->y);
caps->len = vc->vc_font.charcount;
}
}
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 48287366e0d4a1..4c4ad0a86a5044 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -212,8 +212,8 @@ static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
fbcon_get_requirement(info, &caps);
info->fbops->fb_get_caps(info, &fbcaps, var);
- if (((fbcaps.x ^ caps.x) & caps.x) ||
- ((fbcaps.y ^ caps.y) & caps.y) ||
+ if (!bitmap_subset(caps.x, fbcaps.x, FB_MAX_BLIT_WIDTH) ||
+ !bitmap_subset(caps.y, fbcaps.y, FB_MAX_BLIT_HEIGHT) ||
(fbcaps.len < caps.len))
err = -EINVAL;
@@ -420,11 +420,11 @@ static int do_register_framebuffer(struct fb_info *fb_info)
}
fb_info->pixmap.offset = 0;
- if (!fb_info->pixmap.blit_x)
- fb_info->pixmap.blit_x = ~(u32)0;
+ if (bitmap_empty(fb_info->pixmap.blit_x, FB_MAX_BLIT_WIDTH))
+ bitmap_fill(fb_info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
- if (!fb_info->pixmap.blit_y)
- fb_info->pixmap.blit_y = ~(u32)0;
+ if (bitmap_empty(fb_info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT))
+ bitmap_fill(fb_info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
if (!fb_info->modelist.prev || !fb_info->modelist.next)
INIT_LIST_HEAD(&fb_info->modelist);
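The retired u32 expression "(fbcaps ^ caps) & caps" is nonzero exactly when caps contains a bit that fbcaps lacks, i.e. when caps is not a subset of fbcaps; bitmap_subset() says the same thing directly and scales past 32 blit widths. A one-word model of the equivalence:

  #include <assert.h>
  #include <stdbool.h>
  #include <stdint.h>

  /* One-word bitmap_subset(): is every bit of need also set in have? */
  static bool subset32(uint32_t need, uint32_t have)
  {
          return ((have ^ need) & need) == 0;     /* == !(need & ~have) */
  }

  int main(void)
  {
          assert(subset32(0x0081, 0x00ff));       /* widths 1 and 8 available */
          assert(!subset32(0x0100, 0x00ff));      /* width 9 missing */
          return 0;
  }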
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 79e5bfbdd34c26..0a26399dbc899d 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -1311,7 +1311,7 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
int fb_videomode_from_videomode(const struct videomode *vm,
struct fb_videomode *fbmode)
{
- unsigned int htotal, vtotal;
+ unsigned int htotal, vtotal, total;
fbmode->xres = vm->hactive;
fbmode->left_margin = vm->hback_porch;
@@ -1344,8 +1344,9 @@ int fb_videomode_from_videomode(const struct videomode *vm,
vtotal = vm->vactive + vm->vfront_porch + vm->vback_porch +
vm->vsync_len;
/* prevent division by zero */
- if (htotal && vtotal) {
- fbmode->refresh = vm->pixelclock / (htotal * vtotal);
+ total = htotal * vtotal;
+ if (total) {
+ fbmode->refresh = vm->pixelclock / total;
/* a mode must have htotal and vtotal != 0 or it is invalid */
} else {
fbmode->refresh = 0;
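Checking htotal and vtotal individually misses the corner case where their 32-bit product wraps to zero, which the following division would then trip over; testing the computed total closes that hole. The wrap, concretely:

  #include <stdio.h>

  int main(void)
  {
          unsigned int htotal = 65536, vtotal = 65536;    /* both nonzero */
          unsigned int total = htotal * vtotal;           /* 2^32 wraps to 0 */

          printf("total = %u\n", total);                  /* prints 0 */
          return 0;
  }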
diff --git a/drivers/video/fbdev/core/svgalib.c b/drivers/video/fbdev/core/svgalib.c
index 2cba158888ea4e..821b89a0a6453b 100644
--- a/drivers/video/fbdev/core/svgalib.c
+++ b/drivers/video/fbdev/core/svgalib.c
@@ -354,12 +354,19 @@ void svga_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
{
if (var->bits_per_pixel == 0) {
/* can only support 256 8x16 bitmap */
- caps->x = 1 << (8 - 1);
- caps->y = 1 << (16 - 1);
+ bitmap_zero(caps->x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, caps->x);
+ bitmap_zero(caps->y, FB_MAX_BLIT_HEIGHT);
+ set_bit(16 - 1, caps->y);
caps->len = 256;
} else {
- caps->x = (var->bits_per_pixel == 4) ? 1 << (8 - 1) : ~(u32)0;
- caps->y = ~(u32)0;
+ if (var->bits_per_pixel == 4) {
+ bitmap_zero(caps->x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, caps->x);
+ } else {
+ bitmap_fill(caps->x, FB_MAX_BLIT_WIDTH);
+ }
+ bitmap_fill(caps->y, FB_MAX_BLIT_HEIGHT);
caps->len = ~(u32)0;
}
}
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index 7c402e9fd7a98d..baec312d7b33bc 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -32,15 +32,6 @@
#define CARMINE_MEM_SIZE 0x8000000
#define DRV_NAME "mb862xxfb"
-#if defined(CONFIG_SOCRATES)
-static struct mb862xx_gc_mode socrates_gc_mode = {
- /* Mode for Prime View PM070WL4 TFT LCD Panel */
- { "800x480", 45, 800, 480, 40000, 86, 42, 33, 10, 128, 2, 0, 0, 0 },
- /* 16 bits/pixel, 16MB, 133MHz, SDRAM memory mode value */
- 16, 0x1000000, GC_CCF_COT_133, 0x4157ba63
-};
-#endif
-
/* Helpers */
static inline int h_total(struct fb_var_screeninfo *var)
{
@@ -666,6 +657,15 @@ static int mb862xx_gdc_init(struct mb862xxfb_par *par)
return 0;
}
+#if defined(CONFIG_SOCRATES)
+static struct mb862xx_gc_mode socrates_gc_mode = {
+ /* Mode for Prime View PM070WL4 TFT LCD Panel */
+ { "800x480", 45, 800, 480, 40000, 86, 42, 33, 10, 128, 2, 0, 0, 0 },
+ /* 16 bits/pixel, 16MB, 133MHz, SDRAM memory mode value */
+ 16, 0x1000000, GC_CCF_COT_133, 0x4157ba63
+};
+#endif
+
static int of_platform_mb862xx_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
index 477789cff8e08d..d487941853e647 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
@@ -225,17 +225,12 @@ static ssize_t tpo_td043_gamma_show(struct device *dev,
{
struct panel_drv_data *ddata = dev_get_drvdata(dev);
ssize_t len = 0;
- int ret;
int i;
- for (i = 0; i < ARRAY_SIZE(ddata->gamma); i++) {
- ret = snprintf(buf + len, PAGE_SIZE - len, "%u ",
- ddata->gamma[i]);
- if (ret < 0)
- return ret;
- len += ret;
- }
- buf[len - 1] = '\n';
+ for (i = 0; i < ARRAY_SIZE(ddata->gamma); i++)
+ len += sysfs_emit_at(buf, len, "%u ", ddata->gamma[i]);
+ if (len)
+ buf[len - 1] = '\n';
return len;
}
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index 07722a5ea8eff8..ff84106ecf1c11 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -617,8 +617,13 @@ static int s3fb_set_par(struct fb_info *info)
info->tileops = NULL;
/* in 4bpp supports 8p wide tiles only, any tiles otherwise */
- info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
- info->pixmap.blit_y = ~(u32)0;
+ if (bpp == 4) {
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ } else {
+ bitmap_fill(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ }
+ bitmap_fill(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
offset_value = (info->var.xres_virtual * bpp) / 64;
screen_size = info->var.yres_virtual * info->fix.line_length;
@@ -630,8 +635,10 @@ static int s3fb_set_par(struct fb_info *info)
info->tileops = fasttext ? &s3fb_fast_tile_ops : &s3fb_tile_ops;
/* supports 8x16 tiles only */
- info->pixmap.blit_x = 1 << (8 - 1);
- info->pixmap.blit_y = 1 << (16 - 1);
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ bitmap_zero(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
+ set_bit(16 - 1, info->pixmap.blit_y);
offset_value = info->var.xres_virtual / 16;
screen_size = (info->var.xres_virtual * info->var.yres_virtual) / 64;
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index e1f421e91b4fb0..73f00c079a9499 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -1546,7 +1546,7 @@ static ssize_t uvesafb_show_vbe_ver(struct device *dev,
struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
- return snprintf(buf, PAGE_SIZE, "%.4x\n", par->vbe_ib.vbe_version);
+ return sysfs_emit(buf, "%.4x\n", par->vbe_ib.vbe_version);
}
static DEVICE_ATTR(vbe_version, S_IRUGO, uvesafb_show_vbe_ver, NULL);
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index b485e919820134..a87bafbb119cc1 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -1353,7 +1353,11 @@ static int vga16fb_probe(struct platform_device *dev)
info->var = vga16fb_defined;
info->fix = vga16fb_fix;
/* supports rectangles with widths of multiples of 8 */
- info->pixmap.blit_x = 1 << 7 | 1 << 15 | 1 << 23 | 1 << 31;
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ set_bit(16 - 1, info->pixmap.blit_x);
+ set_bit(24 - 1, info->pixmap.blit_x);
+ set_bit(32 - 1, info->pixmap.blit_x);
info->flags = FBINFO_HWACCEL_YPAN;
i = (info->var.bits_per_pixel == 8) ? 256 : 16;
diff --git a/drivers/video/fbdev/via/accel.c b/drivers/video/fbdev/via/accel.c
index 0a1bc7a4d7853c..1e04026f080918 100644
--- a/drivers/video/fbdev/via/accel.c
+++ b/drivers/video/fbdev/via/accel.c
@@ -115,7 +115,7 @@ static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
if (op != VIA_BITBLT_FILL) {
tmp = src_mem ? 0 : src_addr;
- if (dst_addr & 0xE0000007) {
+ if (tmp & 0xE0000007) {
printk(KERN_WARNING "hw_bitblt_1: Unsupported source "
"address %X\n", tmp);
return -EINVAL;
@@ -260,7 +260,7 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
writel(tmp, engine + 0x18);
tmp = src_mem ? 0 : src_addr;
- if (dst_addr & 0xE0000007) {
+ if (tmp & 0xE0000007) {
printk(KERN_WARNING "hw_bitblt_2: Unsupported source "
"address %X\n", tmp);
return -EINVAL;
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index f8d022cb61e8d1..df984f3a7ff641 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -390,8 +390,13 @@ static int vt8623fb_set_par(struct fb_info *info)
info->tileops = NULL;
/* in 4bpp supports 8p wide tiles only, any tiles otherwise */
- info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
- info->pixmap.blit_y = ~(u32)0;
+ if (bpp == 4) {
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ } else {
+ bitmap_fill(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ }
+ bitmap_fill(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
offset_value = (info->var.xres_virtual * bpp) / 64;
fetch_value = ((info->var.xres * bpp) / 128) + 4;
@@ -408,8 +413,10 @@ static int vt8623fb_set_par(struct fb_info *info)
info->tileops = &vt8623fb_tile_ops;
/* supports 8x16 tiles only */
- info->pixmap.blit_x = 1 << (8 - 1);
- info->pixmap.blit_y = 1 << (16 - 1);
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ bitmap_zero(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
+ set_bit(16 - 1, info->pixmap.blit_y);
offset_value = info->var.xres_virtual / 16;
fetch_value = (info->var.xres / 8) + 8;
diff --git a/drivers/video/sticore.c b/drivers/video/sticore.c
index 7115b325817f62..88a1758616e021 100644
--- a/drivers/video/sticore.c
+++ b/drivers/video/sticore.c
@@ -529,7 +529,7 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
if (fbfont_name && strlen(fbfont_name))
fbfont = find_font(fbfont_name);
if (!fbfont)
- fbfont = get_default_font(1024,768, ~(u32)0, ~(u32)0);
+ fbfont = get_default_font(1024, 768, NULL, NULL);
if (!fbfont)
return NULL;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 360a5304ec03ce..b01b1bbf249371 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -344,17 +344,21 @@ void v9fs_evict_inode(struct inode *inode)
struct v9fs_inode __maybe_unused *v9inode = V9FS_I(inode);
__le32 __maybe_unused version;
- truncate_inode_pages_final(&inode->i_data);
+ if (!is_bad_inode(inode)) {
+ truncate_inode_pages_final(&inode->i_data);
- version = cpu_to_le32(v9inode->qid.version);
- netfs_clear_inode_writeback(inode, &version);
+ version = cpu_to_le32(v9inode->qid.version);
+ netfs_clear_inode_writeback(inode, &version);
- clear_inode(inode);
- filemap_fdatawrite(&inode->i_data);
+ clear_inode(inode);
+ filemap_fdatawrite(&inode->i_data);
#ifdef CONFIG_9P_FSCACHE
- fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false);
+ if (v9fs_inode_cookie(v9inode))
+ fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false);
#endif
+ } else
+ clear_inode(inode);
}
struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid)
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index ef9db3e035062b..55dde186041a38 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -78,11 +78,11 @@ struct inode *v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid)
retval = v9fs_init_inode(v9ses, inode, &fid->qid,
st->st_mode, new_decode_dev(st->st_rdev));
+ v9fs_stat2inode_dotl(st, inode, 0);
kfree(st);
if (retval)
goto error;
- v9fs_stat2inode_dotl(st, inode, 0);
v9fs_set_netfs_context(inode);
v9fs_cache_inode_get_cookie(inode);
retval = v9fs_get_acl(inode, fid);
@@ -297,7 +297,6 @@ static int v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
umode_t omode)
{
int err;
- struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL, *dfid = NULL;
kgid_t gid;
const unsigned char *name;
@@ -307,7 +306,6 @@ static int v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
struct posix_acl *dacl = NULL, *pacl = NULL;
p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry);
- v9ses = v9fs_inode2v9ses(dir);
omode |= S_IFDIR;
if (dir->i_mode & S_ISGID)
@@ -739,7 +737,6 @@ v9fs_vfs_mknod_dotl(struct mnt_idmap *idmap, struct inode *dir,
kgid_t gid;
const unsigned char *name;
umode_t mode;
- struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL, *dfid = NULL;
struct inode *inode;
struct p9_qid qid;
@@ -749,7 +746,6 @@ v9fs_vfs_mknod_dotl(struct mnt_idmap *idmap, struct inode *dir,
dir->i_ino, dentry, omode,
MAJOR(rdev), MINOR(rdev));
- v9ses = v9fs_inode2v9ses(dir);
dfid = v9fs_parent_fid(dentry);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 1920ed69279b58..3314249e867474 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1359,7 +1359,7 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
rcu_read_unlock();
- strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
+ get_task_comm(psinfo->pr_fname, p);
return 0;
}
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 5f7587ca1ca772..1e09aeea69c22e 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1559,7 +1559,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
* needing to allocate extents from the block group.
*/
used = btrfs_space_info_used(space_info, true);
- if (space_info->total_bytes - block_group->length < used) {
+ if (space_info->total_bytes - block_group->length < used &&
+ block_group->zone_unusable < block_group->length) {
/*
* Add a reference for the list, compensate for the ref
* drop under the "next" label for the
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7441245b1ceb15..61594eaf1f8969 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4333,6 +4333,19 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
goto done;
+ /*
+ * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
+ * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
+ * started and finished reading the same eb. In this case, UPTODATE
+ * will now be set, and we shouldn't read it in again.
+ */
+ if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
+ clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
+ smp_mb__after_atomic();
+ wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
+ return 0;
+ }
+
clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
eb->read_mirror = 0;
check_buffer_tree_ref(eb);
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 347ca13d15a975..445f7716f1e2f7 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -309,7 +309,7 @@ int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
btrfs_warn(fs_info,
"no extent map found for inode %llu (root %lld) when unpinning extent range [%llu, %llu), generation %llu",
btrfs_ino(inode), btrfs_root_id(inode->root),
- start, len, gen);
+ start, start + len, gen);
ret = -ENOENT;
goto out;
}
@@ -318,7 +318,7 @@ int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
btrfs_warn(fs_info,
"found extent map for inode %llu (root %lld) with unexpected start offset %llu when unpinning extent range [%llu, %llu), generation %llu",
btrfs_ino(inode), btrfs_root_id(inode->root),
- em->start, start, len, gen);
+ em->start, start, start + len, gen);
ret = -EUCLEAN;
goto out;
}
@@ -340,9 +340,9 @@ int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
em->mod_len = em->len;
}
- free_extent_map(em);
out:
write_unlock(&tree->lock);
+ free_extent_map(em);
return ret;
}
@@ -629,13 +629,13 @@ int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
*/
ret = merge_extent_mapping(em_tree, existing,
em, start);
- if (ret) {
+ if (WARN_ON(ret)) {
free_extent_map(em);
*em_in = NULL;
- WARN_ONCE(ret,
-"extent map merge error existing [%llu, %llu) with em [%llu, %llu) start %llu\n",
- existing->start, existing->len,
- orig_start, orig_len, start);
+ btrfs_warn(fs_info,
+"extent map merge error existing [%llu, %llu) with em [%llu, %llu) start %llu",
+ existing->start, extent_map_end(existing),
+ orig_start, orig_start + orig_len, start);
}
free_extent_map(existing);
}
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index c4bd0e60db5925..fa25004ab04e7b 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2812,7 +2812,17 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
gen = btrfs_get_last_trans_committed(fs_info);
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
- bytenr = btrfs_sb_offset(i);
+ ret = btrfs_sb_log_location(scrub_dev, i, 0, &bytenr);
+ if (ret == -ENOENT)
+ break;
+
+ if (ret) {
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.super_errors++;
+ spin_unlock(&sctx->stat_lock);
+ continue;
+ }
+
if (bytenr + BTRFS_SUPER_INFO_SIZE >
scrub_dev->commit_total_bytes)
break;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 1dc1f1946ae0eb..f15591f3e54fa4 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -692,6 +692,16 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
device->bdev = file_bdev(bdev_file);
clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
+ if (device->devt != device->bdev->bd_dev) {
+ btrfs_warn(NULL,
+ "device %s maj:min changed from %d:%d to %d:%d",
+ device->name->str, MAJOR(device->devt),
+ MINOR(device->devt), MAJOR(device->bdev->bd_dev),
+ MINOR(device->bdev->bd_dev));
+
+ device->devt = device->bdev->bd_dev;
+ }
+
fs_devices->open_devices++;
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
device->devid != BTRFS_DEV_REPLACE_DEVID) {
@@ -1174,23 +1184,30 @@ static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
struct btrfs_device *device;
struct btrfs_device *latest_dev = NULL;
struct btrfs_device *tmp_device;
+ int ret = 0;
list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
dev_list) {
- int ret;
+ int ret2;
- ret = btrfs_open_one_device(fs_devices, device, flags, holder);
- if (ret == 0 &&
+ ret2 = btrfs_open_one_device(fs_devices, device, flags, holder);
+ if (ret2 == 0 &&
(!latest_dev || device->generation > latest_dev->generation)) {
latest_dev = device;
- } else if (ret == -ENODATA) {
+ } else if (ret2 == -ENODATA) {
fs_devices->num_devices--;
list_del(&device->dev_list);
btrfs_free_device(device);
}
+ if (ret == 0 && ret2 != 0)
+ ret = ret2;
}
- if (fs_devices->open_devices == 0)
+
+ if (fs_devices->open_devices == 0) {
+ if (ret)
+ return ret;
return -EINVAL;
+ }
fs_devices->opened = 1;
fs_devices->latest_dev = latest_dev;
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 5a3d5ec75c5a94..4cba80b34387c1 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1574,11 +1574,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
if (!map)
return -EINVAL;
- cache->physical_map = btrfs_clone_chunk_map(map, GFP_NOFS);
- if (!cache->physical_map) {
- ret = -ENOMEM;
- goto out;
- }
+ cache->physical_map = map;
zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
if (!zone_info) {
@@ -1690,7 +1686,6 @@ out:
}
bitmap_free(active);
kfree(zone_info);
- btrfs_free_chunk_map(map);
return ret;
}
@@ -2175,6 +2170,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
struct btrfs_chunk_map *map;
const bool is_metadata = (block_group->flags &
(BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
int ret = 0;
int i;
@@ -2250,6 +2246,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
btrfs_clear_data_reloc_bg(block_group);
spin_unlock(&block_group->lock);
+ down_read(&dev_replace->rwsem);
map = block_group->physical_map;
for (i = 0; i < map->num_stripes; i++) {
struct btrfs_device *device = map->stripes[i].dev;
@@ -2266,13 +2263,16 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
zinfo->zone_size >> SECTOR_SHIFT);
memalloc_nofs_restore(nofs_flags);
- if (ret)
+ if (ret) {
+ up_read(&dev_replace->rwsem);
return ret;
+ }
if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
zinfo->reserved_active_zones++;
btrfs_dev_clear_active_zone(device, physical);
}
+ up_read(&dev_replace->rwsem);
if (!fully_written)
btrfs_dec_block_group_ro(block_group);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 7fb4aae9741246..55051ad09c1919 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -4634,6 +4634,14 @@ unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
iput(inode);
spin_lock(&mdsc->cap_delay_lock);
}
+
+ /*
+ * Make sure too many dirty caps or general
+ * slowness doesn't block mdsc delayed work,
+ * preventing send_renew_caps() from running.
+ */
+ if (jiffies - loop_start >= 5 * HZ)
+ break;
}
spin_unlock(&mdsc->cap_delay_lock);
doutc(cl, "done\n");
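The new bail-out uses the standard "jiffies - start >= timeout" idiom, which stays correct across a jiffies wrap because unsigned subtraction yields the elapsed ticks modulo 2^BITS_PER_LONG. In miniature:

  #include <assert.h>

  int main(void)
  {
          unsigned long now = 3;          /* counter wrapped past zero */
          unsigned long start = -3UL;     /* three ticks before the wrap */

          assert(now - start == 6);       /* elapsed, despite now < start */
          return 0;
  }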
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index abe8028d95bf4e..16873d07692f42 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1138,7 +1138,12 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
}
idx = 0;
- left = ret > 0 ? ret : 0;
+ if (ret <= 0)
+ left = 0;
+ else if (off + ret > i_size)
+ left = i_size - off;
+ else
+ left = ret;
while (left > 0) {
size_t plen, copied;
@@ -1167,15 +1172,13 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
}
if (ret > 0) {
- if (off > *ki_pos) {
- if (off >= i_size) {
- *retry_op = CHECK_EOF;
- ret = i_size - *ki_pos;
- *ki_pos = i_size;
- } else {
- ret = off - *ki_pos;
- *ki_pos = off;
- }
+ if (off >= i_size) {
+ *retry_op = CHECK_EOF;
+ ret = i_size - *ki_pos;
+ *ki_pos = i_size;
+ } else {
+ ret = off - *ki_pos;
+ *ki_pos = off;
}
if (last_objver)
@@ -2126,14 +2129,16 @@ again:
int statret;
struct page *page = NULL;
loff_t i_size;
+ int mask = CEPH_STAT_CAP_SIZE;
if (retry_op == READ_INLINE) {
page = __page_cache_alloc(GFP_KERNEL);
if (!page)
return -ENOMEM;
+
+ mask = CEPH_STAT_CAP_INLINE_DATA;
}
- statret = __ceph_do_getattr(inode, page,
- CEPH_STAT_CAP_INLINE_DATA, !!page);
+ statret = __ceph_do_getattr(inode, page, mask, !!page);
if (statret < 0) {
if (page)
__free_page(page);
@@ -2174,7 +2179,7 @@ again:
/* hit EOF or hole? */
if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
ret < len) {
- doutc(cl, "hit hole, ppos %lld < size %lld, reading more\n",
+ doutc(cl, "may hit hole, ppos %lld < size %lld, reading more\n",
iocb->ki_pos, i_size);
read += ret;
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 69308fd73e4a92..c0eb139adb07a8 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -430,7 +430,6 @@ static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
switch (mode) {
case EROFS_MOUNT_DAX_ALWAYS:
- warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
set_opt(&ctx->opt, DAX_ALWAYS);
clear_opt(&ctx->opt, DAX_NEVER);
return true;
diff --git a/fs/exec.c b/fs/exec.c
index ff6f26671cfc02..cf1df7f16e55cc 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -895,6 +895,7 @@ int transfer_args_to_stack(struct linux_binprm *bprm,
goto out;
}
+ bprm->exec += *sp_location - MAX_ARG_PAGES * PAGE_SIZE;
*sp_location = sp;
out:
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 789af5c8fade9d..aa1626955b2cf5 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1718,7 +1718,8 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
struct buffer_head *dibh, *bh;
struct gfs2_holder rd_gh;
unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
- u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
+ unsigned int bsize = 1 << bsize_shift;
+ u64 lblock = (offset + bsize - 1) >> bsize_shift;
__u16 start_list[GFS2_MAX_META_HEIGHT];
__u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
unsigned int start_aligned, end_aligned;
@@ -1729,7 +1730,7 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
u64 prev_bnr = 0;
__be64 *start, *end;
- if (offset >= maxsize) {
+ if (offset + bsize - 1 >= maxsize) {
/*
* The starting point lies beyond the allocated metadata;
* there are no blocks to deallocate.
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index 3de5047a7ff988..a0017724d52393 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -239,7 +239,8 @@ replay_again:
.tcon = tcon,
.path = path,
.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
- .desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES,
+ .desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES |
+ FILE_READ_EA,
.disposition = FILE_OPEN,
.fid = pfid,
.replay = !!(retries),
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index 81d9aafd2210af..aa6f1ecb7c0e8f 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -151,10 +151,6 @@ MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
"vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
" and less secure. Default: n/N/0");
-extern mempool_t *cifs_sm_req_poolp;
-extern mempool_t *cifs_req_poolp;
-extern mempool_t *cifs_mid_poolp;
-
struct workqueue_struct *cifsiod_wq;
struct workqueue_struct *decrypt_wq;
struct workqueue_struct *fileinfo_put_wq;
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 8be62ed053a25e..7ed9d05f6890b4 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -355,6 +355,9 @@ struct smb_version_operations {
/* informational QFS call */
void (*qfs_tcon)(const unsigned int, struct cifs_tcon *,
struct cifs_sb_info *);
+ /* query for server interfaces */
+ int (*query_server_interfaces)(const unsigned int, struct cifs_tcon *,
+ bool);
/* check if a path is accessible or not */
int (*is_path_accessible)(const unsigned int, struct cifs_tcon *,
struct cifs_sb_info *, const char *);
@@ -2104,6 +2107,8 @@ extern struct workqueue_struct *cifsoplockd_wq;
extern struct workqueue_struct *deferredclose_wq;
extern __u32 cifs_lock_secret;
+extern mempool_t *cifs_sm_req_poolp;
+extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
/* Operations for different SMB versions */
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 86ae578904a269..9b85b5341822e7 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -52,9 +52,6 @@
#include "fs_context.h"
#include "cifs_swn.h"
-extern mempool_t *cifs_req_poolp;
-extern bool disable_legacy_dialects;
-
/* FIXME: should these be tunable? */
#define TLINK_ERROR_EXPIRE (1 * HZ)
#define TLINK_IDLE_EXPIRE (600 * HZ)
@@ -123,12 +120,16 @@ static void smb2_query_server_interfaces(struct work_struct *work)
struct cifs_tcon *tcon = container_of(work,
struct cifs_tcon,
query_interfaces.work);
+ struct TCP_Server_Info *server = tcon->ses->server;
/*
* query server network interfaces, in case they change
*/
+ if (!server->ops->query_server_interfaces)
+ return;
+
xid = get_xid();
- rc = SMB3_request_interfaces(xid, tcon, false);
+ rc = server->ops->query_server_interfaces(xid, tcon, false);
free_xid(xid);
if (rc) {
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index ec25d3c3e1ee39..16aadce492b2ec 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -486,7 +486,6 @@ struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
cfile->uid = current_fsuid();
cfile->dentry = dget(dentry);
cfile->f_flags = file->f_flags;
- cfile->status_file_deleted = false;
cfile->invalidHandle = false;
cfile->deferred_close_scheduled = false;
cfile->tlink = cifs_get_tlink(tlink);
@@ -1073,6 +1072,19 @@ void smb2_deferred_work_close(struct work_struct *work)
_cifsFileInfo_put(cfile, true, false);
}
+static bool
+smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
+{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifsInodeInfo *cinode = CIFS_I(inode);
+
+ return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
+ (cinode->oplock == CIFS_CACHE_RHW_FLG ||
+ cinode->oplock == CIFS_CACHE_RH_FLG) &&
+ !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
+
+}
+
int cifs_close(struct inode *inode, struct file *file)
{
struct cifsFileInfo *cfile;
@@ -1086,10 +1098,8 @@ int cifs_close(struct inode *inode, struct file *file)
cfile = file->private_data;
file->private_data = NULL;
dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
- if ((cifs_sb->ctx->closetimeo && cinode->oplock == CIFS_CACHE_RHW_FLG)
- && cinode->lease_granted &&
- !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
- dclose && !(cfile->status_file_deleted)) {
+ if ((cfile->status_file_deleted == false) &&
+ (smb2_can_defer_close(inode, dclose))) {
if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
inode_set_mtime_to_ts(inode,
inode_set_ctime_current(inode));
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 8177ec59afeef6..d28ab0af604936 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -401,7 +401,6 @@ cifs_get_file_info_unix(struct file *filp)
cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb);
} else if (rc == -EREMOTE) {
cifs_create_junction_fattr(&fattr, inode->i_sb);
- rc = 0;
} else
goto cifs_gfiunix_out;
@@ -820,8 +819,10 @@ cifs_get_file_info(struct file *filp)
void *page = alloc_dentry_path();
const unsigned char *path;
- if (!server->ops->query_file_info)
+ if (!server->ops->query_file_info) {
+ free_dentry_path(page);
return -ENOSYS;
+ }
xid = get_xid();
rc = server->ops->query_file_info(xid, tcon, cfile, &data);
@@ -835,8 +836,8 @@ cifs_get_file_info(struct file *filp)
}
path = build_path_from_dentry(dentry, page);
if (IS_ERR(path)) {
- free_dentry_path(page);
- return PTR_ERR(path);
+ rc = PTR_ERR(path);
+ goto cgfi_exit;
}
cifs_open_info_to_fattr(&fattr, &data, inode->i_sb);
if (fattr.cf_flags & CIFS_FATTR_DELETE_PENDING)
@@ -844,7 +845,6 @@ cifs_get_file_info(struct file *filp)
break;
case -EREMOTE:
cifs_create_junction_fattr(&fattr, inode->i_sb);
- rc = 0;
break;
case -EOPNOTSUPP:
case -EINVAL:
@@ -1009,7 +1009,6 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
struct kvec rsp_iov, *iov = NULL;
int rsp_buftype = CIFS_NO_BUFFER;
u32 tag = data->reparse.tag;
- struct inode *inode = NULL;
int rc = 0;
if (!tag && server->ops->query_reparse_point) {
@@ -1049,12 +1048,8 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
if (tcon->posix_extensions)
smb311_posix_info_to_fattr(fattr, data, sb);
- else {
+ else
cifs_open_info_to_fattr(fattr, data, sb);
- inode = cifs_iget(sb, fattr);
- if (inode && fattr->cf_flags & CIFS_FATTR_DELETE_PENDING)
- cifs_mark_open_handles_for_deleted_file(inode, full_path);
- }
out:
fattr->cf_cifstag = data->reparse.tag;
free_rsp_buf(rsp_buftype, rsp_iov.iov_base);
@@ -1109,9 +1104,9 @@ static int cifs_get_fattr(struct cifs_open_info_data *data,
full_path, fattr);
} else {
cifs_open_info_to_fattr(fattr, data, sb);
- if (fattr->cf_flags & CIFS_FATTR_DELETE_PENDING)
- cifs_mark_open_handles_for_deleted_file(*inode, full_path);
}
+ if (!rc && fattr->cf_flags & CIFS_FATTR_DELETE_PENDING)
+ cifs_mark_open_handles_for_deleted_file(*inode, full_path);
break;
case -EREMOTE:
/* DFS link, no metadata available on this server */
@@ -1340,6 +1335,8 @@ int smb311_posix_get_inode_info(struct inode **inode,
goto out;
rc = update_inode_info(sb, &fattr, inode);
+ if (!rc && fattr.cf_flags & CIFS_FATTR_DELETE_PENDING)
+ cifs_mark_open_handles_for_deleted_file(*inode, full_path);
out:
kfree(fattr.cf_symlink_target);
return rc;
@@ -1501,6 +1498,9 @@ iget_root:
goto out;
}
+ if (!rc && fattr.cf_flags & CIFS_FATTR_DELETE_PENDING)
+ cifs_mark_open_handles_for_deleted_file(inode, path);
+
if (rc && tcon->pipe) {
cifs_dbg(FYI, "ipc connection - fake read inode\n");
spin_lock(&inode->i_lock);
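Both inode.c fixes above (adding free_dentry_path() on the -ENOSYS exit, and turning the early return into goto cgfi_exit) are instances of the single-exit cleanup idiom: once the function owns the path buffer, every exit must funnel through one label that releases it. A small runnable sketch of the same idiom, with all names hypothetical:

	#include <stdio.h>
	#include <stdlib.h>

	/* Every early exit goes through the out label, so the buffer
	 * (standing in for alloc_dentry_path()) is always released. */
	static int query_with_path(int want_fail)
	{
		int rc = 0;
		char *page = malloc(4096);	/* alloc_dentry_path() stand-in */

		if (!page)
			return -1;		/* nothing owned yet */

		if (want_fail) {		/* IS_ERR(path) stand-in */
			rc = -2;
			goto out;		/* no leak: freed below */
		}
		snprintf(page, 4096, "/some/path");
		printf("queried %s\n", page);
	out:
		free(page);			/* free_dentry_path() stand-in */
		return rc;
	}

	int main(void)
	{
		printf("ok path: %d\n", query_with_path(0));
		printf("err path: %d\n", query_with_path(1));
		return 0;
	}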
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index 9428a0db77183f..c3771fc81328ff 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -27,9 +27,6 @@
#include "fs_context.h"
#include "cached_dir.h"
-extern mempool_t *cifs_sm_req_poolp;
-extern mempool_t *cifs_req_poolp;
-
/* The xid serves as a useful identifier for each incoming vfs request,
in a similar way to the mid which is useful to track each sent smb,
and CurrentXid can also provide a running counter (although it
diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
index 8f37373fd33344..3216f786908fbb 100644
--- a/fs/smb/client/sess.c
+++ b/fs/smb/client/sess.c
@@ -230,7 +230,7 @@ int cifs_try_adding_channels(struct cifs_ses *ses)
spin_lock(&ses->iface_lock);
if (!ses->iface_count) {
spin_unlock(&ses->iface_lock);
- cifs_dbg(VFS, "server %s does not advertise interfaces\n",
+ cifs_dbg(ONCE, "server %s does not advertise interfaces\n",
ses->server->hostname);
break;
}
@@ -396,7 +396,7 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
spin_lock(&ses->iface_lock);
if (!ses->iface_count) {
spin_unlock(&ses->iface_lock);
- cifs_dbg(VFS, "server %s does not advertise interfaces\n", ses->server->hostname);
+ cifs_dbg(ONCE, "server %s does not advertise interfaces\n", ses->server->hostname);
return;
}
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 6ee22d0dbc0061..2ed456948f34ca 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -5290,6 +5290,7 @@ struct smb_version_operations smb30_operations = {
.tree_connect = SMB2_tcon,
.tree_disconnect = SMB2_tdis,
.qfs_tcon = smb3_qfs_tcon,
+ .query_server_interfaces = SMB3_request_interfaces,
.is_path_accessible = smb2_is_path_accessible,
.can_echo = smb2_can_echo,
.echo = SMB2_echo,
@@ -5405,6 +5406,7 @@ struct smb_version_operations smb311_operations = {
.tree_connect = SMB2_tcon,
.tree_disconnect = SMB2_tdis,
.qfs_tcon = smb3_qfs_tcon,
+ .query_server_interfaces = SMB3_request_interfaces,
.is_path_accessible = smb2_is_path_accessible,
.can_echo = smb2_can_echo,
.echo = SMB2_echo,
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index e5e6b14f8cae37..3ea688558e6c9b 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -409,14 +409,15 @@ skip_sess_setup:
spin_unlock(&ses->ses_lock);
if (!rc &&
- (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+ (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL) &&
+ server->ops->query_server_interfaces) {
mutex_unlock(&ses->session_mutex);
/*
* query server network interfaces, in case they change
*/
xid = get_xid();
- rc = SMB3_request_interfaces(xid, tcon, false);
+ rc = server->ops->query_server_interfaces(xid, tcon, false);
free_xid(xid);
if (rc == -EOPNOTSUPP && ses->chan_count > 1) {
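The hunks above route the interface query through the per-dialect ops table and guard the call site, so dialects that never set .query_server_interfaces skip the query instead of dereferencing a NULL pointer. A minimal runnable sketch of the guarded ops-table dispatch, with all names hypothetical:

	#include <stdio.h>

	struct server_ops {
		int (*query_server_interfaces)(int xid); /* may be NULL */
	};

	static int smb3_query_ifaces(int xid)
	{
		printf("querying interfaces, xid=%d\n", xid);
		return 0;
	}

	static const struct server_ops smb30_ops = {
		.query_server_interfaces = smb3_query_ifaces,
	};
	static const struct server_ops smb20_ops = { 0 }; /* op not wired up */

	static int maybe_query(const struct server_ops *ops, int xid)
	{
		if (!ops->query_server_interfaces)	/* guard before dispatch */
			return -1;			/* -ENOSYS stand-in */
		return ops->query_server_interfaces(xid);
	}

	int main(void)
	{
		printf("smb3.0: %d\n", maybe_query(&smb30_ops, 1));
		printf("smb2.0: %d\n", maybe_query(&smb20_ops, 2));
		return 0;
	}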
diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
index 20784f76a60455..1b594307c9d5a0 100644
--- a/fs/smb/common/smb2pdu.h
+++ b/fs/smb/common/smb2pdu.h
@@ -227,7 +227,7 @@ struct smb2_compression_hdr {
__le32 OriginalCompressedSegmentSize;
__le16 CompressionAlgorithm;
__le16 Flags;
- __le16 Offset; /* this is the size of the uncompressed SMB2 header below */
+ __le32 Offset; /* this is the size of the uncompressed SMB2 header below */
/* uncompressed SMB2 header (READ or WRITE) goes here */
/* compressed data goes here */
} __packed;
@@ -280,15 +280,16 @@ struct smb3_blob_data {
#define SE_GROUP_RESOURCE 0x20000000
#define SE_GROUP_LOGON_ID 0xC0000000
-/* struct sid_attr_data is SidData array in BlobData format then le32 Attr */
-
struct sid_array_data {
__le16 SidAttrCount;
/* SidAttrList - array of sid_attr_data structs */
} __packed;
-struct luid_attr_data {
-
+/* struct sid_attr_data is SidData array in BlobData format then le32 Attr */
+struct sid_attr_data {
+ __le16 BlobSize;
+ __u8 BlobData[];
+ /* __le32 Attr */
} __packed;
/*
@@ -502,6 +503,7 @@ struct smb2_encryption_neg_context {
#define SMB3_COMPRESS_LZ77_HUFF cpu_to_le16(0x0003)
/* Pattern scanning algorithm See MS-SMB2 3.1.4.4.1 */
#define SMB3_COMPRESS_PATTERN cpu_to_le16(0x0004) /* Pattern_V1 */
+#define SMB3_COMPRESS_LZ4 cpu_to_le16(0x0005)
/* Compression Flags */
#define SMB2_COMPRESSION_CAPABILITIES_FLAG_NONE cpu_to_le32(0x00000000)
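The Offset/Length field of the SMB2 compression transform header is four bytes wide on the wire, so the old __le16 was a wire-format bug: everything after it would be parsed two bytes early. A runnable userspace sketch that checks the corrected layout; only the fields visible in the hunk are modeled, and plain fixed-width types stand in for the kernel's __le16/__le32 endianness annotations:

	#include <stdint.h>
	#include <stddef.h>
	#include <stdio.h>

	struct smb2_compression_hdr_demo {
		uint32_t OriginalCompressedSegmentSize;
		uint16_t CompressionAlgorithm;
		uint16_t Flags;
		uint32_t Offset;	/* widened from 16 to 32 bits */
	} __attribute__((__packed__));

	int main(void)
	{
		/* expected: size 12, Offset at byte 8 */
		printf("sizeof = %zu\n",
		       sizeof(struct smb2_compression_hdr_demo));
		printf("Offset at byte %zu\n",
		       offsetof(struct smb2_compression_hdr_demo, Offset));
		return 0;
	}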
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index d013c5b3f1edae..ac77ac1fd73e35 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -1742,17 +1742,22 @@ int dbg_check_idx_size(struct ubifs_info *c, long long idx_size)
err = dbg_walk_index(c, NULL, add_size, &calc);
if (err) {
ubifs_err(c, "error %d while walking the index", err);
- return err;
+ goto out_err;
}
if (calc != idx_size) {
ubifs_err(c, "index size check failed: calculated size is %lld, should be %lld",
calc, idx_size);
dump_stack();
- return -EINVAL;
+ err = -EINVAL;
+ goto out_err;
}
return 0;
+
+out_err:
+ ubifs_destroy_tnc_tree(c);
+ return err;
}
/**
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 551148de66cd86..eac0fef801f143 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -1133,6 +1133,8 @@ out_cancel:
dir_ui->ui_size = dir->i_size;
mutex_unlock(&dir_ui->ui_mutex);
out_inode:
+ /* Free inode->i_link before inode is marked as bad. */
+ fscrypt_free_inode(inode);
make_bad_inode(inode);
iput(inode);
out_fname:
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 5029eb3390a560..a1f46919934c7d 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -96,36 +96,36 @@ dump:
return -EINVAL;
}
-static int do_readpage(struct page *page)
+static int do_readpage(struct folio *folio)
{
void *addr;
int err = 0, i;
unsigned int block, beyond;
- struct ubifs_data_node *dn;
- struct inode *inode = page->mapping->host;
+ struct ubifs_data_node *dn = NULL;
+ struct inode *inode = folio->mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info;
loff_t i_size = i_size_read(inode);
dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
- inode->i_ino, page->index, i_size, page->flags);
- ubifs_assert(c, !PageChecked(page));
- ubifs_assert(c, !PagePrivate(page));
+ inode->i_ino, folio->index, i_size, folio->flags);
+ ubifs_assert(c, !folio_test_checked(folio));
+ ubifs_assert(c, !folio->private);
- addr = kmap(page);
+ addr = kmap_local_folio(folio, 0);
- block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
+ block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
if (block >= beyond) {
/* Reading beyond inode */
- SetPageChecked(page);
- memset(addr, 0, PAGE_SIZE);
+ folio_set_checked(folio);
+ addr = folio_zero_tail(folio, 0, addr);
goto out;
}
dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
if (!dn) {
err = -ENOMEM;
- goto error;
+ goto out;
}
i = 0;
@@ -150,39 +150,35 @@ static int do_readpage(struct page *page)
memset(addr + ilen, 0, dlen - ilen);
}
}
- if (++i >= UBIFS_BLOCKS_PER_PAGE)
+ if (++i >= (UBIFS_BLOCKS_PER_PAGE << folio_order(folio)))
break;
block += 1;
addr += UBIFS_BLOCK_SIZE;
+ if (folio_test_highmem(folio) && (offset_in_page(addr) == 0)) {
+ kunmap_local(addr - UBIFS_BLOCK_SIZE);
+ addr = kmap_local_folio(folio, i * UBIFS_BLOCK_SIZE);
+ }
}
+
if (err) {
struct ubifs_info *c = inode->i_sb->s_fs_info;
if (err == -ENOENT) {
/* Not found, so it must be a hole */
- SetPageChecked(page);
+ folio_set_checked(folio);
dbg_gen("hole");
- goto out_free;
+ err = 0;
+ } else {
+ ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
+ folio->index, inode->i_ino, err);
}
- ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
- page->index, inode->i_ino, err);
- goto error;
}
-out_free:
- kfree(dn);
out:
- SetPageUptodate(page);
- ClearPageError(page);
- flush_dcache_page(page);
- kunmap(page);
- return 0;
-
-error:
kfree(dn);
- ClearPageUptodate(page);
- SetPageError(page);
- flush_dcache_page(page);
- kunmap(page);
+ if (!err)
+ folio_mark_uptodate(folio);
+ flush_dcache_folio(folio);
+ kunmap_local(addr);
return err;
}
@@ -222,16 +218,16 @@ static int write_begin_slow(struct address_space *mapping,
pgoff_t index = pos >> PAGE_SHIFT;
struct ubifs_budget_req req = { .new_page = 1 };
int err, appending = !!(pos + len > inode->i_size);
- struct page *page;
+ struct folio *folio;
dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
inode->i_ino, pos, len, inode->i_size);
/*
- * At the slow path we have to budget before locking the page, because
- * budgeting may force write-back, which would wait on locked pages and
- * deadlock if we had the page locked. At this point we do not know
- * anything about the page, so assume that this is a new page which is
+ * At the slow path we have to budget before locking the folio, because
+ * budgeting may force write-back, which would wait on locked folios and
+ * deadlock if we had the folio locked. At this point we do not know
+ * anything about the folio, so assume that this is a new folio which is
* written to a hole. This corresponds to largest budget. Later the
* budget will be amended if this is not true.
*/
@@ -243,45 +239,43 @@ static int write_begin_slow(struct address_space *mapping,
if (unlikely(err))
return err;
- page = grab_cache_page_write_begin(mapping, index);
- if (unlikely(!page)) {
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio)) {
ubifs_release_budget(c, &req);
- return -ENOMEM;
+ return PTR_ERR(folio);
}
- if (!PageUptodate(page)) {
- if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE)
- SetPageChecked(page);
+ if (!folio_test_uptodate(folio)) {
+ if (pos == folio_pos(folio) && len >= folio_size(folio))
+ folio_set_checked(folio);
else {
- err = do_readpage(page);
+ err = do_readpage(folio);
if (err) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
ubifs_release_budget(c, &req);
return err;
}
}
-
- SetPageUptodate(page);
- ClearPageError(page);
}
- if (PagePrivate(page))
+ if (folio->private)
/*
- * The page is dirty, which means it was budgeted twice:
+ * The folio is dirty, which means it was budgeted twice:
* o first time the budget was allocated by the task which
- * made the page dirty and set the PG_private flag;
+ * made the folio dirty and set the private field;
* o and then we budgeted for it for the second time at the
* very beginning of this function.
*
- * So what we have to do is to release the page budget we
+ * So what we have to do is to release the folio budget we
* allocated.
*/
release_new_page_budget(c);
- else if (!PageChecked(page))
+ else if (!folio_test_checked(folio))
/*
- * We are changing a page which already exists on the media.
- * This means that changing the page does not make the amount
+ * We are changing a folio which already exists on the media.
+ * This means that changing the folio does not make the amount
* of indexing information larger, and this part of the budget
* which we have already acquired may be released.
*/
@@ -304,14 +298,14 @@ static int write_begin_slow(struct address_space *mapping,
ubifs_release_dirty_inode_budget(c, ui);
}
- *pagep = page;
+ *pagep = &folio->page;
return 0;
}
/**
* allocate_budget - allocate budget for 'ubifs_write_begin()'.
* @c: UBIFS file-system description object
- * @page: page to allocate budget for
+ * @folio: folio to allocate budget for
* @ui: UBIFS inode object the page belongs to
* @appending: non-zero if the page is appended
*
@@ -322,15 +316,15 @@ static int write_begin_slow(struct address_space *mapping,
*
* Returns: %0 in case of success and %-ENOSPC in case of failure.
*/
-static int allocate_budget(struct ubifs_info *c, struct page *page,
+static int allocate_budget(struct ubifs_info *c, struct folio *folio,
struct ubifs_inode *ui, int appending)
{
struct ubifs_budget_req req = { .fast = 1 };
- if (PagePrivate(page)) {
+ if (folio->private) {
if (!appending)
/*
- * The page is dirty and we are not appending, which
+ * The folio is dirty and we are not appending, which
* means no budget is needed at all.
*/
return 0;
@@ -354,11 +348,11 @@ static int allocate_budget(struct ubifs_info *c, struct page *page,
*/
req.dirtied_ino = 1;
} else {
- if (PageChecked(page))
+ if (folio_test_checked(folio))
/*
* The page corresponds to a hole and does not
* exist on the media. So changing it makes
- * make the amount of indexing information
+ * the amount of indexing information
* larger, and we have to budget for a new
* page.
*/
@@ -428,7 +422,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
pgoff_t index = pos >> PAGE_SHIFT;
int err, appending = !!(pos + len > inode->i_size);
int skipped_read = 0;
- struct page *page;
+ struct folio *folio;
ubifs_assert(c, ubifs_inode(inode)->ui_size == inode->i_size);
ubifs_assert(c, !c->ro_media && !c->ro_mount);
@@ -437,13 +431,14 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
return -EROFS;
/* Try out the fast-path part first */
- page = grab_cache_page_write_begin(mapping, index);
- if (unlikely(!page))
- return -ENOMEM;
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
/* The page is not loaded from the flash */
- if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) {
+ if (pos == folio_pos(folio) && len >= folio_size(folio)) {
/*
* We change whole page so no need to load it. But we
* do not know whether this page exists on the media or
@@ -453,32 +448,27 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
* media. Thus, we are setting the @PG_checked flag
* here.
*/
- SetPageChecked(page);
+ folio_set_checked(folio);
skipped_read = 1;
} else {
- err = do_readpage(page);
+ err = do_readpage(folio);
if (err) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return err;
}
}
-
- SetPageUptodate(page);
- ClearPageError(page);
}
- err = allocate_budget(c, page, ui, appending);
+ err = allocate_budget(c, folio, ui, appending);
if (unlikely(err)) {
ubifs_assert(c, err == -ENOSPC);
/*
* If we skipped reading the page because we were going to
* write all of it, then it is not up to date.
*/
- if (skipped_read) {
- ClearPageChecked(page);
- ClearPageUptodate(page);
- }
+ if (skipped_read)
+ folio_clear_checked(folio);
/*
* Budgeting failed which means it would have to force
* write-back but didn't, because we set the @fast flag in the
@@ -490,8 +480,8 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
mutex_unlock(&ui->ui_mutex);
}
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return write_begin_slow(mapping, pos, len, pagep);
}
@@ -502,22 +492,21 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
* with @ui->ui_mutex locked if we are appending pages, and unlocked
* otherwise. This is an optimization (slightly hacky though).
*/
- *pagep = page;
+ *pagep = &folio->page;
return 0;
-
}
/**
* cancel_budget - cancel budget.
* @c: UBIFS file-system description object
- * @page: page to cancel budget for
+ * @folio: folio to cancel budget for
* @ui: UBIFS inode object the page belongs to
* @appending: non-zero if the page is appended
*
* This is a helper function for a page write operation. It unlocks the
* @ui->ui_mutex in case of appending.
*/
-static void cancel_budget(struct ubifs_info *c, struct page *page,
+static void cancel_budget(struct ubifs_info *c, struct folio *folio,
struct ubifs_inode *ui, int appending)
{
if (appending) {
@@ -525,8 +514,8 @@ static void cancel_budget(struct ubifs_info *c, struct page *page,
ubifs_release_dirty_inode_budget(c, ui);
mutex_unlock(&ui->ui_mutex);
}
- if (!PagePrivate(page)) {
- if (PageChecked(page))
+ if (!folio->private) {
+ if (folio_test_checked(folio))
release_new_page_budget(c);
else
release_existing_page_budget(c);
@@ -537,6 +526,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
+ struct folio *folio = page_folio(page);
struct inode *inode = mapping->host;
struct ubifs_inode *ui = ubifs_inode(inode);
struct ubifs_info *c = inode->i_sb->s_fs_info;
@@ -544,44 +534,47 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
int appending = !!(end_pos > inode->i_size);
dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
- inode->i_ino, pos, page->index, len, copied, inode->i_size);
+ inode->i_ino, pos, folio->index, len, copied, inode->i_size);
- if (unlikely(copied < len && len == PAGE_SIZE)) {
+ if (unlikely(copied < len && !folio_test_uptodate(folio))) {
/*
- * VFS copied less data to the page that it intended and
+ * VFS copied less data to the folio than it intended and
* declared in its '->write_begin()' call via the @len
- * argument. If the page was not up-to-date, and @len was
- * @PAGE_SIZE, the 'ubifs_write_begin()' function did
+ * argument. If the folio was not up-to-date,
+ * the 'ubifs_write_begin()' function did
* not load it from the media (for optimization reasons). This
- * means that part of the page contains garbage. So read the
- * page now.
+ * means that part of the folio contains garbage. So read the
+ * folio now.
*/
dbg_gen("copied %d instead of %d, read page and repeat",
copied, len);
- cancel_budget(c, page, ui, appending);
- ClearPageChecked(page);
+ cancel_budget(c, folio, ui, appending);
+ folio_clear_checked(folio);
/*
* Return 0 to force VFS to repeat the whole operation, or the
* error code if 'do_readpage()' fails.
*/
- copied = do_readpage(page);
+ copied = do_readpage(folio);
goto out;
}
- if (!PagePrivate(page)) {
- attach_page_private(page, (void *)1);
+ if (len == folio_size(folio))
+ folio_mark_uptodate(folio);
+
+ if (!folio->private) {
+ folio_attach_private(folio, (void *)1);
atomic_long_inc(&c->dirty_pg_cnt);
- __set_page_dirty_nobuffers(page);
+ filemap_dirty_folio(mapping, folio);
}
if (appending) {
i_size_write(inode, end_pos);
ui->ui_size = end_pos;
/*
- * Note, we do not set @I_DIRTY_PAGES (which means that the
- * inode has dirty pages), this has been done in
- * '__set_page_dirty_nobuffers()'.
+ * We do not set @I_DIRTY_PAGES (which means that
+ * the inode has dirty pages), this was done in
+ * filemap_dirty_folio().
*/
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
@@ -589,43 +582,43 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
}
out:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return copied;
}
/**
* populate_page - copy data nodes into a page for bulk-read.
* @c: UBIFS file-system description object
- * @page: page
+ * @folio: folio
* @bu: bulk-read information
* @n: next zbranch slot
*
* Returns: %0 on success and a negative error code on failure.
*/
-static int populate_page(struct ubifs_info *c, struct page *page,
+static int populate_page(struct ubifs_info *c, struct folio *folio,
struct bu_info *bu, int *n)
{
int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
loff_t i_size = i_size_read(inode);
unsigned int page_block;
void *addr, *zaddr;
pgoff_t end_index;
dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
- inode->i_ino, page->index, i_size, page->flags);
+ inode->i_ino, folio->index, i_size, folio->flags);
- addr = zaddr = kmap(page);
+ addr = zaddr = kmap_local_folio(folio, 0);
end_index = (i_size - 1) >> PAGE_SHIFT;
- if (!i_size || page->index > end_index) {
+ if (!i_size || folio->index > end_index) {
hole = 1;
- memset(addr, 0, PAGE_SIZE);
+ addr = folio_zero_tail(folio, 0, addr);
goto out_hole;
}
- page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
+ page_block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
while (1) {
int err, len, out_len, dlen;
@@ -674,9 +667,13 @@ static int populate_page(struct ubifs_info *c, struct page *page,
break;
addr += UBIFS_BLOCK_SIZE;
page_block += 1;
+ if (folio_test_highmem(folio) && (offset_in_page(addr) == 0)) {
+ kunmap_local(addr - UBIFS_BLOCK_SIZE);
+ addr = kmap_local_folio(folio, i * UBIFS_BLOCK_SIZE);
+ }
}
- if (end_index == page->index) {
+ if (end_index == folio->index) {
int len = i_size & (PAGE_SIZE - 1);
if (len && len < read)
@@ -685,22 +682,19 @@ static int populate_page(struct ubifs_info *c, struct page *page,
out_hole:
if (hole) {
- SetPageChecked(page);
+ folio_set_checked(folio);
dbg_gen("hole");
}
- SetPageUptodate(page);
- ClearPageError(page);
- flush_dcache_page(page);
- kunmap(page);
+ folio_mark_uptodate(folio);
+ flush_dcache_folio(folio);
+ kunmap_local(addr);
*n = nn;
return 0;
out_err:
- ClearPageUptodate(page);
- SetPageError(page);
- flush_dcache_page(page);
- kunmap(page);
+ flush_dcache_folio(folio);
+ kunmap_local(addr);
ubifs_err(c, "bad data node (block %u, inode %lu)",
page_block, inode->i_ino);
return -EINVAL;
@@ -710,15 +704,15 @@ out_err:
* ubifs_do_bulk_read - do bulk-read.
* @c: UBIFS file-system description object
* @bu: bulk-read information
- * @page1: first page to read
+ * @folio1: first folio to read
*
* Returns: %1 if the bulk-read is done, otherwise %0 is returned.
*/
static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
- struct page *page1)
+ struct folio *folio1)
{
- pgoff_t offset = page1->index, end_index;
- struct address_space *mapping = page1->mapping;
+ pgoff_t offset = folio1->index, end_index;
+ struct address_space *mapping = folio1->mapping;
struct inode *inode = mapping->host;
struct ubifs_inode *ui = ubifs_inode(inode);
int err, page_idx, page_cnt, ret = 0, n = 0;
@@ -768,11 +762,11 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
goto out_warn;
}
- err = populate_page(c, page1, bu, &n);
+ err = populate_page(c, folio1, bu, &n);
if (err)
goto out_warn;
- unlock_page(page1);
+ folio_unlock(folio1);
ret = 1;
isize = i_size_read(inode);
@@ -782,19 +776,19 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
for (page_idx = 1; page_idx < page_cnt; page_idx++) {
pgoff_t page_offset = offset + page_idx;
- struct page *page;
+ struct folio *folio;
if (page_offset > end_index)
break;
- page = pagecache_get_page(mapping, page_offset,
+ folio = __filemap_get_folio(mapping, page_offset,
FGP_LOCK|FGP_ACCESSED|FGP_CREAT|FGP_NOWAIT,
ra_gfp_mask);
- if (!page)
+ if (IS_ERR(folio))
break;
- if (!PageUptodate(page))
- err = populate_page(c, page, bu, &n);
- unlock_page(page);
- put_page(page);
+ if (!folio_test_uptodate(folio))
+ err = populate_page(c, folio, bu, &n);
+ folio_unlock(folio);
+ folio_put(folio);
if (err)
break;
}
@@ -817,7 +811,7 @@ out_bu_off:
/**
* ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
- * @page: page from which to start bulk-read.
+ * @folio: folio from which to start bulk-read.
*
* Some flash media are capable of reading sequentially at faster rates. UBIFS
* bulk-read facility is designed to take advantage of that, by reading in one
@@ -826,12 +820,12 @@ out_bu_off:
*
* Returns: %1 if a bulk-read is done and %0 otherwise.
*/
-static int ubifs_bulk_read(struct page *page)
+static int ubifs_bulk_read(struct folio *folio)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info;
struct ubifs_inode *ui = ubifs_inode(inode);
- pgoff_t index = page->index, last_page_read = ui->last_page_read;
+ pgoff_t index = folio->index, last_page_read = ui->last_page_read;
struct bu_info *bu;
int err = 0, allocated = 0;
@@ -879,8 +873,8 @@ static int ubifs_bulk_read(struct page *page)
bu->buf_len = c->max_bu_buf_len;
data_key_init(c, &bu->key, inode->i_ino,
- page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
- err = ubifs_do_bulk_read(c, bu, page);
+ folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
+ err = ubifs_do_bulk_read(c, bu, folio);
if (!allocated)
mutex_unlock(&c->bu_mutex);
@@ -894,69 +888,71 @@ out_unlock:
static int ubifs_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
-
- if (ubifs_bulk_read(page))
+ if (ubifs_bulk_read(folio))
return 0;
- do_readpage(page);
+ do_readpage(folio);
folio_unlock(folio);
return 0;
}
-static int do_writepage(struct page *page, int len)
+static int do_writepage(struct folio *folio, size_t len)
{
- int err = 0, i, blen;
+ int err = 0, blen;
unsigned int block;
void *addr;
+ size_t offset = 0;
union ubifs_key key;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info;
#ifdef UBIFS_DEBUG
struct ubifs_inode *ui = ubifs_inode(inode);
spin_lock(&ui->ui_lock);
- ubifs_assert(c, page->index <= ui->synced_i_size >> PAGE_SHIFT);
+ ubifs_assert(c, folio->index <= ui->synced_i_size >> PAGE_SHIFT);
spin_unlock(&ui->ui_lock);
#endif
- /* Update radix tree tags */
- set_page_writeback(page);
+ folio_start_writeback(folio);
- addr = kmap(page);
- block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
- i = 0;
- while (len) {
- blen = min_t(int, len, UBIFS_BLOCK_SIZE);
+ addr = kmap_local_folio(folio, offset);
+ block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
+ for (;;) {
+ blen = min_t(size_t, len, UBIFS_BLOCK_SIZE);
data_key_init(c, &key, inode->i_ino, block);
err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
if (err)
break;
- if (++i >= UBIFS_BLOCKS_PER_PAGE)
+ len -= blen;
+ if (!len)
break;
block += 1;
addr += blen;
- len -= blen;
+ if (folio_test_highmem(folio) && !offset_in_page(addr)) {
+ kunmap_local(addr - blen);
+ offset += PAGE_SIZE;
+ addr = kmap_local_folio(folio, offset);
+ }
}
+ kunmap_local(addr);
if (err) {
- SetPageError(page);
- ubifs_err(c, "cannot write page %lu of inode %lu, error %d",
- page->index, inode->i_ino, err);
+ mapping_set_error(folio->mapping, err);
+ ubifs_err(c, "cannot write folio %lu of inode %lu, error %d",
+ folio->index, inode->i_ino, err);
ubifs_ro_mode(c, err);
}
- ubifs_assert(c, PagePrivate(page));
- if (PageChecked(page))
+ ubifs_assert(c, folio->private != NULL);
+ if (folio_test_checked(folio))
release_new_page_budget(c);
else
release_existing_page_budget(c);
atomic_long_dec(&c->dirty_pg_cnt);
- detach_page_private(page);
- ClearPageChecked(page);
+ folio_detach_private(folio);
+ folio_clear_checked(folio);
- kunmap(page);
- unlock_page(page);
- end_page_writeback(page);
+ folio_unlock(folio);
+ folio_end_writeback(folio);
return err;
}
@@ -1006,22 +1002,21 @@ static int do_writepage(struct page *page, int len)
* on the page lock and it would not write the truncated inode node to the
* journal before we have finished.
*/
-static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
+static int ubifs_writepage(struct folio *folio, struct writeback_control *wbc,
+ void *data)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info;
struct ubifs_inode *ui = ubifs_inode(inode);
loff_t i_size = i_size_read(inode), synced_i_size;
- pgoff_t end_index = i_size >> PAGE_SHIFT;
- int err, len = i_size & (PAGE_SIZE - 1);
- void *kaddr;
+ int err, len = folio_size(folio);
dbg_gen("ino %lu, pg %lu, pg flags %#lx",
- inode->i_ino, page->index, page->flags);
- ubifs_assert(c, PagePrivate(page));
+ inode->i_ino, folio->index, folio->flags);
+ ubifs_assert(c, folio->private != NULL);
- /* Is the page fully outside @i_size? (truncate in progress) */
- if (page->index > end_index || (page->index == end_index && !len)) {
+ /* Is the folio fully outside @i_size? (truncate in progress) */
+ if (folio_pos(folio) >= i_size) {
err = 0;
goto out_unlock;
}
@@ -1030,9 +1025,9 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
synced_i_size = ui->synced_i_size;
spin_unlock(&ui->ui_lock);
- /* Is the page fully inside @i_size? */
- if (page->index < end_index) {
- if (page->index >= synced_i_size >> PAGE_SHIFT) {
+ /* Is the folio fully inside i_size? */
+ if (folio_pos(folio) + len <= i_size) {
+ if (folio_pos(folio) >= synced_i_size) {
err = inode->i_sb->s_op->write_inode(inode, NULL);
if (err)
goto out_redirty;
@@ -1045,20 +1040,18 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
* with this.
*/
}
- return do_writepage(page, PAGE_SIZE);
+ return do_writepage(folio, len);
}
/*
- * The page straddles @i_size. It must be zeroed out on each and every
+ * The folio straddles @i_size. It must be zeroed out on each and every
* writepage invocation because it may be mmapped. "A file is mapped
* in multiples of the page size. For a file that is not a multiple of
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- kaddr = kmap_atomic(page);
- memset(kaddr + len, 0, PAGE_SIZE - len);
- flush_dcache_page(page);
- kunmap_atomic(kaddr);
+ len = i_size - folio_pos(folio);
+ folio_zero_segment(folio, len, folio_size(folio));
if (i_size > synced_i_size) {
err = inode->i_sb->s_op->write_inode(inode, NULL);
@@ -1066,19 +1059,25 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
goto out_redirty;
}
- return do_writepage(page, len);
+ return do_writepage(folio, len);
out_redirty:
/*
- * redirty_page_for_writepage() won't call ubifs_dirty_inode() because
+ * folio_redirty_for_writepage() won't call ubifs_dirty_inode() because
* it passes I_DIRTY_PAGES flag while calling __mark_inode_dirty(), so
* there is no need to do space budget for dirty inode.
*/
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
out_unlock:
- unlock_page(page);
+ folio_unlock(folio);
return err;
}
+static int ubifs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ return write_cache_pages(mapping, wbc, ubifs_writepage, NULL);
+}
+
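The new ubifs_writepages() above keeps all per-folio logic in ubifs_writepage() and hands iteration to write_cache_pages(), which walks the mapping's dirty folios and calls the callback once per folio. A hedged sketch of the pattern (kernel API, not standalone; the demo_* names are hypothetical and the actual write is elided):

	static int demo_writepage(struct folio *folio,
				  struct writeback_control *wbc, void *data)
	{
		folio_start_writeback(folio);
		/* ... write the folio contents to the medium here ... */
		folio_unlock(folio);		/* callback must unlock */
		folio_end_writeback(folio);
		return 0;
	}

	static int demo_writepages(struct address_space *mapping,
				   struct writeback_control *wbc)
	{
		return write_cache_pages(mapping, wbc, demo_writepage, NULL);
	}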
/**
* do_attr_changes - change inode attributes.
* @inode: inode to change attributes for
@@ -1155,11 +1154,11 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
if (offset) {
pgoff_t index = new_size >> PAGE_SHIFT;
- struct page *page;
+ struct folio *folio;
- page = find_lock_page(inode->i_mapping, index);
- if (page) {
- if (PageDirty(page)) {
+ folio = filemap_lock_folio(inode->i_mapping, index);
+ if (!IS_ERR(folio)) {
+ if (folio_test_dirty(folio)) {
/*
* 'ubifs_jnl_truncate()' will try to truncate
* the last data node, but it contains
@@ -1168,14 +1167,14 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
* 'ubifs_jnl_truncate()' will see an already
* truncated (and up to date) data node.
*/
- ubifs_assert(c, PagePrivate(page));
+ ubifs_assert(c, folio->private != NULL);
- clear_page_dirty_for_io(page);
+ folio_clear_dirty_for_io(folio);
if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
- offset = new_size &
- (PAGE_SIZE - 1);
- err = do_writepage(page, offset);
- put_page(page);
+ offset = offset_in_folio(folio,
+ new_size);
+ err = do_writepage(folio, offset);
+ folio_put(folio);
if (err)
goto out_budg;
/*
@@ -1188,8 +1187,8 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
* to 'ubifs_jnl_truncate()' to save it from
* having to read it.
*/
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
}
}
}
@@ -1512,14 +1511,14 @@ static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
*/
static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
{
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
struct inode *inode = file_inode(vmf->vma->vm_file);
struct ubifs_info *c = inode->i_sb->s_fs_info;
struct timespec64 now = current_time(inode);
struct ubifs_budget_req req = { .new_page = 1 };
int err, update_time;
- dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
+ dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, folio->index,
i_size_read(inode));
ubifs_assert(c, !c->ro_media && !c->ro_mount);
@@ -1527,17 +1526,17 @@ static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
return VM_FAULT_SIGBUS; /* -EROFS */
/*
- * We have not locked @page so far so we may budget for changing the
- * page. Note, we cannot do this after we locked the page, because
+ * We have not locked @folio so far so we may budget for changing the
+ * folio. Note, we cannot do this after we locked the folio, because
* budgeting may cause write-back which would cause deadlock.
*
- * At the moment we do not know whether the page is dirty or not, so we
- * assume that it is not and budget for a new page. We could look at
+ * At the moment we do not know whether the folio is dirty or not, so we
+ * assume that it is not and budget for a new folio. We could look at
* the @PG_private flag and figure this out, but we may race with write
- * back and the page state may change by the time we lock it, so this
+ * back and the folio state may change by the time we lock it, so this
* would need additional care. We do not bother with this at the
* moment, although it might be good idea to do. Instead, we allocate
- * budget for a new page and amend it later on if the page was in fact
+ * budget for a new folio and amend it later on if the folio was in fact
* dirty.
*
* The budgeting-related logic of this function is similar to what we
@@ -1560,21 +1559,21 @@ static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
- lock_page(page);
- if (unlikely(page->mapping != inode->i_mapping ||
- page_offset(page) > i_size_read(inode))) {
- /* Page got truncated out from underneath us */
+ folio_lock(folio);
+ if (unlikely(folio->mapping != inode->i_mapping ||
+ folio_pos(folio) >= i_size_read(inode))) {
+ /* Folio got truncated out from underneath us */
goto sigbus;
}
- if (PagePrivate(page))
+ if (folio->private)
release_new_page_budget(c);
else {
- if (!PageChecked(page))
+ if (!folio_test_checked(folio))
ubifs_convert_page_budget(c);
- attach_page_private(page, (void *)1);
+ folio_attach_private(folio, (void *)1);
atomic_long_inc(&c->dirty_pg_cnt);
- __set_page_dirty_nobuffers(page);
+ filemap_dirty_folio(folio->mapping, folio);
}
if (update_time) {
@@ -1590,11 +1589,11 @@ static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
ubifs_release_dirty_inode_budget(c, ui);
}
- wait_for_stable_page(page);
+ folio_wait_stable(folio);
return VM_FAULT_LOCKED;
sigbus:
- unlock_page(page);
+ folio_unlock(folio);
ubifs_release_budget(c, &req);
return VM_FAULT_SIGBUS;
}
@@ -1648,7 +1647,7 @@ static int ubifs_symlink_getattr(struct mnt_idmap *idmap,
const struct address_space_operations ubifs_file_address_operations = {
.read_folio = ubifs_read_folio,
- .writepage = ubifs_writepage,
+ .writepages = ubifs_writepages,
.write_begin = ubifs_write_begin,
.write_end = ubifs_write_end,
.invalidate_folio = ubifs_invalidate_folio,
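Several of the converted loops above (do_readpage(), populate_page(), do_writepage()) share one subtlety: with CONFIG_HIGHMEM a multi-page folio is not one contiguous virtual range, so kmap_local_folio() maps a single page at a time and the walk must remap whenever it crosses a page boundary. A hedged sketch of that pattern (kernel API, not standalone; demo_walk_folio and blksz are hypothetical, and blksz is assumed to divide PAGE_SIZE):

	static void demo_walk_folio(struct folio *folio, size_t blksz)
	{
		size_t off = 0, len = folio_size(folio);
		void *addr = kmap_local_folio(folio, 0);

		for (;;) {
			size_t chunk = min_t(size_t, len, blksz);

			/* ... process chunk bytes at addr ... */
			len -= chunk;
			if (!len)
				break;
			off += chunk;
			addr += chunk;
			if (folio_test_highmem(folio) &&
			    offset_in_page(addr) == 0) {
				/* crossed a page: remap before touching it */
				kunmap_local(addr - chunk);
				addr = kmap_local_folio(folio, off);
			}
		}
		kunmap_local(addr);
	}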
diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c
index 873e6e1c92b5c7..6ebf3c04ac5fea 100644
--- a/fs/ubifs/find.c
+++ b/fs/ubifs/find.c
@@ -82,8 +82,9 @@ static int valuable(struct ubifs_info *c, const struct ubifs_lprops *lprops)
*/
static int scan_for_dirty_cb(struct ubifs_info *c,
const struct ubifs_lprops *lprops, int in_tree,
- struct scan_data *data)
+ void *arg)
{
+ struct scan_data *data = arg;
int ret = LPT_SCAN_CONTINUE;
/* Exclude LEBs that are currently in use */
@@ -166,8 +167,7 @@ static const struct ubifs_lprops *scan_for_dirty(struct ubifs_info *c,
data.pick_free = pick_free;
data.lnum = -1;
data.exclude_index = exclude_index;
- err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum,
- (ubifs_lpt_scan_callback)scan_for_dirty_cb,
+ err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum, scan_for_dirty_cb,
&data);
if (err)
return ERR_PTR(err);
@@ -349,8 +349,9 @@ out:
*/
static int scan_for_free_cb(struct ubifs_info *c,
const struct ubifs_lprops *lprops, int in_tree,
- struct scan_data *data)
+ void *arg)
{
+ struct scan_data *data = arg;
int ret = LPT_SCAN_CONTINUE;
/* Exclude LEBs that are currently in use */
@@ -446,7 +447,7 @@ const struct ubifs_lprops *do_find_free_space(struct ubifs_info *c,
data.pick_free = pick_free;
data.lnum = -1;
err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum,
- (ubifs_lpt_scan_callback)scan_for_free_cb,
+ scan_for_free_cb,
&data);
if (err)
return ERR_PTR(err);
@@ -589,8 +590,9 @@ out:
*/
static int scan_for_idx_cb(struct ubifs_info *c,
const struct ubifs_lprops *lprops, int in_tree,
- struct scan_data *data)
+ void *arg)
{
+ struct scan_data *data = arg;
int ret = LPT_SCAN_CONTINUE;
/* Exclude LEBs that are currently in use */
@@ -625,8 +627,7 @@ static const struct ubifs_lprops *scan_for_leb_for_idx(struct ubifs_info *c)
int err;
data.lnum = -1;
- err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum,
- (ubifs_lpt_scan_callback)scan_for_idx_cb,
+ err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum, scan_for_idx_cb,
&data);
if (err)
return ERR_PTR(err);
@@ -726,11 +727,10 @@ out:
return err;
}
-static int cmp_dirty_idx(const struct ubifs_lprops **a,
- const struct ubifs_lprops **b)
+static int cmp_dirty_idx(const void *a, const void *b)
{
- const struct ubifs_lprops *lpa = *a;
- const struct ubifs_lprops *lpb = *b;
+ const struct ubifs_lprops *lpa = *(const struct ubifs_lprops **)a;
+ const struct ubifs_lprops *lpb = *(const struct ubifs_lprops **)b;
return lpa->dirty + lpa->free - lpb->dirty - lpb->free;
}
@@ -754,7 +754,7 @@ int ubifs_save_dirty_idx_lnums(struct ubifs_info *c)
sizeof(void *) * c->dirty_idx.cnt);
/* Sort it so that the dirtiest is now at the end */
sort(c->dirty_idx.arr, c->dirty_idx.cnt, sizeof(void *),
- (int (*)(const void *, const void *))cmp_dirty_idx, NULL);
+ cmp_dirty_idx, NULL);
dbg_find("found %d dirty index LEBs", c->dirty_idx.cnt);
if (c->dirty_idx.cnt)
dbg_find("dirtiest index LEB is %d with dirty %d and free %d",
@@ -782,8 +782,9 @@ int ubifs_save_dirty_idx_lnums(struct ubifs_info *c)
*/
static int scan_dirty_idx_cb(struct ubifs_info *c,
const struct ubifs_lprops *lprops, int in_tree,
- struct scan_data *data)
+ void *arg)
{
+ struct scan_data *data = arg;
int ret = LPT_SCAN_CONTINUE;
/* Exclude LEBs that are currently in use */
@@ -842,8 +843,7 @@ static int find_dirty_idx_leb(struct ubifs_info *c)
if (c->pnodes_have >= c->pnode_cnt)
/* All pnodes are in memory, so skip scan */
return -ENOSPC;
- err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum,
- (ubifs_lpt_scan_callback)scan_dirty_idx_cb,
+ err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum, scan_dirty_idx_cb,
&data);
if (err)
return err;
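The find.c (and lprops.c) changes above remove function-pointer casts: calling a function through a pointer of an incompatible type is undefined behavior and trips kernel CFI, so the callbacks now take void * and cast the argument inside, exactly like a standard qsort() comparator. The same rule in runnable standard C:

	#include <stdio.h>
	#include <stdlib.h>

	/* Keep the expected signature; cast the argument, never the
	 * function pointer itself. */
	static int cmp_int(const void *a, const void *b)
	{
		int ia = *(const int *)a;
		int ib = *(const int *)b;

		return (ia > ib) - (ia < ib);
	}

	int main(void)
	{
		int v[] = { 3, 1, 2 };

		qsort(v, 3, sizeof(v[0]), cmp_int);
		printf("%d %d %d\n", v[0], v[1], v[2]);
		return 0;
	}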
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index f0a5538c84b0f2..74aee92433d75c 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -293,6 +293,96 @@ static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
}
/**
+ * __queue_and_wait - queue the current task and wait until it is woken up.
+ * @c: UBIFS file-system description object
+ *
+ * This function adds the current task to the queue and waits until it is
+ * woken up. It should be called with @c->reserve_space_wq locked.
+ */
+static void __queue_and_wait(struct ubifs_info *c)
+{
+ DEFINE_WAIT(wait);
+
+ __add_wait_queue_entry_tail_exclusive(&c->reserve_space_wq, &wait);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_unlock(&c->reserve_space_wq.lock);
+
+ schedule();
+ finish_wait(&c->reserve_space_wq, &wait);
+}
+
+/**
+ * wait_for_reservation - try queuing the current task until it is woken up.
+ * @c: UBIFS file-system description object
+ *
+ * This function queues the current task to wait until it is woken up, if
+ * queuing has started (@c->need_wait_space is not %0). Returns %true if the
+ * current task was added to the queue, otherwise %false.
+ */
+static bool wait_for_reservation(struct ubifs_info *c)
+{
+ if (likely(atomic_read(&c->need_wait_space) == 0))
+ /* Quick path to check whether queuing is started. */
+ return false;
+
+ spin_lock(&c->reserve_space_wq.lock);
+ if (atomic_read(&c->need_wait_space) == 0) {
+ /* Queuing is not started, don't queue current task. */
+ spin_unlock(&c->reserve_space_wq.lock);
+ return false;
+ }
+
+ __queue_and_wait(c);
+ return true;
+}
+
+/**
+ * wake_up_reservation - wake up first task in queue or stop queuing.
+ * @c: UBIFS file-system description object
+ *
+ * This function wakes up the first task in the queue if one exists, or stops
+ * queuing if the queue is empty.
+ */
+static void wake_up_reservation(struct ubifs_info *c)
+{
+ spin_lock(&c->reserve_space_wq.lock);
+ if (waitqueue_active(&c->reserve_space_wq))
+ wake_up_locked(&c->reserve_space_wq);
+ else
+ /*
+ * Unlike wait_for_reservation(), @c->need_wait_space is cleared
+ * under the protection of the wait queue lock, which avoids
+ * clearing it after a new task has already been queued.
+ */
+ atomic_set(&c->need_wait_space, 0);
+ spin_unlock(&c->reserve_space_wq.lock);
+}
+
+/**
+ * add_or_start_queue - start queuing or add the current task to the queue.
+ * @c: UBIFS file-system description object
+ *
+ * This function starts queuing if queuing has not started yet, otherwise it
+ * adds the current task to the queue.
+ */
+static void add_or_start_queue(struct ubifs_info *c)
+{
+ spin_lock(&c->reserve_space_wq.lock);
+ if (atomic_cmpxchg(&c->need_wait_space, 0, 1) == 0) {
+ /* Queuing has just started; this task can proceed directly. */
+ spin_unlock(&c->reserve_space_wq.lock);
+ return;
+ }
+
+ /*
+ * At this point at least two tasks have retried more than 32
+ * times; the first one has already started queuing, so just
+ * queue the remaining tasks.
+ */
+ __queue_and_wait(c);
+}
+
+/**
* make_reservation - reserve journal space.
* @c: UBIFS file-system description object
* @jhead: journal head
@@ -311,33 +401,27 @@ static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
static int make_reservation(struct ubifs_info *c, int jhead, int len)
{
int err, cmt_retries = 0, nospc_retries = 0;
+ bool blocked = wait_for_reservation(c);
again:
down_read(&c->commit_sem);
err = reserve_space(c, jhead, len);
- if (!err)
+ if (!err) {
/* c->commit_sem will get released via finish_reservation(). */
- return 0;
+ goto out_wake_up;
+ }
up_read(&c->commit_sem);
if (err == -ENOSPC) {
/*
* GC could not make any progress. We should try to commit
- * once because it could make some dirty space and GC would
- * make progress, so make the error -EAGAIN so that the below
+ * because it could make some dirty space and GC would make
+ * progress, so make the error -EAGAIN so that the below
* will commit and re-try.
*/
- if (nospc_retries++ < 2) {
- dbg_jnl("no space, retry");
- err = -EAGAIN;
- }
-
- /*
- * This means that the budgeting is incorrect. We always have
- * to be able to write to the media, because all operations are
- * budgeted. Deletions are not budgeted, though, but we reserve
- * an extra LEB for them.
- */
+ nospc_retries++;
+ dbg_jnl("no space, retry");
+ err = -EAGAIN;
}
if (err != -EAGAIN)
@@ -349,15 +433,37 @@ again:
*/
if (cmt_retries > 128) {
/*
- * This should not happen unless the journal size limitations
- * are too tough.
+ * This should not happen unless:
+ * 1. The journal size limitations are too tough.
+ * 2. The budgeting is incorrect. We always have to be able to
+ * write to the media, because all operations are budgeted.
+ * Deletions are not budgeted, though, but we reserve an
+ * extra LEB for them.
*/
- ubifs_err(c, "stuck in space allocation");
+ ubifs_err(c, "stuck in space allocation, nospc_retries %d",
+ nospc_retries);
err = -ENOSPC;
goto out;
- } else if (cmt_retries > 32)
- ubifs_warn(c, "too many space allocation re-tries (%d)",
- cmt_retries);
+ } else if (cmt_retries > 32) {
+ /*
+ * This is almost impossible, unless many tasks are making
+ * reservations concurrently and one task has retried gc + commit
+ * many times while the space generated in the meantime was
+ * grabbed by other tasks.
+ * If it does happen, start queuing up all tasks that will make
+ * space reservations; then only one task reserves space at any
+ * time, and it can always succeed provided budgeting is correct.
+ */
+ ubifs_warn(c, "too many space allocation cmt_retries (%d) "
+ "nospc_retries (%d), start queuing tasks",
+ cmt_retries, nospc_retries);
+
+ if (!blocked) {
+ blocked = true;
+ add_or_start_queue(c);
+ }
+ }
dbg_jnl("-EAGAIN, commit and retry (retried %d times)",
cmt_retries);
@@ -365,7 +471,7 @@ again:
err = ubifs_run_commit(c);
if (err)
- return err;
+ goto out_wake_up;
goto again;
out:
@@ -380,6 +486,27 @@ out:
cmt_retries = dbg_check_lprops(c);
up_write(&c->commit_sem);
}
+out_wake_up:
+ if (blocked) {
+ /*
+ * Only tasks that have started queuing or have been queued can
+ * wake up other queued tasks, which ensures that only one task
+ * is woken up to make a space reservation.
+ * For example:
+ * task A task B task C
+ * make_reservation make_reservation
+ * reserve_space // 0
+ * wake_up_reservation
+ * atomic_cmpxchg // 0, start queuing
+ * reserve_space
+ * wait_for_reservation
+ * __queue_and_wait
+ * add_wait_queue
+ * if (blocked) // false
+ * // So that task C won't be waked up to race with task B
+ */
+ wake_up_reservation(c);
+ }
return err;
}
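The reservation queue above relies on exclusive wait-queue entries: wake_up_locked() on a queue of exclusive waiters releases exactly one task, so at most one reserver runs at a time. A hedged sketch of the two halves (kernel API, not standalone; demo_* names are hypothetical):

	static void demo_wait_turn(wait_queue_head_t *wq)
	{
		DEFINE_WAIT(wait);

		spin_lock(&wq->lock);
		__add_wait_queue_entry_tail_exclusive(wq, &wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&wq->lock);

		schedule();			/* sleep until demo_wake_one() */
		finish_wait(wq, &wait);
	}

	static void demo_wake_one(wait_queue_head_t *wq)
	{
		spin_lock(&wq->lock);
		if (waitqueue_active(wq))
			wake_up_locked(wq);	/* wakes one exclusive waiter */
		spin_unlock(&wq->lock);
	}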
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
index 6d6cd85c2b4cab..a11c3dab7e168c 100644
--- a/fs/ubifs/lprops.c
+++ b/fs/ubifs/lprops.c
@@ -1014,8 +1014,9 @@ out:
*/
static int scan_check_cb(struct ubifs_info *c,
const struct ubifs_lprops *lp, int in_tree,
- struct ubifs_lp_stats *lst)
+ void *arg)
{
+ struct ubifs_lp_stats *lst = arg;
struct ubifs_scan_leb *sleb;
struct ubifs_scan_node *snod;
int cat, lnum = lp->lnum, is_idx = 0, used = 0, free, dirty, ret;
@@ -1269,8 +1270,7 @@ int dbg_check_lprops(struct ubifs_info *c)
memset(&lst, 0, sizeof(struct ubifs_lp_stats));
err = ubifs_lpt_scan_nolock(c, c->main_first, c->leb_cnt - 1,
- (ubifs_lpt_scan_callback)scan_check_cb,
- &lst);
+ scan_check_cb, &lst);
if (err && err != -ENOSPC)
goto out;
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index c4d079328b929f..07351fdce72230 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -1646,7 +1646,6 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum)
len -= node_len;
}
- err = 0;
out:
vfree(buf);
return err;
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 7f4031a15f4df8..291583005dd123 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2151,6 +2151,8 @@ static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi)
mutex_init(&c->bu_mutex);
mutex_init(&c->write_reserve_mutex);
init_waitqueue_head(&c->cmt_wq);
+ init_waitqueue_head(&c->reserve_space_wq);
+ atomic_set(&c->need_wait_space, 0);
c->buds = RB_ROOT;
c->old_idx = RB_ROOT;
c->size_tree = RB_ROOT;
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index f4728e65d1bda4..45cacdcd4746bc 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -3116,14 +3116,7 @@ static void tnc_destroy_cnext(struct ubifs_info *c)
void ubifs_tnc_close(struct ubifs_info *c)
{
tnc_destroy_cnext(c);
- if (c->zroot.znode) {
- long n, freed;
-
- n = atomic_long_read(&c->clean_zn_cnt);
- freed = ubifs_destroy_tnc_subtree(c, c->zroot.znode);
- ubifs_assert(c, freed == n);
- atomic_long_sub(n, &ubifs_clean_zn_cnt);
- }
+ ubifs_destroy_tnc_tree(c);
kfree(c->gap_lebs);
kfree(c->ilebs);
destroy_old_idx(c);
diff --git a/fs/ubifs/tnc_misc.c b/fs/ubifs/tnc_misc.c
index 4d686e34e64d99..d3f8a6aa1f49da 100644
--- a/fs/ubifs/tnc_misc.c
+++ b/fs/ubifs/tnc_misc.c
@@ -251,6 +251,28 @@ long ubifs_destroy_tnc_subtree(const struct ubifs_info *c,
}
/**
+ * ubifs_destroy_tnc_tree - destroy all znodes connected to the TNC tree.
+ * @c: UBIFS file-system description object
+ *
+ * This function destroys the whole TNC tree and updates clean global znode
+ * count.
+ */
+void ubifs_destroy_tnc_tree(struct ubifs_info *c)
+{
+ long n, freed;
+
+ if (!c->zroot.znode)
+ return;
+
+ n = atomic_long_read(&c->clean_zn_cnt);
+ freed = ubifs_destroy_tnc_subtree(c, c->zroot.znode);
+ ubifs_assert(c, freed == n);
+ atomic_long_sub(n, &ubifs_clean_zn_cnt);
+
+ c->zroot.znode = NULL;
+}
+
+/**
* read_znode - read an indexing node from flash and fill znode.
* @c: UBIFS file-system description object
* @zzbr: the zbranch describing the node to read
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 3916dc4f30caa6..1f3ea879d93afb 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -1047,6 +1047,8 @@ struct ubifs_debug_info;
* @bg_bud_bytes: number of bud bytes when background commit is initiated
* @old_buds: buds to be released after commit ends
* @max_bud_cnt: maximum number of buds
+ * @need_wait_space: non-%0 means space-reserving tasks must wait in the queue
+ * @reserve_space_wq: wait queue to sleep on if @need_wait_space is not %0
*
* @commit_sem: synchronizes committer with other processes
* @cmt_state: commit state
@@ -1305,6 +1307,8 @@ struct ubifs_info {
long long bg_bud_bytes;
struct list_head old_buds;
int max_bud_cnt;
+ atomic_t need_wait_space;
+ wait_queue_head_t reserve_space_wq;
struct rw_semaphore commit_sem;
int cmt_state;
@@ -1903,6 +1907,7 @@ struct ubifs_znode *ubifs_tnc_postorder_next(const struct ubifs_info *c,
struct ubifs_znode *znode);
long ubifs_destroy_tnc_subtree(const struct ubifs_info *c,
struct ubifs_znode *zr);
+void ubifs_destroy_tnc_tree(struct ubifs_info *c);
struct ubifs_znode *ubifs_load_znode(struct ubifs_info *c,
struct ubifs_zbranch *zbr,
struct ubifs_znode *parent, int iip);
diff --git a/fs/xfs/xfs_buf_mem.c b/fs/xfs/xfs_buf_mem.c
index 8ad38c64708ec5..9bb2d24de70941 100644
--- a/fs/xfs/xfs_buf_mem.c
+++ b/fs/xfs/xfs_buf_mem.c
@@ -81,8 +81,6 @@ xmbuf_alloc(
/* ensure all writes are below EOF to avoid pagecache zeroing */
i_size_write(inode, inode->i_sb->s_maxbytes);
- trace_xmbuf_create(btp);
-
error = xfs_buf_cache_init(btp->bt_cache);
if (error)
goto out_file;
@@ -99,6 +97,8 @@ xmbuf_alloc(
if (error)
goto out_bcache;
+ trace_xmbuf_create(btp);
+
*btpp = btp;
return 0;
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 30d36596a2e46a..c98cb468c35780 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -811,6 +811,12 @@ restart:
* caller should throw away the dquot and start over. Otherwise, the dquot
* is returned locked (and held by the cache) as if there had been a cache
* hit.
+ *
+ * The insert needs to be done under memalloc_nofs context because the radix
+ * tree can do memory allocation during insert. The qi->qi_tree_lock is taken in
+ * memory reclaim when freeing unused dquots, so we cannot have the radix tree
+ * node allocation recursing into filesystem reclaim whilst we hold the
+ * qi_tree_lock.
*/
static int
xfs_qm_dqget_cache_insert(
@@ -820,25 +826,27 @@ xfs_qm_dqget_cache_insert(
xfs_dqid_t id,
struct xfs_dquot *dqp)
{
+ unsigned int nofs_flags;
int error;
+ nofs_flags = memalloc_nofs_save();
mutex_lock(&qi->qi_tree_lock);
error = radix_tree_insert(tree, id, dqp);
if (unlikely(error)) {
/* Duplicate found! Caller must try again. */
- mutex_unlock(&qi->qi_tree_lock);
trace_xfs_dqget_dup(dqp);
- return error;
+ goto out_unlock;
}
/* Return a locked dquot to the caller, with a reference taken. */
xfs_dqlock(dqp);
dqp->q_nrefs = 1;
-
qi->qi_dquots++;
- mutex_unlock(&qi->qi_tree_lock);
- return 0;
+out_unlock:
+ mutex_unlock(&qi->qi_tree_lock);
+ memalloc_nofs_restore(nofs_flags);
+ return error;
}
/* Check our input parameters. */
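As the new comment above explains, qi_tree_lock is taken from memory reclaim, so an allocation made while holding it must not recurse into filesystem reclaim. memalloc_nofs_save()/restore() clears __GFP_FS for every allocation in the scoped section, including the radix-tree node allocation done by the insert. A hedged sketch of the scoping pattern (kernel API, not standalone; demo_insert is hypothetical):

	static int demo_insert(struct mutex *lock, struct radix_tree_root *tree,
			       unsigned long id, void *item)
	{
		unsigned int nofs_flags;
		int error;

		nofs_flags = memalloc_nofs_save();	/* scope: no FS reclaim */
		mutex_lock(lock);
		error = radix_tree_insert(tree, id, item); /* may allocate */
		mutex_unlock(lock);
		memalloc_nofs_restore(nofs_flags);
		return error;
	}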
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 56b07d8ed431f2..aea97fc074f8de 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -4626,6 +4626,7 @@ TRACE_EVENT(xmbuf_create,
char *path;
struct file *file = btp->bt_file;
+ __entry->dev = btp->bt_mount->m_super->s_dev;
__entry->ino = file_inode(file)->i_ino;
memset(pathname, 0, sizeof(pathname));
path = file_path(file, pathname, sizeof(pathname) - 1);
@@ -4633,7 +4634,8 @@ TRACE_EVENT(xmbuf_create,
path = "(unknown)";
strncpy(__entry->pathname, path, sizeof(__entry->pathname));
),
- TP_printk("xmino 0x%lx path '%s'",
+ TP_printk("dev %d:%d xmino 0x%lx path '%s'",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->pathname)
);
@@ -4642,6 +4644,7 @@ TRACE_EVENT(xmbuf_free,
TP_PROTO(struct xfs_buftarg *btp),
TP_ARGS(btp),
TP_STRUCT__entry(
+ __field(dev_t, dev)
__field(unsigned long, ino)
__field(unsigned long long, bytes)
__field(loff_t, size)
@@ -4650,11 +4653,13 @@ TRACE_EVENT(xmbuf_free,
struct file *file = btp->bt_file;
struct inode *inode = file_inode(file);
+ __entry->dev = btp->bt_mount->m_super->s_dev;
__entry->size = i_size_read(inode);
__entry->bytes = (inode->i_blocks << SECTOR_SHIFT) + inode->i_bytes;
__entry->ino = inode->i_ino;
),
- TP_printk("xmino 0x%lx mem_bytes 0x%llx isize 0x%llx",
+ TP_printk("dev %d:%d xmino 0x%lx mem_bytes 0x%llx isize 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->bytes,
__entry->size)
diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h
index 39e56e1c720324..446fea6dda78b9 100644
--- a/include/asm-generic/bitops/__ffs.h
+++ b/include/asm-generic/bitops/__ffs.h
@@ -5,12 +5,12 @@
#include <asm/types.h>
/**
- * __ffs - find first bit in word.
+ * generic___ffs - find first bit in word.
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
-static __always_inline unsigned long __ffs(unsigned long word)
+static __always_inline unsigned long generic___ffs(unsigned long word)
{
int num = 0;
@@ -41,4 +41,8 @@ static __always_inline unsigned long __ffs(unsigned long word)
return num;
}
+#ifndef __HAVE_ARCH___FFS
+#define __ffs(word) generic___ffs(word)
+#endif
+
#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */
diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
index 03f721a8a2b199..54ccccf96e21ea 100644
--- a/include/asm-generic/bitops/__fls.h
+++ b/include/asm-generic/bitops/__fls.h
@@ -5,12 +5,12 @@
#include <asm/types.h>
/**
- * __fls - find last (most-significant) set bit in a long word
+ * generic___fls - find last (most-significant) set bit in a long word
* @word: the word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
-static __always_inline unsigned long __fls(unsigned long word)
+static __always_inline unsigned long generic___fls(unsigned long word)
{
int num = BITS_PER_LONG - 1;
@@ -41,4 +41,8 @@ static __always_inline unsigned long __fls(unsigned long word)
return num;
}
+#ifndef __HAVE_ARCH___FLS
+#define __fls(word) generic___fls(word)
+#endif
+
#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */
diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h
index 323fd5d6ae263a..4c43f242daeb17 100644
--- a/include/asm-generic/bitops/ffs.h
+++ b/include/asm-generic/bitops/ffs.h
@@ -3,14 +3,14 @@
#define _ASM_GENERIC_BITOPS_FFS_H_
/**
- * ffs - find first bit set
+ * generic_ffs - find first bit set
* @x: the word to search
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from ffz (man ffs).
*/
-static inline int ffs(int x)
+static inline int generic_ffs(int x)
{
int r = 1;
@@ -39,4 +39,8 @@ static inline int ffs(int x)
return r;
}
+#ifndef __HAVE_ARCH_FFS
+#define ffs(x) generic_ffs(x)
+#endif
+
#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */
diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
index b168bb10e1be17..26f3ce1dd6e448 100644
--- a/include/asm-generic/bitops/fls.h
+++ b/include/asm-generic/bitops/fls.h
@@ -3,14 +3,14 @@
#define _ASM_GENERIC_BITOPS_FLS_H_
/**
- * fls - find last (most-significant) bit set
+ * generic_fls - find last (most-significant) bit set
* @x: the word to search
*
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static __always_inline int fls(unsigned int x)
+static __always_inline int generic_fls(unsigned int x)
{
int r = 32;
@@ -39,4 +39,8 @@ static __always_inline int fls(unsigned int x)
return r;
}
+#ifndef __HAVE_ARCH_FLS
+#define fls(x) generic_fls(x)
+#endif
+
#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 3606e1a7f965b7..4baca0d9107b05 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -541,7 +541,7 @@ struct drm_bridge_funcs {
* The @get_modes callback is mostly intended to support non-probeable
* displays such as many fixed panels. Bridges that support reading
* EDID shall leave @get_modes unimplemented and implement the
- * &drm_bridge_funcs->get_edid callback instead.
+ * &drm_bridge_funcs->edid_read callback instead.
*
* This callback is optional. Bridges that implement it shall set the
* DRM_BRIDGE_OP_MODES flag in their &drm_bridge->ops.
@@ -687,7 +687,7 @@ enum drm_bridge_ops {
/**
* @DRM_BRIDGE_OP_EDID: The bridge can retrieve the EDID of the display
* connected to its output. Bridges that set this flag shall implement
- * the &drm_bridge_funcs->get_edid callback.
+ * the &drm_bridge_funcs->edid_read callback.
*/
DRM_BRIDGE_OP_EDID = BIT(1),
/**
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index 0c9f917a4d4be9..81572d32db0c2b 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -71,7 +71,6 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
}
#define DRM_FIXED_POINT 32
-#define DRM_FIXED_POINT_HALF 16
#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
@@ -90,7 +89,7 @@ static inline int drm_fixp2int(s64 a)
static inline int drm_fixp2int_round(s64 a)
{
- return drm_fixp2int(a + (1 << (DRM_FIXED_POINT_HALF - 1)));
+ return drm_fixp2int(a + DRM_FIXED_ONE / 2);
}
static inline int drm_fixp2int_ceil(s64 a)
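A quick worked check of the rounding fix (values illustrative): DRM_FIXED_POINT_HALF only made sense for a 16.16 layout, so with the 32.32 format it produced a bias of 1 << 15, far below one half.

/* 1.5 in 32.32 fixed point is (3LL << 31), i.e. 0x180000000            */
/* old bias: 1 << (DRM_FIXED_POINT_HALF - 1) = 0x8000                   */
/*   drm_fixp2int(0x180000000 + 0x8000)      = 1  (truncated, wrong)    */
/* new bias: DRM_FIXED_ONE / 2 = 1ULL << 31                             */
/*   drm_fixp2int(0x180000000 + 0x80000000)  = 2  (rounded, correct)    */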
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 881b03e4dc2882..9ed42469540eb6 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -898,7 +898,8 @@ struct drm_connector_helper_funcs {
*
* RETURNS:
*
- * The number of modes added by calling drm_mode_probed_add().
+ * The number of modes added by calling drm_mode_probed_add(). Return 0
+ * on failures (no modes) instead of negative error codes.
*/
int (*get_modes)(struct drm_connector *connector);
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 52730e4236811b..c00cc6c0878a1e 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -116,6 +116,14 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
*/
#define __stringify_label(n) #n
+#define __annotate_reachable(c) ({ \
+ asm volatile(__stringify_label(c) ":\n\t" \
+ ".pushsection .discard.reachable\n\t" \
+ ".long " __stringify_label(c) "b - .\n\t" \
+ ".popsection\n\t"); \
+})
+#define annotate_reachable() __annotate_reachable(__COUNTER__)
+
#define __annotate_unreachable(c) ({ \
asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.unreachable\n\t" \
@@ -128,6 +136,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define __annotate_jump_table __section(".rodata..c_jump_table")
#else /* !CONFIG_OBJTOOL */
+#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif /* CONFIG_OBJTOOL */
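annotate_reachable() mirrors the existing unreachable annotation: the asm plants a location label in .discard.reachable so objtool treats what follows a trapping instruction as live code. A hedged usage fragment (x86-flavoured; the surrounding code is invented):

	/* hypothetical WARN-style sequence */
	asm volatile("ud2");	/* trap; the #UD handler may resume past it */
	annotate_reachable();	/* tell objtool the next insn is reachable  */
	/* ... execution legitimately continues here ... */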
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 23270b16e1dbf3..d33352c2e3865f 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -8,6 +8,12 @@
struct kimage;
+struct crash_mem {
+ unsigned int max_nr_ranges;
+ unsigned int nr_ranges;
+ struct range ranges[] __counted_by(max_nr_ranges);
+};
+
#ifdef CONFIG_CRASH_DUMP
int crash_shrink_memory(unsigned long new_size);
@@ -51,12 +57,6 @@ static inline unsigned int crash_get_elfcorehdr_size(void) { return 0; }
/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN 4096
-struct crash_mem {
- unsigned int max_nr_ranges;
- unsigned int nr_ranges;
- struct range ranges[] __counted_by(max_nr_ranges);
-};
-
extern int crash_exclude_mem_range(struct crash_mem *mem,
unsigned long long mstart,
unsigned long long mend);
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 708e6a177b1be0..0dd27364d56fe0 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -145,9 +145,13 @@ struct fb_event {
void *data;
};
+/* Enough for the VT console's needs, see its max_font_width/height */
+#define FB_MAX_BLIT_WIDTH 64
+#define FB_MAX_BLIT_HEIGHT 128
+
struct fb_blit_caps {
- u32 x;
- u32 y;
+ DECLARE_BITMAP(x, FB_MAX_BLIT_WIDTH);
+ DECLARE_BITMAP(y, FB_MAX_BLIT_HEIGHT);
u32 len;
u32 flags;
};
@@ -194,10 +198,12 @@ struct fb_pixmap {
u32 scan_align; /* alignment per scanline */
u32 access_align; /* alignment per read/write (bits) */
u32 flags; /* see FB_PIXMAP_* */
- u32 blit_x; /* supported bit block dimensions (1-32)*/
- u32 blit_y; /* Format: blit_x = 1 << (width - 1) */
- /* blit_y = 1 << (height - 1) */
- /* if 0, will be set to 0xffffffff (all)*/
+ /* supported bit block dimensions */
+ /* Format: test_bit(width - 1, blit_x) */
+ /* test_bit(height - 1, blit_y) */
+ /* if zero, will be set to full (all) */
+ DECLARE_BITMAP(blit_x, FB_MAX_BLIT_WIDTH);
+ DECLARE_BITMAP(blit_y, FB_MAX_BLIT_HEIGHT);
/* access methods */
void (*writeio)(struct fb_info *info, void __iomem *dst, void *src, unsigned int size);
void (*readio) (struct fb_info *info, void *dst, void __iomem *src, unsigned int size);
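With blit_x/blit_y widened from a u32 each to proper bitmaps, a driver advertises supported blit cell sizes via set_bit() instead of OR-ing 1 << (n - 1) into a word. A sketch under the new layout (driver name and supported sizes are made up):

static void example_init_blit_caps(struct fb_info *info)
{
	bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
	bitmap_zero(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
	set_bit(8 - 1, info->pixmap.blit_x);	/* 8 px wide  */
	set_bit(8 - 1, info->pixmap.blit_y);	/* 8 px tall  */
	set_bit(16 - 1, info->pixmap.blit_y);	/* 16 px tall */
}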
diff --git a/include/linux/font.h b/include/linux/font.h
index abf1442ce71919..81caffd51bb494 100644
--- a/include/linux/font.h
+++ b/include/linux/font.h
@@ -57,7 +57,8 @@ extern const struct font_desc *find_font(const char *name);
/* Get the default font for a specific screen size */
extern const struct font_desc *get_default_font(int xres, int yres,
- u32 font_w, u32 font_h);
+ unsigned long *font_w,
+ unsigned long *font_h);
/* Max. length for the name of a predefined font */
#define MAX_FONT_NAME 32
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2c0910bc3e4a2c..0436b919f1c7fc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -87,7 +87,7 @@ extern int sysctl_legacy_va_layout;
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
-extern const int mmap_rnd_bits_max;
+extern int mmap_rnd_bits_max __ro_after_init;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
diff --git a/include/linux/mman.h b/include/linux/mman.h
index dc7048824be81d..bcb201ab7a412e 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -162,6 +162,14 @@ calc_vm_flag_bits(unsigned long flags)
unsigned long vm_commit_limit(void);
+#ifndef arch_memory_deny_write_exec_supported
+static inline bool arch_memory_deny_write_exec_supported(void)
+{
+ return true;
+}
+#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
+#endif
+
/*
* Denies creating a writable executable mapping or gaining executable permissions.
*
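The #ifndef/#define pair above is the usual weak-override idiom: an architecture that cannot honour MDWE supplies its own copy in asm/mman.h before this header is seen. An illustrative opt-out (the port is imaginary; PARISC's writable-stack constraint is what motivated the hook):

/* arch/example/include/asm/mman.h -- illustrative opt-out */
static inline bool arch_memory_deny_write_exec_supported(void)
{
	/* this port requires writable stacks, so MDWE cannot be honoured */
	return false;
}
#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported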
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index a529347fd75b2a..562f92504f2b7c 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -192,6 +192,7 @@ struct ubi_device_info {
* or a volume was removed)
* @UBI_VOLUME_RESIZED: a volume has been re-sized
* @UBI_VOLUME_RENAMED: a volume has been re-named
+ * @UBI_VOLUME_SHUTDOWN: a volume is going to be removed, shut down users
* @UBI_VOLUME_UPDATED: data has been written to a volume
*
* These constants define which type of event has happened when a volume
@@ -202,6 +203,7 @@ enum {
UBI_VOLUME_REMOVED,
UBI_VOLUME_RESIZED,
UBI_VOLUME_RENAMED,
+ UBI_VOLUME_SHUTDOWN,
UBI_VOLUME_UPDATED,
};
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index 3921fbed0b2868..51421fdbb0bad4 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -17,10 +17,12 @@
* build_OID_registry.pl to generate the data for look_up_OID().
*/
enum OID {
+ OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */
OID_id_dsa, /* 1.2.840.10040.4.1 */
OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */
OID_id_prime192v1, /* 1.2.840.10045.3.1.1 */
OID_id_prime256v1, /* 1.2.840.10045.3.1.7 */
+ OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */
OID_id_ecdsa_with_sha224, /* 1.2.840.10045.4.3.1 */
OID_id_ecdsa_with_sha256, /* 1.2.840.10045.4.3.2 */
OID_id_ecdsa_with_sha384, /* 1.2.840.10045.4.3.3 */
@@ -28,6 +30,7 @@ enum OID {
/* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */
OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */
+ OID_sha1WithRSAEncryption, /* 1.2.840.113549.1.1.5 */
OID_sha256WithRSAEncryption, /* 1.2.840.113549.1.1.11 */
OID_sha384WithRSAEncryption, /* 1.2.840.113549.1.1.12 */
OID_sha512WithRSAEncryption, /* 1.2.840.113549.1.1.13 */
@@ -64,6 +67,7 @@ enum OID {
OID_PKU2U, /* 1.3.5.1.5.2.7 */
OID_Scram, /* 1.3.6.1.5.5.14 */
OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */
+ OID_sha1, /* 1.3.14.3.2.26 */
OID_id_ansip384r1, /* 1.3.132.0.34 */
OID_sha256, /* 2.16.840.1.101.3.4.2.1 */
OID_sha384, /* 2.16.840.1.101.3.4.2.2 */
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index aa691f2119b0bb..0c7e3dcfe8670c 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -398,7 +398,7 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
* @count: Number of elements in the array; must be compile-time const.
* @initializer: initializer expression (could be empty for no init).
*/
-#define _DEFINE_FLEX(type, name, member, count, initializer) \
+#define _DEFINE_FLEX(type, name, member, count, initializer...) \
_Static_assert(__builtin_constant_p(count), \
"onstack flex array members require compile-time const count"); \
union { \
@@ -408,8 +408,8 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
type *name = (type *)&name##_u
/**
- * DEFINE_FLEX() - Define an on-stack instance of structure with a trailing
- * flexible array member.
+ * DEFINE_RAW_FLEX() - Define an on-stack instance of structure with a trailing
+ * flexible array member, when it does not have a __counted_by annotation.
*
* @type: structure type name, including "struct" keyword.
* @name: Name for a variable to define.
@@ -420,7 +420,24 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
* flexible array member.
* Use __struct_size(@name) to get compile-time size of it afterwards.
*/
-#define DEFINE_FLEX(type, name, member, count) \
+#define DEFINE_RAW_FLEX(type, name, member, count) \
_DEFINE_FLEX(type, name, member, count, = {})
+/**
+ * DEFINE_FLEX() - Define an on-stack instance of structure with a trailing
+ * flexible array member.
+ *
+ * @TYPE: structure type name, including "struct" keyword.
+ * @NAME: Name for a variable to define.
+ * @MEMBER: Name of the array member.
+ * @COUNTER: Name of the __counted_by member.
+ * @COUNT: Number of elements in the array; must be compile-time const.
+ *
+ * Define a zeroed, on-stack instance of @TYPE structure with a trailing
+ * flexible array member.
+ * Use __struct_size(@NAME) to get compile-time size of it afterwards.
+ */
+#define DEFINE_FLEX(TYPE, NAME, MEMBER, COUNTER, COUNT) \
+ _DEFINE_FLEX(TYPE, NAME, MEMBER, COUNT, = { .obj.COUNTER = COUNT, })
+
#endif /* __LINUX_OVERFLOW_H */
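The practical difference between the two variants is that DEFINE_FLEX() also seeds the __counted_by field, so bounds checks see the real capacity from the start. A minimal sketch (the struct is invented; the DEFINE_FLEX_test hunk in lib/overflow_kunit.c below exercises the same macros):

struct pkt {
	u8 len;				/* the __counted_by counter */
	u8 data[] __counted_by(len);
};

static void demo(void)
{
	DEFINE_RAW_FLEX(struct pkt, a, data, 4);	/* a->len stays 0 */
	DEFINE_FLEX(struct pkt, b, data, len, 4);	/* b->len == 4    */

	a->len = 4;		/* RAW variant: counter filled by hand    */
	b->data[3] = 0xff;	/* in bounds; b->len already says 4       */
	/* __struct_size(a) == __struct_size(b) == sizeof(struct pkt) + 4 */
}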
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index fcc06c300a72c3..5d3a0cccc6bf8d 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -11,8 +11,8 @@
#include <linux/types.h>
-/* 15 pointers + header align the folio_batch structure to a power of two */
-#define PAGEVEC_SIZE 15
+/* 31 pointers + header align the folio_batch structure to a power of two */
+#define PAGEVEC_SIZE 31
struct folio;
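The power-of-two claim checks out on a typical 64-bit layout, where folio_batch's counters occupy one padded 8-byte header ahead of the pointer array:

/*  8 (header) + 15 * 8 (folio pointers) = 128 bytes = 2^7  -- old */
/*  8 (header) + 31 * 8 (folio pointers) = 256 bytes = 2^8  -- new */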
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 5f8e438a0312bd..3f4d315aaec9e6 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -42,7 +42,7 @@ static inline time64_t rtc_tm_sub(struct rtc_time *lhs, struct rtc_time *rhs)
#include <linux/timerqueue.h>
#include <linux/workqueue.h>
-extern struct class *rtc_class;
+extern const struct class rtc_class;
/*
* For these RTC methods the device parameter is the physical device
diff --git a/include/linux/sync_core.h b/include/linux/sync_core.h
index 013da4b8b3272c..67bb9794b87585 100644
--- a/include/linux/sync_core.h
+++ b/include/linux/sync_core.h
@@ -17,5 +17,19 @@ static inline void sync_core_before_usermode(void)
}
#endif
-#endif /* _LINUX_SYNC_CORE_H */
+#ifdef CONFIG_ARCH_HAS_PREPARE_SYNC_CORE_CMD
+#include <asm/sync_core.h>
+#else
+/*
+ * This is a dummy prepare_sync_core_cmd() implementation that can be used on
+ * all architectures which provide unconditional core serializing instructions
+ * in switch_mm().
+ * If your architecture doesn't provide such core serializing instructions in
+ * switch_mm(), you may need to provide your own implementation.
+ */
+static inline void prepare_sync_core_cmd(struct mm_struct *mm)
+{
+}
+#endif
+#endif /* _LINUX_SYNC_CORE_H */
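An architecture that selects ARCH_HAS_PREPARE_SYNC_CORE_CMD supplies the real hook in asm/sync_core.h. RISC-V is the intended first user; its hook takes roughly the following shape (abridged paraphrase, not a verbatim copy):

/* arch/riscv/include/asm/sync_core.h (abridged sketch) */
static inline void prepare_sync_core_cmd(struct mm_struct *mm)
{
	/* mark the icache stale on all harts so the next switch_mm()
	 * into this mm issues the core-serializing FENCE.I */
	cpumask_setall(&mm->context.icache_stale_mask);
}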
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index fddc767c47871f..9c540f5468ebff 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -149,6 +149,7 @@ struct scsi_device {
struct scsi_vpd __rcu *vpd_pgb0;
struct scsi_vpd __rcu *vpd_pgb1;
struct scsi_vpd __rcu *vpd_pgb2;
+ struct scsi_vpd __rcu *vpd_pgb7;
struct scsi_target *sdev_target;
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index 07d65c1f59db33..843106e1109f43 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -10,6 +10,7 @@
#ifndef _SCSI_PROTO_H_
#define _SCSI_PROTO_H_
+#include <linux/build_bug.h>
#include <linux/types.h>
/*
@@ -126,6 +127,7 @@
#define SAI_READ_CAPACITY_16 0x10
#define SAI_GET_LBA_STATUS 0x12
#define SAI_REPORT_REFERRALS 0x13
+#define SAI_GET_STREAM_STATUS 0x16
/* values for maintenance in */
#define MI_REPORT_IDENTIFYING_INFORMATION 0x05
#define MI_REPORT_TARGET_PGS 0x0a
@@ -275,6 +277,82 @@ struct scsi_lun {
__u8 scsi_lun[8];
};
+/* SBC-5 IO advice hints group descriptor */
+struct scsi_io_group_descriptor {
+#if defined(__BIG_ENDIAN)
+ u8 io_advice_hints_mode: 2;
+ u8 reserved1: 3;
+ u8 st_enble: 1;
+ u8 cs_enble: 1;
+ u8 ic_enable: 1;
+#elif defined(__LITTLE_ENDIAN)
+ u8 ic_enable: 1;
+ u8 cs_enble: 1;
+ u8 st_enble: 1;
+ u8 reserved1: 3;
+ u8 io_advice_hints_mode: 2;
+#else
+#error
+#endif
+ u8 reserved2[3];
+ /* Logical block markup descriptor */
+#if defined(__BIG_ENDIAN)
+ u8 acdlu: 1;
+ u8 reserved3: 1;
+ u8 rlbsr: 2;
+ u8 lbm_descriptor_type: 4;
+#elif defined(__LITTLE_ENDIAN)
+ u8 lbm_descriptor_type: 4;
+ u8 rlbsr: 2;
+ u8 reserved3: 1;
+ u8 acdlu: 1;
+#else
+#error
+#endif
+ u8 params[2];
+ u8 reserved4;
+ u8 reserved5[8];
+};
+
+static_assert(sizeof(struct scsi_io_group_descriptor) == 16);
+
+/* SCSI stream status descriptor */
+struct scsi_stream_status {
+#if defined(__BIG_ENDIAN)
+ u8 perm: 1;
+ u8 reserved1: 7;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved1: 7;
+ u8 perm: 1;
+#else
+#error
+#endif
+ u8 reserved2;
+ __be16 stream_identifier;
+#if defined(__BIG_ENDIAN)
+ u8 reserved3: 2;
+ u8 rel_lifetime: 6;
+#elif defined(__LITTLE_ENDIAN)
+ u8 rel_lifetime: 6;
+ u8 reserved3: 2;
+#else
+#error
+#endif
+ u8 reserved4[3];
+};
+
+static_assert(sizeof(struct scsi_stream_status) == 8);
+
+/* GET STREAM STATUS parameter data */
+struct scsi_stream_status_header {
+ __be32 len; /* length in bytes of stream_status[] array. */
+ u16 reserved;
+ __be16 number_of_open_streams;
+ DECLARE_FLEX_ARRAY(struct scsi_stream_status, stream_status);
+};
+
+static_assert(sizeof(struct scsi_stream_status_header) == 8);
+
/* SPC asymmetric access states */
#define SCSI_ACCESS_STATE_OPTIMAL 0x00
#define SCSI_ACCESS_STATE_ACTIVE 0x01
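A consumer of the new GET STREAM STATUS layout walks the flexible array off the header like this (hedged sketch; the response buffer is assumed complete and validated, and the function name is invented):

static void example_dump_streams(const struct scsi_stream_status_header *h)
{
	u32 nr = be32_to_cpu(h->len) / sizeof(struct scsi_stream_status);
	u32 i;

	for (i = 0; i < nr; i++) {
		const struct scsi_stream_status *s = &h->stream_status[i];

		pr_info("stream %u: perm=%u rel_lifetime=%u\n",
			be16_to_cpu(s->stream_identifier),
			s->perm, s->rel_lifetime);
	}
}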
diff --git a/init/Kconfig b/init/Kconfig
index d56d71fe0f8787..aa02aec6aa7d29 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1986,6 +1986,9 @@ source "kernel/Kconfig.locks"
config ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
bool
+config ARCH_HAS_PREPARE_SYNC_CORE_CMD
+ bool
+
config ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
bool
diff --git a/init/initramfs.c b/init/initramfs.c
index da79760b8be3a6..3127e0bf7bbd15 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -682,7 +682,7 @@ static void __init populate_initrd_image(char *err)
printk(KERN_INFO "rootfs image is not initramfs (%s); looks like an initrd\n",
err);
- file = filp_open("/initrd.image", O_WRONLY | O_CREAT, 0700);
+ file = filp_open("/initrd.image", O_WRONLY|O_CREAT|O_LARGEFILE, 0700);
if (IS_ERR(file))
return;
diff --git a/io_uring/futex.c b/io_uring/futex.c
index 3c3575303c3d00..792a03df58deac 100644
--- a/io_uring/futex.c
+++ b/io_uring/futex.c
@@ -159,6 +159,7 @@ bool io_futex_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
hlist_for_each_entry_safe(req, tmp, &ctx->futex_list, hash_node) {
if (!io_match_task_safe(req, task, cancel_all))
continue;
+ hlist_del_init(&req->hash_node);
__io_futex_cancel(ctx, req);
found = true;
}
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index cf348c33f4855e..5d4b448fdc5038 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2181,6 +2181,13 @@ static void io_init_req_drain(struct io_kiocb *req)
}
}
+static __cold int io_init_fail_req(struct io_kiocb *req, int err)
+{
+ /* ensure per-opcode data is cleared if we fail before prep */
+ memset(&req->cmd.data, 0, sizeof(req->cmd.data));
+ return err;
+}
+
static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
const struct io_uring_sqe *sqe)
__must_hold(&ctx->uring_lock)
@@ -2202,29 +2209,29 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
if (unlikely(opcode >= IORING_OP_LAST)) {
req->opcode = 0;
- return -EINVAL;
+ return io_init_fail_req(req, -EINVAL);
}
def = &io_issue_defs[opcode];
if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
/* enforce forwards compatibility on users */
if (sqe_flags & ~SQE_VALID_FLAGS)
- return -EINVAL;
+ return io_init_fail_req(req, -EINVAL);
if (sqe_flags & IOSQE_BUFFER_SELECT) {
if (!def->buffer_select)
- return -EOPNOTSUPP;
+ return io_init_fail_req(req, -EOPNOTSUPP);
req->buf_index = READ_ONCE(sqe->buf_group);
}
if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
ctx->drain_disabled = true;
if (sqe_flags & IOSQE_IO_DRAIN) {
if (ctx->drain_disabled)
- return -EOPNOTSUPP;
+ return io_init_fail_req(req, -EOPNOTSUPP);
io_init_req_drain(req);
}
}
if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
- return -EACCES;
+ return io_init_fail_req(req, -EACCES);
/* knock it to the slow queue path, will be drained there */
if (ctx->drain_active)
req->flags |= REQ_F_FORCE_ASYNC;
@@ -2237,9 +2244,9 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
}
if (!def->ioprio && sqe->ioprio)
- return -EINVAL;
+ return io_init_fail_req(req, -EINVAL);
if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
+ return io_init_fail_req(req, -EINVAL);
if (def->needs_file) {
struct io_submit_state *state = &ctx->submit_state;
@@ -2263,12 +2270,12 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
req->creds = xa_load(&ctx->personalities, personality);
if (!req->creds)
- return -EINVAL;
+ return io_init_fail_req(req, -EINVAL);
get_cred(req->creds);
ret = security_uring_override_creds(req->creds);
if (ret) {
put_cred(req->creds);
- return ret;
+ return io_init_fail_req(req, ret);
}
req->flags |= REQ_F_CREDS;
}
@@ -2539,7 +2546,7 @@ static bool current_pending_io(void)
static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
struct io_wait_queue *iowq)
{
- int io_wait, ret;
+ int ret;
if (unlikely(READ_ONCE(ctx->check_cq)))
return 1;
@@ -2557,7 +2564,6 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
* can take into account that the task is waiting for IO - turns out
* to be important for low QD IO.
*/
- io_wait = current->in_iowait;
if (current_pending_io())
current->in_iowait = 1;
ret = 0;
@@ -2565,7 +2571,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
schedule();
else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS))
ret = -ETIME;
- current->in_iowait = io_wait;
+ current->in_iowait = 0;
return ret;
}
@@ -2697,13 +2703,9 @@ void io_mem_free(void *ptr)
static void io_pages_free(struct page ***pages, int npages)
{
- struct page **page_array;
+ struct page **page_array = *pages;
int i;
- if (!pages)
- return;
-
- page_array = *pages;
if (!page_array)
return;
@@ -2719,7 +2721,7 @@ static void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
struct page **page_array;
unsigned int nr_pages;
void *page_addr;
- int ret, i;
+ int ret, i, pinned;
*npages = 0;
@@ -2733,12 +2735,12 @@ static void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
if (!page_array)
return ERR_PTR(-ENOMEM);
- ret = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
- page_array);
- if (ret != nr_pages) {
-err:
- io_pages_free(&page_array, ret > 0 ? ret : 0);
- return ret < 0 ? ERR_PTR(ret) : ERR_PTR(-EFAULT);
+
+ pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
+ page_array);
+ if (pinned != nr_pages) {
+ ret = (pinned < 0) ? pinned : -EFAULT;
+ goto free_pages;
}
page_addr = page_address(page_array[0]);
@@ -2752,7 +2754,7 @@ err:
* didn't support this feature.
*/
if (PageHighMem(page_array[i]))
- goto err;
+ goto free_pages;
/*
* No support for discontig pages for now, should either be a
@@ -2761,13 +2763,17 @@ err:
* just fail them with EINVAL.
*/
if (page_address(page_array[i]) != page_addr)
- goto err;
+ goto free_pages;
page_addr += PAGE_SIZE;
}
*pages = page_array;
*npages = nr_pages;
return page_to_virt(page_array[0]);
+
+free_pages:
+ io_pages_free(&page_array, pinned > 0 ? pinned : 0);
+ return ERR_PTR(ret);
}
static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr,
@@ -2789,14 +2795,15 @@ static void io_rings_free(struct io_ring_ctx *ctx)
if (!(ctx->flags & IORING_SETUP_NO_MMAP)) {
io_mem_free(ctx->rings);
io_mem_free(ctx->sq_sqes);
- ctx->rings = NULL;
- ctx->sq_sqes = NULL;
} else {
io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
ctx->n_ring_pages = 0;
io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
ctx->n_sqe_pages = 0;
}
+
+ ctx->rings = NULL;
+ ctx->sq_sqes = NULL;
}
void *io_mem_alloc(size_t size)
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 9be42bff936b95..693c26da4ee1a3 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -199,7 +199,7 @@ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
bl = io_buffer_get_list(ctx, req->buf_index);
if (likely(bl)) {
- if (bl->is_mapped)
+ if (bl->is_buf_ring)
ret = io_ring_buffer_select(req, len, bl, issue_flags);
else
ret = io_provided_buffer_select(req, len, bl);
@@ -253,7 +253,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
if (!nbufs)
return 0;
- if (bl->is_mapped) {
+ if (bl->is_buf_ring) {
i = bl->buf_ring->tail - bl->head;
if (bl->is_mmap) {
/*
@@ -274,7 +274,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
}
/* make sure it's seen as empty */
INIT_LIST_HEAD(&bl->buf_list);
- bl->is_mapped = 0;
+ bl->is_buf_ring = 0;
return i;
}
@@ -361,7 +361,7 @@ int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
if (bl) {
ret = -EINVAL;
/* can't use provide/remove buffers command on mapped buffers */
- if (!bl->is_mapped)
+ if (!bl->is_buf_ring)
ret = __io_remove_buffers(ctx, bl, p->nbufs);
}
io_ring_submit_unlock(ctx, issue_flags);
@@ -519,7 +519,7 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
}
}
/* can't add buffers via this command for a mapped buffer ring */
- if (bl->is_mapped) {
+ if (bl->is_buf_ring) {
ret = -EINVAL;
goto err;
}
@@ -575,7 +575,7 @@ static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
bl->buf_pages = pages;
bl->buf_nr_pages = nr_pages;
bl->buf_ring = br;
- bl->is_mapped = 1;
+ bl->is_buf_ring = 1;
bl->is_mmap = 0;
return 0;
error_unpin:
@@ -642,7 +642,7 @@ static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
}
ibf->inuse = 1;
bl->buf_ring = ibf->mem;
- bl->is_mapped = 1;
+ bl->is_buf_ring = 1;
bl->is_mmap = 1;
return 0;
}
@@ -688,7 +688,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
bl = io_buffer_get_list(ctx, reg.bgid);
if (bl) {
/* if mapped buffer ring OR classic exists, don't allow */
- if (bl->is_mapped || !list_empty(&bl->buf_list))
+ if (bl->is_buf_ring || !list_empty(&bl->buf_list))
return -EEXIST;
} else {
free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
@@ -730,7 +730,7 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
bl = io_buffer_get_list(ctx, reg.bgid);
if (!bl)
return -ENOENT;
- if (!bl->is_mapped)
+ if (!bl->is_buf_ring)
return -EINVAL;
__io_remove_buffers(ctx, bl, -1U);
@@ -757,7 +757,7 @@ int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
bl = io_buffer_get_list(ctx, buf_status.buf_group);
if (!bl)
return -ENOENT;
- if (!bl->is_mapped)
+ if (!bl->is_buf_ring)
return -EINVAL;
buf_status.head = bl->head;
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index 5218bfd79e871e..1c7b654ee7263a 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -26,7 +26,7 @@ struct io_buffer_list {
__u16 mask;
/* ring mapped provided buffers */
- __u8 is_mapped;
+ __u8 is_buf_ring;
/* ring mapped provided buffers, but mmap'ed by application */
__u8 is_mmap;
/* bl is visible from an RCU point of view for lookup */
diff --git a/io_uring/net.c b/io_uring/net.c
index 19451f0dbf8136..1e7665ff6ef702 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -326,7 +326,10 @@ int io_send_prep_async(struct io_kiocb *req)
struct io_async_msghdr *io;
int ret;
- if (!zc->addr || req_has_async_data(req))
+ if (req_has_async_data(req))
+ return 0;
+ zc->done_io = 0;
+ if (!zc->addr)
return 0;
io = io_msg_alloc_async_prep(req);
if (!io)
@@ -353,8 +356,10 @@ static int io_setup_async_addr(struct io_kiocb *req,
int io_sendmsg_prep_async(struct io_kiocb *req)
{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
int ret;
+ sr->done_io = 0;
if (!io_msg_alloc_async_prep(req))
return -ENOMEM;
ret = io_sendmsg_copy_hdr(req, req->async_data);
@@ -608,9 +613,11 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req,
int io_recvmsg_prep_async(struct io_kiocb *req)
{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *iomsg;
int ret;
+ sr->done_io = 0;
if (!io_msg_alloc_async_prep(req))
return -ENOMEM;
iomsg = req->async_data;
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 5f779139cae184..6db1dcb2c79776 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -996,7 +996,6 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
struct io_hash_bucket *bucket;
struct io_kiocb *preq;
int ret2, ret = 0;
- struct io_tw_state ts = { .locked = true };
io_ring_submit_lock(ctx, issue_flags);
preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
@@ -1045,7 +1044,8 @@ found:
req_set_fail(preq);
io_req_set_res(preq, -ECANCELED, 0);
- io_req_task_complete(preq, &ts);
+ preq->io_task_work.func = io_req_task_complete;
+ io_req_task_work_add(preq);
out:
io_ring_submit_unlock(ctx, issue_flags);
if (ret < 0) {
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 47e097ab5d7e4f..0585ebcc9773d3 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -947,6 +947,8 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
*/
if (io_kbuf_recycle(req, issue_flags))
rw->len = 0;
+ if (issue_flags & IO_URING_F_MULTISHOT)
+ return IOU_ISSUE_SKIP_COMPLETE;
return -EAGAIN;
}
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index 363052b4ea76a2..3983708cef5b43 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -274,6 +274,10 @@ static int io_sq_thread(void *data)
char buf[TASK_COMM_LEN];
DEFINE_WAIT(wait);
+ /* offload context creation failed, just exit */
+ if (!current->io_uring)
+ goto err_out;
+
snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
set_task_comm(current, buf);
@@ -371,7 +375,7 @@ static int io_sq_thread(void *data)
atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
io_run_task_work();
mutex_unlock(&sqd->lock);
-
+err_out:
complete(&sqd->exited);
do_exit(0);
}
diff --git a/io_uring/waitid.c b/io_uring/waitid.c
index 6f851978606d98..77d340666cb95b 100644
--- a/io_uring/waitid.c
+++ b/io_uring/waitid.c
@@ -125,12 +125,6 @@ static void io_waitid_complete(struct io_kiocb *req, int ret)
lockdep_assert_held(&req->ctx->uring_lock);
- /*
- * Did cancel find it meanwhile?
- */
- if (hlist_unhashed(&req->hash_node))
- return;
-
hlist_del_init(&req->hash_node);
ret = io_waitid_finish(req, ret);
@@ -202,6 +196,7 @@ bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) {
if (!io_match_task_safe(req, task, cancel_all))
continue;
+ hlist_del_init(&req->hash_node);
__io_waitid_cancel(ctx, req);
found = true;
}
diff --git a/kernel/crash_reserve.c b/kernel/crash_reserve.c
index bbb6c3cb00e460..066668799f7572 100644
--- a/kernel/crash_reserve.c
+++ b/kernel/crash_reserve.c
@@ -366,8 +366,10 @@ static int __init reserve_crashkernel_low(unsigned long long low_size)
crashk_low_res.start = low_base;
crashk_low_res.end = low_base + low_size - 1;
+#ifdef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
insert_resource(&iomem_resource, &crashk_low_res);
#endif
+#endif
return 0;
}
@@ -448,8 +450,12 @@ retry:
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
+#ifdef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
+ insert_resource(&iomem_resource, &crashk_res);
+#endif
}
+#ifndef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
static __init int insert_crashkernel_resources(void)
{
if (crashk_res.start < crashk_res.end)
@@ -462,3 +468,4 @@ static __init int insert_crashkernel_resources(void)
}
early_initcall(insert_crashkernel_resources);
#endif
+#endif
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 77974cea3e6913..86fe172b595823 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -1003,8 +1003,7 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
dma_addr_t tbl_dma_addr =
phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
unsigned long max_slots = get_max_slots(boundary_mask);
- unsigned int iotlb_align_mask =
- dma_get_min_align_mask(dev) | alloc_align_mask;
+ unsigned int iotlb_align_mask = dma_get_min_align_mask(dev);
unsigned int nslots = nr_slots(alloc_size), stride;
unsigned int offset = swiotlb_align_offset(dev, orig_addr);
unsigned int index, slots_checked, count = 0, i;
@@ -1016,18 +1015,29 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
BUG_ON(area_index >= pool->nareas);
/*
- * For allocations of PAGE_SIZE or larger only look for page aligned
- * allocations.
+ * Historically, swiotlb allocations >= PAGE_SIZE were guaranteed to be
+ * page-aligned in the absence of any other alignment requirements.
+ * 'alloc_align_mask' was later introduced to specify the alignment
+ * explicitly; however, it is passed as zero for streaming mappings
+ * and so we preserve the old behaviour there in case any drivers are
+ * relying on it.
*/
- if (alloc_size >= PAGE_SIZE)
- iotlb_align_mask |= ~PAGE_MASK;
- iotlb_align_mask &= ~(IO_TLB_SIZE - 1);
+ if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE)
+ alloc_align_mask = PAGE_SIZE - 1;
+
+ /*
+ * Ensure that the allocation is at least slot-aligned and update
+ * 'iotlb_align_mask' to ignore bits that will be preserved when
+ * offsetting into the allocation.
+ */
+ alloc_align_mask |= (IO_TLB_SIZE - 1);
+ iotlb_align_mask &= ~alloc_align_mask;
/*
* For mappings with an alignment requirement don't bother looping to
* unaligned slots once we found an aligned one.
*/
- stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
+ stride = get_max_slots(max(alloc_align_mask, iotlb_align_mask));
spin_lock_irqsave(&area->lock, flags);
if (unlikely(nslots > pool->area_nslabs - area->used))
@@ -1037,11 +1047,14 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
index = area->index;
for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
+ phys_addr_t tlb_addr;
+
slot_index = slot_base + index;
+ tlb_addr = slot_addr(tbl_dma_addr, slot_index);
- if (orig_addr &&
- (slot_addr(tbl_dma_addr, slot_index) &
- iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
+ if ((tlb_addr & alloc_align_mask) ||
+ (orig_addr && (tlb_addr & iotlb_align_mask) !=
+ (orig_addr & iotlb_align_mask))) {
index = wrap_area_index(pool, index + 1);
slots_checked++;
continue;
@@ -1677,16 +1690,24 @@ struct page *swiotlb_alloc(struct device *dev, size_t size)
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
struct io_tlb_pool *pool;
phys_addr_t tlb_addr;
+ unsigned int align;
int index;
if (!mem)
return NULL;
- index = swiotlb_find_slots(dev, 0, size, 0, &pool);
+ align = (1 << (get_order(size) + PAGE_SHIFT)) - 1;
+ index = swiotlb_find_slots(dev, 0, size, align, &pool);
if (index == -1)
return NULL;
tlb_addr = slot_addr(pool->start, index);
+ if (unlikely(!PAGE_ALIGNED(tlb_addr))) {
+ dev_WARN_ONCE(dev, 1, "Cannot allocate pages from non page-aligned swiotlb addr %pa.\n",
+ &tlb_addr);
+ swiotlb_release_slots(dev, tlb_addr);
+ return NULL;
+ }
return pfn_to_page(PFN_DOWN(tlb_addr));
}
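To make the reworked slot search concrete, here is one worked configuration (numbers illustrative: 2 KiB slots with IO_TLB_SHIFT = 11, 4 KiB pages, a streaming mapping with no driver min-align mask):

/* alloc_size = 8 KiB, alloc_align_mask = 0, iotlb_align_mask = 0       */
/*   legacy rule kicks in:  alloc_align_mask = PAGE_SIZE - 1 = 0xfff    */
/*   slot floor:            alloc_align_mask |= IO_TLB_SIZE - 1 (same)  */
/*   stride = (0xfff >> IO_TLB_SHIFT) + 1 = 2 slots                     */
/* so only page-aligned slots are probed, and a candidate is rejected   */
/* unless (tlb_addr & 0xfff) == 0, preserving the old page-alignment    */
/* guarantee for >= PAGE_SIZE streaming mappings.                       */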
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index 88cb3c88aaa5c6..90843cc3858806 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -57,8 +57,14 @@ long syscall_trace_enter(struct pt_regs *regs, long syscall,
/* Either of the above might have changed the syscall number */
syscall = syscall_get_nr(current, regs);
- if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
+ if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT)) {
trace_sys_enter(regs, syscall);
+ /*
+ * Probes or BPF hooks in the tracepoint may have changed the
+ * system call number as well.
+ */
+ syscall = syscall_get_nr(current, regs);
+ }
syscall_enter_audit(regs, syscall);
diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig
index c3ced519e14ba4..f3e0329337f61c 100644
--- a/kernel/module/Kconfig
+++ b/kernel/module/Kconfig
@@ -236,6 +236,10 @@ choice
possible to load a signed module containing the algorithm to check
the signature on that module.
+config MODULE_SIG_SHA1
+ bool "Sign modules with SHA-1"
+ select CRYPTO_SHA1
+
config MODULE_SIG_SHA256
bool "Sign modules with SHA-256"
select CRYPTO_SHA256
@@ -265,6 +269,7 @@ endchoice
config MODULE_SIG_HASH
string
depends on MODULE_SIG || IMA_APPRAISE_MODSIG
+ default "sha1" if MODULE_SIG_SHA1
default "sha256" if MODULE_SIG_SHA256
default "sha384" if MODULE_SIG_SHA384
default "sha512" if MODULE_SIG_SHA512
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index b663a97f5867a1..d4856ec6157061 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -201,7 +201,7 @@ static int __init test_suspend(void)
}
/* RTCs have initialized by now too ... can we use one? */
- dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm);
+ dev = class_find_device(&rtc_class, NULL, NULL, has_wakealarm);
if (dev) {
rtc = rtc_class_open(dev_name(dev));
put_device(dev);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index ca5146006b94c6..adf99c05adcafc 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2009,6 +2009,12 @@ static int console_trylock_spinning(void)
*/
mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
+ /*
+ * Update @console_may_schedule for trylock because the previous
+ * owner may have been schedulable.
+ */
+ console_may_schedule = 0;
+
return 1;
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d44efa0d06112a..7019a40457a6da 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6647,7 +6647,9 @@ static void __sched notrace __schedule(unsigned int sched_mode)
* if (signal_pending_state()) if (p->state & @state)
*
* Also, the membarrier system call requires a full memory barrier
- * after coming from user-space, before storing to rq->curr.
+ * after coming from user-space, before storing to rq->curr; this
+ * barrier matches a full barrier in the proximity of the membarrier
+ * system call exit.
*/
rq_lock(rq, &rf);
smp_mb__after_spinlock();
@@ -6718,12 +6720,20 @@ static void __sched notrace __schedule(unsigned int sched_mode)
*
* Here are the schemes providing that barrier on the
* various architectures:
- * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
- * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
+ * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
+ * RISC-V. switch_mm() relies on membarrier_arch_switch_mm()
+ * on PowerPC and on RISC-V.
* - finish_lock_switch() for weakly-ordered
* architectures where spin_unlock is a full barrier,
* - switch_to() for arm64 (weakly-ordered, spin_unlock
* is a RELEASE barrier),
+ *
+ * The barrier matches a full barrier in the proximity of
+ * the membarrier system call entry.
+ *
+ * On RISC-V, this barrier pairing is also needed for the
+ * SYNC_CORE command when switching between processes, cf.
+ * the inline comments in membarrier_arch_switch_mm().
*/
++*switch_count;
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index 4e715b9b278e7f..809194cd779f42 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -254,7 +254,7 @@ static int membarrier_global_expedited(void)
return 0;
/*
- * Matches memory barriers around rq->curr modification in
+ * Matches memory barriers after rq->curr modification in
* scheduler.
*/
smp_mb(); /* system call entry is not a mb. */
@@ -304,7 +304,7 @@ static int membarrier_global_expedited(void)
/*
* Memory barrier on the caller thread _after_ we finished
- * waiting for the last IPI. Matches memory barriers around
+ * waiting for the last IPI. Matches memory barriers before
* rq->curr modification in scheduler.
*/
smp_mb(); /* exit from system call is not a mb */
@@ -324,6 +324,7 @@ static int membarrier_private_expedited(int flags, int cpu_id)
MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
return -EPERM;
ipi_func = ipi_sync_core;
+ prepare_sync_core_cmd(mm);
} else if (flags == MEMBARRIER_FLAG_RSEQ) {
if (!IS_ENABLED(CONFIG_RSEQ))
return -EINVAL;
@@ -343,8 +344,12 @@ static int membarrier_private_expedited(int flags, int cpu_id)
return 0;
/*
- * Matches memory barriers around rq->curr modification in
+ * Matches memory barriers after rq->curr modification in
* scheduler.
+ *
+ * On RISC-V, this barrier pairing is also needed for the
+ * SYNC_CORE command when switching between processes, cf.
+ * the inline comments in membarrier_arch_switch_mm().
*/
smp_mb(); /* system call entry is not a mb. */
@@ -420,7 +425,7 @@ out:
/*
* Memory barrier on the caller thread _after_ we finished
- * waiting for the last IPI. Matches memory barriers around
+ * waiting for the last IPI. Matches memory barriers before
* rq->curr modification in scheduler.
*/
smp_mb(); /* exit from system call is not a mb */
diff --git a/kernel/sys.c b/kernel/sys.c
index f8e543f1e38a06..8bb106a56b3a5f 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2408,8 +2408,11 @@ static inline int prctl_set_mdwe(unsigned long bits, unsigned long arg3,
if (bits & PR_MDWE_NO_INHERIT && !(bits & PR_MDWE_REFUSE_EXEC_GAIN))
return -EINVAL;
- /* PARISC cannot allow mdwe as it needs writable stacks */
- if (IS_ENABLED(CONFIG_PARISC))
+ /*
+ * EOPNOTSUPP might be more appropriate here in principle, but
+ * existing userspace depends on EINVAL specifically.
+ */
+ if (!arch_memory_deny_write_exec_supported())
return -EINVAL;
current_bits = get_current_mdwe();
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 4657cb8e8b1f94..5abfa43906732b 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -134,7 +134,7 @@ static struct class_interface alarmtimer_rtc_interface = {
static int alarmtimer_rtc_interface_setup(void)
{
- alarmtimer_rtc_interface.class = rtc_class;
+ alarmtimer_rtc_interface.class = &rtc_class;
return class_interface_register(&alarmtimer_rtc_interface);
}
static void alarmtimer_rtc_interface_remove(void)
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index 9de66bbbb3d155..4782edcbe7b9b4 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -129,15 +129,17 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
goto out;
}
pccontext->clk = clk;
- fp->private_data = pccontext;
- if (clk->ops.open)
+ if (clk->ops.open) {
err = clk->ops.open(pccontext, fp->f_mode);
- else
- err = 0;
-
- if (!err) {
- get_device(clk->dev);
+ if (err) {
+ kfree(pccontext);
+ goto out;
+ }
}
+
+ fp->private_data = pccontext;
+ get_device(clk->dev);
+ err = 0;
out:
up_read(&clk->rwsem);
return err;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index e69e75d3858c21..dee29f1f5b75f3 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -642,7 +642,8 @@ trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
* the base lock:
*/
if (base->is_idle) {
- WARN_ON_ONCE(!(timer->flags & TIMER_PINNED));
+ WARN_ON_ONCE(!(timer->flags & TIMER_PINNED ||
+ tick_nohz_full_cpu(base->cpu)));
wake_up_nohz_cpu(base->cpu);
}
}
@@ -2292,6 +2293,13 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
*/
if (!base_local->is_idle && time_after(nextevt, basej + 1)) {
base_local->is_idle = true;
+ /*
+ * Global timers queued locally while running in a task
+ * in nohz_full mode need a self-IPI to kick reprogramming
+ * in IRQ tail.
+ */
+ if (tick_nohz_full_cpu(base_local->cpu))
+ base_global->is_idle = true;
trace_timer_base_idle(true, base_local->cpu);
}
*idle = base_local->is_idle;
@@ -2364,6 +2372,8 @@ void timer_clear_idle(void)
* path. Required for BASE_LOCAL only.
*/
__this_cpu_write(timer_bases[BASE_LOCAL].is_idle, false);
+ if (tick_nohz_full_cpu(smp_processor_id()))
+ __this_cpu_write(timer_bases[BASE_GLOBAL].is_idle, false);
trace_timer_base_idle(false, smp_processor_id());
/* Activate without holding the timer_base->lock */
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 611cd904f0357e..c63a0afdcebed5 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -1038,8 +1038,15 @@ void tmigr_handle_remote(void)
* in tmigr_handle_remote_up() anyway. Keep this check to speed up the
* return when nothing has to be done.
*/
- if (!tmigr_check_migrator(tmc->tmgroup, tmc->childmask))
- return;
+ if (!tmigr_check_migrator(tmc->tmgroup, tmc->childmask)) {
+ /*
+ * If this CPU was an idle migrator, make sure to clear its wakeup
+ * value so it won't chase timers that have already expired elsewhere.
+ * This avoids endless requeue from tmigr_new_timer().
+ */
+ if (READ_ONCE(tmc->wakeup) == KTIME_MAX)
+ return;
+ }
data.now = get_jiffies_update(&data.basej);
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 217169de0920ed..dfe3ee6035ecc7 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -839,7 +839,7 @@ out:
void store_trace_entry_data(void *edata, struct trace_probe *tp, struct pt_regs *regs)
{
struct probe_entry_arg *earg = tp->entry_arg;
- unsigned long val;
+ unsigned long val = 0;
int i;
if (!earg)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index f4a12980a8da34..c63a5fbf1f1c2b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2703,18 +2703,6 @@ config MEMCPY_KUNIT_TEST
If unsure, say N.
-config MEMCPY_SLOW_KUNIT_TEST
- bool "Include exhaustive memcpy tests"
- depends on MEMCPY_KUNIT_TEST
- default y
- help
- Some memcpy tests are quite exhaustive in checking for overlaps
- and bit ranges. These can be very slow, so they are split out
- as a separate config, in case they need to be disabled.
-
- Note this config option will be replaced by the use of KUnit test
- attributes.
-
config IS_SIGNED_TYPE_KUNIT_TEST
tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS
depends on KUNIT
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 48a67058f84eba..e81e1ac4a919b5 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -119,6 +119,8 @@ config UBSAN_SIGNED_WRAP
bool "Perform checking for signed arithmetic wrap-around"
default UBSAN
depends on !COMPILE_TEST
+ # The no_sanitize attribute was introduced in GCC with version 8.
+ depends on !CC_IS_GCC || GCC_VERSION >= 80000
depends on $(cc-option,-fsanitize=signed-integer-overflow)
help
This option enables -fsanitize=signed-integer-overflow which checks
diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
index 97386643860887..47e34950b665c3 100644
--- a/lib/fonts/fonts.c
+++ b/lib/fonts/fonts.c
@@ -96,18 +96,21 @@ EXPORT_SYMBOL(find_font);
* get_default_font - get default font
* @xres: screen size of X
* @yres: screen size of Y
- * @font_w: bit array of supported widths (1 - 32)
- * @font_h: bit array of supported heights (1 - 32)
+ * @font_w: bit array of supported widths (1 - FB_MAX_BLIT_WIDTH)
+ * @font_h: bit array of supported heights (1 - FB_MAX_BLIT_HEIGHT)
*
* Get the default font for a specified screen size.
* Dimensions are in pixels.
*
+ * font_w or font_h being NULL means all values are supported.
+ *
* Returns %NULL if no font is found, or a pointer to the
* chosen font.
*
*/
-const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
- u32 font_h)
+const struct font_desc *get_default_font(int xres, int yres,
+ unsigned long *font_w,
+ unsigned long *font_h)
{
int i, c, cc, res;
const struct font_desc *f, *g;
@@ -135,8 +138,8 @@ const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
if (res > 20)
c += 20 - res;
- if ((font_w & (1U << (f->width - 1))) &&
- (font_h & (1U << (f->height - 1))))
+ if ((!font_w || test_bit(f->width - 1, font_w)) &&
+ (!font_h || test_bit(f->height - 1, font_h)))
c += 1000;
if (c > cc) {
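On the caller side the new signature takes bitmaps, with NULL meaning "any size". A sketch of a caller that only accepts 8-pixel-wide fonts (values invented):

	DECLARE_BITMAP(font_w, FB_MAX_BLIT_WIDTH) = { 0 };
	const struct font_desc *f;

	set_bit(8 - 1, font_w);				/* width 8 only */
	f = get_default_font(1024, 768, font_w, NULL);	/* any height   */
	if (f)
		pr_info("chose %s (%dx%d)\n", f->name, f->width, f->height);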
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
index 30e00ef0bf2e0f..fd16e6ce53d1db 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/memcpy_kunit.c
@@ -309,9 +309,6 @@ static void set_random_nonzero(struct kunit *test, u8 *byte)
static void init_large(struct kunit *test)
{
- if (!IS_ENABLED(CONFIG_MEMCPY_SLOW_KUNIT_TEST))
- kunit_skip(test, "Slow test skipped. Enable with CONFIG_MEMCPY_SLOW_KUNIT_TEST=y");
-
/* Get many bit patterns. */
get_random_bytes(large_src, ARRAY_SIZE(large_src));
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
index 65e8a72a83bfaa..4ef31b0bb74d68 100644
--- a/lib/overflow_kunit.c
+++ b/lib/overflow_kunit.c
@@ -1172,6 +1172,24 @@ static void castable_to_type_test(struct kunit *test)
#undef TEST_CASTABLE_TO_TYPE
}
+struct foo {
+ int a;
+ u32 counter;
+ s16 array[] __counted_by(counter);
+};
+
+static void DEFINE_FLEX_test(struct kunit *test)
+{
+ DEFINE_RAW_FLEX(struct foo, two, array, 2);
+ DEFINE_FLEX(struct foo, eight, array, counter, 8);
+ DEFINE_FLEX(struct foo, empty, array, counter, 0);
+
+ KUNIT_EXPECT_EQ(test, __struct_size(two),
+ sizeof(struct foo) + sizeof(s16) + sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __struct_size(eight), 24);
+ KUNIT_EXPECT_EQ(test, __struct_size(empty), sizeof(struct foo));
+}
+
static struct kunit_case overflow_test_cases[] = {
KUNIT_CASE(u8_u8__u8_overflow_test),
KUNIT_CASE(s8_s8__s8_overflow_test),
@@ -1194,6 +1212,7 @@ static struct kunit_case overflow_test_cases[] = {
KUNIT_CASE(overflows_type_test),
KUNIT_CASE(same_type_test),
KUNIT_CASE(castable_to_type_test),
+ KUNIT_CASE(DEFINE_FLEX_test),
{}
};
diff --git a/mm/filemap.c b/mm/filemap.c
index 7437b2bd75c1ab..30de18c4fd28a9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -4197,7 +4197,23 @@ static void filemap_cachestat(struct address_space *mapping,
/* shmem file - in swap cache */
swp_entry_t swp = radix_to_swp_entry(folio);
+ /* swapin error results in poisoned entry */
+ if (non_swap_entry(swp))
+ goto resched;
+
+ /*
+ * Getting a swap entry from the shmem
+ * inode means we beat
+ * shmem_unuse(). rcu_read_lock()
+ * ensures swapoff waits for us before
+ * freeing the swapper space. However,
+ * we can race with swapping and
+ * invalidation, so there might not be
+ * a shadow in the swapcache (yet).
+ */
shadow = get_shadow_from_swap_cache(swp);
+ if (!shadow)
+ goto resched;
}
#endif
if (workingset_test_recent(shadow, true, &workingset))
diff --git a/mm/memory.c b/mm/memory.c
index f2bc6dd15eb830..904f70b994985a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1536,7 +1536,9 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
arch_check_zapped_pte(vma, ptent);
tlb_remove_tlb_entry(tlb, pte, addr);
- VM_WARN_ON_ONCE(userfaultfd_wp(vma));
+ if (userfaultfd_pte_wp(vma, ptent))
+ zap_install_uffd_wp_if_needed(vma, addr, pte, 1,
+ details, ptent);
ksm_might_unmap_zero_page(mm, ptent);
return 1;
}
diff --git a/mm/mmap.c b/mm/mmap.c
index 04da02114c6f99..6dbda99a47da1c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -64,7 +64,7 @@
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
-const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
+int mmap_rnd_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
diff --git a/mm/page_owner.c b/mm/page_owner.c
index e7139952ffd9de..d17d1351ec84af 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -54,6 +54,22 @@ static depot_stack_handle_t early_handle;
static void init_early_allocated_pages(void);
+static inline void set_current_in_page_owner(void)
+{
+ /*
+ * Avoid recursion.
+ *
+ * We might need to allocate more memory from page_owner code, so make
+ * sure to signal it in order to avoid recursion.
+ */
+ current->in_page_owner = 1;
+}
+
+static inline void unset_current_in_page_owner(void)
+{
+ current->in_page_owner = 0;
+}
+
static int __init early_page_owner_param(char *buf)
{
int ret = kstrtobool(buf, &page_owner_enabled);
@@ -133,23 +149,16 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
depot_stack_handle_t handle;
unsigned int nr_entries;
- /*
- * Avoid recursion.
- *
- * Sometimes page metadata allocation tracking requires more
- * memory to be allocated:
- * - when new stack trace is saved to stack depot
- */
if (current->in_page_owner)
return dummy_handle;
- current->in_page_owner = 1;
+ set_current_in_page_owner();
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
handle = stack_depot_save(entries, nr_entries, flags);
if (!handle)
handle = failure_handle;
+ unset_current_in_page_owner();
- current->in_page_owner = 0;
return handle;
}
@@ -164,9 +173,13 @@ static void add_stack_record_to_list(struct stack_record *stack_record,
gfp_mask &= (GFP_ATOMIC | GFP_KERNEL);
gfp_mask |= __GFP_NOWARN;
+ set_current_in_page_owner();
stack = kmalloc(sizeof(*stack), gfp_mask);
- if (!stack)
+ if (!stack) {
+ unset_current_in_page_owner();
return;
+ }
+ unset_current_in_page_owner();
stack->stack_record = stack_record;
stack->next = NULL;
diff --git a/mm/shmem_quota.c b/mm/shmem_quota.c
index 062d1c1097ae35..ce514e700d2f65 100644
--- a/mm/shmem_quota.c
+++ b/mm/shmem_quota.c
@@ -116,7 +116,7 @@ static int shmem_free_file_info(struct super_block *sb, int type)
static int shmem_get_next_id(struct super_block *sb, struct kqid *qid)
{
struct mem_dqinfo *info = sb_dqinfo(sb, qid->type);
- struct rb_node *node = ((struct rb_root *)info->dqi_priv)->rb_node;
+ struct rb_node *node;
qid_t id = from_kqid(&init_user_ns, *qid);
struct quota_info *dqopt = sb_dqopt(sb);
struct quota_id *entry = NULL;
@@ -126,6 +126,7 @@ static int shmem_get_next_id(struct super_block *sb, struct kqid *qid)
return -ESRCH;
down_read(&dqopt->dqio_sem);
+ node = ((struct rb_root *)info->dqi_priv)->rb_node;
while (node) {
entry = rb_entry(node, struct quota_id, node);
@@ -165,7 +166,7 @@ out_unlock:
static int shmem_acquire_dquot(struct dquot *dquot)
{
struct mem_dqinfo *info = sb_dqinfo(dquot->dq_sb, dquot->dq_id.type);
- struct rb_node **n = &((struct rb_root *)info->dqi_priv)->rb_node;
+ struct rb_node **n;
struct shmem_sb_info *sbinfo = dquot->dq_sb->s_fs_info;
struct rb_node *parent = NULL, *new_node = NULL;
struct quota_id *new_entry, *entry;
@@ -176,6 +177,8 @@ static int shmem_acquire_dquot(struct dquot *dquot)
mutex_lock(&dquot->dq_lock);
down_write(&dqopt->dqio_sem);
+ n = &((struct rb_root *)info->dqi_priv)->rb_node;
+
while (*n) {
parent = *n;
entry = rb_entry(parent, struct quota_id, node);
@@ -264,7 +267,7 @@ static bool shmem_is_empty_dquot(struct dquot *dquot)
static int shmem_release_dquot(struct dquot *dquot)
{
struct mem_dqinfo *info = sb_dqinfo(dquot->dq_sb, dquot->dq_id.type);
- struct rb_node *node = ((struct rb_root *)info->dqi_priv)->rb_node;
+ struct rb_node *node;
qid_t id = from_kqid(&init_user_ns, dquot->dq_id);
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
struct quota_id *entry = NULL;
@@ -275,6 +278,7 @@ static int shmem_release_dquot(struct dquot *dquot)
goto out_dqlock;
down_write(&dqopt->dqio_sem);
+ node = ((struct rb_root *)info->dqi_priv)->rb_node;
while (node) {
entry = rb_entry(node, struct quota_id, node);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 712160cd41ecac..3c3539c573e7fe 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -1444,7 +1444,8 @@ static int uffd_move_lock(struct mm_struct *mm,
*/
down_read(&(*dst_vmap)->vm_lock->lock);
if (*dst_vmap != *src_vmap)
- down_read(&(*src_vmap)->vm_lock->lock);
+ down_read_nested(&(*src_vmap)->vm_lock->lock,
+ SINGLE_DEPTH_NESTING);
}
mmap_read_unlock(mm);
return err;
diff --git a/mm/zswap.c b/mm/zswap.c
index 9dec853647c8e4..caed028945b046 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1080,7 +1080,17 @@ static void zswap_decompress(struct zswap_entry *entry, struct page *page)
mutex_lock(&acomp_ctx->mutex);
src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
- if (acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) {
+ /*
+ * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
+ * to do crypto_acomp_decompress() which might sleep. In such cases, we must
+ * resort to copying the buffer to a temporary one.
+ * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
+ * such as a kmap address of high memory or even a vmap address.
+ * However, sg_init_one is only equipped to handle linearly mapped low memory.
+ * In such cases, we also must copy the buffer to a temporary and lowmem one.
+ */
+ if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
+ !virt_addr_valid(src)) {
memcpy(acomp_ctx->buffer, src, entry->length);
src = acomp_ctx->buffer;
zpool_unmap_handle(zpool, entry->handle);
@@ -1094,7 +1104,7 @@ static void zswap_decompress(struct zswap_entry *entry, struct page *page)
BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
mutex_unlock(&acomp_ctx->mutex);
- if (!acomp_ctx->is_sleepable || zpool_can_sleep_mapped(zpool))
+ if (src != acomp_ctx->buffer)
zpool_unmap_handle(zpool, entry->handle);
}
@@ -1313,6 +1323,14 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
return 0;
+ /*
+ * The shrinker resumes swap writeback, which will enter block
+ * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
+ * rules (may_enter_fs()), which apply on a per-folio basis.
+ */
+ if (!gfp_has_io_fs(sc->gfp_mask))
+ return 0;
+
#ifdef CONFIG_MEMCG_KMEM
mem_cgroup_flush_stats(memcg);
nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
@@ -1618,6 +1636,7 @@ bool zswap_load(struct folio *folio)
swp_entry_t swp = folio->swap;
pgoff_t offset = swp_offset(swp);
struct page *page = &folio->page;
+ bool swapcache = folio_test_swapcache(folio);
struct zswap_tree *tree = swap_zswap_tree(swp);
struct zswap_entry *entry;
u8 *dst;
@@ -1630,7 +1649,20 @@ bool zswap_load(struct folio *folio)
spin_unlock(&tree->lock);
return false;
}
- zswap_rb_erase(&tree->rbroot, entry);
+ /*
+ * When reading into the swapcache, invalidate our entry. The
+ * swapcache can be the authoritative owner of the page and
+ * its mappings, and the pressure that results from having two
+ * in-memory copies outweighs any benefits of caching the
+ * compression work.
+ *
+ * (Most swapins go through the swapcache. The notable
+ * exception is the singleton fault on SWP_SYNCHRONOUS_IO
+ * files, which reads into a private page and may free it if
+ * the fault fails. We remain the primary owner of the entry.)
+ */
+ if (swapcache)
+ zswap_rb_erase(&tree->rbroot, entry);
spin_unlock(&tree->lock);
if (entry->length)
@@ -1645,9 +1677,10 @@ bool zswap_load(struct folio *folio)
if (entry->objcg)
count_objcg_event(entry->objcg, ZSWPIN);
- zswap_entry_free(entry);
-
- folio_mark_dirty(folio);
+ if (swapcache) {
+ zswap_entry_free(entry);
+ folio_mark_dirty(folio);
+ }
return true;
}
diff --git a/scripts/Makefile b/scripts/Makefile
index 6673cbb6194fe0..bc90520a54266c 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -33,9 +33,12 @@ HOSTLDLIBS_sign-file = $(shell $(HOSTPKG_CONFIG) --libs libcrypto 2> /dev/null |
ifdef CONFIG_UNWINDER_ORC
ifeq ($(ARCH),x86_64)
-ARCH := x86
+SRCARCH := x86
endif
-HOSTCFLAGS_sorttable.o += -I$(srctree)/tools/arch/x86/include
+ifeq ($(ARCH),loongarch)
+SRCARCH := loongarch
+endif
+HOSTCFLAGS_sorttable.o += -I$(srctree)/tools/arch/$(SRCARCH)/include
HOSTCFLAGS_sorttable.o += -DUNWINDER_ORC_ENABLED
endif
diff --git a/sound/core/control.c b/sound/core/control.c
index 8367fd4853716a..fb0c60044f7b3b 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -1275,12 +1275,12 @@ static int snd_ctl_elem_read(struct snd_card *card,
static int snd_ctl_elem_read_user(struct snd_card *card,
struct snd_ctl_elem_value __user *_control)
{
- struct snd_ctl_elem_value *control;
+ struct snd_ctl_elem_value *control __free(kfree) = NULL;
int result;
control = memdup_user(_control, sizeof(*control));
if (IS_ERR(control))
- return PTR_ERR(control);
+ return PTR_ERR(no_free_ptr(control));
result = snd_ctl_elem_read(card, control);
if (result < 0)
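
The conversion above adopts the scope-based cleanup helpers from linux/cleanup.h. A self-contained sketch of the idiom, where process() is a hypothetical consumer: __free(kfree) frees the pointer when it leaves scope, and no_free_ptr() disarms that and returns the raw pointer, which is how the ERR_PTR escapes without a bogus kfree().

#include <linux/cleanup.h>
#include <linux/slab.h>

static int process(void *buf, size_t len);	/* hypothetical */

static int read_from_user(const void __user *uptr, size_t len)
{
	void *buf __free(kfree) = memdup_user(uptr, len);

	if (IS_ERR(buf))
		return PTR_ERR(no_free_ptr(buf));	/* nothing to kfree */

	return process(buf, len);	/* buf kfree()d on return */
}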
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e904f62e195267..a17c36a36aa537 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2645,6 +2645,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x65f5, "Clevo PD50PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x66a2, "Clevo PE60RNE", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x66a6, "Clevo PE60SN[CDE]-[GS]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
@@ -6964,6 +6965,25 @@ static void alc256_fixup_mic_no_presence_and_resume(struct hda_codec *codec,
}
}
+static void alc256_decrease_headphone_amp_val(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ u32 caps;
+ u8 nsteps, offs;
+
+ if (action != HDA_FIXUP_ACT_PRE_PROBE)
+ return;
+
+ caps = query_amp_caps(codec, 0x3, HDA_OUTPUT);
+ nsteps = ((caps & AC_AMPCAP_NUM_STEPS) >> AC_AMPCAP_NUM_STEPS_SHIFT) - 10;
+ offs = ((caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT) - 10;
+ caps &= ~AC_AMPCAP_NUM_STEPS & ~AC_AMPCAP_OFFSET;
+ caps |= (nsteps << AC_AMPCAP_NUM_STEPS_SHIFT) | (offs << AC_AMPCAP_OFFSET_SHIFT);
+
+ if (snd_hda_override_amp_caps(codec, 0x3, HDA_OUTPUT, caps))
+ codec_warn(codec, "failed to override amp caps for NID 0x3\n");
+}
+
static void alc_fixup_dell4_mic_no_presence_quiet(struct hda_codec *codec,
const struct hda_fixup *fix,
int action)
@@ -7104,6 +7124,38 @@ static void alc_fixup_headset_mic(struct hda_codec *codec,
}
}
+static void alc245_fixup_hp_spectre_x360_eu0xxx(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ /*
+ * The Pin Complex 0x14 for the treble speakers is wrongly reported as
+ * unconnected.
+ * The Pin Complex 0x17 for the bass speakers has the lowest association
+ * and sequence values so shift it up a bit to squeeze 0x14 in.
+ */
+ static const struct hda_pintbl pincfgs[] = {
+ { 0x14, 0x90170110 }, // top/treble
+ { 0x17, 0x90170111 }, // bottom/bass
+ { }
+ };
+
+ /*
+ * Force DAC 0x02 for the bass speakers 0x17.
+ */
+ static const hda_nid_t conn[] = { 0x02 };
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ snd_hda_apply_pincfgs(codec, pincfgs);
+ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
+ break;
+ }
+
+ cs35l41_fixup_i2c_two(codec, fix, action);
+ alc245_fixup_hp_mute_led_coefbit(codec, fix, action);
+ alc245_fixup_hp_gpio_led(codec, fix, action);
+}
+
enum {
ALC269_FIXUP_GPIO2,
@@ -7382,6 +7434,8 @@ enum {
ALC294_FIXUP_CS35L41_I2C_2,
ALC245_FIXUP_CS35L56_SPI_4_HP_GPIO_LED,
ALC256_FIXUP_ACER_SFG16_MICMUTE_LED,
+ ALC256_FIXUP_HEADPHONE_AMP_VOL,
+ ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX,
};
/* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -9581,6 +9635,14 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc256_fixup_acer_sfg16_micmute_led,
},
+ [ALC256_FIXUP_HEADPHONE_AMP_VOL] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc256_decrease_headphone_amp_val,
+ },
+ [ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc245_fixup_hp_spectre_x360_eu0xxx,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -9944,7 +10006,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8be8, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8be9, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8c15, "HP Spectre 14", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x8c15, "HP Spectre x360 2-in-1 Laptop 14-eu0xxx", ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX),
SND_PCI_QUIRK(0x103c, 0x8c16, "HP Spectre 16", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8c17, "HP Spectre 16", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8c46, "HP EliteBook 830 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
@@ -10115,12 +10177,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x152d, 0x1082, "Quanta NL3", ALC269_FIXUP_LIFEBOOK),
+ SND_PCI_QUIRK(0x1558, 0x0353, "Clevo V35[05]SN[CDE]Q", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x1323, "Clevo N130ZU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x1325, "Clevo N15[01][CW]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x1401, "Clevo L140[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x1403, "Clevo N140CU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x1404, "Clevo N150CU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x14a1, "Clevo L141MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x2624, "Clevo L240TU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x4018, "Clevo NV40M[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x4019, "Clevo NV40MZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x4020, "Clevo NV40MB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -10319,6 +10383,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x9e56, "Lenovo ZhaoYang CF4620Z", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
SND_PCI_QUIRK(0x1849, 0xa233, "Positivo Master C6300", ALC269_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1854, 0x0440, "LG CQ6", ALC256_FIXUP_HEADPHONE_AMP_VOL),
+ SND_PCI_QUIRK(0x1854, 0x0441, "LG CQ6 AIO", ALC256_FIXUP_HEADPHONE_AMP_VOL),
SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
SND_PCI_QUIRK(0x19e5, 0x320f, "Huawei WRT-WX9 ", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
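
For the LG CQ6 entries, alc256_decrease_headphone_amp_val() above trims two 7-bit fields of the amp capability word. A worked host-side example with a hypothetical caps value (field positions per the HDA amp-capability layout):

#include <stdio.h>
#include <stdint.h>

#define AC_AMPCAP_OFFSET		(0x7f << 0)
#define AC_AMPCAP_OFFSET_SHIFT		0
#define AC_AMPCAP_NUM_STEPS		(0x7f << 8)
#define AC_AMPCAP_NUM_STEPS_SHIFT	8

int main(void)
{
	uint32_t caps = 0x00025f5f;	/* hypothetical: nsteps=0x5f, offs=0x5f */
	uint8_t nsteps = ((caps & AC_AMPCAP_NUM_STEPS) >> AC_AMPCAP_NUM_STEPS_SHIFT) - 10;
	uint8_t offs = ((caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT) - 10;

	caps &= ~AC_AMPCAP_NUM_STEPS & ~AC_AMPCAP_OFFSET;
	caps |= (nsteps << AC_AMPCAP_NUM_STEPS_SHIFT) | (offs << AC_AMPCAP_OFFSET_SHIFT);
	printf("caps=%#x\n", caps);	/* 0x25555: both fields reduced to 0x55 */
	return 0;
}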
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index 90360f8b3e81b9..69c68d8e7a6b54 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -203,13 +203,6 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "21J2"),
- }
- },
- {
- .driver_data = &acp6x_card,
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21J0"),
}
},
@@ -321,6 +314,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
{
.driver_data = &acp6x_card,
.matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M7600RE"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."),
DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 B7ED"),
}
diff --git a/sound/soc/amd/yc/pci-acp6x.c b/sound/soc/amd/yc/pci-acp6x.c
index 694b8e31390248..7af6a349b1d41f 100644
--- a/sound/soc/amd/yc/pci-acp6x.c
+++ b/sound/soc/amd/yc/pci-acp6x.c
@@ -162,7 +162,6 @@ static int snd_acp6x_probe(struct pci_dev *pci,
/* Yellow Carp device check */
switch (pci->revision) {
case 0x60:
- case 0x63:
case 0x6f:
break;
default:
diff --git a/sound/soc/codecs/tlv320adc3xxx.c b/sound/soc/codecs/tlv320adc3xxx.c
index 420bbf588efeaf..e100cc9f5c1929 100644
--- a/sound/soc/codecs/tlv320adc3xxx.c
+++ b/sound/soc/codecs/tlv320adc3xxx.c
@@ -1429,7 +1429,7 @@ err_unprepare_mclk:
return ret;
}
-static void __exit adc3xxx_i2c_remove(struct i2c_client *client)
+static void adc3xxx_i2c_remove(struct i2c_client *client)
{
struct adc3xxx *adc3xxx = i2c_get_clientdata(client);
@@ -1452,7 +1452,7 @@ static struct i2c_driver adc3xxx_i2c_driver = {
.of_match_table = tlv320adc3xxx_of_match,
},
.probe = adc3xxx_i2c_probe,
- .remove = __exit_p(adc3xxx_i2c_remove),
+ .remove = adc3xxx_i2c_remove,
.id_table = adc3xxx_i2c_id,
};
diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
index 860e66ec85e8a3..9fa020ef7eab9b 100644
--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
+++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
@@ -25,8 +25,6 @@
#define DEFAULT_MCLK_FS 256
#define CH_GRP_MAX 4 /* The max channel 8 / 2 */
#define MULTIPLEX_CH_MAX 10
-#define CLK_PPM_MIN -1000
-#define CLK_PPM_MAX 1000
#define TRCM_TXRX 0
#define TRCM_TX 1
@@ -53,20 +51,6 @@ struct rk_i2s_tdm_dev {
struct clk *hclk;
struct clk *mclk_tx;
struct clk *mclk_rx;
- /* The mclk_tx_src is parent of mclk_tx */
- struct clk *mclk_tx_src;
- /* The mclk_rx_src is parent of mclk_rx */
- struct clk *mclk_rx_src;
- /*
- * The mclk_root0 and mclk_root1 are root parent and supplies for
- * the different FS.
- *
- * e.g:
- * mclk_root0 is VPLL0, used for FS=48000Hz
- * mclk_root1 is VPLL1, used for FS=44100Hz
- */
- struct clk *mclk_root0;
- struct clk *mclk_root1;
struct regmap *regmap;
struct regmap *grf;
struct snd_dmaengine_dai_dma_data capture_dma_data;
@@ -76,19 +60,11 @@ struct rk_i2s_tdm_dev {
const struct rk_i2s_soc_data *soc_data;
bool is_master_mode;
bool io_multiplex;
- bool mclk_calibrate;
bool tdm_mode;
- unsigned int mclk_rx_freq;
- unsigned int mclk_tx_freq;
- unsigned int mclk_root0_freq;
- unsigned int mclk_root1_freq;
- unsigned int mclk_root0_initial_freq;
- unsigned int mclk_root1_initial_freq;
unsigned int frame_width;
unsigned int clk_trcm;
unsigned int i2s_sdis[CH_GRP_MAX];
unsigned int i2s_sdos[CH_GRP_MAX];
- int clk_ppm;
int refcount;
spinlock_t lock; /* xfer lock */
bool has_playback;
@@ -114,12 +90,6 @@ static void i2s_tdm_disable_unprepare_mclk(struct rk_i2s_tdm_dev *i2s_tdm)
{
clk_disable_unprepare(i2s_tdm->mclk_tx);
clk_disable_unprepare(i2s_tdm->mclk_rx);
- if (i2s_tdm->mclk_calibrate) {
- clk_disable_unprepare(i2s_tdm->mclk_tx_src);
- clk_disable_unprepare(i2s_tdm->mclk_rx_src);
- clk_disable_unprepare(i2s_tdm->mclk_root0);
- clk_disable_unprepare(i2s_tdm->mclk_root1);
- }
}
/**
@@ -142,29 +112,9 @@ static int i2s_tdm_prepare_enable_mclk(struct rk_i2s_tdm_dev *i2s_tdm)
ret = clk_prepare_enable(i2s_tdm->mclk_rx);
if (ret)
goto err_mclk_rx;
- if (i2s_tdm->mclk_calibrate) {
- ret = clk_prepare_enable(i2s_tdm->mclk_tx_src);
- if (ret)
- goto err_mclk_rx;
- ret = clk_prepare_enable(i2s_tdm->mclk_rx_src);
- if (ret)
- goto err_mclk_rx_src;
- ret = clk_prepare_enable(i2s_tdm->mclk_root0);
- if (ret)
- goto err_mclk_root0;
- ret = clk_prepare_enable(i2s_tdm->mclk_root1);
- if (ret)
- goto err_mclk_root1;
- }
return 0;
-err_mclk_root1:
- clk_disable_unprepare(i2s_tdm->mclk_root0);
-err_mclk_root0:
- clk_disable_unprepare(i2s_tdm->mclk_rx_src);
-err_mclk_rx_src:
- clk_disable_unprepare(i2s_tdm->mclk_tx_src);
err_mclk_rx:
clk_disable_unprepare(i2s_tdm->mclk_tx);
err_mclk_tx:
@@ -564,159 +514,6 @@ static void rockchip_i2s_tdm_xfer_resume(struct snd_pcm_substream *substream,
I2S_XFER_RXS_START);
}
-static int rockchip_i2s_tdm_clk_set_rate(struct rk_i2s_tdm_dev *i2s_tdm,
- struct clk *clk, unsigned long rate,
- int ppm)
-{
- unsigned long rate_target;
- int delta, ret;
-
- if (ppm == i2s_tdm->clk_ppm)
- return 0;
-
- if (ppm < 0)
- delta = -1;
- else
- delta = 1;
-
- delta *= (int)div64_u64((u64)rate * (u64)abs(ppm) + 500000,
- 1000000);
-
- rate_target = rate + delta;
-
- if (!rate_target)
- return -EINVAL;
-
- ret = clk_set_rate(clk, rate_target);
- if (ret)
- return ret;
-
- i2s_tdm->clk_ppm = ppm;
-
- return 0;
-}
-
-static int rockchip_i2s_tdm_calibrate_mclk(struct rk_i2s_tdm_dev *i2s_tdm,
- struct snd_pcm_substream *substream,
- unsigned int lrck_freq)
-{
- struct clk *mclk_root;
- struct clk *mclk_parent;
- unsigned int mclk_root_freq;
- unsigned int mclk_root_initial_freq;
- unsigned int mclk_parent_freq;
- unsigned int div, delta;
- u64 ppm;
- int ret;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- mclk_parent = i2s_tdm->mclk_tx_src;
- else
- mclk_parent = i2s_tdm->mclk_rx_src;
-
- switch (lrck_freq) {
- case 8000:
- case 16000:
- case 24000:
- case 32000:
- case 48000:
- case 64000:
- case 96000:
- case 192000:
- mclk_root = i2s_tdm->mclk_root0;
- mclk_root_freq = i2s_tdm->mclk_root0_freq;
- mclk_root_initial_freq = i2s_tdm->mclk_root0_initial_freq;
- mclk_parent_freq = DEFAULT_MCLK_FS * 192000;
- break;
- case 11025:
- case 22050:
- case 44100:
- case 88200:
- case 176400:
- mclk_root = i2s_tdm->mclk_root1;
- mclk_root_freq = i2s_tdm->mclk_root1_freq;
- mclk_root_initial_freq = i2s_tdm->mclk_root1_initial_freq;
- mclk_parent_freq = DEFAULT_MCLK_FS * 176400;
- break;
- default:
- dev_err(i2s_tdm->dev, "Invalid LRCK frequency: %u Hz\n",
- lrck_freq);
- return -EINVAL;
- }
-
- ret = clk_set_parent(mclk_parent, mclk_root);
- if (ret)
- return ret;
-
- ret = rockchip_i2s_tdm_clk_set_rate(i2s_tdm, mclk_root,
- mclk_root_freq, 0);
- if (ret)
- return ret;
-
- delta = abs(mclk_root_freq % mclk_parent_freq - mclk_parent_freq);
- ppm = div64_u64((uint64_t)delta * 1000000, (uint64_t)mclk_root_freq);
-
- if (ppm) {
- div = DIV_ROUND_CLOSEST(mclk_root_initial_freq, mclk_parent_freq);
- if (!div)
- return -EINVAL;
-
- mclk_root_freq = mclk_parent_freq * round_up(div, 2);
-
- ret = clk_set_rate(mclk_root, mclk_root_freq);
- if (ret)
- return ret;
-
- i2s_tdm->mclk_root0_freq = clk_get_rate(i2s_tdm->mclk_root0);
- i2s_tdm->mclk_root1_freq = clk_get_rate(i2s_tdm->mclk_root1);
- }
-
- return clk_set_rate(mclk_parent, mclk_parent_freq);
-}
-
-static int rockchip_i2s_tdm_set_mclk(struct rk_i2s_tdm_dev *i2s_tdm,
- struct snd_pcm_substream *substream,
- struct clk **mclk)
-{
- unsigned int mclk_freq;
- int ret;
-
- if (i2s_tdm->clk_trcm) {
- if (i2s_tdm->mclk_tx_freq != i2s_tdm->mclk_rx_freq) {
- dev_err(i2s_tdm->dev,
- "clk_trcm, tx: %d and rx: %d should be the same\n",
- i2s_tdm->mclk_tx_freq,
- i2s_tdm->mclk_rx_freq);
- return -EINVAL;
- }
-
- ret = clk_set_rate(i2s_tdm->mclk_tx, i2s_tdm->mclk_tx_freq);
- if (ret)
- return ret;
-
- ret = clk_set_rate(i2s_tdm->mclk_rx, i2s_tdm->mclk_rx_freq);
- if (ret)
- return ret;
-
- /* mclk_rx is also ok. */
- *mclk = i2s_tdm->mclk_tx;
- } else {
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- *mclk = i2s_tdm->mclk_tx;
- mclk_freq = i2s_tdm->mclk_tx_freq;
- } else {
- *mclk = i2s_tdm->mclk_rx;
- mclk_freq = i2s_tdm->mclk_rx_freq;
- }
-
- ret = clk_set_rate(*mclk, mclk_freq);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static int rockchip_i2s_ch_to_io(unsigned int ch, bool substream_capture)
{
if (substream_capture) {
@@ -853,19 +650,17 @@ static int rockchip_i2s_tdm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct rk_i2s_tdm_dev *i2s_tdm = to_info(dai);
- struct clk *mclk;
- int ret = 0;
unsigned int val = 0;
unsigned int mclk_rate, bclk_rate, div_bclk = 4, div_lrck = 64;
+ int err;
if (i2s_tdm->is_master_mode) {
- if (i2s_tdm->mclk_calibrate)
- rockchip_i2s_tdm_calibrate_mclk(i2s_tdm, substream,
- params_rate(params));
+ struct clk *mclk = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+ i2s_tdm->mclk_tx : i2s_tdm->mclk_rx;
- ret = rockchip_i2s_tdm_set_mclk(i2s_tdm, substream, &mclk);
- if (ret)
- return ret;
+ err = clk_set_rate(mclk, DEFAULT_MCLK_FS * params_rate(params));
+ if (err)
+ return err;
mclk_rate = clk_get_rate(mclk);
bclk_rate = i2s_tdm->frame_width * params_rate(params);
@@ -973,96 +768,6 @@ static int rockchip_i2s_tdm_trigger(struct snd_pcm_substream *substream,
return 0;
}
-static int rockchip_i2s_tdm_set_sysclk(struct snd_soc_dai *cpu_dai, int stream,
- unsigned int freq, int dir)
-{
- struct rk_i2s_tdm_dev *i2s_tdm = to_info(cpu_dai);
-
- /* Put set mclk rate into rockchip_i2s_tdm_set_mclk() */
- if (i2s_tdm->clk_trcm) {
- i2s_tdm->mclk_tx_freq = freq;
- i2s_tdm->mclk_rx_freq = freq;
- } else {
- if (stream == SNDRV_PCM_STREAM_PLAYBACK)
- i2s_tdm->mclk_tx_freq = freq;
- else
- i2s_tdm->mclk_rx_freq = freq;
- }
-
- dev_dbg(i2s_tdm->dev, "The target mclk_%s freq is: %d\n",
- stream ? "rx" : "tx", freq);
-
- return 0;
-}
-
-static int rockchip_i2s_tdm_clk_compensation_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->count = 1;
- uinfo->value.integer.min = CLK_PPM_MIN;
- uinfo->value.integer.max = CLK_PPM_MAX;
- uinfo->value.integer.step = 1;
-
- return 0;
-}
-
-static int rockchip_i2s_tdm_clk_compensation_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
- struct rk_i2s_tdm_dev *i2s_tdm = snd_soc_dai_get_drvdata(dai);
-
- ucontrol->value.integer.value[0] = i2s_tdm->clk_ppm;
-
- return 0;
-}
-
-static int rockchip_i2s_tdm_clk_compensation_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
- struct rk_i2s_tdm_dev *i2s_tdm = snd_soc_dai_get_drvdata(dai);
- int ret = 0, ppm = 0;
- int changed = 0;
- unsigned long old_rate;
-
- if (ucontrol->value.integer.value[0] < CLK_PPM_MIN ||
- ucontrol->value.integer.value[0] > CLK_PPM_MAX)
- return -EINVAL;
-
- ppm = ucontrol->value.integer.value[0];
-
- old_rate = clk_get_rate(i2s_tdm->mclk_root0);
- ret = rockchip_i2s_tdm_clk_set_rate(i2s_tdm, i2s_tdm->mclk_root0,
- i2s_tdm->mclk_root0_freq, ppm);
- if (ret)
- return ret;
- if (old_rate != clk_get_rate(i2s_tdm->mclk_root0))
- changed = 1;
-
- if (clk_is_match(i2s_tdm->mclk_root0, i2s_tdm->mclk_root1))
- return changed;
-
- old_rate = clk_get_rate(i2s_tdm->mclk_root1);
- ret = rockchip_i2s_tdm_clk_set_rate(i2s_tdm, i2s_tdm->mclk_root1,
- i2s_tdm->mclk_root1_freq, ppm);
- if (ret)
- return ret;
- if (old_rate != clk_get_rate(i2s_tdm->mclk_root1))
- changed = 1;
-
- return changed;
-}
-
-static struct snd_kcontrol_new rockchip_i2s_tdm_compensation_control = {
- .iface = SNDRV_CTL_ELEM_IFACE_PCM,
- .name = "PCM Clock Compensation in PPM",
- .info = rockchip_i2s_tdm_clk_compensation_info,
- .get = rockchip_i2s_tdm_clk_compensation_get,
- .put = rockchip_i2s_tdm_clk_compensation_put,
-};
-
static int rockchip_i2s_tdm_dai_probe(struct snd_soc_dai *dai)
{
struct rk_i2s_tdm_dev *i2s_tdm = snd_soc_dai_get_drvdata(dai);
@@ -1072,9 +777,6 @@ static int rockchip_i2s_tdm_dai_probe(struct snd_soc_dai *dai)
if (i2s_tdm->has_playback)
snd_soc_dai_dma_data_set_playback(dai, &i2s_tdm->playback_dma_data);
- if (i2s_tdm->mclk_calibrate)
- snd_soc_add_dai_controls(dai, &rockchip_i2s_tdm_compensation_control, 1);
-
return 0;
}
@@ -1115,7 +817,6 @@ static const struct snd_soc_dai_ops rockchip_i2s_tdm_dai_ops = {
.probe = rockchip_i2s_tdm_dai_probe,
.hw_params = rockchip_i2s_tdm_hw_params,
.set_bclk_ratio = rockchip_i2s_tdm_set_bclk_ratio,
- .set_sysclk = rockchip_i2s_tdm_set_sysclk,
.set_fmt = rockchip_i2s_tdm_set_fmt,
.set_tdm_slot = rockchip_dai_tdm_slot,
.trigger = rockchip_i2s_tdm_trigger,
@@ -1444,35 +1145,6 @@ static void rockchip_i2s_tdm_path_config(struct rk_i2s_tdm_dev *i2s_tdm,
rockchip_i2s_tdm_tx_path_config(i2s_tdm, num);
}
-static int rockchip_i2s_tdm_get_calibrate_mclks(struct rk_i2s_tdm_dev *i2s_tdm)
-{
- int num_mclks = 0;
-
- i2s_tdm->mclk_tx_src = devm_clk_get(i2s_tdm->dev, "mclk_tx_src");
- if (!IS_ERR(i2s_tdm->mclk_tx_src))
- num_mclks++;
-
- i2s_tdm->mclk_rx_src = devm_clk_get(i2s_tdm->dev, "mclk_rx_src");
- if (!IS_ERR(i2s_tdm->mclk_rx_src))
- num_mclks++;
-
- i2s_tdm->mclk_root0 = devm_clk_get(i2s_tdm->dev, "mclk_root0");
- if (!IS_ERR(i2s_tdm->mclk_root0))
- num_mclks++;
-
- i2s_tdm->mclk_root1 = devm_clk_get(i2s_tdm->dev, "mclk_root1");
- if (!IS_ERR(i2s_tdm->mclk_root1))
- num_mclks++;
-
- if (num_mclks < 4 && num_mclks != 0)
- return -ENOENT;
-
- if (num_mclks == 4)
- i2s_tdm->mclk_calibrate = 1;
-
- return 0;
-}
-
static int rockchip_i2s_tdm_path_prepare(struct rk_i2s_tdm_dev *i2s_tdm,
struct device_node *np,
bool is_rx_path)
@@ -1610,11 +1282,6 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
i2s_tdm->io_multiplex =
of_property_read_bool(node, "rockchip,io-multiplex");
- ret = rockchip_i2s_tdm_get_calibrate_mclks(i2s_tdm);
- if (ret)
- return dev_err_probe(i2s_tdm->dev, ret,
- "mclk-calibrate clocks missing");
-
regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs)) {
return dev_err_probe(i2s_tdm->dev, PTR_ERR(regs),
@@ -1667,13 +1334,6 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
goto err_disable_hclk;
}
- if (i2s_tdm->mclk_calibrate) {
- i2s_tdm->mclk_root0_initial_freq = clk_get_rate(i2s_tdm->mclk_root0);
- i2s_tdm->mclk_root1_initial_freq = clk_get_rate(i2s_tdm->mclk_root1);
- i2s_tdm->mclk_root0_freq = i2s_tdm->mclk_root0_initial_freq;
- i2s_tdm->mclk_root1_freq = i2s_tdm->mclk_root1_initial_freq;
- }
-
pm_runtime_enable(&pdev->dev);
regmap_update_bits(i2s_tdm->regmap, I2S_DMACR, I2S_DMACR_TDL_MASK,
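
With the calibration machinery gone, the master-mode clock setup in hw_params() reduces to straightforward ratios. A sketch of the arithmetic at fs = 48000 with a 64-bit frame:

#include <stdio.h>

int main(void)
{
	unsigned int fs = 48000, frame_width = 64;
	unsigned int mclk = 256 * fs;		/* DEFAULT_MCLK_FS * rate */
	unsigned int bclk = frame_width * fs;

	printf("mclk=%u bclk=%u div_bclk=%u div_lrck=%u\n",
	       mclk, bclk, mclk / bclk, bclk / fs);
	/* mclk=12288000 bclk=3072000 div_bclk=4 div_lrck=64 */
	return 0;
}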
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index a38fee48ee005a..e692aa3b8b22f8 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -385,11 +385,15 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+ snd_soc_dpcm_mutex_lock(fe);
ret = dpcm_be_dai_hw_params(fe, stream);
+ snd_soc_dpcm_mutex_unlock(fe);
if (ret < 0)
goto out;
+ snd_soc_dpcm_mutex_lock(fe);
ret = dpcm_be_dai_prepare(fe, stream);
+ snd_soc_dpcm_mutex_unlock(fe);
if (ret < 0)
goto out;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 1e94edba12eb85..2ec13d1634b636 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1219,6 +1219,9 @@ static int snd_soc_add_pcm_runtime(struct snd_soc_card *card,
if (!snd_soc_is_matching_component(platform, component))
continue;
+ if (snd_soc_component_is_dummy(component) && component->num_dai)
+ continue;
+
snd_soc_rtd_add_component(rtd, component);
}
}
diff --git a/sound/soc/sof/amd/acp-loader.c b/sound/soc/sof/amd/acp-loader.c
index d2d21478399e02..aad904839b817c 100644
--- a/sound/soc/sof/amd/acp-loader.c
+++ b/sound/soc/sof/amd/acp-loader.c
@@ -173,7 +173,7 @@ int acp_dsp_pre_fw_run(struct snd_sof_dev *sdev)
adata = sdev->pdata->hw_pdata;
- if (adata->signed_fw_image)
+ if (adata->quirks && adata->quirks->signed_fw_image)
size_fw = adata->fw_bin_size - ACP_FIRMWARE_SIGNATURE;
else
size_fw = adata->fw_bin_size;
diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
index 9b3c26210db38f..be7dc1e02284ab 100644
--- a/sound/soc/sof/amd/acp.c
+++ b/sound/soc/sof/amd/acp.c
@@ -20,12 +20,15 @@
#include "acp.h"
#include "acp-dsp-offset.h"
-#define SECURED_FIRMWARE 1
-
static bool enable_fw_debug;
module_param(enable_fw_debug, bool, 0444);
MODULE_PARM_DESC(enable_fw_debug, "Enable Firmware debug");
+static struct acp_quirk_entry quirk_valve_galileo = {
+ .signed_fw_image = true,
+ .skip_iram_dram_size_mod = true,
+};
+
const struct dmi_system_id acp_sof_quirk_table[] = {
{
/* Steam Deck OLED device */
@@ -33,7 +36,7 @@ const struct dmi_system_id acp_sof_quirk_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Valve"),
DMI_MATCH(DMI_PRODUCT_NAME, "Galileo"),
},
- .driver_data = (void *)SECURED_FIRMWARE,
+ .driver_data = &quirk_valve_galileo,
},
{}
};
@@ -254,7 +257,7 @@ int configure_and_run_sha_dma(struct acp_dev_data *adata, void *image_addr,
}
}
- if (adata->signed_fw_image)
+ if (adata->quirks && adata->quirks->signed_fw_image)
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_INCLUDE_HDR, ACP_SHA_HEADER);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_STRT_ADDR, start_addr);
@@ -278,7 +281,7 @@ int configure_and_run_sha_dma(struct acp_dev_data *adata, void *image_addr,
}
/* psp_send_cmd only required for vangogh platform (rev - 5) */
- if (desc->rev == 5) {
+ if (desc->rev == 5 && !(adata->quirks && adata->quirks->skip_iram_dram_size_mod)) {
/* Modify IRAM and DRAM size */
ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | IRAM_DRAM_FENCE_2);
if (ret)
@@ -738,26 +741,27 @@ skip_soundwire:
sdev->debug_box.offset = sdev->host_box.offset + sdev->host_box.size;
sdev->debug_box.size = BOX_SIZE_1024;
- adata->signed_fw_image = false;
dmi_id = dmi_first_match(acp_sof_quirk_table);
- if (dmi_id && dmi_id->driver_data) {
- adata->fw_code_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
- "sof-%s-code.bin",
- chip->name);
- if (!adata->fw_code_bin) {
- ret = -ENOMEM;
- goto free_ipc_irq;
+ if (dmi_id) {
+ adata->quirks = dmi_id->driver_data;
+
+ if (adata->quirks->signed_fw_image) {
+ adata->fw_code_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "sof-%s-code.bin",
+ chip->name);
+ if (!adata->fw_code_bin) {
+ ret = -ENOMEM;
+ goto free_ipc_irq;
+ }
+
+ adata->fw_data_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "sof-%s-data.bin",
+ chip->name);
+ if (!adata->fw_data_bin) {
+ ret = -ENOMEM;
+ goto free_ipc_irq;
+ }
}
-
- adata->fw_data_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
- "sof-%s-data.bin",
- chip->name);
- if (!adata->fw_data_bin) {
- ret = -ENOMEM;
- goto free_ipc_irq;
- }
-
- adata->signed_fw_image = dmi_id->driver_data;
}
adata->enable_fw_debug = enable_fw_debug;
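
Replacing the integer driver_data with a struct makes the table extensible: adding another machine is now one quirk struct plus one DMI entry. A sketch with hypothetical vendor/product strings:

static struct acp_quirk_entry quirk_example_board = {
	.signed_fw_image = false,
	.skip_iram_dram_size_mod = true,
};

/* ...and, before the terminating { } of acp_sof_quirk_table: */
{
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),	/* hypothetical */
		DMI_MATCH(DMI_PRODUCT_NAME, "Example Board"),
	},
	.driver_data = &quirk_example_board,
},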
diff --git a/sound/soc/sof/amd/acp.h b/sound/soc/sof/amd/acp.h
index 947068da39b535..e229bb6b849d6b 100644
--- a/sound/soc/sof/amd/acp.h
+++ b/sound/soc/sof/amd/acp.h
@@ -207,6 +207,11 @@ struct sof_amd_acp_desc {
u64 sdw_acpi_dev_addr;
};
+struct acp_quirk_entry {
+ bool signed_fw_image;
+ bool skip_iram_dram_size_mod;
+};
+
/* Common device data struct for ACP devices */
struct acp_dev_data {
struct snd_sof_dev *dev;
@@ -236,7 +241,7 @@ struct acp_dev_data {
u8 *data_buf;
dma_addr_t sram_dma_addr;
u8 *sram_data_buf;
- bool signed_fw_image;
+ struct acp_quirk_entry *quirks;
struct dma_descriptor dscr_info[ACP_MAX_DESC];
struct acp_dsp_stream stream_buf[ACP_MAX_STREAM];
struct acp_dsp_stream *dtrace_stream;
diff --git a/sound/soc/sof/amd/vangogh.c b/sound/soc/sof/amd/vangogh.c
index de15d21aa6d982..bc6ffdb5471a58 100644
--- a/sound/soc/sof/amd/vangogh.c
+++ b/sound/soc/sof/amd/vangogh.c
@@ -143,6 +143,7 @@ EXPORT_SYMBOL_NS(sof_vangogh_ops, SND_SOC_SOF_AMD_COMMON);
int sof_vangogh_ops_init(struct snd_sof_dev *sdev)
{
const struct dmi_system_id *dmi_id;
+ struct acp_quirk_entry *quirks;
/* common defaults */
memcpy(&sof_vangogh_ops, &sof_acp_common_ops, sizeof(struct snd_sof_dsp_ops));
@@ -151,8 +152,12 @@ int sof_vangogh_ops_init(struct snd_sof_dev *sdev)
sof_vangogh_ops.num_drv = ARRAY_SIZE(vangogh_sof_dai);
dmi_id = dmi_first_match(acp_sof_quirk_table);
- if (dmi_id && dmi_id->driver_data)
- sof_vangogh_ops.load_firmware = acp_sof_load_signed_firmware;
+ if (dmi_id) {
+ quirks = dmi_id->driver_data;
+
+ if (quirks->signed_fw_image)
+ sof_vangogh_ops.load_firmware = acp_sof_load_signed_firmware;
+ }
return 0;
}
diff --git a/tools/Makefile b/tools/Makefile
index 37e9f680483264..276f5d0d53a447 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -11,7 +11,6 @@ help:
@echo ''
@echo ' acpi - ACPI tools'
@echo ' bpf - misc BPF tools'
- @echo ' cgroup - cgroup tools'
@echo ' counter - counter tools'
@echo ' cpupower - a tool for all things x86 CPU power'
@echo ' debugging - tools for debugging'
@@ -69,7 +68,7 @@ acpi: FORCE
cpupower: FORCE
$(call descend,power/$@)
-cgroup counter firewire hv guest bootconfig spi usb virtio mm bpf iio gpio objtool leds wmi pci firmware debugging tracing: FORCE
+counter firewire hv guest bootconfig spi usb virtio mm bpf iio gpio objtool leds wmi pci firmware debugging tracing: FORCE
$(call descend,$@)
bpf/%: FORCE
@@ -116,7 +115,7 @@ freefall: FORCE
kvm_stat: FORCE
$(call descend,kvm/$@)
-all: acpi cgroup counter cpupower gpio hv firewire \
+all: acpi counter cpupower gpio hv firewire \
perf selftests bootconfig spi turbostat usb \
virtio mm bpf x86_energy_perf_policy \
tmon freefall iio objtool kvm_stat wmi \
@@ -128,7 +127,7 @@ acpi_install:
cpupower_install:
$(call descend,power/$(@:_install=),install)
-cgroup_install counter_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install mm_install bpf_install objtool_install wmi_install pci_install debugging_install tracing_install:
+counter_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install mm_install bpf_install objtool_install wmi_install pci_install debugging_install tracing_install:
$(call descend,$(@:_install=),install)
selftests_install:
@@ -155,7 +154,7 @@ freefall_install:
kvm_stat_install:
$(call descend,kvm/$(@:_install=),install)
-install: acpi_install cgroup_install counter_install cpupower_install gpio_install \
+install: acpi_install counter_install cpupower_install gpio_install \
hv_install firewire_install iio_install \
perf_install selftests_install turbostat_install usb_install \
virtio_install mm_install bpf_install x86_energy_perf_policy_install \
@@ -169,7 +168,7 @@ acpi_clean:
cpupower_clean:
$(call descend,power/cpupower,clean)
-cgroup_clean counter_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean mm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean tracing_clean:
+counter_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean mm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean tracing_clean:
$(call descend,$(@:_clean=),clean)
libapi_clean:
@@ -209,7 +208,7 @@ freefall_clean:
build_clean:
$(call descend,build,clean)
-clean: acpi_clean cgroup_clean counter_clean cpupower_clean hv_clean firewire_clean \
+clean: acpi_clean counter_clean cpupower_clean hv_clean firewire_clean \
perf_clean selftests_clean turbostat_clean bootconfig_clean spi_clean usb_clean virtio_clean \
mm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
freefall_clean build_clean libbpf_clean libsubcmd_clean \
diff --git a/tools/arch/loongarch/include/asm/inst.h b/tools/arch/loongarch/include/asm/inst.h
new file mode 100644
index 00000000000000..c25b5853181dba
--- /dev/null
+++ b/tools/arch/loongarch/include/asm/inst.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_INST_H
+#define _ASM_INST_H
+
+#include <linux/bitops.h>
+
+#define LOONGARCH_INSN_NOP 0x03400000
+
+enum reg0i15_op {
+ break_op = 0x54,
+};
+
+enum reg0i26_op {
+ b_op = 0x14,
+ bl_op = 0x15,
+};
+
+enum reg1i21_op {
+ beqz_op = 0x10,
+ bnez_op = 0x11,
+ bceqz_op = 0x12, /* bits[9:8] = 0x00 */
+ bcnez_op = 0x12, /* bits[9:8] = 0x01 */
+};
+
+enum reg2_op {
+ ertn_op = 0x1920e,
+};
+
+enum reg2i12_op {
+ addid_op = 0x0b,
+ andi_op = 0x0d,
+ ldd_op = 0xa3,
+ std_op = 0xa7,
+};
+
+enum reg2i14_op {
+ ldptrd_op = 0x26,
+ stptrd_op = 0x27,
+};
+
+enum reg2i16_op {
+ jirl_op = 0x13,
+ beq_op = 0x16,
+ bne_op = 0x17,
+ blt_op = 0x18,
+ bge_op = 0x19,
+ bltu_op = 0x1a,
+ bgeu_op = 0x1b,
+};
+
+struct reg0i15_format {
+ unsigned int immediate : 15;
+ unsigned int opcode : 17;
+};
+
+struct reg0i26_format {
+ unsigned int immediate_h : 10;
+ unsigned int immediate_l : 16;
+ unsigned int opcode : 6;
+};
+
+struct reg1i21_format {
+ unsigned int immediate_h : 5;
+ unsigned int rj : 5;
+ unsigned int immediate_l : 16;
+ unsigned int opcode : 6;
+};
+
+struct reg2_format {
+ unsigned int rd : 5;
+ unsigned int rj : 5;
+ unsigned int opcode : 22;
+};
+
+struct reg2i12_format {
+ unsigned int rd : 5;
+ unsigned int rj : 5;
+ unsigned int immediate : 12;
+ unsigned int opcode : 10;
+};
+
+struct reg2i14_format {
+ unsigned int rd : 5;
+ unsigned int rj : 5;
+ unsigned int immediate : 14;
+ unsigned int opcode : 8;
+};
+
+struct reg2i16_format {
+ unsigned int rd : 5;
+ unsigned int rj : 5;
+ unsigned int immediate : 16;
+ unsigned int opcode : 6;
+};
+
+union loongarch_instruction {
+ unsigned int word;
+ struct reg0i15_format reg0i15_format;
+ struct reg0i26_format reg0i26_format;
+ struct reg1i21_format reg1i21_format;
+ struct reg2_format reg2_format;
+ struct reg2i12_format reg2i12_format;
+ struct reg2i14_format reg2i14_format;
+ struct reg2i16_format reg2i16_format;
+};
+
+#define LOONGARCH_INSN_SIZE sizeof(union loongarch_instruction)
+
+enum loongarch_gpr {
+ LOONGARCH_GPR_ZERO = 0,
+ LOONGARCH_GPR_RA = 1,
+ LOONGARCH_GPR_TP = 2,
+ LOONGARCH_GPR_SP = 3,
+ LOONGARCH_GPR_A0 = 4, /* Reused as V0 for return value */
+ LOONGARCH_GPR_A1, /* Reused as V1 for return value */
+ LOONGARCH_GPR_A2,
+ LOONGARCH_GPR_A3,
+ LOONGARCH_GPR_A4,
+ LOONGARCH_GPR_A5,
+ LOONGARCH_GPR_A6,
+ LOONGARCH_GPR_A7,
+ LOONGARCH_GPR_T0 = 12,
+ LOONGARCH_GPR_T1,
+ LOONGARCH_GPR_T2,
+ LOONGARCH_GPR_T3,
+ LOONGARCH_GPR_T4,
+ LOONGARCH_GPR_T5,
+ LOONGARCH_GPR_T6,
+ LOONGARCH_GPR_T7,
+ LOONGARCH_GPR_T8,
+ LOONGARCH_GPR_FP = 22,
+ LOONGARCH_GPR_S0 = 23,
+ LOONGARCH_GPR_S1,
+ LOONGARCH_GPR_S2,
+ LOONGARCH_GPR_S3,
+ LOONGARCH_GPR_S4,
+ LOONGARCH_GPR_S5,
+ LOONGARCH_GPR_S6,
+ LOONGARCH_GPR_S7,
+ LOONGARCH_GPR_S8,
+ LOONGARCH_GPR_MAX
+};
+
+#define DEF_EMIT_REG2I16_FORMAT(NAME, OP) \
+static inline void emit_##NAME(union loongarch_instruction *insn, \
+ enum loongarch_gpr rj, \
+ enum loongarch_gpr rd, \
+ int offset) \
+{ \
+ insn->reg2i16_format.opcode = OP; \
+ insn->reg2i16_format.immediate = offset; \
+ insn->reg2i16_format.rj = rj; \
+ insn->reg2i16_format.rd = rd; \
+}
+
+DEF_EMIT_REG2I16_FORMAT(jirl, jirl_op)
+
+#endif /* _ASM_INST_H */
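
The bitfield formats above can be checked against a concrete encoding. A small host-side sketch decoding 0x4c000020, the LoongArch return instruction jirl $zero, $ra, 0 (the bitfield order assumes a little-endian host, as on LoongArch itself):

#include <stdio.h>

/* mirrors struct reg2i16_format from inst.h */
union insn {
	unsigned int word;
	struct {
		unsigned int rd : 5;
		unsigned int rj : 5;
		unsigned int immediate : 16;
		unsigned int opcode : 6;
	} reg2i16;
};

int main(void)
{
	union insn i = { .word = 0x4c000020 };	/* jirl $zero, $ra, 0 */

	/* prints: opcode=0x13 rd=0 rj=1 imm=0 -> jirl_op, a return */
	printf("opcode=%#x rd=%u rj=%u imm=%u\n",
	       i.reg2i16.opcode, i.reg2i16.rd, i.reg2i16.rj,
	       i.reg2i16.immediate);
	return 0;
}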
diff --git a/tools/arch/loongarch/include/asm/orc_types.h b/tools/arch/loongarch/include/asm/orc_types.h
new file mode 100644
index 00000000000000..caf1f71a1057b6
--- /dev/null
+++ b/tools/arch/loongarch/include/asm/orc_types.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ORC_TYPES_H
+#define _ORC_TYPES_H
+
+#include <linux/types.h>
+
+/*
+ * The ORC_REG_* registers are base registers which are used to find other
+ * registers on the stack.
+ *
+ * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the
+ * address of the previous frame: the caller's SP before it called the current
+ * function.
+ *
+ * ORC_REG_UNDEFINED means the corresponding register's value didn't change in
+ * the current frame.
+ *
+ * The most commonly used base registers are SP and FP -- which the previous SP
+ * is usually based on -- and PREV_SP and UNDEFINED -- which the previous FP is
+ * usually based on.
+ *
+ * The rest of the base registers are needed for special cases like entry code
+ * and GCC realigned stacks.
+ */
+#define ORC_REG_UNDEFINED 0
+#define ORC_REG_PREV_SP 1
+#define ORC_REG_SP 2
+#define ORC_REG_FP 3
+#define ORC_REG_MAX 4
+
+#define ORC_TYPE_UNDEFINED 0
+#define ORC_TYPE_END_OF_STACK 1
+#define ORC_TYPE_CALL 2
+#define ORC_TYPE_REGS 3
+#define ORC_TYPE_REGS_PARTIAL 4
+
+#ifndef __ASSEMBLY__
+/*
+ * This struct is more or less a vastly simplified version of the DWARF Call
+ * Frame Information standard. It contains only the necessary parts of DWARF
+ * CFI, simplified for ease of access by the in-kernel unwinder. It tells the
+ * unwinder how to find the previous SP and FP (and sometimes entry regs) on
+ * the stack for a given code address. Each instance of the struct corresponds
+ * to one or more code locations.
+ */
+struct orc_entry {
+ s16 sp_offset;
+ s16 fp_offset;
+ s16 ra_offset;
+ unsigned int sp_reg:4;
+ unsigned int fp_reg:4;
+ unsigned int ra_reg:4;
+ unsigned int type:3;
+ unsigned int signal:1;
+};
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ORC_TYPES_H */
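
A hedged sketch of how an unwinder would consume one of these entries for a plain call frame (simplified; the real logic lives in the kernel's ORC unwinder, and the helper below is illustrative only):

static int orc_step(const struct orc_entry *orc,
		    unsigned long sp, unsigned long fp,
		    unsigned long *prev_sp, unsigned long *prev_ra)
{
	unsigned long cfa;

	if (orc->type != ORC_TYPE_CALL)
		return -1;		/* regs/end-of-stack handled elsewhere */

	/* CFA (previous SP) = base register + sp_offset */
	cfa = (orc->sp_reg == ORC_REG_SP ? sp : fp) + orc->sp_offset;
	*prev_sp = cfa;

	/* RA saved on the stack relative to the CFA, if recoverable */
	if (orc->ra_reg == ORC_REG_PREV_SP)
		*prev_ra = *(unsigned long *)(cfa + orc->ra_offset);
	else
		return -1;

	return 0;
}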
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index f18683b95ea673..7319f6ced10860 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -87,4 +87,15 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
return (word << shift) | (word >> ((-shift) & 31));
}
+/**
+ * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
+ * @value: value to sign extend
+ * @index: 0 based bit index (0<=index<64) to sign bit
+ */
+static __always_inline __s64 sign_extend64(__u64 value, int index)
+{
+ __u8 shift = 63 - index;
+ return (__s64)(value << shift) >> shift;
+}
+
#endif
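
A worked example of sign_extend64(), matching how the LoongArch decoder added below sign-extends a 21-bit branch immediate (index 20 for the reg1i21 format):

#include <stdio.h>
#include <stdint.h>

static inline int64_t sign_extend64(uint64_t value, int index)
{
	uint8_t shift = 63 - index;
	return (int64_t)(value << shift) >> shift;
}

int main(void)
{
	/* 0x1ffffc is -4 as a 21-bit two's complement value */
	printf("%lld\n", (long long)sign_extend64(0x1ffffc, 20)); /* -4 */
	return 0;
}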
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 83b100c1e7f684..bf7f7f84ac6259 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -57,6 +57,10 @@ ifeq ($(SRCARCH),x86)
BUILD_ORC := y
endif
+ifeq ($(SRCARCH),loongarch)
+ BUILD_ORC := y
+endif
+
export BUILD_ORC
export srctree OUTPUT CFLAGS SRCARCH AWK
include $(srctree)/tools/build/Makefile.include
diff --git a/tools/objtool/arch/loongarch/Build b/tools/objtool/arch/loongarch/Build
new file mode 100644
index 00000000000000..1d4b784b6887ab
--- /dev/null
+++ b/tools/objtool/arch/loongarch/Build
@@ -0,0 +1,3 @@
+objtool-y += decode.o
+objtool-y += special.o
+objtool-y += orc.o
diff --git a/tools/objtool/arch/loongarch/decode.c b/tools/objtool/arch/loongarch/decode.c
new file mode 100644
index 00000000000000..aee479d2191c21
--- /dev/null
+++ b/tools/objtool/arch/loongarch/decode.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <string.h>
+#include <objtool/check.h>
+#include <objtool/warn.h>
+#include <asm/inst.h>
+#include <asm/orc_types.h>
+#include <linux/objtool_types.h>
+
+#ifndef EM_LOONGARCH
+#define EM_LOONGARCH 258
+#endif
+
+int arch_ftrace_match(char *name)
+{
+ return !strcmp(name, "_mcount");
+}
+
+unsigned long arch_jump_destination(struct instruction *insn)
+{
+ return insn->offset + (insn->immediate << 2);
+}
+
+unsigned long arch_dest_reloc_offset(int addend)
+{
+ return addend;
+}
+
+bool arch_pc_relative_reloc(struct reloc *reloc)
+{
+ return false;
+}
+
+bool arch_callee_saved_reg(unsigned char reg)
+{
+ switch (reg) {
+ case CFI_RA:
+ case CFI_FP:
+ case CFI_S0 ... CFI_S8:
+ return true;
+ default:
+ return false;
+ }
+}
+
+int arch_decode_hint_reg(u8 sp_reg, int *base)
+{
+ switch (sp_reg) {
+ case ORC_REG_UNDEFINED:
+ *base = CFI_UNDEFINED;
+ break;
+ case ORC_REG_SP:
+ *base = CFI_SP;
+ break;
+ case ORC_REG_FP:
+ *base = CFI_FP;
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static bool is_loongarch(const struct elf *elf)
+{
+ if (elf->ehdr.e_machine == EM_LOONGARCH)
+ return true;
+
+ WARN("unexpected ELF machine type %d", elf->ehdr.e_machine);
+ return false;
+}
+
+#define ADD_OP(op) \
+ if (!(op = calloc(1, sizeof(*op)))) \
+ return -1; \
+ else for (*ops_list = op, ops_list = &op->next; op; op = NULL)
+
+static bool decode_insn_reg0i26_format(union loongarch_instruction inst,
+ struct instruction *insn)
+{
+ switch (inst.reg0i26_format.opcode) {
+ case b_op:
+ insn->type = INSN_JUMP_UNCONDITIONAL;
+ insn->immediate = sign_extend64(inst.reg0i26_format.immediate_h << 16 |
+ inst.reg0i26_format.immediate_l, 25);
+ break;
+ case bl_op:
+ insn->type = INSN_CALL;
+ insn->immediate = sign_extend64(inst.reg0i26_format.immediate_h << 16 |
+ inst.reg0i26_format.immediate_l, 25);
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static bool decode_insn_reg1i21_format(union loongarch_instruction inst,
+ struct instruction *insn)
+{
+ switch (inst.reg1i21_format.opcode) {
+ case beqz_op:
+ case bnez_op:
+ case bceqz_op:
+ insn->type = INSN_JUMP_CONDITIONAL;
+ insn->immediate = sign_extend64(inst.reg1i21_format.immediate_h << 16 |
+ inst.reg1i21_format.immediate_l, 20);
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static bool decode_insn_reg2i12_format(union loongarch_instruction inst,
+ struct instruction *insn,
+ struct stack_op **ops_list,
+ struct stack_op *op)
+{
+ switch (inst.reg2i12_format.opcode) {
+ case addid_op:
+ if ((inst.reg2i12_format.rd == CFI_SP) || (inst.reg2i12_format.rj == CFI_SP)) {
+ /* addi.d sp,sp,si12 or addi.d fp,sp,si12 */
+ insn->immediate = sign_extend64(inst.reg2i12_format.immediate, 11);
+ ADD_OP(op) {
+ op->src.type = OP_SRC_ADD;
+ op->src.reg = inst.reg2i12_format.rj;
+ op->src.offset = insn->immediate;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = inst.reg2i12_format.rd;
+ }
+ }
+ break;
+ case ldd_op:
+ if (inst.reg2i12_format.rj == CFI_SP) {
+ /* ld.d rd,sp,si12 */
+ insn->immediate = sign_extend64(inst.reg2i12_format.immediate, 11);
+ ADD_OP(op) {
+ op->src.type = OP_SRC_REG_INDIRECT;
+ op->src.reg = CFI_SP;
+ op->src.offset = insn->immediate;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = inst.reg2i12_format.rd;
+ }
+ }
+ break;
+ case std_op:
+ if (inst.reg2i12_format.rj == CFI_SP) {
+ /* st.d rd,sp,si12 */
+ insn->immediate = sign_extend64(inst.reg2i12_format.immediate, 11);
+ ADD_OP(op) {
+ op->src.type = OP_SRC_REG;
+ op->src.reg = inst.reg2i12_format.rd;
+ op->dest.type = OP_DEST_REG_INDIRECT;
+ op->dest.reg = CFI_SP;
+ op->dest.offset = insn->immediate;
+ }
+ }
+ break;
+ case andi_op:
+ if (inst.reg2i12_format.rd == 0 &&
+ inst.reg2i12_format.rj == 0 &&
+ inst.reg2i12_format.immediate == 0)
+ /* andi r0,r0,0 */
+ insn->type = INSN_NOP;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static bool decode_insn_reg2i14_format(union loongarch_instruction inst,
+ struct instruction *insn,
+ struct stack_op **ops_list,
+ struct stack_op *op)
+{
+ switch (inst.reg2i14_format.opcode) {
+ case ldptrd_op:
+ if (inst.reg2i14_format.rj == CFI_SP) {
+ /* ldptr.d rd,sp,si14 */
+ insn->immediate = sign_extend64(inst.reg2i14_format.immediate, 13);
+ ADD_OP(op) {
+ op->src.type = OP_SRC_REG_INDIRECT;
+ op->src.reg = CFI_SP;
+ op->src.offset = insn->immediate;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = inst.reg2i14_format.rd;
+ }
+ }
+ break;
+ case stptrd_op:
+ if (inst.reg2i14_format.rj == CFI_SP) {
+ /* stptr.d ra,sp,0 */
+ if (inst.reg2i14_format.rd == LOONGARCH_GPR_RA &&
+ inst.reg2i14_format.immediate == 0)
+ break;
+
+ /* stptr.d rd,sp,si14 */
+ insn->immediate = sign_extend64(inst.reg2i14_format.immediate, 13);
+ ADD_OP(op) {
+ op->src.type = OP_SRC_REG;
+ op->src.reg = inst.reg2i14_format.rd;
+ op->dest.type = OP_DEST_REG_INDIRECT;
+ op->dest.reg = CFI_SP;
+ op->dest.offset = insn->immediate;
+ }
+ }
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static bool decode_insn_reg2i16_format(union loongarch_instruction inst,
+ struct instruction *insn)
+{
+ switch (inst.reg2i16_format.opcode) {
+ case jirl_op:
+ if (inst.reg2i16_format.rd == 0 &&
+ inst.reg2i16_format.rj == CFI_RA &&
+ inst.reg2i16_format.immediate == 0) {
+ /* jirl r0,ra,0 */
+ insn->type = INSN_RETURN;
+ } else if (inst.reg2i16_format.rd == CFI_RA) {
+ /* jirl ra,rj,offs16 */
+ insn->type = INSN_CALL_DYNAMIC;
+ } else if (inst.reg2i16_format.rd == CFI_A0 &&
+ inst.reg2i16_format.immediate == 0) {
+ /*
+ * jirl a0,t0,0
+ * this is a special case in loongarch_suspend_enter,
+ * just treat it as a call instruction.
+ */
+ insn->type = INSN_CALL_DYNAMIC;
+ } else if (inst.reg2i16_format.rd == 0 &&
+ inst.reg2i16_format.immediate == 0) {
+ /* jirl r0,rj,0 */
+ insn->type = INSN_JUMP_DYNAMIC;
+ } else if (inst.reg2i16_format.rd == 0 &&
+ inst.reg2i16_format.immediate != 0) {
+ /*
+ * jirl r0,t0,12
+ * this is a rare case in JUMP_VIRT_ADDR,
+ * just ignore it due to it is harmless for tracing.
+ */
+ break;
+ } else {
+ /* jirl rd,rj,offs16 */
+ insn->type = INSN_JUMP_UNCONDITIONAL;
+ insn->immediate = sign_extend64(inst.reg2i16_format.immediate, 15);
+ }
+ break;
+ case beq_op:
+ case bne_op:
+ case blt_op:
+ case bge_op:
+ case bltu_op:
+ case bgeu_op:
+ insn->type = INSN_JUMP_CONDITIONAL;
+ insn->immediate = sign_extend64(inst.reg2i16_format.immediate, 15);
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
+ unsigned long offset, unsigned int maxlen,
+ struct instruction *insn)
+{
+ struct stack_op **ops_list = &insn->stack_ops;
+ const struct elf *elf = file->elf;
+ struct stack_op *op = NULL;
+ union loongarch_instruction inst;
+
+ if (!is_loongarch(elf))
+ return -1;
+
+ if (maxlen < LOONGARCH_INSN_SIZE)
+ return 0;
+
+ insn->len = LOONGARCH_INSN_SIZE;
+ insn->type = INSN_OTHER;
+ insn->immediate = 0;
+
+ inst = *(union loongarch_instruction *)(sec->data->d_buf + offset);
+
+ if (decode_insn_reg0i26_format(inst, insn))
+ return 0;
+ if (decode_insn_reg1i21_format(inst, insn))
+ return 0;
+ if (decode_insn_reg2i12_format(inst, insn, ops_list, op))
+ return 0;
+ if (decode_insn_reg2i14_format(inst, insn, ops_list, op))
+ return 0;
+ if (decode_insn_reg2i16_format(inst, insn))
+ return 0;
+
+ if (inst.word == 0)
+ insn->type = INSN_NOP;
+ else if (inst.reg0i15_format.opcode == break_op) {
+ /* break */
+ insn->type = INSN_BUG;
+ } else if (inst.reg2_format.opcode == ertn_op) {
+ /* ertn */
+ insn->type = INSN_RETURN;
+ }
+
+ return 0;
+}
+
+const char *arch_nop_insn(int len)
+{
+ static u32 nop;
+
+ if (len != LOONGARCH_INSN_SIZE)
+ WARN("invalid NOP size: %d\n", len);
+
+ nop = LOONGARCH_INSN_NOP;
+
+ return (const char *)&nop;
+}
+
+const char *arch_ret_insn(int len)
+{
+ static u32 ret;
+
+ if (len != LOONGARCH_INSN_SIZE)
+ WARN("invalid RET size: %d\n", len);
+
+ emit_jirl((union loongarch_instruction *)&ret, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0);
+
+ return (const char *)&ret;
+}
+
+void arch_initial_func_cfi_state(struct cfi_init_state *state)
+{
+ int i;
+
+ for (i = 0; i < CFI_NUM_REGS; i++) {
+ state->regs[i].base = CFI_UNDEFINED;
+ state->regs[i].offset = 0;
+ }
+
+ /* initial CFA (call frame address) */
+ state->cfa.base = CFI_SP;
+ state->cfa.offset = 0;
+}
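
arch_jump_destination() above converts instruction-granular immediates to byte offsets. A worked example for a backward branch: a "b" at 0x1000 whose 26-bit immediate decodes to -2 jumps back two instructions:

#include <stdio.h>
#include <stdint.h>

static inline int64_t sign_extend64(uint64_t value, int index)
{
	uint8_t shift = 63 - index;
	return (int64_t)(value << shift) >> shift;
}

int main(void)
{
	uint32_t imm_h = 0x3ff, imm_l = 0xfffe;	/* hi:10 / lo:16 split */
	int64_t imm = sign_extend64((uint64_t)imm_h << 16 | imm_l, 25);
	unsigned long offset = 0x1000;

	printf("imm=%lld dest=%#lx\n", (long long)imm,
	       offset + (imm << 2));	/* imm=-2 dest=0xff8 */
	return 0;
}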
diff --git a/tools/objtool/arch/loongarch/include/arch/cfi_regs.h b/tools/objtool/arch/loongarch/include/arch/cfi_regs.h
new file mode 100644
index 00000000000000..d183cc8f43bf3a
--- /dev/null
+++ b/tools/objtool/arch/loongarch/include/arch/cfi_regs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _OBJTOOL_ARCH_CFI_REGS_H
+#define _OBJTOOL_ARCH_CFI_REGS_H
+
+#define CFI_RA 1
+#define CFI_SP 3
+#define CFI_A0 4
+#define CFI_FP 22
+#define CFI_S0 23
+#define CFI_S1 24
+#define CFI_S2 25
+#define CFI_S3 26
+#define CFI_S4 27
+#define CFI_S5 28
+#define CFI_S6 29
+#define CFI_S7 30
+#define CFI_S8 31
+#define CFI_NUM_REGS 32
+
+#define CFI_BP CFI_FP
+
+#endif /* _OBJTOOL_ARCH_CFI_REGS_H */
diff --git a/tools/objtool/arch/loongarch/include/arch/elf.h b/tools/objtool/arch/loongarch/include/arch/elf.h
new file mode 100644
index 00000000000000..9623d663220eff
--- /dev/null
+++ b/tools/objtool/arch/loongarch/include/arch/elf.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _OBJTOOL_ARCH_ELF_H
+#define _OBJTOOL_ARCH_ELF_H
+
+/*
+ * See the following link for more info about ELF Relocation types:
+ * https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html#_relocations
+ */
+#ifndef R_LARCH_NONE
+#define R_LARCH_NONE 0
+#endif
+#ifndef R_LARCH_32
+#define R_LARCH_32 1
+#endif
+#ifndef R_LARCH_64
+#define R_LARCH_64 2
+#endif
+#ifndef R_LARCH_32_PCREL
+#define R_LARCH_32_PCREL 99
+#endif
+
+#define R_NONE R_LARCH_NONE
+#define R_ABS32 R_LARCH_32
+#define R_ABS64 R_LARCH_64
+#define R_DATA32 R_LARCH_32_PCREL
+#define R_DATA64 R_LARCH_32_PCREL
+#define R_TEXT32 R_LARCH_32_PCREL
+#define R_TEXT64 R_LARCH_32_PCREL
+
+#endif /* _OBJTOOL_ARCH_ELF_H */
diff --git a/tools/objtool/arch/loongarch/include/arch/special.h b/tools/objtool/arch/loongarch/include/arch/special.h
new file mode 100644
index 00000000000000..35fc979b550ab5
--- /dev/null
+++ b/tools/objtool/arch/loongarch/include/arch/special.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _OBJTOOL_ARCH_SPECIAL_H
+#define _OBJTOOL_ARCH_SPECIAL_H
+
+/*
+ * See more info about struct exception_table_entry
+ * in arch/loongarch/include/asm/extable.h
+ */
+#define EX_ENTRY_SIZE 12
+#define EX_ORIG_OFFSET 0
+#define EX_NEW_OFFSET 4
+
+/*
+ * See more info about struct jump_entry
+ * in include/linux/jump_label.h
+ */
+#define JUMP_ENTRY_SIZE 16
+#define JUMP_ORIG_OFFSET 0
+#define JUMP_NEW_OFFSET 4
+#define JUMP_KEY_OFFSET 8
+
+/*
+ * See more info about struct alt_instr
+ * in arch/loongarch/include/asm/alternative.h
+ */
+#define ALT_ENTRY_SIZE 12
+#define ALT_ORIG_OFFSET 0
+#define ALT_NEW_OFFSET 4
+#define ALT_FEATURE_OFFSET 8
+#define ALT_ORIG_LEN_OFFSET 10
+#define ALT_NEW_LEN_OFFSET 11
+
+#endif /* _OBJTOOL_ARCH_SPECIAL_H */
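
The JUMP_* offsets above mirror the generic relative jump-table entry; for reference, with CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE the layout in include/linux/jump_label.h is:

struct jump_entry {
	s32 code;	/* JUMP_ORIG_OFFSET 0 */
	s32 target;	/* JUMP_NEW_OFFSET  4 */
	long key;	/* JUMP_KEY_OFFSET  8; 16 bytes total on 64-bit */
};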
diff --git a/tools/objtool/arch/loongarch/orc.c b/tools/objtool/arch/loongarch/orc.c
new file mode 100644
index 00000000000000..873536d009d91d
--- /dev/null
+++ b/tools/objtool/arch/loongarch/orc.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/objtool_types.h>
+#include <asm/orc_types.h>
+
+#include <objtool/check.h>
+#include <objtool/orc.h>
+#include <objtool/warn.h>
+#include <objtool/endianness.h>
+
+int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruction *insn)
+{
+ struct cfi_reg *fp = &cfi->regs[CFI_FP];
+ struct cfi_reg *ra = &cfi->regs[CFI_RA];
+
+ memset(orc, 0, sizeof(*orc));
+
+ if (!cfi) {
+ /*
+ * This is usually either unreachable nops/traps (which don't
+ * trigger unreachable instruction warnings), or
+ * STACK_FRAME_NON_STANDARD functions.
+ */
+ orc->type = ORC_TYPE_UNDEFINED;
+ return 0;
+ }
+
+ switch (cfi->type) {
+ case UNWIND_HINT_TYPE_UNDEFINED:
+ orc->type = ORC_TYPE_UNDEFINED;
+ return 0;
+ case UNWIND_HINT_TYPE_END_OF_STACK:
+ orc->type = ORC_TYPE_END_OF_STACK;
+ return 0;
+ case UNWIND_HINT_TYPE_CALL:
+ orc->type = ORC_TYPE_CALL;
+ break;
+ case UNWIND_HINT_TYPE_REGS:
+ orc->type = ORC_TYPE_REGS;
+ break;
+ case UNWIND_HINT_TYPE_REGS_PARTIAL:
+ orc->type = ORC_TYPE_REGS_PARTIAL;
+ break;
+ default:
+ WARN_INSN(insn, "unknown unwind hint type %d", cfi->type);
+ return -1;
+ }
+
+ orc->signal = cfi->signal;
+
+ switch (cfi->cfa.base) {
+ case CFI_SP:
+ orc->sp_reg = ORC_REG_SP;
+ break;
+ case CFI_FP:
+ orc->sp_reg = ORC_REG_FP;
+ break;
+ default:
+ WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base);
+ return -1;
+ }
+
+ switch (fp->base) {
+ case CFI_UNDEFINED:
+ orc->fp_reg = ORC_REG_UNDEFINED;
+ orc->fp_offset = 0;
+ break;
+ case CFI_CFA:
+ orc->fp_reg = ORC_REG_PREV_SP;
+ orc->fp_offset = fp->offset;
+ break;
+ case CFI_FP:
+ orc->fp_reg = ORC_REG_FP;
+ break;
+ default:
+ WARN_INSN(insn, "unknown FP base reg %d", fp->base);
+ return -1;
+ }
+
+ switch (ra->base) {
+ case CFI_UNDEFINED:
+ orc->ra_reg = ORC_REG_UNDEFINED;
+ orc->ra_offset = 0;
+ break;
+ case CFI_CFA:
+ orc->ra_reg = ORC_REG_PREV_SP;
+ orc->ra_offset = ra->offset;
+ break;
+ case CFI_FP:
+ orc->ra_reg = ORC_REG_FP;
+ break;
+ default:
+ WARN_INSN(insn, "unknown RA base reg %d", ra->base);
+ return -1;
+ }
+
+ orc->sp_offset = cfi->cfa.offset;
+
+ return 0;
+}
+
+int write_orc_entry(struct elf *elf, struct section *orc_sec,
+ struct section *ip_sec, unsigned int idx,
+ struct section *insn_sec, unsigned long insn_off,
+ struct orc_entry *o)
+{
+ struct orc_entry *orc;
+
+ /* populate ORC data */
+ orc = (struct orc_entry *)orc_sec->data->d_buf + idx;
+ memcpy(orc, o, sizeof(*orc));
+
+ /* populate reloc for ip */
+ if (!elf_init_reloc_text_sym(elf, ip_sec, idx * sizeof(int), idx,
+ insn_sec, insn_off))
+ return -1;
+
+ return 0;
+}
+
+static const char *reg_name(unsigned int reg)
+{
+ switch (reg) {
+ case ORC_REG_SP:
+ return "sp";
+ case ORC_REG_FP:
+ return "fp";
+ case ORC_REG_PREV_SP:
+ return "prevsp";
+ default:
+ return "?";
+ }
+}
+
+static const char *orc_type_name(unsigned int type)
+{
+ switch (type) {
+ case UNWIND_HINT_TYPE_CALL:
+ return "call";
+ case UNWIND_HINT_TYPE_REGS:
+ return "regs";
+ case UNWIND_HINT_TYPE_REGS_PARTIAL:
+ return "regs (partial)";
+ default:
+ return "?";
+ }
+}
+
+static void print_reg(unsigned int reg, int offset)
+{
+ if (reg == ORC_REG_UNDEFINED)
+ printf(" (und) ");
+ else
+ printf("%s + %3d", reg_name(reg), offset);
+
+}
+
+void orc_print_dump(struct elf *dummy_elf, struct orc_entry *orc, int i)
+{
+ printf("type:%s", orc_type_name(orc[i].type));
+
+ printf(" sp:");
+ print_reg(orc[i].sp_reg, orc[i].sp_offset);
+
+ printf(" fp:");
+ print_reg(orc[i].fp_reg, orc[i].fp_offset);
+
+ printf(" ra:");
+ print_reg(orc[i].ra_reg, orc[i].ra_offset);
+
+ printf(" signal:%d\n", orc[i].signal);
+}
diff --git a/tools/objtool/arch/loongarch/special.c b/tools/objtool/arch/loongarch/special.c
new file mode 100644
index 00000000000000..9bba1e9318e0ba
--- /dev/null
+++ b/tools/objtool/arch/loongarch/special.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <objtool/special.h>
+
+bool arch_support_alt_relocation(struct special_alt *special_alt,
+ struct instruction *insn,
+ struct reloc *reloc)
+{
+ return false;
+}
+
+struct reloc *arch_find_switch_table(struct objtool_file *file,
+ struct instruction *insn)
+{
+ return NULL;
+}
diff --git a/tools/objtool/arch/x86/Build b/tools/objtool/arch/x86/Build
index 9f7869b5c5e0ca..3dedb2fd8f3a0c 100644
--- a/tools/objtool/arch/x86/Build
+++ b/tools/objtool/arch/x86/Build
@@ -1,5 +1,6 @@
objtool-y += special.o
objtool-y += decode.o
+objtool-y += orc.o
inat_tables_script = ../arch/x86/tools/gen-insn-attr-x86.awk
inat_tables_maps = ../arch/x86/lib/x86-opcode-map.txt
diff --git a/tools/objtool/arch/x86/orc.c b/tools/objtool/arch/x86/orc.c
new file mode 100644
index 00000000000000..b6cd943e87f936
--- /dev/null
+++ b/tools/objtool/arch/x86/orc.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/objtool_types.h>
+#include <asm/orc_types.h>
+
+#include <objtool/check.h>
+#include <objtool/orc.h>
+#include <objtool/warn.h>
+#include <objtool/endianness.h>
+
+int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruction *insn)
+{
+ struct cfi_reg *bp = &cfi->regs[CFI_BP];
+
+ memset(orc, 0, sizeof(*orc));
+
+ if (!cfi) {
+ /*
+ * This is usually either unreachable nops/traps (which don't
+ * trigger unreachable instruction warnings), or
+ * STACK_FRAME_NON_STANDARD functions.
+ */
+ orc->type = ORC_TYPE_UNDEFINED;
+ return 0;
+ }
+
+ switch (cfi->type) {
+ case UNWIND_HINT_TYPE_UNDEFINED:
+ orc->type = ORC_TYPE_UNDEFINED;
+ return 0;
+ case UNWIND_HINT_TYPE_END_OF_STACK:
+ orc->type = ORC_TYPE_END_OF_STACK;
+ return 0;
+ case UNWIND_HINT_TYPE_CALL:
+ orc->type = ORC_TYPE_CALL;
+ break;
+ case UNWIND_HINT_TYPE_REGS:
+ orc->type = ORC_TYPE_REGS;
+ break;
+ case UNWIND_HINT_TYPE_REGS_PARTIAL:
+ orc->type = ORC_TYPE_REGS_PARTIAL;
+ break;
+ default:
+ WARN_INSN(insn, "unknown unwind hint type %d", cfi->type);
+ return -1;
+ }
+
+ orc->signal = cfi->signal;
+
+ switch (cfi->cfa.base) {
+ case CFI_SP:
+ orc->sp_reg = ORC_REG_SP;
+ break;
+ case CFI_SP_INDIRECT:
+ orc->sp_reg = ORC_REG_SP_INDIRECT;
+ break;
+ case CFI_BP:
+ orc->sp_reg = ORC_REG_BP;
+ break;
+ case CFI_BP_INDIRECT:
+ orc->sp_reg = ORC_REG_BP_INDIRECT;
+ break;
+ case CFI_R10:
+ orc->sp_reg = ORC_REG_R10;
+ break;
+ case CFI_R13:
+ orc->sp_reg = ORC_REG_R13;
+ break;
+ case CFI_DI:
+ orc->sp_reg = ORC_REG_DI;
+ break;
+ case CFI_DX:
+ orc->sp_reg = ORC_REG_DX;
+ break;
+ default:
+ WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base);
+ return -1;
+ }
+
+ switch (bp->base) {
+ case CFI_UNDEFINED:
+ orc->bp_reg = ORC_REG_UNDEFINED;
+ break;
+ case CFI_CFA:
+ orc->bp_reg = ORC_REG_PREV_SP;
+ break;
+ case CFI_BP:
+ orc->bp_reg = ORC_REG_BP;
+ break;
+ default:
+ WARN_INSN(insn, "unknown BP base reg %d", bp->base);
+ return -1;
+ }
+
+ orc->sp_offset = cfi->cfa.offset;
+ orc->bp_offset = bp->offset;
+
+ return 0;
+}
+
+int write_orc_entry(struct elf *elf, struct section *orc_sec,
+ struct section *ip_sec, unsigned int idx,
+ struct section *insn_sec, unsigned long insn_off,
+ struct orc_entry *o)
+{
+ struct orc_entry *orc;
+
+ /* populate ORC data */
+ orc = (struct orc_entry *)orc_sec->data->d_buf + idx;
+ memcpy(orc, o, sizeof(*orc));
+ orc->sp_offset = bswap_if_needed(elf, orc->sp_offset);
+ orc->bp_offset = bswap_if_needed(elf, orc->bp_offset);
+
+ /* populate reloc for ip */
+ if (!elf_init_reloc_text_sym(elf, ip_sec, idx * sizeof(int), idx,
+ insn_sec, insn_off))
+ return -1;
+
+ return 0;
+}
+
+static const char *reg_name(unsigned int reg)
+{
+ switch (reg) {
+ case ORC_REG_PREV_SP:
+ return "prevsp";
+ case ORC_REG_DX:
+ return "dx";
+ case ORC_REG_DI:
+ return "di";
+ case ORC_REG_BP:
+ return "bp";
+ case ORC_REG_SP:
+ return "sp";
+ case ORC_REG_R10:
+ return "r10";
+ case ORC_REG_R13:
+ return "r13";
+ case ORC_REG_BP_INDIRECT:
+ return "bp(ind)";
+ case ORC_REG_SP_INDIRECT:
+ return "sp(ind)";
+ default:
+ return "?";
+ }
+}
+
+static const char *orc_type_name(unsigned int type)
+{
+ switch (type) {
+ case ORC_TYPE_UNDEFINED:
+ return "(und)";
+ case ORC_TYPE_END_OF_STACK:
+ return "end";
+ case ORC_TYPE_CALL:
+ return "call";
+ case ORC_TYPE_REGS:
+ return "regs";
+ case ORC_TYPE_REGS_PARTIAL:
+ return "regs (partial)";
+ default:
+ return "?";
+ }
+}
+
+static void print_reg(unsigned int reg, int offset)
+{
+ if (reg == ORC_REG_BP_INDIRECT)
+ printf("(bp%+d)", offset);
+ else if (reg == ORC_REG_SP_INDIRECT)
+ printf("(sp)%+d", offset);
+ else if (reg == ORC_REG_UNDEFINED)
+ printf("(und)");
+ else
+ printf("%s%+d", reg_name(reg), offset);
+}
+
+void orc_print_dump(struct elf *dummy_elf, struct orc_entry *orc, int i)
+{
+ printf("type:%s", orc_type_name(orc[i].type));
+
+ printf(" sp:");
+ print_reg(orc[i].sp_reg, bswap_if_needed(dummy_elf, orc[i].sp_offset));
+
+ printf(" bp:");
+ print_reg(orc[i].bp_reg, bswap_if_needed(dummy_elf, orc[i].bp_offset));
+
+ printf(" signal:%d\n", orc[i].signal);
+}
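
For the common frame-pointer case, the mapping in init_orc_entry() above works out to: CFA based on BP plus a small constant, with the caller's BP saved at a fixed offset below the CFA. A minimal self-contained sketch of the resulting entry (the enum and struct here are simplified stand-ins for the real objtool/ORC types, reduced to just the fields used above):

    #include <stdio.h>

    /* Simplified stand-ins for the ORC register encodings used above. */
    enum { ORC_REG_UNDEFINED, ORC_REG_PREV_SP, ORC_REG_BP, ORC_REG_SP };

    struct orc_entry_sketch {
        int sp_reg;     /* register the CFA is based on */
        int sp_offset;  /* CFA = sp_reg + sp_offset */
        int bp_reg;     /* where the saved BP lives */
        int bp_offset;  /* saved BP = CFA + bp_offset */
    };

    int main(void)
    {
        /*
         * After "push %rbp; mov %rsp,%rbp" the CFA is rbp+16 and the
         * caller's rbp sits at CFA-16, so init_orc_entry() would fill in:
         */
        struct orc_entry_sketch orc = {
            .sp_reg = ORC_REG_BP,      .sp_offset = 16,
            .bp_reg = ORC_REG_PREV_SP, .bp_offset = -16,
        };

        printf("cfa = bp%+d, saved bp at cfa%+d\n",
               orc.sp_offset, orc.bp_offset);
        return 0;
    }
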
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index eb7e12ebc1d06a..0b10ad00866867 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -20,6 +20,7 @@
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>
+#include <linux/string.h>
struct alternative {
struct alternative *next;
@@ -584,7 +585,7 @@ static int add_dead_ends(struct objtool_file *file)
struct section *rsec;
struct reloc *reloc;
struct instruction *insn;
- s64 addend;
+ unsigned long offset;
/*
* Check for manually annotated dead ends.
@@ -594,27 +595,28 @@ static int add_dead_ends(struct objtool_file *file)
goto reachable;
for_each_reloc(rsec, reloc) {
-
- if (reloc->sym->type != STT_SECTION) {
+ if (reloc->sym->type == STT_SECTION) {
+ offset = reloc_addend(reloc);
+ } else if (reloc->sym->local_label) {
+ offset = reloc->sym->offset;
+ } else {
WARN("unexpected relocation symbol type in %s", rsec->name);
return -1;
}
- addend = reloc_addend(reloc);
-
- insn = find_insn(file, reloc->sym->sec, addend);
+ insn = find_insn(file, reloc->sym->sec, offset);
if (insn)
insn = prev_insn_same_sec(file, insn);
- else if (addend == reloc->sym->sec->sh.sh_size) {
+ else if (offset == reloc->sym->sec->sh.sh_size) {
insn = find_last_insn(file, reloc->sym->sec);
if (!insn) {
WARN("can't find unreachable insn at %s+0x%" PRIx64,
- reloc->sym->sec->name, addend);
+ reloc->sym->sec->name, offset);
return -1;
}
} else {
WARN("can't find unreachable insn at %s+0x%" PRIx64,
- reloc->sym->sec->name, addend);
+ reloc->sym->sec->name, offset);
return -1;
}
@@ -633,27 +635,28 @@ reachable:
return 0;
for_each_reloc(rsec, reloc) {
-
- if (reloc->sym->type != STT_SECTION) {
+ if (reloc->sym->type == STT_SECTION) {
+ offset = reloc_addend(reloc);
+ } else if (reloc->sym->local_label) {
+ offset = reloc->sym->offset;
+ } else {
WARN("unexpected relocation symbol type in %s", rsec->name);
return -1;
}
- addend = reloc_addend(reloc);
-
- insn = find_insn(file, reloc->sym->sec, addend);
+ insn = find_insn(file, reloc->sym->sec, offset);
if (insn)
insn = prev_insn_same_sec(file, insn);
- else if (addend == reloc->sym->sec->sh.sh_size) {
+ else if (offset == reloc->sym->sec->sh.sh_size) {
insn = find_last_insn(file, reloc->sym->sec);
if (!insn) {
WARN("can't find reachable insn at %s+0x%" PRIx64,
- reloc->sym->sec->name, addend);
+ reloc->sym->sec->name, offset);
return -1;
}
} else {
WARN("can't find reachable insn at %s+0x%" PRIx64,
- reloc->sym->sec->name, addend);
+ reloc->sym->sec->name, offset);
return -1;
}
@@ -2224,6 +2227,7 @@ static int read_unwind_hints(struct objtool_file *file)
struct unwind_hint *hint;
struct instruction *insn;
struct reloc *reloc;
+ unsigned long offset;
int i;
sec = find_section_by_name(file->elf, ".discard.unwind_hints");
@@ -2251,7 +2255,16 @@ static int read_unwind_hints(struct objtool_file *file)
return -1;
}
- insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc));
+ if (reloc->sym->type == STT_SECTION) {
+ offset = reloc_addend(reloc);
+ } else if (reloc->sym->local_label) {
+ offset = reloc->sym->offset;
+ } else {
+ WARN("unexpected relocation symbol type in %s", sec->rsec->name);
+ return -1;
+ }
+
+ insn = find_insn(file, reloc->sym->sec, offset);
if (!insn) {
WARN("can't find insn for unwind_hints[%d]", i);
return -1;
@@ -2522,6 +2535,9 @@ static int classify_symbols(struct objtool_file *file)
struct symbol *func;
for_each_sym(file, func) {
+ if (func->type == STT_NOTYPE && strstarts(func->name, ".L"))
+ func->local_label = true;
+
if (func->bind != STB_GLOBAL)
continue;
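
The check.c changes above let annotation relocations refer to assembler local labels (STT_NOTYPE symbols whose names start with ".L"), which the LoongArch toolchain this series targets can emit in place of section+addend references. In that case the instruction offset comes from the symbol itself rather than from the reloc addend. A minimal sketch of the selection rule (struct fields are assumptions reduced from objtool's reloc/symbol types):

    #include <stdbool.h>
    #include <stdio.h>

    #define STT_SECTION 3

    struct sym_sketch {
        int type;
        bool local_label;       /* STT_NOTYPE and name starts with ".L" */
        unsigned long offset;   /* symbol's offset within its section */
    };

    struct reloc_sketch {
        struct sym_sketch *sym;
        long addend;
    };

    /* Returns the instruction offset a reloc refers to, or -1 on error. */
    static long reloc_insn_offset(struct reloc_sketch *reloc)
    {
        if (reloc->sym->type == STT_SECTION)
            return reloc->addend;           /* section + addend */
        if (reloc->sym->local_label)
            return reloc->sym->offset;      /* ".Lfoo" carries the offset */
        return -1;                          /* unexpected symbol type */
    }

    int main(void)
    {
        struct sym_sketch label = { .local_label = true, .offset = 0x40 };
        struct reloc_sketch r = { .sym = &label, .addend = 0 };

        printf("insn offset = 0x%lx\n",
               (unsigned long)reloc_insn_offset(&r));
        return 0;
    }
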
diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h
index 9f71e988eca45f..2b8a69de4db871 100644
--- a/tools/objtool/include/objtool/elf.h
+++ b/tools/objtool/include/objtool/elf.h
@@ -67,6 +67,7 @@ struct symbol {
u8 profiling_func : 1;
u8 warned : 1;
u8 embedded_insn : 1;
+ u8 local_label : 1;
struct list_head pv_target;
struct reloc *relocs;
};
diff --git a/tools/objtool/include/objtool/orc.h b/tools/objtool/include/objtool/orc.h
new file mode 100644
index 00000000000000..15a32def1071d0
--- /dev/null
+++ b/tools/objtool/include/objtool/orc.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _OBJTOOL_ORC_H
+#define _OBJTOOL_ORC_H
+
+#include <objtool/check.h>
+
+int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruction *insn);
+void orc_print_dump(struct elf *dummy_elf, struct orc_entry *orc, int i);
+int write_orc_entry(struct elf *elf, struct section *orc_sec,
+ struct section *ip_sec, unsigned int idx,
+ struct section *insn_sec, unsigned long insn_off,
+ struct orc_entry *o);
+
+#endif /* _OBJTOOL_ORC_H */
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
index 0e183bb1c72051..a62247efb64f2e 100644
--- a/tools/objtool/orc_dump.c
+++ b/tools/objtool/orc_dump.c
@@ -6,65 +6,10 @@
#include <unistd.h>
#include <asm/orc_types.h>
#include <objtool/objtool.h>
+#include <objtool/orc.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>
-static const char *reg_name(unsigned int reg)
-{
- switch (reg) {
- case ORC_REG_PREV_SP:
- return "prevsp";
- case ORC_REG_DX:
- return "dx";
- case ORC_REG_DI:
- return "di";
- case ORC_REG_BP:
- return "bp";
- case ORC_REG_SP:
- return "sp";
- case ORC_REG_R10:
- return "r10";
- case ORC_REG_R13:
- return "r13";
- case ORC_REG_BP_INDIRECT:
- return "bp(ind)";
- case ORC_REG_SP_INDIRECT:
- return "sp(ind)";
- default:
- return "?";
- }
-}
-
-static const char *orc_type_name(unsigned int type)
-{
- switch (type) {
- case ORC_TYPE_UNDEFINED:
- return "(und)";
- case ORC_TYPE_END_OF_STACK:
- return "end";
- case ORC_TYPE_CALL:
- return "call";
- case ORC_TYPE_REGS:
- return "regs";
- case ORC_TYPE_REGS_PARTIAL:
- return "regs (partial)";
- default:
- return "?";
- }
-}
-
-static void print_reg(unsigned int reg, int offset)
-{
- if (reg == ORC_REG_BP_INDIRECT)
- printf("(bp%+d)", offset);
- else if (reg == ORC_REG_SP_INDIRECT)
- printf("(sp)%+d", offset);
- else if (reg == ORC_REG_UNDEFINED)
- printf("(und)");
- else
- printf("%s%+d", reg_name(reg), offset);
-}
-
int orc_dump(const char *_objname)
{
int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0;
@@ -205,17 +150,7 @@ int orc_dump(const char *_objname)
printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i]));
}
- printf("type:%s", orc_type_name(orc[i].type));
-
- printf(" sp:");
-
- print_reg(orc[i].sp_reg, bswap_if_needed(&dummy_elf, orc[i].sp_offset));
-
- printf(" bp:");
-
- print_reg(orc[i].bp_reg, bswap_if_needed(&dummy_elf, orc[i].bp_offset));
-
- printf(" signal:%d\n", orc[i].signal);
+ orc_print_dump(&dummy_elf, orc, i);
}
elf_end(elf);
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
index bae34390886710..922e6aac7cea7c 100644
--- a/tools/objtool/orc_gen.c
+++ b/tools/objtool/orc_gen.c
@@ -10,121 +10,10 @@
#include <asm/orc_types.h>
#include <objtool/check.h>
+#include <objtool/orc.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>
-static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi,
- struct instruction *insn)
-{
- struct cfi_reg *bp = &cfi->regs[CFI_BP];
-
- memset(orc, 0, sizeof(*orc));
-
- if (!cfi) {
- /*
- * This is usually either unreachable nops/traps (which don't
- * trigger unreachable instruction warnings), or
- * STACK_FRAME_NON_STANDARD functions.
- */
- orc->type = ORC_TYPE_UNDEFINED;
- return 0;
- }
-
- switch (cfi->type) {
- case UNWIND_HINT_TYPE_UNDEFINED:
- orc->type = ORC_TYPE_UNDEFINED;
- return 0;
- case UNWIND_HINT_TYPE_END_OF_STACK:
- orc->type = ORC_TYPE_END_OF_STACK;
- return 0;
- case UNWIND_HINT_TYPE_CALL:
- orc->type = ORC_TYPE_CALL;
- break;
- case UNWIND_HINT_TYPE_REGS:
- orc->type = ORC_TYPE_REGS;
- break;
- case UNWIND_HINT_TYPE_REGS_PARTIAL:
- orc->type = ORC_TYPE_REGS_PARTIAL;
- break;
- default:
- WARN_INSN(insn, "unknown unwind hint type %d", cfi->type);
- return -1;
- }
-
- orc->signal = cfi->signal;
-
- switch (cfi->cfa.base) {
- case CFI_SP:
- orc->sp_reg = ORC_REG_SP;
- break;
- case CFI_SP_INDIRECT:
- orc->sp_reg = ORC_REG_SP_INDIRECT;
- break;
- case CFI_BP:
- orc->sp_reg = ORC_REG_BP;
- break;
- case CFI_BP_INDIRECT:
- orc->sp_reg = ORC_REG_BP_INDIRECT;
- break;
- case CFI_R10:
- orc->sp_reg = ORC_REG_R10;
- break;
- case CFI_R13:
- orc->sp_reg = ORC_REG_R13;
- break;
- case CFI_DI:
- orc->sp_reg = ORC_REG_DI;
- break;
- case CFI_DX:
- orc->sp_reg = ORC_REG_DX;
- break;
- default:
- WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base);
- return -1;
- }
-
- switch (bp->base) {
- case CFI_UNDEFINED:
- orc->bp_reg = ORC_REG_UNDEFINED;
- break;
- case CFI_CFA:
- orc->bp_reg = ORC_REG_PREV_SP;
- break;
- case CFI_BP:
- orc->bp_reg = ORC_REG_BP;
- break;
- default:
- WARN_INSN(insn, "unknown BP base reg %d", bp->base);
- return -1;
- }
-
- orc->sp_offset = cfi->cfa.offset;
- orc->bp_offset = bp->offset;
-
- return 0;
-}
-
-static int write_orc_entry(struct elf *elf, struct section *orc_sec,
- struct section *ip_sec, unsigned int idx,
- struct section *insn_sec, unsigned long insn_off,
- struct orc_entry *o)
-{
- struct orc_entry *orc;
-
- /* populate ORC data */
- orc = (struct orc_entry *)orc_sec->data->d_buf + idx;
- memcpy(orc, o, sizeof(*orc));
- orc->sp_offset = bswap_if_needed(elf, orc->sp_offset);
- orc->bp_offset = bswap_if_needed(elf, orc->bp_offset);
-
- /* populate reloc for ip */
- if (!elf_init_reloc_text_sym(elf, ip_sec, idx * sizeof(int), idx,
- insn_sec, insn_off))
- return -1;
-
- return 0;
-}
-
struct orc_list_entry {
struct list_head list;
struct orc_entry orc;
diff --git a/tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json b/tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json
new file mode 100644
index 00000000000000..9b4a032186a7b1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json
@@ -0,0 +1,68 @@
+[
+ {
+ "ArchStdEvent": "FW_MISALIGNED_LOAD"
+ },
+ {
+ "ArchStdEvent": "FW_MISALIGNED_STORE"
+ },
+ {
+ "ArchStdEvent": "FW_ACCESS_LOAD"
+ },
+ {
+ "ArchStdEvent": "FW_ACCESS_STORE"
+ },
+ {
+ "ArchStdEvent": "FW_ILLEGAL_INSN"
+ },
+ {
+ "ArchStdEvent": "FW_SET_TIMER"
+ },
+ {
+ "ArchStdEvent": "FW_IPI_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_IPI_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_FENCE_I_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_FENCE_I_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_GVMA_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_GVMA_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_GVMA_VMID_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_GVMA_VMID_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_VVMA_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_VVMA_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_VVMA_ASID_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_VVMA_ASID_RECEIVED"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/riscv/andes/ax45/instructions.json b/tools/perf/pmu-events/arch/riscv/andes/ax45/instructions.json
new file mode 100644
index 00000000000000..713a08c1a40f8a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/riscv/andes/ax45/instructions.json
@@ -0,0 +1,127 @@
+[
+ {
+ "EventCode": "0x10",
+ "EventName": "cycle_count",
+ "BriefDescription": "Cycle count"
+ },
+ {
+ "EventCode": "0x20",
+ "EventName": "inst_count",
+ "BriefDescription": "Retired instruction count"
+ },
+ {
+ "EventCode": "0x30",
+ "EventName": "int_load_inst",
+ "BriefDescription": "Integer load instruction count"
+ },
+ {
+ "EventCode": "0x40",
+ "EventName": "int_store_inst",
+ "BriefDescription": "Integer store instruction count"
+ },
+ {
+ "EventCode": "0x50",
+ "EventName": "atomic_inst",
+ "BriefDescription": "Atomic instruction count"
+ },
+ {
+ "EventCode": "0x60",
+ "EventName": "sys_inst",
+ "BriefDescription": "System instruction count"
+ },
+ {
+ "EventCode": "0x70",
+ "EventName": "int_compute_inst",
+ "BriefDescription": "Integer computational instruction count"
+ },
+ {
+ "EventCode": "0x80",
+ "EventName": "condition_br",
+ "BriefDescription": "Conditional branch instruction count"
+ },
+ {
+ "EventCode": "0x90",
+ "EventName": "taken_condition_br",
+ "BriefDescription": "Taken conditional branch instruction count"
+ },
+ {
+ "EventCode": "0xA0",
+ "EventName": "jal_inst",
+ "BriefDescription": "JAL instruction count"
+ },
+ {
+ "EventCode": "0xB0",
+ "EventName": "jalr_inst",
+ "BriefDescription": "JALR instruction count"
+ },
+ {
+ "EventCode": "0xC0",
+ "EventName": "ret_inst",
+ "BriefDescription": "Return instruction count"
+ },
+ {
+ "EventCode": "0xD0",
+ "EventName": "control_trans_inst",
+ "BriefDescription": "Control transfer instruction count"
+ },
+ {
+ "EventCode": "0xE0",
+ "EventName": "ex9_inst",
+ "BriefDescription": "EXEC.IT instruction count"
+ },
+ {
+ "EventCode": "0xF0",
+ "EventName": "int_mul_inst",
+ "BriefDescription": "Integer multiplication instruction count"
+ },
+ {
+ "EventCode": "0x100",
+ "EventName": "int_div_rem_inst",
+ "BriefDescription": "Integer division/remainder instruction count"
+ },
+ {
+ "EventCode": "0x110",
+ "EventName": "float_load_inst",
+ "BriefDescription": "Floating-point load instruction count"
+ },
+ {
+ "EventCode": "0x120",
+ "EventName": "float_store_inst",
+ "BriefDescription": "Floating-point store instruction count"
+ },
+ {
+ "EventCode": "0x130",
+ "EventName": "float_add_sub_inst",
+ "BriefDescription": "Floating-point addition/subtraction instruction count"
+ },
+ {
+ "EventCode": "0x140",
+ "EventName": "float_mul_inst",
+ "BriefDescription": "Floating-point multiplication instruction count"
+ },
+ {
+ "EventCode": "0x150",
+ "EventName": "float_fused_muladd_inst",
+ "BriefDescription": "Floating-point fused multiply-add instruction count"
+ },
+ {
+ "EventCode": "0x160",
+ "EventName": "float_div_sqrt_inst",
+ "BriefDescription": "Floating-point division or square-root instruction count"
+ },
+ {
+ "EventCode": "0x170",
+ "EventName": "other_float_inst",
+ "BriefDescription": "Other floating-point instruction count"
+ },
+ {
+ "EventCode": "0x180",
+ "EventName": "int_mul_add_sub_inst",
+ "BriefDescription": "Integer multiplication and add/sub instruction count"
+ },
+ {
+ "EventCode": "0x190",
+ "EventName": "retired_ops",
+ "BriefDescription": "Retired operation count"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/riscv/andes/ax45/memory.json b/tools/perf/pmu-events/arch/riscv/andes/ax45/memory.json
new file mode 100644
index 00000000000000..c7401b526c77c3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/riscv/andes/ax45/memory.json
@@ -0,0 +1,57 @@
+[
+ {
+ "EventCode": "0x01",
+ "EventName": "ilm_access",
+ "BriefDescription": "ILM access"
+ },
+ {
+ "EventCode": "0x11",
+ "EventName": "dlm_access",
+ "BriefDescription": "DLM access"
+ },
+ {
+ "EventCode": "0x21",
+ "EventName": "icache_access",
+ "BriefDescription": "ICACHE access"
+ },
+ {
+ "EventCode": "0x31",
+ "EventName": "icache_miss",
+ "BriefDescription": "ICACHE miss"
+ },
+ {
+ "EventCode": "0x41",
+ "EventName": "dcache_access",
+ "BriefDescription": "DCACHE access"
+ },
+ {
+ "EventCode": "0x51",
+ "EventName": "dcache_miss",
+ "BriefDescription": "DCACHE miss"
+ },
+ {
+ "EventCode": "0x61",
+ "EventName": "dcache_load_access",
+ "BriefDescription": "DCACHE load access"
+ },
+ {
+ "EventCode": "0x71",
+ "EventName": "dcache_load_miss",
+ "BriefDescription": "DCACHE load miss"
+ },
+ {
+ "EventCode": "0x81",
+ "EventName": "dcache_store_access",
+ "BriefDescription": "DCACHE store access"
+ },
+ {
+ "EventCode": "0x91",
+ "EventName": "dcache_store_miss",
+ "BriefDescription": "DCACHE store miss"
+ },
+ {
+ "EventCode": "0xA1",
+ "EventName": "dcache_wb",
+ "BriefDescription": "DCACHE writeback"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/riscv/andes/ax45/microarch.json b/tools/perf/pmu-events/arch/riscv/andes/ax45/microarch.json
new file mode 100644
index 00000000000000..a6d378cbaa74dd
--- /dev/null
+++ b/tools/perf/pmu-events/arch/riscv/andes/ax45/microarch.json
@@ -0,0 +1,77 @@
+[
+ {
+ "EventCode": "0xB1",
+ "EventName": "cycle_wait_icache_fill",
+ "BriefDescription": "Cycles waiting for ICACHE fill data"
+ },
+ {
+ "EventCode": "0xC1",
+ "EventName": "cycle_wait_dcache_fill",
+ "BriefDescription": "Cycles waiting for DCACHE fill data"
+ },
+ {
+ "EventCode": "0xD1",
+ "EventName": "uncached_ifetch_from_bus",
+ "BriefDescription": "Uncached ifetch data access from bus"
+ },
+ {
+ "EventCode": "0xE1",
+ "EventName": "uncached_load_from_bus",
+ "BriefDescription": "Uncached load data access from bus"
+ },
+ {
+ "EventCode": "0xF1",
+ "EventName": "cycle_wait_uncached_ifetch",
+ "BriefDescription": "Cycles waiting for uncached ifetch data from bus"
+ },
+ {
+ "EventCode": "0x101",
+ "EventName": "cycle_wait_uncached_load",
+ "BriefDescription": "Cycles waiting for uncached load data from bus"
+ },
+ {
+ "EventCode": "0x111",
+ "EventName": "main_itlb_access",
+ "BriefDescription": "Main ITLB access"
+ },
+ {
+ "EventCode": "0x121",
+ "EventName": "main_itlb_miss",
+ "BriefDescription": "Main ITLB miss"
+ },
+ {
+ "EventCode": "0x131",
+ "EventName": "main_dtlb_access",
+ "BriefDescription": "Main DTLB access"
+ },
+ {
+ "EventCode": "0x141",
+ "EventName": "main_dtlb_miss",
+ "BriefDescription": "Main DTLB miss"
+ },
+ {
+ "EventCode": "0x151",
+ "EventName": "cycle_wait_itlb_fill",
+ "BriefDescription": "Cycles waiting for Main ITLB fill data"
+ },
+ {
+ "EventCode": "0x161",
+ "EventName": "pipe_stall_cycle_dtlb_miss",
+ "BriefDescription": "Pipeline stall cycles caused by Main DTLB miss"
+ },
+ {
+ "EventCode": "0x02",
+ "EventName": "mispredict_condition_br",
+ "BriefDescription": "Misprediction of conditional branches"
+ },
+ {
+ "EventCode": "0x12",
+ "EventName": "mispredict_take_condition_br",
+ "BriefDescription": "Misprediction of taken conditional branches"
+ },
+ {
+ "EventCode": "0x22",
+ "EventName": "mispredict_target_ret_inst",
+ "BriefDescription": "Misprediction of targets of Return instructions"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/riscv/mapfile.csv b/tools/perf/pmu-events/arch/riscv/mapfile.csv
index cfc449b198105e..3d3a809a5446e8 100644
--- a/tools/perf/pmu-events/arch/riscv/mapfile.csv
+++ b/tools/perf/pmu-events/arch/riscv/mapfile.csv
@@ -17,3 +17,4 @@
0x489-0x8000000000000007-0x[[:xdigit:]]+,v1,sifive/u74,core
0x5b7-0x0-0x0,v1,thead/c900-legacy,core
0x67e-0x80000000db0000[89]0-0x[[:xdigit:]]+,v1,starfive/dubhe-80,core
+0x31e-0x8000000000008a45-0x[[:xdigit:]]+,v1,andes/ax45,core
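
perf treats the cpuid field of each mapfile.csv row as an extended regular expression matched against the "mvendorid-marchid-mimpid" string read from sysfs; the new row binds Andes AX45 cores to the andes/ax45 event directory. A rough standalone illustration of that pattern/string relationship using POSIX regex (perf's actual matching code differs in detail, and the mimpid value here is made up):

    #include <regex.h>
    #include <stdio.h>

    int main(void)
    {
        /* cpuid regex from the new mapfile.csv row, anchored. */
        const char *pattern = "^0x31e-0x8000000000008a45-0x[[:xdigit:]]+$";
        /* Example mvendorid-marchid-mimpid string as read from sysfs. */
        const char *cpuid = "0x31e-0x8000000000008a45-0x1234";
        regex_t re;

        if (regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB))
            return 1;

        printf("cpuid %s the ax45 row\n",
               regexec(&re, cpuid, 0, NULL, 0) == 0 ? "matches"
                                                    : "does not match");
        regfree(&re);
        return 0;
    }
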
diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
index a0b8688b083694..fb4472ddffd81b 100644
--- a/tools/testing/selftests/exec/Makefile
+++ b/tools/testing/selftests/exec/Makefile
@@ -19,8 +19,8 @@ include ../lib.mk
$(OUTPUT)/subdir:
mkdir -p $@
-$(OUTPUT)/script:
- echo '#!/bin/sh' > $@
+$(OUTPUT)/script: Makefile
+ echo '#!/bin/bash' > $@
echo 'exit $$*' >> $@
chmod +x $@
$(OUTPUT)/execveat.symlink: $(OUTPUT)/execveat
diff --git a/tools/testing/selftests/exec/binfmt_script.py b/tools/testing/selftests/exec/binfmt_script.py
index 05f94a741c7aa0..2c575a2c0eab41 100755
--- a/tools/testing/selftests/exec/binfmt_script.py
+++ b/tools/testing/selftests/exec/binfmt_script.py
@@ -16,6 +16,8 @@ SIZE=256
NAME_MAX=int(subprocess.check_output(["getconf", "NAME_MAX", "."]))
test_num=0
+pass_num=0
+fail_num=0
code='''#!/usr/bin/perl
print "Executed interpreter! Args:\n";
@@ -42,7 +44,7 @@ foreach my $a (@ARGV) {
# ...
def test(name, size, good=True, leading="", root="./", target="/perl",
fill="A", arg="", newline="\n", hashbang="#!"):
- global test_num, tests, NAME_MAX
+ global test_num, pass_num, fail_num, tests, NAME_MAX
test_num += 1
if test_num > tests:
raise ValueError("more binfmt_script tests than expected! (want %d, expected %d)"
@@ -80,16 +82,20 @@ def test(name, size, good=True, leading="", root="./", target="/perl",
if good:
print("ok %d - binfmt_script %s (successful good exec)"
% (test_num, name))
+ pass_num += 1
else:
print("not ok %d - binfmt_script %s succeeded when it should have failed"
% (test_num, name))
+ fail_num += 1
else:
if good:
print("not ok %d - binfmt_script %s failed when it should have succeeded (rc:%d)"
% (test_num, name, proc.returncode))
+ fail_num += 1
else:
print("ok %d - binfmt_script %s (correctly failed bad exec)"
% (test_num, name))
+ pass_num += 1
# Clean up crazy binaries
os.unlink(script)
@@ -166,6 +172,8 @@ test(name="two-under-trunc-arg", size=int(SIZE/2), arg=" ")
test(name="two-under-leading", size=int(SIZE/2), leading=" ")
test(name="two-under-lead-trunc-arg", size=int(SIZE/2), leading=" ", arg=" ")
+print("# Totals: pass:%d fail:%d xfail:0 xpass:0 skip:0 error:0" % (pass_num, fail_num))
+
if test_num != tests:
raise ValueError("fewer binfmt_script tests than expected! (ran %d, expected %d"
% (test_num, tests))
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c
index 0546ca24f2b20c..6418ded40bdddc 100644
--- a/tools/testing/selftests/exec/execveat.c
+++ b/tools/testing/selftests/exec/execveat.c
@@ -98,10 +98,9 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags,
if (child == 0) {
/* Child: do execveat(). */
rc = execveat_(fd, path, argv, envp, flags);
- ksft_print_msg("execveat() failed, rc=%d errno=%d (%s)\n",
+ ksft_print_msg("child execveat() failed, rc=%d errno=%d (%s)\n",
rc, errno, strerror(errno));
- ksft_test_result_fail("%s\n", test_name);
- exit(1); /* should not reach here */
+ exit(errno);
}
/* Parent: wait for & check child's exit status. */
rc = waitpid(child, &status, 0);
@@ -226,11 +225,14 @@ static int check_execveat_pathmax(int root_dfd, const char *src, int is_script)
* "If the command name is found, but it is not an executable utility,
* the exit status shall be 126."), so allow either.
*/
- if (is_script)
+ if (is_script) {
+ ksft_print_msg("Invoke script via root_dfd and relative filename\n");
fail += check_execveat_invoked_rc(root_dfd, longpath + 1, 0,
127, 126);
- else
+ } else {
+ ksft_print_msg("Invoke exec via root_dfd and relative filename\n");
fail += check_execveat(root_dfd, longpath + 1, 0);
+ }
return fail;
}
diff --git a/tools/testing/selftests/exec/load_address.c b/tools/testing/selftests/exec/load_address.c
index d487c2f6a61509..17e3207d34ae7e 100644
--- a/tools/testing/selftests/exec/load_address.c
+++ b/tools/testing/selftests/exec/load_address.c
@@ -5,6 +5,7 @@
#include <link.h>
#include <stdio.h>
#include <stdlib.h>
+#include "../kselftest.h"
struct Statistics {
unsigned long long load_address;
@@ -41,28 +42,23 @@ int main(int argc, char **argv)
unsigned long long misalign;
int ret;
+ ksft_print_header();
+ ksft_set_plan(1);
+
ret = dl_iterate_phdr(ExtractStatistics, &extracted);
- if (ret != 1) {
- fprintf(stderr, "FAILED\n");
- return 1;
- }
+ if (ret != 1)
+ ksft_exit_fail_msg("FAILED: dl_iterate_phdr\n");
- if (extracted.alignment == 0) {
- fprintf(stderr, "No alignment found\n");
- return 1;
- } else if (extracted.alignment & (extracted.alignment - 1)) {
- fprintf(stderr, "Alignment is not a power of 2\n");
- return 1;
- }
+ if (extracted.alignment == 0)
+ ksft_exit_fail_msg("FAILED: No alignment found\n");
+ else if (extracted.alignment & (extracted.alignment - 1))
+ ksft_exit_fail_msg("FAILED: Alignment is not a power of 2\n");
misalign = extracted.load_address & (extracted.alignment - 1);
- if (misalign) {
- printf("alignment = %llu, load_address = %llu\n",
- extracted.alignment, extracted.load_address);
- fprintf(stderr, "FAILED\n");
- return 1;
- }
+ if (misalign)
+ ksft_exit_fail_msg("FAILED: alignment = %llu, load_address = %llu\n",
+ extracted.alignment, extracted.load_address);
- fprintf(stderr, "PASS\n");
- return 0;
+ ksft_test_result_pass("Completed\n");
+ ksft_finished();
}
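
The alignment checks above rely on two standard bit tricks: a power of two has exactly one bit set, so x & (x - 1) is zero only for powers of two, and for a power-of-two alignment the misalignment of an address is simply address & (alignment - 1). A quick standalone illustration (the values are made up for demonstration):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long alignment = 0x200000;          /* 2 MiB */
        unsigned long long load_address = 0x555555400000; /* example */

        /* True for zero or any non-power-of-two. */
        if (alignment == 0 || (alignment & (alignment - 1)))
            return 1;

        unsigned long long misalign = load_address & (alignment - 1);
        printf("misalign = 0x%llx\n", misalign);  /* 0 when aligned */
        return 0;
    }
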
diff --git a/tools/testing/selftests/exec/recursion-depth.c b/tools/testing/selftests/exec/recursion-depth.c
index 2dbd5bc45b3ed0..b2f37d86a5f623 100644
--- a/tools/testing/selftests/exec/recursion-depth.c
+++ b/tools/testing/selftests/exec/recursion-depth.c
@@ -23,45 +23,44 @@
#include <fcntl.h>
#include <sys/mount.h>
#include <unistd.h>
+#include "../kselftest.h"
int main(void)
{
+ int fd, rv;
+
+ ksft_print_header();
+ ksft_set_plan(1);
+
if (unshare(CLONE_NEWNS) == -1) {
if (errno == ENOSYS || errno == EPERM) {
- fprintf(stderr, "error: unshare, errno %d\n", errno);
- return 4;
+ ksft_test_result_skip("error: unshare, errno %d\n", errno);
+ ksft_finished();
}
- fprintf(stderr, "error: unshare, errno %d\n", errno);
- return 1;
- }
- if (mount(NULL, "/", NULL, MS_PRIVATE|MS_REC, NULL) == -1) {
- fprintf(stderr, "error: mount '/', errno %d\n", errno);
- return 1;
+ ksft_exit_fail_msg("error: unshare, errno %d\n", errno);
}
+
+ if (mount(NULL, "/", NULL, MS_PRIVATE | MS_REC, NULL) == -1)
+ ksft_exit_fail_msg("error: mount '/', errno %d\n", errno);
+
/* Require "exec" filesystem. */
- if (mount(NULL, "/tmp", "ramfs", 0, NULL) == -1) {
- fprintf(stderr, "error: mount ramfs, errno %d\n", errno);
- return 1;
- }
+ if (mount(NULL, "/tmp", "ramfs", 0, NULL) == -1)
+ ksft_exit_fail_msg("error: mount ramfs, errno %d\n", errno);
#define FILENAME "/tmp/1"
- int fd = creat(FILENAME, 0700);
- if (fd == -1) {
- fprintf(stderr, "error: creat, errno %d\n", errno);
- return 1;
- }
+ fd = creat(FILENAME, 0700);
+ if (fd == -1)
+ ksft_exit_fail_msg("error: creat, errno %d\n", errno);
+
#define S "#!" FILENAME "\n"
- if (write(fd, S, strlen(S)) != strlen(S)) {
- fprintf(stderr, "error: write, errno %d\n", errno);
- return 1;
- }
+ if (write(fd, S, strlen(S)) != strlen(S))
+ ksft_exit_fail_msg("error: write, errno %d\n", errno);
+
close(fd);
- int rv = execve(FILENAME, NULL, NULL);
- if (rv == -1 && errno == ELOOP) {
- return 0;
- }
- fprintf(stderr, "error: execve, rv %d, errno %d\n", rv, errno);
- return 1;
+ rv = execve(FILENAME, NULL, NULL);
+ ksft_test_result(rv == -1 && errno == ELOOP,
+ "execve failed as expected (ret %d, errno %d)\n", rv, errno);
+ ksft_finished();
}
diff --git a/tools/testing/selftests/mm/gup_test.c b/tools/testing/selftests/mm/gup_test.c
index cbe99594d319b4..18a49c70d4c635 100644
--- a/tools/testing/selftests/mm/gup_test.c
+++ b/tools/testing/selftests/mm/gup_test.c
@@ -203,7 +203,7 @@ int main(int argc, char **argv)
ksft_print_header();
ksft_set_plan(nthreads);
- filed = open(file, O_RDWR|O_CREAT);
+ filed = open(file, O_RDWR|O_CREAT, 0664);
if (filed < 0)
ksft_exit_fail_msg("Unable to open %s: %s\n", file, strerror(errno));
diff --git a/tools/testing/selftests/mm/protection_keys.c b/tools/testing/selftests/mm/protection_keys.c
index f822ae31af22e2..374a308174d2ba 100644
--- a/tools/testing/selftests/mm/protection_keys.c
+++ b/tools/testing/selftests/mm/protection_keys.c
@@ -1745,9 +1745,12 @@ void pkey_setup_shadow(void)
shadow_pkey_reg = __read_pkey_reg();
}
+pid_t parent_pid;
+
void restore_settings_atexit(void)
{
- cat_into_file(buf, "/proc/sys/vm/nr_hugepages");
+ if (parent_pid == getpid())
+ cat_into_file(buf, "/proc/sys/vm/nr_hugepages");
}
void save_settings(void)
@@ -1773,6 +1776,7 @@ void save_settings(void)
exit(__LINE__);
}
+ parent_pid = getpid();
atexit(restore_settings_atexit);
close(fd);
}
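
atexit() handlers registered before fork() run in every child that exits normally, so each forked child of this test would rewrite /proc/sys/vm/nr_hugepages on exit. Recording the registering PID and comparing it inside the handler restricts the cleanup to the parent. A self-contained sketch of the pattern, with the sysctl write replaced by a message:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static pid_t parent_pid;

    static void restore_settings_atexit(void)
    {
        /* Only the process that registered the handler cleans up. */
        if (parent_pid == getpid())
            printf("pid %d: restoring settings\n", getpid());
    }

    int main(void)
    {
        parent_pid = getpid();
        atexit(restore_settings_atexit);

        pid_t child = fork();
        if (child == 0)
            exit(0);        /* handler runs here too, but stays quiet */
        waitpid(child, NULL, 0);
        return 0;           /* the parent's exit performs the restore */
    }
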
diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c
index cc5f144430d4d2..7dbfa53d93a05f 100644
--- a/tools/testing/selftests/mm/soft-dirty.c
+++ b/tools/testing/selftests/mm/soft-dirty.c
@@ -137,7 +137,7 @@ static void test_mprotect(int pagemap_fd, int pagesize, bool anon)
if (!map)
ksft_exit_fail_msg("anon mmap failed\n");
} else {
- test_fd = open(fname, O_RDWR | O_CREAT);
+ test_fd = open(fname, O_RDWR | O_CREAT, 0664);
if (test_fd < 0) {
ksft_test_result_skip("Test %s open() file failed\n", __func__);
return;
diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c
index 856662d2f87a1b..6c988bd2f33567 100644
--- a/tools/testing/selftests/mm/split_huge_page_test.c
+++ b/tools/testing/selftests/mm/split_huge_page_test.c
@@ -223,7 +223,7 @@ void split_file_backed_thp(void)
ksft_exit_fail_msg("Fail to create file-backed THP split testing file\n");
}
- fd = open(testfile, O_CREAT|O_WRONLY);
+ fd = open(testfile, O_CREAT|O_WRONLY, 0664);
if (fd == -1) {
ksft_perror("Cannot open testing file");
goto cleanup;
diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c
index b0ac0ec2356d65..7ad6ba660c7d6f 100644
--- a/tools/testing/selftests/mm/uffd-common.c
+++ b/tools/testing/selftests/mm/uffd-common.c
@@ -18,6 +18,7 @@ bool test_uffdio_wp = true;
unsigned long long *count_verify;
uffd_test_ops_t *uffd_test_ops;
uffd_test_case_ops_t *uffd_test_case_ops;
+atomic_bool ready_for_fork;
static int uffd_mem_fd_create(off_t mem_size, bool hugetlb)
{
@@ -518,6 +519,8 @@ void *uffd_poll_thread(void *arg)
pollfd[1].fd = pipefd[cpu*2];
pollfd[1].events = POLLIN;
+ ready_for_fork = true;
+
for (;;) {
ret = poll(pollfd, 2, -1);
if (ret <= 0) {
diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h
index cb055282c89c96..cc5629c3d2aa10 100644
--- a/tools/testing/selftests/mm/uffd-common.h
+++ b/tools/testing/selftests/mm/uffd-common.h
@@ -32,6 +32,7 @@
#include <inttypes.h>
#include <stdint.h>
#include <sys/random.h>
+#include <stdatomic.h>
#include "../kselftest.h"
#include "vm_util.h"
@@ -103,6 +104,7 @@ extern bool map_shared;
extern bool test_uffdio_wp;
extern unsigned long long *count_verify;
extern volatile bool test_uffdio_copy_eexist;
+extern atomic_bool ready_for_fork;
extern uffd_test_ops_t anon_uffd_test_ops;
extern uffd_test_ops_t shmem_uffd_test_ops;
diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
index 2b9f8cc52639d1..21ec23206ab44a 100644
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -775,6 +775,8 @@ static void uffd_sigbus_test_common(bool wp)
char c;
struct uffd_args args = { 0 };
+ ready_for_fork = false;
+
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
if (uffd_register(uffd, area_dst, nr_pages * page_size,
@@ -790,6 +792,9 @@ static void uffd_sigbus_test_common(bool wp)
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
err("uffd_poll_thread create");
+ while (!ready_for_fork)
+ ; /* Wait for the poll_thread to start executing before forking */
+
pid = fork();
if (pid < 0)
err("fork");
@@ -829,6 +834,8 @@ static void uffd_events_test_common(bool wp)
char c;
struct uffd_args args = { 0 };
+ ready_for_fork = false;
+
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
if (uffd_register(uffd, area_dst, nr_pages * page_size,
true, wp, false))
@@ -838,6 +845,9 @@ static void uffd_events_test_common(bool wp)
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
err("uffd_poll_thread create");
+ while (!ready_for_fork)
+ ; /* Wait for the poll_thread to start executing before forking */
+
pid = fork();
if (pid < 0)
err("fork");
@@ -1427,7 +1437,8 @@ uffd_test_case_t uffd_tests[] = {
.uffd_fn = uffd_sigbus_wp_test,
.mem_targets = MEM_ALL,
.uffd_feature_required = UFFD_FEATURE_SIGBUS |
- UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_PAGEFAULT_FLAG_WP,
+ UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_PAGEFAULT_FLAG_WP |
+ UFFD_FEATURE_WP_HUGETLBFS_SHMEM,
},
{
.name = "events",
diff --git a/tools/testing/selftests/riscv/mm/mmap_bottomup.c b/tools/testing/selftests/riscv/mm/mmap_bottomup.c
index 1757d19ca89b1b..7f7d3eb8b9c926 100644
--- a/tools/testing/selftests/riscv/mm/mmap_bottomup.c
+++ b/tools/testing/selftests/riscv/mm/mmap_bottomup.c
@@ -6,30 +6,9 @@
TEST(infinite_rlimit)
{
-// Only works on 64 bit
-#if __riscv_xlen == 64
- struct addresses mmap_addresses;
-
EXPECT_EQ(BOTTOM_UP, memory_layout());
- do_mmaps(&mmap_addresses);
-
- EXPECT_NE(MAP_FAILED, mmap_addresses.no_hint);
- EXPECT_NE(MAP_FAILED, mmap_addresses.on_37_addr);
- EXPECT_NE(MAP_FAILED, mmap_addresses.on_38_addr);
- EXPECT_NE(MAP_FAILED, mmap_addresses.on_46_addr);
- EXPECT_NE(MAP_FAILED, mmap_addresses.on_47_addr);
- EXPECT_NE(MAP_FAILED, mmap_addresses.on_55_addr);
- EXPECT_NE(MAP_FAILED, mmap_addresses.on_56_addr);
-
- EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.no_hint);
- EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_37_addr);
- EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_38_addr);
- EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_46_addr);
- EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_47_addr);
- EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_55_addr);
- EXPECT_GT(1UL << 56, (unsigned long)mmap_addresses.on_56_addr);
-#endif
+ TEST_MMAPS;
}
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/riscv/mm/mmap_default.c b/tools/testing/selftests/riscv/mm/mmap_default.c
index c63c60b9397e7f..2ba3ec9900064d 100644
--- a/tools/testing/selftests/riscv/mm/mmap_default.c
+++ b/tools/testing/selftests/riscv/mm/mmap_default.c
@@ -6,30 +6,9 @@
TEST(default_rlimit)
{
-// Only works on 64 bit
-#if __riscv_xlen == 64
- struct addresses mmap_addresses;
-
EXPECT_EQ(TOP_DOWN, memory_layout());
- do_mmaps(&mmap_addresses);
-
- EXPECT_NE(MAP_FAILED, mmap_addresses.no_hint);
- EXPECT_NE(MAP_FAILED, mmap_addresses.on_37_addr);
- EXPECT_NE(MAP_FAILED, mmap_addresses.on_38_addr);
- EXPECT_NE(MAP_FAILED, mmap_addresses.on_46_addr);
- EXPECT_NE(MAP_FAILED, mmap_addresses.on_47_addr);
- EXPECT_NE(MAP_FAILED, mmap_addresses.on_55_addr);
- EXPECT_NE(MAP_FAILED, mmap_addresses.on_56_addr);
-
- EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.no_hint);
- EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_37_addr);
- EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_38_addr);
- EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_46_addr);
- EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_47_addr);
- EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_55_addr);
- EXPECT_GT(1UL << 56, (unsigned long)mmap_addresses.on_56_addr);
-#endif
+ TEST_MMAPS;
}
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/riscv/mm/mmap_test.h b/tools/testing/selftests/riscv/mm/mmap_test.h
index 2e0db9c5be6c33..3b29ca3bb3d40d 100644
--- a/tools/testing/selftests/riscv/mm/mmap_test.h
+++ b/tools/testing/selftests/riscv/mm/mmap_test.h
@@ -4,63 +4,86 @@
#include <sys/mman.h>
#include <sys/resource.h>
#include <stddef.h>
+#include <strings.h>
+#include "../../kselftest_harness.h"
#define TOP_DOWN 0
#define BOTTOM_UP 1
-struct addresses {
- int *no_hint;
- int *on_37_addr;
- int *on_38_addr;
- int *on_46_addr;
- int *on_47_addr;
- int *on_55_addr;
- int *on_56_addr;
+#if __riscv_xlen == 64
+uint64_t random_addresses[] = {
+ 0x19764f0d73b3a9f0, 0x016049584cecef59, 0x3580bdd3562f4acd,
+ 0x1164219f20b17da0, 0x07d97fcb40ff2373, 0x76ec528921272ee7,
+ 0x4dd48c38a3de3f70, 0x2e11415055f6997d, 0x14b43334ac476c02,
+ 0x375a60795aff19f6, 0x47f3051725b8ee1a, 0x4e697cf240494a9f,
+ 0x456b59b5c2f9e9d1, 0x101724379d63cb96, 0x7fe9ad31619528c1,
+ 0x2f417247c495c2ea, 0x329a5a5b82943a5e, 0x06d7a9d6adcd3827,
+ 0x327b0b9ee37f62d5, 0x17c7b1851dfd9b76, 0x006ebb6456ec2cd9,
+ 0x00836cd14146a134, 0x00e5c4dcde7126db, 0x004c29feadf75753,
+ 0x00d8b20149ed930c, 0x00d71574c269387a, 0x0006ebe4a82acb7a,
+ 0x0016135df51f471b, 0x00758bdb55455160, 0x00d0bdd949b13b32,
+ 0x00ecea01e7c5f54b, 0x00e37b071b9948b1, 0x0011fdd00ff57ab3,
+ 0x00e407294b52f5ea, 0x00567748c200ed20, 0x000d073084651046,
+ 0x00ac896f4365463c, 0x00eb0d49a0b26216, 0x0066a2564a982a31,
+ 0x002e0d20237784ae, 0x0000554ff8a77a76, 0x00006ce07a54c012,
+ 0x000009570516d799, 0x00000954ca15b84d, 0x0000684f0d453379,
+ 0x00002ae5816302b5, 0x0000042403fb54bf, 0x00004bad7392bf30,
+ 0x00003e73bfa4b5e3, 0x00005442c29978e0, 0x00002803f11286b6,
+ 0x000073875d745fc6, 0x00007cede9cb8240, 0x000027df84cc6a4f,
+ 0x00006d7e0e74242a, 0x00004afd0b836e02, 0x000047d0e837cd82,
+ 0x00003b42405efeda, 0x00001531bafa4c95, 0x00007172cae34ac4,
+};
+#else
+uint32_t random_addresses[] = {
+ 0x8dc302e0, 0x929ab1e0, 0xb47683ba, 0xea519c73, 0xa19f1c90, 0xc49ba213,
+ 0x8f57c625, 0xadfe5137, 0x874d4d95, 0xaa20f09d, 0xcf21ebfc, 0xda7737f1,
+ 0xcedf392a, 0x83026c14, 0xccedca52, 0xc6ccf826, 0xe0cd9415, 0x997472ca,
+ 0xa21a44c1, 0xe82196f5, 0xa23fd66b, 0xc28d5590, 0xd009cdce, 0xcf0be646,
+ 0x8fc8c7ff, 0xe2a85984, 0xa3d3236b, 0x89a0619d, 0xc03db924, 0xb5d4cc1b,
+ 0xb96ee04c, 0xd191da48, 0xb432a000, 0xaa2bebbc, 0xa2fcb289, 0xb0cca89b,
+ 0xb0c18d6a, 0x88f58deb, 0xa4d42d1c, 0xe4d74e86, 0x99902b09, 0x8f786d31,
+ 0xbec5e381, 0x9a727e65, 0xa9a65040, 0xa880d789, 0x8f1b335e, 0xfc821c1e,
+ 0x97e34be4, 0xbbef84ed, 0xf447d197, 0xfd7ceee2, 0xe632348d, 0xee4590f4,
+ 0x958992a5, 0xd57e05d6, 0xfd240970, 0xc5b0dcff, 0xd96da2c2, 0xa7ae041d,
};
+#endif
// Only works on 64 bit
#if __riscv_xlen == 64
-static inline void do_mmaps(struct addresses *mmap_addresses)
-{
- /*
- * Place all of the hint addresses on the boundaries of mmap
- * sv39, sv48, sv57
- * User addresses end at 1<<38, 1<<47, 1<<56 respectively
- */
- void *on_37_bits = (void *)(1UL << 37);
- void *on_38_bits = (void *)(1UL << 38);
- void *on_46_bits = (void *)(1UL << 46);
- void *on_47_bits = (void *)(1UL << 47);
- void *on_55_bits = (void *)(1UL << 55);
- void *on_56_bits = (void *)(1UL << 56);
+#define PROT (PROT_READ | PROT_WRITE)
+#define FLAGS (MAP_PRIVATE | MAP_ANONYMOUS)
- int prot = PROT_READ | PROT_WRITE;
- int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+/* mmap must return a value that doesn't use more bits than the hint address. */
+static inline unsigned long get_max_value(unsigned long input)
+{
+ unsigned long max_bit = (1UL << (((sizeof(unsigned long) * 8) - 1 -
+ __builtin_clzl(input))));
- mmap_addresses->no_hint =
- mmap(NULL, 5 * sizeof(int), prot, flags, 0, 0);
- mmap_addresses->on_37_addr =
- mmap(on_37_bits, 5 * sizeof(int), prot, flags, 0, 0);
- mmap_addresses->on_38_addr =
- mmap(on_38_bits, 5 * sizeof(int), prot, flags, 0, 0);
- mmap_addresses->on_46_addr =
- mmap(on_46_bits, 5 * sizeof(int), prot, flags, 0, 0);
- mmap_addresses->on_47_addr =
- mmap(on_47_bits, 5 * sizeof(int), prot, flags, 0, 0);
- mmap_addresses->on_55_addr =
- mmap(on_55_bits, 5 * sizeof(int), prot, flags, 0, 0);
- mmap_addresses->on_56_addr =
- mmap(on_56_bits, 5 * sizeof(int), prot, flags, 0, 0);
+ return max_bit + (max_bit - 1);
}
+
+#define TEST_MMAPS \
+ ({ \
+ void *mmap_addr; \
+ for (int i = 0; i < ARRAY_SIZE(random_addresses); i++) { \
+ mmap_addr = mmap((void *)random_addresses[i], \
+ 5 * sizeof(int), PROT, FLAGS, 0, 0); \
+ EXPECT_NE(MAP_FAILED, mmap_addr); \
+ EXPECT_GE((void *)get_max_value(random_addresses[i]), \
+ mmap_addr); \
+ mmap_addr = mmap((void *)random_addresses[i], \
+ 5 * sizeof(int), PROT, FLAGS, 0, 0); \
+ EXPECT_NE(MAP_FAILED, mmap_addr); \
+ EXPECT_GE((void *)get_max_value(random_addresses[i]), \
+ mmap_addr); \
+ } \
+ })
#endif /* __riscv_xlen == 64 */
static inline int memory_layout(void)
{
- int prot = PROT_READ | PROT_WRITE;
- int flags = MAP_PRIVATE | MAP_ANONYMOUS;
-
- void *value1 = mmap(NULL, sizeof(int), prot, flags, 0, 0);
- void *value2 = mmap(NULL, sizeof(int), prot, flags, 0, 0);
+ void *value1 = mmap(NULL, sizeof(int), PROT, FLAGS, 0, 0);
+ void *value2 = mmap(NULL, sizeof(int), PROT, FLAGS, 0, 0);
return value2 > value1;
}
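
get_max_value() above turns a hint address into the largest address of the same bit width: __builtin_clzl() locates the highest set bit, and max_bit + (max_bit - 1) then fills in every bit below it. A worked standalone example using one of the 64-bit hint values from the table (assumes a 64-bit unsigned long):

    #include <stdio.h>

    static unsigned long get_max_value(unsigned long input)
    {
        unsigned long max_bit = 1UL << (sizeof(unsigned long) * 8 - 1 -
                                        __builtin_clzl(input));

        /* All bits at or below the highest set bit of input. */
        return max_bit + (max_bit - 1);
    }

    int main(void)
    {
        unsigned long hint = 0x19764f0d73b3a9f0UL;

        /* Highest set bit is bit 60, so the cap is 0x1fffffffffffffff. */
        printf("hint 0x%lx -> max 0x%lx\n", hint, get_max_value(hint));
        return 0;
    }
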