author      Felipe Balbi <balbi@kernel.org>    2023-12-27 14:49:02 -0800
committer   Felipe Balbi <balbi@kernel.org>    2023-12-27 14:49:02 -0800
commit      ed9b7c41676ea4ebcc3998f269692f82aa562e67 (patch)
tree        672246ad139124530b426b32b17e48bd002f6e0f
parent      ef70392e0451d19974be930ca14c210f17bc29d1 (diff)
parent      861deac3b092f37b2c5e6871732f3e11486f7082 (diff)
download    usb-ed9b7c41676ea4ebcc3998f269692f82aa562e67.tar.gz
Merge tag 'v6.7-rc7'
Linux 6.7-rc7

# -----BEGIN PGP SIGNATURE-----
#
# iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmWHepQeHHRvcnZhbGRz
# QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGm6YH/0b9hMQ1f2DbMjEq
# w8h0bIMkc+Jv9fLVgCMA8nTuAQsYNp+MJ8PKA9t8CCtG1BTs+lXr6DsN2GpoODEj
# fMCXzyHTlMgg5S0iXAXHcxX1t1gRDgxoOeBvUY8YZw1JZriNsO+dF3U7rRdrmg1n
# 7jq6WFxJfuB2NMlmys7vk/0c3t193AKxkWyDFBmiim3MX0IYxGlD5DVadaxA6Vx7
# Pqjt9ljVG11jJWeQ9ZFLd5RmZkdmY/JVlPagEr5rPDsJrbL23sLUu4bRHGBWcipU
# BgBUdj7K6tahuwKeg2KENxCGjvQOaFIti1l1CvHMWKPgFFlhE30DD6x7nZldPSj6
# m52I1xE=
# =tLIr
# -----END PGP SIGNATURE-----
# gpg: Signature made Sat 23 Dec 2023 04:25:56 PM PST
# gpg: using RSA key ABAF11C65A2970B130ABE3C479BE3E4300411886
# gpg: issuer "torvalds@linux-foundation.org"
# gpg: Good signature from "Linus Torvalds <torvalds@kernel.org>" [marginal]
# gpg: aka "Linus Torvalds <torvalds@linux-foundation.org>" [full]
# gpg: torvalds@linux-foundation.org: Verified 30 signatures in the past 3 years. Encrypted 0 messages.
-rw-r--r--.mailmap4
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-simple.yaml2
-rw-r--r--Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml10
-rw-r--r--MAINTAINERS6
-rw-r--r--Makefile2
-rw-r--r--arch/arc/Kconfig5
-rw-r--r--arch/arc/include/asm/cacheflush.h43
-rw-r--r--arch/arc/include/asm/entry-arcv2.h32
-rw-r--r--arch/arc/include/asm/entry-compact.h87
-rw-r--r--arch/arc/include/asm/entry.h110
-rw-r--r--arch/arc/include/asm/hugepage.h7
-rw-r--r--arch/arc/include/asm/ptrace.h14
-rw-r--r--arch/arc/kernel/setup.c4
-rw-r--r--arch/arc/kernel/signal.c6
-rw-r--r--arch/arc/mm/cache.c136
-rw-r--r--arch/arc/mm/mmap.c21
-rw-r--r--arch/arc/mm/tlb.c16
-rw-r--r--arch/arm/boot/dts/ti/omap/am33xx.dtsi1
-rw-r--r--arch/arm/boot/dts/ti/omap/dra7.dtsi2
-rw-r--r--arch/arm/mach-omap2/id.c5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero.dtsi3
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts3
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts1
-rw-r--r--arch/arm64/include/asm/pgtable.h6
-rw-r--r--arch/arm64/include/asm/syscall_wrapper.h4
-rw-r--r--arch/arm64/kvm/arm.c2
-rw-r--r--arch/arm64/kvm/vgic/vgic-init.c47
-rw-r--r--arch/arm64/kvm/vgic/vgic-mmio-v3.c4
-rw-r--r--arch/arm64/kvm/vgic/vgic.h1
-rw-r--r--arch/loongarch/kernel/Makefile2
-rw-r--r--arch/m68k/include/asm/kexec.h4
-rw-r--r--arch/m68k/kernel/Makefile2
-rw-r--r--arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi3
-rw-r--r--arch/mips/boot/dts/loongson/ls7a-pch.dtsi3
-rw-r--r--arch/mips/cavium-octeon/smp.c4
-rw-r--r--arch/mips/include/asm/kexec.h2
-rw-r--r--arch/mips/include/asm/smp-ops.h2
-rw-r--r--arch/mips/include/asm/smp.h2
-rw-r--r--arch/mips/kernel/Makefile2
-rw-r--r--arch/mips/kernel/smp-bmips.c4
-rw-r--r--arch/mips/kernel/smp-cps.c10
-rw-r--r--arch/mips/loongson64/reset.c4
-rw-r--r--arch/mips/loongson64/smp.c2
-rw-r--r--arch/powerpc/configs/skiroot_defconfig1
-rw-r--r--arch/powerpc/platforms/pseries/vas.c51
-rw-r--r--arch/powerpc/platforms/pseries/vas.h2
-rw-r--r--arch/riscv/Kconfig4
-rw-r--r--arch/riscv/include/asm/pgtable.h2
-rw-r--r--arch/riscv/include/asm/syscall_wrapper.h5
-rw-r--r--arch/riscv/kernel/crash_core.c4
-rw-r--r--arch/riscv/kvm/aia_imsic.c13
-rw-r--r--arch/s390/configs/debug_defconfig10
-rw-r--r--arch/s390/configs/defconfig9
-rw-r--r--arch/s390/configs/zfcpdump_defconfig3
-rw-r--r--arch/s390/include/asm/fpu/api.h2
-rw-r--r--arch/s390/include/asm/syscall_wrapper.h13
-rw-r--r--arch/sh/include/asm/kexec.h4
-rw-r--r--arch/sh/kernel/Makefile2
-rw-r--r--arch/sh/kernel/reboot.c4
-rw-r--r--arch/sh/kernel/setup.c2
-rw-r--r--arch/x86/boot/compressed/acpi.c2
-rw-r--r--arch/x86/include/asm/syscall_wrapper.h34
-rw-r--r--arch/x86/kernel/acpi/boot.c2
-rw-r--r--arch/x86/kernel/alternative.c14
-rw-r--r--arch/x86/kernel/head_64.S16
-rw-r--r--arch/x86/kvm/svm/sev.c19
-rw-r--r--arch/x86/kvm/svm/svm.c1
-rw-r--r--arch/x86/kvm/svm/svm.h2
-rw-r--r--arch/x86/xen/Kconfig1
-rw-r--r--drivers/accel/ivpu/ivpu_hw_37xx.c12
-rw-r--r--drivers/atm/solos-pci.c8
-rw-r--r--drivers/bluetooth/hci_vhci.c10
-rw-r--r--drivers/bus/ti-sysc.c18
-rw-r--r--drivers/clk/qcom/Kconfig1
-rw-r--r--drivers/clk/rockchip/clk-rk3128.c24
-rw-r--r--drivers/clk/rockchip/clk-rk3568.c1
-rw-r--r--drivers/cxl/core/hdm.c5
-rw-r--r--drivers/cxl/core/memdev.c27
-rw-r--r--drivers/cxl/core/pci.c13
-rw-r--r--drivers/cxl/core/pmu.c2
-rw-r--r--drivers/cxl/core/port.c4
-rw-r--r--drivers/cxl/core/region.c5
-rw-r--r--drivers/dma/fsl-edma-common.c1
-rw-r--r--drivers/dma/fsl-edma-main.c12
-rw-r--r--drivers/dma/idxd/registers.h12
-rw-r--r--drivers/dma/idxd/submit.c14
-rw-r--r--drivers/dma/stm32-dma.c8
-rw-r--r--drivers/dma/ti/k3-psil-am62.c12
-rw-r--r--drivers/dma/ti/k3-psil-am62a.c12
-rw-r--r--drivers/dpll/dpll_netlink.c13
-rw-r--r--drivers/edac/versal_edac.c4
-rw-r--r--drivers/gpio/gpio-dwapb.c12
-rw-r--r--drivers/gpio/gpiolib-cdev.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c28
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c18
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c4
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h2
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c2
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c4
-rw-r--r--drivers/gpu/drm/drm_auth.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c8
-rw-r--r--drivers/gpu/drm/drm_edid.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c43
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c19
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h12
-rw-r--r--drivers/gpu/drm/i915/i915_hwmon.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_live_test.c9
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_live_test.h3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_gamma.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c14
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c18
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c2
-rw-r--r--drivers/hid/hid-nintendo.c71
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c48
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c8
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c13
-rw-r--r--drivers/iio/accel/kionix-kx022a.c37
-rw-r--r--drivers/iio/adc/imx93_adc.c4
-rw-r--r--drivers/iio/adc/mcp3564.c8
-rw-r--r--drivers/iio/adc/meson_saradc.c16
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c4
-rw-r--r--drivers/iio/buffer/industrialio-triggered-buffer.c10
-rw-r--r--drivers/iio/common/ms_sensors/ms_sensors_i2c.c4
-rw-r--r--drivers/iio/imu/adis16475.c121
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c4
-rw-r--r--drivers/iio/light/hid-sensor-als.c100
-rw-r--r--drivers/iio/magnetometer/tmag5273.c2
-rw-r--r--drivers/input/joystick/xpad.c1
-rw-r--r--drivers/input/keyboard/atkbd.c46
-rw-r--r--drivers/input/keyboard/ipaq-micro-keys.c3
-rw-r--r--drivers/input/misc/soc_button_array.c5
-rw-r--r--drivers/input/mouse/amimouse.c5
-rw-r--r--drivers/input/mouse/synaptics.c1
-rw-r--r--drivers/input/serio/i8042-acpipnpio.h8
-rw-r--r--drivers/interconnect/core.c3
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c2
-rw-r--r--drivers/interconnect/qcom/sm8250.c1
-rw-r--r--drivers/md/Kconfig1
-rw-r--r--drivers/md/dm-integrity.c11
-rw-r--r--drivers/md/dm-raid.c3
-rw-r--r--drivers/md/md.c11
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.c3
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c53
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c5
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c38
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c19
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c11
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c7
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c11
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c27
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h1
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c27
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_fdir.h15
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c179
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c71
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c25
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c55
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c25
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c443
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c162
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h3
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c20
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c42
-rw-r--r--drivers/net/ethernet/microsoft/Kconfig1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c1
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c17
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c6
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c82
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h1
-rw-r--r--drivers/net/phy/phy_device.c6
-rw-r--r--drivers/net/team/team.c4
-rw-r--r--drivers/net/usb/ax88179_178a.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c10
-rw-r--r--drivers/nvme/host/core.c3
-rw-r--r--drivers/nvme/host/fc.c21
-rw-r--r--drivers/nvmem/brcm_nvram.c134
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c7
-rw-r--r--drivers/pci/controller/pci-loongson.c46
-rw-r--r--drivers/pci/controller/vmd.c2
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c9
-rw-r--r--drivers/pci/pcie/aspm.c65
-rw-r--r--drivers/perf/arm-cmn.c2
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c2
-rw-r--r--drivers/phy/sunplus/phy-sunplus-usb2.c2
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c5
-rw-r--r--drivers/pinctrl/pinctrl-amd.c9
-rw-r--r--drivers/pinctrl/pinctrl-amd.h5
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c8
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c14
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c4
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c4
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c20
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c33
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.h12
-rw-r--r--drivers/platform/x86/intel/pmc/core.c2
-rw-r--r--drivers/platform/x86/intel/vbtn.c19
-rw-r--r--drivers/platform/x86/intel_ips.c30
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c104
-rw-r--r--drivers/reset/core.c8
-rw-r--r--drivers/reset/hisilicon/hi6220_reset.c2
-rw-r--r--drivers/s390/block/scm_blk.c7
-rw-r--r--drivers/scsi/aacraid/aacraid.h1
-rw-r--r--drivers/scsi/aacraid/commsup.c6
-rw-r--r--drivers/scsi/aacraid/linit.c14
-rw-r--r--drivers/scsi/aacraid/src.c25
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c9
-rw-r--r--drivers/scsi/scsi_error.c2
-rw-r--r--drivers/soundwire/intel_ace2x.c3
-rw-r--r--drivers/soundwire/stream.c7
-rw-r--r--drivers/spi/spi-atmel.c95
-rw-r--r--drivers/spi/spi-cadence.c1
-rw-r--r--drivers/spi/spi-imx.c15
-rw-r--r--drivers/thunderbolt/debugfs.c2
-rw-r--r--drivers/thunderbolt/usb4.c10
-rw-r--r--drivers/ufs/core/ufshcd.c3
-rw-r--r--drivers/ufs/host/ufs-qcom.c6
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c54
-rw-r--r--drivers/usb/fotg210/fotg210-hcd.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c6
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/option.c5
-rw-r--r--drivers/usb/storage/unusual_devs.h11
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c2
-rw-r--r--fs/afs/cell.c6
-rw-r--r--fs/afs/dynroot.c31
-rw-r--r--fs/afs/internal.h2
-rw-r--r--fs/afs/volume.c26
-rw-r--r--fs/bcachefs/alloc_foreground.c14
-rw-r--r--fs/bcachefs/btree_iter.c8
-rw-r--r--fs/bcachefs/btree_iter.h1
-rw-r--r--fs/bcachefs/btree_update_interior.c39
-rw-r--r--fs/bcachefs/btree_update_interior.h4
-rw-r--r--fs/bcachefs/data_update.c3
-rw-r--r--fs/bcachefs/fs.c19
-rw-r--r--fs/bcachefs/recovery.h3
-rw-r--r--fs/bcachefs/super.c7
-rw-r--r--fs/btrfs/delalloc-space.c2
-rw-r--r--fs/btrfs/disk-io.c28
-rw-r--r--fs/btrfs/extent-tree.c48
-rw-r--r--fs/btrfs/extent_io.c3
-rw-r--r--fs/btrfs/file.c2
-rw-r--r--fs/btrfs/inode.c16
-rw-r--r--fs/btrfs/ioctl.c9
-rw-r--r--fs/btrfs/ordered-data.c11
-rw-r--r--fs/btrfs/qgroup.c44
-rw-r--r--fs/btrfs/qgroup.h7
-rw-r--r--fs/btrfs/transaction.c2
-rw-r--r--fs/btrfs/transaction.h3
-rw-r--r--fs/debugfs/file.c6
-rw-r--r--fs/nfsd/auth.c4
-rw-r--r--fs/nfsd/nfs4callback.c97
-rw-r--r--fs/nfsd/nfs4state.c114
-rw-r--r--fs/nfsd/nfs4xdr.c7
-rw-r--r--fs/nfsd/nfsctl.c18
-rw-r--r--fs/nfsd/nfsd.h1
-rw-r--r--fs/nfsd/nfssvc.c3
-rw-r--r--fs/nfsd/state.h25
-rw-r--r--fs/nfsd/vfs.c9
-rw-r--r--fs/nfsd/xdr4cb.h18
-rw-r--r--fs/open.c3
-rw-r--r--fs/overlayfs/copy_up.c5
-rw-r--r--fs/smb/client/cached_dir.c17
-rw-r--r--fs/smb/client/cifs_debug.c12
-rw-r--r--fs/smb/client/cifsglob.h3
-rw-r--r--fs/smb/client/connect.c7
-rw-r--r--fs/smb/client/file.c2
-rw-r--r--fs/smb/client/misc.c4
-rw-r--r--fs/smb/client/sess.c50
-rw-r--r--fs/smb/client/smb2misc.c56
-rw-r--r--fs/smb/client/smb2ops.c65
-rw-r--r--fs/smb/client/smb2pdu.c125
-rw-r--r--fs/smb/client/smb2proto.h12
-rw-r--r--fs/tracefs/event_inode.c16
-rw-r--r--include/linux/bpf_types.h4
-rw-r--r--include/linux/cred.h58
-rw-r--r--include/linux/damon.h2
-rw-r--r--include/linux/device.h2
-rw-r--r--include/linux/hid-sensor-ids.h4
-rw-r--r--include/linux/ieee80211.h3
-rw-r--r--include/linux/io_uring_types.h3
-rw-r--r--include/linux/key-type.h1
-rw-r--r--include/linux/mlx5/mlx5_ifc.h9
-rw-r--r--include/linux/mm.h8
-rw-r--r--include/linux/mm_inline.h23
-rw-r--r--include/linux/mmzone.h34
-rw-r--r--include/linux/pci.h3
-rw-r--r--include/net/addrconf.h12
-rw-r--r--include/net/bluetooth/hci_core.h9
-rw-r--r--include/net/if_inet6.h4
-rw-r--r--include/net/ip6_fib.h64
-rw-r--r--include/net/netfilter/nf_flow_table.h10
-rw-r--r--include/net/sock.h5
-rw-r--r--include/trace/events/9p.h11
-rw-r--r--io_uring/poll.c20
-rw-r--r--io_uring/uring_cmd.c2
-rw-r--r--kernel/Kconfig.kexec1
-rw-r--r--kernel/crash_core.c10
-rw-r--r--kernel/cred.c251
-rw-r--r--kernel/events/core.c10
-rw-r--r--kernel/exit.c3
-rw-r--r--kernel/resource.c4
-rw-r--r--kernel/sys_ni.c14
-rw-r--r--kernel/time/posix-stubs.c45
-rw-r--r--kernel/trace/ring_buffer.c184
-rw-r--r--kernel/trace/synth_event_gen_test.c11
-rw-r--r--kernel/trace/trace.c16
-rw-r--r--kernel/trace/trace.h1
-rw-r--r--kernel/trace/trace_events_hist.c12
-rw-r--r--kernel/trace/trace_events_synth.c4
-rw-r--r--kernel/trace/trace_output.c6
-rw-r--r--lib/Kconfig.debug15
-rw-r--r--lib/idr.c2
-rw-r--r--lib/test_ida.c40
-rw-r--r--lib/vsprintf.c11
-rw-r--r--mm/damon/core.c6
-rw-r--r--mm/shmem.c19
-rw-r--r--mm/vmscan.c92
-rw-r--r--mm/workingset.c6
-rw-r--r--net/8021q/vlan_core.c9
-rw-r--r--net/9p/protocol.c17
-rw-r--r--net/appletalk/ddp.c9
-rw-r--r--net/atm/ioctl.c7
-rw-r--r--net/bluetooth/af_bluetooth.c7
-rw-r--r--net/bluetooth/hci_event.c30
-rw-r--r--net/bluetooth/l2cap_core.c21
-rw-r--r--net/bluetooth/mgmt.c25
-rw-r--r--net/bluetooth/smp.c7
-rw-r--r--net/core/dev.c3
-rw-r--r--net/core/neighbour.c9
-rw-r--r--net/core/skbuff.c5
-rw-r--r--net/core/sock_map.c2
-rw-r--r--net/core/stream.c2
-rw-r--r--net/dns_resolver/dns_key.c10
-rw-r--r--net/ife/ife.c1
-rw-r--r--net/ipv4/tcp_input.c22
-rw-r--r--net/ipv4/tcp_output.c6
-rw-r--r--net/ipv6/addrconf.c6
-rw-r--r--net/ipv6/ip6_fib.c55
-rw-r--r--net/ipv6/route.c6
-rw-r--r--net/mac80211/cfg.c4
-rw-r--r--net/mac80211/driver-ops.c6
-rw-r--r--net/mac80211/mesh_plink.c16
-rw-r--r--net/mac80211/mlme.c4
-rw-r--r--net/mptcp/crypto_test.c1
-rw-r--r--net/mptcp/protocol.c6
-rw-r--r--net/mptcp/protocol.h9
-rw-r--r--net/mptcp/subflow.c28
-rw-r--r--net/mptcp/token_test.c1
-rw-r--r--net/rfkill/rfkill-gpio.c8
-rw-r--r--net/rose/af_rose.c43
-rw-r--r--net/sched/act_ct.c34
-rw-r--r--net/sunrpc/auth.c3
-rw-r--r--net/sunrpc/svc_xprt.c5
-rw-r--r--net/vmw_vsock/virtio_transport_common.c2
-rw-r--r--net/wireless/certs/wens.hex87
-rw-r--r--scripts/sign-file.c12
-rw-r--r--security/keys/gc.c31
-rw-r--r--security/keys/internal.h11
-rw-r--r--security/keys/key.c15
-rw-r--r--security/keys/proc.c2
-rw-r--r--security/selinux/hooks.c6
-rw-r--r--sound/pci/hda/cs35l41_hda.c2
-rw-r--r--sound/pci/hda/cs35l41_hda.h5
-rw-r--r--sound/pci/hda/cs35l41_hda_property.c365
-rw-r--r--sound/pci/hda/patch_hdmi.c3
-rw-r--r--sound/pci/hda/patch_realtek.c39
-rw-r--r--sound/pci/hda/tas2781_hda_i2c.c25
-rw-r--r--sound/soc/codecs/cs35l45-i2c.c2
-rw-r--r--sound/soc/codecs/cs35l45-spi.c2
-rw-r--r--sound/soc/codecs/cs35l45.c56
-rw-r--r--sound/soc/codecs/cs42l43-jack.c23
-rw-r--r--sound/soc/codecs/hdmi-codec.c12
-rw-r--r--sound/soc/codecs/tas2781-fmwlib.c14
-rw-r--r--sound/soc/fsl/fsl_sai.c3
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c31
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-mtl-match.c8
-rw-r--r--sound/soc/sof/mediatek/mt8186/mt8186.c3
-rw-r--r--sound/usb/quirks.c4
-rw-r--r--tools/objtool/noreturns.h1
-rw-r--r--tools/testing/cxl/Kbuild1
-rw-r--r--tools/testing/cxl/cxl_core_exports.c7
-rw-r--r--tools/testing/cxl/test/cxl.c5
-rw-r--r--tools/testing/selftests/Makefile21
-rw-r--r--tools/testing/selftests/alsa/mixer-test.c2
-rw-r--r--tools/testing/selftests/bpf/config.x86_641
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_basic.c34
-rw-r--r--tools/testing/selftests/hid/config.common1
-rw-r--r--tools/testing/selftests/kvm/Makefile26
-rw-r--r--tools/testing/selftests/kvm/get-reg-list.c9
-rw-r--r--tools/testing/selftests/kvm/riscv/get-reg-list.c10
-rw-r--r--tools/testing/selftests/lib.mk40
-rw-r--r--tools/testing/selftests/mm/cow.c3
-rw-r--r--tools/testing/selftests/net/Makefile1
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh8
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh2
-rwxr-xr-xtools/testing/selftests/net/vlan_hw_filter.sh29
-rw-r--r--virt/kvm/kvm_main.c3
466 files changed, 4943 insertions, 3405 deletions
diff --git a/.mailmap b/.mailmap
index 3ac1c12545f201..68e72a6017a0ee 100644
--- a/.mailmap
+++ b/.mailmap
@@ -191,6 +191,10 @@ Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@linux.alibaba.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@redhat.com>
+Geliang Tang <geliang.tang@linux.dev> <geliang.tang@suse.com>
+Geliang Tang <geliang.tang@linux.dev> <geliangtang@xiaomi.com>
+Geliang Tang <geliang.tang@linux.dev> <geliangtang@gmail.com>
+Geliang Tang <geliang.tang@linux.dev> <geliangtang@163.com>
Georgi Djakov <djakov@kernel.org> <georgi.djakov@linaro.org>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
index 73674baea75d32..f9160d7bac3caa 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
@@ -42,6 +42,8 @@ properties:
- lg,acx467akm-7
# LG Corporation 7" WXGA TFT LCD panel
- lg,ld070wx3-sl01
+ # LG Corporation 5" HD TFT LCD panel
+ - lg,lh500wx1-sd03
# One Stop Displays OSD101T2587-53TS 10.1" 1920x1200 panel
- osddisplays,osd101t2587-53ts
# Panasonic 10" WUXGA TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index 3ec9ee95045fbc..11422af3477e58 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -208,8 +208,6 @@ properties:
- lemaker,bl035-rgb-002
# LG 7" (800x480 pixels) TFT LCD panel
- lg,lb070wv8
- # LG Corporation 5" HD TFT LCD panel
- - lg,lh500wx1-sd03
# LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
- lg,lp079qx1-sp0v
# LG 9.7" (2048x1536 pixels) TFT LCD panel
diff --git a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
index f43186f9860756..d9287be898772f 100644
--- a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
+++ b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
@@ -15,9 +15,11 @@ allOf:
properties:
compatible:
- enum:
- - fsl,imx23-ocotp
- - fsl,imx28-ocotp
+ items:
+ - enum:
+ - fsl,imx23-ocotp
+ - fsl,imx28-ocotp
+ - const: fsl,ocotp
reg:
maxItems: 1
@@ -35,7 +37,7 @@ unevaluatedProperties: false
examples:
- |
ocotp: efuse@8002c000 {
- compatible = "fsl,imx28-ocotp";
+ compatible = "fsl,imx28-ocotp", "fsl,ocotp";
#address-cells = <1>;
#size-cells = <1>;
reg = <0x8002c000 0x2000>;
diff --git a/MAINTAINERS b/MAINTAINERS
index e2c6187a3ac80f..7cef2d2ef8d708 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6050,10 +6050,8 @@ M: Mikulas Patocka <mpatocka@redhat.com>
M: dm-devel@lists.linux.dev
L: dm-devel@lists.linux.dev
S: Maintained
-W: http://sources.redhat.com/dm
Q: http://patchwork.kernel.org/project/dm-devel/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm.git
-T: quilt http://people.redhat.com/agk/patches/linux/editing/
F: Documentation/admin-guide/device-mapper/
F: drivers/md/Kconfig
F: drivers/md/Makefile
@@ -9526,6 +9524,7 @@ F: drivers/bus/hisi_lpc.c
HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3)
M: Yisen Zhuang <yisen.zhuang@huawei.com>
M: Salil Mehta <salil.mehta@huawei.com>
+M: Jijie Shao <shaojijie@huawei.com>
L: netdev@vger.kernel.org
S: Maintained
W: http://www.hisilicon.com
@@ -12189,6 +12188,8 @@ LINUX FOR POWERPC (32-BIT AND 64-BIT)
M: Michael Ellerman <mpe@ellerman.id.au>
R: Nicholas Piggin <npiggin@gmail.com>
R: Christophe Leroy <christophe.leroy@csgroup.eu>
+R: Aneesh Kumar K.V <aneesh.kumar@kernel.org>
+R: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
W: https://github.com/linuxppc/wiki/wiki
@@ -19564,7 +19565,6 @@ S: Maintained
F: drivers/misc/sgi-xp/
SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
-M: Karsten Graul <kgraul@linux.ibm.com>
M: Wenjia Zhang <wenjia@linux.ibm.com>
M: Jan Karcher <jaka@linux.ibm.com>
R: D. Wythe <alibuda@linux.alibaba.com>
diff --git a/Makefile b/Makefile
index 70fc4c11dfc04e..ee995fc2b0e5c3 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 7
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 3162db540ee96c..1b0483c51cc169 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -49,7 +49,6 @@ config ARC
select OF
select OF_EARLY_FLATTREE
select PCI_SYSCALL if PCI
- select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
select HAVE_ARCH_JUMP_LABEL if ISA_ARCV2 && !CPU_ENDIAN_BE32
select TRACE_IRQFLAGS_SUPPORT
@@ -232,10 +231,6 @@ config ARC_CACHE_PAGES
Note that Global I/D ENABLE + Per Page DISABLE works but corollary
Global DISABLE + Per Page ENABLE won't work
-config ARC_CACHE_VIPT_ALIASING
- bool "Support VIPT Aliasing D$"
- depends on ARC_HAS_DCACHE && ISA_ARCOMPACT
-
endif #ARC_CACHE
config ARC_HAS_ICCM
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index bd5b1a9a054402..563af3e75f01f2 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -44,31 +44,10 @@ void dma_cache_wback(phys_addr_t start, unsigned long sz);
#define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */
-#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
-
#define flush_cache_mm(mm) /* called on munmap/exit */
#define flush_cache_range(mm, u_vstart, u_vend)
#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
-#else /* VIPT aliasing dcache */
-
-/* To clear out stale userspace mappings */
-void flush_cache_mm(struct mm_struct *mm);
-void flush_cache_range(struct vm_area_struct *vma,
- unsigned long start,unsigned long end);
-void flush_cache_page(struct vm_area_struct *vma,
- unsigned long user_addr, unsigned long page);
-
-/*
- * To make sure that userspace mapping is flushed to memory before
- * get_user_pages() uses a kernel mapping to access the page
- */
-#define ARCH_HAS_FLUSH_ANON_PAGE
-void flush_anon_page(struct vm_area_struct *vma,
- struct page *page, unsigned long u_vaddr);
-
-#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
-
/*
* A new pagecache page has PG_arch_1 clear - thus dcache dirty by default
* This works around some PIO based drivers which don't call flush_dcache_page
@@ -76,28 +55,6 @@ void flush_anon_page(struct vm_area_struct *vma,
*/
#define PG_dc_clean PG_arch_1
-#define CACHE_COLORS_NUM 4
-#define CACHE_COLORS_MSK (CACHE_COLORS_NUM - 1)
-#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)
-
-/*
- * Simple wrapper over config option
- * Bootup code ensures that hardware matches kernel configuration
- */
-static inline int cache_is_vipt_aliasing(void)
-{
- return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
-}
-
-/*
- * checks if two addresses (after page aligning) index into same cache set
- */
-#define addr_not_cache_congruent(addr1, addr2) \
-({ \
- cache_is_vipt_aliasing() ? \
- (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0; \
-})
-
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index 4d13320e0c1b63..3802a2daaf8692 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -291,4 +291,36 @@
/* M = 8-1 N = 8 */
.endm
+.macro SAVE_ABI_CALLEE_REGS
+ push r13
+ push r14
+ push r15
+ push r16
+ push r17
+ push r18
+ push r19
+ push r20
+ push r21
+ push r22
+ push r23
+ push r24
+ push r25
+.endm
+
+.macro RESTORE_ABI_CALLEE_REGS
+ pop r25
+ pop r24
+ pop r23
+ pop r22
+ pop r21
+ pop r20
+ pop r19
+ pop r18
+ pop r17
+ pop r16
+ pop r15
+ pop r14
+ pop r13
+.endm
+
#endif
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index a0e760eb35a80e..92c3e9f1325219 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -33,6 +33,91 @@
#include <asm/irqflags-compact.h>
#include <asm/thread_info.h> /* For THREAD_SIZE */
+/* Note on the LD/ST addr modes with addr reg wback
+ *
+ * LD.a same as LD.aw
+ *
+ * LD.a reg1, [reg2, x] => Pre Incr
+ * Eff Addr for load = [reg2 + x]
+ *
+ * LD.ab reg1, [reg2, x] => Post Incr
+ * Eff Addr for load = [reg2]
+ */
+
+.macro PUSHAX aux
+ lr r9, [\aux]
+ push r9
+.endm
+
+.macro POPAX aux
+ pop r9
+ sr r9, [\aux]
+.endm
+
+.macro SAVE_R0_TO_R12
+ push r0
+ push r1
+ push r2
+ push r3
+ push r4
+ push r5
+ push r6
+ push r7
+ push r8
+ push r9
+ push r10
+ push r11
+ push r12
+.endm
+
+.macro RESTORE_R12_TO_R0
+ pop r12
+ pop r11
+ pop r10
+ pop r9
+ pop r8
+ pop r7
+ pop r6
+ pop r5
+ pop r4
+ pop r3
+ pop r2
+ pop r1
+ pop r0
+.endm
+
+.macro SAVE_ABI_CALLEE_REGS
+ push r13
+ push r14
+ push r15
+ push r16
+ push r17
+ push r18
+ push r19
+ push r20
+ push r21
+ push r22
+ push r23
+ push r24
+ push r25
+.endm
+
+.macro RESTORE_ABI_CALLEE_REGS
+ pop r25
+ pop r24
+ pop r23
+ pop r22
+ pop r21
+ pop r20
+ pop r19
+ pop r18
+ pop r17
+ pop r16
+ pop r15
+ pop r14
+ pop r13
+.endm
+
/*--------------------------------------------------------------
* Switch to Kernel Mode stack if SP points to User Mode stack
*
@@ -235,7 +320,7 @@
SWITCH_TO_KERNEL_STK
- PUSH 0x003\LVL\()abcd /* Dummy ECR */
+ st.a 0x003\LVL\()abcd, [sp, -4] /* Dummy ECR */
sub sp, sp, 8 /* skip orig_r0 (not needed)
skip pt_regs->sp, already saved above */
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index 49c2e090cb5c1f..cf1ba376e992c6 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -21,114 +21,12 @@
#include <asm/entry-arcv2.h>
#endif
-/* Note on the LD/ST addr modes with addr reg wback
- *
- * LD.a same as LD.aw
- *
- * LD.a reg1, [reg2, x] => Pre Incr
- * Eff Addr for load = [reg2 + x]
- *
- * LD.ab reg1, [reg2, x] => Post Incr
- * Eff Addr for load = [reg2]
- */
-
-.macro PUSH reg
- st.a \reg, [sp, -4]
-.endm
-
-.macro PUSHAX aux
- lr r9, [\aux]
- PUSH r9
-.endm
-
-.macro POP reg
- ld.ab \reg, [sp, 4]
-.endm
-
-.macro POPAX aux
- POP r9
- sr r9, [\aux]
-.endm
-
-/*--------------------------------------------------------------
- * Helpers to save/restore Scratch Regs:
- * used by Interrupt/Exception Prologue/Epilogue
- *-------------------------------------------------------------*/
-.macro SAVE_R0_TO_R12
- PUSH r0
- PUSH r1
- PUSH r2
- PUSH r3
- PUSH r4
- PUSH r5
- PUSH r6
- PUSH r7
- PUSH r8
- PUSH r9
- PUSH r10
- PUSH r11
- PUSH r12
-.endm
-
-.macro RESTORE_R12_TO_R0
- POP r12
- POP r11
- POP r10
- POP r9
- POP r8
- POP r7
- POP r6
- POP r5
- POP r4
- POP r3
- POP r2
- POP r1
- POP r0
-
-.endm
-
-/*--------------------------------------------------------------
- * Helpers to save/restore callee-saved regs:
- * used by several macros below
- *-------------------------------------------------------------*/
-.macro SAVE_R13_TO_R25
- PUSH r13
- PUSH r14
- PUSH r15
- PUSH r16
- PUSH r17
- PUSH r18
- PUSH r19
- PUSH r20
- PUSH r21
- PUSH r22
- PUSH r23
- PUSH r24
- PUSH r25
-.endm
-
-.macro RESTORE_R25_TO_R13
- POP r25
- POP r24
- POP r23
- POP r22
- POP r21
- POP r20
- POP r19
- POP r18
- POP r17
- POP r16
- POP r15
- POP r14
- POP r13
-.endm
-
/*
* save user mode callee regs as struct callee_regs
* - needed by fork/do_signal/unaligned-access-emulation.
*/
.macro SAVE_CALLEE_SAVED_USER
- SAVE_R13_TO_R25
+ SAVE_ABI_CALLEE_REGS
.endm
/*
@@ -136,18 +34,18 @@
* - could have been changed by ptrace tracer or unaligned-access fixup
*/
.macro RESTORE_CALLEE_SAVED_USER
- RESTORE_R25_TO_R13
+ RESTORE_ABI_CALLEE_REGS
.endm
/*
* save/restore kernel mode callee regs at the time of context switch
*/
.macro SAVE_CALLEE_SAVED_KERNEL
- SAVE_R13_TO_R25
+ SAVE_ABI_CALLEE_REGS
.endm
.macro RESTORE_CALLEE_SAVED_KERNEL
- RESTORE_R25_TO_R13
+ RESTORE_ABI_CALLEE_REGS
.endm
/*--------------------------------------------------------------
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
index ef8d4166370c8d..8a2441670a8f67 100644
--- a/arch/arc/include/asm/hugepage.h
+++ b/arch/arc/include/asm/hugepage.h
@@ -10,6 +10,13 @@
#include <linux/types.h>
#include <asm-generic/pgtable-nopmd.h>
+/*
+ * Hugetlb definitions.
+ */
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+
static inline pte_t pmd_pte(pmd_t pmd)
{
return __pte(pmd_val(pmd));
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 4a2b30fb5a985e..00b9318e551e7e 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -54,6 +54,10 @@ struct pt_regs {
ecr_reg ecr;
};
+struct callee_regs {
+ unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
+};
+
#define MAX_REG_OFFSET offsetof(struct pt_regs, ecr)
#else
@@ -92,16 +96,14 @@ struct pt_regs {
unsigned long status32;
};
-#define MAX_REG_OFFSET offsetof(struct pt_regs, status32)
-
-#endif
-
-/* Callee saved registers - need to be saved only when you are scheduled out */
-
struct callee_regs {
unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};
+#define MAX_REG_OFFSET offsetof(struct pt_regs, status32)
+
+#endif
+
#define instruction_pointer(regs) ((regs)->ret)
#define profile_pc(regs) instruction_pointer(regs)
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 4dcf8589b708ac..d08a5092c2b4d4 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -153,7 +153,7 @@ static int arcv2_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
int n = 0;
#ifdef CONFIG_ISA_ARCV2
- const char *release, *cpu_nm, *isa_nm = "ARCv2";
+ const char *release = "", *cpu_nm = "HS38", *isa_nm = "ARCv2";
int dual_issue = 0, dual_enb = 0, mpy_opt, present;
int bpu_full, bpu_cache, bpu_pred, bpu_ret_stk;
char mpy_nm[16], lpb_nm[32];
@@ -172,8 +172,6 @@ static int arcv2_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
* releases only update it.
*/
- cpu_nm = "HS38";
-
if (info->arcver > 0x50 && info->arcver <= 0x53) {
release = arc_hs_rel[info->arcver - 0x51].str;
} else {
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 0b3bb529d24632..8f6f4a5429646f 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -62,7 +62,7 @@ struct rt_sigframe {
unsigned int sigret_magic;
};
-static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+static int save_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
{
int err = 0;
#ifndef CONFIG_ISA_ARCOMPACT
@@ -75,12 +75,12 @@ static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
#else
v2abi.r58 = v2abi.r59 = 0;
#endif
- err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
+ err = __copy_to_user(&mctx->v2abi, (void const *)&v2abi, sizeof(v2abi));
#endif
return err;
}
-static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+static int restore_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
{
int err = 0;
#ifndef CONFIG_ISA_ARCOMPACT
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index f7e05c14663750..9106ceac323c43 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -145,10 +145,9 @@ dc_chk:
p_dc->sz_k = 1 << (dbcr.sz - 1);
n += scnprintf(buf + n, len - n,
- "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",
+ "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s\n",
p_dc->sz_k, assoc, p_dc->line_len,
vipt ? "VIPT" : "PIPT",
- p_dc->colors > 1 ? " aliasing" : "",
IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));
slc_chk:
@@ -703,51 +702,10 @@ static inline void arc_slc_enable(void)
* Exported APIs
*/
-/*
- * Handle cache congruency of kernel and userspace mappings of page when kernel
- * writes-to/reads-from
- *
- * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
- * -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
- * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
- * -In SMP, if hardware caches are coherent
- *
- * There's a corollary case, where kernel READs from a userspace mapped page.
- * If the U-mapping is not congruent to K-mapping, former needs flushing.
- */
void flush_dcache_folio(struct folio *folio)
{
- struct address_space *mapping;
-
- if (!cache_is_vipt_aliasing()) {
- clear_bit(PG_dc_clean, &folio->flags);
- return;
- }
-
- /* don't handle anon pages here */
- mapping = folio_flush_mapping(folio);
- if (!mapping)
- return;
-
- /*
- * pagecache page, file not yet mapped to userspace
- * Make a note that K-mapping is dirty
- */
- if (!mapping_mapped(mapping)) {
- clear_bit(PG_dc_clean, &folio->flags);
- } else if (folio_mapped(folio)) {
- /* kernel reading from page with U-mapping */
- phys_addr_t paddr = (unsigned long)folio_address(folio);
- unsigned long vaddr = folio_pos(folio);
-
- /*
- * vaddr is not actually the virtual address, but is
- * congruent to every user mapping.
- */
- if (addr_not_cache_congruent(paddr, vaddr))
- __flush_dcache_pages(paddr, vaddr,
- folio_nr_pages(folio));
- }
+ clear_bit(PG_dc_clean, &folio->flags);
+ return;
}
EXPORT_SYMBOL(flush_dcache_folio);
@@ -921,44 +879,6 @@ noinline void flush_cache_all(void)
}
-#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
-
-void flush_cache_mm(struct mm_struct *mm)
-{
- flush_cache_all();
-}
-
-void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
- unsigned long pfn)
-{
- phys_addr_t paddr = pfn << PAGE_SHIFT;
-
- u_vaddr &= PAGE_MASK;
-
- __flush_dcache_pages(paddr, u_vaddr, 1);
-
- if (vma->vm_flags & VM_EXEC)
- __inv_icache_pages(paddr, u_vaddr, 1);
-}
-
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
-{
- flush_cache_all();
-}
-
-void flush_anon_page(struct vm_area_struct *vma, struct page *page,
- unsigned long u_vaddr)
-{
- /* TBD: do we really need to clear the kernel mapping */
- __flush_dcache_pages((phys_addr_t)page_address(page), u_vaddr, 1);
- __flush_dcache_pages((phys_addr_t)page_address(page),
- (phys_addr_t)page_address(page), 1);
-
-}
-
-#endif
-
void copy_user_highpage(struct page *to, struct page *from,
unsigned long u_vaddr, struct vm_area_struct *vma)
{
@@ -966,46 +886,11 @@ void copy_user_highpage(struct page *to, struct page *from,
struct folio *dst = page_folio(to);
void *kfrom = kmap_atomic(from);
void *kto = kmap_atomic(to);
- int clean_src_k_mappings = 0;
-
- /*
- * If SRC page was already mapped in userspace AND it's U-mapping is
- * not congruent with K-mapping, sync former to physical page so that
- * K-mapping in memcpy below, sees the right data
- *
- * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
- * equally valid for SRC page as well
- *
- * For !VIPT cache, all of this gets compiled out as
- * addr_not_cache_congruent() is 0
- */
- if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
- __flush_dcache_pages((unsigned long)kfrom, u_vaddr, 1);
- clean_src_k_mappings = 1;
- }
copy_page(kto, kfrom);
- /*
- * Mark DST page K-mapping as dirty for a later finalization by
- * update_mmu_cache(). Although the finalization could have been done
- * here as well (given that both vaddr/paddr are available).
- * But update_mmu_cache() already has code to do that for other
- * non copied user pages (e.g. read faults which wire in pagecache page
- * directly).
- */
clear_bit(PG_dc_clean, &dst->flags);
-
- /*
- * if SRC was already usermapped and non-congruent to kernel mapping
- * sync the kernel mapping back to physical page
- */
- if (clean_src_k_mappings) {
- __flush_dcache_pages((unsigned long)kfrom,
- (unsigned long)kfrom, 1);
- } else {
- clear_bit(PG_dc_clean, &src->flags);
- }
+ clear_bit(PG_dc_clean, &src->flags);
kunmap_atomic(kto);
kunmap_atomic(kfrom);
@@ -1140,17 +1025,8 @@ static noinline void __init arc_cache_init_master(void)
dc->line_len, L1_CACHE_BYTES);
/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
- if (is_isa_arcompact()) {
- int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
-
- if (dc->colors > 1) {
- if (!handled)
- panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
- if (CACHE_COLORS_NUM != dc->colors)
- panic("CACHE_COLORS_NUM not optimized for config\n");
- } else if (handled && dc->colors == 1) {
- panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
- }
+ if (is_isa_arcompact() && dc->colors > 1) {
+ panic("Aliasing VIPT cache not supported\n");
}
}
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index fce5fa2b4f5214..3c1c7ae732925c 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -14,10 +14,6 @@
#include <asm/cacheflush.h>
-#define COLOUR_ALIGN(addr, pgoff) \
- ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
- (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
-
/*
* Ensure that shared mappings are correctly aligned to
* avoid aliasing issues with VIPT caches.
@@ -31,21 +27,13 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- int do_align = 0;
- int aliasing = cache_is_vipt_aliasing();
struct vm_unmapped_area_info info;
/*
- * We only need to do colour alignment if D cache aliases.
- */
- if (aliasing)
- do_align = filp || (flags & MAP_SHARED);
-
- /*
* We enforce the MAP_FIXED case.
*/
if (flags & MAP_FIXED) {
- if (aliasing && flags & MAP_SHARED &&
+ if (flags & MAP_SHARED &&
(addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
return -EINVAL;
return addr;
@@ -55,10 +43,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
return -ENOMEM;
if (addr) {
- if (do_align)
- addr = COLOUR_ALIGN(addr, pgoff);
- else
- addr = PAGE_ALIGN(addr);
+ addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
@@ -70,7 +55,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
- info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index e536b2dcd4b024..ad702b49aeb3b8 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -478,21 +478,15 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
create_tlb(vma, vaddr, ptep);
- if (page == ZERO_PAGE(0)) {
+ if (page == ZERO_PAGE(0))
return;
- }
/*
- * Exec page : Independent of aliasing/page-color considerations,
- * since icache doesn't snoop dcache on ARC, any dirty
- * K-mapping of a code page needs to be wback+inv so that
- * icache fetch by userspace sees code correctly.
- * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
- * so userspace sees the right data.
- * (Avoids the flush for Non-exec + congruent mapping case)
+ * For executable pages, since icache doesn't snoop dcache, any
+ * dirty K-mapping of a code page needs to be wback+inv so that
+ * icache fetch by userspace sees code correctly.
*/
- if ((vma->vm_flags & VM_EXEC) ||
- addr_not_cache_congruent(paddr, vaddr)) {
+ if (vma->vm_flags & VM_EXEC) {
struct folio *folio = page_folio(page);
int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
if (dirty) {
diff --git a/arch/arm/boot/dts/ti/omap/am33xx.dtsi b/arch/arm/boot/dts/ti/omap/am33xx.dtsi
index 1a2cd5baf40210..5b9e01a8aa5d5a 100644
--- a/arch/arm/boot/dts/ti/omap/am33xx.dtsi
+++ b/arch/arm/boot/dts/ti/omap/am33xx.dtsi
@@ -359,6 +359,7 @@
<SYSC_IDLE_NO>,
<SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>;
+ ti,sysc-delay-us = <2>;
clocks = <&l3s_clkctrl AM3_L3S_USB_OTG_HS_CLKCTRL 0>;
clock-names = "fck";
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/ti/omap/dra7.dtsi b/arch/arm/boot/dts/ti/omap/dra7.dtsi
index 3f3e52e3b37526..6509c742fb58c9 100644
--- a/arch/arm/boot/dts/ti/omap/dra7.dtsi
+++ b/arch/arm/boot/dts/ti/omap/dra7.dtsi
@@ -147,7 +147,7 @@
l3-noc@44000000 {
compatible = "ti,dra7-l3-noc";
- reg = <0x44000000 0x1000>,
+ reg = <0x44000000 0x1000000>,
<0x45000000 0x1000>;
interrupts-extended = <&crossbar_mpu GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
<&wakeupgen GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 98999aa8cc0c09..7f387706368a68 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -793,11 +793,16 @@ void __init omap_soc_device_init(void)
soc_dev_attr->machine = soc_name;
soc_dev_attr->family = omap_get_family();
+ if (!soc_dev_attr->family) {
+ kfree(soc_dev_attr);
+ return;
+ }
soc_dev_attr->revision = soc_rev;
soc_dev_attr->custom_attr_group = omap_soc_groups[0];
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->family);
kfree(soc_dev_attr);
return;
}
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero.dtsi
index 15290e6892fca4..fc7315b9440659 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero.dtsi
@@ -68,10 +68,7 @@
&emac0 {
pinctrl-names = "default";
pinctrl-0 = <&ext_rgmii_pins>;
- phy-mode = "rgmii";
phy-handle = <&ext_rgmii_phy>;
- allwinner,rx-delay-ps = <3100>;
- allwinner,tx-delay-ps = <700>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts
index d83852e72f0634..b5d713926a341a 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts
@@ -13,6 +13,9 @@
};
&emac0 {
+ allwinner,rx-delay-ps = <3100>;
+ allwinner,tx-delay-ps = <700>;
+ phy-mode = "rgmii";
phy-supply = <&reg_dcdce>;
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts b/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts
index 00fe28caac939a..b3b1b8692125f9 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts
@@ -13,6 +13,8 @@
};
&emac0 {
+ allwinner,tx-delay-ps = <700>;
+ phy-mode = "rgmii-rxid";
phy-supply = <&reg_dldo1>;
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
index 70b465f7c6a7fb..00ac59a873e8dd 100644
--- a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
@@ -238,6 +238,7 @@
mt6360: pmic@34 {
compatible = "mediatek,mt6360";
reg = <0x34>;
+ interrupt-parent = <&pio>;
interrupts = <128 IRQ_TYPE_EDGE_FALLING>;
interrupt-names = "IRQB";
interrupt-controller;
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b19a8aee684c87..79ce70fbb751c6 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -834,6 +834,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+ /*
+ * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
+ * dirtiness again.
+ */
+ if (pte_sw_dirty(pte))
+ pte = pte_mkdirty(pte);
return pte;
}
diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h
index d977713ec0ba85..abb57bc543054f 100644
--- a/arch/arm64/include/asm/syscall_wrapper.h
+++ b/arch/arm64/include/asm/syscall_wrapper.h
@@ -44,9 +44,6 @@
return sys_ni_syscall(); \
}
-#define COMPAT_SYS_NI(name) \
- SYSCALL_ALIAS(__arm64_compat_sys_##name, sys_ni_posix_timers);
-
#endif /* CONFIG_COMPAT */
#define __SYSCALL_DEFINEx(x, name, ...) \
@@ -81,6 +78,5 @@
}
asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused);
-#define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers);
#endif /* __ASM_SYSCALL_WRAPPER_H */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index e5f75f1f108539..4796104c447181 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -410,7 +410,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
kvm_timer_vcpu_terminate(vcpu);
kvm_pmu_vcpu_destroy(vcpu);
-
+ kvm_vgic_vcpu_destroy(vcpu);
kvm_arm_vcpu_destroy(vcpu);
}
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index c8c3cb81278321..e949e1d0fd9f7b 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -368,7 +368,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
vgic_v4_teardown(kvm);
}
-void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -379,29 +379,39 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
vgic_flush_pending_lpis(vcpu);
INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
- vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+ vgic_unregister_redist_iodev(vcpu);
+ vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
+ }
}
-static void __kvm_vgic_destroy(struct kvm *kvm)
+void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ mutex_lock(&kvm->slots_lock);
+ __kvm_vgic_vcpu_destroy(vcpu);
+ mutex_unlock(&kvm->slots_lock);
+}
+
+void kvm_vgic_destroy(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
unsigned long i;
- lockdep_assert_held(&kvm->arch.config_lock);
+ mutex_lock(&kvm->slots_lock);
vgic_debug_destroy(kvm);
kvm_for_each_vcpu(i, vcpu, kvm)
- kvm_vgic_vcpu_destroy(vcpu);
+ __kvm_vgic_vcpu_destroy(vcpu);
+
+ mutex_lock(&kvm->arch.config_lock);
kvm_vgic_dist_destroy(kvm);
-}
-void kvm_vgic_destroy(struct kvm *kvm)
-{
- mutex_lock(&kvm->arch.config_lock);
- __kvm_vgic_destroy(kvm);
mutex_unlock(&kvm->arch.config_lock);
+ mutex_unlock(&kvm->slots_lock);
}
/**
@@ -469,25 +479,26 @@ int kvm_vgic_map_resources(struct kvm *kvm)
type = VGIC_V3;
}
- if (ret) {
- __kvm_vgic_destroy(kvm);
+ if (ret)
goto out;
- }
+
dist->ready = true;
dist_base = dist->vgic_dist_base;
mutex_unlock(&kvm->arch.config_lock);
ret = vgic_register_dist_iodev(kvm, dist_base, type);
- if (ret) {
+ if (ret)
kvm_err("Unable to register VGIC dist MMIO regions\n");
- kvm_vgic_destroy(kvm);
- }
- mutex_unlock(&kvm->slots_lock);
- return ret;
+ goto out_slots;
out:
mutex_unlock(&kvm->arch.config_lock);
+out_slots:
mutex_unlock(&kvm->slots_lock);
+
+ if (ret)
+ kvm_vgic_destroy(kvm);
+
return ret;
}
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index 89117ba2528a0a..a764b0ab8bf913 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -820,7 +820,7 @@ out_unlock:
return ret;
}
-static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
+void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
{
struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
@@ -833,6 +833,8 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
unsigned long c;
int ret = 0;
+ lockdep_assert_held(&kvm->slots_lock);
+
kvm_for_each_vcpu(c, vcpu, kvm) {
ret = vgic_register_redist_iodev(vcpu);
if (ret)
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 0ab09b0d44404b..8d134569d0a1f6 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -241,6 +241,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq);
int vgic_v3_save_pending_tables(struct kvm *kvm);
int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count);
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu);
+void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu);
bool vgic_v3_check_base(struct kvm *kvm);
void vgic_v3_load(struct kvm_vcpu *vcpu);
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index 4fcc168f073231..3c808c6803703c 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -57,7 +57,7 @@ obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
obj-$(CONFIG_RELOCATABLE) += relocate.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
diff --git a/arch/m68k/include/asm/kexec.h b/arch/m68k/include/asm/kexec.h
index f5a8b2defa4bce..3b0b64f0a35317 100644
--- a/arch/m68k/include/asm/kexec.h
+++ b/arch/m68k/include/asm/kexec.h
@@ -2,7 +2,7 @@
#ifndef _ASM_M68K_KEXEC_H
#define _ASM_M68K_KEXEC_H
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
@@ -25,6 +25,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
#endif /* _ASM_M68K_KEXEC_H */
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index 01fb69a5095f43..f335bf3268a108 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -25,7 +25,7 @@ obj-$(CONFIG_PCI) += pcibios.o
obj-$(CONFIG_M68K_NONCOHERENT_DMA) += dma.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_BOOTINFO_PROC) += bootinfo_proc.o
obj-$(CONFIG_UBOOT) += uboot.o
diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
index f878f47e4501bc..ee3e2153dd13fb 100644
--- a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
@@ -130,8 +130,7 @@
compatible = "pci0014,7a03.0",
"pci0014,7a03",
"pciclass0c0320",
- "pciclass0c03",
- "loongson, pci-gmac";
+ "pciclass0c03";
reg = <0x1800 0x0 0x0 0x0 0x0>;
interrupts = <12 IRQ_TYPE_LEVEL_LOW>,
diff --git a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
index 7c69e8245c2f10..cce9428afc41fc 100644
--- a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
+++ b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
@@ -193,8 +193,7 @@
compatible = "pci0014,7a03.0",
"pci0014,7a03",
"pciclass020000",
- "pciclass0200",
- "loongson, pci-gmac";
+ "pciclass0200";
reg = <0x1800 0x0 0x0 0x0 0x0>;
interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 33c09688210fff..08ea2cde1eb5b3 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -422,7 +422,7 @@ static const struct plat_smp_ops octeon_smp_ops = {
.cpu_disable = octeon_cpu_disable,
.cpu_die = octeon_cpu_die,
#endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
@@ -502,7 +502,7 @@ static const struct plat_smp_ops octeon_78xx_smp_ops = {
.cpu_disable = octeon_cpu_disable,
.cpu_die = octeon_cpu_die,
#endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
diff --git a/arch/mips/include/asm/kexec.h b/arch/mips/include/asm/kexec.h
index d6d5fa5cc31dd7..69e579e41e6623 100644
--- a/arch/mips/include/asm/kexec.h
+++ b/arch/mips/include/asm/kexec.h
@@ -31,7 +31,7 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
prepare_frametrace(newregs);
}
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
struct kimage;
extern unsigned long kexec_args[4];
extern int (*_machine_kexec_prepare)(struct kimage *);
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
index 5719ff49eff1c8..0c59e168f8008c 100644
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -35,7 +35,7 @@ struct plat_smp_ops {
void (*cpu_die)(unsigned int cpu);
void (*cleanup_dead_cpu)(unsigned cpu);
#endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
void (*kexec_nonboot_cpu)(void);
#endif
};
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index a40d8c0e4b879b..901bc61fa7ae92 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -93,7 +93,7 @@ static inline void __cpu_die(unsigned int cpu)
extern void __noreturn play_dead(void);
#endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
static inline void kexec_nonboot_cpu(void)
{
extern const struct plat_smp_ops *mp_ops; /* private */
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 853a43ee4b446e..ecf3278a32f707 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -90,7 +90,7 @@ obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o
obj-$(CONFIG_RELOCATABLE) += relocate.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
+obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index c074ecce3fbf29..b3dbf9ecb0d63e 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -434,7 +434,7 @@ const struct plat_smp_ops bmips43xx_smp_ops = {
.cpu_disable = bmips_cpu_disable,
.cpu_die = bmips_cpu_die,
#endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
@@ -451,7 +451,7 @@ const struct plat_smp_ops bmips5000_smp_ops = {
.cpu_disable = bmips_cpu_disable,
.cpu_die = bmips_cpu_die,
#endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index dd55d59b88db34..f6c37d407f365f 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -392,7 +392,7 @@ static void cps_smp_finish(void)
local_irq_enable();
}
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE)
enum cpu_death {
CPU_DEATH_HALT,
@@ -429,7 +429,7 @@ static void cps_shutdown_this_cpu(enum cpu_death death)
}
}
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
static void cps_kexec_nonboot_cpu(void)
{
@@ -439,9 +439,9 @@ static void cps_kexec_nonboot_cpu(void)
cps_shutdown_this_cpu(CPU_DEATH_POWER);
}
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
-#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC */
+#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC_CORE */
#ifdef CONFIG_HOTPLUG_CPU
@@ -610,7 +610,7 @@ static const struct plat_smp_ops cps_smp_ops = {
.cpu_die = cps_cpu_die,
.cleanup_dead_cpu = cps_cleanup_dead_cpu,
#endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
.kexec_nonboot_cpu = cps_kexec_nonboot_cpu,
#endif
};
diff --git a/arch/mips/loongson64/reset.c b/arch/mips/loongson64/reset.c
index e420800043b089..e01c8d4a805a91 100644
--- a/arch/mips/loongson64/reset.c
+++ b/arch/mips/loongson64/reset.c
@@ -53,7 +53,7 @@ static void loongson_halt(void)
}
}
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
/* 0X80000000~0X80200000 is safe */
#define MAX_ARGS 64
@@ -158,7 +158,7 @@ static int __init mips_reboot_setup(void)
_machine_halt = loongson_halt;
pm_power_off = loongson_poweroff;
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
kexec_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL);
if (WARN_ON(!kexec_argv))
return -ENOMEM;
diff --git a/arch/mips/loongson64/smp.c b/arch/mips/loongson64/smp.c
index e015a26a40f7a5..498bdc1bb0ede8 100644
--- a/arch/mips/loongson64/smp.c
+++ b/arch/mips/loongson64/smp.c
@@ -864,7 +864,7 @@ const struct plat_smp_ops loongson3_smp_ops = {
.cpu_disable = loongson3_cpu_disable,
.cpu_die = loongson3_cpu_die,
#endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index 8d3eacb50d5601..9d44e6630908d2 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -301,7 +301,6 @@ CONFIG_WQ_WATCHDOG=y
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
-CONFIG_DEBUG_CREDENTIALS=y
# CONFIG_FTRACE is not set
CONFIG_XMON=y
# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
index b1f25bac280b4e..71d52a670d951b 100644
--- a/arch/powerpc/platforms/pseries/vas.c
+++ b/arch/powerpc/platforms/pseries/vas.c
@@ -385,11 +385,15 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
* same fault IRQ is not freed by the OS before.
*/
mutex_lock(&vas_pseries_mutex);
- if (migration_in_progress)
+ if (migration_in_progress) {
rc = -EBUSY;
- else
+ } else {
rc = allocate_setup_window(txwin, (u64 *)&domain[0],
cop_feat_caps->win_type);
+ if (!rc)
+ caps->nr_open_wins_progress++;
+ }
+
mutex_unlock(&vas_pseries_mutex);
if (rc)
goto out;
@@ -404,8 +408,17 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
goto out_free;
txwin->win_type = cop_feat_caps->win_type;
- mutex_lock(&vas_pseries_mutex);
+
/*
+ * The migration SUSPEND thread sets migration_in_progress and
+ * closes all open windows from the list. But the window is
+ * added to the list after the open and modify HCALLs. So it is
+ * possible that migration_in_progress is set before the modify
+ * HCALL, which may leave some windows open when the hypervisor
+ * initiates the migration.
+ * So check the migration_in_progress flag again and close any
+ * open windows.
+ *
* Possible to lose the acquired credit with DLPAR core
* removal after the window is opened. So if there are any
* closed windows (means with lost credits), do not give new
@@ -413,9 +426,11 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
* after the existing windows are reopened when credits are
* available.
*/
- if (!caps->nr_close_wins) {
+ mutex_lock(&vas_pseries_mutex);
+ if (!caps->nr_close_wins && !migration_in_progress) {
list_add(&txwin->win_list, &caps->list);
caps->nr_open_windows++;
+ caps->nr_open_wins_progress--;
mutex_unlock(&vas_pseries_mutex);
vas_user_win_add_mm_context(&txwin->vas_win.task_ref);
return &txwin->vas_win;
@@ -433,6 +448,12 @@ out_free:
*/
free_irq_setup(txwin);
h_deallocate_vas_window(txwin->vas_win.winid);
+ /*
+ * Hold the mutex and decrement the nr_open_wins_progress counter.
+ */
+ mutex_lock(&vas_pseries_mutex);
+ caps->nr_open_wins_progress--;
+ mutex_unlock(&vas_pseries_mutex);
out:
atomic_dec(&cop_feat_caps->nr_used_credits);
kfree(txwin);
@@ -937,14 +958,14 @@ int vas_migration_handler(int action)
struct vas_caps *vcaps;
int i, rc = 0;
+ pr_info("VAS migration event %d\n", action);
+
/*
* NX-GZIP is not enabled. Nothing to do for migration.
*/
if (!copypaste_feat)
return rc;
- mutex_lock(&vas_pseries_mutex);
-
if (action == VAS_SUSPEND)
migration_in_progress = true;
else
@@ -990,12 +1011,27 @@ int vas_migration_handler(int action)
switch (action) {
case VAS_SUSPEND:
+ mutex_lock(&vas_pseries_mutex);
rc = reconfig_close_windows(vcaps, vcaps->nr_open_windows,
true);
+ /*
+ * Windows are added to the list only after a successful
+ * open. So wait here for the in-progress opens to be closed
+ * in vas_allocate_window(), which happens once
+ * migration_in_progress is set.
+ */
+ while (vcaps->nr_open_wins_progress) {
+ mutex_unlock(&vas_pseries_mutex);
+ msleep(10);
+ mutex_lock(&vas_pseries_mutex);
+ }
+ mutex_unlock(&vas_pseries_mutex);
break;
case VAS_RESUME:
+ mutex_lock(&vas_pseries_mutex);
atomic_set(&caps->nr_total_credits, new_nr_creds);
rc = reconfig_open_windows(vcaps, new_nr_creds, true);
+ mutex_unlock(&vas_pseries_mutex);
break;
default:
/* should not happen */
@@ -1011,8 +1047,9 @@ int vas_migration_handler(int action)
goto out;
}
+ pr_info("VAS migration event (%d) successful\n", action);
+
out:
- mutex_unlock(&vas_pseries_mutex);
return rc;
}
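
The vas.c hunks above pair the new nr_open_wins_progress counter with a poll-and-sleep loop so the migration SUSPEND path only proceeds once every in-flight window open has either been listed or torn down. A minimal userspace analogue of that drain pattern is sketched below, assuming pthread primitives in place of the kernel mutex and msleep(); the helper names are illustrative, not taken from the patch.

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

/* Illustrative stand-ins for the fields guarded by vas_pseries_mutex. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_open_wins_progress;	/* opens past the HCALL, not yet listed */
static bool migration_in_progress;

/* Open path: bump the counter before the slow setup runs outside the lock. */
static int open_window(void)
{
	pthread_mutex_lock(&lock);
	if (migration_in_progress) {
		pthread_mutex_unlock(&lock);
		return -1;			/* -EBUSY in the real code */
	}
	nr_open_wins_progress++;
	pthread_mutex_unlock(&lock);

	/* ... window setup and IRQ registration happen here, unlocked ... */

	pthread_mutex_lock(&lock);
	nr_open_wins_progress--;		/* listed or torn down, either way done */
	pthread_mutex_unlock(&lock);
	return 0;
}

/* Suspend path: set the flag, then poll until the in-flight opens drain. */
static void migration_suspend(void)
{
	pthread_mutex_lock(&lock);
	migration_in_progress = true;
	while (nr_open_wins_progress) {
		pthread_mutex_unlock(&lock);
		usleep(10 * 1000);		/* msleep(10) in the kernel patch */
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}
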
diff --git a/arch/powerpc/platforms/pseries/vas.h b/arch/powerpc/platforms/pseries/vas.h
index 7115043ec48830..45567cd1317837 100644
--- a/arch/powerpc/platforms/pseries/vas.h
+++ b/arch/powerpc/platforms/pseries/vas.h
@@ -91,6 +91,8 @@ struct vas_cop_feat_caps {
struct vas_caps {
struct vas_cop_feat_caps caps;
struct list_head list; /* List of open windows */
+ int nr_open_wins_progress; /* Number of open windows in */
+ /* progress. Used in migration */
int nr_close_wins; /* closed windows in the hypervisor for DLPAR */
int nr_open_windows; /* Number of successful open windows */
u8 feat; /* Feature type */
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 95a2a06acc6a62..24c1799e2ec490 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -685,7 +685,7 @@ config RISCV_BOOT_SPINWAIT
If unsure what to do here, say N.
config ARCH_SUPPORTS_KEXEC
- def_bool MMU
+ def_bool y
config ARCH_SELECTS_KEXEC
def_bool y
@@ -693,7 +693,7 @@ config ARCH_SELECTS_KEXEC
select HOTPLUG_CPU if SMP
config ARCH_SUPPORTS_KEXEC_FILE
- def_bool 64BIT && MMU
+ def_bool 64BIT
config ARCH_SELECTS_KEXEC_FILE
def_bool y
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 294044429e8e15..ab00235b018f89 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -899,7 +899,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
#define PAGE_KERNEL __pgprot(0)
#define swapper_pg_dir NULL
#define TASK_SIZE 0xffffffffUL
-#define VMALLOC_START 0
+#define VMALLOC_START _AC(0, UL)
#define VMALLOC_END TASK_SIZE
#endif /* !CONFIG_MMU */
diff --git a/arch/riscv/include/asm/syscall_wrapper.h b/arch/riscv/include/asm/syscall_wrapper.h
index 1d7942c8a6cbae..eeec04b7dae67b 100644
--- a/arch/riscv/include/asm/syscall_wrapper.h
+++ b/arch/riscv/include/asm/syscall_wrapper.h
@@ -46,9 +46,6 @@ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
return sys_ni_syscall(); \
}
-#define COMPAT_SYS_NI(name) \
- SYSCALL_ALIAS(__riscv_compat_sys_##name, sys_ni_posix_timers);
-
#endif /* CONFIG_COMPAT */
#define __SYSCALL_DEFINEx(x, name, ...) \
@@ -82,6 +79,4 @@ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
return sys_ni_syscall(); \
}
-#define SYS_NI(name) SYSCALL_ALIAS(__riscv_sys_##name, sys_ni_posix_timers);
-
#endif /* __ASM_SYSCALL_WRAPPER_H */
diff --git a/arch/riscv/kernel/crash_core.c b/arch/riscv/kernel/crash_core.c
index 55f1d7856b5448..8706736fd4e2dc 100644
--- a/arch/riscv/kernel/crash_core.c
+++ b/arch/riscv/kernel/crash_core.c
@@ -5,18 +5,20 @@
void arch_crash_save_vmcoreinfo(void)
{
- VMCOREINFO_NUMBER(VA_BITS);
VMCOREINFO_NUMBER(phys_ram_base);
vmcoreinfo_append_str("NUMBER(PAGE_OFFSET)=0x%lx\n", PAGE_OFFSET);
vmcoreinfo_append_str("NUMBER(VMALLOC_START)=0x%lx\n", VMALLOC_START);
vmcoreinfo_append_str("NUMBER(VMALLOC_END)=0x%lx\n", VMALLOC_END);
+#ifdef CONFIG_MMU
+ VMCOREINFO_NUMBER(VA_BITS);
vmcoreinfo_append_str("NUMBER(VMEMMAP_START)=0x%lx\n", VMEMMAP_START);
vmcoreinfo_append_str("NUMBER(VMEMMAP_END)=0x%lx\n", VMEMMAP_END);
#ifdef CONFIG_64BIT
vmcoreinfo_append_str("NUMBER(MODULES_VADDR)=0x%lx\n", MODULES_VADDR);
vmcoreinfo_append_str("NUMBER(MODULES_END)=0x%lx\n", MODULES_END);
#endif
+#endif
vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR);
vmcoreinfo_append_str("NUMBER(va_kernel_pa_offset)=0x%lx\n",
kernel_map.va_kernel_pa_offset);
diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c
index 6cf23b8adb7129..e808723a85f1b1 100644
--- a/arch/riscv/kvm/aia_imsic.c
+++ b/arch/riscv/kvm/aia_imsic.c
@@ -55,6 +55,7 @@ struct imsic {
/* IMSIC SW-file */
struct imsic_mrif *swfile;
phys_addr_t swfile_pa;
+ spinlock_t swfile_extirq_lock;
};
#define imsic_vs_csr_read(__c) \
@@ -613,12 +614,23 @@ static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu)
{
struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
struct imsic_mrif *mrif = imsic->swfile;
+ unsigned long flags;
+
+ /*
+ * The critical section is necessary during external interrupt
+ * updates to avoid the risk of losing interrupts due to potential
+ * interruptions between reading topei and updating pending status.
+ */
+
+ spin_lock_irqsave(&imsic->swfile_extirq_lock, flags);
if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) &&
imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis))
kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
else
kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
+
+ spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags);
}
static void imsic_swfile_read(struct kvm_vcpu *vcpu, bool clear,
@@ -1039,6 +1051,7 @@ int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu)
}
imsic->swfile = page_to_virt(swfile_page);
imsic->swfile_pa = page_to_phys(swfile_page);
+ spin_lock_init(&imsic->swfile_extirq_lock);
/* Setup IO device */
kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops);
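
The aia_imsic.c hunks add swfile_extirq_lock so that, per the comment above, the topei check and the update of the vcpu's external-interrupt line happen inside one critical section rather than racing with concurrent injections. A rough userspace analogue of that check-and-update pattern is sketched below, using a pthread mutex in place of spin_lock_irqsave() and simplified state; all names here are illustrative.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t extirq_lock = PTHREAD_MUTEX_INITIALIZER;
static bool delivery_enabled;		/* stands in for mrif->eidelivery */
static unsigned int pending_count;	/* stands in for the topei scan */
static bool ext_irq_line;		/* stands in for IRQ_VS_EXT */

/* Called after any change to delivery enable or the pending state. */
static void extirq_update(void)
{
	pthread_mutex_lock(&extirq_lock);
	/*
	 * Read the inputs and drive the output line under one lock so a
	 * concurrent update cannot slip in between the check and the update.
	 */
	ext_irq_line = delivery_enabled && pending_count > 0;
	pthread_mutex_unlock(&extirq_lock);
}

/* Injection path: mark an interrupt pending, then re-evaluate the line. */
static void inject_irq(void)
{
	pthread_mutex_lock(&extirq_lock);
	pending_count++;
	pthread_mutex_unlock(&extirq_lock);
	extirq_update();
}
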
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 438cd92e60801b..6de44ede4e14dd 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -44,8 +44,7 @@ CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
-CONFIG_MARCH_ZEC12=y
-CONFIG_TUNE_ZEC12=y
+CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
@@ -76,7 +75,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
@@ -93,6 +91,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
+CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_SLUB_STATS=y
# CONFIG_COMPAT_BRK is not set
@@ -619,6 +618,9 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_BTRFS_DEBUG=y
CONFIG_BTRFS_ASSERT=y
CONFIG_NILFS2_FS=m
+CONFIG_BCACHEFS_FS=y
+CONFIG_BCACHEFS_QUOTA=y
+CONFIG_BCACHEFS_POSIX_ACL=y
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
@@ -691,7 +693,6 @@ CONFIG_PERSISTENT_KEYRINGS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
@@ -834,7 +835,6 @@ CONFIG_DEBUG_IRQFLAGS=y
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
-CONFIG_DEBUG_CREDENTIALS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_REF_SCALE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=300
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 1b8150e50f6a65..bcae47da6b7cd2 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -42,8 +42,7 @@ CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
-CONFIG_MARCH_ZEC12=y
-CONFIG_TUNE_ZEC12=y
+CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
@@ -71,7 +70,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
@@ -88,6 +86,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
+CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
CONFIG_ZSMALLOC_STAT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y
@@ -605,6 +604,9 @@ CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
+CONFIG_BCACHEFS_FS=m
+CONFIG_BCACHEFS_QUOTA=y
+CONFIG_BCACHEFS_POSIX_ACL=y
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
@@ -677,7 +679,6 @@ CONFIG_PERSISTENT_KEYRINGS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_LOCKDOWN_LSM=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index b831083b4edd8b..47028450eee157 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -9,8 +9,7 @@ CONFIG_BPF_SYSCALL=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_CRASH_DUMP=y
-CONFIG_MARCH_ZEC12=y
-CONFIG_TUNE_ZEC12=y
+CONFIG_MARCH_Z13=y
# CONFIG_COMPAT is not set
CONFIG_NR_CPUS=2
CONFIG_HZ_100=y
diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
index b714ed0ef68853..9acf48e53a87fb 100644
--- a/arch/s390/include/asm/fpu/api.h
+++ b/arch/s390/include/asm/fpu/api.h
@@ -79,7 +79,7 @@ static inline int test_fp_ctl(u32 fpc)
#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)
#define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
-#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_V0V7)
+#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_LOW)
struct kernel_fpu;
diff --git a/arch/s390/include/asm/syscall_wrapper.h b/arch/s390/include/asm/syscall_wrapper.h
index 9286430fe7290b..35c1d1b860d88a 100644
--- a/arch/s390/include/asm/syscall_wrapper.h
+++ b/arch/s390/include/asm/syscall_wrapper.h
@@ -63,10 +63,6 @@
cond_syscall(__s390x_sys_##name); \
cond_syscall(__s390_sys_##name)
-#define SYS_NI(name) \
- SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers); \
- SYSCALL_ALIAS(__s390_sys_##name, sys_ni_posix_timers)
-
#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
long __s390_compat_sys##name(struct pt_regs *regs); \
ALLOW_ERROR_INJECTION(__s390_compat_sys##name, ERRNO); \
@@ -85,15 +81,11 @@
/*
* As some compat syscalls may not be implemented, we need to expand
- * COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in
- * kernel/time/posix-stubs.c to cover this case as well.
+ * COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well.
*/
#define COND_SYSCALL_COMPAT(name) \
cond_syscall(__s390_compat_sys_##name)
-#define COMPAT_SYS_NI(name) \
- SYSCALL_ALIAS(__s390_compat_sys_##name, sys_ni_posix_timers)
-
#define __S390_SYS_STUBx(x, name, ...) \
long __s390_sys##name(struct pt_regs *regs); \
ALLOW_ERROR_INJECTION(__s390_sys##name, ERRNO); \
@@ -124,9 +116,6 @@
#define COND_SYSCALL(name) \
cond_syscall(__s390x_sys_##name)
-#define SYS_NI(name) \
- SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers)
-
#define __S390_SYS_STUBx(x, fullname, name, ...)
#endif /* CONFIG_COMPAT */
diff --git a/arch/sh/include/asm/kexec.h b/arch/sh/include/asm/kexec.h
index 927d80ba2332a7..76631714673ca8 100644
--- a/arch/sh/include/asm/kexec.h
+++ b/arch/sh/include/asm/kexec.h
@@ -28,7 +28,7 @@
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_SH
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
/* arch/sh/kernel/machine_kexec.c */
void reserve_crashkernel(void);
@@ -67,6 +67,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
}
#else
static inline void reserve_crashkernel(void) { }
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
#endif /* __ASM_SH_KEXEC_H */
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 69cd9ac4b2ab8a..2d7e70537de04c 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -33,7 +33,7 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_MODULES) += sh_ksyms_32.o module.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
diff --git a/arch/sh/kernel/reboot.c b/arch/sh/kernel/reboot.c
index e8eeedc9b1822d..1de006b1c33934 100644
--- a/arch/sh/kernel/reboot.c
+++ b/arch/sh/kernel/reboot.c
@@ -63,7 +63,7 @@ struct machine_ops machine_ops = {
.shutdown = native_machine_shutdown,
.restart = native_machine_restart,
.halt = native_machine_halt,
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
.crash_shutdown = native_machine_crash_shutdown,
#endif
};
@@ -88,7 +88,7 @@ void machine_halt(void)
machine_ops.halt();
}
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
void machine_crash_shutdown(struct pt_regs *regs)
{
machine_ops.crash_shutdown(regs);
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 3d80515298d268..d3175f09b3aad9 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -220,7 +220,7 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
request_resource(res, &code_resource);
request_resource(res, &data_resource);
request_resource(res, &bss_resource);
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
request_resource(res, &crashk_res);
#endif
diff --git a/arch/x86/boot/compressed/acpi.c b/arch/x86/boot/compressed/acpi.c
index 55c98fdd67d2b7..18d15d1ce87d59 100644
--- a/arch/x86/boot/compressed/acpi.c
+++ b/arch/x86/boot/compressed/acpi.c
@@ -178,7 +178,7 @@ static unsigned long get_cmdline_acpi_rsdp(void)
{
unsigned long addr = 0;
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
char val[MAX_ADDR_LEN] = { };
int ret;
diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
index fd2669b1cb2d95..21f9407be5d357 100644
--- a/arch/x86/include/asm/syscall_wrapper.h
+++ b/arch/x86/include/asm/syscall_wrapper.h
@@ -86,9 +86,6 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
return sys_ni_syscall(); \
}
-#define __SYS_NI(abi, name) \
- SYSCALL_ALIAS(__##abi##_##name, sys_ni_posix_timers);
-
#ifdef CONFIG_X86_64
#define __X64_SYS_STUB0(name) \
__SYS_STUB0(x64, sys_##name)
@@ -100,13 +97,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __X64_COND_SYSCALL(name) \
__COND_SYSCALL(x64, sys_##name)
-#define __X64_SYS_NI(name) \
- __SYS_NI(x64, sys_##name)
#else /* CONFIG_X86_64 */
#define __X64_SYS_STUB0(name)
#define __X64_SYS_STUBx(x, name, ...)
#define __X64_COND_SYSCALL(name)
-#define __X64_SYS_NI(name)
#endif /* CONFIG_X86_64 */
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
@@ -120,13 +114,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __IA32_COND_SYSCALL(name) \
__COND_SYSCALL(ia32, sys_##name)
-#define __IA32_SYS_NI(name) \
- __SYS_NI(ia32, sys_##name)
#else /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
#define __IA32_SYS_STUB0(name)
#define __IA32_SYS_STUBx(x, name, ...)
#define __IA32_COND_SYSCALL(name)
-#define __IA32_SYS_NI(name)
#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
#ifdef CONFIG_IA32_EMULATION
@@ -135,8 +126,7 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
* additional wrappers (aptly named __ia32_sys_xyzzy) which decode the
* ia32 regs in the proper order for shared or "common" syscalls. As some
* syscalls may not be implemented, we need to expand COND_SYSCALL in
- * kernel/sys_ni.c and SYS_NI in kernel/time/posix-stubs.c to cover this
- * case as well.
+ * kernel/sys_ni.c to cover this case as well.
*/
#define __IA32_COMPAT_SYS_STUB0(name) \
__SYS_STUB0(ia32, compat_sys_##name)
@@ -148,14 +138,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __IA32_COMPAT_COND_SYSCALL(name) \
__COND_SYSCALL(ia32, compat_sys_##name)
-#define __IA32_COMPAT_SYS_NI(name) \
- __SYS_NI(ia32, compat_sys_##name)
-
#else /* CONFIG_IA32_EMULATION */
#define __IA32_COMPAT_SYS_STUB0(name)
#define __IA32_COMPAT_SYS_STUBx(x, name, ...)
#define __IA32_COMPAT_COND_SYSCALL(name)
-#define __IA32_COMPAT_SYS_NI(name)
#endif /* CONFIG_IA32_EMULATION */
@@ -175,13 +161,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __X32_COMPAT_COND_SYSCALL(name) \
__COND_SYSCALL(x64, compat_sys_##name)
-#define __X32_COMPAT_SYS_NI(name) \
- __SYS_NI(x64, compat_sys_##name)
#else /* CONFIG_X86_X32_ABI */
#define __X32_COMPAT_SYS_STUB0(name)
#define __X32_COMPAT_SYS_STUBx(x, name, ...)
#define __X32_COMPAT_COND_SYSCALL(name)
-#define __X32_COMPAT_SYS_NI(name)
#endif /* CONFIG_X86_X32_ABI */
@@ -212,17 +195,12 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
/*
* As some compat syscalls may not be implemented, we need to expand
- * COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in
- * kernel/time/posix-stubs.c to cover this case as well.
+ * COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well.
*/
#define COND_SYSCALL_COMPAT(name) \
__IA32_COMPAT_COND_SYSCALL(name) \
__X32_COMPAT_COND_SYSCALL(name)
-#define COMPAT_SYS_NI(name) \
- __IA32_COMPAT_SYS_NI(name) \
- __X32_COMPAT_SYS_NI(name)
-
#endif /* CONFIG_COMPAT */
#define __SYSCALL_DEFINEx(x, name, ...) \
@@ -243,8 +221,8 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
* As the generic SYSCALL_DEFINE0() macro does not decode any parameters for
* obvious reasons, and passing struct pt_regs *regs to it in %rdi does not
* hurt, we only need to re-define it here to keep the naming congruent to
- * SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() and SYS_NI()
- * macros to work correctly.
+ * SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() macro
+ * to work correctly.
*/
#define SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
@@ -257,10 +235,6 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
__X64_COND_SYSCALL(name) \
__IA32_COND_SYSCALL(name)
-#define SYS_NI(name) \
- __X64_SYS_NI(name) \
- __IA32_SYS_NI(name)
-
/*
* For VSYSCALLS, we need to declare these three syscalls with the new
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 1a0dd80d81ac30..85a3ce2a3666ad 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -293,6 +293,7 @@ acpi_parse_lapic(union acpi_subtable_headers * header, const unsigned long end)
processor->processor_id, /* ACPI ID */
processor->lapic_flags & ACPI_MADT_ENABLED);
+ has_lapic_cpus = true;
return 0;
}
@@ -1134,7 +1135,6 @@ static int __init acpi_parse_madt_lapic_entries(void)
if (!count) {
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
acpi_parse_lapic, MAX_LOCAL_APIC);
- has_lapic_cpus = count > 0;
x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
acpi_parse_x2apic, MAX_LOCAL_APIC);
}
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 73be3931e4f060..aae7456ece0700 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -255,6 +255,16 @@ static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
}
}
+static void __init_or_module noinline optimize_nops_inplace(u8 *instr, size_t len)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ optimize_nops(instr, len);
+ sync_core();
+ local_irq_restore(flags);
+}
+
/*
* In this context, "source" is where the instructions are placed in the
* section .altinstr_replacement, for example during kernel build by the
@@ -438,7 +448,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
* patch if feature is *NOT* present.
*/
if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) {
- optimize_nops(instr, a->instrlen);
+ optimize_nops_inplace(instr, a->instrlen);
continue;
}
@@ -1685,8 +1695,8 @@ void __init_or_module text_poke_early(void *addr, const void *opcode,
} else {
local_irq_save(flags);
memcpy(addr, opcode, len);
- local_irq_restore(flags);
sync_core();
+ local_irq_restore(flags);
/*
* Could also do a CLFLUSH here to speed up CPU recovery; but
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 086a2c3aaaa044..0f8103240fda31 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -255,6 +255,22 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
testl $X2APIC_ENABLE, %eax
jnz .Lread_apicid_msr
+#ifdef CONFIG_X86_X2APIC
+ /*
+ * If the system is in X2APIC mode, the MMIO base might not be
+ * mapped, causing the MMIO read below to fault. Faults can't
+ * be handled at that point.
+ */
+ cmpl $0, x2apic_mode(%rip)
+ jz .Lread_apicid_mmio
+
+ /* Force the AP into X2APIC mode. */
+ orl $X2APIC_ENABLE, %eax
+ wrmsr
+ jmp .Lread_apicid_msr
+#endif
+
+.Lread_apicid_mmio:
/* Read the APIC ID from the fix-mapped MMIO space. */
movq apic_mmio_base(%rip), %rcx
addq $APIC_ID, %rcx
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 4900c078045acc..6ee925d6664846 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2972,6 +2972,25 @@ static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux);
}
+
+ /*
+ * For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if
+ * the host/guest supports its use.
+ *
+ * guest_can_use() checks a number of requirements on the host/guest to
+ * ensure that MSR_IA32_XSS is available, but it might report true even
+ * if X86_FEATURE_XSAVES isn't configured in the guest to ensure host
+ * MSR_IA32_XSS is always properly restored. For SEV-ES, it is better
+ * to further check that the guest CPUID actually supports
+ * X86_FEATURE_XSAVES so that accesses to MSR_IA32_XSS by misbehaving
+ * guests will still get intercepted and caught in the normal
+ * kvm_emulate_rdmsr()/kvm_emulated_wrmsr() paths.
+ */
+ if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
+ guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1);
+ else
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 0, 0);
}
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f3bb30b40876ca..a8bd4e909a1e69 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -103,6 +103,7 @@ static const struct svm_direct_access_msrs {
{ .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
{ .index = MSR_IA32_LASTINTFROMIP, .always = false },
{ .index = MSR_IA32_LASTINTTOIP, .always = false },
+ { .index = MSR_IA32_XSS, .always = false },
{ .index = MSR_EFER, .always = false },
{ .index = MSR_IA32_CR_PAT, .always = false },
{ .index = MSR_AMD64_SEV_ES_GHCB, .always = true },
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index be67ab7fdd104e..c409f934c377fc 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -30,7 +30,7 @@
#define IOPM_SIZE PAGE_SIZE * 3
#define MSRPM_SIZE PAGE_SIZE * 2
-#define MAX_DIRECT_ACCESS_MSRS 46
+#define MAX_DIRECT_ACCESS_MSRS 47
#define MSRPM_OFFSETS 32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 9b1ec5d8c99c8d..a65fc2ae15b496 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -9,6 +9,7 @@ config XEN
select PARAVIRT_CLOCK
select X86_HV_CALLBACK_VECTOR
depends on X86_64 || (X86_32 && X86_PAE)
+ depends on X86_64 || (X86_GENERIC || MPENTIUM4 || MCORE2 || MATOM || MK8)
depends on X86_LOCAL_APIC && X86_TSC
help
This is the Linux Xen port. Enabling this will allow the
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index 4ccf1994b97adb..d530384f8d6070 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -53,10 +53,12 @@
#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
-#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
- (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
+#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
(REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
+#define BUTTRESS_ALL_IRQ_MASK (BUTTRESS_IRQ_MASK | \
+ (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)))
+
#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
@@ -74,8 +76,12 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
vdev->wa.clear_runtime_mem = false;
vdev->wa.d3hot_after_power_off = true;
- if (ivpu_device_id(vdev) == PCI_DEVICE_ID_MTL && ivpu_revision(vdev) < 4)
+ REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK);
+ if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) {
+ /* Writing 1s does not clear the interrupt status register */
vdev->wa.interrupt_clear_with_0 = true;
+ REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
+ }
IVPU_PRINT_WA(punit_disabled);
IVPU_PRINT_WA(clear_runtime_mem);
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 94fbc3abe60e6a..d3c30a28c410ea 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -449,9 +449,9 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
struct sk_buff *skb;
unsigned int len;
- spin_lock(&card->cli_queue_lock);
+ spin_lock_bh(&card->cli_queue_lock);
skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
- spin_unlock(&card->cli_queue_lock);
+ spin_unlock_bh(&card->cli_queue_lock);
if(skb == NULL)
return sprintf(buf, "No data.\n");
@@ -956,14 +956,14 @@ static void pclose(struct atm_vcc *vcc)
struct pkt_hdr *header;
/* Remove any yet-to-be-transmitted packets from the pending queue */
- spin_lock(&card->tx_queue_lock);
+ spin_lock_bh(&card->tx_queue_lock);
skb_queue_walk_safe(&card->tx_queue[port], skb, tmpskb) {
if (SKB_CB(skb)->vcc == vcc) {
skb_unlink(skb, &card->tx_queue[port]);
solos_pop(vcc, skb);
}
}
- spin_unlock(&card->tx_queue_lock);
+ spin_unlock_bh(&card->tx_queue_lock);
skb = alloc_skb(sizeof(*header), GFP_KERNEL);
if (!skb) {
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index f3892e9ce800ff..572d68d52965f6 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <asm/unaligned.h>
+#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -44,6 +45,7 @@ struct vhci_data {
bool wakeup;
__u16 msft_opcode;
bool aosp_capable;
+ atomic_t initialized;
};
static int vhci_open_dev(struct hci_dev *hdev)
@@ -75,11 +77,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
- mutex_lock(&data->open_mutex);
skb_queue_tail(&data->readq, skb);
- mutex_unlock(&data->open_mutex);
- wake_up_interruptible(&data->read_wait);
+ if (atomic_read(&data->initialized))
+ wake_up_interruptible(&data->read_wait);
return 0;
}
@@ -464,7 +465,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
skb_put_u8(skb, 0xff);
skb_put_u8(skb, opcode);
put_unaligned_le16(hdev->id, skb_put(skb, 2));
- skb_queue_tail(&data->readq, skb);
+ skb_queue_head(&data->readq, skb);
+ atomic_inc(&data->initialized);
wake_up_interruptible(&data->read_wait);
return 0;
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index d57bc066dce6b4..9ed9239b1228f6 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -2158,13 +2158,23 @@ static int sysc_reset(struct sysc *ddata)
sysc_val = sysc_read_sysconfig(ddata);
sysc_val |= sysc_mask;
sysc_write(ddata, sysc_offset, sysc_val);
- /* Flush posted write */
+
+ /*
+ * Some devices need a delay before reading registers
+ * after reset. Presumably a srst_udelay is not needed
+ * for devices that use a rstctrl register reset.
+ */
+ if (ddata->cfg.srst_udelay)
+ fsleep(ddata->cfg.srst_udelay);
+
+ /*
+ * Flush posted write. For devices needing srst_udelay
+ * this should trigger an interconnect error if the
+ * srst_udelay value is needed but not configured.
+ */
sysc_val = sysc_read_sysconfig(ddata);
}
- if (ddata->cfg.srst_udelay)
- fsleep(ddata->cfg.srst_udelay);
-
if (ddata->post_reset_quirk)
ddata->post_reset_quirk(ddata);
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index ad1acd9b7426b1..dbc3950c596090 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -767,6 +767,7 @@ config SM_CAMCC_8450
config SM_CAMCC_8550
tristate "SM8550 Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SM_GCC_8550
help
Support for the camera clock controller on SM8550 devices.
diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c
index aa53797dbfc145..75071e0cd3216e 100644
--- a/drivers/clk/rockchip/clk-rk3128.c
+++ b/drivers/clk/rockchip/clk-rk3128.c
@@ -138,7 +138,7 @@ PNAME(mux_pll_src_5plls_p) = { "cpll", "gpll", "gpll_div2", "gpll_div3", "usb480
PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "gpll_div2", "usb480m" };
PNAME(mux_pll_src_3plls_p) = { "cpll", "gpll", "gpll_div2" };
-PNAME(mux_aclk_peri_src_p) = { "gpll_peri", "cpll_peri", "gpll_div2_peri", "gpll_div3_peri" };
+PNAME(mux_clk_peri_src_p) = { "gpll", "cpll", "gpll_div2", "gpll_div3" };
PNAME(mux_mmc_src_p) = { "cpll", "gpll", "gpll_div2", "xin24m" };
PNAME(mux_clk_cif_out_src_p) = { "clk_cif_src", "xin24m" };
PNAME(mux_sclk_vop_src_p) = { "cpll", "gpll", "gpll_div2", "gpll_div3" };
@@ -275,23 +275,17 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(0), 11, GFLAGS),
/* PD_PERI */
- GATE(0, "gpll_peri", "gpll", CLK_IGNORE_UNUSED,
+ COMPOSITE(0, "clk_peri_src", mux_clk_peri_src_p, 0,
+ RK2928_CLKSEL_CON(10), 14, 2, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(2), 0, GFLAGS),
- GATE(0, "cpll_peri", "cpll", CLK_IGNORE_UNUSED,
- RK2928_CLKGATE_CON(2), 0, GFLAGS),
- GATE(0, "gpll_div2_peri", "gpll_div2", CLK_IGNORE_UNUSED,
- RK2928_CLKGATE_CON(2), 0, GFLAGS),
- GATE(0, "gpll_div3_peri", "gpll_div3", CLK_IGNORE_UNUSED,
- RK2928_CLKGATE_CON(2), 0, GFLAGS),
- COMPOSITE_NOGATE(0, "aclk_peri_src", mux_aclk_peri_src_p, 0,
- RK2928_CLKSEL_CON(10), 14, 2, MFLAGS, 0, 5, DFLAGS),
- COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_src", 0,
+
+ COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "clk_peri_src", 0,
RK2928_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
RK2928_CLKGATE_CON(2), 3, GFLAGS),
- COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_src", 0,
+ COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "clk_peri_src", 0,
RK2928_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
RK2928_CLKGATE_CON(2), 2, GFLAGS),
- GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", 0,
+ GATE(ACLK_PERI, "aclk_peri", "clk_peri_src", 0,
RK2928_CLKGATE_CON(2), 1, GFLAGS),
GATE(SCLK_TIMER0, "sclk_timer0", "xin24m", 0,
@@ -316,7 +310,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(2), 15, GFLAGS),
- COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
+ COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0,
RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS,
RK2928_CLKGATE_CON(2), 11, GFLAGS),
@@ -490,7 +484,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
GATE(HCLK_I2S_2CH, "hclk_i2s_2ch", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 13, GFLAGS),
GATE(HCLK_HOST2, "hclk_host2", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
- GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 13, GFLAGS),
+ GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS),
GATE(0, "hclk_peri_ahb", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 14, GFLAGS),
GATE(HCLK_SPDIF, "hclk_spdif", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 9, GFLAGS),
GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 12, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
index 16dabe2b9c47f4..db713e1526cdc3 100644
--- a/drivers/clk/rockchip/clk-rk3568.c
+++ b/drivers/clk/rockchip/clk-rk3568.c
@@ -72,6 +72,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
RK3036_PLL_RATE(312000000, 1, 78, 6, 1, 1, 0),
RK3036_PLL_RATE(297000000, 2, 99, 4, 1, 1, 0),
+ RK3036_PLL_RATE(292500000, 1, 195, 4, 4, 1, 0),
RK3036_PLL_RATE(241500000, 2, 161, 4, 2, 1, 0),
RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
RK3036_PLL_RATE(200000000, 1, 100, 3, 4, 1, 0),
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 1cc9be85ba4cd1..7d97790b893d70 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -363,10 +363,9 @@ resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
resource_size_t base = -1;
- down_read(&cxl_dpa_rwsem);
+ lockdep_assert_held(&cxl_dpa_rwsem);
if (cxled->dpa_res)
base = cxled->dpa_res->start;
- up_read(&cxl_dpa_rwsem);
return base;
}
@@ -839,6 +838,8 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
else
cxld->target_type = CXL_DECODER_DEVMEM;
+
+ guard(rwsem_write)(&cxl_region_rwsem);
if (cxld->id != cxl_num_decoders_committed(port)) {
dev_warn(&port->dev,
"decoder%d.%d: Committed out of order\n",
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index fc5c2b414793bb..2f43d368ba0730 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -227,10 +227,16 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
if (!port || !is_cxl_endpoint(port))
return -EINVAL;
- rc = down_read_interruptible(&cxl_dpa_rwsem);
+ rc = down_read_interruptible(&cxl_region_rwsem);
if (rc)
return rc;
+ rc = down_read_interruptible(&cxl_dpa_rwsem);
+ if (rc) {
+ up_read(&cxl_region_rwsem);
+ return rc;
+ }
+
if (cxl_num_decoders_committed(port) == 0) {
/* No regions mapped to this memdev */
rc = cxl_get_poison_by_memdev(cxlmd);
@@ -239,6 +245,7 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
rc = cxl_get_poison_by_endpoint(port);
}
up_read(&cxl_dpa_rwsem);
+ up_read(&cxl_region_rwsem);
return rc;
}
@@ -324,10 +331,16 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
- rc = down_read_interruptible(&cxl_dpa_rwsem);
+ rc = down_read_interruptible(&cxl_region_rwsem);
if (rc)
return rc;
+ rc = down_read_interruptible(&cxl_dpa_rwsem);
+ if (rc) {
+ up_read(&cxl_region_rwsem);
+ return rc;
+ }
+
rc = cxl_validate_poison_dpa(cxlmd, dpa);
if (rc)
goto out;
@@ -355,6 +368,7 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
out:
up_read(&cxl_dpa_rwsem);
+ up_read(&cxl_region_rwsem);
return rc;
}
@@ -372,10 +386,16 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
- rc = down_read_interruptible(&cxl_dpa_rwsem);
+ rc = down_read_interruptible(&cxl_region_rwsem);
if (rc)
return rc;
+ rc = down_read_interruptible(&cxl_dpa_rwsem);
+ if (rc) {
+ up_read(&cxl_region_rwsem);
+ return rc;
+ }
+
rc = cxl_validate_poison_dpa(cxlmd, dpa);
if (rc)
goto out;
@@ -412,6 +432,7 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
out:
up_read(&cxl_dpa_rwsem);
+ up_read(&cxl_region_rwsem);
return rc;
}
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index eff20e83d0a64e..37e1652afbc7ea 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -620,7 +620,7 @@ void read_cdat_data(struct cxl_port *port)
struct pci_dev *pdev = NULL;
struct cxl_memdev *cxlmd;
size_t cdat_length;
- void *cdat_table;
+ void *cdat_table, *cdat_buf;
int rc;
if (is_cxl_memdev(uport)) {
@@ -651,16 +651,15 @@ void read_cdat_data(struct cxl_port *port)
return;
}
- cdat_table = devm_kzalloc(dev, cdat_length + sizeof(__le32),
- GFP_KERNEL);
- if (!cdat_table)
+ cdat_buf = devm_kzalloc(dev, cdat_length + sizeof(__le32), GFP_KERNEL);
+ if (!cdat_buf)
return;
- rc = cxl_cdat_read_table(dev, cdat_doe, cdat_table, &cdat_length);
+ rc = cxl_cdat_read_table(dev, cdat_doe, cdat_buf, &cdat_length);
if (rc)
goto err;
- cdat_table = cdat_table + sizeof(__le32);
+ cdat_table = cdat_buf + sizeof(__le32);
if (cdat_checksum(cdat_table, cdat_length))
goto err;
@@ -670,7 +669,7 @@ void read_cdat_data(struct cxl_port *port)
err:
/* Don't leave table data allocated on error */
- devm_kfree(dev, cdat_table);
+ devm_kfree(dev, cdat_buf);
dev_err(dev, "Failed to read/validate CDAT.\n");
}
EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
diff --git a/drivers/cxl/core/pmu.c b/drivers/cxl/core/pmu.c
index 7684c843e5a59c..5d8e06b0ba6e88 100644
--- a/drivers/cxl/core/pmu.c
+++ b/drivers/cxl/core/pmu.c
@@ -23,7 +23,7 @@ const struct device_type cxl_pmu_type = {
static void remove_dev(void *dev)
{
- device_del(dev);
+ device_unregister(dev);
}
int devm_cxl_pmu_add(struct device *parent, struct cxl_pmu_regs *regs,
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 38441634e4c683..b7c93bb18f6e75 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -226,9 +226,9 @@ static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *at
char *buf)
{
struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
- u64 base = cxl_dpa_resource_start(cxled);
- return sysfs_emit(buf, "%#llx\n", base);
+ guard(rwsem_read)(&cxl_dpa_rwsem);
+ return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled));
}
static DEVICE_ATTR_RO(dpa_resource);
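
dpa_resource_show() above now takes cxl_dpa_rwsem through guard(rwsem_read)(), the kernel's scope-based lock helper, so the read lock is released automatically when the function returns. A standalone sketch of how such a scope guard can be built in plain C with the cleanup attribute is shown below; it is a simplified illustration using pthread primitives, not the kernel's cleanup.h implementation.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t my_rwsem = PTHREAD_RWLOCK_INITIALIZER;

/* Unlock helper run automatically when the guarded variable leaves scope. */
static void read_guard_release(pthread_rwlock_t **lockp)
{
	if (*lockp)
		pthread_rwlock_unlock(*lockp);
}

/* Take the read lock and arrange for release at the end of the scope. */
#define READ_GUARD(name, lock)						\
	pthread_rwlock_t *name						\
		__attribute__((cleanup(read_guard_release))) = (lock);	\
	pthread_rwlock_rdlock(name)

static long show_value(void)
{
	READ_GUARD(g, &my_rwsem);
	/* Any early return from here on still releases the read lock. */
	return 42;
}

int main(void)
{
	printf("%ld\n", show_value());
	return 0;
}
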
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 56e575c79bb491..3e817a6f94c6a4 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2467,10 +2467,6 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
struct cxl_poison_context ctx;
int rc = 0;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
- return rc;
-
ctx = (struct cxl_poison_context) {
.port = port
};
@@ -2480,7 +2476,6 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev),
&ctx);
- up_read(&cxl_region_rwsem);
return rc;
}
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 6a3abe5b17908d..b53f46245c377f 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -828,6 +828,7 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
dma_pool_destroy(fsl_chan->tcd_pool);
fsl_chan->tcd_pool = NULL;
fsl_chan->is_sw = false;
+ fsl_chan->srcid = 0;
}
void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 4635e16d7705e9..238a69bd0d6f5d 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -396,9 +396,8 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
- if (IS_ERR(link)) {
- dev_err(dev, "Failed to add device_link to %d: %ld\n", i,
- PTR_ERR(link));
+ if (!link) {
+ dev_err(dev, "Failed to add device_link to %d\n", i);
return -EINVAL;
}
@@ -631,6 +630,8 @@ static int fsl_edma_suspend_late(struct device *dev)
for (i = 0; i < fsl_edma->n_chans; i++) {
fsl_chan = &fsl_edma->chans[i];
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
/* Make sure chan is idle or will force disable. */
if (unlikely(!fsl_chan->idle)) {
@@ -655,13 +656,16 @@ static int fsl_edma_resume_early(struct device *dev)
for (i = 0; i < fsl_edma->n_chans; i++) {
fsl_chan = &fsl_edma->chans[i];
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
fsl_chan->pm_state = RUNNING;
edma_write_tcdreg(fsl_chan, 0, csr);
if (fsl_chan->slave_id != 0)
fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
}
- edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
+ if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
+ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0;
}
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 7b54a3939ea135..315c004f58e475 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -440,12 +440,14 @@ union wqcfg {
/*
* This macro calculates the offset into the GRPCFG register
* idxd - struct idxd *
- * n - wq id
- * ofs - the index of the 32b dword for the config register
+ * n - group id
+ * ofs - the index of the 64b qword for the config register
*
- * The WQCFG register block is divided into groups per each wq. The n index
- * allows us to move to the register group that's for that particular wq.
- * Each register is 32bits. The ofs gives us the number of register to access.
+ * The GRPCFG register block is divided into three sub-registers, which
+ * are GRPWQCFG, GRPENGCFG and GRPFLGCFG. The n index allows us to move
+ * to the register block that contains the three sub-registers.
+ * Each register block is 64 bits, and ofs gives us the offset
+ * within the GRPWQCFG register to access.
*/
#define GRPWQCFG_OFFSET(idxd_dev, n, ofs) ((idxd_dev)->grpcfg_offset +\
(n) * GRPCFG_SIZE + sizeof(u64) * (ofs))
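
The corrected comment describes the macro as a per-group block offset plus a 64-bit word index. A tiny standalone example of that arithmetic is shown below; the grpcfg offset and block size are made-up illustration values, not read from the hunk or the hardware.

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real driver reads grpcfg_offset from the
 * device and defines GRPCFG_SIZE in registers.h. */
#define EXAMPLE_GRPCFG_OFFSET	0x400u
#define EXAMPLE_GRPCFG_SIZE	64u

/* Same shape as GRPWQCFG_OFFSET(): per-group block plus a 64-bit word index. */
static uint32_t grpwqcfg_offset(unsigned int group, unsigned int qword)
{
	return EXAMPLE_GRPCFG_OFFSET +
	       group * EXAMPLE_GRPCFG_SIZE +
	       (uint32_t)sizeof(uint64_t) * qword;
}

int main(void)
{
	/* Third 64-bit word of group 2's GRPWQCFG: 0x400 + 2*64 + 8*2 = 0x490. */
	printf("0x%x\n", grpwqcfg_offset(2, 2));
	return 0;
}
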
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index c01db23e3333f7..3f922518e3a525 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -183,13 +183,6 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
portal = idxd_wq_portal_addr(wq);
/*
- * The wmb() flushes writes to coherent DMA data before
- * possibly triggering a DMA read. The wmb() is necessary
- * even on UP because the recipient is a device.
- */
- wmb();
-
- /*
* Pending the descriptor to the lockless list for the irq_entry
* that we designated the descriptor to.
*/
@@ -199,6 +192,13 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
llist_add(&desc->llnode, &ie->pending_llist);
}
+ /*
+ * The wmb() flushes writes to coherent DMA data before
+ * possibly triggering a DMA read. The wmb() is necessary
+ * even on UP because the recipient is a device.
+ */
+ wmb();
+
if (wq_dedicated(wq)) {
iosubmit_cmds512(portal, desc->hw, 1);
} else {
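
The submit.c hunk moves the wmb() so the descriptor is published on the pending list before the command is issued to the device through the portal. The same publish-then-notify ordering can be expressed with portable C11 atomics, as sketched below; this is a userspace analogue of the idea, not the driver's MMIO submission path, and all names are illustrative.

#include <stdatomic.h>
#include <stdbool.h>

struct desc {
	int payload;
	struct desc *next;
};

static struct desc *_Atomic pending_head;
static atomic_bool doorbell;

/* Producer: make the descriptor visible on the list, then ring the doorbell.
 * The release ordering on the doorbell store plays the role of the wmb()
 * placed before iosubmit_cmds512() in the hunk above. */
static void submit(struct desc *d)
{
	struct desc *old = atomic_load_explicit(&pending_head, memory_order_relaxed);

	do {
		d->next = old;
	} while (!atomic_compare_exchange_weak_explicit(&pending_head, &old, d,
							memory_order_release,
							memory_order_relaxed));

	atomic_store_explicit(&doorbell, true, memory_order_release);
}

/* Consumer: only walk the list after observing the doorbell with acquire. */
static struct desc *poll_once(void)
{
	if (!atomic_load_explicit(&doorbell, memory_order_acquire))
		return (struct desc *)0;
	return atomic_load_explicit(&pending_head, memory_order_acquire);
}
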
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 72d83cd9ed6bcd..90857d08a1a743 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -1246,8 +1246,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
enum dma_slave_buswidth max_width;
struct stm32_dma_desc *desc;
size_t xfer_count, offset;
- u32 num_sgs, best_burst, dma_burst, threshold;
- int i;
+ u32 num_sgs, best_burst, threshold;
+ int dma_burst, i;
num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
@@ -1266,6 +1266,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
threshold, max_width);
dma_burst = stm32_dma_get_burst(chan, best_burst);
+ if (dma_burst < 0) {
+ kfree(desc);
+ return NULL;
+ }
stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
desc->sg_req[i].chan_reg.dma_scr =
diff --git a/drivers/dma/ti/k3-psil-am62.c b/drivers/dma/ti/k3-psil-am62.c
index 2b6fd6e37c6107..1272b1541f61e2 100644
--- a/drivers/dma/ti/k3-psil-am62.c
+++ b/drivers/dma/ti/k3-psil-am62.c
@@ -74,7 +74,9 @@ static struct psil_ep am62_src_ep_map[] = {
PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
- /* PDMA_MAIN0 - SPI0-3 */
+ /* PDMA_MAIN0 - SPI0-2 */
+ PSIL_PDMA_XY_PKT(0x4300),
+ PSIL_PDMA_XY_PKT(0x4301),
PSIL_PDMA_XY_PKT(0x4302),
PSIL_PDMA_XY_PKT(0x4303),
PSIL_PDMA_XY_PKT(0x4304),
@@ -85,8 +87,6 @@ static struct psil_ep am62_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4309),
PSIL_PDMA_XY_PKT(0x430a),
PSIL_PDMA_XY_PKT(0x430b),
- PSIL_PDMA_XY_PKT(0x430c),
- PSIL_PDMA_XY_PKT(0x430d),
/* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0x4400),
PSIL_PDMA_XY_PKT(0x4401),
@@ -141,7 +141,9 @@ static struct psil_ep am62_dst_ep_map[] = {
/* SAUL */
PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
- /* PDMA_MAIN0 - SPI0-3 */
+ /* PDMA_MAIN0 - SPI0-2 */
+ PSIL_PDMA_XY_PKT(0xc300),
+ PSIL_PDMA_XY_PKT(0xc301),
PSIL_PDMA_XY_PKT(0xc302),
PSIL_PDMA_XY_PKT(0xc303),
PSIL_PDMA_XY_PKT(0xc304),
@@ -152,8 +154,6 @@ static struct psil_ep am62_dst_ep_map[] = {
PSIL_PDMA_XY_PKT(0xc309),
PSIL_PDMA_XY_PKT(0xc30a),
PSIL_PDMA_XY_PKT(0xc30b),
- PSIL_PDMA_XY_PKT(0xc30c),
- PSIL_PDMA_XY_PKT(0xc30d),
/* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0xc400),
PSIL_PDMA_XY_PKT(0xc401),
diff --git a/drivers/dma/ti/k3-psil-am62a.c b/drivers/dma/ti/k3-psil-am62a.c
index ca9d71f914220a..4cf9123b0e9326 100644
--- a/drivers/dma/ti/k3-psil-am62a.c
+++ b/drivers/dma/ti/k3-psil-am62a.c
@@ -84,7 +84,9 @@ static struct psil_ep am62a_src_ep_map[] = {
PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
- /* PDMA_MAIN0 - SPI0-3 */
+ /* PDMA_MAIN0 - SPI0-2 */
+ PSIL_PDMA_XY_PKT(0x4300),
+ PSIL_PDMA_XY_PKT(0x4301),
PSIL_PDMA_XY_PKT(0x4302),
PSIL_PDMA_XY_PKT(0x4303),
PSIL_PDMA_XY_PKT(0x4304),
@@ -95,8 +97,6 @@ static struct psil_ep am62a_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4309),
PSIL_PDMA_XY_PKT(0x430a),
PSIL_PDMA_XY_PKT(0x430b),
- PSIL_PDMA_XY_PKT(0x430c),
- PSIL_PDMA_XY_PKT(0x430d),
/* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0x4400),
PSIL_PDMA_XY_PKT(0x4401),
@@ -151,7 +151,9 @@ static struct psil_ep am62a_dst_ep_map[] = {
/* SAUL */
PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
- /* PDMA_MAIN0 - SPI0-3 */
+ /* PDMA_MAIN0 - SPI0-2 */
+ PSIL_PDMA_XY_PKT(0xc300),
+ PSIL_PDMA_XY_PKT(0xc301),
PSIL_PDMA_XY_PKT(0xc302),
PSIL_PDMA_XY_PKT(0xc303),
PSIL_PDMA_XY_PKT(0xc304),
@@ -162,8 +164,6 @@ static struct psil_ep am62a_dst_ep_map[] = {
PSIL_PDMA_XY_PKT(0xc309),
PSIL_PDMA_XY_PKT(0xc30a),
PSIL_PDMA_XY_PKT(0xc30b),
- PSIL_PDMA_XY_PKT(0xc30c),
- PSIL_PDMA_XY_PKT(0xc30d),
/* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0xc400),
PSIL_PDMA_XY_PKT(0xc401),
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index 442a0ebeb953e9..ce7cf736f02084 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -925,7 +925,6 @@ dpll_pin_parent_pin_set(struct dpll_pin *pin, struct nlattr *parent_nest,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[DPLL_A_PIN_MAX + 1];
- enum dpll_pin_state state;
u32 ppin_idx;
int ret;
@@ -936,10 +935,14 @@ dpll_pin_parent_pin_set(struct dpll_pin *pin, struct nlattr *parent_nest,
return -EINVAL;
}
ppin_idx = nla_get_u32(tb[DPLL_A_PIN_PARENT_ID]);
- state = nla_get_u32(tb[DPLL_A_PIN_STATE]);
- ret = dpll_pin_on_pin_state_set(pin, ppin_idx, state, extack);
- if (ret)
- return ret;
+
+ if (tb[DPLL_A_PIN_STATE]) {
+ enum dpll_pin_state state = nla_get_u32(tb[DPLL_A_PIN_STATE]);
+
+ ret = dpll_pin_on_pin_state_set(pin, ppin_idx, state, extack);
+ if (ret)
+ return ret;
+ }
return 0;
}
diff --git a/drivers/edac/versal_edac.c b/drivers/edac/versal_edac.c
index 87e730dfefa08d..8625de20fc7175 100644
--- a/drivers/edac/versal_edac.c
+++ b/drivers/edac/versal_edac.c
@@ -966,10 +966,10 @@ static int mc_probe(struct platform_device *pdev)
edac_mc_id = emif_get_id(pdev->dev.of_node);
regval = readl(ddrmc_baseaddr + XDDR_REG_CONFIG0_OFFSET);
- num_chans = FIELD_PREP(XDDR_REG_CONFIG0_NUM_CHANS_MASK, regval);
+ num_chans = FIELD_GET(XDDR_REG_CONFIG0_NUM_CHANS_MASK, regval);
num_chans++;
- num_csrows = FIELD_PREP(XDDR_REG_CONFIG0_NUM_RANKS_MASK, regval);
+ num_csrows = FIELD_GET(XDDR_REG_CONFIG0_NUM_RANKS_MASK, regval);
num_csrows *= 2;
if (!num_csrows)
num_csrows = 1;
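
The versal_edac.c fix swaps FIELD_PREP() for FIELD_GET(): the former packs a value into a masked field position, the latter extracts a field from a register value, which is what a readl() result needs here. A tiny standalone illustration of the two directions is shown below, with open-coded mask/shift helpers rather than the kernel's bitfield.h macros; the mask and register values are invented for the example.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 2-bit field at bits [5:4], standing in for NUM_CHANS_MASK. */
#define EXAMPLE_MASK	0x30u
#define EXAMPLE_SHIFT	4u

/* Extract a field from a register value (what FIELD_GET does). */
static uint32_t field_get(uint32_t reg)
{
	return (reg & EXAMPLE_MASK) >> EXAMPLE_SHIFT;
}

/* Pack a value into the field position (what FIELD_PREP does). */
static uint32_t field_prep(uint32_t val)
{
	return (val << EXAMPLE_SHIFT) & EXAMPLE_MASK;
}

int main(void)
{
	uint32_t regval = 0x25;				/* pretend readl() result */

	printf("field value: %u\n", field_get(regval));	/* prints 2 */
	printf("packed: 0x%x\n", field_prep(3));	/* prints 0x30 */
	return 0;
}
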
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 4a4f61bf6c5824..8c59332429c260 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -282,13 +282,15 @@ static void dwapb_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
u32 val;
raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- val = dwapb_read(gpio, GPIO_INTEN);
- val |= BIT(irqd_to_hwirq(d));
+ val = dwapb_read(gpio, GPIO_INTEN) | BIT(hwirq);
dwapb_write(gpio, GPIO_INTEN, val);
+ val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq);
+ dwapb_write(gpio, GPIO_INTMASK, val);
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
@@ -296,12 +298,14 @@ static void dwapb_irq_disable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
u32 val;
raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- val = dwapb_read(gpio, GPIO_INTEN);
- val &= ~BIT(irqd_to_hwirq(d));
+ val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
+ dwapb_write(gpio, GPIO_INTMASK, val);
+ val = dwapb_read(gpio, GPIO_INTEN) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTEN, val);
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 02ffda6c1e5124..f713d1ef7746e4 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -2481,10 +2481,7 @@ static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
return 0;
}
-/*
- * gpio_ioctl() - ioctl handler for the GPIO chardev
- */
-static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long gpio_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned long arg)
{
struct gpio_chardev_data *cdev = file->private_data;
struct gpio_device *gdev = cdev->gdev;
@@ -2521,6 +2518,17 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
}
+/*
+ * gpio_ioctl() - ioctl handler for the GPIO chardev
+ */
+static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct gpio_chardev_data *cdev = file->private_data;
+
+ return call_ioctl_locked(file, cmd, arg, cdev->gdev,
+ gpio_ioctl_unlocked);
+}
+
#ifdef CONFIG_COMPAT
static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
unsigned long arg)
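
The gpiolib-cdev hunk splits the handler into gpio_ioctl_unlocked() and a thin gpio_ioctl() that dispatches through call_ioctl_locked(); the helper itself is not part of this hunk. A rough sketch of that wrapper pattern, with a placeholder device type and an assumed read-side lock (the real gpiolib helper may use a different primitive):

#include <linux/fs.h>
#include <linux/rwsem.h>

struct ex_dev {
	struct rw_semaphore sem;	/* held for read across each ioctl */
};

typedef long (*ex_ioctl_fn)(struct file *file, unsigned int cmd,
			    unsigned long arg);

static long ex_call_ioctl_locked(struct file *file, unsigned int cmd,
				 unsigned long arg, struct ex_dev *dev,
				 ex_ioctl_fn fn)
{
	long ret;

	down_read(&dev->sem);		/* keep the device from going away */
	ret = fn(file, cmd, arg);
	up_read(&dev->sem);

	return ret;
}

The point of the split is that the bulky command dispatch stays lock-agnostic while every entry point funnels through one place that takes and releases the lock.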
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1f64d8cbb14df8..8dee52ce26d0cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4516,8 +4516,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
amdgpu_ras_suspend(adev);
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
-
amdgpu_device_ip_suspend_phase1(adev);
if (!adev->in_s0ix)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index d79b4ca1ecfc4a..5ad03f2afdb45a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1343,6 +1343,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
abo = ttm_to_amdgpu_bo(bo);
+ WARN_ON(abo->vm_bo);
+
if (abo->kfd_bo)
amdgpu_amdkfd_release_notify(abo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d1b8afd105c9f6..5baefb548a2909 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -285,6 +285,7 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
struct amdgpu_bo *bo = vm_bo->bo;
+ vm_bo->moved = true;
if (!bo || bo->tbo.type != ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
else if (bo->parent)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index a2287bb252235e..a160265ddc07c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -642,13 +642,14 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
if (!entry->bo)
return;
+
+ entry->bo->vm_bo = NULL;
shadow = amdgpu_bo_shadowed(entry->bo);
if (shadow) {
ttm_bo_set_bulk_move(&shadow->tbo, NULL);
amdgpu_bo_unref(&shadow);
}
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
- entry->bo->vm_bo = NULL;
spin_lock(&entry->vm->status_lock);
list_del(&entry->vm_status);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 9df011323d4b97..6ede85b28cc8c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -155,13 +155,6 @@ static int jpeg_v4_0_5_hw_init(void *handle)
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
-
- WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
- ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
- VCN_JPEG_DB_CTRL__EN_MASK);
-
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
@@ -336,6 +329,14 @@ static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_jpeg(adev, true);
+ /* doorbell programming is done for every playback */
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
+
+ WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
+ ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
+ VCN_JPEG_DB_CTRL__EN_MASK);
+
/* disable power gating */
r = jpeg_v4_0_5_disable_static_power_gating(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 45377a1752503b..8d5d86675a7fea 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -813,12 +813,12 @@ static int sdma_v2_4_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
+ adev->sdma.num_instances = SDMA_MAX_INSTANCE;
+
r = sdma_v2_4_init_microcode(adev);
if (r)
return r;
- adev->sdma.num_instances = SDMA_MAX_INSTANCE;
-
sdma_v2_4_set_ring_funcs(adev);
sdma_v2_4_set_buffer_funcs(adev);
sdma_v2_4_set_vm_pte_funcs(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 83c240f741b519..0058f3f7cf6e43 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -1643,6 +1643,32 @@ static void sdma_v5_2_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
+static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+	/* SDMA 5.2.3 (RMB) FW doesn't seem to properly
+	 * disallow GFXOFF in some cases, leading to
+	 * hangs in SDMA. Disallow GFXOFF while SDMA is active.
+	 * We can probably just limit this to 5.2.3,
+	 * but it shouldn't hurt for other parts since
+	 * GFXOFF will be disallowed anyway when SDMA is
+	 * active; this just makes it explicit.
+	 */
+ amdgpu_gfx_off_ctrl(adev, false);
+}
+
+static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+	/* SDMA 5.2.3 (RMB) FW doesn't seem to properly
+	 * disallow GFXOFF in some cases, leading to
+	 * hangs in SDMA. Allow GFXOFF when SDMA is complete.
+	 */
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
.name = "sdma_v5_2",
.early_init = sdma_v5_2_early_init,
@@ -1690,6 +1716,8 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
.test_ib = sdma_v5_2_ring_test_ib,
.insert_nop = sdma_v5_2_ring_insert_nop,
.pad_ib = sdma_v5_2_ring_pad_ib,
+ .begin_use = sdma_v5_2_ring_begin_use,
+ .end_use = sdma_v5_2_ring_end_use,
.emit_wreg = sdma_v5_2_ring_emit_wreg,
.emit_reg_wait = sdma_v5_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index f2f3c338fd9446..a15bfb5223e8fe 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1653,18 +1653,24 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
if (test_bit(gpuidx, prange->bitmap_access))
bitmap_set(ctx->bitmap, gpuidx, 1);
}
+
+	/*
+	 * If prange is already mapped or has the always-mapped flag set,
+	 * update the mapping on GPUs with the ACCESS attribute.
+	 */
+ if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
+ if (prange->mapped_to_gpu ||
+ prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
+ bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
+ }
} else {
bitmap_or(ctx->bitmap, prange->bitmap_access,
prange->bitmap_aip, MAX_GPU_INSTANCE);
}
if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
- bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
- if (!prange->mapped_to_gpu ||
- bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
- r = 0;
- goto free_ctx;
- }
+ r = 0;
+ goto free_ctx;
}
if (prange->actual_loc && !prange->ttm_res) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b452796fc6d39e..c8c00c2a5224a7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5182,6 +5182,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
if (plane->type == DRM_PLANE_TYPE_CURSOR)
return;
+ if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
+ goto ffu;
+
num_clips = drm_plane_get_damage_clips_count(new_plane_state);
clips = drm_plane_get_damage_clips(new_plane_state);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 6a96810a477e67..2d1f5efa9091a4 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1014,13 +1014,20 @@ static enum bp_result get_ss_info_v4_5(
DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_DISPLAY_PORT:
- ss_info->spread_spectrum_percentage =
+ if (bp->base.integrated_info) {
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", bp->base.integrated_info->gpuclk_ss_percentage);
+ ss_info->spread_spectrum_percentage =
+ bp->base.integrated_info->gpuclk_ss_percentage;
+ ss_info->type.CENTER_MODE =
+ bp->base.integrated_info->gpuclk_ss_type;
+ } else {
+ ss_info->spread_spectrum_percentage =
disp_cntl_tbl->dp_ss_percentage;
- ss_info->spread_spectrum_range =
+ ss_info->spread_spectrum_range =
disp_cntl_tbl->dp_ss_rate_10hz * 10;
- if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
- ss_info->type.CENTER_MODE = true;
-
+ if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ }
DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_GPU_PLL:
@@ -2386,13 +2393,7 @@ static enum bp_result get_vram_info_v30(
return BP_RESULT_BADBIOSTABLE;
info->num_chans = info_v30->channel_num;
- /* As suggested by VBIOS we should always use
- * dram_channel_width_bytes = 2 when using VRAM
- * table version 3.0. This is because the channel_width
- * param in the VRAM info table is changed in 7000 series and
- * no longer represents the memory channel width.
- */
- info->dram_channel_width_bytes = 2;
+ info->dram_channel_width_bytes = (1 << info_v30->channel_width) / 8;
return result;
}
@@ -2820,6 +2821,8 @@ static enum bp_result get_integrated_info_v2_2(
info->ma_channel_number = info_v2_2->umachannelnumber;
info->dp_ss_control =
le16_to_cpu(info_v2_2->reserved1);
+ info->gpuclk_ss_percentage = info_v2_2->gpuclk_ss_percentage;
+ info->gpuclk_ss_type = info_v2_2->gpuclk_ss_type;
for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
info->ext_disp_conn_info.gu_id[i] =
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 76b47f17812797..5c118520664595 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -5095,18 +5095,28 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
*/
bool dc_is_dmub_outbox_supported(struct dc *dc)
{
- /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
- if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
- dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
- !dc->debug.dpia_debug.bits.disable_dpia)
- return true;
+ switch (dc->ctx->asic_id.chip_family) {
- if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
- !dc->debug.dpia_debug.bits.disable_dpia)
- return true;
+ case FAMILY_YELLOW_CARP:
+ /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
+ if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
+ !dc->debug.dpia_debug.bits.disable_dpia)
+ return true;
+ break;
+
+ case AMDGPU_FAMILY_GC_11_0_1:
+ case AMDGPU_FAMILY_GC_11_5_0:
+ if (!dc->debug.dpia_debug.bits.disable_dpia)
+ return true;
+ break;
+
+ default:
+ break;
+ }
/* dmub aux needs dmub notifications to be enabled */
return dc->debug.enable_dmub_aux_for_legacy_ddc;
+
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 9649934ea186d6..e2a3aa8812df49 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -465,6 +465,7 @@ struct dc_cursor_mi_param {
struct fixed31_32 v_scale_ratio;
enum dc_rotation_angle rotation;
bool mirror;
+ struct dc_stream_state *stream;
};
/* IPP related types */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 139cf31d2e456f..89c3bf0fe0c991 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -1077,8 +1077,16 @@ void hubp2_cursor_set_position(
if (src_y_offset < 0)
src_y_offset = 0;
/* Save necessary cursor info x, y position. w, h is saved in attribute func. */
- hubp->cur_rect.x = src_x_offset + param->viewport.x;
- hubp->cur_rect.y = src_y_offset + param->viewport.y;
+ if (param->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+ param->rotation != ROTATION_ANGLE_0) {
+ hubp->cur_rect.x = 0;
+ hubp->cur_rect.y = 0;
+ hubp->cur_rect.w = param->stream->timing.h_addressable;
+ hubp->cur_rect.h = param->stream->timing.v_addressable;
+ } else {
+ hubp->cur_rect.x = src_x_offset + param->viewport.x;
+ hubp->cur_rect.y = src_y_offset + param->viewport.y;
+ }
}
void hubp2_clk_cntl(struct hubp *hubp, bool enable)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 39cf1ae3a3e162..f154a3eb1d1a0a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -124,7 +124,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 600.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 186.0,
- .dtbclk_mhz = 625.0,
+ .dtbclk_mhz = 600.0,
},
{
.state = 1,
@@ -133,7 +133,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
+ .dtbclk_mhz = 600.0,
},
{
.state = 2,
@@ -142,7 +142,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
+ .dtbclk_mhz = 600.0,
},
{
.state = 3,
@@ -151,7 +151,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 371.0,
- .dtbclk_mhz = 625.0,
+ .dtbclk_mhz = 600.0,
},
{
.state = 4,
@@ -160,7 +160,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 417.0,
- .dtbclk_mhz = 625.0,
+ .dtbclk_mhz = 600.0,
},
},
.num_states = 5,
@@ -348,6 +348,8 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
clock_limits[i].socclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
+ clock_limits[i].dtbclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
@@ -360,6 +362,8 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
+ clk_table->num_entries;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index 4d1336e5afc2c3..b95bf27f2fe2fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -5420,7 +5420,7 @@ static void CalculateOutputLink(
*OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (dml_uint_t)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- if (OutBpp == 0 && PHYCLKD32PerState < 20000 / 32 && DSCEnable == dml_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
+ if (*OutBpp == 0 && PHYCLKD32PerState < 20000 / 32 && DSCEnable == dml_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
*RequiresDSC = true;
LinkDSCEnable = true;
*OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
@@ -6329,7 +6329,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.NoOfDPPThisState,
mode_lib->ms.dpte_group_bytes,
s->HostVMInefficiencyFactor,
- mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
+ mode_lib->ms.soc.hostvm_min_page_size_kbytes,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
s->NextMaxVStartup = s->MaxVStartupAllPlanes[j];
@@ -6542,7 +6542,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
- mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
+ mode_lib->ms.soc.hostvm_min_page_size_kbytes,
mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k],
mode_lib->ms.MetaRowBytes[j][k],
mode_lib->ms.DPTEBytesPerRow[j][k],
@@ -7687,7 +7687,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
- CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
+ CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceededPerState;
@@ -7957,7 +7957,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
UseMinimumDCFCLK_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
UseMinimumDCFCLK_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
UseMinimumDCFCLK_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
- UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
+ UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
UseMinimumDCFCLK_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
UseMinimumDCFCLK_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
UseMinimumDCFCLK_params->ImmediateFlipRequirement = s->ImmediateFlipRequiredFinal;
@@ -8699,7 +8699,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
- CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
+ CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = s->dummy_boolean_array[0];
@@ -8805,7 +8805,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
mode_lib->ms.cache_display_cfg.hw.DPPPerSurface,
locals->dpte_group_bytes,
s->HostVMInefficiencyFactor,
- mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
+ mode_lib->ms.soc.hostvm_min_page_size_kbytes,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
locals->TCalc = 24.0 / locals->DCFCLKDeepSleep;
@@ -8995,7 +8995,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable;
CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
- CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
+ CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k];
CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k];
@@ -9240,7 +9240,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
- mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
+ mode_lib->ms.soc.hostvm_min_page_size_kbytes,
locals->PDEAndMetaPTEBytesFrame[k],
locals->MetaRowByte[k],
locals->PixelPTEBytesPerRow[k],
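
Besides dropping the "* 1024" on hostvm_min_page_size_kbytes, the display_mode_core hunk fixes a pointer-versus-value check: OutBpp is an output parameter, so "if (OutBpp == 0 ...)" tested the pointer itself rather than the computed bits-per-pixel result. A minimal illustration (names are placeholders):

#include <linux/types.h>

static bool ex_needs_dsc(const unsigned int *out_bpp)
{
	/* correct: inspect the computed value */
	return *out_bpp == 0;

	/*
	 * buggy form, as in the old code, which is effectively never true
	 * for a valid pointer:
	 *
	 *	return out_bpp == 0;
	 */
}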
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index fa8fe5bf7e5756..db06a5b749b408 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -423,8 +423,9 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels; i++) {
- p->in_states->state_array[i].dtbclk_mhz =
- dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz;
+ if (dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz > 0)
+ p->in_states->state_array[i].dtbclk_mhz =
+ dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz;
}
for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 2b8b8366538e96..cdb903116eb7ca 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -3417,7 +3417,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
.rotation = pipe_ctx->plane_state->rotation,
- .mirror = pipe_ctx->plane_state->horizontal_mirror
+ .mirror = pipe_ctx->plane_state->horizontal_mirror,
+ .stream = pipe_ctx->stream,
};
bool pipe_split_on = false;
bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index 5f7f474ef51c9a..c1a9b746c43fef 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -960,6 +960,12 @@ void dcn32_init_hw(struct dc *dc)
dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support;
dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
+
+ if (dc->ctx->dmub_srv->dmub->fw_version <
+ DMUB_FW_VERSION(7, 0, 35)) {
+ dc->debug.force_disable_subvp = true;
+ dc->debug.disable_fpo_optimizations = true;
+ }
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 996e4ee99023a8..e5cfaaef70b3f7 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -287,8 +287,8 @@ bool set_default_brightness_aux(struct dc_link *link)
if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
if (!read_default_bl_aux(link, &default_backlight))
default_backlight = 150000;
- // if > 5000, it might be wrong readback
- if (default_backlight > 5000000)
+ // a readback below 1 nit or above 5000 nits is likely a wrong readback
+ if (default_backlight < 1000 || default_backlight > 5000000)
default_backlight = 150000;
return edp_set_backlight_level_nits(link, true,
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index bc96d021136080..813463ffe15c52 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -417,6 +417,8 @@ struct integrated_info {
/* V2.1 */
struct edp_info edp1_info;
struct edp_info edp2_info;
+ uint32_t gpuclk_ss_percentage;
+ uint32_t gpuclk_ss_type;
};
/*
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index a522a7c0291108..1675314a3ff208 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -839,6 +839,8 @@ bool is_psr_su_specific_panel(struct dc_link *link)
((dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x08) ||
(dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x07)))
isPSRSUSupported = false;
+ else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
+ isPSRSUSupported = false;
else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
isPSRSUSupported = true;
}
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index ca2ece24e1e07b..49028dde0f87b1 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2198,10 +2198,10 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
} else if (DEVICE_ATTR_IS(xgmi_plpd_policy)) {
if (amdgpu_dpm_get_xgmi_plpd_mode(adev, NULL) == XGMI_PLPD_NONE)
*states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_mclk_od)) {
+ } else if (DEVICE_ATTR_IS(pp_mclk_od)) {
if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_sclk_od)) {
+ } else if (DEVICE_ATTR_IS(pp_sclk_od)) {
if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(apu_thermal_cap)) {
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 2ed2585ded3784..6899b3dc1f12a5 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -236,7 +236,7 @@ static int
drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv)
{
if (file_priv->was_master &&
- rcu_access_pointer(file_priv->pid) == task_pid(current))
+ rcu_access_pointer(file_priv->pid) == task_tgid(current))
return 0;
if (!capable(CAP_SYS_ADMIN))
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index df9bf3c9206e71..cb90e70d85e862 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -715,8 +715,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_mode_set set;
uint32_t __user *set_connectors_ptr;
struct drm_modeset_acquire_ctx ctx;
- int ret;
- int i;
+ int ret, i, num_connectors = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
@@ -871,6 +870,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
connector->name);
connector_set[i] = connector;
+ num_connectors++;
}
}
@@ -879,7 +879,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
set.y = crtc_req->y;
set.mode = mode;
set.connectors = connector_set;
- set.num_connectors = crtc_req->count_connectors;
+ set.num_connectors = num_connectors;
set.fb = fb;
if (drm_drv_uses_atomic_modeset(dev))
@@ -892,7 +892,7 @@ out:
drm_framebuffer_put(fb);
if (connector_set) {
- for (i = 0; i < crtc_req->count_connectors; i++) {
+ for (i = 0; i < num_connectors; i++) {
if (connector_set[i])
drm_connector_put(connector_set[i]);
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 39db08f803eac2..3b40650998728c 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2309,7 +2309,8 @@ int drm_edid_override_connector_update(struct drm_connector *connector)
override = drm_edid_override_get(connector);
if (override) {
- num_modes = drm_edid_connector_update(connector, override);
+ if (drm_edid_connector_update(connector, override) == 0)
+ num_modes = drm_edid_connector_add_modes(connector);
drm_edid_free(override);
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index d414f6b7f993c2..ccf225afeb2ad3 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -2465,7 +2465,8 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
val |= XELPDP_FORWARD_CLOCK_UNGATE;
- if (is_hdmi_frl(crtc_state->port_clock))
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
+ is_hdmi_frl(crtc_state->port_clock))
val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
else
val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 63ba4d54a71529..df582ff81b45fa 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -3747,8 +3747,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (!active)
goto out;
- intel_dsc_get_config(pipe_config);
intel_bigjoiner_get_config(pipe_config);
+ intel_dsc_get_config(pipe_config);
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
DISPLAY_VER(dev_priv) >= 11)
@@ -6033,6 +6033,17 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
return -EINVAL;
}
+ /*
+ * FIXME: Bigjoiner+async flip is busted currently.
+ * Remove this check once the issues are fixed.
+ */
+ if (new_crtc_state->bigjoiner_pipes) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] async flip disallowed with bigjoiner\n",
+ crtc->base.base.id, crtc->base.name);
+ return -EINVAL;
+ }
+
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
if (plane->pipe != crtc->pipe)
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 63e080e0702368..073b85b57679a5 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -389,7 +389,7 @@ disable_all_flip_queue_events(struct drm_i915_private *i915)
enum intel_dmc_id dmc_id;
/* TODO: check if the following applies to all D13+ platforms. */
- if (!IS_DG2(i915) && !IS_TIGERLAKE(i915))
+ if (!IS_TIGERLAKE(i915))
return;
for_each_dmc_id(dmc_id) {
@@ -493,6 +493,45 @@ void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
}
+static bool is_dmc_evt_ctl_reg(struct drm_i915_private *i915,
+ enum intel_dmc_id dmc_id, i915_reg_t reg)
+{
+ u32 offset = i915_mmio_reg_offset(reg);
+ u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, 0));
+ u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));
+
+ return offset >= start && offset < end;
+}
+
+static bool disable_dmc_evt(struct drm_i915_private *i915,
+ enum intel_dmc_id dmc_id,
+ i915_reg_t reg, u32 data)
+{
+ if (!is_dmc_evt_ctl_reg(i915, dmc_id, reg))
+ return false;
+
+ /* keep all pipe DMC events disabled by default */
+ if (dmc_id != DMC_FW_MAIN)
+ return true;
+
+ return false;
+}
+
+static u32 dmc_mmiodata(struct drm_i915_private *i915,
+ struct intel_dmc *dmc,
+ enum intel_dmc_id dmc_id, int i)
+{
+ if (disable_dmc_evt(i915, dmc_id,
+ dmc->dmc_info[dmc_id].mmioaddr[i],
+ dmc->dmc_info[dmc_id].mmiodata[i]))
+ return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVT_CTL_EVENT_ID_FALSE);
+ else
+ return dmc->dmc_info[dmc_id].mmiodata[i];
+}
+
/**
* intel_dmc_load_program() - write the firmware from memory to register.
* @i915: i915 drm device.
@@ -532,7 +571,7 @@ void intel_dmc_load_program(struct drm_i915_private *i915)
for_each_dmc_id(dmc_id) {
for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i],
- dmc->dmc_info[dmc_id].mmiodata[i]);
+ dmc_mmiodata(i915, dmc, dmc_id, i));
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index dbc1b66c8ee485..1abfafbbfa7571 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -650,19 +650,30 @@ intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 link_bw, u8 rate_select)
{
- u8 link_config[2];
+ u8 lane_count = crtc_state->lane_count;
- /* Write the link configuration data */
- link_config[0] = link_bw;
- link_config[1] = crtc_state->lane_count;
if (crtc_state->enhanced_framing)
- link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+ lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ if (link_bw) {
+ /* DP and eDP v1.3 and earlier link bw set method. */
+ u8 link_config[] = { link_bw, lane_count };
- /* eDP 1.4 rate select method. */
- if (!link_bw)
- drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
- &rate_select, 1);
+ drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config,
+ ARRAY_SIZE(link_config));
+ } else {
+ /*
+ * eDP v1.4 and later link rate set method.
+ *
+ * eDP v1.4x sinks shall ignore DP_LINK_RATE_SET if
+ * DP_LINK_BW_SET is set. Avoid writing DP_LINK_BW_SET.
+ *
+ * eDP v1.5 sinks allow choosing either, and the last choice
+ * shall be active.
+ */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_LANE_COUNT_SET, lane_count);
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_LINK_RATE_SET, rate_select);
+ }
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 19b35ece31f1b7..646f367a13f5a0 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1374,7 +1374,8 @@ plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane,
struct drm_i915_private *i915 = to_i915(fb->base.dev);
unsigned int stride_tiles;
- if (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
+ if ((IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) &&
+ src_stride_tiles < dst_stride_tiles)
stride_tiles = src_stride_tiles;
else
stride_tiles = dst_stride_tiles;
@@ -1501,8 +1502,20 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
size += remap_info->size;
} else {
- unsigned int dst_stride = plane_view_dst_stride_tiles(fb, color_plane,
- remap_info->width);
+ unsigned int dst_stride;
+
+ /*
+ * The hardware automagically calculates the CCS AUX surface
+ * stride from the main surface stride, so we can't really remap a
+ * smaller subset (unless we'd remap in whole AUX page units).
+ */
+ if (intel_fb_needs_pot_stride_remap(fb) &&
+ intel_fb_is_ccs_modifier(fb->base.modifier))
+ dst_stride = remap_info->src_stride;
+ else
+ dst_stride = remap_info->width;
+
+ dst_stride = plane_view_dst_stride_tiles(fb, color_plane, dst_stride);
assign_chk_ovf(i915, remap_info->dst_stride, dst_stride);
color_plane_info->mapping_stride = dst_stride *
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index 1e7c97243fcf55..8a934bada6245d 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -504,7 +504,6 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
{
struct drm_plane *plane = NULL;
struct intel_plane *intel_plane;
- struct intel_plane_state *plane_state = NULL;
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
struct drm_atomic_state *drm_state = crtc_state->uapi.state;
@@ -536,6 +535,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
/* walkthrough scaler_users bits and start assigning scalers */
for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
+ struct intel_plane_state *plane_state = NULL;
int *scaler_id;
const char *name;
int idx, ret;
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index d5ed904f355d5a..6801f8b95c53d1 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1293,7 +1293,7 @@ int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
if (msg)
drm_notice(&engine->i915->drm,
"Resetting %s for %s\n", engine->name, msg);
- atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
+ i915_increase_reset_engine_count(&engine->i915->gpu_error, engine);
ret = intel_gt_reset_engine(engine);
if (ret) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index d37698bd6b91ae..17df71117cc70d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -5001,7 +5001,8 @@ static void capture_error_state(struct intel_guc *guc,
if (match) {
intel_engine_set_hung_context(e, ce);
engine_mask |= e->mask;
- atomic_inc(&i915->gpu_error.reset_engine_count[e->uabi_class]);
+ i915_increase_reset_engine_count(&i915->gpu_error,
+ e);
}
}
@@ -5013,7 +5014,7 @@ static void capture_error_state(struct intel_guc *guc,
} else {
intel_engine_set_hung_context(ce->engine, ce);
engine_mask = ce->engine->mask;
- atomic_inc(&i915->gpu_error.reset_engine_count[ce->engine->uabi_class]);
+ i915_increase_reset_engine_count(&i915->gpu_error, ce->engine);
}
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 9f5971f5e98014..48f6c00402c47a 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -16,6 +16,7 @@
#include "display/intel_display_device.h"
#include "gt/intel_engine.h"
+#include "gt/intel_engine_types.h"
#include "gt/intel_gt_types.h"
#include "gt/uc/intel_uc_fw.h"
@@ -232,7 +233,7 @@ struct i915_gpu_error {
atomic_t reset_count;
/** Number of times an engine has been reset */
- atomic_t reset_engine_count[I915_NUM_ENGINES];
+ atomic_t reset_engine_count[MAX_ENGINE_CLASS];
};
struct drm_i915_error_state_buf {
@@ -255,7 +256,14 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
const struct intel_engine_cs *engine)
{
- return atomic_read(&error->reset_engine_count[engine->uabi_class]);
+ return atomic_read(&error->reset_engine_count[engine->class]);
+}
+
+static inline void
+i915_increase_reset_engine_count(struct i915_gpu_error *error,
+ const struct intel_engine_cs *engine)
+{
+ atomic_inc(&error->reset_engine_count[engine->class]);
}
#define CORE_DUMP_FLAG_NONE 0x0
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index 975da8e7f2a9f8..8c3f443c8347e0 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -175,7 +175,7 @@ hwm_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
* tau4 = (4 | x) << y
* but add 2 when doing the final right shift to account for units
*/
- tau4 = ((1 << x_w) | x) << y;
+ tau4 = (u64)((1 << x_w) | x) << y;
/* val in hwmon interface units (millisec) */
out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
@@ -211,7 +211,7 @@ hwm_power1_max_interval_store(struct device *dev,
r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
- tau4 = ((1 << x_w) | x) << y;
+ tau4 = (u64)((1 << x_w) | x) << y;
max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
if (val > max_win)
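
Both i915_hwmon hunks widen the intermediate before shifting: "((1 << x_w) | x)" is an int expression, so a large y either loses the high bits or, for y >= 32, is undefined behaviour; casting to u64 first keeps the whole shift in 64-bit arithmetic. A one-function sketch of the same idiom:

#include <linux/types.h>

static u64 ex_tau4(u32 x, u32 y, u32 x_w)
{
	/* widen before the shift so bits above 31 survive */
	return (u64)((1 << x_w) | x) << y;
}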
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
index 4ddc6d902752af..7d41874a49c589 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
@@ -37,8 +37,9 @@ int igt_live_test_begin(struct igt_live_test *t,
}
for_each_engine(engine, gt, id)
- t->reset_engine[id] =
- i915_reset_engine_count(&i915->gpu_error, engine);
+ t->reset_engine[i][id] =
+ i915_reset_engine_count(&i915->gpu_error,
+ engine);
}
t->reset_global = i915_reset_count(&i915->gpu_error);
@@ -66,14 +67,14 @@ int igt_live_test_end(struct igt_live_test *t)
for_each_gt(gt, i915, i) {
for_each_engine(engine, gt, id) {
- if (t->reset_engine[id] ==
+ if (t->reset_engine[i][id] ==
i915_reset_engine_count(&i915->gpu_error, engine))
continue;
gt_err(gt, "%s(%s): engine '%s' was reset %d times!\n",
t->func, t->name, engine->name,
i915_reset_engine_count(&i915->gpu_error, engine) -
- t->reset_engine[id]);
+ t->reset_engine[i][id]);
return -EIO;
}
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.h b/drivers/gpu/drm/i915/selftests/igt_live_test.h
index 36ed42736c5216..83e3ad430922fe 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.h
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.h
@@ -7,6 +7,7 @@
#ifndef IGT_LIVE_TEST_H
#define IGT_LIVE_TEST_H
+#include "gt/intel_gt_defines.h" /* for I915_MAX_GT */
#include "gt/intel_engine.h" /* for I915_NUM_ENGINES */
struct drm_i915_private;
@@ -17,7 +18,7 @@ struct igt_live_test {
const char *name;
unsigned int reset_global;
- unsigned int reset_engine[I915_NUM_ENGINES];
+ unsigned int reset_engine[I915_MAX_GT][I915_NUM_ENGINES];
};
/*
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
index f81dc34c9c3ef4..c1bc8b00d9380c 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
@@ -203,7 +203,7 @@ void mtk_gamma_set(struct device *dev, struct drm_crtc_state *state)
/* Disable RELAY mode to pass the processed image */
cfg_val &= ~GAMMA_RELAY_MODE;
- cfg_val = readl(gamma->regs + DISP_GAMMA_CFG);
+ writel(cfg_val, gamma->regs + DISP_GAMMA_CFG);
}
void mtk_gamma_config(struct device *dev, unsigned int w,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index c277b9fae95020..db43f9dff912e4 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -788,6 +788,7 @@ static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
crtc);
struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ unsigned long flags;
if (mtk_crtc->event && mtk_crtc_state->base.event)
DRM_ERROR("new event while there is still a pending event\n");
@@ -795,7 +796,11 @@ static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
if (mtk_crtc_state->base.event) {
mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc);
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
mtk_crtc->event = mtk_crtc_state->base.event;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
mtk_crtc_state->base.event = NULL;
}
}
@@ -921,7 +926,14 @@ static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
struct device *mtk_drm_crtc_dma_dev_get(struct drm_crtc *crtc)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_drm_crtc *mtk_crtc = NULL;
+
+ if (!crtc)
+ return NULL;
+
+ mtk_crtc = to_mtk_crtc(crtc);
+ if (!mtk_crtc)
+ return NULL;
return mtk_crtc->dma_dev;
}
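
The mtk_drm_crtc hunk takes crtc->dev->event_lock around the mtk_crtc->event assignment because the same field is completed from the vblank interrupt path, and adds NULL checks before dereferencing the CRTC. A stripped-down sketch of the locking pattern, with placeholder types:

#include <linux/spinlock.h>

struct ex_crtc {
	spinlock_t event_lock;		/* also taken in the IRQ handler */
	void *pending_event;
};

static void ex_queue_event(struct ex_crtc *c, void *ev)
{
	unsigned long flags;

	/* the _irqsave variant prevents a deadlock if the vblank IRQ
	 * fires on this CPU while the lock is held
	 */
	spin_lock_irqsave(&c->event_lock, flags);
	c->pending_event = ev;
	spin_unlock_irqrestore(&c->event_lock, flags);
}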
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 2dfaa613276a6d..2b0c35cacbc6da 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -443,6 +443,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
struct mtk_drm_private *private = drm->dev_private;
struct mtk_drm_private *priv_n;
struct device *dma_dev = NULL;
+ struct drm_crtc *crtc;
int ret, i, j;
if (drm_firmware_drivers_only())
@@ -519,7 +520,9 @@ static int mtk_drm_kms_init(struct drm_device *drm)
}
/* Use OVL device for all DMA memory allocations */
- dma_dev = mtk_drm_crtc_dma_dev_get(drm_crtc_from_index(drm, 0));
+ crtc = drm_crtc_from_index(drm, 0);
+ if (crtc)
+ dma_dev = mtk_drm_crtc_dma_dev_get(crtc);
if (!dma_dev) {
ret = -ENODEV;
dev_err(drm->dev, "Need at least one OVL device\n");
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
index e4279f1772a1b7..377d0e0cef8481 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
@@ -385,7 +385,7 @@ nvkm_uoutp_mthd_inherit(struct nvkm_outp *outp, void *argv, u32 argc)
/* Ensure an ior is hooked up to this outp already */
ior = outp->func->inherit(outp);
- if (!ior)
+ if (!ior || !ior->arm.head)
return -ENODEV;
/* With iors, there will be a separate output path for each type of connector - and all of
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index 1b811d6972a16d..201022ae9214a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -49,14 +49,14 @@
#include <subdev/mmu.h>
struct gk20a_instobj {
- struct nvkm_memory memory;
+ struct nvkm_instobj base;
struct nvkm_mm_node *mn;
struct gk20a_instmem *imem;
/* CPU mapping */
u32 *vaddr;
};
-#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
+#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, base.memory)
/*
* Used for objects allocated using the DMA API
@@ -148,7 +148,7 @@ gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj)
list_del(&obj->vaddr_node);
vunmap(obj->base.vaddr);
obj->base.vaddr = NULL;
- imem->vaddr_use -= nvkm_memory_size(&obj->base.memory);
+ imem->vaddr_use -= nvkm_memory_size(&obj->base.base.memory);
nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use,
imem->vaddr_max);
}
@@ -283,7 +283,7 @@ gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
{
struct gk20a_instobj *node = gk20a_instobj(memory);
struct nvkm_vmm_map map = {
- .memory = &node->memory,
+ .memory = &node->base.memory,
.offset = offset,
.mem = node->mn,
};
@@ -391,8 +391,8 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
return -ENOMEM;
*_node = &node->base;
- nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
- node->base.memory.ptrs = &gk20a_instobj_ptrs;
+ nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.base.memory);
+ node->base.base.memory.ptrs = &gk20a_instobj_ptrs;
node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
&node->handle, GFP_KERNEL,
@@ -438,8 +438,8 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
*_node = &node->base;
node->dma_addrs = (void *)(node->pages + npages);
- nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);
- node->base.memory.ptrs = &gk20a_instobj_ptrs;
+ nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.base.memory);
+ node->base.base.memory.ptrs = &gk20a_instobj_ptrs;
/* Allocate backing memory */
for (i = 0; i < npages; i++) {
@@ -533,7 +533,7 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
else
ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
align, &node);
- *pmemory = node ? &node->memory : NULL;
+ *pmemory = node ? &node->base.memory : NULL;
if (ret)
return ret;
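
The gk20a_instobj rework embeds a full struct nvkm_instobj as "base" instead of a bare nvkm_memory, so every upcast now goes through the two-level member path base.memory. A compact sketch of that container_of() pattern with simplified stand-in types:

#include <linux/container_of.h>

struct ex_memory { int placeholder; };
struct ex_instobj { struct ex_memory memory; };

struct ex_gk20a_instobj {
	struct ex_instobj base;		/* embedded base object */
	void *vaddr;
};

/* upcast from the innermost embedded member back to the wrapper;
 * offsetof() (and therefore container_of()) accepts the nested
 * member path, just as the real macro does with base.memory
 */
#define ex_gk20a_instobj(p) \
	container_of((p), struct ex_gk20a_instobj, base.memory)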
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index 6e3670508e3a5a..30919c872ac8d5 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -326,7 +326,7 @@ static const struct drm_display_mode ltk050h3148w_mode = {
static const struct ltk050h3146w_desc ltk050h3148w_data = {
.mode = &ltk050h3148w_mode,
.init = ltk050h3148w_init_sequence,
- .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_BURST,
};
static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index 138f154fecef34..997c3a1adacab2 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -325,28 +325,28 @@ struct joycon_imu_cal {
* All the controller's button values are stored in a u32.
* They can be accessed with bitwise ANDs.
*/
-static const u32 JC_BTN_Y = BIT(0);
-static const u32 JC_BTN_X = BIT(1);
-static const u32 JC_BTN_B = BIT(2);
-static const u32 JC_BTN_A = BIT(3);
-static const u32 JC_BTN_SR_R = BIT(4);
-static const u32 JC_BTN_SL_R = BIT(5);
-static const u32 JC_BTN_R = BIT(6);
-static const u32 JC_BTN_ZR = BIT(7);
-static const u32 JC_BTN_MINUS = BIT(8);
-static const u32 JC_BTN_PLUS = BIT(9);
-static const u32 JC_BTN_RSTICK = BIT(10);
-static const u32 JC_BTN_LSTICK = BIT(11);
-static const u32 JC_BTN_HOME = BIT(12);
-static const u32 JC_BTN_CAP = BIT(13); /* capture button */
-static const u32 JC_BTN_DOWN = BIT(16);
-static const u32 JC_BTN_UP = BIT(17);
-static const u32 JC_BTN_RIGHT = BIT(18);
-static const u32 JC_BTN_LEFT = BIT(19);
-static const u32 JC_BTN_SR_L = BIT(20);
-static const u32 JC_BTN_SL_L = BIT(21);
-static const u32 JC_BTN_L = BIT(22);
-static const u32 JC_BTN_ZL = BIT(23);
+#define JC_BTN_Y BIT(0)
+#define JC_BTN_X BIT(1)
+#define JC_BTN_B BIT(2)
+#define JC_BTN_A BIT(3)
+#define JC_BTN_SR_R BIT(4)
+#define JC_BTN_SL_R BIT(5)
+#define JC_BTN_R BIT(6)
+#define JC_BTN_ZR BIT(7)
+#define JC_BTN_MINUS BIT(8)
+#define JC_BTN_PLUS BIT(9)
+#define JC_BTN_RSTICK BIT(10)
+#define JC_BTN_LSTICK BIT(11)
+#define JC_BTN_HOME BIT(12)
+#define JC_BTN_CAP BIT(13) /* capture button */
+#define JC_BTN_DOWN BIT(16)
+#define JC_BTN_UP BIT(17)
+#define JC_BTN_RIGHT BIT(18)
+#define JC_BTN_LEFT BIT(19)
+#define JC_BTN_SR_L BIT(20)
+#define JC_BTN_SL_L BIT(21)
+#define JC_BTN_L BIT(22)
+#define JC_BTN_ZL BIT(23)
enum joycon_msg_type {
JOYCON_MSG_TYPE_NONE,
@@ -927,14 +927,27 @@ static int joycon_request_calibration(struct joycon_ctlr *ctlr)
*/
static void joycon_calc_imu_cal_divisors(struct joycon_ctlr *ctlr)
{
- int i;
+ int i, divz = 0;
for (i = 0; i < 3; i++) {
ctlr->imu_cal_accel_divisor[i] = ctlr->accel_cal.scale[i] -
ctlr->accel_cal.offset[i];
ctlr->imu_cal_gyro_divisor[i] = ctlr->gyro_cal.scale[i] -
ctlr->gyro_cal.offset[i];
+
+ if (ctlr->imu_cal_accel_divisor[i] == 0) {
+ ctlr->imu_cal_accel_divisor[i] = 1;
+ divz++;
+ }
+
+ if (ctlr->imu_cal_gyro_divisor[i] == 0) {
+ ctlr->imu_cal_gyro_divisor[i] = 1;
+ divz++;
+ }
}
+
+ if (divz)
+ hid_warn(ctlr->hdev, "inaccurate IMU divisors (%d)\n", divz);
}
static const s16 DFLT_ACCEL_OFFSET /*= 0*/;
@@ -1163,16 +1176,16 @@ static void joycon_parse_imu_report(struct joycon_ctlr *ctlr,
JC_IMU_SAMPLES_PER_DELTA_AVG) {
ctlr->imu_avg_delta_ms = ctlr->imu_delta_samples_sum /
ctlr->imu_delta_samples_count;
- /* don't ever want divide by zero shenanigans */
- if (ctlr->imu_avg_delta_ms == 0) {
- ctlr->imu_avg_delta_ms = 1;
- hid_warn(ctlr->hdev,
- "calculated avg imu delta of 0\n");
- }
ctlr->imu_delta_samples_count = 0;
ctlr->imu_delta_samples_sum = 0;
}
+ /* don't ever want divide by zero shenanigans */
+ if (ctlr->imu_avg_delta_ms == 0) {
+ ctlr->imu_avg_delta_ms = 1;
+ hid_warn(ctlr->hdev, "calculated avg imu delta of 0\n");
+ }
+
/* useful for debugging IMU sample rate */
hid_dbg(ctlr->hdev,
"imu_report: ms=%u last_ms=%u delta=%u avg_delta=%u\n",
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 28e2a5fc45282d..5511fd46a65eae 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -249,18 +249,46 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
if (!slave)
return 0;
- command = readl(bus->base + ASPEED_I2C_CMD_REG);
+ /*
+ * Handle stop conditions early, prior to SLAVE_MATCH. Some masters may drive
+ * transfers with low enough latency between the nak/stop phase of the current
+ * command and the start/address phase of the following command that the
+ * interrupts are coalesced by the time we process them.
+ */
+ if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
+ irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
+ bus->slave_state = ASPEED_I2C_SLAVE_STOP;
+ }
+
+ if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
+ bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
+ irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
+ bus->slave_state = ASPEED_I2C_SLAVE_STOP;
+ }
+
+ /* Propagate any stop conditions to the slave implementation. */
+ if (bus->slave_state == ASPEED_I2C_SLAVE_STOP) {
+ i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
+ bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
+ }
- /* Slave was requested, restart state machine. */
+ /*
+ * Now that we've dealt with any potentially coalesced stop conditions,
+ * address any start conditions.
+ */
if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) {
irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
bus->slave_state = ASPEED_I2C_SLAVE_START;
}
- /* Slave is not currently active, irq was for someone else. */
+ /*
+ * If the slave has been stopped and not started then slave interrupt
+ * handling is complete.
+ */
if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
return irq_handled;
+ command = readl(bus->base + ASPEED_I2C_CMD_REG);
dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n",
irq_status, command);
@@ -279,17 +307,6 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
}
- /* Slave was asked to stop. */
- if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
- irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
- bus->slave_state = ASPEED_I2C_SLAVE_STOP;
- }
- if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
- bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
- irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
- bus->slave_state = ASPEED_I2C_SLAVE_STOP;
- }
-
switch (bus->slave_state) {
case ASPEED_I2C_SLAVE_READ_REQUESTED:
if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK))
@@ -324,8 +341,7 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
break;
case ASPEED_I2C_SLAVE_STOP:
- i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
- bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
+ /* Stop event handling is done early. Unreachable. */
break;
case ASPEED_I2C_SLAVE_START:
/* Slave was just started. Waiting for the next event. */;
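
The i2c-aspeed rework moves the stop and NAK handling ahead of the SLAVE_MATCH check, so a stop belonging to the previous transfer is delivered before the start of the next one even when both arrive in a single coalesced interrupt. A heavily simplified sketch of that ordering (bit values, state names and the handled mask are illustrative, not the driver's real definitions):

#include <linux/bits.h>
#include <linux/types.h>

#define EX_INTR_STOP		BIT(0)
#define EX_INTR_SLAVE_MATCH	BIT(1)

enum ex_slave_state { EX_INACTIVE, EX_STOP, EX_ACTIVE };

static u32 ex_slave_irq(u32 irq_status, enum ex_slave_state *state)
{
	u32 handled = 0;

	/* 1) stop first: it terminates the previous transfer */
	if (irq_status & EX_INTR_STOP) {
		handled |= EX_INTR_STOP;
		*state = EX_STOP;
	}

	if (*state == EX_STOP) {
		/* i2c_slave_event(slave, I2C_SLAVE_STOP, &value) goes here */
		*state = EX_INACTIVE;
	}

	/* 2) only then start the state machine for the next transfer */
	if (irq_status & EX_INTR_SLAVE_MATCH) {
		handled |= EX_INTR_SLAVE_MATCH;
		*state = EX_ACTIVE;
	}

	return handled;
}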
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 6d829ed2f868f7..0d2e7171e3a6f9 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -858,6 +858,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
ret = geni_se_resources_on(&gi2c->se);
if (ret) {
dev_err(dev, "Error turning on resources %d\n", ret);
+ clk_disable_unprepare(gi2c->core_clk);
return ret;
}
proto = geni_se_read_proto(&gi2c->se);
@@ -877,8 +878,11 @@ static int geni_i2c_probe(struct platform_device *pdev)
/* FIFO is disabled, so we can only use GPI DMA */
gi2c->gpi_mode = true;
ret = setup_gpi_dma(gi2c);
- if (ret)
+ if (ret) {
+ geni_se_resources_off(&gi2c->se);
+ clk_disable_unprepare(gi2c->core_clk);
return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n");
+ }
dev_dbg(dev, "Using GPI DMA mode for I2C\n");
} else {
@@ -891,6 +895,8 @@ static int geni_i2c_probe(struct platform_device *pdev)
if (!tx_depth) {
dev_err(dev, "Invalid TX FIFO depth\n");
+ geni_se_resources_off(&gi2c->se);
+ clk_disable_unprepare(gi2c->core_clk);
return -EINVAL;
}
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index a044ca0c35a193..4362db7c57892c 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -178,6 +178,7 @@ struct rk3x_i2c_soc_data {
* @clk: function clk for rk3399 or function & Bus clks for others
* @pclk: Bus clk for rk3399
* @clk_rate_nb: i2c clk rate change notify
+ * @irq: irq number
* @t: I2C known timing information
* @lock: spinlock for the i2c bus
* @wait: the waitqueue to wait for i2c transfer
@@ -200,6 +201,7 @@ struct rk3x_i2c {
struct clk *clk;
struct clk *pclk;
struct notifier_block clk_rate_nb;
+ int irq;
/* Settings */
struct i2c_timings t;
@@ -1087,13 +1089,18 @@ static int rk3x_i2c_xfer_common(struct i2c_adapter *adap,
spin_unlock_irqrestore(&i2c->lock, flags);
- rk3x_i2c_start(i2c);
-
if (!polling) {
+ rk3x_i2c_start(i2c);
+
timeout = wait_event_timeout(i2c->wait, !i2c->busy,
msecs_to_jiffies(WAIT_TIMEOUT));
} else {
+ disable_irq(i2c->irq);
+ rk3x_i2c_start(i2c);
+
timeout = rk3x_i2c_wait_xfer_poll(i2c);
+
+ enable_irq(i2c->irq);
}
spin_lock_irqsave(&i2c->lock, flags);
@@ -1310,6 +1317,8 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
return ret;
}
+ i2c->irq = irq;
+
platform_set_drvdata(pdev, i2c);
if (i2c->soc_data->calc_timings == rk3x_i2c_v0_calc_timings) {
diff --git a/drivers/iio/accel/kionix-kx022a.c b/drivers/iio/accel/kionix-kx022a.c
index 60864be3a66712..53d59a04ae15e9 100644
--- a/drivers/iio/accel/kionix-kx022a.c
+++ b/drivers/iio/accel/kionix-kx022a.c
@@ -393,17 +393,17 @@ static const unsigned int kx022a_odrs[] = {
* (range / 2^bits) * g = (range / 2^bits) * 9.80665 m/s^2
* => KX022A uses 16 bit (HiRes mode - assume the low 8 bits are zeroed
* in low-power mode(?) )
- * => +/-2G => 4 / 2^16 * 9,80665 * 10^6 (to scale to micro)
- * => +/-2G - 598.550415
- * +/-4G - 1197.10083
- * +/-8G - 2394.20166
- * +/-16G - 4788.40332
+ * => +/-2G => 4 / 2^16 * 9.80665
+ * => +/-2G - 0.000598550415
+ * +/-4G - 0.00119710083
+ * +/-8G - 0.00239420166
+ * +/-16G - 0.00478840332
*/
static const int kx022a_scale_table[][2] = {
- { 598, 550415 },
- { 1197, 100830 },
- { 2394, 201660 },
- { 4788, 403320 },
+ { 0, 598550 },
+ { 0, 1197101 },
+ { 0, 2394202 },
+ { 0, 4788403 },
};
static int kx022a_read_avail(struct iio_dev *indio_dev,
@@ -422,7 +422,7 @@ static int kx022a_read_avail(struct iio_dev *indio_dev,
*vals = (const int *)kx022a_scale_table;
*length = ARRAY_SIZE(kx022a_scale_table) *
ARRAY_SIZE(kx022a_scale_table[0]);
- *type = IIO_VAL_INT_PLUS_MICRO;
+ *type = IIO_VAL_INT_PLUS_NANO;
return IIO_AVAIL_LIST;
default:
return -EINVAL;
@@ -485,6 +485,20 @@ static int kx022a_turn_on_unlock(struct kx022a_data *data)
return ret;
}
+static int kx022a_write_raw_get_fmt(struct iio_dev *idev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
static int kx022a_write_raw(struct iio_dev *idev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
@@ -629,7 +643,7 @@ static int kx022a_read_raw(struct iio_dev *idev,
kx022a_reg2scale(regval, val, val2);
- return IIO_VAL_INT_PLUS_MICRO;
+ return IIO_VAL_INT_PLUS_NANO;
}
return -EINVAL;
@@ -856,6 +870,7 @@ static int kx022a_fifo_flush(struct iio_dev *idev, unsigned int samples)
static const struct iio_info kx022a_info = {
.read_raw = &kx022a_read_raw,
.write_raw = &kx022a_write_raw,
+ .write_raw_get_fmt = &kx022a_write_raw_get_fmt,
.read_avail = &kx022a_read_avail,
.validate_trigger = iio_validate_own_trigger,
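The new scale entries follow directly from the arithmetic in the updated comment: one LSB corresponds to range / 2^16 * 9.80665 m/s^2, which is well below one micro-unit, hence the switch to IIO_VAL_INT_PLUS_NANO in read_avail, read_raw and the new write_raw_get_fmt callback. A quick standalone check of the four table values (plain C, independent of the driver):

/* Verify the kx022a scale table values from range / 2^16 * 9.80665 m/s^2. */
#include <stdio.h>

int main(void)
{
	const double g = 9.80665;
	const int span[] = { 4, 8, 16, 32 };	/* full span for +/-2G .. +/-16G */

	for (int i = 0; i < 4; i++) {
		double scale = (double)span[i] / 65536.0 * g;

		/* e.g. 4/65536*9.80665 = 0.000598550415 -> { 0, 598550 } in nano */
		printf("+/-%2dG: %.12f m/s^2 per LSB -> { 0, %.0f }\n",
		       span[i] / 2, scale, scale * 1e9);
	}
	return 0;
}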
diff --git a/drivers/iio/adc/imx93_adc.c b/drivers/iio/adc/imx93_adc.c
index 9bb1e4ba1aeed8..4ccf4819f1f131 100644
--- a/drivers/iio/adc/imx93_adc.c
+++ b/drivers/iio/adc/imx93_adc.c
@@ -93,6 +93,10 @@ static const struct iio_chan_spec imx93_adc_iio_channels[] = {
IMX93_ADC_CHAN(1),
IMX93_ADC_CHAN(2),
IMX93_ADC_CHAN(3),
+ IMX93_ADC_CHAN(4),
+ IMX93_ADC_CHAN(5),
+ IMX93_ADC_CHAN(6),
+ IMX93_ADC_CHAN(7),
};
static void imx93_adc_power_down(struct imx93_adc *adc)
diff --git a/drivers/iio/adc/mcp3564.c b/drivers/iio/adc/mcp3564.c
index e3f1de5fcc5abd..311b613b605754 100644
--- a/drivers/iio/adc/mcp3564.c
+++ b/drivers/iio/adc/mcp3564.c
@@ -918,7 +918,7 @@ static int mcp3564_write_raw(struct iio_dev *indio_dev,
mutex_unlock(&adc->lock);
return ret;
case IIO_CHAN_INFO_CALIBBIAS:
- if (val < mcp3564_calib_bias[0] && val > mcp3564_calib_bias[2])
+ if (val < mcp3564_calib_bias[0] || val > mcp3564_calib_bias[2])
return -EINVAL;
mutex_lock(&adc->lock);
@@ -928,7 +928,7 @@ static int mcp3564_write_raw(struct iio_dev *indio_dev,
mutex_unlock(&adc->lock);
return ret;
case IIO_CHAN_INFO_CALIBSCALE:
- if (val < mcp3564_calib_scale[0] && val > mcp3564_calib_scale[2])
+ if (val < mcp3564_calib_scale[0] || val > mcp3564_calib_scale[2])
return -EINVAL;
if (adc->calib_scale == val)
@@ -1122,7 +1122,7 @@ static int mcp3564_config(struct iio_dev *indio_dev)
enum mcp3564_ids ids;
int ret = 0;
unsigned int tmp = 0x01;
- bool err = true;
+ bool err = false;
/*
* The address is set on a per-device basis by fuses in the factory,
@@ -1509,5 +1509,5 @@ static struct spi_driver mcp3564_driver = {
module_spi_driver(mcp3564_driver);
MODULE_AUTHOR("Marius Cristea <marius.cristea@microchip.com>");
-MODULE_DESCRIPTION("Microchip MCP346x/MCP346xR and MCP356x/MCP346xR ADCs");
+MODULE_DESCRIPTION("Microchip MCP346x/MCP346xR and MCP356x/MCP356xR ADCs");
MODULE_LICENSE("GPL v2");
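The calibration range fixes above are the classic inverted bounds check: for any table with min <= max, `val < min && val > max` can never be true, so every value was silently accepted; with `||` anything outside [min, max] is rejected. A self-contained illustration (the table values here are plausible stand-ins for the driver's signed calibration range, not taken from the datasheet):

/* Range check: '&&' never rejects, '||' rejects out-of-range values. */
#include <stdio.h>

static const int calib_bias[] = { -8388608, 0, 8388607 };	/* min, default, max */

static int accept_and(int val) { return !(val < calib_bias[0] && val > calib_bias[2]); }
static int accept_or(int val)  { return !(val < calib_bias[0] || val > calib_bias[2]); }

int main(void)
{
	int bogus = 99999999;	/* clearly above the max */

	printf("&&: accepted=%d (bug: always accepts)\n", accept_and(bogus));
	printf("||: accepted=%d (correct: rejects)\n", accept_or(bogus));
	return 0;
}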
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 950ff13e6dde80..13b473d8c6c788 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -1241,6 +1241,20 @@ static const struct meson_sar_adc_param meson_sar_adc_gxl_param = {
.cmv_select = 1,
};
+static const struct meson_sar_adc_param meson_sar_adc_axg_param = {
+ .has_bl30_integration = true,
+ .clock_rate = 1200000,
+ .bandgap_reg = MESON_SAR_ADC_REG11,
+ .regmap_config = &meson_sar_adc_regmap_config_gxbb,
+ .resolution = 12,
+ .disable_ring_counter = 1,
+ .has_reg11 = true,
+ .vref_volatge = 1,
+ .has_vref_select = true,
+ .vref_select = VREF_VDDA,
+ .cmv_select = 1,
+};
+
static const struct meson_sar_adc_param meson_sar_adc_g12a_param = {
.has_bl30_integration = false,
.clock_rate = 1200000,
@@ -1285,7 +1299,7 @@ static const struct meson_sar_adc_data meson_sar_adc_gxm_data = {
};
static const struct meson_sar_adc_data meson_sar_adc_axg_data = {
- .param = &meson_sar_adc_gxl_param,
+ .param = &meson_sar_adc_axg_param,
.name = "meson-axg-saradc",
};
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index c755e8cd52205c..95fa857e8aadfe 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -670,8 +670,10 @@ static int tiadc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);
err = tiadc_request_dma(pdev, adc_dev);
- if (err && err == -EPROBE_DEFER)
+ if (err && err != -ENODEV) {
+ dev_err_probe(&pdev->dev, err, "DMA request failed\n");
goto err_dma;
+ }
return 0;
diff --git a/drivers/iio/buffer/industrialio-triggered-buffer.c b/drivers/iio/buffer/industrialio-triggered-buffer.c
index c7671b1f5eadac..c06515987e7a7c 100644
--- a/drivers/iio/buffer/industrialio-triggered-buffer.c
+++ b/drivers/iio/buffer/industrialio-triggered-buffer.c
@@ -46,6 +46,16 @@ int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev,
struct iio_buffer *buffer;
int ret;
+ /*
+ * iio_triggered_buffer_cleanup() assumes that the buffer allocated here
+ * is assigned to indio_dev->buffer but this is only the case if this
+ * function is the first caller to iio_device_attach_buffer(). If
+ * indio_dev->buffer is already set then we can't proceed; otherwise the
+ * cleanup function will try to free a buffer that was not allocated here.
+ */
+ if (indio_dev->buffer)
+ return -EADDRINUSE;
+
buffer = iio_kfifo_allocate();
if (!buffer) {
ret = -ENOMEM;
diff --git a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
index 6633b35a94e69a..9c9bc77003c7ff 100644
--- a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
+++ b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
@@ -15,8 +15,8 @@
/* Conversion times in us */
static const u16 ms_sensors_ht_t_conversion_time[] = { 50000, 25000,
13000, 7000 };
-static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 3000,
- 5000, 8000 };
+static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 5000,
+ 3000, 8000 };
static const u16 ms_sensors_tp_conversion_time[] = { 500, 1100, 2100,
4100, 8220, 16440 };
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index b7cbe1565aee02..64be656f0b8052 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -70,8 +70,8 @@
#define ADIS16475_MAX_SCAN_DATA 20
/* spi max speed in brust mode */
#define ADIS16475_BURST_MAX_SPEED 1000000
-#define ADIS16475_LSB_DEC_MASK BIT(0)
-#define ADIS16475_LSB_FIR_MASK BIT(1)
+#define ADIS16475_LSB_DEC_MASK 0
+#define ADIS16475_LSB_FIR_MASK 1
#define ADIS16500_BURST_DATA_SEL_0_CHN_MASK GENMASK(5, 0)
#define ADIS16500_BURST_DATA_SEL_1_CHN_MASK GENMASK(12, 7)
@@ -1406,50 +1406,6 @@ static int adis16475_config_irq_pin(struct adis16475 *st)
return 0;
}
-static const struct of_device_id adis16475_of_match[] = {
- { .compatible = "adi,adis16470",
- .data = &adis16475_chip_info[ADIS16470] },
- { .compatible = "adi,adis16475-1",
- .data = &adis16475_chip_info[ADIS16475_1] },
- { .compatible = "adi,adis16475-2",
- .data = &adis16475_chip_info[ADIS16475_2] },
- { .compatible = "adi,adis16475-3",
- .data = &adis16475_chip_info[ADIS16475_3] },
- { .compatible = "adi,adis16477-1",
- .data = &adis16475_chip_info[ADIS16477_1] },
- { .compatible = "adi,adis16477-2",
- .data = &adis16475_chip_info[ADIS16477_2] },
- { .compatible = "adi,adis16477-3",
- .data = &adis16475_chip_info[ADIS16477_3] },
- { .compatible = "adi,adis16465-1",
- .data = &adis16475_chip_info[ADIS16465_1] },
- { .compatible = "adi,adis16465-2",
- .data = &adis16475_chip_info[ADIS16465_2] },
- { .compatible = "adi,adis16465-3",
- .data = &adis16475_chip_info[ADIS16465_3] },
- { .compatible = "adi,adis16467-1",
- .data = &adis16475_chip_info[ADIS16467_1] },
- { .compatible = "adi,adis16467-2",
- .data = &adis16475_chip_info[ADIS16467_2] },
- { .compatible = "adi,adis16467-3",
- .data = &adis16475_chip_info[ADIS16467_3] },
- { .compatible = "adi,adis16500",
- .data = &adis16475_chip_info[ADIS16500] },
- { .compatible = "adi,adis16505-1",
- .data = &adis16475_chip_info[ADIS16505_1] },
- { .compatible = "adi,adis16505-2",
- .data = &adis16475_chip_info[ADIS16505_2] },
- { .compatible = "adi,adis16505-3",
- .data = &adis16475_chip_info[ADIS16505_3] },
- { .compatible = "adi,adis16507-1",
- .data = &adis16475_chip_info[ADIS16507_1] },
- { .compatible = "adi,adis16507-2",
- .data = &adis16475_chip_info[ADIS16507_2] },
- { .compatible = "adi,adis16507-3",
- .data = &adis16475_chip_info[ADIS16507_3] },
- { },
-};
-MODULE_DEVICE_TABLE(of, adis16475_of_match);
static int adis16475_probe(struct spi_device *spi)
{
@@ -1463,7 +1419,7 @@ static int adis16475_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
- st->info = device_get_match_data(&spi->dev);
+ st->info = spi_get_device_match_data(spi);
if (!st->info)
return -EINVAL;
@@ -1503,12 +1459,83 @@ static int adis16475_probe(struct spi_device *spi)
return 0;
}
+static const struct of_device_id adis16475_of_match[] = {
+ { .compatible = "adi,adis16470",
+ .data = &adis16475_chip_info[ADIS16470] },
+ { .compatible = "adi,adis16475-1",
+ .data = &adis16475_chip_info[ADIS16475_1] },
+ { .compatible = "adi,adis16475-2",
+ .data = &adis16475_chip_info[ADIS16475_2] },
+ { .compatible = "adi,adis16475-3",
+ .data = &adis16475_chip_info[ADIS16475_3] },
+ { .compatible = "adi,adis16477-1",
+ .data = &adis16475_chip_info[ADIS16477_1] },
+ { .compatible = "adi,adis16477-2",
+ .data = &adis16475_chip_info[ADIS16477_2] },
+ { .compatible = "adi,adis16477-3",
+ .data = &adis16475_chip_info[ADIS16477_3] },
+ { .compatible = "adi,adis16465-1",
+ .data = &adis16475_chip_info[ADIS16465_1] },
+ { .compatible = "adi,adis16465-2",
+ .data = &adis16475_chip_info[ADIS16465_2] },
+ { .compatible = "adi,adis16465-3",
+ .data = &adis16475_chip_info[ADIS16465_3] },
+ { .compatible = "adi,adis16467-1",
+ .data = &adis16475_chip_info[ADIS16467_1] },
+ { .compatible = "adi,adis16467-2",
+ .data = &adis16475_chip_info[ADIS16467_2] },
+ { .compatible = "adi,adis16467-3",
+ .data = &adis16475_chip_info[ADIS16467_3] },
+ { .compatible = "adi,adis16500",
+ .data = &adis16475_chip_info[ADIS16500] },
+ { .compatible = "adi,adis16505-1",
+ .data = &adis16475_chip_info[ADIS16505_1] },
+ { .compatible = "adi,adis16505-2",
+ .data = &adis16475_chip_info[ADIS16505_2] },
+ { .compatible = "adi,adis16505-3",
+ .data = &adis16475_chip_info[ADIS16505_3] },
+ { .compatible = "adi,adis16507-1",
+ .data = &adis16475_chip_info[ADIS16507_1] },
+ { .compatible = "adi,adis16507-2",
+ .data = &adis16475_chip_info[ADIS16507_2] },
+ { .compatible = "adi,adis16507-3",
+ .data = &adis16475_chip_info[ADIS16507_3] },
+ { },
+};
+MODULE_DEVICE_TABLE(of, adis16475_of_match);
+
+static const struct spi_device_id adis16475_ids[] = {
+ { "adis16470", (kernel_ulong_t)&adis16475_chip_info[ADIS16470] },
+ { "adis16475-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_1] },
+ { "adis16475-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_2] },
+ { "adis16475-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_3] },
+ { "adis16477-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_1] },
+ { "adis16477-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_2] },
+ { "adis16477-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_3] },
+ { "adis16465-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_1] },
+ { "adis16465-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_2] },
+ { "adis16465-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_3] },
+ { "adis16467-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_1] },
+ { "adis16467-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_2] },
+ { "adis16467-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_3] },
+ { "adis16500", (kernel_ulong_t)&adis16475_chip_info[ADIS16500] },
+ { "adis16505-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_1] },
+ { "adis16505-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_2] },
+ { "adis16505-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_3] },
+ { "adis16507-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_1] },
+ { "adis16507-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_2] },
+ { "adis16507-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_3] },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adis16475_ids);
+
static struct spi_driver adis16475_driver = {
.driver = {
.name = "adis16475",
.of_match_table = adis16475_of_match,
},
.probe = adis16475_probe,
+ .id_table = adis16475_ids,
};
module_spi_driver(adis16475_driver);
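Moving the of_device_id table below probe and adding an spi_device_id table works together with the switch to spi_get_device_match_data(): devices instantiated without a firmware node (SPI board info or the sysfs new_device interface) have no OF match data, so device_get_match_data() would return NULL and probe would bail with -EINVAL. Conceptually the helper does roughly the following; this is a hedged sketch of the idea for kernel context, not the kernel's actual implementation:

#include <linux/spi/spi.h>
#include <linux/property.h>

/* Sketch only: prefer firmware (OF/ACPI) match data, fall back to the
 * legacy spi_device_id table populated above. */
static const void *sketch_spi_get_device_match_data(const struct spi_device *sdev)
{
	const void *data = device_get_match_data(&sdev->dev);	/* OF/ACPI path */

	if (!data) {
		const struct spi_device_id *id = spi_get_device_id(sdev);

		if (id)
			data = (const void *)id->driver_data;	/* board-info path */
	}
	return data;
}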
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 3fbeef1a70186b..6b034dccc3b17e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -750,13 +750,13 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
ret = inv_mpu6050_sensor_show(st, st->reg->gyro_offset,
chan->channel2, val);
mutex_unlock(&st->lock);
- return IIO_VAL_INT;
+ return ret;
case IIO_ACCEL:
mutex_lock(&st->lock);
ret = inv_mpu6050_sensor_show(st, st->reg->accl_offset,
chan->channel2, val);
mutex_unlock(&st->lock);
- return IIO_VAL_INT;
+ return ret;
default:
return -EINVAL;
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index f17304b5446839..5cd27f04b45e6d 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -14,11 +14,8 @@
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum {
- CHANNEL_SCAN_INDEX_INTENSITY,
- CHANNEL_SCAN_INDEX_ILLUM,
- CHANNEL_SCAN_INDEX_COLOR_TEMP,
- CHANNEL_SCAN_INDEX_CHROMATICITY_X,
- CHANNEL_SCAN_INDEX_CHROMATICITY_Y,
+ CHANNEL_SCAN_INDEX_INTENSITY = 0,
+ CHANNEL_SCAN_INDEX_ILLUM = 1,
CHANNEL_SCAN_INDEX_MAX
};
@@ -68,40 +65,6 @@ static const struct iio_chan_spec als_channels[] = {
BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
.scan_index = CHANNEL_SCAN_INDEX_ILLUM,
},
- {
- .type = IIO_COLORTEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_SAMP_FREQ) |
- BIT(IIO_CHAN_INFO_HYSTERESIS) |
- BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
- .scan_index = CHANNEL_SCAN_INDEX_COLOR_TEMP,
- },
- {
- .type = IIO_CHROMATICITY,
- .modified = 1,
- .channel2 = IIO_MOD_X,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_SAMP_FREQ) |
- BIT(IIO_CHAN_INFO_HYSTERESIS) |
- BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
- .scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_X,
- },
- {
- .type = IIO_CHROMATICITY,
- .modified = 1,
- .channel2 = IIO_MOD_Y,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_SAMP_FREQ) |
- BIT(IIO_CHAN_INFO_HYSTERESIS) |
- BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
- .scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_Y,
- },
IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP)
};
@@ -140,21 +103,6 @@ static int als_read_raw(struct iio_dev *indio_dev,
min = als_state->als[chan->scan_index].logical_minimum;
address = HID_USAGE_SENSOR_LIGHT_ILLUM;
break;
- case CHANNEL_SCAN_INDEX_COLOR_TEMP:
- report_id = als_state->als[chan->scan_index].report_id;
- min = als_state->als[chan->scan_index].logical_minimum;
- address = HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE;
- break;
- case CHANNEL_SCAN_INDEX_CHROMATICITY_X:
- report_id = als_state->als[chan->scan_index].report_id;
- min = als_state->als[chan->scan_index].logical_minimum;
- address = HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X;
- break;
- case CHANNEL_SCAN_INDEX_CHROMATICITY_Y:
- report_id = als_state->als[chan->scan_index].report_id;
- min = als_state->als[chan->scan_index].logical_minimum;
- address = HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y;
- break;
default:
report_id = -1;
break;
@@ -275,18 +223,6 @@ static int als_capture_sample(struct hid_sensor_hub_device *hsdev,
als_state->scan.illum[CHANNEL_SCAN_INDEX_ILLUM] = sample_data;
ret = 0;
break;
- case HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE:
- als_state->scan.illum[CHANNEL_SCAN_INDEX_COLOR_TEMP] = sample_data;
- ret = 0;
- break;
- case HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X:
- als_state->scan.illum[CHANNEL_SCAN_INDEX_CHROMATICITY_X] = sample_data;
- ret = 0;
- break;
- case HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y:
- als_state->scan.illum[CHANNEL_SCAN_INDEX_CHROMATICITY_Y] = sample_data;
- ret = 0;
- break;
case HID_USAGE_SENSOR_TIME_TIMESTAMP:
als_state->timestamp = hid_sensor_convert_timestamp(&als_state->common_attributes,
*(s64 *)raw_data);
@@ -322,38 +258,6 @@ static int als_parse_report(struct platform_device *pdev,
st->als[i].report_id);
}
- ret = sensor_hub_input_get_attribute_info(hsdev, HID_INPUT_REPORT,
- usage_id,
- HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE,
- &st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP]);
- if (ret < 0)
- return ret;
- als_adjust_channel_bit_mask(channels, CHANNEL_SCAN_INDEX_COLOR_TEMP,
- st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP].size);
-
- dev_dbg(&pdev->dev, "als %x:%x\n",
- st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP].index,
- st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP].report_id);
-
- for (i = 0; i < 2; i++) {
- int next_scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_X + i;
-
- ret = sensor_hub_input_get_attribute_info(hsdev,
- HID_INPUT_REPORT, usage_id,
- HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X + i,
- &st->als[next_scan_index]);
- if (ret < 0)
- return ret;
-
- als_adjust_channel_bit_mask(channels,
- CHANNEL_SCAN_INDEX_CHROMATICITY_X + i,
- st->als[next_scan_index].size);
-
- dev_dbg(&pdev->dev, "als %x:%x\n",
- st->als[next_scan_index].index,
- st->als[next_scan_index].report_id);
- }
-
st->scale_precision = hid_sensor_format_scale(usage_id,
&st->als[CHANNEL_SCAN_INDEX_INTENSITY],
&st->scale_pre_decml, &st->scale_post_decml);
diff --git a/drivers/iio/magnetometer/tmag5273.c b/drivers/iio/magnetometer/tmag5273.c
index c5e5c4ad681e64..e8c4ca142d21d6 100644
--- a/drivers/iio/magnetometer/tmag5273.c
+++ b/drivers/iio/magnetometer/tmag5273.c
@@ -356,7 +356,7 @@ static int tmag5273_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_OFFSET:
switch (chan->type) {
case IIO_TEMP:
- *val = -266314;
+ *val = -16005;
return IIO_VAL_INT;
default:
return -EINVAL;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index f5c21565bb3cec..e2c1848182de9a 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -286,6 +286,7 @@ static const struct xpad_device {
{ 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
+ { 0x1532, 0x0a29, "Razer Wolverine V2", 0, XTYPE_XBOXONE },
{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index c92e544c792df8..786f00f6b7fd8b 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -765,6 +765,44 @@ static void atkbd_deactivate(struct atkbd *atkbd)
ps2dev->serio->phys);
}
+#ifdef CONFIG_X86
+static bool atkbd_is_portable_device(void)
+{
+ static const char * const chassis_types[] = {
+ "8", /* Portable */
+ "9", /* Laptop */
+ "10", /* Notebook */
+ "14", /* Sub-Notebook */
+ "31", /* Convertible */
+ "32", /* Detachable */
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(chassis_types); i++)
+ if (dmi_match(DMI_CHASSIS_TYPE, chassis_types[i]))
+ return true;
+
+ return false;
+}
+
+/*
+ * On many modern laptops ATKBD_CMD_GETID may cause problems; on these laptops
+ * the controller is always in translated mode. In this mode mice/touchpads will
+ * not work. So in this case simply assume a keyboard is connected to avoid
+ * confusing some laptop keyboards.
+ *
+ * Skipping ATKBD_CMD_GETID ends up using a fake keyboard id. Using a fake id is
+ * ok in translated mode, only atkbd_select_set() checks atkbd->id and in
+ * translated mode that is a no-op.
+ */
+static bool atkbd_skip_getid(struct atkbd *atkbd)
+{
+ return atkbd->translated && atkbd_is_portable_device();
+}
+#else
+static inline bool atkbd_skip_getid(struct atkbd *atkbd) { return false; }
+#endif
+
/*
* atkbd_probe() probes for an AT keyboard on a serio port.
*/
@@ -794,12 +832,12 @@ static int atkbd_probe(struct atkbd *atkbd)
*/
param[0] = param[1] = 0xa5; /* initialize with invalid values */
- if (ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
+ if (atkbd_skip_getid(atkbd) || ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
/*
- * If the get ID command failed, we check if we can at least set the LEDs on
- * the keyboard. This should work on every keyboard out there. It also turns
- * the LEDs off, which we want anyway.
+ * If the get ID command was skipped or failed, we check if we can at least set
+ * the LEDs on the keyboard. This should work on every keyboard out there.
+ * It also turns the LEDs off, which we want anyway.
*/
param[0] = 0;
if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))
diff --git a/drivers/input/keyboard/ipaq-micro-keys.c b/drivers/input/keyboard/ipaq-micro-keys.c
index 7b509bce2b332f..1d71dd79ffd289 100644
--- a/drivers/input/keyboard/ipaq-micro-keys.c
+++ b/drivers/input/keyboard/ipaq-micro-keys.c
@@ -105,6 +105,9 @@ static int micro_key_probe(struct platform_device *pdev)
keys->codes = devm_kmemdup(&pdev->dev, micro_keycodes,
keys->input->keycodesize * keys->input->keycodemax,
GFP_KERNEL);
+ if (!keys->codes)
+ return -ENOMEM;
+
keys->input->keycode = keys->codes;
__set_bit(EV_KEY, keys->input->evbit);
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index 08bcee3d6bccac..f6d060377d1894 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -299,6 +299,11 @@ static int soc_button_parse_btn_desc(struct device *dev,
info->name = "power";
info->event_code = KEY_POWER;
info->wakeup = true;
+ } else if (upage == 0x01 && usage == 0xc6) {
+ info->name = "airplane mode switch";
+ info->event_type = EV_SW;
+ info->event_code = SW_RFKILL_ALL;
+ info->active_low = false;
} else if (upage == 0x01 && usage == 0xca) {
info->name = "rotation lock switch";
info->event_type = EV_SW;
diff --git a/drivers/input/mouse/amimouse.c b/drivers/input/mouse/amimouse.c
index a50e5035483231..cda0c3ff5a288d 100644
--- a/drivers/input/mouse/amimouse.c
+++ b/drivers/input/mouse/amimouse.c
@@ -125,16 +125,15 @@ static int __init amimouse_probe(struct platform_device *pdev)
return 0;
}
-static int __exit amimouse_remove(struct platform_device *pdev)
+static void __exit amimouse_remove(struct platform_device *pdev)
{
struct input_dev *dev = platform_get_drvdata(pdev);
input_unregister_device(dev);
- return 0;
}
static struct platform_driver amimouse_driver = {
- .remove = __exit_p(amimouse_remove),
+ .remove_new = __exit_p(amimouse_remove),
.driver = {
.name = "amiga-mouse",
},
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 22d16d80efb938..7a303a9d6bf72b 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -183,6 +183,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN009b", /* T580 */
"LEN0402", /* X1 Extreme Gen 2 / P1 Gen 2 */
"LEN040f", /* P1 Gen 3 */
+ "LEN0411", /* L14 Gen 1 */
"LEN200f", /* T450s */
"LEN2044", /* L470 */
"LEN2054", /* E480 */
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index 9c39553d30fa27..b585b1dab870e0 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -361,6 +361,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
+ /* Acer TravelMate P459-G2-M */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate P459-G2-M"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
/* Amoi M636/A737 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index dfab160ca52934..50bac2d79d9b5e 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -395,6 +395,9 @@ struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
}
mutex_unlock(&icc_lock);
+ if (!node)
+ return ERR_PTR(-EINVAL);
+
if (IS_ERR(node))
return ERR_CAST(node);
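The added `if (!node)` check covers the case where the provider loop finishes without ever assigning `node`: NULL is not an error pointer, so it sails past IS_ERR() and gets dereferenced by callers. The general idiom the fix follows is that lookup helpers returning ERR_PTR-style results must convert "not found" into an explicit error. A small userspace illustration with simplified stand-ins for the <linux/err.h> helpers:

/* NULL is *not* an error pointer: IS_ERR(NULL) is false, so "not found"
 * must be turned into ERR_PTR(-EINVAL) explicitly. Simplified helpers. */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *lookup(int found)
{
	static int dummy;		/* stand-in for a real node object */
	void *node = NULL;

	if (found)
		node = &dummy;

	if (!node)
		return ERR_PTR(-EINVAL);	/* the added check */
	return node;
}

int main(void)
{
	printf("IS_ERR(NULL)      = %d (would slip through)\n", IS_ERR(NULL));
	printf("IS_ERR(lookup(0)) = %d (caught)\n", IS_ERR(lookup(0)));
	return 0;
}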
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index 628e651c555cb0..dbacb2a7af508f 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -307,7 +307,7 @@ static u64 qcom_icc_calc_rate(struct qcom_icc_provider *qp, struct qcom_icc_node
if (qn->ib_coeff) {
agg_peak_rate = qn->max_peak[ctx] * 100;
- agg_peak_rate = div_u64(qn->max_peak[ctx], qn->ib_coeff);
+ agg_peak_rate = div_u64(agg_peak_rate, qn->ib_coeff);
} else {
agg_peak_rate = qn->max_peak[ctx];
}
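The icc-rpm change is a plain order-of-operations fix: the peak bandwidth was scaled by 100 into agg_peak_rate, but the division then used the unscaled qn->max_peak[ctx], throwing the scaling away. With illustrative numbers (max_peak = 1000, ib_coeff = 153; not taken from any real platform table):

/* Before vs after the icc-rpm fix: divide the scaled value, not the input. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t max_peak = 1000, ib_coeff = 153;	/* illustrative values */

	uint64_t scaled = max_peak * 100;
	uint64_t buggy  = max_peak / ib_coeff;		/* old code: 6, scaling lost */
	uint64_t fixed  = scaled / ib_coeff;		/* new code: 653 */

	printf("buggy=%llu fixed=%llu\n",
	       (unsigned long long)buggy, (unsigned long long)fixed);
	return 0;
}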
diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
index 83aeb3eedc1972..02d40eea0d6964 100644
--- a/drivers/interconnect/qcom/sm8250.c
+++ b/drivers/interconnect/qcom/sm8250.c
@@ -1995,6 +1995,7 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8250",
.of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 2a8b081bce7dd8..3ff87cb4dc4948 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -660,6 +660,7 @@ config DM_ZONED
config DM_AUDIT
bool "DM audit events"
+ depends on BLK_DEV_DM
depends on AUDIT
help
Generate audit events for device-mapper.
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index e85c688fd91e90..c5f03aab455256 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1755,11 +1755,12 @@ static void integrity_metadata(struct work_struct *w)
sectors_to_process = dio->range.n_sectors;
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
+ struct bio_vec bv_copy = bv;
unsigned int pos;
char *mem, *checksums_ptr;
again:
- mem = bvec_kmap_local(&bv);
+ mem = bvec_kmap_local(&bv_copy);
pos = 0;
checksums_ptr = checksums;
do {
@@ -1768,7 +1769,7 @@ again:
sectors_to_process -= ic->sectors_per_block;
pos += ic->sectors_per_block << SECTOR_SHIFT;
sector += ic->sectors_per_block;
- } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
+ } while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack);
kunmap_local(mem);
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
@@ -1793,9 +1794,9 @@ again:
if (!sectors_to_process)
break;
- if (unlikely(pos < bv.bv_len)) {
- bv.bv_offset += pos;
- bv.bv_len -= pos;
+ if (unlikely(pos < bv_copy.bv_len)) {
+ bv_copy.bv_offset += pos;
+ bv_copy.bv_len -= pos;
goto again;
}
}
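The dm-integrity change works because `bv` is the cursor of __bio_for_each_segment() and must not be modified, while the `goto again` retry path needs to advance offset/length to finish a partially processed segment; doing that on a local copy keeps the retry without corrupting the iteration. A generic, bio-free illustration of the "mutate a copy of the loop cursor" pattern:

/* Mutate a copy of the loop cursor, never the cursor itself, when a retry
 * path needs to consume an element in pieces. */
#include <stdio.h>

struct seg { int off, len; };

int main(void)
{
	struct seg segs[] = { { 0, 8 }, { 0, 4 } };
	int chunk = 3;				/* process at most 3 units per pass */

	for (unsigned i = 0; i < 2; i++) {
		struct seg cur = segs[i];	/* the "bv_copy" */
again:
		{
			int done = cur.len < chunk ? cur.len : chunk;

			printf("seg %u: off=%d len=%d -> consume %d\n",
			       i, cur.off, cur.len, done);
			cur.off += done;
			cur.len -= done;
			if (cur.len)
				goto again;	/* segs[i] itself stays untouched */
		}
	}
	return 0;
}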
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 91ebdcc6e9a8c5..eb009d6bb03a17 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3317,6 +3317,9 @@ static void raid_dtr(struct dm_target *ti)
mddev_lock_nointr(&rs->md);
md_stop(&rs->md);
mddev_unlock(&rs->md);
+
+ if (work_pending(&rs->md.event_work))
+ flush_work(&rs->md.event_work);
raid_set_free(rs);
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b066abbffd10e0..9bdd57324c3760 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -82,6 +82,14 @@ static struct module *md_cluster_mod;
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
+
+/*
+ * This workqueue is used for sync_work to register new sync_thread, and for
+ * del_work to remove rdev, and for event_work that is only set by dm-raid.
+ *
+ * Note that sync_work will grab reconfig_mutex, so never flush this
+ * workqueue while holding reconfig_mutex.
+ */
static struct workqueue_struct *md_misc_wq;
struct workqueue_struct *md_bitmap_wq;
@@ -6330,9 +6338,6 @@ static void __md_stop(struct mddev *mddev)
struct md_personality *pers = mddev->pers;
md_bitmap_destroy(mddev);
mddev_detach(mddev);
- /* Ensure ->event_work is done */
- if (mddev->event_work.func)
- flush_workqueue(md_misc_wq);
spin_lock(&mddev->lock);
mddev->pers = NULL;
spin_unlock(&mddev->lock);
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 3d6f0a466a9ed4..f9f886289b970a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -328,9 +328,6 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
* compare it to the stored version, just create the meta
*/
if (io_sq->disable_meta_caching) {
- if (unlikely(!ena_tx_ctx->meta_valid))
- return -EINVAL;
-
*have_meta = true;
return ena_com_create_meta(io_sq, ena_meta);
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index b5bca481483099..c44c44e26ddfe7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -74,6 +74,8 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
struct ena_tx_buffer *tx_info);
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
int first_index, int count);
+static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
static void ena_increase_stat(u64 *statp, u64 cnt,
@@ -457,23 +459,22 @@ static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
{
+ u32 xdp_first_ring = adapter->xdp_first_ring;
+ u32 xdp_num_queues = adapter->xdp_num_queues;
int rc = 0;
- rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
- adapter->xdp_num_queues);
+ rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
if (rc)
goto setup_err;
- rc = ena_create_io_tx_queues_in_range(adapter,
- adapter->xdp_first_ring,
- adapter->xdp_num_queues);
+ rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
if (rc)
goto create_err;
return 0;
create_err:
- ena_free_all_io_tx_resources(adapter);
+ ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
setup_err:
return rc;
}
@@ -1492,11 +1493,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
if (unlikely(!skb))
return NULL;
- /* sync this buffer for CPU use */
- dma_sync_single_for_cpu(rx_ring->dev,
- dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
- len,
- DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);
dma_sync_single_for_device(rx_ring->dev,
dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
@@ -1515,17 +1511,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
- pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
-
/* If XDP isn't loaded try to reuse part of the RX buffer */
reuse_rx_buf_page = !is_xdp_loaded &&
ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
- dma_sync_single_for_cpu(rx_ring->dev,
- pre_reuse_paddr + pkt_offset,
- len,
- DMA_FROM_DEVICE);
-
if (!reuse_rx_buf_page)
ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
@@ -1671,20 +1660,23 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring,
}
}
-static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs)
{
struct ena_rx_buffer *rx_info;
int ret;
+ /* XDP multi-buffer packets not supported */
+ if (unlikely(num_descs > 1)) {
+ netdev_err_once(rx_ring->adapter->netdev,
+ "xdp: dropped unsupported multi-buffer packets\n");
+ ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp);
+ return ENA_XDP_DROP;
+ }
+
rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
xdp_prepare_buff(xdp, page_address(rx_info->page),
rx_info->buf_offset,
rx_ring->ena_bufs[0].len, false);
- /* If for some reason we received a bigger packet than
- * we expect, then we simply drop it
- */
- if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
- return ENA_XDP_DROP;
ret = ena_xdp_execute(rx_ring, xdp);
@@ -1719,6 +1711,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
int xdp_flags = 0;
int total_len = 0;
int xdp_verdict;
+ u8 pkt_offset;
int rc = 0;
int i;
@@ -1745,15 +1738,21 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
/* First descriptor might have an offset set by the device */
rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
- rx_info->buf_offset += ena_rx_ctx.pkt_offset;
+ pkt_offset = ena_rx_ctx.pkt_offset;
+ rx_info->buf_offset += pkt_offset;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
+ dma_sync_single_for_cpu(rx_ring->dev,
+ dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
+ rx_ring->ena_bufs[0].len,
+ DMA_FROM_DEVICE);
+
if (ena_xdp_present_ring(rx_ring))
- xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
+ xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs);
/* allocate skb and fill it */
if (xdp_verdict == ENA_XDP_PASS)
@@ -1777,7 +1776,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
if (xdp_verdict & ENA_XDP_FORWARDED) {
ena_unmap_rx_buff_attrs(rx_ring,
&rx_ring->rx_buffer_info[req_id],
- 0);
+ DMA_ATTR_SKIP_CPU_SYNC);
rx_ring->rx_buffer_info[req_id].page = NULL;
}
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 694daeaf3e6150..e1885c1eb100a1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -938,11 +938,14 @@ void aq_ring_free(struct aq_ring_s *self)
return;
kfree(self->buff_ring);
+ self->buff_ring = NULL;
- if (self->dx_ring)
+ if (self->dx_ring) {
dma_free_coherent(aq_nic_get_dev(self->aq_nic),
self->size * self->dx_size, self->dx_ring,
self->dx_ring_pa);
+ self->dx_ring = NULL;
+ }
}
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 5935be190b9e22..5f2a6fcba96708 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -866,10 +866,13 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
offset, adapter->ring_size);
err = -1;
- goto failed;
+ goto free_buffer;
}
return 0;
+free_buffer:
+ kfree(tx_ring->tx_buffer);
+ tx_ring->tx_buffer = NULL;
failed:
if (adapter->ring_vir_addr != NULL) {
dma_free_coherent(&pdev->dev, adapter->ring_size,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index d0359b569afeb1..579eebb6fc5663 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1748,16 +1748,32 @@ static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
struct sk_buff *skb)
{
+ skb_mark_for_recycle(skb);
+
if (skb->dev != bp->dev) {
/* this packet belongs to a vf-rep */
bnxt_vf_rep_rx(bp, skb);
return;
}
skb_record_rx_queue(skb, bnapi->index);
- skb_mark_for_recycle(skb);
napi_gro_receive(&bnapi->napi, skb);
}
+static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
+ struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
+{
+ u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
+
+ if (BNXT_PTP_RX_TS_VALID(flags))
+ goto ts_valid;
+ if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
+ return false;
+
+ts_valid:
+ *cmpl_ts = ts;
+ return true;
+}
+
/* returns the following:
* 1 - 1 packet successfully received
* 0 - successful TPA_START, packet not completed yet
@@ -1783,6 +1799,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct sk_buff *skb;
struct xdp_buff xdp;
u32 flags, misc;
+ u32 cmpl_ts;
void *data;
int rc = 0;
@@ -2005,10 +2022,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
- if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
- RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
+ if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
if (bp->flags & BNXT_FLAG_CHIP_P5) {
- u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
u64 ns, ts;
if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
@@ -10731,10 +10746,8 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bnxt_free_mem(bp, irq_re_init);
}
-int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
- int rc = 0;
-
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
/* If we get here, it means firmware reset is in progress
* while we are trying to close. We can safely proceed with
@@ -10749,15 +10762,18 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
#ifdef CONFIG_BNXT_SRIOV
if (bp->sriov_cfg) {
+ int rc;
+
rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
!bp->sriov_cfg,
BNXT_SRIOV_CFG_WAIT_TMO);
- if (rc)
- netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+ if (!rc)
+ netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
+ else if (rc < 0)
+ netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
}
#endif
__bnxt_close_nic(bp, irq_re_init, link_re_init);
- return rc;
}
static int bnxt_close(struct net_device *dev)
@@ -13940,6 +13956,8 @@ static int bnxt_resume(struct device *device)
if (rc)
goto resume_exit;
+ bnxt_clear_reservations(bp, true);
+
if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
rc = -ENODEV;
goto resume_exit;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index e702dbc3e6b131..a7d7b09ea16202 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -161,7 +161,7 @@ struct rx_cmp {
#define RX_CMP_FLAGS_ERROR (1 << 6)
#define RX_CMP_FLAGS_PLACEMENT (7 << 7)
#define RX_CMP_FLAGS_RSS_VALID (1 << 10)
- #define RX_CMP_FLAGS_UNUSED (1 << 11)
+ #define RX_CMP_FLAGS_PKT_METADATA_PRESENT (1 << 11)
#define RX_CMP_FLAGS_ITYPES_SHIFT 12
#define RX_CMP_FLAGS_ITYPES_MASK 0xf000
#define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12)
@@ -188,6 +188,12 @@ struct rx_cmp {
__le32 rx_cmp_rss_hash;
};
+#define BNXT_PTP_RX_TS_VALID(flags) \
+ (((flags) & RX_CMP_FLAGS_ITYPES_MASK) == RX_CMP_FLAGS_ITYPE_PTP_W_TS)
+
+#define BNXT_ALL_RX_TS_VALID(flags) \
+ !((flags) & RX_CMP_FLAGS_PKT_METADATA_PRESENT)
+
#define RX_CMP_HASH_VALID(rxcmp) \
((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
@@ -2375,7 +2381,7 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
void bnxt_reenable_sriov(struct bnxt *bp);
-int bnxt_close_nic(struct bnxt *, bool, bool);
+void bnxt_close_nic(struct bnxt *, bool, bool);
void bnxt_get_ring_err_stats(struct bnxt *bp,
struct bnxt_total_ring_err_stats *stats);
int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index f302dac565996d..89809f1b129c0e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -449,15 +449,8 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
return -ENODEV;
}
bnxt_ulp_stop(bp);
- if (netif_running(bp->dev)) {
- rc = bnxt_close_nic(bp, true, true);
- if (rc) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to close");
- dev_close(bp->dev);
- rtnl_unlock();
- break;
- }
- }
+ if (netif_running(bp->dev))
+ bnxt_close_nic(bp, true, true);
bnxt_vf_reps_free(bp);
rc = bnxt_hwrm_func_drv_unrgtr(bp);
if (rc) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index f3f384773ac038..5f67a7f94e7d1f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -165,9 +165,8 @@ static int bnxt_set_coalesce(struct net_device *dev,
reset_coalesce:
if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
if (update_stats) {
- rc = bnxt_close_nic(bp, true, false);
- if (!rc)
- rc = bnxt_open_nic(bp, true, false);
+ bnxt_close_nic(bp, true, false);
+ rc = bnxt_open_nic(bp, true, false);
} else {
rc = bnxt_hwrm_set_coal(bp);
}
@@ -972,12 +971,7 @@ static int bnxt_set_channels(struct net_device *dev,
* before PF unload
*/
}
- rc = bnxt_close_nic(bp, true, false);
- if (rc) {
- netdev_err(bp->dev, "Set channel failure rc :%x\n",
- rc);
- return rc;
- }
+ bnxt_close_nic(bp, true, false);
}
if (sh) {
@@ -4042,12 +4036,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
bnxt_run_fw_tests(bp, test_mask, &test_results);
} else {
bnxt_ulp_stop(bp);
- rc = bnxt_close_nic(bp, true, false);
- if (rc) {
- etest->flags |= ETH_TEST_FL_FAILED;
- bnxt_ulp_start(bp, rc);
- return;
- }
+ bnxt_close_nic(bp, true, false);
bnxt_run_fw_tests(bp, test_mask, &test_results);
buf[BNXT_MACLPBK_TEST_IDX] = 1;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index f3886710e77873..6e3da3362bd617 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -521,9 +521,8 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
if (netif_running(bp->dev)) {
if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) {
- rc = bnxt_close_nic(bp, false, false);
- if (!rc)
- rc = bnxt_open_nic(bp, false, false);
+ bnxt_close_nic(bp, false, false);
+ rc = bnxt_open_nic(bp, false, false);
} else {
bnxt_ptp_cfg_tstamp_filters(bp);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 96f5ca778c67d6..8cb9a99154aad9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -59,7 +59,6 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
for (i = 0; i < num_frags ; i++) {
skb_frag_t *frag = &sinfo->frags[i];
struct bnxt_sw_tx_bd *frag_tx_buf;
- struct pci_dev *pdev = bp->pdev;
dma_addr_t frag_mapping;
int frag_len;
@@ -73,16 +72,10 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
frag_len = skb_frag_size(frag);
- frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
- frag_len, DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping)))
- return NULL;
-
- dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping);
-
flags = frag_len << TX_BD_LEN_SHIFT;
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ frag_mapping = page_pool_get_dma_addr(skb_frag_page(frag)) +
+ skb_frag_off(frag);
txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
len = frag_len;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
index 4798fb7fe35d14..b6a534a3e0b123 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -139,7 +139,8 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
filter_block->acl_id, acl_entry_cfg);
- dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
+ dma_unmap_single(dev, acl_entry_cfg->key_iova,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
DMA_TO_DEVICE);
if (err) {
dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
@@ -181,8 +182,8 @@ dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
block->acl_id, acl_entry_cfg);
- dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
- DMA_TO_DEVICE);
+ dma_unmap_single(dev, acl_entry_cfg->key_iova,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
if (err) {
dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
kfree(cmd_buff);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 97d3151076d534..e01a246124ac69 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -1998,9 +1998,6 @@ static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
return notifier_from_errno(err);
}
-static struct notifier_block dpaa2_switch_port_switchdev_nb;
-static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb;
-
static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
struct net_device *upper_dev,
struct netlink_ext_ack *extack)
@@ -2043,9 +2040,7 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
goto err_egress_flood;
err = switchdev_bridge_port_offload(netdev, netdev, NULL,
- &dpaa2_switch_port_switchdev_nb,
- &dpaa2_switch_port_switchdev_blocking_nb,
- false, extack);
+ NULL, NULL, false, extack);
if (err)
goto err_switchdev_offload;
@@ -2079,9 +2074,7 @@ static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, vo
static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev)
{
- switchdev_bridge_port_unoffload(netdev, NULL,
- &dpaa2_switch_port_switchdev_nb,
- &dpaa2_switch_port_switchdev_blocking_nb);
+ switchdev_bridge_port_unoffload(netdev, NULL, NULL, NULL);
}
static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c3b7694a74851c..e08c7b572497d3 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3731,31 +3731,26 @@ static int fec_set_features(struct net_device *netdev,
return 0;
}
-static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb)
-{
- struct vlan_ethhdr *vhdr;
- unsigned short vlan_TCI = 0;
-
- if (skb->protocol == htons(ETH_P_ALL)) {
- vhdr = (struct vlan_ethhdr *)(skb->data);
- vlan_TCI = ntohs(vhdr->h_vlan_TCI);
- }
-
- return vlan_TCI;
-}
-
static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
struct net_device *sb_dev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u16 vlan_tag;
+ u16 vlan_tag = 0;
if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
return netdev_pick_tx(ndev, skb, NULL);
- vlan_tag = fec_enet_get_raw_vlan_tci(skb);
- if (!vlan_tag)
+ /* VLAN is present in the payload. */
+ if (eth_type_vlan(skb->protocol)) {
+ struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
+
+ vlan_tag = ntohs(vhdr->h_vlan_TCI);
+ /* VLAN is present in the skb but not yet pushed in the payload. */
+ } else if (skb_vlan_tag_present(skb)) {
+ vlan_tag = skb->vlan_tci;
+ } else {
return vlan_tag;
+ }
return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
}
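The rewritten queue-selection helper handles both places a VLAN tag can live: already inserted in the frame (skb->protocol is a VLAN ethertype, read via skb_vlan_eth_hdr()) or still out-of-band in the skb (skb_vlan_tag_present()). Either way the queue comes from the PCP priority field, the top three bits of the 16-bit TCI, which is why the lookup uses vlan_tag >> 13. A standalone check of that bit layout:

/* The 802.1Q TCI packs PCP(3) | DEI(1) | VID(12); priority is tci >> 13. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* PCP = 5, DEI = 0, VID = 100 */
	uint16_t tci = (5u << 13) | (0u << 12) | 100u;

	printf("tci=0x%04x pcp=%d vid=%d\n", tci, tci >> 13, tci & 0x0fff);
	return 0;
}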
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index f408fcf23ce86d..f6671ac797353a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -207,7 +207,7 @@
#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
#define I40E_GLGEN_MSCA_OPCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
-#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_STCODE_SHIFT)
#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index aff6dc6afbe21b..f95bc2a4a83883 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -37,11 +37,11 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
#define I40E_QTX_CTL_VM_QUEUE 0x1
#define I40E_QTX_CTL_PF_QUEUE 0x2
-#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK
+#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK(1)
#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1)
#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(2)
-#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK
+#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK(0)
#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_GLGEN_MSCA_OPCODE_MASK(0)
#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1)
#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(3)
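Parameterising I40E_GLGEN_MSCA_STCODE_MASK is needed because MDIO Clause 22 and Clause 45 frames use different start codes (ST = 01 for Clause 22, 00 for Clause 45), so the two users can no longer share the old fixed 0x1 value. Assuming the usual I40E_MASK(mask, shift) = ((u32)(mask) << (shift)) definition (an assumption here, not quoted from the header), the new macros expand as follows:

/* Expansion of the parameterised STCODE masks; I40E_MASK() is assumed to be
 * the plain shift-into-place helper. */
#include <stdio.h>
#include <stdint.h>

#define I40E_MASK(mask, shift)          ((uint32_t)(mask) << (shift))
#define I40E_GLGEN_MSCA_STCODE_SHIFT    28
#define I40E_GLGEN_MSCA_STCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_STCODE_SHIFT)

int main(void)
{
	/* Clause 22 start code is 1, Clause 45 start code is 0. */
	printf("clause22 stcode bits = 0x%08x\n", I40E_GLGEN_MSCA_STCODE_MASK(1));
	printf("clause45 stcode bits = 0x%08x\n", I40E_GLGEN_MSCA_STCODE_MASK(0));
	return 0;
}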
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index e7ab89dc883aa6..63b45c61cc4aa3 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -292,6 +292,7 @@ struct iavf_adapter {
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
#define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20)
+#define IAVF_FLAG_FDIR_ENABLED BIT(21)
/* duplicates for common code */
#define IAVF_FLAG_DCB_ENABLED 0
/* flags for admin queue service task */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 19cbfe554689f4..dc499fe7734ec9 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1061,7 +1061,7 @@ iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
struct iavf_fdir_fltr *rule = NULL;
int ret = 0;
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
spin_lock_bh(&adapter->fdir_fltr_lock);
@@ -1203,7 +1203,7 @@ iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
unsigned int cnt = 0;
int val = 0;
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
cmd->data = IAVF_MAX_FDIR_FILTERS;
@@ -1395,7 +1395,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
int count = 50;
int err;
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
if (fsp->flow_type & FLOW_MAC_EXT)
@@ -1436,12 +1436,16 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
spin_lock_bh(&adapter->fdir_fltr_lock);
iavf_fdir_list_add_fltr(adapter, fltr);
adapter->fdir_active_fltr++;
- fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+ if (adapter->link_up) {
+ fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+ } else {
+ fltr->state = IAVF_FDIR_FLTR_INACTIVE;
+ }
spin_unlock_bh(&adapter->fdir_fltr_lock);
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
-
+ if (adapter->link_up)
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
ret:
if (err && fltr)
kfree(fltr);
@@ -1463,7 +1467,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
struct iavf_fdir_fltr *fltr = NULL;
int err = 0;
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
spin_lock_bh(&adapter->fdir_fltr_lock);
@@ -1472,6 +1476,11 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
+ list_del(&fltr->list);
+ kfree(fltr);
+ adapter->fdir_active_fltr--;
+ fltr = NULL;
} else {
err = -EBUSY;
}
@@ -1780,7 +1789,7 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
ret = 0;
break;
case ETHTOOL_GRXCLSRLCNT:
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
break;
spin_lock_bh(&adapter->fdir_fltr_lock);
cmd->rule_cnt = adapter->fdir_active_fltr;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
index 9eb9f73f6adf3a..d31bd923ba8cbf 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
@@ -6,12 +6,25 @@
struct iavf_adapter;
-/* State of Flow Director filter */
+/* State of Flow Director filter
+ *
+ * *_REQUEST states mark a filter that must be sent to the PF driver to perform
+ * an action (either add or delete the filter). *_PENDING states indicate that
+ * the request was sent to the PF and the driver is waiting for a response.
+ *
+ * Both the DELETE and DISABLE states are used to delete a filter in the PF.
+ * The difference is that after a successful response a filter in DEL_PENDING
+ * state is also deleted from the VF driver, while a filter in DIS_PENDING
+ * state is changed to the INACTIVE state.
+ */
enum iavf_fdir_fltr_state_t {
IAVF_FDIR_FLTR_ADD_REQUEST, /* User requests to add filter */
IAVF_FDIR_FLTR_ADD_PENDING, /* Filter pending add by the PF */
IAVF_FDIR_FLTR_DEL_REQUEST, /* User requests to delete filter */
IAVF_FDIR_FLTR_DEL_PENDING, /* Filter pending delete by the PF */
+ IAVF_FDIR_FLTR_DIS_REQUEST, /* Filter scheduled to be disabled */
+ IAVF_FDIR_FLTR_DIS_PENDING, /* Filter pending disable by the PF */
+ IAVF_FDIR_FLTR_INACTIVE, /* Filter inactive on link down */
IAVF_FDIR_FLTR_ACTIVE, /* Filter is active */
};
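The comment and the three new states above describe a small per-filter state machine that the rest of this patch drives from iavf_clear_fdir_filters() on link down and from iavf_restore_fdir_filters() / iavf_activate_fdir_filters() on link up or reset. As a reading aid, here is a minimal standalone C sketch of those documented transitions; the enum and helper names are illustrative stand-ins, not driver code.

/* Illustrative sketch (not from the patch): model of the documented FDIR
 * filter state transitions on link down and link up.
 */
#include <stdio.h>

enum fdir_state {
	FDIR_ADD_REQUEST,
	FDIR_ADD_PENDING,
	FDIR_DEL_REQUEST,
	FDIR_DEL_PENDING,
	FDIR_DIS_REQUEST,
	FDIR_DIS_PENDING,
	FDIR_INACTIVE,
	FDIR_ACTIVE,
};

/* Link down: cancel unsent add requests, disable filters the PF knows about */
static enum fdir_state on_link_down(enum fdir_state s)
{
	if (s == FDIR_ADD_REQUEST)
		return FDIR_INACTIVE;		/* request never reached the PF */
	if (s == FDIR_ADD_PENDING || s == FDIR_ACTIVE)
		return FDIR_DIS_REQUEST;	/* ask the PF to remove it, keep it locally */
	return s;
}

/* Link up: cancel pending disables, re-add anything that went inactive */
static enum fdir_state on_link_up(enum fdir_state s)
{
	if (s == FDIR_DIS_REQUEST)
		return FDIR_ACTIVE;		/* disable was never sent, still live in the PF */
	if (s == FDIR_DIS_PENDING || s == FDIR_INACTIVE)
		return FDIR_ADD_REQUEST;	/* re-add in the PF */
	return s;
}

int main(void)
{
	enum fdir_state s = FDIR_ACTIVE;

	s = on_link_down(s);	/* -> FDIR_DIS_REQUEST */
	s = on_link_up(s);	/* -> FDIR_ACTIVE */
	printf("final state: %d\n", s);
	return 0;
}
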
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index c862ebcd2e392e..e8d5b889addcb4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -277,27 +277,6 @@ void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem)
}
/**
- * iavf_lock_timeout - try to lock mutex but give up after timeout
- * @lock: mutex that should be locked
- * @msecs: timeout in msecs
- *
- * Returns 0 on success, negative on failure
- **/
-static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
-{
- unsigned int wait, delay = 10;
-
- for (wait = 0; wait < msecs; wait += delay) {
- if (mutex_trylock(lock))
- return 0;
-
- msleep(delay);
- }
-
- return -1;
-}
-
-/**
* iavf_schedule_reset - Set the flags and schedule a reset event
* @adapter: board private structure
* @flags: IAVF_FLAG_RESET_PENDING or IAVF_FLAG_RESET_NEEDED
@@ -1353,18 +1332,20 @@ static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
**/
static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
{
- struct iavf_fdir_fltr *fdir, *fdirtmp;
+ struct iavf_fdir_fltr *fdir;
/* remove all Flow Director filters */
spin_lock_bh(&adapter->fdir_fltr_lock);
- list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
- list) {
+ list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
- list_del(&fdir->list);
- kfree(fdir);
- adapter->fdir_active_fltr--;
- } else {
- fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ /* Cancel a request, keep filter as inactive */
+ fdir->state = IAVF_FDIR_FLTR_INACTIVE;
+ } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
+ fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
+ /* Disable filters which are active or have a pending
+ * request to PF to be added
+ */
+ fdir->state = IAVF_FDIR_FLTR_DIS_REQUEST;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -4113,6 +4094,33 @@ static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
}
/**
+ * iavf_restore_fdir_filters
+ * @adapter: board private structure
+ *
+ * Restore existing FDIR filters when VF netdev comes back up.
+ **/
+static void iavf_restore_fdir_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *f;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry(f, &adapter->fdir_list_head, list) {
+ if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
+ /* Cancel a request, keep filter as active */
+ f->state = IAVF_FDIR_FLTR_ACTIVE;
+ } else if (f->state == IAVF_FDIR_FLTR_DIS_PENDING ||
+ f->state == IAVF_FDIR_FLTR_INACTIVE) {
+ /* Add filters which are inactive or have a pending
+ * request to PF to be deleted
+ */
+ f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+}
+
+/**
* iavf_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -4179,8 +4187,9 @@ static int iavf_open(struct net_device *netdev)
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- /* Restore VLAN filters that were removed with IFF_DOWN */
+ /* Restore filters that were removed with IFF_DOWN */
iavf_restore_filters(adapter);
+ iavf_restore_fdir_filters(adapter);
iavf_configure(adapter);
@@ -4311,6 +4320,49 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
+/**
+ * iavf_disable_fdir - disable Flow Director and clear existing filters
+ * @adapter: board private structure
+ **/
+static void iavf_disable_fdir(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *fdir, *fdirtmp;
+ bool del_filters = false;
+
+ adapter->flags &= ~IAVF_FLAG_FDIR_ENABLED;
+
+ /* remove all Flow Director filters */
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
+ fdir->state == IAVF_FDIR_FLTR_INACTIVE) {
+ /* Delete filters not registered in PF */
+ list_del(&fdir->list);
+ kfree(fdir);
+ adapter->fdir_active_fltr--;
+ } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
+ fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
+ fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
+ /* Filters registered in PF, schedule their deletion */
+ fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ del_filters = true;
+ } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
+			/* A delete request was already sent to the PF; change
+			 * the state to DEL_PENDING so the filter is deleted
+			 * after the PF's response instead of going INACTIVE
+ */
+ fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (del_filters) {
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ }
+}
+
#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_HW_VLAN_STAG_RX | \
@@ -4336,6 +4388,13 @@ static int iavf_set_features(struct net_device *netdev,
((netdev->features & NETIF_F_RXFCS) ^ (features & NETIF_F_RXFCS)))
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
+ if ((netdev->features & NETIF_F_NTUPLE) ^ (features & NETIF_F_NTUPLE)) {
+ if (features & NETIF_F_NTUPLE)
+ adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
+ else
+ iavf_disable_fdir(adapter);
+ }
+
return 0;
}
@@ -4685,6 +4744,9 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
features = iavf_fix_netdev_vlan_features(adapter, features);
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ features &= ~NETIF_F_NTUPLE;
+
return iavf_fix_strip_features(adapter, features);
}
@@ -4802,6 +4864,12 @@ int iavf_process_config(struct iavf_adapter *adapter)
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (FDIR_FLTR_SUPPORT(adapter)) {
+ netdev->hw_features |= NETIF_F_NTUPLE;
+ netdev->features |= NETIF_F_NTUPLE;
+ adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
+ }
+
netdev->priv_flags |= IFF_UNICAST_FLT;
/* Do not turn on offloads when they are requested to be turned off.
@@ -4826,34 +4894,6 @@ int iavf_process_config(struct iavf_adapter *adapter)
}
/**
- * iavf_shutdown - Shutdown the device in preparation for a reboot
- * @pdev: pci device structure
- **/
-static void iavf_shutdown(struct pci_dev *pdev)
-{
- struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
- struct net_device *netdev = adapter->netdev;
-
- netif_device_detach(netdev);
-
- if (netif_running(netdev))
- iavf_close(netdev);
-
- if (iavf_lock_timeout(&adapter->crit_lock, 5000))
- dev_warn(&adapter->pdev->dev, "%s: failed to acquire crit_lock\n", __func__);
- /* Prevent the watchdog from running. */
- iavf_change_state(adapter, __IAVF_REMOVE);
- adapter->aq_required = 0;
- mutex_unlock(&adapter->crit_lock);
-
-#ifdef CONFIG_PM
- pci_save_state(pdev);
-
-#endif
- pci_disable_device(pdev);
-}
-
-/**
* iavf_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in iavf_pci_tbl
@@ -5063,16 +5103,21 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
**/
static void iavf_remove(struct pci_dev *pdev)
{
- struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
struct iavf_cloud_filter *cf, *cftmp;
struct iavf_adv_rss *rss, *rsstmp;
struct iavf_mac_filter *f, *ftmp;
+ struct iavf_adapter *adapter;
struct net_device *netdev;
struct iavf_hw *hw;
- netdev = adapter->netdev;
+ /* Don't proceed with remove if netdev is already freed */
+ netdev = pci_get_drvdata(pdev);
+ if (!netdev)
+ return;
+
+ adapter = iavf_pdev_to_adapter(pdev);
hw = &adapter->hw;
if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
@@ -5184,11 +5229,25 @@ static void iavf_remove(struct pci_dev *pdev)
destroy_workqueue(adapter->wq);
+ pci_set_drvdata(pdev, NULL);
+
free_netdev(netdev);
pci_disable_device(pdev);
}
+/**
+ * iavf_shutdown - Shutdown the device in preparation for a reboot
+ * @pdev: pci device structure
+ **/
+static void iavf_shutdown(struct pci_dev *pdev)
+{
+ iavf_remove(pdev);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
static struct pci_driver iavf_driver = {
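With the change above, iavf_shutdown() is reduced to iavf_remove() plus an optional drop to D3hot on power-off, and the remove path is made safe to enter twice by checking pci_get_drvdata() early and clearing it with pci_set_drvdata(pdev, NULL) at the end. Below is a minimal sketch of that idempotent-teardown pattern; my_dev, my_teardown() and the drvdata helpers are hypothetical stand-ins, not the PCI API.

/* Illustrative sketch (not from the patch): clear drvdata so teardown can run twice */
#include <stddef.h>
#include <stdio.h>

struct my_dev { void *drvdata; };

static void *get_drvdata(struct my_dev *d) { return d->drvdata; }
static void set_drvdata(struct my_dev *d, void *p) { d->drvdata = p; }

static void my_teardown(struct my_dev *d)
{
	/* Bail out if a previous teardown already ran */
	if (!get_drvdata(d))
		return;

	printf("releasing resources\n");
	set_drvdata(d, NULL);	/* mark teardown as done */
}

static void my_shutdown(struct my_dev *d)
{
	my_teardown(d);		/* reuse the full remove path */
	/* a real driver would drop to a low power state here */
}

int main(void)
{
	struct my_dev dev = { .drvdata = &dev };

	my_shutdown(&dev);	/* tears down */
	my_teardown(&dev);	/* second call is a harmless no-op */
	return 0;
}
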
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 64c4443dbef9ca..2d9366be0ec506 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1735,8 +1735,8 @@ void iavf_add_fdir_filter(struct iavf_adapter *adapter)
**/
void iavf_del_fdir_filter(struct iavf_adapter *adapter)
{
+ struct virtchnl_fdir_del f = {};
struct iavf_fdir_fltr *fdir;
- struct virtchnl_fdir_del f;
bool process_fltr = false;
int len;
@@ -1753,11 +1753,16 @@ void iavf_del_fdir_filter(struct iavf_adapter *adapter)
list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
process_fltr = true;
- memset(&f, 0, len);
f.vsi_id = fdir->vc_add_msg.vsi_id;
f.flow_id = fdir->flow_id;
fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
break;
+ } else if (fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
+ process_fltr = true;
+ f.vsi_id = fdir->vc_add_msg.vsi_id;
+ f.flow_id = fdir->flow_id;
+ fdir->state = IAVF_FDIR_FLTR_DIS_PENDING;
+ break;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -1902,6 +1907,48 @@ static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
}
/**
+ * iavf_activate_fdir_filters - Reactivate all FDIR filters after a reset
+ * @adapter: private adapter structure
+ *
+ * Called after a reset to re-add all FDIR filters and delete some of them
+ * if they were pending to be deleted.
+ */
+static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *f, *ftmp;
+ bool add_filters = false;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(f, ftmp, &adapter->fdir_list_head, list) {
+ if (f->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
+ f->state == IAVF_FDIR_FLTR_ADD_PENDING ||
+ f->state == IAVF_FDIR_FLTR_ACTIVE) {
+ /* All filters and requests have been removed in PF,
+ * restore them
+ */
+ f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ add_filters = true;
+ } else if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
+ f->state == IAVF_FDIR_FLTR_DIS_PENDING) {
+ /* Link down state, leave filters as inactive */
+ f->state = IAVF_FDIR_FLTR_INACTIVE;
+ } else if (f->state == IAVF_FDIR_FLTR_DEL_REQUEST ||
+ f->state == IAVF_FDIR_FLTR_DEL_PENDING) {
+ /* Delete filters that were pending to be deleted, the
+ * list on PF is already cleared after a reset
+ */
+ list_del(&f->list);
+ kfree(f);
+ adapter->fdir_active_fltr--;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (add_filters)
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+}
+
+/**
* iavf_virtchnl_completion
* @adapter: adapter structure
* @v_opcode: opcode sent by PF
@@ -2078,7 +2125,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(fdir, &adapter->fdir_list_head,
list) {
- if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
+ if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING ||
+ fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
iavf_stat_str(&adapter->hw,
@@ -2214,6 +2262,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ iavf_activate_fdir_filters(adapter);
+
iavf_parse_vf_resource_msg(adapter);
/* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
@@ -2390,7 +2440,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
list) {
if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
- if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
+ if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
+ del_fltr->status ==
+ VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
fdir->loc);
list_del(&fdir->list);
@@ -2402,6 +2454,17 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
del_fltr->status);
iavf_print_fdir_fltr(adapter, fdir);
}
+ } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
+ if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
+ del_fltr->status ==
+ VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
+ fdir->state = IAVF_FDIR_FLTR_INACTIVE;
+ } else {
+ fdir->state = IAVF_FDIR_FLTR_ACTIVE;
+ dev_info(&adapter->pdev->dev, "Failed to disable Flow Director filter with status: %d\n",
+ del_fltr->status);
+ iavf_print_fdir_fltr(adapter, fdir);
+ }
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index a34083567e6ff5..bde9bc74f92878 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1850,14 +1850,14 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
linkmode_zero(ks->link_modes.supported);
linkmode_zero(ks->link_modes.advertising);
- for (i = 0; i < BITS_PER_TYPE(u64); i++) {
+ for (i = 0; i < ARRAY_SIZE(phy_type_low_lkup); i++) {
if (phy_types_low & BIT_ULL(i))
ice_linkmode_set_bit(&phy_type_low_lkup[i], ks,
req_speeds, advert_phy_type_lo,
i);
}
- for (i = 0; i < BITS_PER_TYPE(u64); i++) {
+ for (i = 0; i < ARRAY_SIZE(phy_type_high_lkup); i++) {
if (phy_types_high & BIT_ULL(i))
ice_linkmode_set_bit(&phy_type_high_lkup[i], ks,
req_speeds, advert_phy_type_hi,
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 280994ee59330a..b47cd43ae87108 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -1981,6 +1981,8 @@ int ice_init_lag(struct ice_pf *pf)
int n, err;
ice_lag_init_feature_support_flag(pf);
+ if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
+ return 0;
pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL);
if (!pf->lag)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 4b1e56396293a4..1bad6e17f9befd 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2371,6 +2371,9 @@ static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
} else {
max_txqs[i] = vsi->alloc_txq;
}
+
+ if (vsi->type == ICE_VSI_PF)
+ max_txqs[i] += vsi->num_xdp_txq;
}
dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
@@ -2620,10 +2623,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_VF &&
vsi->agg_node && vsi->agg_node->valid)
vsi->agg_node->num_vsis--;
- if (vsi->agg_node) {
- vsi->agg_node->valid = false;
- vsi->agg_node->agg_id = 0;
- }
}
/**
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 552970c7dec097..a9bdf3283a852f 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -1193,6 +1193,13 @@ int octep_device_setup(struct octep_device *oct)
if (ret)
return ret;
+ INIT_WORK(&oct->tx_timeout_task, octep_tx_timeout_task);
+ INIT_WORK(&oct->ctrl_mbox_task, octep_ctrl_mbox_task);
+ INIT_DELAYED_WORK(&oct->intr_poll_task, octep_intr_poll_task);
+ oct->poll_non_ioq_intr = true;
+ queue_delayed_work(octep_wq, &oct->intr_poll_task,
+ msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
+
atomic_set(&oct->hb_miss_cnt, 0);
INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task);
@@ -1258,7 +1265,8 @@ static bool get_fw_ready_status(struct pci_dev *pdev)
pci_read_config_byte(pdev, (pos + 8), &status);
dev_info(&pdev->dev, "Firmware ready status = %u\n", status);
- return status;
+#define FW_STATUS_READY 1ULL
+ return status == FW_STATUS_READY;
}
return false;
}
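Returning the raw status byte treated any nonzero value, including a failed config-space read, as "firmware ready"; comparing against an explicit ready value avoids that. A tiny sketch of the same check follows; the value 1 is taken from the patch's FW_STATUS_READY define, everything else is illustrative.

/* Illustrative sketch (not from the patch): compare against an explicit ready value */
#include <stdbool.h>
#include <stdio.h>

#define FW_STATUS_READY 1

static bool fw_ready(unsigned char status)
{
	/* Only the documented "ready" value counts; 0xff (e.g. a bad read)
	 * or any other code is treated as not ready.
	 */
	return status == FW_STATUS_READY;
}

int main(void)
{
	printf("%d %d %d\n", fw_ready(0), fw_ready(1), fw_ready(0xff)); /* 0 1 0 */
	return 0;
}
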
@@ -1325,21 +1333,18 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_octep_config;
}
- octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
- &octep_dev->conf->fw_info);
+ err = octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
+ &octep_dev->conf->fw_info);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get firmware info\n");
+ goto register_dev_err;
+ }
dev_info(&octep_dev->pdev->dev, "Heartbeat interval %u msecs Heartbeat miss count %u\n",
octep_dev->conf->fw_info.hb_interval,
octep_dev->conf->fw_info.hb_miss_count);
queue_delayed_work(octep_wq, &octep_dev->hb_task,
msecs_to_jiffies(octep_dev->conf->fw_info.hb_interval));
- INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task);
- INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task);
- INIT_DELAYED_WORK(&octep_dev->intr_poll_task, octep_intr_poll_task);
- octep_dev->poll_non_ioq_intr = true;
- queue_delayed_work(octep_wq, &octep_dev->intr_poll_task,
- msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
-
netdev->netdev_ops = &octep_netdev_ops;
octep_set_ethtool_ops(netdev);
netif_carrier_off(netdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index af21e2030cff28..4728ba34b0e34c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -373,6 +373,11 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ /* Disable forward pause to driver */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
/* Enable channel mask for all LMACS */
if (is_dev_rpm2(rpm))
rpm_write(rpm, lmac_id, RPM2_CMR_CHAN_MSK_OR, 0xffff);
@@ -616,12 +621,10 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
if (rx_pause) {
cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
- RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
- RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
} else {
cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
- RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
- RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
}
if (tx_pause) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 62780d8b4f9ac8..21b5d71c1e3758 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -538,7 +538,7 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
if (!rvu_dl->devlink_wq)
- goto err;
+ return -ENOMEM;
INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
@@ -546,9 +546,6 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);
return 0;
-err:
- rvu_nix_health_reporters_destroy(rvu_dl);
- return -ENOMEM;
}
static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index f65805860c8d4e..0bcf3e55928065 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -671,6 +671,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
int blkaddr, ucast_idx, index;
struct nix_rx_action action = { 0 };
u64 relaxed_mask;
+ u8 flow_key_alg;
if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
return;
@@ -701,6 +702,8 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
action.op = NIX_RX_ACTIONOP_UCAST;
}
+ flow_key_alg = action.flow_key_alg;
+
/* RX_ACTION set to MCAST for CGX PF's */
if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
@@ -740,7 +743,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
req.vf = pcifunc;
req.index = action.index;
req.match_id = action.match_id;
- req.flow_key_alg = action.flow_key_alg;
+ req.flow_key_alg = flow_key_alg;
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
@@ -854,6 +857,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u8 mac_addr[ETH_ALEN] = { 0 };
struct nix_rx_action action = { 0 };
struct rvu_pfvf *pfvf;
+ u8 flow_key_alg;
u16 vf_func;
/* Only CGX PF/VF can add allmulticast entry */
@@ -888,6 +892,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
blkaddr, ucast_idx);
+ flow_key_alg = action.flow_key_alg;
if (action.op != NIX_RX_ACTIONOP_RSS) {
*(u64 *)&action = 0;
action.op = NIX_RX_ACTIONOP_UCAST;
@@ -924,7 +929,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
req.vf = pcifunc | vf_func;
req.index = action.index;
req.match_id = action.match_id;
- req.flow_key_alg = action.flow_key_alg;
+ req.flow_key_alg = flow_key_alg;
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
@@ -990,11 +995,38 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
mutex_unlock(&mcam->lock);
}
+static void npc_update_rx_action_with_alg_idx(struct rvu *rvu, struct nix_rx_action action,
+ struct rvu_pfvf *pfvf, int mcam_index, int blkaddr,
+ int alg_idx)
+
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int bank, op_rss;
+
+ if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_index))
+ return;
+
+ op_rss = (!hw->cap.nix_rx_multicast || !pfvf->use_mce_list);
+
+ bank = npc_get_bank(mcam, mcam_index);
+ mcam_index &= (mcam->banksize - 1);
+
+ /* If Rx action is MCAST update only RSS algorithm index */
+ if (!op_rss) {
+ *(u64 *)&action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank));
+
+ action.flow_key_alg = alg_idx;
+ }
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank), *(u64 *)&action);
+}
+
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
- struct rvu_hwinfo *hw = rvu->hw;
struct nix_rx_action action;
int blkaddr, index, bank;
struct rvu_pfvf *pfvf;
@@ -1050,15 +1082,16 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
/* If PF's promiscuous entry is enabled,
* Set RSS action for that entry as well
*/
- if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) &&
- is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
- bank = npc_get_bank(mcam, index);
- index &= (mcam->banksize - 1);
+ npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
+ alg_idx);
- rvu_write64(rvu, blkaddr,
- NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
- *(u64 *)&action);
- }
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_ALLMULTI_ENTRY);
+ /* If PF's allmulti entry is enabled,
+ * Set RSS action for that entry as well
+ */
+ npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
+ alg_idx);
}
void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index bfddbff7bcdfbf..28fb643d2917f7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -399,9 +399,10 @@ static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
struct otx2_nic *pfvf = netdev_priv(dev);
+ u8 old_pfc_en;
int err;
- /* Save PFC configuration to interface */
+ old_pfc_en = pfvf->pfc_en;
pfvf->pfc_en = pfc->pfc_en;
if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)
@@ -411,13 +412,17 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
* supported by the tx queue configuration
*/
err = otx2_check_pfc_config(pfvf);
- if (err)
+ if (err) {
+ pfvf->pfc_en = old_pfc_en;
return err;
+ }
process_pfc:
err = otx2_config_priority_flow_ctrl(pfvf);
- if (err)
+ if (err) {
+ pfvf->pfc_en = old_pfc_en;
return err;
+ }
/* Request Per channel Bpids */
if (pfc->pfc_en)
@@ -425,6 +430,12 @@ process_pfc:
err = otx2_pfc_txschq_update(pfvf);
if (err) {
+ if (pfc->pfc_en)
+ otx2_nix_config_bp(pfvf, false);
+
+ otx2_pfc_txschq_stop(pfvf);
+ pfvf->pfc_en = old_pfc_en;
+ otx2_config_priority_flow_ctrl(pfvf);
dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
return err;
}
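The setpfc hunk follows a save-then-rollback pattern: the previous pfc_en is captured up front and restored on every failure, and the final error path additionally unwinds the backpressure and txschq configuration before reapplying the old setting. A compact sketch of that pattern with hypothetical helpers (apply_hw_config() stands in for the mailbox/txschq calls):

/* Illustrative sketch (not from the patch): restore the old config when a later step fails */
#include <stdio.h>

struct nic { unsigned char pfc_en; };

static int apply_hw_config(struct nic *n, int should_fail)
{
	(void)n;
	return should_fail ? -1 : 0;
}

static int set_pfc(struct nic *n, unsigned char new_en, int should_fail)
{
	unsigned char old_pfc_en = n->pfc_en;
	int err;

	n->pfc_en = new_en;
	err = apply_hw_config(n, should_fail);
	if (err) {
		/* put the software state back in sync with the hardware */
		n->pfc_en = old_pfc_en;
		apply_hw_config(n, 0);
		return err;
	}
	return 0;
}

int main(void)
{
	struct nic n = { .pfc_en = 0x0f };

	set_pfc(&n, 0xff, 1);				/* fails, rolls back */
	printf("pfc_en = 0x%02x\n", (unsigned)n.pfc_en);	/* 0x0f */
	return 0;
}
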
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 0c17ebdda14873..a57455aebff6fc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1650,6 +1650,21 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
mutex_unlock(&mbox->lock);
}
+static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf)
+{
+ int vf;
+
+ /* The AF driver will determine whether to allow the VF netdev or not */
+ if (is_otx2_vf(pfvf->pcifunc))
+ return true;
+
+ /* check if there are any trusted VFs associated with the PF netdev */
+ for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++)
+ if (pfvf->vf_configs[vf].trusted)
+ return true;
+ return false;
+}
+
static void otx2_do_set_rx_mode(struct otx2_nic *pf)
{
struct net_device *netdev = pf->netdev;
@@ -1682,7 +1697,8 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf)
if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
req->mode |= NIX_RX_MODE_ALLMULTI;
- req->mode |= NIX_RX_MODE_USE_MCE;
+ if (otx2_promisc_use_mce_list(pf))
+ req->mode |= NIX_RX_MODE_USE_MCE;
otx2_sync_mbox_msg(&pf->mbox);
mutex_unlock(&pf->mbox.lock);
@@ -2691,11 +2707,14 @@ static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
pf->vf_configs[vf].trusted = enable;
rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);
- if (rc)
+ if (rc) {
pf->vf_configs[vf].trusted = !enable;
- else
+ } else {
netdev_info(pf->netdev, "VF %d is %strusted\n",
vf, enable ? "" : "not ");
+ otx2_set_rx_mode(netdev);
+ }
+
return rc;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index 3bd51a3d665001..ae44ad5f8ce8a1 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -291,6 +291,9 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
for (i = 0; i < q->n_desc; i++) {
struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
+ if (!entry->buf)
+ continue;
+
dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
DMA_TO_DEVICE);
skb_free_frag(entry->buf);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index f8f0a712c943f2..a7b1f9686c09a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -156,15 +156,18 @@ static u8 alloc_token(struct mlx5_cmd *cmd)
return token;
}
-static int cmd_alloc_index(struct mlx5_cmd *cmd)
+static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&cmd->alloc_lock, flags);
ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
- if (ret < cmd->vars.max_reg_cmds)
+ if (ret < cmd->vars.max_reg_cmds) {
clear_bit(ret, &cmd->vars.bitmask);
+ ent->idx = ret;
+ cmd->ent_arr[ent->idx] = ent;
+ }
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM;
@@ -979,7 +982,7 @@ static void cmd_work_handler(struct work_struct *work)
sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
down(sem);
if (!ent->page_queue) {
- alloc_ret = cmd_alloc_index(cmd);
+ alloc_ret = cmd_alloc_index(cmd, ent);
if (alloc_ret < 0) {
mlx5_core_err_rl(dev, "failed to allocate command entry\n");
if (ent->callback) {
@@ -994,15 +997,14 @@ static void cmd_work_handler(struct work_struct *work)
up(sem);
return;
}
- ent->idx = alloc_ret;
} else {
ent->idx = cmd->vars.max_reg_cmds;
spin_lock_irqsave(&cmd->alloc_lock, flags);
clear_bit(ent->idx, &cmd->vars.bitmask);
+ cmd->ent_arr[ent->idx] = ent;
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
- cmd->ent_arr[ent->idx] = ent;
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
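Moving the ent_arr[] assignment into the same alloc_lock critical section that clears the bitmap bit closes the window in which a completion could find the index claimed while the entry pointer was still stale. A small pthread-based sketch of the allocate-and-publish-under-one-lock idea; the names are illustrative and it builds with -pthread.

/* Illustrative sketch (not from the patch): claim a slot and publish its owner under one lock */
#include <pthread.h>
#include <stdio.h>

#define MAX_CMDS 8

struct entry { int idx; };

static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long bitmask = (1UL << MAX_CMDS) - 1;	/* 1 = slot free */
static struct entry *ent_arr[MAX_CMDS];

static int alloc_index(struct entry *ent)
{
	int ret = -1;

	pthread_mutex_lock(&alloc_lock);
	for (int i = 0; i < MAX_CMDS; i++) {
		if (bitmask & (1UL << i)) {
			bitmask &= ~(1UL << i);
			ent->idx = i;
			ent_arr[i] = ent;	/* publish under the same lock as the bit clear */
			ret = i;
			break;
		}
	}
	pthread_mutex_unlock(&alloc_lock);
	return ret;
}

int main(void)
{
	struct entry e;

	printf("got slot %d\n", alloc_index(&e));
	return 0;
}
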
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 76d27d2ee40cbb..080e7eab52c7e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -718,7 +718,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
while (block_timestamp > tracer->last_timestamp) {
/* Check block override if it's not the first block */
- if (!tracer->last_timestamp) {
+ if (tracer->last_timestamp) {
u64 *ts_event;
/* To avoid block override by the HW in case of buffer
* wraparound, the time stamp of the previous block
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b2a5da9739d2b4..729a11b5fb2524 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -826,6 +826,7 @@ enum {
MLX5E_STATE_DESTROYING,
MLX5E_STATE_XDP_TX_ENABLED,
MLX5E_STATE_XDP_ACTIVE,
+ MLX5E_STATE_CHANNELS_ACTIVE,
};
struct mlx5e_modify_sq_param {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
index be83ad9db82a47..e1283531e0b810 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
@@ -154,6 +154,7 @@ static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type ty
in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
kfree(ft->g);
+ ft->g = NULL;
kvfree(in);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index f63402c480280c..1b418095b79a39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -197,7 +197,7 @@ parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state,
}
esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP;
esw_attr->out_count++;
- /* attr->dests[].rep is resolved when we handle encap */
+ /* attr->dests[].vport is resolved when we handle encap */
return 0;
}
@@ -270,7 +270,8 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
out_priv = netdev_priv(out_dev);
rpriv = out_priv->ppriv;
- esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
+ esw_attr->dests[esw_attr->out_count].vport_valid = true;
+ esw_attr->dests[esw_attr->out_count].vport = rpriv->rep->vport;
esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
esw_attr->out_count++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index 4e923a2874aefe..86bf007fd05b73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -83,6 +83,9 @@ mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
struct mlx5_flow_spec *spec;
int err;
+ if (IS_ERR(post_act))
+ return PTR_ERR(post_act);
+
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
@@ -111,6 +114,9 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *po
struct mlx5e_post_act_handle *handle;
int err;
+ if (IS_ERR(post_act))
+ return ERR_CAST(post_act);
+
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 668da5c70e63de..8dfb57f712b0dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -300,6 +300,10 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
if (err)
goto destroy_neigh_entry;
+ e->encap_size = ipv4_encap_size;
+ e->encap_header = encap_header;
+ encap_header = NULL;
+
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event
@@ -310,8 +314,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
- reformat_params.size = ipv4_encap_size;
- reformat_params.data = encap_header;
+ reformat_params.size = e->encap_size;
+ reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
@@ -319,8 +323,6 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
goto destroy_neigh_entry;
}
- e->encap_size = ipv4_encap_size;
- e->encap_header = encap_header;
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
mlx5e_route_lookup_ipv4_put(&attr);
@@ -403,18 +405,23 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
if (err)
goto free_encap;
+ e->encap_size = ipv4_encap_size;
+ kfree(e->encap_header);
+ e->encap_header = encap_header;
+ encap_header = NULL;
+
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event
* and not used before that.
*/
- goto free_encap;
+ goto release_neigh;
}
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
- reformat_params.size = ipv4_encap_size;
- reformat_params.data = encap_header;
+ reformat_params.size = e->encap_size;
+ reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
@@ -422,10 +429,6 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
goto free_encap;
}
- e->encap_size = ipv4_encap_size;
- kfree(e->encap_header);
- e->encap_header = encap_header;
-
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
mlx5e_route_lookup_ipv4_put(&attr);
@@ -567,6 +570,10 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
if (err)
goto destroy_neigh_entry;
+ e->encap_size = ipv6_encap_size;
+ e->encap_header = encap_header;
+ encap_header = NULL;
+
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event
@@ -577,8 +584,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
- reformat_params.size = ipv6_encap_size;
- reformat_params.data = encap_header;
+ reformat_params.size = e->encap_size;
+ reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
@@ -586,8 +593,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
goto destroy_neigh_entry;
}
- e->encap_size = ipv6_encap_size;
- e->encap_header = encap_header;
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
mlx5e_route_lookup_ipv6_put(&attr);
@@ -669,18 +674,23 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
if (err)
goto free_encap;
+ e->encap_size = ipv6_encap_size;
+ kfree(e->encap_header);
+ e->encap_header = encap_header;
+ encap_header = NULL;
+
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event
* and not used before that.
*/
- goto free_encap;
+ goto release_neigh;
}
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
- reformat_params.size = ipv6_encap_size;
- reformat_params.data = encap_header;
+ reformat_params.size = e->encap_size;
+ reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
@@ -688,10 +698,6 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
goto free_encap;
}
- e->encap_size = ipv6_encap_size;
- kfree(e->encap_header);
- e->encap_header = encap_header;
-
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
mlx5e_route_lookup_ipv6_put(&attr);
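Each of the four tc_tun hunks stores the freshly built header on the encap entry and NULLs the local pointer before the early "neighbour not yet valid" exit, so the entry owns the buffer and the later free path no longer releases memory that was handed over. A minimal sketch of that ownership hand-off, with hypothetical names:

/* Illustrative sketch (not from the patch): transfer buffer ownership before early returns */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct encap_entry { char *header; size_t size; };

static int build_encap(struct encap_entry *e, int neigh_valid)
{
	char *hdr = malloc(64);

	if (!hdr)
		return -1;
	memset(hdr, 0, 64);

	/* hand ownership to the entry *before* any early return */
	free(e->header);
	e->header = hdr;
	e->size = 64;
	hdr = NULL;

	if (!neigh_valid)
		goto release;	/* early exit: the entry already owns the header */

	/* ... program the hardware using e->header / e->size ... */
	return 0;

release:
	free(hdr);	/* hdr was NULLed after the hand-off, so this never double-frees */
	return 0;
}

int main(void)
{
	struct encap_entry e = { 0 };

	build_encap(&e, 0);
	printf("header size %zu\n", e.size);
	free(e.header);
	return 0;
}
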
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index b10e40e1a9c141..f1d1e1542e81b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -1064,7 +1064,8 @@ int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv,
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
- esw_attr->dests[out_index].rep = rpriv->rep;
+ esw_attr->dests[out_index].vport_valid = true;
+ esw_attr->dests[out_index].vport = rpriv->rep->vport;
esw_attr->dests[out_index].mdev = out_priv->mdev;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 7decc81ed33a9e..13c7ed1bb37e93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -493,6 +493,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
dma_addr_t dma_addr = xdptxd->dma_addr;
u32 dma_len = xdptxd->len;
u16 ds_cnt, inline_hdr_sz;
+ unsigned int frags_size;
u8 num_wqebbs = 1;
int num_frags = 0;
bool inline_ok;
@@ -503,8 +504,9 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
inline_ok = sq->min_inline_mode == MLX5_INLINE_MODE_NONE ||
dma_len >= MLX5E_XDP_MIN_INLINE;
+ frags_size = xdptxd->has_frags ? xdptxdf->sinfo->xdp_frags_size : 0;
- if (unlikely(!inline_ok || sq->hw_mtu < dma_len)) {
+ if (unlikely(!inline_ok || sq->hw_mtu < dma_len + frags_size)) {
stats->err++;
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 655496598c6884..161c5190c236a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -121,7 +121,14 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
- sa_entry->esn_state.esn = esn;
+ if (sa_entry->esn_state.esn_msb)
+ sa_entry->esn_state.esn = esn;
+ else
+ /* According to RFC4303, section "3.3.3. Sequence Number Generation",
+ * the first packet sent using a given SA will contain a sequence
+ * number of 1.
+ */
+ sa_entry->esn_state.esn = max_t(u32, esn, 1);
sa_entry->esn_state.esn_msb = esn_msb;
if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
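In the ESN hunk above, the computed value is kept as-is once esn_msb is nonzero, but a fresh SA is clamped to at least 1, matching RFC 4303 section 3.3.3, where the first packet sent on an SA carries sequence number 1. A tiny standalone sketch of that clamp:

/* Illustrative sketch (not from the patch): never program ESN 0 for a brand-new SA */
#include <stdint.h>
#include <stdio.h>

static uint32_t pick_esn(uint32_t computed_esn, uint32_t esn_msb)
{
	if (esn_msb)
		return computed_esn;	/* SA already wrapped, keep the real value */
	return computed_esn > 1 ? computed_esn : 1;
}

int main(void)
{
	printf("%u %u %u\n", (unsigned)pick_esn(0, 0), (unsigned)pick_esn(5, 0),
	       (unsigned)pick_esn(0, 2));	/* prints: 1 5 0 */
	return 0;
}
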
@@ -335,6 +342,27 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->replay_esn.esn = sa_entry->esn_state.esn;
attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
+ switch (x->replay_esn->replay_window) {
+ case 32:
+ attrs->replay_esn.replay_window =
+ MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
+ break;
+ case 64:
+ attrs->replay_esn.replay_window =
+ MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
+ break;
+ case 128:
+ attrs->replay_esn.replay_window =
+ MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
+ break;
+ case 256:
+ attrs->replay_esn.replay_window =
+ MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
+ break;
+ default:
+ WARN_ON(true);
+ return;
+ }
}
attrs->dir = x->xso.dir;
@@ -907,9 +935,11 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
return;
mlx5e_accel_ipsec_fs_cleanup(ipsec);
- if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
+ if (ipsec->netevent_nb.notifier_call) {
unregister_netevent_notifier(&ipsec->netevent_nb);
- if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+ ipsec->netevent_nb.notifier_call = NULL;
+ }
+ if (ipsec->aso)
mlx5e_ipsec_aso_cleanup(ipsec);
destroy_workqueue(ipsec->wq);
kfree(ipsec);
@@ -1018,6 +1048,12 @@ static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
}
}
+ if (x->xdo.type == XFRM_DEV_OFFLOAD_PACKET &&
+ !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
+ NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -1113,14 +1149,6 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_free = mlx5e_xfrm_free_state,
.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
-};
-
-static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
- .xdo_dev_state_add = mlx5e_xfrm_add_state,
- .xdo_dev_state_delete = mlx5e_xfrm_del_state,
- .xdo_dev_state_free = mlx5e_xfrm_free_state,
- .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
- .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
.xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
@@ -1138,11 +1166,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
- netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops;
- else
- netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
-
+ netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
netdev->features |= NETIF_F_HW_ESP;
netdev->hw_enc_features |= NETIF_F_HW_ESP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 8f4a37bceaf455..adaea3493193ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -189,11 +189,19 @@ struct mlx5e_ipsec_ft {
u32 refcnt;
};
+struct mlx5e_ipsec_drop {
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *fc;
+};
+
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_fc *fc;
+ struct mlx5e_ipsec_drop replay;
+ struct mlx5e_ipsec_drop auth;
+ struct mlx5e_ipsec_drop trailer;
};
struct mlx5e_ipsec_miss {
@@ -201,19 +209,6 @@ struct mlx5e_ipsec_miss {
struct mlx5_flow_handle *rule;
};
-struct mlx5e_ipsec_rx {
- struct mlx5e_ipsec_ft ft;
- struct mlx5e_ipsec_miss pol;
- struct mlx5e_ipsec_miss sa;
- struct mlx5e_ipsec_rule status;
- struct mlx5e_ipsec_miss status_drop;
- struct mlx5_fc *status_drop_cnt;
- struct mlx5e_ipsec_fc *fc;
- struct mlx5_fs_chains *chains;
- u8 allow_tunnel_mode : 1;
- struct xarray ipsec_obj_id_map;
-};
-
struct mlx5e_ipsec_tx_create_attr {
int prio;
int pol_level;
@@ -248,6 +243,7 @@ struct mlx5e_ipsec {
struct mlx5_ipsec_fs *roce;
u8 is_uplink_rep: 1;
struct mlx5e_ipsec_mpv_work mpv_work;
+ struct xarray ipsec_obj_id_map;
};
struct mlx5e_ipsec_esn_state {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index f41c976dc33f93..41a2543a52cda0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -32,6 +32,22 @@ struct mlx5e_ipsec_tx {
u8 allow_tunnel_mode : 1;
};
+struct mlx5e_ipsec_status_checks {
+ struct mlx5_flow_group *drop_all_group;
+ struct mlx5e_ipsec_drop all;
+};
+
+struct mlx5e_ipsec_rx {
+ struct mlx5e_ipsec_ft ft;
+ struct mlx5e_ipsec_miss pol;
+ struct mlx5e_ipsec_miss sa;
+ struct mlx5e_ipsec_rule status;
+ struct mlx5e_ipsec_status_checks status_drops;
+ struct mlx5e_ipsec_fc *fc;
+ struct mlx5_fs_chains *chains;
+ u8 allow_tunnel_mode : 1;
+};
+
/* IPsec RX flow steering */
static enum mlx5_traffic_types family2tt(u32 family)
{
@@ -128,14 +144,37 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}
-static int ipsec_status_rule(struct mlx5_core_dev *mdev,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
+static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ mlx5_del_flow_rules(rx->status_drops.all.rule);
+ mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc);
+ mlx5_destroy_flow_group(rx->status_drops.drop_all_group);
+}
+
+static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
{
- u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ mlx5_del_flow_rules(rx->status.rule);
+
+ if (rx != ipsec->rx_esw)
+ return;
+
+#ifdef CONFIG_MLX5_ESWITCH
+ mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
+#endif
+}
+
+static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_rx *rx)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
- struct mlx5_modify_hdr *modify_hdr;
- struct mlx5_flow_handle *fte;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *flow_counter;
struct mlx5_flow_spec *spec;
int err;
@@ -143,48 +182,273 @@ static int ipsec_status_rule(struct mlx5_core_dev *mdev,
if (!spec)
return -ENOMEM;
- /* Action to copy 7 bit ipsec_syndrome to regB[24:30] */
- MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
- MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
- MLX5_SET(copy_action_in, action, src_offset, 0);
- MLX5_SET(copy_action_in, action, length, 7);
- MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
- MLX5_SET(copy_action_in, action, dst_offset, 24);
+ flow_counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt;
+ }
+ sa_entry->ipsec_rule.auth.fc = flow_counter;
- modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
- 1, action);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ if (rx == ipsec->rx_esw)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
- if (IS_ERR(modify_hdr)) {
- err = PTR_ERR(modify_hdr);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_2,
+ sa_entry->ipsec_obj_id | BIT(31));
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
mlx5_core_err(mdev,
- "fail to alloc ipsec copy modify_header_id err=%d\n", err);
- goto out_spec;
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule;
}
+ sa_entry->ipsec_rule.auth.rule = rule;
- /* create fte */
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ flow_counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt_2;
+ }
+ sa_entry->ipsec_rule.trailer.fc = flow_counter;
+
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule_2;
+ }
+ sa_entry->ipsec_rule.trailer.rule = rule;
+
+ kvfree(spec);
+ return 0;
+
+err_rule_2:
+ mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.trailer.fc);
+err_cnt_2:
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.auth.rule);
+err_rule:
+ mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.auth.fc);
+err_cnt:
+ kvfree(spec);
+ return err;
+}
+
+static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5e_ipsec_rx *rx)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *flow_counter;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ flow_counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ if (rx == ipsec->rx_esw)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2,
+ sa_entry->ipsec_obj_id | BIT(31));
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule;
+ }
+
+ sa_entry->ipsec_rule.replay.rule = rule;
+ sa_entry->ipsec_rule.replay.fc = flow_counter;
+
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(mdev, flow_counter);
+err_cnt:
+ kvfree(spec);
+ return err;
+}
+
+static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *flow_counter;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!flow_group_in || !spec) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
+ g = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop flow group, err=%d\n", err);
+ goto err_out;
+ }
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ if (rx == ipsec->rx_esw)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule;
+ }
+
+ rx->status_drops.drop_all_group = g;
+ rx->status_drops.all.rule = rule;
+ rx->status_drops.all.fc = flow_counter;
+
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(mdev, flow_counter);
+err_cnt:
+ mlx5_destroy_flow_group(g);
+err_out:
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return err;
+}
+
+static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.ipsec_syndrome, 0);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_4, 0);
+ if (rx == ipsec->rx_esw)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
- flow_act.modify_hdr = modify_hdr;
- fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
- if (IS_ERR(fte)) {
- err = PTR_ERR(fte);
- mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
- goto out;
+ rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(ipsec->mdev,
+ "Failed to add ipsec rx status pass rule, err=%d\n", err);
+ goto err_rule;
}
+ rx->status.rule = rule;
kvfree(spec);
- rx->status.rule = fte;
- rx->status.modify_hdr = modify_hdr;
return 0;
-out:
- mlx5_modify_header_dealloc(mdev, modify_hdr);
-out_spec:
+err_rule:
kvfree(spec);
return err;
}
+static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ ipsec_rx_status_pass_destroy(ipsec, rx);
+ ipsec_rx_status_drop_destroy(ipsec, rx);
+}
+
+static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ int err;
+
+ err = ipsec_rx_status_drop_all_create(ipsec, rx);
+ if (err)
+ return err;
+
+ err = ipsec_rx_status_pass_create(ipsec, rx, dest);
+ if (err)
+ goto err_pass_create;
+
+ return 0;
+
+err_pass_create:
+ ipsec_rx_status_drop_destroy(ipsec, rx);
+ return err;
+}
+
static int ipsec_miss_create(struct mlx5_core_dev *mdev,
struct mlx5_flow_table *ft,
struct mlx5e_ipsec_miss *miss,
@@ -333,12 +597,7 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_destroy_flow_table(rx->ft.sa);
if (rx->allow_tunnel_mode)
mlx5_eswitch_unblock_encap(mdev);
- if (rx == ipsec->rx_esw) {
- mlx5_esw_ipsec_rx_status_destroy(ipsec, rx);
- } else {
- mlx5_del_flow_rules(rx->status.rule);
- mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
- }
+ mlx5_ipsec_rx_status_destroy(ipsec, rx);
mlx5_destroy_flow_table(rx->ft.status);
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
@@ -419,7 +678,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (err)
return err;
- ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 1, 0);
+ ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft_status;
@@ -428,10 +687,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
- if (rx == ipsec->rx_esw)
- err = mlx5_esw_ipsec_rx_status_create(ipsec, rx, dest);
- else
- err = ipsec_status_rule(mdev, rx, dest);
+ err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
if (err)
goto err_add;
@@ -956,13 +1212,22 @@ static void setup_fte_esp(struct mlx5_flow_spec *spec)
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
}
-static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi)
+static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
{
/* SPI number */
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
+ if (encap) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters.inner_esp_spi);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters.inner_esp_spi, spi);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters.outer_esp_spi);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters.outer_esp_spi, spi);
+ }
}
static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
@@ -1052,29 +1317,48 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
struct mlx5_flow_act *flow_act)
{
enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
- u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ u8 action[3][MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_modify_hdr *modify_hdr;
+ u8 num_of_actions = 1;
- MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action[0], action_type, MLX5_ACTION_TYPE_SET);
switch (dir) {
case XFRM_DEV_OFFLOAD_IN:
- MLX5_SET(set_action_in, action, field,
+ MLX5_SET(set_action_in, action[0], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+
+ num_of_actions++;
+ MLX5_SET(set_action_in, action[1], action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action[1], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_2);
+ MLX5_SET(set_action_in, action[1], data, val);
+ MLX5_SET(set_action_in, action[1], offset, 0);
+ MLX5_SET(set_action_in, action[1], length, 32);
+
+ if (type == XFRM_DEV_OFFLOAD_CRYPTO) {
+ num_of_actions++;
+ MLX5_SET(set_action_in, action[2], action_type,
+ MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action[2], field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
+ MLX5_SET(set_action_in, action[2], data, 0);
+ MLX5_SET(set_action_in, action[2], offset, 0);
+ MLX5_SET(set_action_in, action[2], length, 32);
+ }
break;
case XFRM_DEV_OFFLOAD_OUT:
- MLX5_SET(set_action_in, action, field,
+ MLX5_SET(set_action_in, action[0], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
break;
default:
return -EINVAL;
}
- MLX5_SET(set_action_in, action, data, val);
- MLX5_SET(set_action_in, action, offset, 0);
- MLX5_SET(set_action_in, action, length, 32);
+ MLX5_SET(set_action_in, action[0], data, val);
+ MLX5_SET(set_action_in, action[0], offset, 0);
+ MLX5_SET(set_action_in, action[0], length, 32);
- modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
+ modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
if (IS_ERR(modify_hdr)) {
mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
PTR_ERR(modify_hdr));
@@ -1321,8 +1605,9 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
else
setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
- setup_fte_spi(spec, attrs->spi);
- setup_fte_esp(spec);
+ setup_fte_spi(spec, attrs->spi, attrs->encap);
+ if (!attrs->encap)
+ setup_fte_esp(spec);
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
@@ -1372,6 +1657,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
goto err_add_flow;
}
+ if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
+ err = rx_add_rule_drop_replay(sa_entry, rx);
+ if (err)
+ goto err_add_replay;
+
+ err = rx_add_rule_drop_auth_trailer(sa_entry, rx);
+ if (err)
+ goto err_drop_reason;
+
kvfree(spec);
sa_entry->ipsec_rule.rule = rule;
@@ -1380,6 +1674,13 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
return 0;
+err_drop_reason:
+ if (sa_entry->ipsec_rule.replay.rule) {
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.replay.rule);
+ mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
+ }
+err_add_replay:
+ mlx5_del_flow_rules(rule);
err_add_flow:
mlx5_fc_destroy(mdev, counter);
err_add_cnt:
@@ -1428,7 +1729,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
- setup_fte_spi(spec, attrs->spi);
+ setup_fte_spi(spec, attrs->spi, false);
setup_fte_esp(spec);
setup_fte_reg_a(spec);
break;
@@ -1809,8 +2110,11 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int err = 0;
- if (esw)
- down_write(&esw->mode_lock);
+ if (esw) {
+ err = mlx5_esw_lock(esw);
+ if (err)
+ return err;
+ }
if (mdev->num_block_ipsec) {
err = -EBUSY;
@@ -1821,7 +2125,7 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
unlock:
if (esw)
- up_write(&esw->mode_lock);
+ mlx5_esw_unlock(esw);
return err;
}
@@ -1838,7 +2142,7 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev)
{
- mdev->num_block_tc++;
+ mdev->num_block_tc--;
}
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -1887,6 +2191,17 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
if (ipsec_rule->modify_hdr)
mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+
+ mlx5_del_flow_rules(ipsec_rule->trailer.rule);
+ mlx5_fc_destroy(mdev, ipsec_rule->trailer.fc);
+
+ mlx5_del_flow_rules(ipsec_rule->auth.rule);
+ mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
+
+ if (ipsec_rule->replay.rule) {
+ mlx5_del_flow_rules(ipsec_rule->replay.rule);
+ mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
+ }
mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
}
@@ -1957,7 +2272,7 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
kfree(ipsec->rx_ipv6);
if (ipsec->is_uplink_rep) {
- xa_destroy(&ipsec->rx_esw->ipsec_obj_id_map);
+ xa_destroy(&ipsec->ipsec_obj_id_map);
mutex_destroy(&ipsec->tx_esw->ft.mutex);
WARN_ON(ipsec->tx_esw->ft.refcnt);
@@ -2020,7 +2335,7 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec,
mutex_init(&ipsec->tx_esw->ft.mutex);
mutex_init(&ipsec->rx_esw->ft.mutex);
ipsec->tx_esw->ns = ns_esw;
- xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
+ xa_init_flags(&ipsec->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom);
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index a91f772dc981a1..6e00afe4671b78 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -6,6 +6,8 @@
#include "ipsec.h"
#include "lib/crypto.h"
#include "lib/ipsec_fs_roce.h"
+#include "fs_core.h"
+#include "eswitch.h"
enum {
MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
@@ -38,7 +40,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
caps |= MLX5_IPSEC_CAP_CRYPTO;
- if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload)) {
+ if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
+ (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS ||
+ (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS &&
+ is_mdev_legacy_mode(mdev)))) {
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
reformat_add_esp_trasport) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
@@ -95,7 +100,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
MLX5_SET(ipsec_aso, aso_ctx, window_sz,
- attrs->replay_esn.replay_window / 64);
+ attrs->replay_esn.replay_window);
MLX5_SET(ipsec_aso, aso_ctx, mode,
MLX5_IPSEC_ASO_REPLAY_PROTECTION);
}
@@ -559,6 +564,7 @@ void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
DMA_BIDIRECTIONAL);
kfree(aso);
+ ipsec->aso = NULL;
}
static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 792a0ea544cd39..c7c1b667b105a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -49,7 +49,7 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
- if (count == sizeof(drvinfo->fw_version))
+ if (count >= sizeof(drvinfo->fw_version))
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d", fw_rev_maj(mdev),
fw_rev_min(mdev), fw_rev_sub(mdev));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ea58c691743302..0c87ddb8a7a218 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2731,6 +2731,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
{
int i;
+ ASSERT_RTNL();
if (chs->ptp) {
mlx5e_ptp_close(chs->ptp);
chs->ptp = NULL;
@@ -3012,17 +3013,29 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
if (mlx5e_is_vport_rep(priv))
mlx5e_rep_activate_channels(priv);
+ set_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
+
mlx5e_wait_channels_min_rx_wqes(&priv->channels);
if (priv->rx_res)
mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels);
}
+static void mlx5e_cancel_tx_timeout_work(struct mlx5e_priv *priv)
+{
+ WARN_ON_ONCE(test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state));
+ if (current_work() != &priv->tx_timeout_work)
+ cancel_work_sync(&priv->tx_timeout_work);
+}
+
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
if (priv->rx_res)
mlx5e_rx_res_channels_deactivate(priv->rx_res);
+ clear_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
+ mlx5e_cancel_tx_timeout_work(priv);
+
if (mlx5e_is_vport_rep(priv))
mlx5e_rep_deactivate_channels(priv);
@@ -4801,8 +4814,17 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
struct net_device *netdev = priv->netdev;
int i;
- rtnl_lock();
- mutex_lock(&priv->state_lock);
+ /* Take rtnl_lock to ensure no change in netdev->real_num_tx_queues
+ * through this flow. However, channel closing flows have to wait for
+ * this work to finish while holding rtnl lock too. So either get the
+ * lock or find that channels are being closed for another reason and
+ * this work is no longer relevant.
+ */
+ while (!rtnl_trylock()) {
+ if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state))
+ return;
+ msleep(20);
+ }
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
@@ -4821,7 +4843,6 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
}
unlock:
- mutex_unlock(&priv->state_lock);
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 3ab682bbcf8678..e92d4f83592ed9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -78,7 +78,7 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
- if (count == sizeof(drvinfo->fw_version))
+ if (count >= sizeof(drvinfo->fw_version))
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d", fw_rev_maj(mdev),
fw_rev_min(mdev), fw_rev_sub(mdev));
@@ -1497,7 +1497,7 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch,
rpriv->rep->vport);
- if (dl_port) {
+ if (!IS_ERR(dl_port)) {
SET_NETDEV_DEVLINK_PORT(netdev, dl_port);
mlx5e_rep_vnic_reporter_create(priv, dl_port);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 7ca9e5b86778e3..96af9e2ab1d87d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -444,6 +444,9 @@ mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
struct mlx5e_flow_meter_handle *meter;
enum mlx5e_post_meter_type type;
+ if (IS_ERR(post_act))
+ return PTR_ERR(post_act);
+
meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
if (IS_ERR(meter)) {
mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
@@ -3739,6 +3742,20 @@ out_free:
}
static int
+set_branch_dest_ft(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_post_act *post_act = get_post_action(priv);
+
+ if (IS_ERR(post_act))
+ return PTR_ERR(post_act);
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act);
+
+ return 0;
+}
+
+static int
alloc_branch_attr(struct mlx5e_tc_flow *flow,
struct mlx5e_tc_act_branch_ctrl *cond,
struct mlx5_flow_attr **cond_attr,
@@ -3761,8 +3778,9 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
break;
case FLOW_ACTION_ACCEPT:
case FLOW_ACTION_PIPE:
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
+ err = set_branch_dest_ft(flow->priv, attr);
+ if (err)
+ goto out_err;
break;
case FLOW_ACTION_JUMP:
if (*jump_count) {
@@ -3771,8 +3789,9 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
goto out_err;
}
*jump_count = cond->extval;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
+ err = set_branch_dest_ft(flow->priv, attr);
+ if (err)
+ goto out_err;
break;
default:
err = -EOPNOTSUPP;
@@ -5719,8 +5738,10 @@ int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_a
esw = priv->mdev->priv.eswitch;
attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
- if (IS_ERR(attr->act_id_restore_rule))
+ if (IS_ERR(attr->act_id_restore_rule)) {
+ err = PTR_ERR(attr->act_id_restore_rule);
goto err_rule;
+ }
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index 095f31f380fa3a..190f10aba17028 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -21,158 +21,6 @@ enum {
MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL,
};
-static void esw_ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx)
-{
- mlx5_del_flow_rules(rx->status_drop.rule);
- mlx5_destroy_flow_group(rx->status_drop.group);
- mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
-}
-
-static void esw_ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx)
-{
- mlx5_del_flow_rules(rx->status.rule);
- mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
-}
-
-static int esw_ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_table *ft = rx->ft.status;
- struct mlx5_core_dev *mdev = ipsec->mdev;
- struct mlx5_flow_destination dest = {};
- struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_handle *rule;
- struct mlx5_fc *flow_counter;
- struct mlx5_flow_spec *spec;
- struct mlx5_flow_group *g;
- u32 *flow_group_in;
- int err = 0;
-
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!flow_group_in || !spec) {
- err = -ENOMEM;
- goto err_out;
- }
-
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
- g = mlx5_create_flow_group(ft, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- mlx5_core_err(mdev,
- "Failed to add ipsec rx status drop flow group, err=%d\n", err);
- goto err_out;
- }
-
- flow_counter = mlx5_fc_create(mdev, false);
- if (IS_ERR(flow_counter)) {
- err = PTR_ERR(flow_counter);
- mlx5_core_err(mdev,
- "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
- goto err_cnt;
- }
-
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
- spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
- rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- mlx5_core_err(mdev,
- "Failed to add ipsec rx status drop rule, err=%d\n", err);
- goto err_rule;
- }
-
- rx->status_drop.group = g;
- rx->status_drop.rule = rule;
- rx->status_drop_cnt = flow_counter;
-
- kvfree(flow_group_in);
- kvfree(spec);
- return 0;
-
-err_rule:
- mlx5_fc_destroy(mdev, flow_counter);
-err_cnt:
- mlx5_destroy_flow_group(g);
-err_out:
- kvfree(flow_group_in);
- kvfree(spec);
- return err;
-}
-
-static int esw_ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
-{
- struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_spec *spec;
- int err;
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return -ENOMEM;
-
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- misc_parameters_2.ipsec_syndrome);
- MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters_2.ipsec_syndrome, 0);
- spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
- flow_act.flags = FLOW_ACT_NO_APPEND;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- mlx5_core_warn(ipsec->mdev,
- "Failed to add ipsec rx status pass rule, err=%d\n", err);
- goto err_rule;
- }
-
- rx->status.rule = rule;
- kvfree(spec);
- return 0;
-
-err_rule:
- kvfree(spec);
- return err;
-}
-
-void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx)
-{
- esw_ipsec_rx_status_pass_destroy(ipsec, rx);
- esw_ipsec_rx_status_drop_destroy(ipsec, rx);
-}
-
-int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
-{
- int err;
-
- err = esw_ipsec_rx_status_drop_create(ipsec, rx);
- if (err)
- return err;
-
- err = esw_ipsec_rx_status_pass_create(ipsec, rx, dest);
- if (err)
- goto err_pass_create;
-
- return 0;
-
-err_pass_create:
- esw_ipsec_rx_status_drop_destroy(ipsec, rx);
- return err;
-}
-
void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr)
{
@@ -202,7 +50,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
u32 mapped_id;
int err;
- err = xa_alloc_bh(&ipsec->rx_esw->ipsec_obj_id_map, &mapped_id,
+ err = xa_alloc_bh(&ipsec->ipsec_obj_id_map, &mapped_id,
xa_mk_value(sa_entry->ipsec_obj_id),
XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0);
if (err)
@@ -233,7 +81,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
return 0;
err_header_alloc:
- xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, mapped_id);
+ xa_erase_bh(&ipsec->ipsec_obj_id_map, mapped_id);
return err;
}
@@ -242,7 +90,7 @@ void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
if (sa_entry->rx_mapped_id)
- xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map,
+ xa_erase_bh(&ipsec->ipsec_obj_id_map,
sa_entry->rx_mapped_id);
}
@@ -252,7 +100,7 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
struct mlx5e_ipsec *ipsec = priv->ipsec;
void *val;
- val = xa_load(&ipsec->rx_esw->ipsec_obj_id_map, id);
+ val = xa_load(&ipsec->ipsec_obj_id_map, id);
if (!val)
return -ENOENT;
@@ -304,7 +152,7 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
xa_for_each(&esw->offloads.vport_reps, i, rep) {
rpriv = rep->rep_data[REP_ETH].priv;
- if (!rpriv || !rpriv->netdev)
+ if (!rpriv || !rpriv->netdev || !atomic_read(&rpriv->tc_ht.nelems))
continue;
rhashtable_walk_enter(&rpriv->tc_ht, &iter);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
index 0c90f7a8b0d32c..ac9c65b89166e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
@@ -8,11 +8,6 @@ struct mlx5e_ipsec;
struct mlx5e_ipsec_sa_entry;
#ifdef CONFIG_MLX5_ESWITCH
-void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx);
-int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest);
void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr);
int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
@@ -26,16 +21,6 @@ void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr);
void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
#else
-static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx) {}
-
-static inline int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
-{
- return -EINVAL;
-}
-
static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr) {}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 8d0b915a31214e..3047d7015c5256 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1463,7 +1463,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
{
int err;
- lockdep_assert_held(&esw->mode_lock);
+ devl_assert_locked(priv_to_devlink(esw->dev));
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
@@ -1531,7 +1531,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
if (toggle_lag)
mlx5_lag_disable_change(esw->dev);
- down_write(&esw->mode_lock);
if (!mlx5_esw_is_fdb_created(esw)) {
ret = mlx5_eswitch_enable_locked(esw, num_vfs);
} else {
@@ -1554,8 +1553,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
}
}
- up_write(&esw->mode_lock);
-
if (toggle_lag)
mlx5_lag_enable_change(esw->dev);
@@ -1569,12 +1566,11 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
return;
devl_assert_locked(priv_to_devlink(esw->dev));
- down_write(&esw->mode_lock);
/* If driver is unloaded, this function is called twice by remove_one()
* and mlx5_unload(). Prevent the second call.
*/
if (!esw->esw_funcs.num_vfs && !esw->esw_funcs.num_ec_vfs && !clear_vf)
- goto unlock;
+ return;
esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
@@ -1603,9 +1599,6 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
esw->esw_funcs.num_vfs = 0;
else
esw->esw_funcs.num_ec_vfs = 0;
-
-unlock:
- up_write(&esw->mode_lock);
}
/* Free resources for corresponding eswitch mode. It is called by devlink
@@ -1647,10 +1640,8 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
devl_assert_locked(priv_to_devlink(esw->dev));
mlx5_lag_disable_change(esw->dev);
- down_write(&esw->mode_lock);
mlx5_eswitch_disable_locked(esw);
esw->mode = MLX5_ESWITCH_LEGACY;
- up_write(&esw->mode_lock);
mlx5_lag_enable_change(esw->dev);
}
@@ -2254,8 +2245,13 @@ bool mlx5_esw_hold(struct mlx5_core_dev *mdev)
if (!mlx5_esw_allowed(esw))
return true;
- if (down_read_trylock(&esw->mode_lock) != 0)
+ if (down_read_trylock(&esw->mode_lock) != 0) {
+ if (esw->eswitch_operation_in_progress) {
+ up_read(&esw->mode_lock);
+ return false;
+ }
return true;
+ }
return false;
}
@@ -2312,7 +2308,8 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
if (down_write_trylock(&esw->mode_lock) == 0)
return -EINVAL;
- if (atomic64_read(&esw->user_count) > 0) {
+ if (esw->eswitch_operation_in_progress ||
+ atomic64_read(&esw->user_count) > 0) {
up_write(&esw->mode_lock);
return -EBUSY;
}
@@ -2320,6 +2317,18 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
return esw->mode;
}
+int mlx5_esw_lock(struct mlx5_eswitch *esw)
+{
+ down_write(&esw->mode_lock);
+
+ if (esw->eswitch_operation_in_progress) {
+ up_write(&esw->mode_lock);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
/**
* mlx5_esw_unlock() - Release write lock on esw mode lock
* @esw: eswitch device.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 37ab66e7b403f1..b4eb17141edf3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -383,6 +383,7 @@ struct mlx5_eswitch {
struct xarray paired;
struct mlx5_devcom_comp_dev *devcom;
u16 enabled_ipsec_vf_count;
+ bool eswitch_operation_in_progress;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -525,7 +526,8 @@ struct mlx5_esw_flow_attr {
u8 total_vlan;
struct {
u32 flags;
- struct mlx5_eswitch_rep *rep;
+ bool vport_valid;
+ u16 vport;
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_core_dev *mdev;
struct mlx5_termtbl_handle *termtbl;
@@ -827,6 +829,7 @@ void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
+int mlx5_esw_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);
void esw_vport_change_handle_locked(struct mlx5_vport *vport);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 88236e75fd9013..b0455134c98eff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -287,10 +287,9 @@ static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_
for (i = from; i < to; i++)
if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
mlx5_chains_put_table(chains, 0, 1, 0);
- else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
+ else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport,
esw_attr->dests[i].mdev))
- mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,
- false);
+ mlx5_esw_indir_table_put(esw, esw_attr->dests[i].vport, false);
}
static bool
@@ -358,8 +357,8 @@ esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
* this criteria.
*/
for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
- if (esw_attr->dests[i].rep &&
- mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
+ if (esw_attr->dests[i].vport_valid &&
+ mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport,
esw_attr->dests[i].mdev)) {
result = true;
} else {
@@ -388,7 +387,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
- esw_attr->dests[j].rep->vport, false);
+ esw_attr->dests[j].vport, false);
if (IS_ERR(dest[*i].ft)) {
err = PTR_ERR(dest[*i].ft);
goto err_indir_tbl_get;
@@ -432,11 +431,11 @@ static bool esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch *esw,
int attr_idx)
{
if (esw->offloads.ft_ipsec_tx_pol &&
- esw_attr->dests[attr_idx].rep &&
- esw_attr->dests[attr_idx].rep->vport == MLX5_VPORT_UPLINK &&
+ esw_attr->dests[attr_idx].vport_valid &&
+ esw_attr->dests[attr_idx].vport == MLX5_VPORT_UPLINK &&
/* To be aligned with software, encryption is needed only for tunnel device */
(esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) &&
- esw_attr->dests[attr_idx].rep != esw_attr->in_rep &&
+ esw_attr->dests[attr_idx].vport != esw_attr->in_rep->vport &&
esw_same_vhca_id(esw_attr->dests[attr_idx].mdev, esw->dev))
return true;
@@ -469,7 +468,7 @@ esw_setup_dest_fwd_vport(struct mlx5_flow_destination *dest, struct mlx5_flow_ac
int attr_idx, int dest_idx, bool pkt_reformat)
{
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
+ dest[dest_idx].vport.num = esw_attr->dests[attr_idx].vport;
if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
dest[dest_idx].vport.vhca_id =
MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
@@ -1177,9 +1176,9 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *flow;
struct mlx5_flow_spec *spec;
struct mlx5_vport *vport;
+ int err, pfindex;
unsigned long i;
void *misc;
- int err;
if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
return 0;
@@ -1255,7 +1254,15 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
flows[vport->index] = flow;
}
}
- esw->fdb_table.offloads.peer_miss_rules[mlx5_get_dev_index(peer_dev)] = flows;
+
+ pfindex = mlx5_get_dev_index(peer_dev);
+ if (pfindex >= MLX5_MAX_PORTS) {
+ esw_warn(esw->dev, "Peer dev index(%d) is over the max num defined(%d)\n",
+ pfindex, MLX5_MAX_PORTS);
+ err = -EINVAL;
+ goto add_ec_vf_flow_err;
+ }
+ esw->fdb_table.offloads.peer_miss_rules[pfindex] = flows;
kvfree(spec);
return 0;
@@ -3653,14 +3660,18 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
struct net *devl_net, *netdev_net;
- struct mlx5_eswitch *esw;
-
- esw = mlx5_devlink_eswitch_nocheck_get(devlink);
- netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
- devl_net = devlink_net(devlink);
+ bool ret = false;
- return net_eq(devl_net, netdev_net);
+ mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
+ if (dev->mlx5e_res.uplink_netdev) {
+ netdev_net = dev_net(dev->mlx5e_res.uplink_netdev);
+ devl_net = devlink_net(devlink);
+ ret = net_eq(devl_net, netdev_net);
+ }
+ mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
+ return ret;
}
int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
@@ -3733,13 +3744,16 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
goto unlock;
}
+ esw->eswitch_operation_in_progress = true;
+ up_write(&esw->mode_lock);
+
mlx5_eswitch_disable_locked(esw);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Can't change mode while devlink traps are active");
err = -EOPNOTSUPP;
- goto unlock;
+ goto skip;
}
err = esw_offloads_start(esw, extack);
} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
@@ -3749,6 +3763,9 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
err = -EINVAL;
}
+skip:
+ down_write(&esw->mode_lock);
+ esw->eswitch_operation_in_progress = false;
unlock:
mlx5_esw_unlock(esw);
enable_lag:
@@ -3759,16 +3776,12 @@ enable_lag:
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
struct mlx5_eswitch *esw;
- int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_read(&esw->mode_lock);
- err = esw_mode_to_devlink(esw->mode, mode);
- up_read(&esw->mode_lock);
- return err;
+ return esw_mode_to_devlink(esw->mode, mode);
}
static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
@@ -3862,11 +3875,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
if (err)
goto out;
+ esw->eswitch_operation_in_progress = true;
+ up_write(&esw->mode_lock);
+
err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
- if (err)
- goto out;
+ if (!err)
+ esw->offloads.inline_mode = mlx5_mode;
- esw->offloads.inline_mode = mlx5_mode;
+ down_write(&esw->mode_lock);
+ esw->eswitch_operation_in_progress = false;
up_write(&esw->mode_lock);
return 0;
@@ -3878,16 +3895,12 @@ out:
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
struct mlx5_eswitch *esw;
- int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_read(&esw->mode_lock);
- err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
- up_read(&esw->mode_lock);
- return err;
+ return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
@@ -3969,6 +3982,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
goto unlock;
}
+ esw->eswitch_operation_in_progress = true;
+ up_write(&esw->mode_lock);
+
esw_destroy_offloads_fdb_tables(esw);
esw->offloads.encap = encap;
@@ -3982,6 +3998,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
(void)esw_create_offloads_fdb_tables(esw);
}
+ down_write(&esw->mode_lock);
+ esw->eswitch_operation_in_progress = false;
+
unlock:
up_write(&esw->mode_lock);
return err;
@@ -3996,9 +4015,7 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_read(&esw->mode_lock);
*encap = esw->offloads.encap;
- up_read(&esw->mode_lock);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index edd91025831441..40bdc677f051dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -233,8 +233,8 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
/* hairpin */
for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
- if (!esw_attr->dest_int_port && esw_attr->dests[i].rep &&
- esw_attr->dests[i].rep->vport == MLX5_VPORT_UPLINK)
+ if (!esw_attr->dest_int_port && esw_attr->dests[i].vport_valid &&
+ esw_attr->dests[i].vport == MLX5_VPORT_UPLINK)
return true;
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index b568988e92e3e9..c4e19d627da214 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -325,6 +325,29 @@ static void mlx5_fw_live_patch_event(struct work_struct *work)
mlx5_core_err(dev, "Failed to reload FW tracer\n");
}
+#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
+static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev)
+{
+ struct pci_dev *bridge = dev->pdev->bus->self;
+ u16 reg16;
+ int err;
+
+ if (!bridge)
+ return -EOPNOTSUPP;
+
+ err = pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, &reg16);
+ if (err)
+ return err;
+
+ if ((reg16 & PCI_EXP_SLTCTL_HPIE) && (reg16 & PCI_EXP_SLTCTL_DLLSCE)) {
+ mlx5_core_warn(dev, "FW reset is not supported as HotPlug is enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+#endif
+
static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
@@ -357,6 +380,12 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
return false;
}
+#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
+ err = mlx5_check_hotplug_interrupt(dev);
+ if (err)
+ return false;
+#endif
+
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
if (err)
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 5a31fb47ffa58b..21753f32786850 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -277,7 +277,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
req_list_size = max_list_size;
}
- out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) +
+ out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
out = kvzalloc(out_sz, GFP_KERNEL);
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index fecd43754cead1..e5ec0a363aff84 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -350,6 +350,8 @@ union ks8851_tx_hdr {
* @rxd: Space for receiving SPI data, in DMA-able space.
* @txd: Space for transmitting SPI data, in DMA-able space.
* @msg_enable: The message flags controlling driver output (see ethtool).
+ * @tx_space: Free space in the hardware TX buffer (cached copy of KS_TXMIR).
+ * @queued_len: Space required in hardware TX buffer for queued packets in txq.
* @fid: Incrementing frame id tag.
* @rc_ier: Cached copy of KS_IER.
* @rc_ccr: Cached copy of KS_CCR.
@@ -399,6 +401,7 @@ struct ks8851_net {
struct work_struct rxctrl_work;
struct sk_buff_head txq;
+ unsigned int queued_len;
struct eeprom_93cx6 eeprom;
struct regulator *vdd_reg;
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index cfbc900d4aeb9e..0bf13b38b8f5b9 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -362,16 +362,18 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
handled |= IRQ_RXPSI;
if (status & IRQ_TXI) {
- handled |= IRQ_TXI;
+ unsigned short tx_space = ks8851_rdreg16(ks, KS_TXMIR);
- /* no lock here, tx queue should have been stopped */
+ netif_dbg(ks, intr, ks->netdev,
+ "%s: txspace %d\n", __func__, tx_space);
- /* update our idea of how much tx space is available to the
- * system */
- ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
+ spin_lock(&ks->statelock);
+ ks->tx_space = tx_space;
+ if (netif_queue_stopped(ks->netdev))
+ netif_wake_queue(ks->netdev);
+ spin_unlock(&ks->statelock);
- netif_dbg(ks, intr, ks->netdev,
- "%s: txspace %d\n", __func__, ks->tx_space);
+ handled |= IRQ_TXI;
}
if (status & IRQ_RXI)
@@ -414,9 +416,6 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
if (status & IRQ_LCI)
mii_check_link(&ks->mii);
- if (status & IRQ_TXI)
- netif_wake_queue(ks->netdev);
-
return IRQ_HANDLED;
}
@@ -500,6 +499,7 @@ static int ks8851_net_open(struct net_device *dev)
ks8851_wrreg16(ks, KS_ISR, ks->rc_ier);
ks8851_wrreg16(ks, KS_IER, ks->rc_ier);
+ ks->queued_len = 0;
netif_start_queue(ks->netdev);
netif_dbg(ks, ifup, ks->netdev, "network device up\n");
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 70bc7253454f6b..88e26c120b4836 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -287,6 +287,18 @@ static void ks8851_wrfifo_spi(struct ks8851_net *ks, struct sk_buff *txp,
}
/**
+ * calc_txlen - calculate size of message to send packet
+ * @len: Length of data
+ *
+ * Returns the size of the TXFIFO message needed to send
+ * this packet.
+ */
+static unsigned int calc_txlen(unsigned int len)
+{
+ return ALIGN(len + 4, 4);
+}
+
+/**
* ks8851_rx_skb_spi - receive skbuff
* @ks: The device state
* @skb: The skbuff
@@ -305,7 +317,9 @@ static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
*/
static void ks8851_tx_work(struct work_struct *work)
{
+ unsigned int dequeued_len = 0;
struct ks8851_net_spi *kss;
+ unsigned short tx_space;
struct ks8851_net *ks;
unsigned long flags;
struct sk_buff *txb;
@@ -322,6 +336,8 @@ static void ks8851_tx_work(struct work_struct *work)
last = skb_queue_empty(&ks->txq);
if (txb) {
+ dequeued_len += calc_txlen(txb->len);
+
ks8851_wrreg16_spi(ks, KS_RXQCR,
ks->rc_rxqcr | RXQCR_SDA);
ks8851_wrfifo_spi(ks, txb, last);
@@ -332,6 +348,13 @@ static void ks8851_tx_work(struct work_struct *work)
}
}
+ tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR);
+
+ spin_lock(&ks->statelock);
+ ks->queued_len -= dequeued_len;
+ ks->tx_space = tx_space;
+ spin_unlock(&ks->statelock);
+
ks8851_unlock_spi(ks, &flags);
}
@@ -347,18 +370,6 @@ static void ks8851_flush_tx_work_spi(struct ks8851_net *ks)
}
/**
- * calc_txlen - calculate size of message to send packet
- * @len: Length of data
- *
- * Returns the size of the TXFIFO message needed to send
- * this packet.
- */
-static unsigned int calc_txlen(unsigned int len)
-{
- return ALIGN(len + 4, 4);
-}
-
-/**
* ks8851_start_xmit_spi - transmit packet using SPI
* @skb: The buffer to transmit
* @dev: The device used to transmit the packet.
@@ -386,16 +397,17 @@ static netdev_tx_t ks8851_start_xmit_spi(struct sk_buff *skb,
spin_lock(&ks->statelock);
- if (needed > ks->tx_space) {
+ if (ks->queued_len + needed > ks->tx_space) {
netif_stop_queue(dev);
ret = NETDEV_TX_BUSY;
} else {
- ks->tx_space -= needed;
+ ks->queued_len += needed;
skb_queue_tail(&ks->txq, skb);
}
spin_unlock(&ks->statelock);
- schedule_work(&kss->tx_work);
+ if (ret == NETDEV_TX_OK)
+ schedule_work(&kss->tx_work);
return ret;
}
diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig
index 090e6b9832431c..01eb7445ead951 100644
--- a/drivers/net/ethernet/microsoft/Kconfig
+++ b/drivers/net/ethernet/microsoft/Kconfig
@@ -20,6 +20,7 @@ config MICROSOFT_MANA
depends on PCI_MSI && X86_64
depends on PCI_HYPERV
select AUXILIARY_BUS
+ select PAGE_POOL
help
This driver supports Microsoft Azure Network Adapter (MANA).
So far, the driver is only supported on X86_64.
diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
index 5c55197c7327df..c018783757fb2f 100644
--- a/drivers/net/ethernet/mscc/ocelot_stats.c
+++ b/drivers/net/ethernet/mscc/ocelot_stats.c
@@ -582,10 +582,10 @@ static void ocelot_port_rmon_stats_cb(struct ocelot *ocelot, int port, void *pri
rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_64];
rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_65_127];
rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_128_255];
- rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_128_255];
- rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_256_511];
- rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_512_1023];
- rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1024_1526];
+ rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_256_511];
+ rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_512_1023];
+ rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_1024_1526];
+ rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1527_MAX];
}
static void ocelot_port_pmac_rmon_stats_cb(struct ocelot *ocelot, int port,
@@ -610,10 +610,10 @@ static void ocelot_port_pmac_rmon_stats_cb(struct ocelot *ocelot, int port,
rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_PMAC_64];
rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_PMAC_65_127];
rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_PMAC_128_255];
- rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_PMAC_128_255];
- rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_PMAC_256_511];
- rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_PMAC_512_1023];
- rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_PMAC_1024_1526];
+ rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_PMAC_256_511];
+ rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_PMAC_512_1023];
+ rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_PMAC_1024_1526];
+ rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_PMAC_1527_MAX];
}
void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 65e20693c549e1..33f4f58ee51c68 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -933,6 +933,7 @@ static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
p_dma->virt_addr = NULL;
}
kfree(p_mngr->ilt_shadow);
+ p_mngr->ilt_shadow = NULL;
}
static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index 6f2fa2a42770aa..1822f2ad8f0ddf 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -30,6 +30,8 @@
#define QCASPI_MAX_REGS 0x20
+#define QCASPI_RX_MAX_FRAMES 4
+
static const u16 qcaspi_spi_regs[] = {
SPI_REG_BFR_SIZE,
SPI_REG_WRBUF_SPC_AVA,
@@ -252,9 +254,9 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
{
struct qcaspi *qca = netdev_priv(dev);
- ring->rx_max_pending = 4;
+ ring->rx_max_pending = QCASPI_RX_MAX_FRAMES;
ring->tx_max_pending = TX_RING_MAX_LEN;
- ring->rx_pending = 4;
+ ring->rx_pending = QCASPI_RX_MAX_FRAMES;
ring->tx_pending = qca->txr.count;
}
@@ -263,22 +265,21 @@ qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- const struct net_device_ops *ops = dev->netdev_ops;
struct qcaspi *qca = netdev_priv(dev);
- if ((ring->rx_pending) ||
+ if (ring->rx_pending != QCASPI_RX_MAX_FRAMES ||
(ring->rx_mini_pending) ||
(ring->rx_jumbo_pending))
return -EINVAL;
- if (netif_running(dev))
- ops->ndo_stop(dev);
+ if (qca->spi_thread)
+ kthread_park(qca->spi_thread);
qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);
qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN);
- if (netif_running(dev))
- ops->ndo_open(dev);
+ if (qca->spi_thread)
+ kthread_unpark(qca->spi_thread);
return 0;
}
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index bec723028e96c9..5f3c11fb3fa279 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -580,6 +580,18 @@ qcaspi_spi_thread(void *data)
netdev_info(qca->net_dev, "SPI thread created\n");
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_park()) {
+ netif_tx_disable(qca->net_dev);
+ netif_carrier_off(qca->net_dev);
+ qcaspi_flush_tx_ring(qca);
+ kthread_parkme();
+ if (qca->sync == QCASPI_SYNC_READY) {
+ netif_carrier_on(qca->net_dev);
+ netif_wake_queue(qca->net_dev);
+ }
+ continue;
+ }
+
if ((qca->intr_req == qca->intr_svc) &&
!qca->txr.skb[qca->txr.head])
schedule();
@@ -608,11 +620,17 @@ qcaspi_spi_thread(void *data)
if (intr_cause & SPI_INT_CPU_ON) {
qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON);
+ /* Frame decoding in progress */
+ if (qca->frm_handle.state != qca->frm_handle.init)
+ qca->net_dev->stats.rx_dropped++;
+
+ qcafrm_fsm_init_spi(&qca->frm_handle);
+ qca->stats.device_reset++;
+
/* not synced. */
if (qca->sync != QCASPI_SYNC_READY)
continue;
- qca->stats.device_reset++;
netif_wake_queue(qca->net_dev);
netif_carrier_on(qca->net_dev);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index 2cd6fce5c9934c..9e40c28d453ab1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -59,26 +59,19 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
return -ENODEV;
}
- if (!of_device_is_compatible(np, "loongson, pci-gmac")) {
- pr_info("dwmac_loongson_pci: Incompatible OF node\n");
- return -ENODEV;
- }
-
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
return -ENOMEM;
+ plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
+
plat->mdio_node = of_get_child_by_name(np, "mdio");
if (plat->mdio_node) {
dev_info(&pdev->dev, "Found MDIO subnode\n");
-
- plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
- sizeof(*plat->mdio_bus_data),
- GFP_KERNEL);
- if (!plat->mdio_bus_data) {
- ret = -ENOMEM;
- goto err_put_node;
- }
plat->mdio_bus_data->needs_reset = true;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index d3bf42d0fceb69..31631e3f89d0a4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -34,6 +34,7 @@
#define RGMII_CONFIG_LOOPBACK_EN BIT(2)
#define RGMII_CONFIG_PROG_SWAP BIT(1)
#define RGMII_CONFIG_DDR_MODE BIT(0)
+#define RGMII_CONFIG_SGMII_CLK_DVDR GENMASK(18, 10)
/* SDCC_HC_REG_DLL_CONFIG fields */
#define SDCC_DLL_CONFIG_DLL_RST BIT(30)
@@ -78,6 +79,8 @@
#define ETHQOS_MAC_CTRL_SPEED_MODE BIT(14)
#define ETHQOS_MAC_CTRL_PORT_SEL BIT(15)
+#define SGMII_10M_RX_CLK_DVDR 0x31
+
struct ethqos_emac_por {
unsigned int offset;
unsigned int value;
@@ -598,6 +601,9 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
return 0;
}
+/* On interface toggle, MAC registers get reset.
+ * Configure the MAC block for SGMII on ethernet PHY link up.
+ */
static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
{
int val;
@@ -617,6 +623,10 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
case SPEED_10:
val |= ETHQOS_MAC_CTRL_PORT_SEL;
val &= ~ETHQOS_MAC_CTRL_SPEED_MODE;
+ rgmii_updatel(ethqos, RGMII_CONFIG_SGMII_CLK_DVDR,
+ FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR,
+ SGMII_10M_RX_CLK_DVDR),
+ RGMII_IO_MACRO_CONFIG);
break;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 540f6a4ec0b81f..f05bd757dfe525 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -237,7 +237,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
*/
ts_status = readl(priv->ioaddr + GMAC_TIMESTAMP_STATUS);
- if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)
+ if (!(priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN))
return;
num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >>
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index fa9e7e7040b945..0542cfd1817e62 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -591,7 +591,11 @@ int stmmac_mdio_register(struct net_device *ndev)
new_bus->parent = priv->device;
err = of_mdiobus_register(new_bus, mdio_node);
- if (err != 0) {
+ if (err == -ENODEV) {
+ err = 0;
+ dev_info(dev, "MDIO bus is disabled\n");
+ goto bus_register_fail;
+ } else if (err) {
dev_err_probe(dev, err, "Cannot register the MDIO bus\n");
goto bus_register_fail;
}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index a5a50b5a881684..347d3cec02a363 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -160,60 +160,6 @@ static __le32 wx_test_staterr(union wx_rx_desc *rx_desc,
return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
-static bool wx_can_reuse_rx_page(struct wx_rx_buffer *rx_buffer,
- int rx_buffer_pgcnt)
-{
- unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
- struct page *page = rx_buffer->page;
-
- /* avoid re-using remote and pfmemalloc pages */
- if (!dev_page_is_reusable(page))
- return false;
-
-#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
- return false;
-#endif
-
- /* If we have drained the page fragment pool we need to update
- * the pagecnt_bias and page count so that we fully restock the
- * number of references the driver holds.
- */
- if (unlikely(pagecnt_bias == 1)) {
- page_ref_add(page, USHRT_MAX - 1);
- rx_buffer->pagecnt_bias = USHRT_MAX;
- }
-
- return true;
-}
-
-/**
- * wx_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- **/
-static void wx_reuse_rx_page(struct wx_ring *rx_ring,
- struct wx_rx_buffer *old_buff)
-{
- u16 nta = rx_ring->next_to_alloc;
- struct wx_rx_buffer *new_buff;
-
- new_buff = &rx_ring->rx_buffer_info[nta];
-
- /* update, and store next to alloc */
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- /* transfer page from old buffer to new buffer */
- new_buff->page = old_buff->page;
- new_buff->page_dma = old_buff->page_dma;
- new_buff->page_offset = old_buff->page_offset;
- new_buff->pagecnt_bias = old_buff->pagecnt_bias;
-}
-
static void wx_dma_sync_frag(struct wx_ring *rx_ring,
struct wx_rx_buffer *rx_buffer)
{
@@ -270,8 +216,6 @@ static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
size,
DMA_FROM_DEVICE);
skip_sync:
- rx_buffer->pagecnt_bias--;
-
return rx_buffer;
}
@@ -280,19 +224,9 @@ static void wx_put_rx_buffer(struct wx_ring *rx_ring,
struct sk_buff *skb,
int rx_buffer_pgcnt)
{
- if (wx_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
- /* hand second half of page back to the ring */
- wx_reuse_rx_page(rx_ring, rx_buffer);
- } else {
- if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
- /* the page has been released from the ring */
- WX_CB(skb)->page_released = true;
- else
- page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
-
- __page_frag_cache_drain(rx_buffer->page,
- rx_buffer->pagecnt_bias);
- }
+ if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
+ /* the page has been released from the ring */
+ WX_CB(skb)->page_released = true;
/* clear contents of rx_buffer */
rx_buffer->page = NULL;
@@ -335,11 +269,12 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring,
if (size <= WX_RXBUFFER_256) {
memcpy(__skb_put(skb, size), page_addr,
ALIGN(size, sizeof(long)));
- rx_buffer->pagecnt_bias++;
-
+ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, true);
return skb;
}
+ skb_mark_for_recycle(skb);
+
if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))
WX_CB(skb)->dma = rx_buffer->dma;
@@ -382,8 +317,6 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
bi->page_dma = dma;
bi->page = page;
bi->page_offset = 0;
- page_ref_add(page, USHRT_MAX - 1);
- bi->pagecnt_bias = USHRT_MAX;
return true;
}
@@ -723,7 +656,6 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
- rx_buffer->pagecnt_bias++;
break;
}
@@ -2248,8 +2180,6 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
/* free resources associated with mapping */
page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
- __page_frag_cache_drain(rx_buffer->page,
- rx_buffer->pagecnt_bias);
i++;
rx_buffer++;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 165e82de772e6c..83f9bb7b3c22a0 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -787,7 +787,6 @@ struct wx_rx_buffer {
dma_addr_t page_dma;
struct page *page;
unsigned int page_offset;
- u16 pagecnt_bias;
};
struct wx_queue_stats {
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 2ce74593d6e4a1..a42df2c1bd043c 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1548,7 +1548,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
goto error;
phy_resume(phydev);
- phy_led_triggers_register(phydev);
+ if (!phydev->is_on_sfp_module)
+ phy_led_triggers_register(phydev);
/**
* If the external phy used by current mac interface is managed by
@@ -1817,7 +1818,8 @@ void phy_detach(struct phy_device *phydev)
}
phydev->phylink = NULL;
- phy_led_triggers_unregister(phydev);
+ if (!phydev->is_on_sfp_module)
+ phy_led_triggers_unregister(phydev);
if (phydev->mdio.dev.driver)
module_put(phydev->mdio.dev.driver->owner);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 508d9a392ab182..f575f225d41789 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -281,8 +281,10 @@ static int __team_options_register(struct team *team,
return 0;
inst_rollback:
- for (i--; i >= 0; i--)
+ for (i--; i >= 0; i--) {
__team_option_inst_del_option(team, dst_opts[i]);
+ list_del(&dst_opts[i]->list);
+ }
i = option_count;
alloc_rollback:
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 4ea0e155bb0d5d..5a1bf42ce15668 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -173,6 +173,7 @@ struct ax88179_data {
u8 in_pm;
u32 wol_supported;
u32 wolopts;
+ u8 disconnecting;
};
struct ax88179_int_data {
@@ -208,6 +209,7 @@ static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
{
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
+ struct ax88179_data *ax179_data = dev->driver_priv;
BUG_ON(!dev);
@@ -219,7 +221,7 @@ static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, data, size);
- if (unlikely(ret < 0))
+ if (unlikely((ret < 0) && !(ret == -ENODEV && ax179_data->disconnecting)))
netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n",
index, ret);
@@ -231,6 +233,7 @@ static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
{
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
+ struct ax88179_data *ax179_data = dev->driver_priv;
BUG_ON(!dev);
@@ -242,7 +245,7 @@ static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
ret = fn(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, data, size);
- if (unlikely(ret < 0))
+ if (unlikely((ret < 0) && !(ret == -ENODEV && ax179_data->disconnecting)))
netdev_warn(dev->net, "Failed to write reg index 0x%04x: %d\n",
index, ret);
@@ -492,6 +495,20 @@ static int ax88179_resume(struct usb_interface *intf)
return usbnet_resume(intf);
}
+static void ax88179_disconnect(struct usb_interface *intf)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct ax88179_data *ax179_data;
+
+ if (!dev)
+ return;
+
+ ax179_data = dev->driver_priv;
+ ax179_data->disconnecting = 1;
+
+ usbnet_disconnect(intf);
+}
+
static void
ax88179_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
{
@@ -1906,7 +1923,7 @@ static struct usb_driver ax88179_178a_driver = {
.suspend = ax88179_suspend,
.resume = ax88179_resume,
.reset_resume = ax88179_resume,
- .disconnect = usbnet_disconnect,
+ .disconnect = ax88179_disconnect,
.supports_autosuspend = 1,
.disable_hub_initiated_lpm = 1,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 146bc7bd14fbc0..bc6a9f861711fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1385,7 +1385,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
* if it is true then one of the handlers took the page.
*/
- if (reclaim) {
+ if (reclaim && txq) {
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
int index = SEQ_TO_INDEX(sequence);
int cmd_index = iwl_txq_get_cmd_index(txq, index);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index a468e5efeecd4f..92253260f56832 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -3106,7 +3106,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
u32 i, r, j, rb_len = 0;
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
r = iwl_get_closed_rb_stts(trans, rxq);
@@ -3130,7 +3130,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
*data = iwl_fw_error_next_data(*data);
}
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
return rb_len;
}
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 511fe7e6e744b3..68ad915203aa54 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -783,7 +783,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
- int len, bool more, u32 info)
+ int len, bool more, u32 info, bool allow_direct)
{
struct sk_buff *skb = q->rx_head;
struct skb_shared_info *shinfo = skb_shinfo(skb);
@@ -795,7 +795,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
} else {
- mt76_put_page_pool_buf(data, true);
+ mt76_put_page_pool_buf(data, allow_direct);
}
if (more)
@@ -815,6 +815,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
struct sk_buff *skb;
unsigned char *data;
bool check_ddone = false;
+ bool allow_direct = !mt76_queue_is_wed_rx(q);
bool more;
if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
@@ -855,7 +856,8 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
}
if (q->rx_head) {
- mt76_add_fragment(dev, q, data, len, more, info);
+ mt76_add_fragment(dev, q, data, len, more, info,
+ allow_direct);
continue;
}
@@ -884,7 +886,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
continue;
free_frag:
- mt76_put_page_pool_buf(data, true);
+ mt76_put_page_pool_buf(data, allow_direct);
}
mt76_dma_rx_fill(dev, q, true);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8ebdfd623e0f78..60f14019f98166 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4137,6 +4137,8 @@ static void nvme_fw_act_work(struct work_struct *work)
struct nvme_ctrl, fw_act_work);
unsigned long fw_act_timeout;
+ nvme_auth_stop(ctrl);
+
if (ctrl->mtfa)
fw_act_timeout = jiffies +
msecs_to_jiffies(ctrl->mtfa * 100);
@@ -4192,7 +4194,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
* firmware activation.
*/
if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
- nvme_auth_stop(ctrl);
requeue = false;
queue_work(nvme_wq, &ctrl->fw_act_work);
}
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index fb22976a36a89c..1d51925ea67fd1 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2548,24 +2548,17 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
* the controller. Abort any ios on the association and let the
* create_association error path resolve things.
*/
- enum nvme_ctrl_state state;
- unsigned long flags;
-
- spin_lock_irqsave(&ctrl->lock, flags);
- state = ctrl->ctrl.state;
- if (state == NVME_CTRL_CONNECTING) {
- set_bit(ASSOC_FAILED, &ctrl->flags);
- spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
__nvme_fc_abort_outstanding_ios(ctrl, true);
+ set_bit(ASSOC_FAILED, &ctrl->flags);
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: transport error during (re)connect\n",
ctrl->cnum);
return;
}
- spin_unlock_irqrestore(&ctrl->lock, flags);
/* Otherwise, only proceed if in LIVE state - e.g. on first error */
- if (state != NVME_CTRL_LIVE)
+ if (ctrl->ctrl.state != NVME_CTRL_LIVE)
return;
dev_warn(ctrl->ctrl.device,
@@ -3180,16 +3173,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
else
ret = nvme_fc_recreate_io_queues(ctrl);
}
-
- spin_lock_irqsave(&ctrl->lock, flags);
if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
ret = -EIO;
- if (ret) {
- spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (ret)
goto out_term_aen_ops;
- }
+
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
- spin_unlock_irqrestore(&ctrl->lock, flags);
ctrl->ctrl.nr_reconnects = 0;
diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
index 9737104f3b76b3..5cdf339cfbec0e 100644
--- a/drivers/nvmem/brcm_nvram.c
+++ b/drivers/nvmem/brcm_nvram.c
@@ -17,9 +17,23 @@
#define NVRAM_MAGIC "FLSH"
+/**
+ * struct brcm_nvram - driver state internal struct
+ *
+ * @dev: NVMEM device pointer
+ * @nvmem_size: Size of the whole space available for NVRAM
+ * @data: NVRAM data copy stored to avoid poking the underlying flash controller
+ * @data_len: NVRAM data size
+ * @padding_byte: Padding value used to fill remaining space
+ * @cells: Array of discovered NVMEM cells
+ * @ncells: Number of elements in cells
+ */
struct brcm_nvram {
struct device *dev;
- void __iomem *base;
+ size_t nvmem_size;
+ uint8_t *data;
+ size_t data_len;
+ uint8_t padding_byte;
struct nvmem_cell_info *cells;
int ncells;
};
@@ -36,10 +50,47 @@ static int brcm_nvram_read(void *context, unsigned int offset, void *val,
size_t bytes)
{
struct brcm_nvram *priv = context;
- u8 *dst = val;
+ size_t to_copy;
+
+ if (offset + bytes > priv->data_len)
+ to_copy = max_t(ssize_t, (ssize_t)priv->data_len - offset, 0);
+ else
+ to_copy = bytes;
+
+ memcpy(val, priv->data + offset, to_copy);
+
+ memset((uint8_t *)val + to_copy, priv->padding_byte, bytes - to_copy);
+
+ return 0;
+}
+
+static int brcm_nvram_copy_data(struct brcm_nvram *priv, struct platform_device *pdev)
+{
+ struct resource *res;
+ void __iomem *base;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->nvmem_size = resource_size(res);
+
+ priv->padding_byte = readb(base + priv->nvmem_size - 1);
+ for (priv->data_len = priv->nvmem_size;
+ priv->data_len;
+ priv->data_len--) {
+ if (readb(base + priv->data_len - 1) != priv->padding_byte)
+ break;
+ }
+ WARN(priv->data_len > SZ_128K, "Unexpected (big) NVRAM size: %zu B\n", priv->data_len);
+
+ priv->data = devm_kzalloc(priv->dev, priv->data_len, GFP_KERNEL);
+ if (!priv->data)
+ return -ENOMEM;
+
+ memcpy_fromio(priv->data, base, priv->data_len);
- while (bytes--)
- *dst++ = readb(priv->base + offset++);
+ bcm47xx_nvram_init_from_iomem(base, priv->data_len);
return 0;
}
@@ -67,8 +118,13 @@ static int brcm_nvram_add_cells(struct brcm_nvram *priv, uint8_t *data,
size_t len)
{
struct device *dev = priv->dev;
- char *var, *value, *eq;
+ char *var, *value;
+ uint8_t tmp;
int idx;
+ int err = 0;
+
+ tmp = priv->data[len - 1];
+ priv->data[len - 1] = '\0';
priv->ncells = 0;
for (var = data + sizeof(struct brcm_nvram_header);
@@ -78,67 +134,68 @@ static int brcm_nvram_add_cells(struct brcm_nvram *priv, uint8_t *data,
}
priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL);
- if (!priv->cells)
- return -ENOMEM;
+ if (!priv->cells) {
+ err = -ENOMEM;
+ goto out;
+ }
for (var = data + sizeof(struct brcm_nvram_header), idx = 0;
var < (char *)data + len && *var;
var = value + strlen(value) + 1, idx++) {
+ char *eq, *name;
+
eq = strchr(var, '=');
if (!eq)
break;
*eq = '\0';
+ name = devm_kstrdup(dev, var, GFP_KERNEL);
+ *eq = '=';
+ if (!name) {
+ err = -ENOMEM;
+ goto out;
+ }
value = eq + 1;
- priv->cells[idx].name = devm_kstrdup(dev, var, GFP_KERNEL);
- if (!priv->cells[idx].name)
- return -ENOMEM;
+ priv->cells[idx].name = name;
priv->cells[idx].offset = value - (char *)data;
priv->cells[idx].bytes = strlen(value);
priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
- if (!strcmp(var, "et0macaddr") ||
- !strcmp(var, "et1macaddr") ||
- !strcmp(var, "et2macaddr")) {
+ if (!strcmp(name, "et0macaddr") ||
+ !strcmp(name, "et1macaddr") ||
+ !strcmp(name, "et2macaddr")) {
priv->cells[idx].raw_len = strlen(value);
priv->cells[idx].bytes = ETH_ALEN;
priv->cells[idx].read_post_process = brcm_nvram_read_post_process_macaddr;
}
}
- return 0;
+out:
+ priv->data[len - 1] = tmp;
+ return err;
}
static int brcm_nvram_parse(struct brcm_nvram *priv)
{
+ struct brcm_nvram_header *header = (struct brcm_nvram_header *)priv->data;
struct device *dev = priv->dev;
- struct brcm_nvram_header header;
- uint8_t *data;
size_t len;
int err;
- memcpy_fromio(&header, priv->base, sizeof(header));
-
- if (memcmp(header.magic, NVRAM_MAGIC, 4)) {
+ if (memcmp(header->magic, NVRAM_MAGIC, 4)) {
dev_err(dev, "Invalid NVRAM magic\n");
return -EINVAL;
}
- len = le32_to_cpu(header.len);
-
- data = kzalloc(len, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- memcpy_fromio(data, priv->base, len);
- data[len - 1] = '\0';
-
- err = brcm_nvram_add_cells(priv, data, len);
- if (err) {
- dev_err(dev, "Failed to add cells: %d\n", err);
- return err;
+ len = le32_to_cpu(header->len);
+ if (len > priv->nvmem_size) {
+ dev_err(dev, "NVRAM length (%zd) exceeds mapped size (%zd)\n", len,
+ priv->nvmem_size);
+ return -EINVAL;
}
- kfree(data);
+ err = brcm_nvram_add_cells(priv, priv->data, len);
+ if (err)
+ dev_err(dev, "Failed to add cells: %d\n", err);
return 0;
}
@@ -150,7 +207,6 @@ static int brcm_nvram_probe(struct platform_device *pdev)
.reg_read = brcm_nvram_read,
};
struct device *dev = &pdev->dev;
- struct resource *res;
struct brcm_nvram *priv;
int err;
@@ -159,21 +215,19 @@ static int brcm_nvram_probe(struct platform_device *pdev)
return -ENOMEM;
priv->dev = dev;
- priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ err = brcm_nvram_copy_data(priv, pdev);
+ if (err)
+ return err;
err = brcm_nvram_parse(priv);
if (err)
return err;
- bcm47xx_nvram_init_from_iomem(priv->base, resource_size(res));
-
config.dev = dev;
config.cells = priv->cells;
config.ncells = priv->ncells;
config.priv = priv;
- config.size = resource_size(res);
+ config.size = priv->nvmem_size;
return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
}
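
A minimal standalone sketch of the clamped read-plus-padding behaviour that the new brcm_nvram_read() implements: reads within the cached data are copied verbatim, and anything past data_len is filled with the detected padding byte. The names (cache, cache_len, pad) and the 0xff padding value are illustrative assumptions, not part of the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy what the cache holds; pad the remainder of the destination. */
static void padded_read(const uint8_t *cache, size_t cache_len, uint8_t pad,
			size_t offset, uint8_t *dst, size_t bytes)
{
	size_t to_copy = 0;

	if (offset < cache_len)
		to_copy = (bytes < cache_len - offset) ? bytes : cache_len - offset;

	memcpy(dst, cache + offset, to_copy);
	memset(dst + to_copy, pad, bytes - to_copy);
}

int main(void)
{
	const uint8_t cache[] = { 'F', 'L', 'S', 'H', 0x01, 0x02 };
	uint8_t out[8];
	size_t i;

	/* Read 8 bytes at offset 4: only 2 bytes exist, the rest is 0xff padding. */
	padded_read(cache, sizeof(cache), 0xff, 4, out, sizeof(out));

	for (i = 0; i < sizeof(out); i++)
		printf("%02x ", out[i]);
	printf("\n"); /* prints: 01 02 ff ff ff ff ff ff */
	return 0;
}
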
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 6902e97719d136..11c80555d97543 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -968,9 +968,12 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
{
- /* Downstream devices need to be in D0 state before enabling PCI PM substates */
+ /*
+ * Downstream devices need to be in D0 state before enabling PCI PM
+ * substates.
+ */
pci_set_power_state(pdev, PCI_D0);
- pci_enable_link_state(pdev, PCIE_LINK_STATE_ALL);
+ pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
return 0;
}
diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
index d45e7b8dc530d6..8b34ccff073a99 100644
--- a/drivers/pci/controller/pci-loongson.c
+++ b/drivers/pci/controller/pci-loongson.c
@@ -80,13 +80,49 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_LPC, system_bus_quirk);
+/*
+ * Some Loongson PCIe ports have hardware limitations on their Maximum Read
+ * Request Size; they can't handle requests larger than that limit. Sane
+ * firmware will set a proper MRRS at boot, so we only need no_inc_mrrs for
+ * bridges. However, some MIPS Loongson firmware doesn't set the MRRS
+ * properly, so we have to enforce the maximum safe MRRS, which is 256 bytes.
+ */
+#ifdef CONFIG_MIPS
+static void loongson_set_min_mrrs_quirk(struct pci_dev *pdev)
+{
+ struct pci_bus *bus = pdev->bus;
+ struct pci_dev *bridge;
+ static const struct pci_device_id bridge_devids[] = {
+ { PCI_VDEVICE(LOONGSON, DEV_LS2K_PCIE_PORT0) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT0) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT1) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT2) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT3) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT4) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT5) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT6) },
+ { 0, },
+ };
+
+ /* look for the matching bridge */
+ while (!pci_is_root_bus(bus)) {
+ bridge = bus->self;
+ bus = bus->parent;
+
+ if (pci_match_id(bridge_devids, bridge)) {
+ if (pcie_get_readrq(pdev) > 256) {
+ pci_info(pdev, "limiting MRRS to 256\n");
+ pcie_set_readrq(pdev, 256);
+ }
+ break;
+ }
+ }
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_set_min_mrrs_quirk);
+#endif
+
static void loongson_mrrs_quirk(struct pci_dev *pdev)
{
- /*
- * Some Loongson PCIe ports have h/w limitations of maximum read
- * request size. They can't handle anything larger than this. So
- * force this limit on any devices attached under these ports.
- */
struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
bridge->no_inc_mrrs = 1;
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 94ba61fe1c4411..0452cbc362eef7 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -751,7 +751,7 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
if (!(features & VMD_FEAT_BIOS_PM_QUIRK))
return 0;
- pci_enable_link_state(pdev, PCIE_LINK_STATE_ALL);
+ pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
if (!pos)
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 601129772b2d50..5b1f271c6034be 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -512,15 +512,12 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
if (pass && dev->subordinate) {
check_hotplug_bridge(slot, dev);
pcibios_resource_survey_bus(dev->subordinate);
- if (pci_is_root_bus(bus))
- __pci_bus_size_bridges(dev->subordinate, &add_list);
+ __pci_bus_size_bridges(dev->subordinate,
+ &add_list);
}
}
}
- if (pci_is_root_bus(bus))
- __pci_bus_assign_resources(bus, &add_list, NULL);
- else
- pci_assign_unassigned_bridge_resources(bus->self);
+ __pci_bus_assign_resources(bus, &add_list, NULL);
}
acpiphp_sanitize_bus(bus);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 50b04ae5c394e3..5dab531c8654d6 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1041,7 +1041,7 @@ static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
return bridge->link_state;
}
-static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked)
{
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
@@ -1060,7 +1060,7 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
return -EPERM;
}
- if (sem)
+ if (!locked)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
if (state & PCIE_LINK_STATE_L0S)
@@ -1082,7 +1082,7 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
link->clkpm_disable = 1;
pcie_set_clkpm(link, policy_to_clkpm_state(link));
mutex_unlock(&aspm_lock);
- if (sem)
+ if (!locked)
up_read(&pci_bus_sem);
return 0;
@@ -1090,7 +1090,9 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
- return __pci_disable_link_state(pdev, state, false);
+ lockdep_assert_held_read(&pci_bus_sem);
+
+ return __pci_disable_link_state(pdev, state, true);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);
@@ -1105,21 +1107,11 @@ EXPORT_SYMBOL(pci_disable_link_state_locked);
*/
int pci_disable_link_state(struct pci_dev *pdev, int state)
{
- return __pci_disable_link_state(pdev, state, true);
+ return __pci_disable_link_state(pdev, state, false);
}
EXPORT_SYMBOL(pci_disable_link_state);
-/**
- * pci_enable_link_state - Clear and set the default device link state so that
- * the link may be allowed to enter the specified states. Note that if the
- * BIOS didn't grant ASPM control to the OS, this does nothing because we can't
- * touch the LNKCTL register. Also note that this does not enable states
- * disabled by pci_disable_link_state(). Return 0 or a negative errno.
- *
- * @pdev: PCI device
- * @state: Mask of ASPM link states to enable
- */
-int pci_enable_link_state(struct pci_dev *pdev, int state)
+static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked)
{
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
@@ -1136,7 +1128,8 @@ int pci_enable_link_state(struct pci_dev *pdev, int state)
return -EPERM;
}
- down_read(&pci_bus_sem);
+ if (!locked)
+ down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
link->aspm_default = 0;
if (state & PCIE_LINK_STATE_L0S)
@@ -1157,12 +1150,48 @@ int pci_enable_link_state(struct pci_dev *pdev, int state)
link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0;
pcie_set_clkpm(link, policy_to_clkpm_state(link));
mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
+ if (!locked)
+ up_read(&pci_bus_sem);
return 0;
}
+
+/**
+ * pci_enable_link_state - Clear and set the default device link state so that
+ * the link may be allowed to enter the specified states. Note that if the
+ * BIOS didn't grant ASPM control to the OS, this does nothing because we can't
+ * touch the LNKCTL register. Also note that this does not enable states
+ * disabled by pci_disable_link_state(). Return 0 or a negative errno.
+ *
+ * @pdev: PCI device
+ * @state: Mask of ASPM link states to enable
+ */
+int pci_enable_link_state(struct pci_dev *pdev, int state)
+{
+ return __pci_enable_link_state(pdev, state, false);
+}
EXPORT_SYMBOL(pci_enable_link_state);
+/**
+ * pci_enable_link_state_locked - Clear and set the default device link state
+ * so that the link may be allowed to enter the specified states. Note that if
+ * the BIOS didn't grant ASPM control to the OS, this does nothing because we
+ * can't touch the LNKCTL register. Also note that this does not enable states
+ * disabled by pci_disable_link_state(). Return 0 or a negative errno.
+ *
+ * @pdev: PCI device
+ * @state: Mask of ASPM link states to enable
+ *
+ * Context: Caller holds pci_bus_sem read lock.
+ */
+int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
+{
+ lockdep_assert_held_read(&pci_bus_sem);
+
+ return __pci_enable_link_state(pdev, state, true);
+}
+EXPORT_SYMBOL(pci_enable_link_state_locked);
+
static int pcie_aspm_set_policy(const char *val,
const struct kernel_param *kp)
{
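
A hedged sketch of how the new locked/unlocked split is meant to be used. pci_walk_bus() takes pci_bus_sem for reading before invoking its callback, so a callback (like the qcom and vmd callers patched elsewhere in this series) must use the _locked variants, while code that does not already hold pci_bus_sem keeps using the plain ones. The example_* function names below are illustrative only, not from the kernel tree.

#include <linux/pci.h>

/* Runs with pci_bus_sem held for reading, taken by pci_walk_bus(). */
static int example_enable_aspm(struct pci_dev *pdev, void *userdata)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
	return 0;
}

static void example_host_post_init(struct pci_bus *bus)
{
	/* pci_walk_bus() acquires pci_bus_sem itself; don't take it here. */
	pci_walk_bus(bus, example_enable_aspm, NULL);
}

/* Outside any pci_bus_sem context the unlocked variant remains correct. */
static void example_standalone(struct pci_dev *pdev)
{
	pci_enable_link_state(pdev, PCIE_LINK_STATE_ALL);
}
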
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 014010d0358823..847b0dc41293d2 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -1816,7 +1816,7 @@ static int arm_cmn_event_add(struct perf_event *event, int flags)
idx = 0;
while (cmn->dtc[j].counters[idx])
if (++idx == CMN_DT_NUM_COUNTERS)
- goto free_dtms;
+ return -ENOSPC;
}
hw->dtc_idx[j] = idx;
}
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c
index f021ec5a70e5c3..553725e1269c9d 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c
@@ -100,7 +100,7 @@ static void mtk_mipi_tx_pll_disable(struct clk_hw *hw)
static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
- return clamp_val(rate, 50000000, 1600000000);
+ return clamp_val(rate, 125000000, 1600000000);
}
static const struct clk_ops mtk_mipi_tx_pll_ops = {
diff --git a/drivers/phy/sunplus/phy-sunplus-usb2.c b/drivers/phy/sunplus/phy-sunplus-usb2.c
index 0efe74ac9c6afc..637a5fbae6d9a9 100644
--- a/drivers/phy/sunplus/phy-sunplus-usb2.c
+++ b/drivers/phy/sunplus/phy-sunplus-usb2.c
@@ -275,7 +275,7 @@ static int sp_usb_phy_probe(struct platform_device *pdev)
phy = devm_phy_create(&pdev->dev, NULL, &sp_uphy_ops);
if (IS_ERR(phy)) {
- ret = -PTR_ERR(phy);
+ ret = PTR_ERR(phy);
return ret;
}
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index 555b323f45da1e..bc847d3879f79c 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -64,6 +64,7 @@ struct phy_gmii_sel_priv {
u32 num_ports;
u32 reg_offset;
u32 qsgmii_main_ports;
+ bool no_offset;
};
static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
@@ -402,7 +403,8 @@ static int phy_gmii_sel_init_ports(struct phy_gmii_sel_priv *priv)
priv->num_ports = size / sizeof(u32);
if (!priv->num_ports)
return -EINVAL;
- priv->reg_offset = __be32_to_cpu(*offset);
+ if (!priv->no_offset)
+ priv->reg_offset = __be32_to_cpu(*offset);
}
if_phys = devm_kcalloc(dev, priv->num_ports,
@@ -471,6 +473,7 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
dev_err(dev, "Failed to get syscon %d\n", ret);
return ret;
}
+ priv->no_offset = true;
}
ret = phy_gmii_sel_init_ports(priv);
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index fb58acb843d90a..03ecb3d1aaf60d 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -923,6 +923,15 @@ static int amd_gpio_suspend(struct device *dev)
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
+
+ /* mask any interrupts not intended to be wake sources */
+ if (!(gpio_dev->saved_regs[i] & WAKE_SOURCE)) {
+ writel(gpio_dev->saved_regs[i] & ~BIT(INTERRUPT_MASK_OFF),
+ gpio_dev->base + pin * 4);
+ pm_pr_dbg("Disabling GPIO #%d interrupt for suspend.\n",
+ pin);
+ }
+
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 34c5c3e71fb261..cf59089f277639 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -80,6 +80,11 @@
#define FUNCTION_MASK GENMASK(1, 0)
#define FUNCTION_INVALID GENMASK(7, 0)
+#define WAKE_SOURCE (BIT(WAKE_CNTRL_OFF_S0I3) | \
+ BIT(WAKE_CNTRL_OFF_S3) | \
+ BIT(WAKE_CNTRL_OFF_S4) | \
+ BIT(WAKECNTRL_Z_OFF))
+
struct amd_function {
const char *name;
const char * const groups[NSELECTS];
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 383309e533c3d7..a27c01fcbb47ed 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -1068,6 +1068,13 @@ static const struct of_device_id atmel_pctrl_of_match[] = {
}
};
+/*
+ * These lock classes tell lockdep that the parent IRQ and the child IRQs do
+ * not share the same class, so it does not raise false positives.
+ */
+static struct lock_class_key atmel_lock_key;
+static struct lock_class_key atmel_request_key;
+
static int atmel_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1214,6 +1221,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
irq_set_chip_and_handler(irq, &atmel_gpio_irq_chip,
handle_simple_irq);
irq_set_chip_data(irq, atmel_pioctrl);
+ irq_set_lockdep_class(irq, &atmel_lock_key, &atmel_request_key);
dev_dbg(dev,
"atmel gpio irq domain: hwirq: %d, linux irq: %d\n",
i, irq);
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
index 4ccfa99ed93a10..fe9545c630a2fa 100644
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -308,6 +308,9 @@ static const char * const cy8c95x0_groups[] = {
"gp77",
};
+static int cy8c95x0_pinmux_direction(struct cy8c95x0_pinctrl *chip,
+ unsigned int pin, bool input);
+
static inline u8 cypress_get_port(struct cy8c95x0_pinctrl *chip, unsigned int pin)
{
/* Account for GPORT2 which only has 4 bits */
@@ -712,6 +715,8 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
ret = regmap_read(chip->regmap, reg, &reg_val);
if (reg_val & bit)
arg = 1;
+ if (param == PIN_CONFIG_OUTPUT_ENABLE)
+ arg = !arg;
*config = pinconf_to_config_packed(param, (u16)arg);
out:
@@ -727,6 +732,7 @@ static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip,
u8 port = cypress_get_port(chip, off);
u8 bit = cypress_get_pin_mask(chip, off);
unsigned long param = pinconf_to_config_param(config);
+ unsigned long arg = pinconf_to_config_argument(config);
unsigned int reg;
int ret;
@@ -765,6 +771,12 @@ static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip,
case PIN_CONFIG_MODE_PWM:
reg = CY8C95X0_PWMSEL;
break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ ret = cy8c95x0_pinmux_direction(chip, off, !arg);
+ goto out;
+ case PIN_CONFIG_INPUT_ENABLE:
+ ret = cy8c95x0_pinmux_direction(chip, off, arg);
+ goto out;
default:
ret = -ENOTSUPP;
goto out;
@@ -822,7 +834,7 @@ static int cy8c95x0_setup_gpiochip(struct cy8c95x0_pinctrl *chip)
gc->get_direction = cy8c95x0_gpio_get_direction;
gc->get_multiple = cy8c95x0_gpio_get_multiple;
gc->set_multiple = cy8c95x0_gpio_set_multiple;
- gc->set_config = gpiochip_generic_config,
+ gc->set_config = gpiochip_generic_config;
gc->can_sleep = true;
gc->add_pin_ranges = cy8c95x0_add_pin_ranges;
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
index ea70b8c61679f0..b29b0ab9892b8e 100644
--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
@@ -492,7 +492,7 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
nmaps = 0;
ngroups = 0;
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
int npinmux = of_property_count_u32_elems(child, "pinmux");
int npins = of_property_count_u32_elems(child, "pins");
@@ -527,7 +527,7 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
nmaps = 0;
ngroups = 0;
mutex_lock(&sfp->mutex);
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
int npins;
int i;
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
index 9d71e8c1331020..6de11a4057346d 100644
--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
@@ -135,7 +135,7 @@ static int jh7110_dt_node_to_map(struct pinctrl_dev *pctldev,
int ret;
ngroups = 0;
- for_each_child_of_node(np, child)
+ for_each_available_child_of_node(np, child)
ngroups += 1;
nmaps = 2 * ngroups;
@@ -150,7 +150,7 @@ static int jh7110_dt_node_to_map(struct pinctrl_dev *pctldev,
nmaps = 0;
ngroups = 0;
mutex_lock(&sfp->mutex);
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
int npins = of_property_count_u32_elems(child, "pinmux");
int *pins;
u32 *pinmux;
diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
index 6bbffb081053e5..b456370166b6bb 100644
--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
@@ -16,12 +16,17 @@
struct quirk_entry {
u32 s2idle_bug_mmio;
+ bool spurious_8042;
};
static struct quirk_entry quirk_s2idle_bug = {
.s2idle_bug_mmio = 0xfed80380,
};
+static struct quirk_entry quirk_spurious_8042 = {
+ .spurious_8042 = true,
+};
+
static const struct dmi_system_id fwbug_list[] = {
{
.ident = "L14 Gen2 AMD",
@@ -193,6 +198,16 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP Laptop 15s-eq2xxx"),
}
},
+ /* https://community.frame.work/t/tracking-framework-amd-ryzen-7040-series-lid-wakeup-behavior-feedback/39128 */
+ {
+ .ident = "Framework Laptop 13 (Phoenix)",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"),
+ DMI_MATCH(DMI_BIOS_VERSION, "03.03"),
+ }
+ },
{}
};
@@ -235,6 +250,9 @@ void amd_pmc_quirks_init(struct amd_pmc_dev *dev)
{
const struct dmi_system_id *dmi_id;
+ if (dev->cpu_id == AMD_CPU_ID_CZN)
+ dev->disable_8042_wakeup = true;
+
dmi_id = dmi_first_match(fwbug_list);
if (!dmi_id)
return;
@@ -242,4 +260,6 @@ void amd_pmc_quirks_init(struct amd_pmc_dev *dev)
if (dev->quirks->s2idle_bug_mmio)
pr_info("Using s2idle quirk to avoid %s platform firmware bug\n",
dmi_id->ident);
+ if (dev->quirks->spurious_8042)
+ dev->disable_8042_wakeup = true;
}
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index c3104714b48027..864c8cc2f8a3c5 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -91,16 +91,6 @@
#define SMU_MSG_LOG_RESET 0x07
#define SMU_MSG_LOG_DUMP_DATA 0x08
#define SMU_MSG_GET_SUP_CONSTRAINTS 0x09
-/* List of supported CPU ids */
-#define AMD_CPU_ID_RV 0x15D0
-#define AMD_CPU_ID_RN 0x1630
-#define AMD_CPU_ID_PCO AMD_CPU_ID_RV
-#define AMD_CPU_ID_CZN AMD_CPU_ID_RN
-#define AMD_CPU_ID_YC 0x14B5
-#define AMD_CPU_ID_CB 0x14D8
-#define AMD_CPU_ID_PS 0x14E8
-#define AMD_CPU_ID_SP 0x14A4
-#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
#define PMC_MSG_DELAY_MIN_US 50
#define RESPONSE_REGISTER_LOOP_MAX 20000
@@ -766,19 +756,22 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
return -EINVAL;
}
-static int amd_pmc_czn_wa_irq1(struct amd_pmc_dev *pdev)
+static int amd_pmc_wa_irq1(struct amd_pmc_dev *pdev)
{
struct device *d;
int rc;
- if (!pdev->major) {
- rc = amd_pmc_get_smu_version(pdev);
- if (rc)
- return rc;
- }
+ /* cezanne platform firmware has a fix in 64.66.0 */
+ if (pdev->cpu_id == AMD_CPU_ID_CZN) {
+ if (!pdev->major) {
+ rc = amd_pmc_get_smu_version(pdev);
+ if (rc)
+ return rc;
+ }
- if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65))
- return 0;
+ if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65))
+ return 0;
+ }
d = bus_find_device_by_name(&serio_bus, NULL, "serio0");
if (!d)
@@ -937,8 +930,8 @@ static int amd_pmc_suspend_handler(struct device *dev)
{
struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
- if (pdev->cpu_id == AMD_CPU_ID_CZN && !disable_workarounds) {
- int rc = amd_pmc_czn_wa_irq1(pdev);
+ if (pdev->disable_8042_wakeup && !disable_workarounds) {
+ int rc = amd_pmc_wa_irq1(pdev);
if (rc) {
dev_err(pdev->dev, "failed to adjust keyboard wakeup: %d\n", rc);
diff --git a/drivers/platform/x86/amd/pmc/pmc.h b/drivers/platform/x86/amd/pmc/pmc.h
index c27bd6a5642f48..b4794f118739f6 100644
--- a/drivers/platform/x86/amd/pmc/pmc.h
+++ b/drivers/platform/x86/amd/pmc/pmc.h
@@ -36,9 +36,21 @@ struct amd_pmc_dev {
struct mutex lock; /* generic mutex lock */
struct dentry *dbgfs_dir;
struct quirk_entry *quirks;
+ bool disable_8042_wakeup;
};
void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev);
void amd_pmc_quirks_init(struct amd_pmc_dev *dev);
+/* List of supported CPU ids */
+#define AMD_CPU_ID_RV 0x15D0
+#define AMD_CPU_ID_RN 0x1630
+#define AMD_CPU_ID_PCO AMD_CPU_ID_RV
+#define AMD_CPU_ID_CZN AMD_CPU_ID_RN
+#define AMD_CPU_ID_YC 0x14B5
+#define AMD_CPU_ID_CB 0x14D8
+#define AMD_CPU_ID_PS 0x14E8
+#define AMD_CPU_ID_SP 0x14A4
+#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
+
#endif /* PMC_H */
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 84c175b9721a0b..e95d3011b9997d 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -472,7 +472,7 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value)
* is based on the contiguous indexes from ltr_show output.
* pmc index and ltr index needs to be calculated from it.
*/
- for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs) && ltr_index > 0; pmc_index++) {
+ for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs) && ltr_index >= 0; pmc_index++) {
pmc = pmcdev->pmcs[pmc_index];
if (!pmc)
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
index 6fa1735ad7a49a..210b0a81b7ecbe 100644
--- a/drivers/platform/x86/intel/vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -73,10 +73,10 @@ struct intel_vbtn_priv {
bool wakeup_mode;
};
-static void detect_tablet_mode(struct platform_device *device)
+static void detect_tablet_mode(struct device *dev)
{
- struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
- acpi_handle handle = ACPI_HANDLE(&device->dev);
+ struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
+ acpi_handle handle = ACPI_HANDLE(dev);
unsigned long long vgbs;
acpi_status status;
int m;
@@ -89,6 +89,8 @@ static void detect_tablet_mode(struct platform_device *device)
input_report_switch(priv->switches_dev, SW_TABLET_MODE, m);
m = (vgbs & VGBS_DOCK_MODE_FLAG) ? 1 : 0;
input_report_switch(priv->switches_dev, SW_DOCK, m);
+
+ input_sync(priv->switches_dev);
}
/*
@@ -134,7 +136,7 @@ static int intel_vbtn_input_setup(struct platform_device *device)
priv->switches_dev->id.bustype = BUS_HOST;
if (priv->has_switches) {
- detect_tablet_mode(device);
+ detect_tablet_mode(&device->dev);
ret = input_register_device(priv->switches_dev);
if (ret)
@@ -198,6 +200,9 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE);
sparse_keymap_report_event(input_dev, event, val, autorelease);
+
+ /* Some devices need this to report further events */
+ acpi_evaluate_object(handle, "VBDL", NULL, NULL);
}
/*
@@ -352,7 +357,13 @@ static void intel_vbtn_pm_complete(struct device *dev)
static int intel_vbtn_pm_resume(struct device *dev)
{
+ struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
+
intel_vbtn_pm_complete(dev);
+
+ if (priv->has_switches)
+ detect_tablet_mode(dev);
+
return 0;
}
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 4dfdbfca684172..c66808601fdd08 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -590,6 +590,8 @@ static void ips_disable_gpu_turbo(struct ips_driver *ips)
* @ips: IPS driver struct
*
* Check whether the MCP is over its thermal or power budget.
+ *
+ * Returns: %true if the temp or power has exceeded its maximum, else %false
*/
static bool mcp_exceeded(struct ips_driver *ips)
{
@@ -619,6 +621,8 @@ static bool mcp_exceeded(struct ips_driver *ips)
* @cpu: CPU number to check
*
* Check a given CPU's average temp or power is over its limit.
+ *
+ * Returns: %true if the temp or power has exceeded its maximum, else %false
*/
static bool cpu_exceeded(struct ips_driver *ips, int cpu)
{
@@ -645,6 +649,8 @@ static bool cpu_exceeded(struct ips_driver *ips, int cpu)
* @ips: IPS driver struct
*
* Check the MCH temp & power against their maximums.
+ *
+ * Returns: %true if the temp or power has exceeded its maximum, else %false
*/
static bool mch_exceeded(struct ips_driver *ips)
{
@@ -742,12 +748,13 @@ static void update_turbo_limits(struct ips_driver *ips)
* - down (at TDP limit)
* - adjust both CPU and GPU down if possible
*
- cpu+ gpu+ cpu+gpu- cpu-gpu+ cpu-gpu-
-cpu < gpu < cpu+gpu+ cpu+ gpu+ nothing
-cpu < gpu >= cpu+gpu-(mcp<) cpu+gpu-(mcp<) gpu- gpu-
-cpu >= gpu < cpu-gpu+(mcp<) cpu- cpu-gpu+(mcp<) cpu-
-cpu >= gpu >= cpu-gpu- cpu-gpu- cpu-gpu- cpu-gpu-
+ * |cpu+ gpu+ cpu+gpu- cpu-gpu+ cpu-gpu-
+ * cpu < gpu < |cpu+gpu+ cpu+ gpu+ nothing
+ * cpu < gpu >= |cpu+gpu-(mcp<) cpu+gpu-(mcp<) gpu- gpu-
+ * cpu >= gpu < |cpu-gpu+(mcp<) cpu- cpu-gpu+(mcp<) cpu-
+ * cpu >= gpu >=|cpu-gpu- cpu-gpu- cpu-gpu- cpu-gpu-
*
+ * Returns: %0
*/
static int ips_adjust(void *data)
{
@@ -935,11 +942,13 @@ static void monitor_timeout(struct timer_list *t)
* @data: ips driver structure
*
* This is the main function for the IPS driver. It monitors power and
- * tempurature in the MCP and adjusts CPU and GPU power clams accordingly.
+ * temperature in the MCP and adjusts CPU and GPU power clamps accordingly.
*
- * We keep a 5s moving average of power consumption and tempurature. Using
+ * We keep a 5s moving average of power consumption and temperature. Using
* that data, along with CPU vs GPU preference, we adjust the power clamps
* up or down.
+ *
+ * Returns: %0 on success or -errno on error
*/
static int ips_monitor(void *data)
{
@@ -1146,6 +1155,8 @@ static void dump_thermal_info(struct ips_driver *ips)
* Handle temperature limit trigger events, generally by lowering the clamps.
* If we're at a critical limit, we clamp back to the lowest possible value
* to prevent emergency shutdown.
+ *
+ * Returns: IRQ_NONE or IRQ_HANDLED
*/
static irqreturn_t ips_irq_handler(int irq, void *arg)
{
@@ -1293,9 +1304,12 @@ static void ips_debugfs_init(struct ips_driver *ips)
/**
* ips_detect_cpu - detect whether CPU supports IPS
+ * @ips: IPS driver struct
*
* Walk our list and see if we're on a supported CPU. If we find one,
* return the limits for it.
+ *
+ * Returns: the &ips_mcp_limits struct that matches the boot CPU or %NULL
*/
static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
{
@@ -1352,6 +1366,8 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
* monitor and control graphics turbo mode. If we can find them, we can
* enable graphics turbo, otherwise we must disable it to avoid exceeding
* thermal and power limits in the MCP.
+ *
+ * Returns: %true if the required symbols are found, else %false
*/
static bool ips_get_i915_syms(struct ips_driver *ips)
{
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index d0b5fd4137bcdc..c4895e9bc7148a 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -512,10 +512,10 @@ struct tpacpi_quirk {
* Iterates over a quirks list until one is found that matches the
* ThinkPad's vendor, BIOS and EC model.
*
- * Returns 0 if nothing matches, otherwise returns the quirks field of
+ * Returns: %0 if nothing matches, otherwise returns the quirks field of
* the matching &struct tpacpi_quirk entry.
*
- * The match criteria is: vendor, ec and bios much match.
+ * The match criteria is: vendor, ec and bios must match.
*/
static unsigned long __init tpacpi_check_quirks(
const struct tpacpi_quirk *qlist,
@@ -7948,8 +7948,19 @@ static struct ibm_struct volume_driver_data = {
* TPACPI_FAN_WR_TPEC is also available and should be used to
* command the fan. The X31/X40/X41 seems to have 8 fan levels,
* but the ACPI tables just mention level 7.
+ *
+ * TPACPI_FAN_RD_TPEC_NS:
+ * This mode is used for a few ThinkPads (L13 Yoga Gen2, X13 Yoga Gen2, etc.)
+ * that use non-standard EC locations for reporting fan speeds.
+ * Currently these platforms only provide fan RPM reporting.
+ *
*/
+#define FAN_RPM_CAL_CONST 491520 /* FAN RPM calculation constant for some non-standard EC firmware */
+
+#define FAN_NS_CTRL_STATUS BIT(2) /* Bit which indicates whether fan control is enabled */
+#define FAN_NS_CTRL BIT(4) /* Bit which indicates whether control is by the host or the EC */
+
enum { /* Fan control constants */
fan_status_offset = 0x2f, /* EC register 0x2f */
fan_rpm_offset = 0x84, /* EC register 0x84: LSB, 0x85 MSB (RPM)
@@ -7957,6 +7968,11 @@ enum { /* Fan control constants */
fan_select_offset = 0x31, /* EC register 0x31 (Firmware 7M)
bit 0 selects which fan is active */
+ fan_status_offset_ns = 0x93, /* Special status/control offset for non-standard EC Fan1 */
+ fan2_status_offset_ns = 0x96, /* Special status/control offset for non-standard EC Fan2 */
+ fan_rpm_status_ns = 0x95, /* Special offset for Fan1 RPM status for non-standard EC */
+ fan2_rpm_status_ns = 0x98, /* Special offset for Fan2 RPM status for non-standard EC */
+
TP_EC_FAN_FULLSPEED = 0x40, /* EC fan mode: full speed */
TP_EC_FAN_AUTO = 0x80, /* EC fan mode: auto fan control */
@@ -7967,6 +7983,7 @@ enum fan_status_access_mode {
TPACPI_FAN_NONE = 0, /* No fan status or control */
TPACPI_FAN_RD_ACPI_GFAN, /* Use ACPI GFAN */
TPACPI_FAN_RD_TPEC, /* Use ACPI EC regs 0x2f, 0x84-0x85 */
+ TPACPI_FAN_RD_TPEC_NS, /* Use non-standard ACPI EC regs (eg: L13 Yoga gen2 etc.) */
};
enum fan_control_access_mode {
@@ -7994,6 +8011,8 @@ static u8 fan_control_desired_level;
static u8 fan_control_resume_level;
static int fan_watchdog_maxinterval;
+static bool fan_with_ns_addr;
+
static struct mutex fan_mutex;
static void fan_watchdog_fire(struct work_struct *ignored);
@@ -8123,6 +8142,15 @@ static int fan_get_status(u8 *status)
}
break;
+ case TPACPI_FAN_RD_TPEC_NS:
+ /* Default mode is AUTO which means controlled by EC */
+ if (!acpi_ec_read(fan_status_offset_ns, &s))
+ return -EIO;
+
+ if (status)
+ *status = s;
+
+ break;
default:
return -ENXIO;
@@ -8139,7 +8167,8 @@ static int fan_get_status_safe(u8 *status)
if (mutex_lock_killable(&fan_mutex))
return -ERESTARTSYS;
rc = fan_get_status(&s);
- if (!rc)
+ /* NS EC doesn't have a register with level settings */
+ if (!rc && !fan_with_ns_addr)
fan_update_desired_level(s);
mutex_unlock(&fan_mutex);
@@ -8166,7 +8195,13 @@ static int fan_get_speed(unsigned int *speed)
if (likely(speed))
*speed = (hi << 8) | lo;
+ break;
+ case TPACPI_FAN_RD_TPEC_NS:
+ if (!acpi_ec_read(fan_rpm_status_ns, &lo))
+ return -EIO;
+ if (speed)
+ *speed = lo ? FAN_RPM_CAL_CONST / lo : 0;
break;
default:
@@ -8178,7 +8213,7 @@ static int fan_get_speed(unsigned int *speed)
static int fan2_get_speed(unsigned int *speed)
{
- u8 hi, lo;
+ u8 hi, lo, status;
bool rc;
switch (fan_status_access_mode) {
@@ -8194,7 +8229,21 @@ static int fan2_get_speed(unsigned int *speed)
if (likely(speed))
*speed = (hi << 8) | lo;
+ break;
+ case TPACPI_FAN_RD_TPEC_NS:
+ rc = !acpi_ec_read(fan2_status_offset_ns, &status);
+ if (rc)
+ return -EIO;
+ if (!(status & FAN_NS_CTRL_STATUS)) {
+ pr_info("secondary fan control not supported\n");
+ return -EIO;
+ }
+ rc = !acpi_ec_read(fan2_rpm_status_ns, &lo);
+ if (rc)
+ return -EIO;
+ if (speed)
+ *speed = lo ? FAN_RPM_CAL_CONST / lo : 0;
break;
default:
@@ -8697,6 +8746,7 @@ static const struct attribute_group fan_driver_attr_group = {
#define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */
#define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */
#define TPACPI_FAN_NOFAN 0x0008 /* no fan available */
+#define TPACPI_FAN_NS 0x0010 /* For EC with non-Standard register addresses */
static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1),
@@ -8715,6 +8765,8 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */
TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */
TPACPI_Q_LNV3('N', '3', '7', TPACPI_FAN_2CTL), /* T15g (2nd gen) */
+ TPACPI_Q_LNV3('R', '1', 'F', TPACPI_FAN_NS), /* L13 Yoga Gen 2 */
+ TPACPI_Q_LNV3('N', '2', 'U', TPACPI_FAN_NS), /* X13 Yoga Gen 2 */
TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */
};
@@ -8749,18 +8801,27 @@ static int __init fan_init(struct ibm_init_struct *iibm)
return -ENODEV;
}
+ if (quirks & TPACPI_FAN_NS) {
+ pr_info("ECFW with non-standard fan reg control found\n");
+ fan_with_ns_addr = 1;
+ /* Fan ctrl support from host is undefined for now */
+ tp_features.fan_ctrl_status_undef = 1;
+ }
+
if (gfan_handle) {
/* 570, 600e/x, 770e, 770x */
fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN;
} else {
/* all other ThinkPads: note that even old-style
* ThinkPad ECs supports the fan control register */
- if (likely(acpi_ec_read(fan_status_offset,
- &fan_control_initial_status))) {
+ if (fan_with_ns_addr ||
+ likely(acpi_ec_read(fan_status_offset, &fan_control_initial_status))) {
int res;
unsigned int speed;
- fan_status_access_mode = TPACPI_FAN_RD_TPEC;
+ fan_status_access_mode = fan_with_ns_addr ?
+ TPACPI_FAN_RD_TPEC_NS : TPACPI_FAN_RD_TPEC;
+
if (quirks & TPACPI_FAN_Q1)
fan_quirk1_setup();
/* Try and probe the 2nd fan */
@@ -8769,7 +8830,8 @@ static int __init fan_init(struct ibm_init_struct *iibm)
if (res >= 0 && speed != FAN_NOT_PRESENT) {
/* It responded - so let's assume it's there */
tp_features.second_fan = 1;
- tp_features.second_fan_ctl = 1;
+ /* fan control not currently available for ns ECFW */
+ tp_features.second_fan_ctl = !fan_with_ns_addr;
pr_info("secondary fan control detected & enabled\n");
} else {
/* Fan not auto-detected */
@@ -8944,6 +9006,7 @@ static int fan_read(struct seq_file *m)
str_enabled_disabled(status), status);
break;
+ case TPACPI_FAN_RD_TPEC_NS:
case TPACPI_FAN_RD_TPEC:
/* all except 570, 600e/x, 770e, 770x */
rc = fan_get_status_safe(&status);
@@ -8958,13 +9021,22 @@ static int fan_read(struct seq_file *m)
seq_printf(m, "speed:\t\t%d\n", speed);
- if (status & TP_EC_FAN_FULLSPEED)
- /* Disengaged mode takes precedence */
- seq_printf(m, "level:\t\tdisengaged\n");
- else if (status & TP_EC_FAN_AUTO)
- seq_printf(m, "level:\t\tauto\n");
- else
- seq_printf(m, "level:\t\t%d\n", status);
+ if (fan_status_access_mode == TPACPI_FAN_RD_TPEC_NS) {
+ /*
+ * There is no full-speed bit in the NS EC.
+ * EC auto mode is set by default.
+ * No other level settings are available.
+ */
+ seq_printf(m, "level:\t\t%s\n", status & FAN_NS_CTRL ? "unknown" : "auto");
+ } else {
+ if (status & TP_EC_FAN_FULLSPEED)
+ /* Disengaged mode takes precedence */
+ seq_printf(m, "level:\t\tdisengaged\n");
+ else if (status & TP_EC_FAN_AUTO)
+ seq_printf(m, "level:\t\tauto\n");
+ else
+ seq_printf(m, "level:\t\t%d\n", status);
+ }
break;
case TPACPI_FAN_NONE:
@@ -9303,7 +9375,7 @@ static struct tpacpi_battery_driver_data battery_info;
/* ACPI helpers/functions/probes */
-/**
+/*
 * This evaluates an ACPI method call specific to the battery
 * ACPI extension. The specifics are that an error is marked
 * in the 32nd bit of the response, so we just check that here.
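
For the non-standard EC fan path added above, the raw byte read from the EC is converted to RPM by dividing FAN_RPM_CAL_CONST (491520) by it, with zero meaning the fan is stopped. A small standalone sketch with made-up raw values, just to show the arithmetic:

#include <stdio.h>

#define FAN_RPM_CAL_CONST 491520

/* Mirror of the conversion used in fan_get_speed()/fan2_get_speed(). */
static unsigned int ns_ec_to_rpm(unsigned char raw)
{
	return raw ? FAN_RPM_CAL_CONST / raw : 0;
}

int main(void)
{
	const unsigned char samples[] = { 0x00, 0x30, 0x60, 0xc0 };
	size_t i;

	for (i = 0; i < sizeof(samples); i++)
		printf("raw 0x%02x -> %u RPM\n", samples[i], ns_ec_to_rpm(samples[i]));
	/* prints 0, 10240, 5120 and 2560 RPM respectively */
	return 0;
}
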
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 7ece6a8e985855..4d5a78d3c085bc 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -807,6 +807,9 @@ static void __reset_control_put_internal(struct reset_control *rstc)
{
lockdep_assert_held(&reset_list_mutex);
+ if (IS_ERR_OR_NULL(rstc))
+ return;
+
kref_put(&rstc->refcnt, __reset_control_release);
}
@@ -1017,11 +1020,8 @@ EXPORT_SYMBOL_GPL(reset_control_put);
void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
{
mutex_lock(&reset_list_mutex);
- while (num_rstcs--) {
- if (IS_ERR_OR_NULL(rstcs[num_rstcs].rstc))
- continue;
+ while (num_rstcs--)
__reset_control_put_internal(rstcs[num_rstcs].rstc);
- }
mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_bulk_put);
diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
index 8d1fce18ded78c..5c3267acd2b1c2 100644
--- a/drivers/reset/hisilicon/hi6220_reset.c
+++ b/drivers/reset/hisilicon/hi6220_reset.c
@@ -163,7 +163,7 @@ static int hi6220_reset_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- type = (enum hi6220_reset_ctrl_type)of_device_get_match_data(dev);
+ type = (uintptr_t)of_device_get_match_data(dev);
regmap = syscon_node_to_regmap(np);
if (IS_ERR(regmap)) {
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 3a9cc8a4a23022..ade95e91b3c8db 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -17,6 +17,7 @@
#include <linux/blk-mq.h>
#include <linux/slab.h>
#include <linux/list.h>
+#include <linux/io.h>
#include <asm/eadm.h>
#include "scm_blk.h"
@@ -130,7 +131,7 @@ static void scm_request_done(struct scm_request *scmrq)
for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
msb = &scmrq->aob->msb[i];
- aidaw = msb->data_addr;
+ aidaw = (u64)phys_to_virt(msb->data_addr);
if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
IS_ALIGNED(aidaw, PAGE_SIZE))
@@ -195,12 +196,12 @@ static int scm_request_prepare(struct scm_request *scmrq)
msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
msb->flags |= MSB_FLAG_IDA;
- msb->data_addr = (u64) aidaw;
+ msb->data_addr = (u64)virt_to_phys(aidaw);
rq_for_each_segment(bv, req, iter) {
WARN_ON(bv.bv_offset);
msb->blk_count += bv.bv_len >> 12;
- aidaw->data_addr = (u64) page_address(bv.bv_page);
+ aidaw->data_addr = virt_to_phys(page_address(bv.bv_page));
aidaw++;
}
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 73b6ac0c01f549..7d5a155073c627 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1678,7 +1678,6 @@ struct aac_dev
u32 handle_pci_error;
bool init_reset;
u8 soft_reset_support;
- u8 use_map_queue;
};
#define aac_adapter_interrupt(dev) \
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 013a9a334972eb..25cee03d7f9737 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -223,12 +223,8 @@ int aac_fib_setup(struct aac_dev * dev)
struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
{
struct fib *fibptr;
- u32 blk_tag;
- int i;
- blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
- i = blk_mq_unique_tag_to_tag(blk_tag);
- fibptr = &dev->fibs[i];
+ fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag];
/*
* Null out fields that depend on being zero at the start of
* each I/O
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index c4a36c0be527cd..68f4dbcfff4925 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -19,7 +19,6 @@
#include <linux/compat.h>
#include <linux/blkdev.h>
-#include <linux/blk-mq-pci.h>
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -505,15 +504,6 @@ common_config:
return 0;
}
-static void aac_map_queues(struct Scsi_Host *shost)
-{
- struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
-
- blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
- aac->pdev, 0);
- aac->use_map_queue = true;
-}
-
/**
* aac_change_queue_depth - alter queue depths
* @sdev: SCSI device we are considering
@@ -1498,7 +1488,6 @@ static const struct scsi_host_template aac_driver_template = {
.bios_param = aac_biosparm,
.shost_groups = aac_host_groups,
.slave_configure = aac_slave_configure,
- .map_queues = aac_map_queues,
.change_queue_depth = aac_change_queue_depth,
.sdev_groups = aac_dev_groups,
.eh_abort_handler = aac_eh_abort,
@@ -1786,8 +1775,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_lun = AAC_MAX_LUN;
pci_set_drvdata(pdev, shost);
- shost->nr_hw_queues = aac->max_msix;
- shost->host_tagset = 1;
error = scsi_add_host(shost, &pdev->dev);
if (error)
@@ -1919,7 +1906,6 @@ static void aac_remove_one(struct pci_dev *pdev)
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
aac_cancel_rescan_worker(aac);
- aac->use_map_queue = false;
scsi_remove_host(shost);
__aac_shutdown(aac);
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 61949f3741886b..11ef58204e96f1 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -493,10 +493,6 @@ static int aac_src_deliver_message(struct fib *fib)
#endif
u16 vector_no;
- struct scsi_cmnd *scmd;
- u32 blk_tag;
- struct Scsi_Host *shost = dev->scsi_host_ptr;
- struct blk_mq_queue_map *qmap;
atomic_inc(&q->numpending);
@@ -509,25 +505,8 @@ static int aac_src_deliver_message(struct fib *fib)
if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
&& dev->sa_firmware)
vector_no = aac_get_vector(dev);
- else {
- if (!fib->vector_no || !fib->callback_data) {
- if (shost && dev->use_map_queue) {
- qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
- vector_no = qmap->mq_map[raw_smp_processor_id()];
- }
- /*
- * We hardcode the vector_no for
- * reserved commands as a valid shost is
- * absent during the init
- */
- else
- vector_no = 0;
- } else {
- scmd = (struct scsi_cmnd *)fib->callback_data;
- blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
- vector_no = blk_mq_unique_tag_to_hwq(blk_tag);
- }
- }
+ else
+ vector_no = fib->vector_no;
if (native_hba) {
if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 05ddbb9bb7d8aa..451a58e0fd9691 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -429,7 +429,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
struct fcoe_ctlr *ctlr;
struct fcoe_rcv_info *fr;
struct fcoe_percpu_s *bg;
- struct sk_buff *tmp_skb;
interface = container_of(ptype, struct bnx2fc_interface,
fcoe_packet_type);
@@ -441,11 +440,9 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
goto err;
}
- tmp_skb = skb_share_check(skb, GFP_ATOMIC);
- if (!tmp_skb)
- goto err;
-
- skb = tmp_skb;
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ return -1;
if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index c67cdcdc3ba86d..1223d34c04da33 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1152,6 +1152,7 @@ retry:
scsi_log_send(scmd);
scmd->submitter = SUBMITTED_BY_SCSI_ERROR_HANDLER;
+ scmd->flags |= SCMD_LAST;
/*
* Lock sdev->state_mutex to avoid that scsi_device_quiesce() can
@@ -2459,6 +2460,7 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
scsi_init_command(dev, scmd);
scmd->submitter = SUBMITTED_BY_SCSI_RESET_IOCTL;
+ scmd->flags |= SCMD_LAST;
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
scmd->cmd_len = 0;
diff --git a/drivers/soundwire/intel_ace2x.c b/drivers/soundwire/intel_ace2x.c
index 82672fcbc2aa27..8280baa3254b0b 100644
--- a/drivers/soundwire/intel_ace2x.c
+++ b/drivers/soundwire/intel_ace2x.c
@@ -23,8 +23,9 @@
static void intel_shim_vs_init(struct sdw_intel *sdw)
{
void __iomem *shim_vs = sdw->link_res->shim_vs;
- u16 act = 0;
+ u16 act;
+ act = intel_readw(shim_vs, SDW_SHIM2_INTEL_VS_ACTMCTL);
u16p_replace_bits(&act, 0x1, SDW_SHIM2_INTEL_VS_ACTMCTL_DOAIS);
act |= SDW_SHIM2_INTEL_VS_ACTMCTL_DACTQE;
act |= SDW_SHIM2_INTEL_VS_ACTMCTL_DODS;
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 69719b335bcb1e..f048b3d55b2edc 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -742,14 +742,15 @@ error_1:
* sdw_ml_sync_bank_switch: Multilink register bank switch
*
* @bus: SDW bus instance
+ * @multi_link: whether this is a multi-link stream with hardware-based sync
*
* Caller function should free the buffers on error
*/
-static int sdw_ml_sync_bank_switch(struct sdw_bus *bus)
+static int sdw_ml_sync_bank_switch(struct sdw_bus *bus, bool multi_link)
{
unsigned long time_left;
- if (!bus->multi_link)
+ if (!multi_link)
return 0;
/* Wait for completion of transfer */
@@ -847,7 +848,7 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
bus->bank_switch_timeout = DEFAULT_BANK_SWITCH_TIMEOUT;
/* Check if bank switch was successful */
- ret = sdw_ml_sync_bank_switch(bus);
+ ret = sdw_ml_sync_bank_switch(bus, multi_link);
if (ret < 0) {
dev_err(bus->dev,
"multi link bank switch failed: %d\n", ret);
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 6aa8adbe4170cd..bad34998454a80 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -22,6 +22,7 @@
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/iopoll.h>
#include <trace/events/spi.h>
/* SPI register offsets */
@@ -233,9 +234,6 @@
*/
#define DMA_MIN_BYTES 16
-#define SPI_DMA_MIN_TIMEOUT (msecs_to_jiffies(1000))
-#define SPI_DMA_TIMEOUT_PER_10K (msecs_to_jiffies(4))
-
#define AUTOSUSPEND_TIMEOUT 2000
struct atmel_spi_caps {
@@ -279,6 +277,7 @@ struct atmel_spi {
bool keep_cs;
u32 fifo_size;
+ bool last_polarity;
u8 native_cs_free;
u8 native_cs_for_gpio;
};
@@ -292,6 +291,22 @@ struct atmel_spi_device {
#define INVALID_DMA_ADDRESS 0xffffffff
/*
+ * This frequency can be anything supported by the controller, but to avoid
+ * unnecessary delay, the highest possible frequency is chosen.
+ *
+ * This is the highest frequency that does not interfere with the other
+ * chip select registers (see the note on Serial Clock Bit Rate configuration in
+ * Atmel-11121F-ATARM-SAMA5D3-Series-Datasheet_02-Feb-16, page 1283).
+ */
+#define DUMMY_MSG_FREQUENCY 0x02
+/*
+ * 8 bits is the minimum amount of data the controller is capable of sending.
+ *
+ * The message content can be anything, as no SPI device is expected to act on it.
+ */
+#define DUMMY_MSG 0xAA
+
+/*
* Version 2 of the SPI controller has
* - CR.LASTXFER
* - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
@@ -305,6 +320,43 @@ static bool atmel_spi_is_v2(struct atmel_spi *as)
}
/*
+ * Send a dummy message.
+ *
+ * This is sometimes needed when using a CS GPIO to force clock transition when
+ * switching between devices with different polarities.
+ */
+static void atmel_spi_send_dummy(struct atmel_spi *as, struct spi_device *spi, int chip_select)
+{
+ u32 status;
+ u32 csr;
+
+ /*
+ * Set a clock frequency to allow sending a message on the SPI bus.
+ * The exact frequency does not matter, but one must be configured
+ * for the controller to send the data.
+ */
+ csr = spi_readl(as, CSR0 + 4 * chip_select);
+ csr = SPI_BFINS(SCBR, DUMMY_MSG_FREQUENCY, csr);
+ spi_writel(as, CSR0 + 4 * chip_select, csr);
+
+ /*
+ * Drain any data pending on the SPI bus; this must be done before
+ * the message can be sent.
+ */
+ spi_readl(as, RDR);
+ while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
+ spi_readl(as, RDR);
+ cpu_relax();
+ }
+
+ spi_writel(as, TDR, DUMMY_MSG);
+
+ readl_poll_timeout_atomic(as->regs + SPI_SR, status,
+ (status & SPI_BIT(TXEMPTY)), 1, 1000);
+}
+
+
+/*
* Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
* they assume that spi slave device state will not change on deselect, so
* that automagic deselection is OK. ("NPCSx rises if no data is to be
@@ -320,11 +372,17 @@ static bool atmel_spi_is_v2(struct atmel_spi *as)
* Master on Chip Select 0.") No workaround exists for that ... so for
* nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
* and (c) will trigger that first erratum in some cases.
+ *
+ * When changing the clock polarity, the SPI controller waits for the next
+ * transmission to enforce the default clock state. This may be an issue when
+ * using a GPIO as Chip Select: the clock level is applied only when the first
+ * packet is sent, once the CS has already been asserted. The workaround is to
+ * send a first (dummy) message before toggling the CS state.
*/
-
static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
struct atmel_spi_device *asd = spi->controller_state;
+ bool new_polarity;
int chip_select;
u32 mr;
@@ -353,6 +411,25 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
}
mr = spi_readl(as, MR);
+
+ /*
+ * Ensure the clock polarity is valid before we actually
+ * assert the CS, to avoid spurious clock edges being
+ * processed by the SPI devices.
+ */
+ if (spi_get_csgpiod(spi, 0)) {
+ new_polarity = (asd->csr & SPI_BIT(CPOL)) != 0;
+ if (new_polarity != as->last_polarity) {
+ /*
+ * Need to disable the GPIO before sending the dummy
+ * message because it is already set by the spi core.
+ */
+ gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), 0);
+ atmel_spi_send_dummy(as, spi, chip_select);
+ as->last_polarity = new_polarity;
+ gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), 1);
+ }
+ }
} else {
u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
int i;
@@ -1336,12 +1413,10 @@ static int atmel_spi_one_transfer(struct spi_controller *host,
}
dma_timeout = msecs_to_jiffies(spi_controller_xfer_timeout(host, xfer));
- ret_timeout = wait_for_completion_interruptible_timeout(&as->xfer_completion,
- dma_timeout);
- if (ret_timeout <= 0) {
- dev_err(&spi->dev, "spi transfer %s\n",
- !ret_timeout ? "timeout" : "canceled");
- as->done_status = ret_timeout < 0 ? ret_timeout : -EIO;
+ ret_timeout = wait_for_completion_timeout(&as->xfer_completion, dma_timeout);
+ if (!ret_timeout) {
+ dev_err(&spi->dev, "spi transfer timeout\n");
+ as->done_status = -EIO;
}
if (as->done_status)
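
The cs_activate() change above only clocks out the dummy byte when the chip select is a GPIO and the requested clock polarity differs from the one last used. A minimal standalone sketch of that decision logic follows; all structs and helpers here are made-up stand-ins, not the driver's API.

/*
 * Sketch only: when the polarity changes, send a dummy byte while CS is
 * still deasserted so SCK settles at its new idle level.
 */
#include <stdbool.h>
#include <stdio.h>

struct ctrl_state {
	bool last_polarity;		/* CPOL used by the previous transfer */
};

static void send_dummy(void)
{
	/* stand-in for atmel_spi_send_dummy(): clocks out one byte */
	puts("dummy byte clocked out to settle SCK");
}

static void gpio_cs_set(bool asserted)
{
	printf("CS GPIO %s\n", asserted ? "asserted" : "deasserted");
}

/* called before a transfer that uses clock polarity @cpol */
static void activate_cs(struct ctrl_state *st, bool cpol)
{
	if (cpol != st->last_polarity) {
		/*
		 * The controller only drives the new SCK idle level once it
		 * starts transmitting, so force a transition while CS is
		 * still deasserted.
		 */
		gpio_cs_set(false);
		send_dummy();
		st->last_polarity = cpol;
	}
	gpio_cs_set(true);
}

int main(void)
{
	struct ctrl_state st = { .last_polarity = false };

	activate_cs(&st, true);		/* polarity changed: dummy sent first */
	activate_cs(&st, true);		/* same polarity: no dummy needed */
	return 0;
}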
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 1f2f8c717df61f..a50eb4db79de8e 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -451,7 +451,6 @@ static int cdns_transfer_one(struct spi_controller *ctlr,
udelay(10);
cdns_spi_process_fifo(xspi, xspi->tx_fifo_depth, 0);
- spi_transfer_delay_exec(transfer);
cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT);
return transfer->len;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 498e35c8db2c1d..272bc871a848b8 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -659,11 +659,18 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
ctrl |= (spi_imx->target_burst * 8 - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
else {
- if (spi_imx->count >= 512)
- ctrl |= 0xFFF << MX51_ECSPI_CTRL_BL_OFFSET;
- else
- ctrl |= (spi_imx->count * spi_imx->bits_per_word - 1)
+ if (spi_imx->usedma) {
+ ctrl |= (spi_imx->bits_per_word *
+ spi_imx_bytes_per_word(spi_imx->bits_per_word) - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
+ } else {
+ if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST)
+ ctrl |= (MX51_ECSPI_CTRL_MAX_BURST - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
+ else
+ ctrl |= (spi_imx->count * spi_imx->bits_per_word - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
+ }
}
/* set clock speed */
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index c9ddd49138d822..e324cd89971937 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -959,7 +959,7 @@ static void margining_port_remove(struct tb_port *port)
snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
if (parent)
- debugfs_remove_recursive(debugfs_lookup("margining", parent));
+ debugfs_lookup_and_remove("margining", parent);
kfree(port->usb4->margining);
port->usb4->margining = NULL;
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 4277733d0021bf..f8f0d24ff6e462 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -2311,13 +2311,13 @@ int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
goto err_request;
/*
- * Always keep 1000 Mb/s to make sure xHCI has at least some
+ * Always keep 900 Mb/s to make sure xHCI has at least some
* bandwidth available for isochronous traffic.
*/
- if (consumed_up < 1000)
- consumed_up = 1000;
- if (consumed_down < 1000)
- consumed_down = 1000;
+ if (consumed_up < 900)
+ consumed_up = 900;
+ if (consumed_down < 900)
+ consumed_down = 900;
ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
consumed_down);
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index bce0d2a9a7f3a5..16d76325039a5d 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -2274,9 +2274,10 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
if (is_mcq_enabled(hba)) {
int utrd_size = sizeof(struct utp_transfer_req_desc);
struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
- struct utp_transfer_req_desc *dest = hwq->sqe_base_addr + hwq->sq_tail_slot;
+ struct utp_transfer_req_desc *dest;
spin_lock(&hwq->sq_lock);
+ dest = hwq->sqe_base_addr + hwq->sq_tail_slot;
memcpy(dest, src, utrd_size);
ufshcd_inc_sq_tail(hwq);
spin_unlock(&hwq->sq_lock);
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 96cb8b5b4e66f5..17e24270477dd8 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1516,9 +1516,11 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
err = ufs_qcom_clk_scale_up_pre_change(hba);
else
err = ufs_qcom_clk_scale_down_pre_change(hba);
- if (err)
- ufshcd_uic_hibern8_exit(hba);
+ if (err) {
+ ufshcd_uic_hibern8_exit(hba);
+ return err;
+ }
} else {
if (scale_up)
err = ufs_qcom_clk_scale_up_post_change(hba);
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index da2558e274b473..db9d9365ff55dc 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -8,6 +8,7 @@
* Vinayak Holikatti <h.vinayak@samsung.com>
*/
+#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
@@ -213,6 +214,55 @@ static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
}
}
+/**
+ * ufshcd_parse_clock_min_max_freq - Parse MIN and MAX clocks freq
+ * @hba: per adapter instance
+ *
+ * This function parses the MIN and MAX frequencies of all clocks required
+ * by the host driver.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ufshcd_parse_clock_min_max_freq(struct ufs_hba *hba)
+{
+ struct list_head *head = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
+ struct dev_pm_opp *opp;
+ unsigned long freq;
+ u8 idx = 0;
+
+ list_for_each_entry(clki, head, list) {
+ if (!clki->name)
+ continue;
+
+ clki->clk = devm_clk_get(hba->dev, clki->name);
+ if (IS_ERR(clki->clk))
+ continue;
+
+ /* Find Max Freq */
+ freq = ULONG_MAX;
+ opp = dev_pm_opp_find_freq_floor_indexed(hba->dev, &freq, idx);
+ if (IS_ERR(opp)) {
+ dev_err(hba->dev, "Failed to find OPP for MAX frequency\n");
+ return PTR_ERR(opp);
+ }
+ clki->max_freq = dev_pm_opp_get_freq_indexed(opp, idx);
+ dev_pm_opp_put(opp);
+
+ /* Find Min Freq */
+ freq = 0;
+ opp = dev_pm_opp_find_freq_ceil_indexed(hba->dev, &freq, idx);
+ if (IS_ERR(opp)) {
+ dev_err(hba->dev, "Failed to find OPP for MIN frequency\n");
+ return PTR_ERR(opp);
+ }
+ clki->min_freq = dev_pm_opp_get_freq_indexed(opp, idx++);
+ dev_pm_opp_put(opp);
+ }
+
+ return 0;
+}
+
static int ufshcd_parse_operating_points(struct ufs_hba *hba)
{
struct device *dev = hba->dev;
@@ -279,6 +329,10 @@ static int ufshcd_parse_operating_points(struct ufs_hba *hba)
return ret;
}
+ ret = ufshcd_parse_clock_min_max_freq(hba);
+ if (ret)
+ return ret;
+
hba->use_pm_opp = true;
return 0;
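
ufshcd_parse_clock_min_max_freq() above records each clock's limits by asking the OPP core for the floor of ULONG_MAX (the maximum rate) and the ceiling of 0 (the minimum rate). Below is a standalone sketch of the same floor/ceil idea over a plain frequency table; the table contents and helper names are illustrative only, not the OPP API.

#include <limits.h>
#include <stdio.h>

static unsigned long opp_floor(const unsigned long *tbl, int n, unsigned long freq)
{
	unsigned long best = 0;

	for (int i = 0; i < n; i++)
		if (tbl[i] <= freq && tbl[i] > best)
			best = tbl[i];
	return best;	/* highest rate not above @freq */
}

static unsigned long opp_ceil(const unsigned long *tbl, int n, unsigned long freq)
{
	unsigned long best = ULONG_MAX;

	for (int i = 0; i < n; i++)
		if (tbl[i] >= freq && tbl[i] < best)
			best = tbl[i];
	return best;	/* lowest rate not below @freq */
}

int main(void)
{
	const unsigned long opps[] = { 19200000, 75000000, 150000000, 300000000 };
	int n = sizeof(opps) / sizeof(opps[0]);

	/* "floor" from ULONG_MAX finds the max; "ceil" from 0 finds the min */
	printf("max_freq = %lu\n", opp_floor(opps, n, ULONG_MAX));
	printf("min_freq = %lu\n", opp_ceil(opps, n, 0));
	return 0;
}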
diff --git a/drivers/usb/fotg210/fotg210-hcd.c b/drivers/usb/fotg210/fotg210-hcd.c
index 929106c16b29b5..7bf810a0c98a93 100644
--- a/drivers/usb/fotg210/fotg210-hcd.c
+++ b/drivers/usb/fotg210/fotg210-hcd.c
@@ -428,8 +428,6 @@ static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
temp = size;
size -= temp;
next += temp;
- if (temp == size)
- goto done;
}
temp = snprintf(next, size, "\n");
@@ -439,7 +437,6 @@ static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
size -= temp;
next += temp;
-done:
*sizep = size;
*nextp = next;
}
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1bf23611be1221..13a56783830df2 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1033,9 +1033,9 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) },
{ USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) },
{ USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) },
- { USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) },
- { USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) },
- { USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) },
+ { USB_DEVICE(FTDI_VID, ACTISENSE_UID_PID) },
+ { USB_DEVICE(FTDI_VID, ACTISENSE_USA_PID) },
+ { USB_DEVICE(FTDI_VID, ACTISENSE_NGX_PID) },
{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index e2099445db708f..21a2b5a25fc097 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1568,9 +1568,9 @@
#define ACTISENSE_USG_PID 0xD9A9 /* USG USB Serial Adapter */
#define ACTISENSE_NGT_PID 0xD9AA /* NGT NMEA2000 Interface */
#define ACTISENSE_NGW_PID 0xD9AB /* NGW NMEA2000 Gateway */
-#define ACTISENSE_D9AC_PID 0xD9AC /* Actisense Reserved */
-#define ACTISENSE_D9AD_PID 0xD9AD /* Actisense Reserved */
-#define ACTISENSE_D9AE_PID 0xD9AE /* Actisense Reserved */
+#define ACTISENSE_UID_PID 0xD9AC /* USB Isolating Device */
+#define ACTISENSE_USA_PID 0xD9AD /* USB to Serial Adapter */
+#define ACTISENSE_NGX_PID 0xD9AE /* NGX NMEA2000 Gateway */
#define ACTISENSE_D9AF_PID 0xD9AF /* Actisense Reserved */
#define CHETCO_SEAGAUGE_PID 0xA548 /* SeaGauge USB Adapter */
#define CHETCO_SEASWITCH_PID 0xA549 /* SeaSwitch USB Adapter */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 4dffcfefd62da2..72390dbf076928 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -272,6 +272,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_RM500Q 0x0800
#define QUECTEL_PRODUCT_RM520N 0x0801
#define QUECTEL_PRODUCT_EC200U 0x0901
+#define QUECTEL_PRODUCT_EG912Y 0x6001
#define QUECTEL_PRODUCT_EC200S_CN 0x6002
#define QUECTEL_PRODUCT_EC200A 0x6005
#define QUECTEL_PRODUCT_EM061K_LWW 0x6008
@@ -1232,6 +1233,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0700, 0xff), /* BG95 */
.driver_info = RSVD(3) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
.driver_info = ZLP },
@@ -1244,6 +1246,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG912Y, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
@@ -2242,6 +2245,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0da, 0xff), /* Foxconn T99W265 MBIM variant */
+ .driver_info = RSVD(3) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff), /* Foxconn T99W265 MBIM */
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0ee, 0xff), /* Foxconn T99W368 MBIM */
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 20dcbccb290b36..fd68204374f2ce 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1306,6 +1306,17 @@ UNUSUAL_DEV( 0x090c, 0x6000, 0x0100, 0x0100,
US_FL_INITIAL_READ10 ),
/*
+ * Patch by Tasos Sahanidis <tasos@tasossah.com>
+ * This flash drive always shows up with write protect enabled
+ * during the first mode sense.
+ */
+UNUSUAL_DEV(0x0951, 0x1697, 0x0100, 0x0100,
+ "Kingston",
+ "DT Ultimate G3",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_WP_DETECT),
+
+/*
* This Pentax still camera is not conformant
* to the USB storage specification: -
* - It does not like the INQUIRY command. So we must handle this command
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index db6e248f820831..4853141cd10c85 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -228,7 +228,7 @@ static void pmic_glink_ucsi_notify(struct work_struct *work)
con_num = UCSI_CCI_CONNECTOR(cci);
if (con_num) {
- if (con_num < PMIC_GLINK_MAX_PORTS &&
+ if (con_num <= PMIC_GLINK_MAX_PORTS &&
ucsi->port_orientation[con_num - 1]) {
int orientation = gpiod_get_value(ucsi->port_orientation[con_num - 1]);
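
The connector number carried in CCI is 1-based while port_orientation[] is 0-based, so the valid range is 1..PMIC_GLINK_MAX_PORTS and the upper-bound test has to be "<=", as the hunk above changes. A small standalone sketch of that bound check; the array contents are arbitrary.

#include <stdbool.h>
#include <stdio.h>

#define MAX_PORTS 3

static const char *orientation[MAX_PORTS] = { "normal", "flipped", "normal" };

static bool report(unsigned int con_num)	/* con_num is 1-based */
{
	if (con_num == 0 || con_num > MAX_PORTS)
		return false;			/* rejects 0 and > MAX */
	printf("port %u: %s\n", con_num, orientation[con_num - 1]);
	return true;
}

int main(void)
{
	report(MAX_PORTS);	/* valid with "<=" semantics; a "<" check would drop it */
	report(MAX_PORTS + 1);	/* out of range either way */
	return 0;
}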
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 988c2ac7cececd..926cb1188eba6c 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -409,10 +409,12 @@ static int afs_update_cell(struct afs_cell *cell)
if (ret == -ENOMEM)
goto out_wake;
- ret = -ENOMEM;
vllist = afs_alloc_vlserver_list(0);
- if (!vllist)
+ if (!vllist) {
+ if (ret >= 0)
+ ret = -ENOMEM;
goto out_wake;
+ }
switch (ret) {
case -ENODATA:
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 1fa8cf23bd3609..1f656005018ea0 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -114,6 +114,7 @@ static int afs_probe_cell_name(struct dentry *dentry)
struct afs_net *net = afs_d2net(dentry);
const char *name = dentry->d_name.name;
size_t len = dentry->d_name.len;
+ char *result = NULL;
int ret;
/* Names prefixed with a dot are R/W mounts. */
@@ -131,9 +132,22 @@ static int afs_probe_cell_name(struct dentry *dentry)
}
ret = dns_query(net->net, "afsdb", name, len, "srv=1",
- NULL, NULL, false);
- if (ret == -ENODATA || ret == -ENOKEY)
+ &result, NULL, false);
+ if (ret == -ENODATA || ret == -ENOKEY || ret == 0)
ret = -ENOENT;
+ if (ret > 0 && ret >= sizeof(struct dns_server_list_v1_header)) {
+ struct dns_server_list_v1_header *v1 = (void *)result;
+
+ if (v1->hdr.zero == 0 &&
+ v1->hdr.content == DNS_PAYLOAD_IS_SERVER_LIST &&
+ v1->hdr.version == 1 &&
+ (v1->status != DNS_LOOKUP_GOOD &&
+ v1->status != DNS_LOOKUP_GOOD_WITH_BAD))
+ return -ENOENT;
+
+ }
+
+ kfree(result);
return ret;
}
@@ -252,20 +266,9 @@ static int afs_dynroot_d_revalidate(struct dentry *dentry, unsigned int flags)
return 1;
}
-/*
- * Allow the VFS to enquire as to whether a dentry should be unhashed (mustn't
- * sleep)
- * - called from dput() when d_count is going to 0.
- * - return 1 to request dentry be unhashed, 0 otherwise
- */
-static int afs_dynroot_d_delete(const struct dentry *dentry)
-{
- return d_really_is_positive(dentry);
-}
-
const struct dentry_operations afs_dynroot_dentry_operations = {
.d_revalidate = afs_dynroot_d_revalidate,
- .d_delete = afs_dynroot_d_delete,
+ .d_delete = always_delete_dentry,
.d_release = afs_d_release,
.d_automount = afs_d_automount,
};
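
In the dynroot change above, afs_probe_cell_name() now asks dns_query() for the payload and treats the cell as definitely nonexistent only when a well-formed V1 server-list header reports an unsuccessful lookup. Here is a standalone sketch of that status check; the header layout and constants are illustrative stand-ins, not the kernel's DNS resolver uapi.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum { PAYLOAD_IS_SERVER_LIST = 1, LOOKUP_GOOD = 0, LOOKUP_GOOD_WITH_BAD = 1 };

struct srvlist_hdr {
	uint8_t zero;			/* must be 0 */
	uint8_t content;		/* payload type */
	uint8_t version;		/* layout version */
	uint8_t status;			/* resolver's lookup status */
};

static bool lookup_failed(const void *result, size_t len)
{
	const struct srvlist_hdr *h = result;

	if (len < sizeof(*h))
		return false;		/* too short to judge */
	return h->zero == 0 &&
	       h->content == PAYLOAD_IS_SERVER_LIST &&
	       h->version == 1 &&
	       h->status != LOOKUP_GOOD &&
	       h->status != LOOKUP_GOOD_WITH_BAD;
}

int main(void)
{
	struct srvlist_hdr good = { 0, PAYLOAD_IS_SERVER_LIST, 1, LOOKUP_GOOD };
	struct srvlist_hdr bad  = { 0, PAYLOAD_IS_SERVER_LIST, 1, 7 /* failed */ };

	printf("good header failed? %d\n", lookup_failed(&good, sizeof(good)));
	printf("bad header failed?  %d\n", lookup_failed(&bad, sizeof(bad)));
	return 0;
}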
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index a812952be1c948..7385d62c8cf503 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -586,6 +586,7 @@ struct afs_volume {
#define AFS_VOLUME_OFFLINE 4 /* - T if volume offline notice given */
#define AFS_VOLUME_BUSY 5 /* - T if volume busy notice given */
#define AFS_VOLUME_MAYBE_NO_IBULK 6 /* - T if some servers don't have InlineBulkStatus */
+#define AFS_VOLUME_RM_TREE 7 /* - Set if volume removed from cell->volumes */
#ifdef CONFIG_AFS_FSCACHE
struct fscache_volume *cache; /* Caching cookie */
#endif
@@ -1513,6 +1514,7 @@ extern struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *,
extern struct afs_volume *afs_create_volume(struct afs_fs_context *);
extern int afs_activate_volume(struct afs_volume *);
extern void afs_deactivate_volume(struct afs_volume *);
+bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason);
extern struct afs_volume *afs_get_volume(struct afs_volume *, enum afs_volume_trace);
extern void afs_put_volume(struct afs_net *, struct afs_volume *, enum afs_volume_trace);
extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *);
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 29d483c8028130..115c081a8e2ce5 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -32,8 +32,13 @@ static struct afs_volume *afs_insert_volume_into_cell(struct afs_cell *cell,
} else if (p->vid > volume->vid) {
pp = &(*pp)->rb_right;
} else {
- volume = afs_get_volume(p, afs_volume_trace_get_cell_insert);
- goto found;
+ if (afs_try_get_volume(p, afs_volume_trace_get_cell_insert)) {
+ volume = p;
+ goto found;
+ }
+
+ set_bit(AFS_VOLUME_RM_TREE, &volume->flags);
+ rb_replace_node_rcu(&p->cell_node, &volume->cell_node, &cell->volumes);
}
}
@@ -56,7 +61,8 @@ static void afs_remove_volume_from_cell(struct afs_volume *volume)
afs_volume_trace_remove);
write_seqlock(&cell->volume_lock);
hlist_del_rcu(&volume->proc_link);
- rb_erase(&volume->cell_node, &cell->volumes);
+ if (!test_and_set_bit(AFS_VOLUME_RM_TREE, &volume->flags))
+ rb_erase(&volume->cell_node, &cell->volumes);
write_sequnlock(&cell->volume_lock);
}
}
@@ -232,6 +238,20 @@ static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume)
}
/*
+ * Try to get a reference on a volume record.
+ */
+bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason)
+{
+ int r;
+
+ if (__refcount_inc_not_zero(&volume->ref, &r)) {
+ trace_afs_volume(volume->vid, r + 1, reason);
+ return true;
+ }
+ return false;
+}
+
+/*
* Get a reference on a volume record.
*/
struct afs_volume *afs_get_volume(struct afs_volume *volume,
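
afs_try_get_volume() above takes a reference only when the refcount has not already dropped to zero, letting the insertion path notice a volume that is being torn down instead of resurrecting it. A tiny sketch of the increment-unless-zero idea using C11 atomics rather than the kernel's refcount_t API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool try_get(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0) {
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* already zero: caller must not use the object */
}

int main(void)
{
	atomic_int live = 1, dying = 0;

	printf("live:  %d (ref now %d)\n", try_get(&live), atomic_load(&live));
	printf("dying: %d (ref still %d)\n", try_get(&dying), atomic_load(&dying));
	return 0;
}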
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 1ba0eeb7552a27..0e615798260721 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -1374,8 +1374,17 @@ retry:
goto alloc_done;
/* Don't retry from all devices if we're out of open buckets: */
- if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
- goto allocate_blocking;
+ if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) {
+ int ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
+ target, erasure_code,
+ nr_replicas, &nr_effective,
+ &have_cache, watermark,
+ flags, cl);
+ if (!ret ||
+ bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+ bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
+ goto alloc_done;
+ }
/*
* Only try to allocate cache (durability = 0 devices) from the
@@ -1389,7 +1398,6 @@ retry:
&have_cache, watermark,
flags, cl);
} else {
-allocate_blocking:
ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
target, erasure_code,
nr_replicas, &nr_effective,
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 8e0fe65f6101bb..6be79129738d0f 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -3214,10 +3214,9 @@ void bch2_fs_btree_iter_exit(struct bch_fs *c)
mempool_exit(&c->btree_trans_pool);
}
-int bch2_fs_btree_iter_init(struct bch_fs *c)
+void bch2_fs_btree_iter_init_early(struct bch_fs *c)
{
struct btree_transaction_stats *s;
- int ret;
for (s = c->btree_transaction_stats;
s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
@@ -3228,6 +3227,11 @@ int bch2_fs_btree_iter_init(struct bch_fs *c)
INIT_LIST_HEAD(&c->btree_trans_list);
seqmutex_init(&c->btree_trans_lock);
+}
+
+int bch2_fs_btree_iter_init(struct bch_fs *c)
+{
+ int ret;
c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf);
if (!c->btree_trans_bufs)
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 85e7cb52f6b6c4..eaffced4c132b5 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -938,6 +938,7 @@ unsigned bch2_trans_get_fn_idx(const char *);
void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *);
void bch2_fs_btree_iter_exit(struct bch_fs *);
+void bch2_fs_btree_iter_init_early(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);
#endif /* _BCACHEFS_BTREE_ITER_H */
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 26be38ab6ecb8f..239fcc3c7c996c 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -99,7 +99,7 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
/* Calculate ideal packed bkey format for new btree nodes: */
-void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
+static void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
{
struct bkey_packed *k;
struct bset_tree *t;
@@ -125,21 +125,20 @@ static struct bkey_format bch2_btree_calc_format(struct btree *b)
return bch2_bkey_format_done(&s);
}
-static size_t btree_node_u64s_with_format(struct btree *b,
+static size_t btree_node_u64s_with_format(struct btree_nr_keys nr,
+ struct bkey_format *old_f,
struct bkey_format *new_f)
{
- struct bkey_format *old_f = &b->format;
-
/* stupid integer promotion rules */
ssize_t delta =
(((int) new_f->key_u64s - old_f->key_u64s) *
- (int) b->nr.packed_keys) +
+ (int) nr.packed_keys) +
(((int) new_f->key_u64s - BKEY_U64s) *
- (int) b->nr.unpacked_keys);
+ (int) nr.unpacked_keys);
- BUG_ON(delta + b->nr.live_u64s < 0);
+ BUG_ON(delta + nr.live_u64s < 0);
- return b->nr.live_u64s + delta;
+ return nr.live_u64s + delta;
}
/**
@@ -147,16 +146,18 @@ static size_t btree_node_u64s_with_format(struct btree *b,
*
* @c: filesystem handle
* @b: btree node to rewrite
+ * @nr: number of keys for new node (i.e. b->nr)
* @new_f: bkey format to translate keys to
*
* Returns: true if all re-packed keys will be able to fit in a new node.
*
* Assumes all keys will successfully pack with the new format.
*/
-bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
+static bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
+ struct btree_nr_keys nr,
struct bkey_format *new_f)
{
- size_t u64s = btree_node_u64s_with_format(b, new_f);
+ size_t u64s = btree_node_u64s_with_format(nr, &b->format, new_f);
return __vstruct_bytes(struct btree_node, u64s) < btree_bytes(c);
}
@@ -391,7 +392,7 @@ static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as,
* The keys might expand with the new format - if they wouldn't fit in
* the btree node anymore, use the old format for now:
*/
- if (!bch2_btree_node_format_fits(as->c, b, &format))
+ if (!bch2_btree_node_format_fits(as->c, b, b->nr, &format))
format = b->format;
SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1);
@@ -1345,8 +1346,11 @@ static void __btree_split_node(struct btree_update *as,
struct bkey_packed *out[2];
struct bkey uk;
unsigned u64s, n1_u64s = (b->nr.live_u64s * 3) / 5;
+ struct { unsigned nr_keys, val_u64s; } nr_keys[2];
int i;
+ memset(&nr_keys, 0, sizeof(nr_keys));
+
for (i = 0; i < 2; i++) {
BUG_ON(n[i]->nsets != 1);
@@ -1368,6 +1372,9 @@ static void __btree_split_node(struct btree_update *as,
if (!i)
n1_pos = uk.p;
bch2_bkey_format_add_key(&format[i], &uk);
+
+ nr_keys[i].nr_keys++;
+ nr_keys[i].val_u64s += bkeyp_val_u64s(&b->format, k);
}
btree_set_min(n[0], b->data->min_key);
@@ -1380,6 +1387,12 @@ static void __btree_split_node(struct btree_update *as,
bch2_bkey_format_add_pos(&format[i], n[i]->data->max_key);
n[i]->data->format = bch2_bkey_format_done(&format[i]);
+
+ unsigned u64s = nr_keys[i].nr_keys * n[i]->data->format.key_u64s +
+ nr_keys[i].val_u64s;
+ if (__vstruct_bytes(struct btree_node, u64s) > btree_bytes(as->c))
+ n[i]->data->format = b->format;
+
btree_node_set_format(n[i], n[i]->data->format);
}
@@ -1822,8 +1835,8 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
bch2_bkey_format_add_pos(&new_s, next->data->max_key);
new_f = bch2_bkey_format_done(&new_s);
- sib_u64s = btree_node_u64s_with_format(b, &new_f) +
- btree_node_u64s_with_format(m, &new_f);
+ sib_u64s = btree_node_u64s_with_format(b->nr, &b->format, &new_f) +
+ btree_node_u64s_with_format(m->nr, &m->format, &new_f);
if (sib_u64s > BTREE_FOREGROUND_MERGE_HYSTERESIS(c)) {
sib_u64s -= BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
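
btree_node_u64s_with_format() now receives the key counts and the old format explicitly, and the split path above applies a similar estimate to fall back to the old format when repacked keys would not fit. A worked sketch of the delta arithmetic with made-up numbers; BKEY_U64S and all counts below are illustrative, not the real constants.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define BKEY_U64S 3	/* illustrative unpacked key header size */

struct nr_keys { unsigned packed, unpacked, live_u64s; };

static size_t u64s_with_format(struct nr_keys nr,
			       unsigned old_key_u64s, unsigned new_key_u64s)
{
	/* signed intermediate, as in the "integer promotion" comment */
	long delta =
		((long)new_key_u64s - old_key_u64s) * (long)nr.packed +
		((long)new_key_u64s - BKEY_U64S) * (long)nr.unpacked;

	assert((long)nr.live_u64s + delta >= 0);
	return (size_t)((long)nr.live_u64s + delta);
}

int main(void)
{
	struct nr_keys nr = { .packed = 100, .unpacked = 10, .live_u64s = 500 };

	/* a wider key format grows the estimate, a narrower one shrinks it */
	printf("old=2 new=4: %zu u64s\n", u64s_with_format(nr, 2, 4));
	printf("old=2 new=1: %zu u64s\n", u64s_with_format(nr, 2, 1));
	return 0;
}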
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index 031076e75fa132..a6668992a272ba 100644
--- a/fs/bcachefs/btree_update_interior.h
+++ b/fs/bcachefs/btree_update_interior.h
@@ -6,10 +6,6 @@
#include "btree_locking.h"
#include "btree_update.h"
-void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *);
-bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *,
- struct bkey_format *);
-
#define BTREE_UPDATE_NODES_MAX ((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)
#define BTREE_UPDATE_JOURNAL_RES (BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 2418c528c5333b..b05457d284a600 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -560,7 +560,8 @@ int bch2_data_update_init(struct btree_trans *trans,
move_ctxt_wait_event(ctxt,
(locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
PTR_BUCKET_POS(c, &p.ptr), 0)) ||
- !atomic_read(&ctxt->read_sectors));
+ (!atomic_read(&ctxt->read_sectors) &&
+ !atomic_read(&ctxt->write_sectors)));
if (!locked)
bch2_bucket_nocow_lock(&c->nocow_locks,
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 371565e02ff273..ba93e32d7708e6 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -1143,24 +1143,33 @@ static int bch2_encode_fh(struct inode *vinode, u32 *fh, int *len,
{
struct bch_inode_info *inode = to_bch_ei(vinode);
struct bch_inode_info *dir = to_bch_ei(vdir);
-
- if (*len < sizeof(struct bcachefs_fid_with_parent) / sizeof(u32))
- return FILEID_INVALID;
+ int min_len;
if (!S_ISDIR(inode->v.i_mode) && dir) {
struct bcachefs_fid_with_parent *fid = (void *) fh;
+ min_len = sizeof(*fid) / sizeof(u32);
+ if (*len < min_len) {
+ *len = min_len;
+ return FILEID_INVALID;
+ }
+
fid->fid = bch2_inode_to_fid(inode);
fid->dir = bch2_inode_to_fid(dir);
- *len = sizeof(*fid) / sizeof(u32);
+ *len = min_len;
return FILEID_BCACHEFS_WITH_PARENT;
} else {
struct bcachefs_fid *fid = (void *) fh;
+ min_len = sizeof(*fid) / sizeof(u32);
+ if (*len < min_len) {
+ *len = min_len;
+ return FILEID_INVALID;
+ }
*fid = bch2_inode_to_fid(inode);
- *len = sizeof(*fid) / sizeof(u32);
+ *len = min_len;
return FILEID_BCACHEFS_WITHOUT_PARENT;
}
}
diff --git a/fs/bcachefs/recovery.h b/fs/bcachefs/recovery.h
index 852d30567da9c4..d266aae90200e5 100644
--- a/fs/bcachefs/recovery.h
+++ b/fs/bcachefs/recovery.h
@@ -10,6 +10,9 @@ extern const char * const bch2_recovery_passes[];
static inline int bch2_run_explicit_recovery_pass(struct bch_fs *c,
enum bch_recovery_pass pass)
{
+ if (c->recovery_passes_explicit & BIT_ULL(pass))
+ return 0;
+
bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)",
bch2_recovery_passes[pass], pass,
bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass);
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index f63474c5c5a293..818ec467a06b96 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -72,6 +72,12 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
MODULE_DESCRIPTION("bcachefs filesystem");
+MODULE_SOFTDEP("pre: crc32c");
+MODULE_SOFTDEP("pre: crc64");
+MODULE_SOFTDEP("pre: sha256");
+MODULE_SOFTDEP("pre: chacha20");
+MODULE_SOFTDEP("pre: poly1305");
+MODULE_SOFTDEP("pre: xxhash");
#define KTYPE(type) \
static const struct attribute_group type ## _group = { \
@@ -714,6 +720,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
bch2_fs_copygc_init(c);
bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
+ bch2_fs_btree_iter_init_early(c);
bch2_fs_btree_interior_update_init_early(c);
bch2_fs_allocator_background_init(c);
bch2_fs_allocator_foreground_init(c);
diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
index 51453d4928fa40..2833e8ef4c098f 100644
--- a/fs/btrfs/delalloc-space.c
+++ b/fs/btrfs/delalloc-space.c
@@ -199,7 +199,7 @@ void btrfs_free_reserved_data_space(struct btrfs_inode *inode,
start = round_down(start, fs_info->sectorsize);
btrfs_free_reserved_data_space_noquota(fs_info, len);
- btrfs_qgroup_free_data(inode, reserved, start, len);
+ btrfs_qgroup_free_data(inode, reserved, start, len, NULL);
}
/*
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index bbcc3df776461f..62cb97f7c94fa2 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -4799,6 +4799,32 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
}
}
+static void btrfs_free_all_qgroup_pertrans(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_root *gang[8];
+ int i;
+ int ret;
+
+ spin_lock(&fs_info->fs_roots_radix_lock);
+ while (1) {
+ ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
+ (void **)gang, 0,
+ ARRAY_SIZE(gang),
+ BTRFS_ROOT_TRANS_TAG);
+ if (ret == 0)
+ break;
+ for (i = 0; i < ret; i++) {
+ struct btrfs_root *root = gang[i];
+
+ btrfs_qgroup_free_meta_all_pertrans(root);
+ radix_tree_tag_clear(&fs_info->fs_roots_radix,
+ (unsigned long)root->root_key.objectid,
+ BTRFS_ROOT_TRANS_TAG);
+ }
+ }
+ spin_unlock(&fs_info->fs_roots_radix_lock);
+}
+
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
struct btrfs_fs_info *fs_info)
{
@@ -4827,6 +4853,8 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
EXTENT_DIRTY);
btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);
+ btrfs_free_all_qgroup_pertrans(fs_info);
+
cur_trans->state =TRANS_STATE_COMPLETED;
wake_up(&cur_trans->commit_wait);
}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 0455935ff55880..01423670bc8a2b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1547,6 +1547,23 @@ out:
return ret;
}
+static void free_head_ref_squota_rsv(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_head *href)
+{
+ u64 root = href->owning_root;
+
+ /*
+ * Don't check must_insert_reserved, as this is called from contexts
+ * where it has already been unset.
+ */
+ if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE ||
+ !href->is_data || !is_fstree(root))
+ return;
+
+ btrfs_qgroup_free_refroot(fs_info, root, href->reserved_bytes,
+ BTRFS_QGROUP_RSV_DATA);
+}
+
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *href,
struct btrfs_delayed_ref_node *node,
@@ -1569,7 +1586,6 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_squota_delta delta = {
.root = href->owning_root,
.num_bytes = node->num_bytes,
- .rsv_bytes = href->reserved_bytes,
.is_data = true,
.is_inc = true,
.generation = trans->transid,
@@ -1586,11 +1602,9 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
flags, ref->objectid,
ref->offset, &key,
node->ref_mod, href->owning_root);
+ free_head_ref_squota_rsv(trans->fs_info, href);
if (!ret)
ret = btrfs_record_squota_delta(trans->fs_info, &delta);
- else
- btrfs_qgroup_free_refroot(trans->fs_info, delta.root,
- delta.rsv_bytes, BTRFS_QGROUP_RSV_DATA);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
ret = __btrfs_inc_extent_ref(trans, node, parent, ref->root,
ref->objectid, ref->offset,
@@ -1742,7 +1756,6 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
struct btrfs_squota_delta delta = {
.root = href->owning_root,
.num_bytes = fs_info->nodesize,
- .rsv_bytes = 0,
.is_data = false,
.is_inc = true,
.generation = trans->transid,
@@ -1774,8 +1787,10 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
int ret = 0;
if (TRANS_ABORTED(trans)) {
- if (insert_reserved)
+ if (insert_reserved) {
btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
+ free_head_ref_squota_rsv(trans->fs_info, href);
+ }
return 0;
}
@@ -1871,6 +1886,8 @@ u64 btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head)
{
+ u64 ret = 0;
+
/*
* We had csum deletions accounted for in our delayed refs rsv, we need
* to drop the csum leaves for this update from our delayed_refs_rsv.
@@ -1885,14 +1902,13 @@ u64 btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
btrfs_delayed_refs_rsv_release(fs_info, 0, nr_csums);
- return btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
+ ret = btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
}
- if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE &&
- head->must_insert_reserved && head->is_data)
- btrfs_qgroup_free_refroot(fs_info, head->owning_root,
- head->reserved_bytes, BTRFS_QGROUP_RSV_DATA);
+ /* must_insert_reserved can be set only if we didn't run the head ref. */
+ if (head->must_insert_reserved)
+ free_head_ref_squota_rsv(fs_info, head);
- return 0;
+ return ret;
}
static int cleanup_ref_head(struct btrfs_trans_handle *trans,
@@ -2033,6 +2049,12 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
* spin lock.
*/
must_insert_reserved = locked_ref->must_insert_reserved;
+ /*
+ * Unsetting this on the head ref relinquishes ownership of
+	 * the rsv_bytes, so it is critical that every possible code
+	 * path from here forward frees all reservations, including the
+	 * qgroup reserve.
+ */
locked_ref->must_insert_reserved = false;
extent_op = locked_ref->extent_op;
@@ -3292,7 +3314,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_squota_delta delta = {
.root = delayed_ref_root,
.num_bytes = num_bytes,
- .rsv_bytes = 0,
.is_data = is_data,
.is_inc = false,
.generation = btrfs_extent_generation(leaf, ei),
@@ -4937,7 +4958,6 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
.root = root_objectid,
.num_bytes = ins->offset,
.generation = trans->transid,
- .rsv_bytes = 0,
.is_data = true,
.is_inc = true,
};
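
The comment on must_insert_reserved above states the rule the new free_head_ref_squota_rsv() helper enforces: whichever path consumes the flag owns freeing the reservation on every outcome, including the aborted-transaction case. A tiny standalone sketch of that ownership pattern; the types and sizes are made up.

#include <stdbool.h>
#include <stdio.h>

struct head {
	bool must_insert_reserved;
	unsigned long reserved_bytes;
};

static void free_rsv(struct head *h)
{
	if (h->reserved_bytes) {
		printf("freeing %lu reserved bytes\n", h->reserved_bytes);
		h->reserved_bytes = 0;
	}
}

static int run_ref(struct head *h, bool fail)
{
	bool insert_reserved = h->must_insert_reserved;

	h->must_insert_reserved = false;	/* ownership of the rsv passes to us */

	if (fail) {
		if (insert_reserved)
			free_rsv(h);		/* the error path must free it too */
		return -1;
	}
	if (insert_reserved)
		free_rsv(h);
	return 0;
}

int main(void)
{
	struct head ok = { true, 4096 }, aborted = { true, 4096 };

	run_ref(&ok, false);
	run_ref(&aborted, true);
	return 0;
}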
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index e6230a6ffa9859..8f724c54fc8e9c 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2302,7 +2302,8 @@ static int try_release_extent_state(struct extent_io_tree *tree,
ret = 0;
} else {
u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
- EXTENT_DELALLOC_NEW | EXTENT_CTLBITS);
+ EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
+ EXTENT_QGROUP_RESERVED);
/*
* At this point we can safely clear everything except the
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index f47731c45bb504..32611a4edd6b23 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -3192,7 +3192,7 @@ static long btrfs_fallocate(struct file *file, int mode,
qgroup_reserved -= range->len;
} else if (qgroup_reserved > 0) {
btrfs_qgroup_free_data(BTRFS_I(inode), data_reserved,
- range->start, range->len);
+ range->start, range->len, NULL);
qgroup_reserved -= range->len;
}
list_del(&range->list);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 9f5a9894f88f49..fb3c3f43c3fa40 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -688,7 +688,7 @@ out:
* And at reserve time, it's always aligned to page size, so
* just free one page here.
*/
- btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
+ btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE, NULL);
btrfs_free_path(path);
btrfs_end_transaction(trans);
return ret;
@@ -5132,7 +5132,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
*/
if (state_flags & EXTENT_DELALLOC)
btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
- end - start + 1);
+ end - start + 1, NULL);
clear_extent_bit(io_tree, start, end,
EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
@@ -8059,7 +8059,7 @@ next:
* reserved data space.
* Since the IO will never happen for this page.
*/
- btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur);
+ btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
if (!inode_evicting) {
clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
EXTENT_DELALLOC | EXTENT_UPTODATE |
@@ -9491,7 +9491,7 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
struct btrfs_path *path;
u64 start = ins->objectid;
u64 len = ins->offset;
- int qgroup_released;
+ u64 qgroup_released = 0;
int ret;
memset(&stack_fi, 0, sizeof(stack_fi));
@@ -9504,9 +9504,9 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
/* Encryption and other encoding is reserved and all 0 */
- qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len);
- if (qgroup_released < 0)
- return ERR_PTR(qgroup_released);
+ ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released);
+ if (ret < 0)
+ return ERR_PTR(ret);
if (trans) {
ret = insert_reserved_file_extent(trans, inode,
@@ -10401,7 +10401,7 @@ out_delalloc_release:
btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
out_qgroup_free_data:
if (ret < 0)
- btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes);
+ btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL);
out_free_data_space:
/*
* If btrfs_reserve_extent() succeeded, then we already decremented
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 4e50b62db2a8fe..a1743904202b78 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1290,6 +1290,15 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file,
* are limited to own subvolumes only
*/
ret = -EPERM;
+ } else if (btrfs_ino(BTRFS_I(src_inode)) != BTRFS_FIRST_FREE_OBJECTID) {
+ /*
+ * Snapshots must be made with the src_inode referring
+ * to the subvolume inode, otherwise the permission
+ * checking above is useless because we may have
+ * permission on a lower directory but not the subvol
+ * itself.
+ */
+ ret = -EINVAL;
} else {
ret = btrfs_mksnapshot(&file->f_path, idmap,
name, namelen,
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 574e8a55e24a2b..a82e1417c4d278 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -152,11 +152,12 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
{
struct btrfs_ordered_extent *entry;
int ret;
+ u64 qgroup_rsv = 0;
if (flags &
((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
/* For nocow write, we can release the qgroup rsv right now */
- ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
+ ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
if (ret < 0)
return ERR_PTR(ret);
} else {
@@ -164,7 +165,7 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
* The ordered extent has reserved qgroup space, release now
* and pass the reserved number for qgroup_record to free.
*/
- ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
+ ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
if (ret < 0)
return ERR_PTR(ret);
}
@@ -182,7 +183,7 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
entry->inode = igrab(&inode->vfs_inode);
entry->compress_type = compress_type;
entry->truncated_len = (u64)-1;
- entry->qgroup_rsv = ret;
+ entry->qgroup_rsv = qgroup_rsv;
entry->flags = flags;
refcount_set(&entry->refs, 1);
init_waitqueue_head(&entry->wait);
@@ -599,7 +600,9 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
release = entry->disk_num_bytes;
else
release = entry->num_bytes;
- btrfs_delalloc_release_metadata(btrfs_inode, release, false);
+ btrfs_delalloc_release_metadata(btrfs_inode, release,
+ test_bit(BTRFS_ORDERED_IOERR,
+ &entry->flags));
}
percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index ce446d9d7f23da..e46774e8f49fd6 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -4057,13 +4057,14 @@ int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
/* Free ranges specified by @reserved, normally in error path */
static int qgroup_free_reserved_data(struct btrfs_inode *inode,
- struct extent_changeset *reserved, u64 start, u64 len)
+ struct extent_changeset *reserved,
+ u64 start, u64 len, u64 *freed_ret)
{
struct btrfs_root *root = inode->root;
struct ulist_node *unode;
struct ulist_iterator uiter;
struct extent_changeset changeset;
- int freed = 0;
+ u64 freed = 0;
int ret;
extent_changeset_init(&changeset);
@@ -4104,7 +4105,9 @@ static int qgroup_free_reserved_data(struct btrfs_inode *inode,
}
btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
BTRFS_QGROUP_RSV_DATA);
- ret = freed;
+ if (freed_ret)
+ *freed_ret = freed;
+ ret = 0;
out:
extent_changeset_release(&changeset);
return ret;
@@ -4112,7 +4115,7 @@ out:
static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
struct extent_changeset *reserved, u64 start, u64 len,
- int free)
+ u64 *released, int free)
{
struct extent_changeset changeset;
int trace_op = QGROUP_RELEASE;
@@ -4128,7 +4131,7 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
/* In release case, we shouldn't have @reserved */
WARN_ON(!free && reserved);
if (free && reserved)
- return qgroup_free_reserved_data(inode, reserved, start, len);
+ return qgroup_free_reserved_data(inode, reserved, start, len, released);
extent_changeset_init(&changeset);
ret = clear_record_extent_bits(&inode->io_tree, start, start + len -1,
EXTENT_QGROUP_RESERVED, &changeset);
@@ -4143,7 +4146,8 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
btrfs_qgroup_free_refroot(inode->root->fs_info,
inode->root->root_key.objectid,
changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
- ret = changeset.bytes_changed;
+ if (released)
+ *released = changeset.bytes_changed;
out:
extent_changeset_release(&changeset);
return ret;
@@ -4162,9 +4166,10 @@ out:
* NOTE: This function may sleep for memory allocation.
*/
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
- struct extent_changeset *reserved, u64 start, u64 len)
+ struct extent_changeset *reserved,
+ u64 start, u64 len, u64 *freed)
{
- return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
+ return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1);
}
/*
@@ -4182,9 +4187,9 @@ int btrfs_qgroup_free_data(struct btrfs_inode *inode,
*
* NOTE: This function may sleep for memory allocation.
*/
-int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len)
+int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released)
{
- return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
+ return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0);
}
static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
@@ -4332,8 +4337,9 @@ static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
qgroup_rsv_release(fs_info, qgroup, num_bytes,
BTRFS_QGROUP_RSV_META_PREALLOC);
- qgroup_rsv_add(fs_info, qgroup, num_bytes,
- BTRFS_QGROUP_RSV_META_PERTRANS);
+ if (!sb_rdonly(fs_info->sb))
+ qgroup_rsv_add(fs_info, qgroup, num_bytes,
+ BTRFS_QGROUP_RSV_META_PERTRANS);
list_for_each_entry(glist, &qgroup->groups, next_group)
qgroup_iterator_add(&qgroup_list, glist->group);
@@ -4655,6 +4661,17 @@ void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
*root = RB_ROOT;
}
+void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_bytes)
+{
+ if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
+ return;
+
+ if (!is_fstree(root))
+ return;
+
+ btrfs_qgroup_free_refroot(fs_info, root, rsv_bytes, BTRFS_QGROUP_RSV_DATA);
+}
+
int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
struct btrfs_squota_delta *delta)
{
@@ -4699,8 +4716,5 @@ int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
out:
spin_unlock(&fs_info->qgroup_lock);
- if (!ret && delta->rsv_bytes)
- btrfs_qgroup_free_refroot(fs_info, root, delta->rsv_bytes,
- BTRFS_QGROUP_RSV_DATA);
return ret;
}
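
The qgroup release/free helpers above now return status in the int and hand the byte count back through a u64 out-parameter: a byte count squeezed into an int return truncates, and can look negative or like an error code, once a range reaches 2 GiB. A minimal demonstration with made-up values.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* old style: the count rides in the int return value */
static int release_old(uint64_t bytes)
{
	return (int)bytes;		/* truncated above INT_MAX */
}

/* new style: the int is status only, the count goes through *released */
static int release_new(uint64_t bytes, uint64_t *released)
{
	if (released)
		*released = bytes;
	return 0;
}

int main(void)
{
	uint64_t big = 3ULL << 30;	/* 3 GiB of released reservation */
	uint64_t out = 0;

	printf("old-style return: %d (truncated)\n", release_old(big));
	release_new(big, &out);
	printf("new-style out-param: %" PRIu64 " bytes\n", out);
	return 0;
}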
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 855a4f97876185..be18c862e64ede 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -274,8 +274,6 @@ struct btrfs_squota_delta {
u64 root;
/* The number of bytes in the extent being counted. */
u64 num_bytes;
- /* The number of bytes reserved for this extent. */
- u64 rsv_bytes;
/* The generation the extent was created in. */
u64 generation;
/* Whether we are using or freeing the extent. */
@@ -358,10 +356,10 @@ int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
/* New io_tree based accurate qgroup reserve API */
int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
struct extent_changeset **reserved, u64 start, u64 len);
-int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len);
+int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released);
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
struct extent_changeset *reserved, u64 start,
- u64 len);
+ u64 len, u64 *freed);
int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
enum btrfs_qgroup_rsv_type type, bool enforce);
int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
@@ -422,6 +420,7 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *eb);
void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info);
+void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_bytes);
int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
struct btrfs_squota_delta *delta);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index bfc0eb5e3b7c22..5b3333ceef0481 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -37,8 +37,6 @@
static struct kmem_cache *btrfs_trans_handle_cachep;
-#define BTRFS_ROOT_TRANS_TAG 0
-
/*
* Transaction states and transitions
*
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 18c4f6e83b7883..2bf8bbdfd0b38b 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -12,6 +12,9 @@
#include "ctree.h"
#include "misc.h"
+/* Radix-tree tag for roots that are part of the transaction. */
+#define BTRFS_ROOT_TRANS_TAG 0
+
enum btrfs_trans_state {
TRANS_STATE_RUNNING,
TRANS_STATE_COMMIT_PREP,
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 5063434be0fc83..6d7c1a49581f7a 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -104,12 +104,14 @@ int debugfs_file_get(struct dentry *dentry)
~DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
refcount_set(&fsd->active_users, 1);
init_completion(&fsd->active_users_drained);
+ INIT_LIST_HEAD(&fsd->cancellations);
+ mutex_init(&fsd->cancellations_mtx);
+
if (cmpxchg(&dentry->d_fsdata, d_fsd, fsd) != d_fsd) {
+ mutex_destroy(&fsd->cancellations_mtx);
kfree(fsd);
fsd = READ_ONCE(dentry->d_fsdata);
}
- INIT_LIST_HEAD(&fsd->cancellations);
- mutex_init(&fsd->cancellations_mtx);
}
/*
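
The debugfs fix above initializes the cancellations list and mutex before the cmpxchg() that publishes dentry->d_fsdata, so a racing user of the pointer never observes half-initialized fields. Here is a standalone sketch of the initialize-then-publish pattern, with C11 atomics standing in for the kernel's cmpxchg(); the types and names are made up.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fsdata {
	int ready;			/* stands in for the list head / mutex */
};

static _Atomic(struct fsdata *) published;

static struct fsdata *get_or_publish(void)
{
	struct fsdata *expected = NULL;
	struct fsdata *fsd = malloc(sizeof(*fsd));

	if (!fsd)
		exit(1);
	fsd->ready = 1;			/* fully initialize before publication */

	if (!atomic_compare_exchange_strong(&published, &expected, fsd)) {
		free(fsd);		/* lost the race: discard our copy */
		fsd = expected;		/* the winner's copy is fully set up */
	}
	return fsd;
}

int main(void)
{
	struct fsdata *a = get_or_publish();
	struct fsdata *b = get_or_publish();

	printf("same object: %d, ready: %d\n", a == b, b->ready);
	return 0;
}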
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index fdf2aad7347090..e6beaaf4f1700b 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -26,8 +26,6 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
int i;
int flags = nfsexp_flags(rqstp, exp);
- validate_process_creds();
-
/* discard any old override before preparing the new set */
revert_creds(get_cred(current_real_cred()));
new = prepare_creds();
@@ -81,10 +79,8 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
else
new->cap_effective = cap_raise_nfsd_set(new->cap_effective,
new->cap_permitted);
- validate_process_creds();
put_cred(override_creds(new));
put_cred(new);
- validate_process_creds();
return 0;
oom:
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 92bc109dabe638..4039ffcf90ba58 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -84,21 +84,7 @@ static void encode_uint32(struct xdr_stream *xdr, u32 n)
static void encode_bitmap4(struct xdr_stream *xdr, const __u32 *bitmap,
size_t len)
{
- xdr_stream_encode_uint32_array(xdr, bitmap, len);
-}
-
-static int decode_cb_fattr4(struct xdr_stream *xdr, uint32_t *bitmap,
- struct nfs4_cb_fattr *fattr)
-{
- fattr->ncf_cb_change = 0;
- fattr->ncf_cb_fsize = 0;
- if (bitmap[0] & FATTR4_WORD0_CHANGE)
- if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_change) < 0)
- return -NFSERR_BAD_XDR;
- if (bitmap[0] & FATTR4_WORD0_SIZE)
- if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_fsize) < 0)
- return -NFSERR_BAD_XDR;
- return 0;
+ WARN_ON_ONCE(xdr_stream_encode_uint32_array(xdr, bitmap, len) < 0);
}
/*
@@ -372,30 +358,6 @@ encode_cb_recallany4args(struct xdr_stream *xdr,
}
/*
- * CB_GETATTR4args
- * struct CB_GETATTR4args {
- * nfs_fh4 fh;
- * bitmap4 attr_request;
- * };
- *
- * The size and change attributes are the only one
- * guaranteed to be serviced by the client.
- */
-static void
-encode_cb_getattr4args(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr,
- struct nfs4_cb_fattr *fattr)
-{
- struct nfs4_delegation *dp =
- container_of(fattr, struct nfs4_delegation, dl_cb_fattr);
- struct knfsd_fh *fh = &dp->dl_stid.sc_file->fi_fhandle;
-
- encode_nfs_cb_opnum4(xdr, OP_CB_GETATTR);
- encode_nfs_fh4(xdr, fh);
- encode_bitmap4(xdr, fattr->ncf_cb_bmap, ARRAY_SIZE(fattr->ncf_cb_bmap));
- hdr->nops++;
-}
-
-/*
* CB_SEQUENCE4args
*
* struct CB_SEQUENCE4args {
@@ -531,26 +493,6 @@ static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
}
/*
- * 20.1. Operation 3: CB_GETATTR - Get Attributes
- */
-static void nfs4_xdr_enc_cb_getattr(struct rpc_rqst *req,
- struct xdr_stream *xdr, const void *data)
-{
- const struct nfsd4_callback *cb = data;
- struct nfs4_cb_fattr *ncf =
- container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
- struct nfs4_cb_compound_hdr hdr = {
- .ident = cb->cb_clp->cl_cb_ident,
- .minorversion = cb->cb_clp->cl_minorversion,
- };
-
- encode_cb_compound4args(xdr, &hdr);
- encode_cb_sequence4args(xdr, cb, &hdr);
- encode_cb_getattr4args(xdr, &hdr, ncf);
- encode_cb_nops(&hdr);
-}
-
-/*
* 20.2. Operation 4: CB_RECALL - Recall a Delegation
*/
static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
@@ -606,42 +548,6 @@ static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
}
/*
- * 20.1. Operation 3: CB_GETATTR - Get Attributes
- */
-static int nfs4_xdr_dec_cb_getattr(struct rpc_rqst *rqstp,
- struct xdr_stream *xdr,
- void *data)
-{
- struct nfsd4_callback *cb = data;
- struct nfs4_cb_compound_hdr hdr;
- int status;
- u32 bitmap[3] = {0};
- u32 attrlen;
- struct nfs4_cb_fattr *ncf =
- container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
-
- status = decode_cb_compound4res(xdr, &hdr);
- if (unlikely(status))
- return status;
-
- status = decode_cb_sequence4res(xdr, cb);
- if (unlikely(status || cb->cb_seq_status))
- return status;
-
- status = decode_cb_op_status(xdr, OP_CB_GETATTR, &cb->cb_status);
- if (status)
- return status;
- if (xdr_stream_decode_uint32_array(xdr, bitmap, 3) < 0)
- return -NFSERR_BAD_XDR;
- if (xdr_stream_decode_u32(xdr, &attrlen) < 0)
- return -NFSERR_BAD_XDR;
- if (attrlen > (sizeof(ncf->ncf_cb_change) + sizeof(ncf->ncf_cb_fsize)))
- return -NFSERR_BAD_XDR;
- status = decode_cb_fattr4(xdr, bitmap, ncf);
- return status;
-}
-
-/*
* 20.2. Operation 4: CB_RECALL - Recall a Delegation
*/
static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
@@ -949,7 +855,6 @@ static const struct rpc_procinfo nfs4_cb_procedures[] = {
PROC(CB_NOTIFY_LOCK, COMPOUND, cb_notify_lock, cb_notify_lock),
PROC(CB_OFFLOAD, COMPOUND, cb_offload, cb_offload),
PROC(CB_RECALL_ANY, COMPOUND, cb_recall_any, cb_recall_any),
- PROC(CB_GETATTR, COMPOUND, cb_getattr, cb_getattr),
};
static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)];
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 40415929e2aef3..3edbfa0233e68a 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -127,7 +127,6 @@ static void free_session(struct nfsd4_session *);
static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
-static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops;
static struct workqueue_struct *laundry_wq;
@@ -1190,10 +1189,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
dp->dl_recalled = false;
nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
&nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
- nfsd4_init_cb(&dp->dl_cb_fattr.ncf_getattr, dp->dl_stid.sc_client,
- &nfsd4_cb_getattr_ops, NFSPROC4_CLNT_CB_GETATTR);
- dp->dl_cb_fattr.ncf_file_modified = false;
- dp->dl_cb_fattr.ncf_cb_bmap[0] = FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE;
get_nfs4_file(fp);
dp->dl_stid.sc_file = fp;
return dp;
@@ -2901,56 +2896,11 @@ nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
spin_unlock(&nn->client_lock);
}
-static int
-nfsd4_cb_getattr_done(struct nfsd4_callback *cb, struct rpc_task *task)
-{
- struct nfs4_cb_fattr *ncf =
- container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
-
- ncf->ncf_cb_status = task->tk_status;
- switch (task->tk_status) {
- case -NFS4ERR_DELAY:
- rpc_delay(task, 2 * HZ);
- return 0;
- default:
- return 1;
- }
-}
-
-static void
-nfsd4_cb_getattr_release(struct nfsd4_callback *cb)
-{
- struct nfs4_cb_fattr *ncf =
- container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
- struct nfs4_delegation *dp =
- container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
-
- nfs4_put_stid(&dp->dl_stid);
- clear_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags);
- wake_up_bit(&ncf->ncf_cb_flags, CB_GETATTR_BUSY);
-}
-
static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
.done = nfsd4_cb_recall_any_done,
.release = nfsd4_cb_recall_any_release,
};
-static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops = {
- .done = nfsd4_cb_getattr_done,
- .release = nfsd4_cb_getattr_release,
-};
-
-void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf)
-{
- struct nfs4_delegation *dp =
- container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
-
- if (test_and_set_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags))
- return;
- refcount_inc(&dp->dl_stid.sc_count);
- nfsd4_run_cb(&ncf->ncf_getattr);
-}
-
static struct nfs4_client *create_client(struct xdr_netobj name,
struct svc_rqst *rqstp, nfs4_verifier *verf)
{
@@ -5685,8 +5635,6 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
struct svc_fh *parent = NULL;
int cb_up;
int status = 0;
- struct kstat stat;
- struct path path;
cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
open->op_recall = false;
@@ -5724,18 +5672,6 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) {
open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE;
trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
- path.mnt = currentfh->fh_export->ex_path.mnt;
- path.dentry = currentfh->fh_dentry;
- if (vfs_getattr(&path, &stat,
- (STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE),
- AT_STATX_SYNC_AS_STAT)) {
- nfs4_put_stid(&dp->dl_stid);
- destroy_delegation(dp);
- goto out_no_deleg;
- }
- dp->dl_cb_fattr.ncf_cur_fsize = stat.size;
- dp->dl_cb_fattr.ncf_initial_cinfo =
- nfsd4_change_attribute(&stat, d_inode(currentfh->fh_dentry));
} else {
open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
@@ -8492,8 +8428,6 @@ nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
* nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
* @rqstp: RPC transaction context
* @inode: file to be checked for a conflict
- * @modified: return true if file was modified
- * @size: new size of file if modified is true
*
* This function is called when there is a conflict between a write
* delegation and a change/size GETATTR from another client. The server
@@ -8502,23 +8436,21 @@ nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
* delegation before replying to the GETATTR. See RFC 8881 section
* 18.7.4.
*
+ * The current implementation does not support CB_GETATTR yet. However,
+ * using it to avoid recalling the delegation could be added in
+ * follow-up work.
+ *
* Returns 0 if there is no conflict; otherwise an nfs_stat
* code is returned.
*/
__be32
-nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode,
- bool *modified, u64 *size)
+nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode)
{
+ __be32 status;
struct file_lock_context *ctx;
- struct nfs4_delegation *dp;
- struct nfs4_cb_fattr *ncf;
struct file_lock *fl;
- struct iattr attrs;
- __be32 status;
-
- might_sleep();
+ struct nfs4_delegation *dp;
- *modified = false;
ctx = locks_inode_context(inode);
if (!ctx)
return 0;
@@ -8545,34 +8477,10 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode,
break_lease:
spin_unlock(&ctx->flc_lock);
nfsd_stats_wdeleg_getattr_inc();
-
- dp = fl->fl_owner;
- ncf = &dp->dl_cb_fattr;
- nfs4_cb_getattr(&dp->dl_cb_fattr);
- wait_on_bit(&ncf->ncf_cb_flags, CB_GETATTR_BUSY, TASK_INTERRUPTIBLE);
- if (ncf->ncf_cb_status) {
- status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
- if (status != nfserr_jukebox ||
- !nfsd_wait_for_delegreturn(rqstp, inode))
- return status;
- }
- if (!ncf->ncf_file_modified &&
- (ncf->ncf_initial_cinfo != ncf->ncf_cb_change ||
- ncf->ncf_cur_fsize != ncf->ncf_cb_fsize))
- ncf->ncf_file_modified = true;
- if (ncf->ncf_file_modified) {
- /*
- * The server would not update the file's metadata
- * with the client's modified size.
- */
- attrs.ia_mtime = attrs.ia_ctime = current_time(inode);
- attrs.ia_valid = ATTR_MTIME | ATTR_CTIME;
- setattr_copy(&nop_mnt_idmap, inode, &attrs);
- mark_inode_dirty(inode);
- ncf->ncf_cur_fsize = ncf->ncf_cb_fsize;
- *size = ncf->ncf_cur_fsize;
- *modified = true;
- }
+ status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
+ if (status != nfserr_jukebox ||
+ !nfsd_wait_for_delegreturn(rqstp, inode))
+ return status;
return 0;
}
break;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index ec4ed6206df1d1..b499fe9caa3209 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -3505,9 +3505,7 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
u32 attrmask[3];
unsigned long mask[2];
} u;
- bool file_modified;
unsigned long bit;
- u64 size = 0;
WARN_ON_ONCE(bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1);
WARN_ON_ONCE(!nfsd_attrs_supported(minorversion, bmval));
@@ -3534,8 +3532,7 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
}
args.size = 0;
if (u.attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
- status = nfsd4_deleg_getattr_conflict(rqstp, d_inode(dentry),
- &file_modified, &size);
+ status = nfsd4_deleg_getattr_conflict(rqstp, d_inode(dentry));
if (status)
goto out;
}
@@ -3545,7 +3542,7 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
AT_STATX_SYNC_AS_STAT);
if (err)
goto out_nfserr;
- args.size = file_modified ? size : args.stat.size;
+ args.size = args.stat.size;
if (!(args.stat.result_mask & STATX_BTIME))
/* underlying FS does not offer btime so we can't share it */
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 3e15b72f421d4c..7cd513e59305ef 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -705,8 +705,10 @@ static ssize_t __write_ports_addfd(char *buf, struct net *net, const struct cred
err = svc_addsock(nn->nfsd_serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
- if (err >= 0 &&
- !nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
+ if (err < 0 && !nn->nfsd_serv->sv_nrthreads && !nn->keep_active)
+ nfsd_last_thread(net);
+ else if (err >= 0 &&
+ !nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
svc_get(nn->nfsd_serv);
nfsd_put(net);
@@ -757,6 +759,9 @@ out_close:
svc_xprt_put(xprt);
}
out_err:
+ if (!nn->nfsd_serv->sv_nrthreads && !nn->keep_active)
+ nfsd_last_thread(net);
+
nfsd_put(net);
return err;
}
@@ -1510,11 +1515,10 @@ int nfsd_nl_rpc_status_get_start(struct netlink_callback *cb)
int ret = -ENODEV;
mutex_lock(&nfsd_mutex);
- if (nn->nfsd_serv) {
- svc_get(nn->nfsd_serv);
+ if (nn->nfsd_serv)
ret = 0;
- }
- mutex_unlock(&nfsd_mutex);
+ else
+ mutex_unlock(&nfsd_mutex);
return ret;
}
@@ -1686,8 +1690,6 @@ out:
*/
int nfsd_nl_rpc_status_get_done(struct netlink_callback *cb)
{
- mutex_lock(&nfsd_mutex);
- nfsd_put(sock_net(cb->skb->sk));
mutex_unlock(&nfsd_mutex);
return 0;
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index f5ff42f41ee7fe..3286ffacbc566d 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -155,6 +155,7 @@ int nfsd_vers(struct nfsd_net *nn, int vers, enum vers_op change);
int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change);
void nfsd_reset_versions(struct nfsd_net *nn);
int nfsd_create_serv(struct net *net);
+void nfsd_last_thread(struct net *net);
extern int nfsd_max_blksize;
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index fe61d9bbcc1faa..7a2bc8e82a637a 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -542,7 +542,7 @@ static struct notifier_block nfsd_inet6addr_notifier = {
/* Only used under nfsd_mutex, so this atomic may be overkill: */
static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0);
-static void nfsd_last_thread(struct net *net)
+void nfsd_last_thread(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_serv *serv = nn->nfsd_serv;
@@ -955,7 +955,6 @@ nfsd(void *vrqstp)
rqstp->rq_server->sv_maxconn = nn->max_connections;
svc_recv(rqstp);
- validate_process_creds();
}
atomic_dec(&nfsdstats.th_cnt);
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index f96eaa8e9413da..41bdc913fa715b 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -117,24 +117,6 @@ struct nfs4_cpntf_state {
time64_t cpntf_time; /* last time stateid used */
};
-struct nfs4_cb_fattr {
- struct nfsd4_callback ncf_getattr;
- u32 ncf_cb_status;
- u32 ncf_cb_bmap[1];
-
- /* from CB_GETATTR reply */
- u64 ncf_cb_change;
- u64 ncf_cb_fsize;
-
- unsigned long ncf_cb_flags;
- bool ncf_file_modified;
- u64 ncf_initial_cinfo;
- u64 ncf_cur_fsize;
-};
-
-/* bits for ncf_cb_flags */
-#define CB_GETATTR_BUSY 0
-
/*
* Represents a delegation stateid. The nfs4_client holds references to these
* and they are put when it is being destroyed or when the delegation is
@@ -168,9 +150,6 @@ struct nfs4_delegation {
int dl_retries;
struct nfsd4_callback dl_recall;
bool dl_recalled;
-
- /* for CB_GETATTR */
- struct nfs4_cb_fattr dl_cb_fattr;
};
#define cb_to_delegation(cb) \
@@ -661,7 +640,6 @@ enum nfsd4_cb_op {
NFSPROC4_CLNT_CB_SEQUENCE,
NFSPROC4_CLNT_CB_NOTIFY_LOCK,
NFSPROC4_CLNT_CB_RECALL_ANY,
- NFSPROC4_CLNT_CB_GETATTR,
};
/* Returns true iff a is later than b: */
@@ -754,6 +732,5 @@ static inline bool try_to_expire_client(struct nfs4_client *clp)
}
extern __be32 nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp,
- struct inode *inode, bool *file_modified, u64 *size);
-extern void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf);
+ struct inode *inode);
#endif /* NFSD4_STATE_H */
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index fbbea7498f02b4..e01e4e2acbd9a2 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -901,7 +901,6 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
int host_err;
bool retried = false;
- validate_process_creds();
/*
* If we get here, then the client has already done an "open",
* and (hopefully) checked permission - so allow OWNER_OVERRIDE
@@ -926,7 +925,6 @@ retry:
}
err = nfserrno(host_err);
}
- validate_process_creds();
return err;
}
@@ -943,12 +941,7 @@ int
nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp, int may_flags,
struct file **filp)
{
- int err;
-
- validate_process_creds();
- err = __nfsd_open(rqstp, fhp, S_IFREG, may_flags, filp);
- validate_process_creds();
- return err;
+ return __nfsd_open(rqstp, fhp, S_IFREG, may_flags, filp);
}
/*
diff --git a/fs/nfsd/xdr4cb.h b/fs/nfsd/xdr4cb.h
index e8b00309c449fe..0d39af1b00a0f3 100644
--- a/fs/nfsd/xdr4cb.h
+++ b/fs/nfsd/xdr4cb.h
@@ -54,21 +54,3 @@
#define NFS4_dec_cb_recall_any_sz (cb_compound_dec_hdr_sz + \
cb_sequence_dec_sz + \
op_dec_sz)
-
-/*
- * 1: CB_GETATTR opcode (32-bit)
- * N: file_handle
- * 1: number of entry in attribute array (32-bit)
- * 1: entry 0 in attribute array (32-bit)
- */
-#define NFS4_enc_cb_getattr_sz (cb_compound_enc_hdr_sz + \
- cb_sequence_enc_sz + \
- 1 + enc_nfs4_fh_sz + 1 + 1)
-/*
- * 4: fattr_bitmap_maxsz
- * 1: attribute array len
- * 2: change attr (64-bit)
- * 2: size (64-bit)
- */
-#define NFS4_dec_cb_getattr_sz (cb_compound_dec_hdr_sz + \
- cb_sequence_dec_sz + 4 + 1 + 2 + 2 + op_dec_sz)
diff --git a/fs/open.c b/fs/open.c
index 02dc608d40d81f..3494a9cd8046c4 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -1088,8 +1088,6 @@ struct file *dentry_open(const struct path *path, int flags,
int error;
struct file *f;
- validate_creds(cred);
-
/* We must always pass in a valid mount pointer. */
BUG_ON(!path->mnt);
@@ -1128,7 +1126,6 @@ struct file *dentry_create(const struct path *path, int flags, umode_t mode,
struct file *f;
int error;
- validate_creds(cred);
f = alloc_empty_file(flags, cred);
if (IS_ERR(f))
return f;
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 4382881b070948..8bea66c9731616 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -753,15 +753,16 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
path.dentry = temp;
err = ovl_copy_up_data(c, &path);
/*
- * We cannot hold lock_rename() throughout this helper, because or
+ * We cannot hold lock_rename() throughout this helper, because of
* lock ordering with sb_writers, which shouldn't be held when calling
* ovl_copy_up_data(), so lock workdir and destdir and make sure that
* temp wasn't moved before copy up completion or cleanup.
- * If temp was moved, abort without the cleanup.
*/
ovl_start_write(c->dentry);
if (lock_rename(c->workdir, c->destdir) != NULL ||
temp->d_parent != c->workdir) {
+ /* temp or workdir moved underneath us? abort without cleanup */
+ dput(temp);
err = -EIO;
goto unlock;
} else if (err) {
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index 59f6b8e32cc97a..d64a306a414be0 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -291,16 +291,23 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CIFS_DEBUG2 */
- rc = -EINVAL;
+
if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
spin_unlock(&cfids->cfid_list_lock);
+ rc = -EINVAL;
+ goto oshr_free;
+ }
+
+ rc = smb2_parse_contexts(server, rsp_iov,
+ &oparms.fid->epoch,
+ oparms.fid->lease_key,
+ &oplock, NULL, NULL);
+ if (rc) {
+ spin_unlock(&cfids->cfid_list_lock);
goto oshr_free;
}
- smb2_parse_contexts(server, o_rsp,
- &oparms.fid->epoch,
- oparms.fid->lease_key, &oplock,
- NULL, NULL);
+ rc = -EINVAL;
if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
spin_unlock(&cfids->cfid_list_lock);
goto oshr_free;
diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
index 5596c9f30ccb28..60027f5aebe87f 100644
--- a/fs/smb/client/cifs_debug.c
+++ b/fs/smb/client/cifs_debug.c
@@ -40,11 +40,13 @@ void cifs_dump_detail(void *buf, struct TCP_Server_Info *server)
#ifdef CONFIG_CIFS_DEBUG2
struct smb_hdr *smb = buf;
- cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d\n",
- smb->Command, smb->Status.CifsError,
- smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
- cifs_dbg(VFS, "smb buf %p len %u\n", smb,
- server->ops->calc_smb_size(smb));
+ cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d Wct: %d\n",
+ smb->Command, smb->Status.CifsError, smb->Flags,
+ smb->Flags2, smb->Mid, smb->Pid, smb->WordCount);
+ if (!server->ops->check_message(buf, server->total_read, server)) {
+ cifs_dbg(VFS, "smb buf %p len %u\n", smb,
+ server->ops->calc_smb_size(smb));
+ }
#endif /* CONFIG_CIFS_DEBUG2 */
}
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 7558167f603c38..55b3ce944022a0 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -532,7 +532,8 @@ struct smb_version_operations {
struct mid_q_entry **, char **, int *);
enum securityEnum (*select_sectype)(struct TCP_Server_Info *,
enum securityEnum);
- int (*next_header)(char *);
+ int (*next_header)(struct TCP_Server_Info *server, char *buf,
+ unsigned int *noff);
/* ioctl passthrough for query_info */
int (*ioctl_query_info)(const unsigned int xid,
struct cifs_tcon *tcon,
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 9dc6dc2754c2e9..dd2a1fb65e7176 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -1201,7 +1201,12 @@ next_pdu:
server->total_read += length;
if (server->ops->next_header) {
- next_offset = server->ops->next_header(buf);
+ if (server->ops->next_header(server, buf, &next_offset)) {
+ cifs_dbg(VFS, "%s: malformed response (next_offset=%u)\n",
+ __func__, next_offset);
+ cifs_reconnect(server, true);
+ continue;
+ }
if (next_offset)
server->pdu_size = next_offset;
}
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index cf17e3dd703e65..32a8525415d96b 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -4671,7 +4671,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
/* we do not want atime to be less than mtime, it broke some apps */
atime = inode_set_atime_to_ts(inode, current_time(inode));
mtime = inode_get_mtime(inode);
- if (timespec64_compare(&atime, &mtime))
+ if (timespec64_compare(&atime, &mtime) < 0)
inode_set_atime_to_ts(inode, inode_get_mtime(inode));
if (PAGE_SIZE > rc)
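For context on the one-line fix above: timespec64_compare() is a three-way comparator, so a negative return means the first timestamp is earlier, while the old call treated any nonzero result as "earlier" and could rewind a newer atime. A minimal user-space sketch of the convention; ts_compare() and the sample values are illustrative, not the kernel helper:

#include <assert.h>
#include <time.h>

/* Three-way comparison: <0 if a is earlier than b, 0 if equal, >0 if later. */
static int ts_compare(const struct timespec *a, const struct timespec *b)
{
        if (a->tv_sec != b->tv_sec)
                return a->tv_sec < b->tv_sec ? -1 : 1;
        if (a->tv_nsec != b->tv_nsec)
                return a->tv_nsec < b->tv_nsec ? -1 : 1;
        return 0;
}

int main(void)
{
        struct timespec atime = { .tv_sec = 100 };
        struct timespec mtime = { .tv_sec = 200 };

        /* Correct form: only bump atime when it is strictly older than mtime. */
        if (ts_compare(&atime, &mtime) < 0)
                atime = mtime;

        /*
         * The buggy form fixed above, "if (ts_compare(&atime, &mtime))", also
         * fires when atime is newer and would needlessly rewind it to mtime.
         */
        assert(ts_compare(&atime, &mtime) == 0);
        return 0;
}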
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index 35b176457bbed0..c2137ea3c25389 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -363,6 +363,10 @@ checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
cifs_dbg(VFS, "Length less than smb header size\n");
}
return -EIO;
+ } else if (total_read < sizeof(*smb) + 2 * smb->WordCount) {
+ cifs_dbg(VFS, "%s: can't read BCC due to invalid WordCount(%u)\n",
+ __func__, smb->WordCount);
+ return -EIO;
}
/* otherwise, there is enough to get to the BCC */
diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
index 816e01c5589b45..2d3b332a79a164 100644
--- a/fs/smb/client/sess.c
+++ b/fs/smb/client/sess.c
@@ -439,7 +439,7 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
cifs_dbg(FYI, "unable to find a suitable iface\n");
}
- if (!chan_index && !iface) {
+ if (!iface) {
cifs_dbg(FYI, "unable to get the interface matching: %pIS\n",
&ss);
spin_unlock(&ses->iface_lock);
@@ -447,7 +447,7 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
}
/* now drop the ref to the current iface */
- if (old_iface && iface) {
+ if (old_iface) {
cifs_dbg(FYI, "replacing iface: %pIS with %pIS\n",
&old_iface->sockaddr,
&iface->sockaddr);
@@ -460,44 +460,32 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
kref_put(&old_iface->refcount, release_iface);
} else if (old_iface) {
- cifs_dbg(FYI, "releasing ref to iface: %pIS\n",
+ /* if a new candidate is not found, keep things as is */
+ cifs_dbg(FYI, "could not replace iface: %pIS\n",
&old_iface->sockaddr);
-
- old_iface->num_channels--;
- if (old_iface->weight_fulfilled)
- old_iface->weight_fulfilled--;
-
- kref_put(&old_iface->refcount, release_iface);
} else if (!chan_index) {
/* special case: update interface for primary channel */
- cifs_dbg(FYI, "referencing primary channel iface: %pIS\n",
- &iface->sockaddr);
- iface->num_channels++;
- iface->weight_fulfilled++;
- } else {
- WARN_ON(!iface);
- cifs_dbg(FYI, "adding new iface: %pIS\n", &iface->sockaddr);
+ if (iface) {
+ cifs_dbg(FYI, "referencing primary channel iface: %pIS\n",
+ &iface->sockaddr);
+ iface->num_channels++;
+ iface->weight_fulfilled++;
+ }
}
spin_unlock(&ses->iface_lock);
- spin_lock(&ses->chan_lock);
- chan_index = cifs_ses_get_chan_index(ses, server);
- if (chan_index == CIFS_INVAL_CHAN_INDEX) {
+ if (iface) {
+ spin_lock(&ses->chan_lock);
+ chan_index = cifs_ses_get_chan_index(ses, server);
+ if (chan_index == CIFS_INVAL_CHAN_INDEX) {
+ spin_unlock(&ses->chan_lock);
+ return 0;
+ }
+
+ ses->chans[chan_index].iface = iface;
spin_unlock(&ses->chan_lock);
- return 0;
}
- ses->chans[chan_index].iface = iface;
-
- /* No iface is found. if secondary chan, drop connection */
- if (!iface && SERVER_IS_CHAN(server))
- ses->chans[chan_index].server = NULL;
-
- spin_unlock(&ses->chan_lock);
-
- if (!iface && SERVER_IS_CHAN(server))
- cifs_put_tcp_session(server, false);
-
return rc;
}
diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
index 32dfa0f7a78c30..82b84a4941dd2f 100644
--- a/fs/smb/client/smb2misc.c
+++ b/fs/smb/client/smb2misc.c
@@ -173,6 +173,21 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server)
}
mid = le64_to_cpu(shdr->MessageId);
+ if (check_smb2_hdr(shdr, mid))
+ return 1;
+
+ if (shdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
+ cifs_dbg(VFS, "Invalid structure size %u\n",
+ le16_to_cpu(shdr->StructureSize));
+ return 1;
+ }
+
+ command = le16_to_cpu(shdr->Command);
+ if (command >= NUMBER_OF_SMB2_COMMANDS) {
+ cifs_dbg(VFS, "Invalid SMB2 command %d\n", command);
+ return 1;
+ }
+
if (len < pdu_size) {
if ((len >= hdr_size)
&& (shdr->Status != 0)) {
@@ -193,21 +208,6 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server)
return 1;
}
- if (check_smb2_hdr(shdr, mid))
- return 1;
-
- if (shdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
- cifs_dbg(VFS, "Invalid structure size %u\n",
- le16_to_cpu(shdr->StructureSize));
- return 1;
- }
-
- command = le16_to_cpu(shdr->Command);
- if (command >= NUMBER_OF_SMB2_COMMANDS) {
- cifs_dbg(VFS, "Invalid SMB2 command %d\n", command);
- return 1;
- }
-
if (smb2_rsp_struct_sizes[command] != pdu->StructureSize2) {
if (command != SMB2_OPLOCK_BREAK_HE && (shdr->Status == 0 ||
pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2_LE)) {
@@ -313,6 +313,9 @@ static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
char *
smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr)
{
+ const int max_off = 4096;
+ const int max_len = 128 * 1024;
+
*off = 0;
*len = 0;
@@ -384,29 +387,20 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr)
* Invalid length or offset probably means data area is invalid, but
* we have little choice but to ignore the data area in this case.
*/
- if (*off > 4096) {
- cifs_dbg(VFS, "offset %d too large, data area ignored\n", *off);
- *len = 0;
+ if (unlikely(*off < 0 || *off > max_off ||
+ *len < 0 || *len > max_len)) {
+ cifs_dbg(VFS, "%s: invalid data area (off=%d len=%d)\n",
+ __func__, *off, *len);
*off = 0;
- } else if (*off < 0) {
- cifs_dbg(VFS, "negative offset %d to data invalid ignore data area\n",
- *off);
- *off = 0;
- *len = 0;
- } else if (*len < 0) {
- cifs_dbg(VFS, "negative data length %d invalid, data area ignored\n",
- *len);
*len = 0;
- } else if (*len > 128 * 1024) {
- cifs_dbg(VFS, "data area larger than 128K: %d\n", *len);
+ } else if (*off == 0) {
*len = 0;
}
/* return pointer to beginning of data area, ie offset from SMB start */
- if ((*off != 0) && (*len != 0))
+ if (*off > 0 && *len > 0)
return (char *)shdr + *off;
- else
- return NULL;
+ return NULL;
}
/*
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index fcfb6566b899fb..66b310208545bf 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -403,8 +403,10 @@ smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
shdr->Id.SyncId.ProcessId);
- cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
- server->ops->calc_smb_size(buf));
+ if (!server->ops->check_message(buf, server->total_read, server)) {
+ cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
+ server->ops->calc_smb_size(buf));
+ }
#endif
}
@@ -3003,7 +3005,7 @@ static int smb2_query_reparse_point(const unsigned int xid,
struct kvec *rsp_iov;
struct smb2_ioctl_rsp *ioctl_rsp;
struct reparse_data_buffer *reparse_buf;
- u32 plen;
+ u32 off, count, len;
cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
@@ -3084,16 +3086,22 @@ static int smb2_query_reparse_point(const unsigned int xid,
*/
if (rc == 0) {
/* See MS-FSCC 2.3.23 */
+ off = le32_to_cpu(ioctl_rsp->OutputOffset);
+ count = le32_to_cpu(ioctl_rsp->OutputCount);
+ if (check_add_overflow(off, count, &len) ||
+ len > rsp_iov[1].iov_len) {
+ cifs_tcon_dbg(VFS, "%s: invalid ioctl: off=%d count=%d\n",
+ __func__, off, count);
+ rc = -EIO;
+ goto query_rp_exit;
+ }
- reparse_buf = (struct reparse_data_buffer *)
- ((char *)ioctl_rsp +
- le32_to_cpu(ioctl_rsp->OutputOffset));
- plen = le32_to_cpu(ioctl_rsp->OutputCount);
-
- if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
- rsp_iov[1].iov_len) {
- cifs_tcon_dbg(FYI, "srv returned invalid ioctl len: %d\n",
- plen);
+ reparse_buf = (void *)((u8 *)ioctl_rsp + off);
+ len = sizeof(*reparse_buf);
+ if (count < len ||
+ count < le16_to_cpu(reparse_buf->ReparseDataLength) + len) {
+ cifs_tcon_dbg(VFS, "%s: invalid ioctl: off=%d count=%d\n",
+ __func__, off, count);
rc = -EIO;
goto query_rp_exit;
}
@@ -4943,6 +4951,7 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
struct smb2_hdr *shdr;
unsigned int pdu_length = server->pdu_size;
unsigned int buf_size;
+ unsigned int next_cmd;
struct mid_q_entry *mid_entry;
int next_is_large;
char *next_buffer = NULL;
@@ -4971,14 +4980,15 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
next_is_large = server->large_buf;
one_more:
shdr = (struct smb2_hdr *)buf;
- if (shdr->NextCommand) {
+ next_cmd = le32_to_cpu(shdr->NextCommand);
+ if (next_cmd) {
+ if (WARN_ON_ONCE(next_cmd > pdu_length))
+ return -1;
if (next_is_large)
next_buffer = (char *)cifs_buf_get();
else
next_buffer = (char *)cifs_small_buf_get();
- memcpy(next_buffer,
- buf + le32_to_cpu(shdr->NextCommand),
- pdu_length - le32_to_cpu(shdr->NextCommand));
+ memcpy(next_buffer, buf + next_cmd, pdu_length - next_cmd);
}
mid_entry = smb2_find_mid(server, buf);
@@ -5002,8 +5012,8 @@ one_more:
else
ret = cifs_handle_standard(server, mid_entry);
- if (ret == 0 && shdr->NextCommand) {
- pdu_length -= le32_to_cpu(shdr->NextCommand);
+ if (ret == 0 && next_cmd) {
+ pdu_length -= next_cmd;
server->large_buf = next_is_large;
if (next_is_large)
server->bigbuf = buf = next_buffer;
@@ -5066,17 +5076,22 @@ smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
NULL, 0, false);
}
-static int
-smb2_next_header(char *buf)
+static int smb2_next_header(struct TCP_Server_Info *server, char *buf,
+ unsigned int *noff)
{
struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
- if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
- return sizeof(struct smb2_transform_hdr) +
- le32_to_cpu(t_hdr->OriginalMessageSize);
-
- return le32_to_cpu(hdr->NextCommand);
+ if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
+ *noff = le32_to_cpu(t_hdr->OriginalMessageSize);
+ if (unlikely(check_add_overflow(*noff, sizeof(*t_hdr), noff)))
+ return -EINVAL;
+ } else {
+ *noff = le32_to_cpu(hdr->NextCommand);
+ }
+ if (unlikely(*noff && *noff < MID_HEADER_SIZE(server)))
+ return -EINVAL;
+ return 0;
}
int cifs_sfu_make_node(unsigned int xid, struct inode *inode,
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 20634fc6d4f09e..4f971c1061f0a3 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -411,8 +411,7 @@ skip_sess_setup:
}
if (smb2_command != SMB2_INTERNAL_CMD)
- if (mod_delayed_work(cifsiod_wq, &server->reconnect, 0))
- cifs_put_tcp_session(server, false);
+ mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
atomic_inc(&tconInfoReconnectCount);
out:
@@ -471,10 +470,15 @@ static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
void **request_buf, unsigned int *total_len)
{
/* BB eventually switch this to SMB2 specific small buf size */
- if (smb2_command == SMB2_SET_INFO)
+ switch (smb2_command) {
+ case SMB2_SET_INFO:
+ case SMB2_QUERY_INFO:
*request_buf = cifs_buf_get();
- else
+ break;
+ default:
*request_buf = cifs_small_buf_get();
+ break;
+ }
if (*request_buf == NULL) {
/* BB should we add a retry in here if not a writepage? */
return -ENOMEM;
@@ -2236,17 +2240,18 @@ parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info,
posix->nlink, posix->mode, posix->reparse_tag);
}
-void
-smb2_parse_contexts(struct TCP_Server_Info *server,
- struct smb2_create_rsp *rsp,
- unsigned int *epoch, char *lease_key, __u8 *oplock,
- struct smb2_file_all_info *buf,
- struct create_posix_rsp *posix)
+int smb2_parse_contexts(struct TCP_Server_Info *server,
+ struct kvec *rsp_iov,
+ unsigned int *epoch,
+ char *lease_key, __u8 *oplock,
+ struct smb2_file_all_info *buf,
+ struct create_posix_rsp *posix)
{
- char *data_offset;
+ struct smb2_create_rsp *rsp = rsp_iov->iov_base;
struct create_context *cc;
- unsigned int next;
- unsigned int remaining;
+ size_t rem, off, len;
+ size_t doff, dlen;
+ size_t noff, nlen;
char *name;
static const char smb3_create_tag_posix[] = {
0x93, 0xAD, 0x25, 0x50, 0x9C,
@@ -2255,45 +2260,63 @@ smb2_parse_contexts(struct TCP_Server_Info *server,
};
*oplock = 0;
- data_offset = (char *)rsp + le32_to_cpu(rsp->CreateContextsOffset);
- remaining = le32_to_cpu(rsp->CreateContextsLength);
- cc = (struct create_context *)data_offset;
+
+ off = le32_to_cpu(rsp->CreateContextsOffset);
+ rem = le32_to_cpu(rsp->CreateContextsLength);
+ if (check_add_overflow(off, rem, &len) || len > rsp_iov->iov_len)
+ return -EINVAL;
+ cc = (struct create_context *)((u8 *)rsp + off);
/* Initialize inode number to 0 in case no valid data in qfid context */
if (buf)
buf->IndexNumber = 0;
- while (remaining >= sizeof(struct create_context)) {
- name = le16_to_cpu(cc->NameOffset) + (char *)cc;
- if (le16_to_cpu(cc->NameLength) == 4 &&
- strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4) == 0)
- *oplock = server->ops->parse_lease_buf(cc, epoch,
- lease_key);
- else if (buf && (le16_to_cpu(cc->NameLength) == 4) &&
- strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4) == 0)
- parse_query_id_ctxt(cc, buf);
- else if ((le16_to_cpu(cc->NameLength) == 16)) {
- if (posix &&
- memcmp(name, smb3_create_tag_posix, 16) == 0)
+ while (rem >= sizeof(*cc)) {
+ doff = le16_to_cpu(cc->DataOffset);
+ dlen = le32_to_cpu(cc->DataLength);
+ if (check_add_overflow(doff, dlen, &len) || len > rem)
+ return -EINVAL;
+
+ noff = le16_to_cpu(cc->NameOffset);
+ nlen = le16_to_cpu(cc->NameLength);
+ if (noff + nlen >= doff)
+ return -EINVAL;
+
+ name = (char *)cc + noff;
+ switch (nlen) {
+ case 4:
+ if (!strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4)) {
+ *oplock = server->ops->parse_lease_buf(cc, epoch,
+ lease_key);
+ } else if (buf &&
+ !strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4)) {
+ parse_query_id_ctxt(cc, buf);
+ }
+ break;
+ case 16:
+ if (posix && !memcmp(name, smb3_create_tag_posix, 16))
parse_posix_ctxt(cc, buf, posix);
+ break;
+ default:
+ cifs_dbg(FYI, "%s: unhandled context (nlen=%zu dlen=%zu)\n",
+ __func__, nlen, dlen);
+ if (IS_ENABLED(CONFIG_CIFS_DEBUG2))
+ cifs_dump_mem("context data: ", cc, dlen);
+ break;
}
- /* else {
- cifs_dbg(FYI, "Context not matched with len %d\n",
- le16_to_cpu(cc->NameLength));
- cifs_dump_mem("Cctxt name: ", name, 4);
- } */
-
- next = le32_to_cpu(cc->Next);
- if (!next)
+
+ off = le32_to_cpu(cc->Next);
+ if (!off)
break;
- remaining -= next;
- cc = (struct create_context *)((char *)cc + next);
+ if (check_sub_overflow(rem, off, &rem))
+ return -EINVAL;
+ cc = (struct create_context *)((u8 *)cc + off);
}
if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
*oplock = rsp->OplockLevel;
- return;
+ return 0;
}
static int
@@ -3124,8 +3147,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
}
- smb2_parse_contexts(server, rsp, &oparms->fid->epoch,
- oparms->fid->lease_key, oplock, buf, posix);
+ rc = smb2_parse_contexts(server, &rsp_iov, &oparms->fid->epoch,
+ oparms->fid->lease_key, oplock, buf, posix);
creat_exit:
SMB2_open_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
@@ -3568,8 +3591,13 @@ SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
struct smb2_query_info_req *req;
struct kvec *iov = rqst->rq_iov;
unsigned int total_len;
+ size_t len;
int rc;
+ if (unlikely(check_add_overflow(input_len, sizeof(*req), &len) ||
+ len > CIFSMaxBufSize))
+ return -EINVAL;
+
rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
(void **) &req, &total_len);
if (rc)
@@ -3591,7 +3619,7 @@ SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
iov[0].iov_base = (char *)req;
/* 1 for Buffer */
- iov[0].iov_len = total_len - 1 + input_len;
+ iov[0].iov_len = len;
return 0;
}
@@ -3599,7 +3627,7 @@ void
SMB2_query_info_free(struct smb_rqst *rqst)
{
if (rqst && rqst->rq_iov)
- cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+ cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
}
static int
@@ -5474,6 +5502,11 @@ build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
return 0;
}
+static inline void free_qfs_info_req(struct kvec *iov)
+{
+ cifs_buf_release(iov->iov_base);
+}
+
int
SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
@@ -5505,7 +5538,7 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
- cifs_small_buf_release(iov.iov_base);
+ free_qfs_info_req(&iov);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
goto posix_qfsinf_exit;
@@ -5556,7 +5589,7 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
- cifs_small_buf_release(iov.iov_base);
+ free_qfs_info_req(&iov);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
goto qfsinf_exit;
@@ -5623,7 +5656,7 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
- cifs_small_buf_release(iov.iov_base);
+ free_qfs_info_req(&iov);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
goto qfsattr_exit;
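Several hunks above (smb2_parse_contexts(), smb2_query_reparse_point(), smb2_next_header(), SMB2_query_info_init()) validate server-supplied offset/length pairs with check_add_overflow() before dereferencing into the response buffer. A minimal user-space sketch of that pattern, assuming the GCC/Clang __builtin_add_overflow builtin that the kernel macro wraps; range_ok() and the sample values are illustrative only:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Validate that [off, off + len) lies inside a buffer of buf_len bytes,
 * rejecting arithmetic wrap-around as well as plain overruns.
 */
static bool range_ok(size_t off, size_t len, size_t buf_len)
{
        size_t end;

        if (__builtin_add_overflow(off, len, &end))     /* true on overflow */
                return false;
        return end <= buf_len;
}

int main(void)
{
        printf("%d\n", range_ok(16, 32, 64));           /* 1: fits            */
        printf("%d\n", range_ok(48, 32, 64));           /* 0: runs past end   */
        printf("%d\n", range_ok((size_t)-8, 32, 64));   /* 0: addition wraps  */
        return 0;
}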
diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
index 46eff9ec302aad..0e371f7e2854b9 100644
--- a/fs/smb/client/smb2proto.h
+++ b/fs/smb/client/smb2proto.h
@@ -251,11 +251,13 @@ extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *);
extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *,
enum securityEnum);
-extern void smb2_parse_contexts(struct TCP_Server_Info *server,
- struct smb2_create_rsp *rsp,
- unsigned int *epoch, char *lease_key,
- __u8 *oplock, struct smb2_file_all_info *buf,
- struct create_posix_rsp *posix);
+int smb2_parse_contexts(struct TCP_Server_Info *server,
+ struct kvec *rsp_iov,
+ unsigned int *epoch,
+ char *lease_key, __u8 *oplock,
+ struct smb2_file_all_info *buf,
+ struct create_posix_rsp *posix);
+
extern int smb3_encryption_required(const struct cifs_tcon *tcon);
extern int smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
struct kvec *iov, unsigned int min_buf_size);
diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c
index 0b90869fd805cd..2ccc849a5bda50 100644
--- a/fs/tracefs/event_inode.c
+++ b/fs/tracefs/event_inode.c
@@ -148,7 +148,8 @@ static const struct file_operations eventfs_file_operations = {
.release = eventfs_release,
};
-static void update_inode_attr(struct inode *inode, struct eventfs_attr *attr, umode_t mode)
+static void update_inode_attr(struct dentry *dentry, struct inode *inode,
+ struct eventfs_attr *attr, umode_t mode)
{
if (!attr) {
inode->i_mode = mode;
@@ -162,9 +163,13 @@ static void update_inode_attr(struct inode *inode, struct eventfs_attr *attr, um
if (attr->mode & EVENTFS_SAVE_UID)
inode->i_uid = attr->uid;
+ else
+ inode->i_uid = d_inode(dentry->d_parent)->i_uid;
if (attr->mode & EVENTFS_SAVE_GID)
inode->i_gid = attr->gid;
+ else
+ inode->i_gid = d_inode(dentry->d_parent)->i_gid;
}
/**
@@ -206,7 +211,7 @@ static struct dentry *create_file(const char *name, umode_t mode,
return eventfs_failed_creating(dentry);
/* If the user updated the directory's attributes, use them */
- update_inode_attr(inode, attr, mode);
+ update_inode_attr(dentry, inode, attr, mode);
inode->i_op = &eventfs_file_inode_operations;
inode->i_fop = fop;
@@ -242,7 +247,8 @@ static struct dentry *create_dir(struct eventfs_inode *ei, struct dentry *parent
return eventfs_failed_creating(dentry);
/* If the user updated the directory's attributes, use them */
- update_inode_attr(inode, &ei->attr, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO);
+ update_inode_attr(dentry, inode, &ei->attr,
+ S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO);
inode->i_op = &eventfs_root_dir_inode_operations;
inode->i_fop = &eventfs_file_operations;
@@ -546,6 +552,8 @@ static struct dentry *eventfs_root_lookup(struct inode *dir,
if (strcmp(ei_child->name, name) != 0)
continue;
ret = simple_lookup(dir, dentry, flags);
+ if (IS_ERR(ret))
+ goto out;
create_dir_dentry(ei, ei_child, ei_dentry, true);
created = true;
break;
@@ -568,6 +576,8 @@ static struct dentry *eventfs_root_lookup(struct inode *dir,
if (r <= 0)
continue;
ret = simple_lookup(dir, dentry, flags);
+ if (IS_ERR(ret))
+ goto out;
create_file_dentry(ei, i, ei_dentry, name, mode, cdata,
fops, true);
break;
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index fc0d6f32c68760..94baced5a1ad64 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -142,9 +142,13 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
#ifdef CONFIG_NET
BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
+BPF_LINK_TYPE(BPF_LINK_TYPE_NETFILTER, netfilter)
+BPF_LINK_TYPE(BPF_LINK_TYPE_TCX, tcx)
+BPF_LINK_TYPE(BPF_LINK_TYPE_NETKIT, netkit)
#endif
#ifdef CONFIG_PERF_EVENTS
BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf)
#endif
BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi)
BPF_LINK_TYPE(BPF_LINK_TYPE_STRUCT_OPS, struct_ops)
+BPF_LINK_TYPE(BPF_LINK_TYPE_UPROBE_MULTI, uprobe_multi)
diff --git a/include/linux/cred.h b/include/linux/cred.h
index af8d353a4b86af..2976f534a7a32f 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -109,14 +109,7 @@ static inline int groups_search(const struct group_info *group_info, kgid_t grp)
* same context as task->real_cred.
*/
struct cred {
- atomic_t usage;
-#ifdef CONFIG_DEBUG_CREDENTIALS
- atomic_t subscribers; /* number of processes subscribed */
- void *put_addr;
- unsigned magic;
-#define CRED_MAGIC 0x43736564
-#define CRED_MAGIC_DEAD 0x44656144
-#endif
+ atomic_long_t usage;
kuid_t uid; /* real UID of the task */
kgid_t gid; /* real GID of the task */
kuid_t suid; /* saved UID of the task */
@@ -172,46 +165,6 @@ extern int cred_fscmp(const struct cred *, const struct cred *);
extern void __init cred_init(void);
extern int set_cred_ucounts(struct cred *);
-/*
- * check for validity of credentials
- */
-#ifdef CONFIG_DEBUG_CREDENTIALS
-extern void __noreturn __invalid_creds(const struct cred *, const char *, unsigned);
-extern void __validate_process_creds(struct task_struct *,
- const char *, unsigned);
-
-extern bool creds_are_invalid(const struct cred *cred);
-
-static inline void __validate_creds(const struct cred *cred,
- const char *file, unsigned line)
-{
- if (unlikely(creds_are_invalid(cred)))
- __invalid_creds(cred, file, line);
-}
-
-#define validate_creds(cred) \
-do { \
- __validate_creds((cred), __FILE__, __LINE__); \
-} while(0)
-
-#define validate_process_creds() \
-do { \
- __validate_process_creds(current, __FILE__, __LINE__); \
-} while(0)
-
-extern void validate_creds_for_do_exit(struct task_struct *);
-#else
-static inline void validate_creds(const struct cred *cred)
-{
-}
-static inline void validate_creds_for_do_exit(struct task_struct *tsk)
-{
-}
-static inline void validate_process_creds(void)
-{
-}
-#endif
-
static inline bool cap_ambient_invariant_ok(const struct cred *cred)
{
return cap_issubset(cred->cap_ambient,
@@ -229,7 +182,7 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
*/
static inline struct cred *get_new_cred_many(struct cred *cred, int nr)
{
- atomic_add(nr, &cred->usage);
+ atomic_long_add(nr, &cred->usage);
return cred;
}
@@ -264,7 +217,6 @@ static inline const struct cred *get_cred_many(const struct cred *cred, int nr)
struct cred *nonconst_cred = (struct cred *) cred;
if (!cred)
return cred;
- validate_creds(cred);
nonconst_cred->non_rcu = 0;
return get_new_cred_many(nonconst_cred, nr);
}
@@ -288,9 +240,8 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
struct cred *nonconst_cred = (struct cred *) cred;
if (!cred)
return NULL;
- if (!atomic_inc_not_zero(&nonconst_cred->usage))
+ if (!atomic_long_inc_not_zero(&nonconst_cred->usage))
return NULL;
- validate_creds(cred);
nonconst_cred->non_rcu = 0;
return cred;
}
@@ -312,8 +263,7 @@ static inline void put_cred_many(const struct cred *_cred, int nr)
struct cred *cred = (struct cred *) _cred;
if (cred) {
- validate_creds(cred);
- if (atomic_sub_and_test(nr, &cred->usage))
+ if (atomic_long_sub_and_test(nr, &cred->usage))
__put_cred(cred);
}
}
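The cred.h hunks above widen the usage counter from atomic_t to atomic_long_t and drop the DEBUG_CREDENTIALS validation hooks. A rough user-space sketch of the same long-sized get/put/get-not-zero reference-counting pattern, using C11 atomics rather than the kernel's atomic_long API; struct obj and the function names are made up for illustration:

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
        atomic_long usage;      /* long-sized so heavy sharing cannot wrap a 32-bit count */
};

static void obj_get_many(struct obj *o, long nr)
{
        atomic_fetch_add_explicit(&o->usage, nr, memory_order_relaxed);
}

/* Take a reference only if the object is not already being torn down. */
static bool obj_get_not_zero(struct obj *o)
{
        long old = atomic_load_explicit(&o->usage, memory_order_relaxed);

        do {
                if (old == 0)
                        return false;
        } while (!atomic_compare_exchange_weak_explicit(&o->usage, &old, old + 1,
                                                        memory_order_relaxed,
                                                        memory_order_relaxed));
        return true;
}

/* Returns true when the caller dropped the final reference. */
static bool obj_put_many(struct obj *o, long nr)
{
        return atomic_fetch_sub_explicit(&o->usage, nr, memory_order_release) == nr;
}

int main(void)
{
        struct obj o = { .usage = 1 };

        obj_get_many(&o, 2);                    /* usage: 3 */
        obj_put_many(&o, 2);                    /* usage: 1 */
        if (obj_get_not_zero(&o))               /* usage: 2 */
                obj_put_many(&o, 1);            /* usage: 1 */
        return obj_put_many(&o, 1) ? 0 : 1;     /* drops the initial reference */
}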
diff --git a/include/linux/damon.h b/include/linux/damon.h
index ab2f17d9926b59..e00ddf1ed39c05 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -559,6 +559,8 @@ struct damon_ctx {
* update
*/
unsigned long next_ops_update_sis;
+ /* for waiting until the execution of the kdamond_fn is started */
+ struct completion kdamond_started;
/* public: */
struct task_struct *kdamond;
diff --git a/include/linux/device.h b/include/linux/device.h
index d7a72a8749ea03..6c83294395ac08 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1007,6 +1007,8 @@ static inline void device_unlock(struct device *dev)
mutex_unlock(&dev->mutex);
}
+DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T))
+
static inline void device_lock_assert(struct device *dev)
{
lockdep_assert_held(&dev->mutex);
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 6730ee900ee1c9..13b1e65fbdccb7 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -21,10 +21,6 @@
#define HID_USAGE_SENSOR_ALS 0x200041
#define HID_USAGE_SENSOR_DATA_LIGHT 0x2004d0
#define HID_USAGE_SENSOR_LIGHT_ILLUM 0x2004d1
-#define HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE 0x2004d2
-#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY 0x2004d3
-#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X 0x2004d4
-#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y 0x2004d5
/* PROX (200011) */
#define HID_USAGE_SENSOR_PROX 0x200011
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index c2ac9e9e7ee9a8..2b5e500bf0930f 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -4447,7 +4447,8 @@ ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb)
action != WLAN_PUB_ACTION_LOC_TRACK_NOTI &&
action != WLAN_PUB_ACTION_FTM_REQUEST &&
action != WLAN_PUB_ACTION_FTM_RESPONSE &&
- action != WLAN_PUB_ACTION_FILS_DISCOVERY;
+ action != WLAN_PUB_ACTION_FILS_DISCOVERY &&
+ action != WLAN_PUB_ACTION_VENDOR_SPECIFIC;
}
/**
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 805bb635cdf558..239a4f68801bb5 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -434,6 +434,7 @@ enum {
/* keep async read/write and isreg together and in order */
REQ_F_SUPPORT_NOWAIT_BIT,
REQ_F_ISREG_BIT,
+ REQ_F_POLL_NO_LAZY_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
@@ -501,6 +502,8 @@ enum {
REQ_F_CLEAR_POLLIN = BIT(REQ_F_CLEAR_POLLIN_BIT),
/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
REQ_F_HASH_LOCKED = BIT(REQ_F_HASH_LOCKED_BIT),
+ /* don't use lazy poll wake for this request */
+ REQ_F_POLL_NO_LAZY = BIT(REQ_F_POLL_NO_LAZY_BIT),
};
typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 7d985a1dfe4af9..5caf3ce823733a 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -73,6 +73,7 @@ struct key_type {
unsigned int flags;
#define KEY_TYPE_NET_DOMAIN 0x00000001 /* Keys of this type have a net namespace domain */
+#define KEY_TYPE_INSTANT_REAP 0x00000002 /* Keys of this type don't have a delay after expiring */
/* vet a description */
int (*vet_description)(const char *description);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 6f3631425f386d..3f7b664d625b94 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -621,7 +621,7 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_140[0x8];
u8 bth_dst_qp[0x18];
- u8 reserved_at_160[0x20];
+ u8 inner_esp_spi[0x20];
u8 outer_esp_spi[0x20];
u8 reserved_at_1a0[0x60];
};
@@ -12001,6 +12001,13 @@ enum {
MLX5_IPSEC_ASO_INC_SN = 0x2,
};
+enum {
+ MLX5_IPSEC_ASO_REPLAY_WIN_32BIT = 0x0,
+ MLX5_IPSEC_ASO_REPLAY_WIN_64BIT = 0x1,
+ MLX5_IPSEC_ASO_REPLAY_WIN_128BIT = 0x2,
+ MLX5_IPSEC_ASO_REPLAY_WIN_256BIT = 0x3,
+};
+
struct mlx5_ifc_ipsec_aso_bits {
u8 valid[0x1];
u8 reserved_at_201[0x1];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 418d26608ece70..da5219b48d5229 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -886,8 +886,8 @@ static inline bool vma_is_anonymous(struct vm_area_struct *vma)
*/
static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
{
- return vma->vm_start <= vma->vm_mm->brk &&
- vma->vm_end >= vma->vm_mm->start_brk;
+ return vma->vm_start < vma->vm_mm->brk &&
+ vma->vm_end > vma->vm_mm->start_brk;
}
/*
@@ -901,8 +901,8 @@ static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
* its "stack". It's not even well-defined for programs written
* languages like Go.
*/
- return vma->vm_start <= vma->vm_mm->start_stack &&
- vma->vm_end >= vma->vm_mm->start_stack;
+ return vma->vm_start <= vma->vm_mm->start_stack &&
+ vma->vm_end >= vma->vm_mm->start_stack;
}
static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 9ae7def16cb2a9..f4fe593c1400e6 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -232,22 +232,27 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio,
if (folio_test_unevictable(folio) || !lrugen->enabled)
return false;
/*
- * There are three common cases for this page:
- * 1. If it's hot, e.g., freshly faulted in or previously hot and
- * migrated, add it to the youngest generation.
- * 2. If it's cold but can't be evicted immediately, i.e., an anon page
- * not in swapcache or a dirty page pending writeback, add it to the
- * second oldest generation.
- * 3. Everything else (clean, cold) is added to the oldest generation.
+ * There are four common cases for this page:
+ * 1. If it's hot, i.e., freshly faulted in, add it to the youngest
+ * generation, and it's protected over the rest below.
+ * 2. If it can't be evicted immediately, i.e., a dirty page pending
+ * writeback, add it to the second youngest generation.
+ * 3. If it should be evicted first, e.g., cold and clean from
+ * folio_rotate_reclaimable(), add it to the oldest generation.
+ * 4. Everything else falls between 2 & 3 above and is added to the
+ * second oldest generation if it's considered inactive, or the
+ * oldest generation otherwise. See lru_gen_is_active().
*/
if (folio_test_active(folio))
seq = lrugen->max_seq;
else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
(folio_test_reclaim(folio) &&
(folio_test_dirty(folio) || folio_test_writeback(folio))))
- seq = lrugen->min_seq[type] + 1;
- else
+ seq = lrugen->max_seq - 1;
+ else if (reclaiming || lrugen->min_seq[type] + MIN_NR_GENS >= lrugen->max_seq)
seq = lrugen->min_seq[type];
+ else
+ seq = lrugen->min_seq[type] + 1;
gen = lru_gen_from_seq(seq);
flags = (gen + 1UL) << LRU_GEN_PGOFF;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3c25226beeed47..9db36e1977125b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -505,33 +505,37 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
* the old generation, is incremented when all its bins become empty.
*
* There are four operations:
- * 1. MEMCG_LRU_HEAD, which moves an memcg to the head of a random bin in its
+ * 1. MEMCG_LRU_HEAD, which moves a memcg to the head of a random bin in its
* current generation (old or young) and updates its "seg" to "head";
- * 2. MEMCG_LRU_TAIL, which moves an memcg to the tail of a random bin in its
+ * 2. MEMCG_LRU_TAIL, which moves a memcg to the tail of a random bin in its
* current generation (old or young) and updates its "seg" to "tail";
- * 3. MEMCG_LRU_OLD, which moves an memcg to the head of a random bin in the old
+ * 3. MEMCG_LRU_OLD, which moves a memcg to the head of a random bin in the old
* generation, updates its "gen" to "old" and resets its "seg" to "default";
- * 4. MEMCG_LRU_YOUNG, which moves an memcg to the tail of a random bin in the
+ * 4. MEMCG_LRU_YOUNG, which moves a memcg to the tail of a random bin in the
* young generation, updates its "gen" to "young" and resets its "seg" to
* "default".
*
* The events that trigger the above operations are:
* 1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD;
- * 2. The first attempt to reclaim an memcg below low, which triggers
+ * 2. The first attempt to reclaim a memcg below low, which triggers
* MEMCG_LRU_TAIL;
- * 3. The first attempt to reclaim an memcg below reclaimable size threshold,
- * which triggers MEMCG_LRU_TAIL;
- * 4. The second attempt to reclaim an memcg below reclaimable size threshold,
- * which triggers MEMCG_LRU_YOUNG;
- * 5. Attempting to reclaim an memcg below min, which triggers MEMCG_LRU_YOUNG;
+ * 3. The first attempt to reclaim a memcg offlined or below reclaimable size
+ * threshold, which triggers MEMCG_LRU_TAIL;
+ * 4. The second attempt to reclaim a memcg offlined or below reclaimable size
+ * threshold, which triggers MEMCG_LRU_YOUNG;
+ * 5. Attempting to reclaim a memcg below min, which triggers MEMCG_LRU_YOUNG;
* 6. Finishing the aging on the eviction path, which triggers MEMCG_LRU_YOUNG;
- * 7. Offlining an memcg, which triggers MEMCG_LRU_OLD.
+ * 7. Offlining a memcg, which triggers MEMCG_LRU_OLD.
*
- * Note that memcg LRU only applies to global reclaim, and the round-robin
- * incrementing of their max_seq counters ensures the eventual fairness to all
- * eligible memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
+ * Notes:
+ * 1. Memcg LRU only applies to global reclaim, and the round-robin incrementing
+ * of their max_seq counters ensures the eventual fairness to all eligible
+ * memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
+ * 2. There are only two valid generations: old (seq) and young (seq+1).
+ * MEMCG_NR_GENS is set to three so that when reading the generation counter
+ * locklessly, a stale value (seq-1) does not wrap around to young.
*/
-#define MEMCG_NR_GENS 2
+#define MEMCG_NR_GENS 3
#define MEMCG_NR_BINS 8
struct lru_gen_memcg {
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 60ca768bc8679f..dea043bc1e383a 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1829,6 +1829,7 @@ extern bool pcie_ports_native;
int pci_disable_link_state(struct pci_dev *pdev, int state);
int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
int pci_enable_link_state(struct pci_dev *pdev, int state);
+int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
void pcie_no_aspm(void);
bool pcie_aspm_support_enabled(void);
bool pcie_aspm_enabled(struct pci_dev *pdev);
@@ -1839,6 +1840,8 @@ static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{ return 0; }
static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
{ return 0; }
+static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
+{ return 0; }
static inline void pcie_no_aspm(void) { }
static inline bool pcie_aspm_support_enabled(void) { return false; }
static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 82da55101b5a30..61ebe723ee4d50 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -31,17 +31,22 @@ struct prefix_info {
__u8 length;
__u8 prefix_len;
+ union __packed {
+ __u8 flags;
+ struct __packed {
#if defined(__BIG_ENDIAN_BITFIELD)
- __u8 onlink : 1,
+ __u8 onlink : 1,
autoconf : 1,
reserved : 6;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 reserved : 6,
+ __u8 reserved : 6,
autoconf : 1,
onlink : 1;
#else
#error "Please fix <asm/byteorder.h>"
#endif
+ };
+ };
__be32 valid;
__be32 prefered;
__be32 reserved2;
@@ -49,6 +54,9 @@ struct prefix_info {
struct in6_addr prefix;
};
+/* rfc4861 4.6.2: IPv6 PIO is 32 bytes in size */
+static_assert(sizeof(struct prefix_info) == 32);
+
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/if_inet6.h>
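The prefix_info change above overlays the onlink/autoconf bits with a whole flags byte and pins the structure to its 32-byte RFC 4861 wire size with a compile-time assertion. A small user-space sketch of that layout technique, assuming little-endian bitfield allocation; opt_hdr and its fields are illustrative, not the kernel structure:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One raw byte of option flags, also addressable bit by bit. */
struct opt_hdr {
        uint8_t type;
        uint8_t length;
        union {
                uint8_t flags;
                struct {
                        uint8_t reserved : 6;   /* little-endian bit order assumed */
                        uint8_t autoconf : 1;
                        uint8_t onlink   : 1;
                };
        };
        uint8_t prefix_len;
} __attribute__((packed));

/* Catch accidental padding at compile time, like the kernel's static_assert(). */
_Static_assert(sizeof(struct opt_hdr) == 4, "opt_hdr must be 4 bytes on the wire");

int main(void)
{
        struct opt_hdr h;

        memset(&h, 0, sizeof(h));
        h.flags = 0xC0;         /* sets both top bits in one store */
        printf("onlink=%d autoconf=%d\n", h.onlink, h.autoconf);
        return 0;
}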
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 20988623c5cc27..a3a1ea2696a804 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -189,6 +189,7 @@ struct blocked_key {
struct smp_csrk {
bdaddr_t bdaddr;
u8 bdaddr_type;
+ u8 link_type;
u8 type;
u8 val[16];
};
@@ -198,6 +199,7 @@ struct smp_ltk {
struct rcu_head rcu;
bdaddr_t bdaddr;
u8 bdaddr_type;
+ u8 link_type;
u8 authenticated;
u8 type;
u8 enc_size;
@@ -212,6 +214,7 @@ struct smp_irk {
bdaddr_t rpa;
bdaddr_t bdaddr;
u8 addr_type;
+ u8 link_type;
u8 val[16];
};
@@ -219,6 +222,8 @@ struct link_key {
struct list_head list;
struct rcu_head rcu;
bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u8 link_type;
u8 type;
u8 val[HCI_LINK_KEY_SIZE];
u8 pin_len;
@@ -1227,11 +1232,11 @@ static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev,
continue;
/* Match CIG ID if set */
- if (cig != BT_ISO_QOS_CIG_UNSET && cig != c->iso_qos.ucast.cig)
+ if (cig != c->iso_qos.ucast.cig)
continue;
/* Match CIS ID if set */
- if (id != BT_ISO_QOS_CIS_UNSET && id != c->iso_qos.ucast.cis)
+ if (id != c->iso_qos.ucast.cis)
continue;
/* Match destination address if set */
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 3e454c4d7ba623..f07642264c1eb6 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -22,10 +22,6 @@
#define IF_RS_SENT 0x10
#define IF_READY 0x80000000
-/* prefix flags */
-#define IF_PREFIX_ONLINK 0x01
-#define IF_PREFIX_AUTOCONF 0x02
-
enum {
INET6_IFADDR_STATE_PREDAD,
INET6_IFADDR_STATE_DAD,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 1ba9f4ddf2f6db..9ba6413fd2e3ea 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -179,9 +179,6 @@ struct fib6_info {
refcount_t fib6_ref;
unsigned long expires;
-
- struct hlist_node gc_link;
-
struct dst_metrics *fib6_metrics;
#define fib6_pmtu fib6_metrics->metrics[RTAX_MTU-1]
@@ -250,6 +247,19 @@ static inline bool fib6_requires_src(const struct fib6_info *rt)
return rt->fib6_src.plen > 0;
}
+static inline void fib6_clean_expires(struct fib6_info *f6i)
+{
+ f6i->fib6_flags &= ~RTF_EXPIRES;
+ f6i->expires = 0;
+}
+
+static inline void fib6_set_expires(struct fib6_info *f6i,
+ unsigned long expires)
+{
+ f6i->expires = expires;
+ f6i->fib6_flags |= RTF_EXPIRES;
+}
+
static inline bool fib6_check_expired(const struct fib6_info *f6i)
{
if (f6i->fib6_flags & RTF_EXPIRES)
@@ -257,11 +267,6 @@ static inline bool fib6_check_expired(const struct fib6_info *f6i)
return false;
}
-static inline bool fib6_has_expires(const struct fib6_info *f6i)
-{
- return f6i->fib6_flags & RTF_EXPIRES;
-}
-
/* Function to safely get fn->fn_sernum for passed in rt
* and store result in passed in cookie.
* Return true if we can get cookie safely
@@ -383,7 +388,6 @@ struct fib6_table {
struct inet_peer_base tb6_peers;
unsigned int flags;
unsigned int fib_seq;
- struct hlist_head tb6_gc_hlist; /* GC candidates */
#define RT6_TABLE_HAS_DFLT_ROUTER BIT(0)
};
@@ -500,48 +504,6 @@ void fib6_gc_cleanup(void);
int fib6_init(void);
-/* fib6_info must be locked by the caller, and fib6_info->fib6_table can be
- * NULL.
- */
-static inline void fib6_set_expires_locked(struct fib6_info *f6i,
- unsigned long expires)
-{
- struct fib6_table *tb6;
-
- tb6 = f6i->fib6_table;
- f6i->expires = expires;
- if (tb6 && !fib6_has_expires(f6i))
- hlist_add_head(&f6i->gc_link, &tb6->tb6_gc_hlist);
- f6i->fib6_flags |= RTF_EXPIRES;
-}
-
-/* fib6_info must be locked by the caller, and fib6_info->fib6_table can be
- * NULL. If fib6_table is NULL, the fib6_info will no be inserted into the
- * list of GC candidates until it is inserted into a table.
- */
-static inline void fib6_set_expires(struct fib6_info *f6i,
- unsigned long expires)
-{
- spin_lock_bh(&f6i->fib6_table->tb6_lock);
- fib6_set_expires_locked(f6i, expires);
- spin_unlock_bh(&f6i->fib6_table->tb6_lock);
-}
-
-static inline void fib6_clean_expires_locked(struct fib6_info *f6i)
-{
- if (fib6_has_expires(f6i))
- hlist_del_init(&f6i->gc_link);
- f6i->fib6_flags &= ~RTF_EXPIRES;
- f6i->expires = 0;
-}
-
-static inline void fib6_clean_expires(struct fib6_info *f6i)
-{
- spin_lock_bh(&f6i->fib6_table->tb6_lock);
- fib6_clean_expires_locked(f6i);
- spin_unlock_bh(&f6i->fib6_table->tb6_lock);
-}
-
struct ipv6_route_iter {
struct seq_net_private p;
struct fib6_walker w;
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index fe1507c1db828b..692d5955911c7d 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -62,6 +62,8 @@ struct nf_flowtable_type {
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule);
void (*free)(struct nf_flowtable *ft);
+ void (*get)(struct nf_flowtable *ft);
+ void (*put)(struct nf_flowtable *ft);
nf_hookfn *hook;
struct module *owner;
};
@@ -240,6 +242,11 @@ nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
}
list_add_tail(&block_cb->list, &block->cb_list);
+ up_write(&flow_table->flow_block_lock);
+
+ if (flow_table->type->get)
+ flow_table->type->get(flow_table);
+ return 0;
unlock:
up_write(&flow_table->flow_block_lock);
@@ -262,6 +269,9 @@ nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
WARN_ON(true);
}
up_write(&flow_table->flow_block_lock);
+
+ if (flow_table->type->put)
+ flow_table->type->put(flow_table);
}
void flow_offload_route_init(struct flow_offload *flow,
diff --git a/include/net/sock.h b/include/net/sock.h
index 1d6931caf0c3c1..0201136b0b9caf 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2799,6 +2799,11 @@ static inline bool sk_is_tcp(const struct sock *sk)
return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
}
+static inline bool sk_is_stream_unix(const struct sock *sk)
+{
+ return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM;
+}
+
/**
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h
index 4dfa6d7f83baa9..cd104a1343e2d6 100644
--- a/include/trace/events/9p.h
+++ b/include/trace/events/9p.h
@@ -178,18 +178,21 @@ TRACE_EVENT(9p_protocol_dump,
__field( void *, clnt )
__field( __u8, type )
__field( __u16, tag )
- __array( unsigned char, line, P9_PROTO_DUMP_SZ )
+ __dynamic_array(unsigned char, line,
+ min_t(size_t, pdu->capacity, P9_PROTO_DUMP_SZ))
),
TP_fast_assign(
__entry->clnt = clnt;
__entry->type = pdu->id;
__entry->tag = pdu->tag;
- memcpy(__entry->line, pdu->sdata, P9_PROTO_DUMP_SZ);
+ memcpy(__get_dynamic_array(line), pdu->sdata,
+ __get_dynamic_array_len(line));
),
- TP_printk("clnt %lu %s(tag = %d)\n%.3x: %16ph\n%.3x: %16ph\n",
+ TP_printk("clnt %lu %s(tag = %d)\n%*ph\n",
(unsigned long)__entry->clnt, show_9p_op(__entry->type),
- __entry->tag, 0, __entry->line, 16, __entry->line + 16)
+ __entry->tag, __get_dynamic_array_len(line),
+ __get_dynamic_array(line))
);
diff --git a/io_uring/poll.c b/io_uring/poll.c
index d38d05edb4fa26..d59b74a99d4e4b 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -366,11 +366,16 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
static void __io_poll_execute(struct io_kiocb *req, int mask)
{
+ unsigned flags = 0;
+
io_req_set_res(req, mask, 0);
req->io_task_work.func = io_poll_task_func;
trace_io_uring_task_add(req, mask);
- __io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
+
+ if (!(req->flags & REQ_F_POLL_NO_LAZY))
+ flags = IOU_F_TWQ_LAZY_WAKE;
+ __io_req_task_work_add(req, flags);
}
static inline void io_poll_execute(struct io_kiocb *req, int res)
@@ -526,10 +531,19 @@ static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
poll->head = head;
poll->wait.private = (void *) wqe_private;
- if (poll->events & EPOLLEXCLUSIVE)
+ if (poll->events & EPOLLEXCLUSIVE) {
+ /*
+ * Exclusive waits may only wake a limited number of entries
+ * rather than all of them, which may interfere with lazy
+ * wake if someone does wait(events > 1). Ensure we don't do
+ * lazy wake for those, as we need to process each one as they
+ * come in.
+ */
+ req->flags |= REQ_F_POLL_NO_LAZY;
add_wait_queue_exclusive(head, &poll->wait);
- else
+ } else {
add_wait_queue(head, &poll->wait);
+ }
}
static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index acbc2924ecd211..7d3ef62e620a58 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -7,7 +7,7 @@
#include <linux/nospec.h>
#include <uapi/linux/io_uring.h>
-#include <uapi/asm-generic/ioctls.h>
+#include <asm/ioctls.h>
#include "io_uring.h"
#include "rsrc.h"
diff --git a/kernel/Kconfig.kexec b/kernel/Kconfig.kexec
index 1cc3b1c595d7f9..2fd510256604e9 100644
--- a/kernel/Kconfig.kexec
+++ b/kernel/Kconfig.kexec
@@ -94,7 +94,6 @@ config KEXEC_JUMP
config CRASH_DUMP
bool "kernel crash dumps"
depends on ARCH_SUPPORTS_CRASH_DUMP
- depends on ARCH_SUPPORTS_KEXEC
select CRASH_CORE
select KEXEC_CORE
help
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index efe87d501c8c3c..d4313b53837e3d 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -199,7 +199,7 @@ static __initdata char *suffix_tbl[] = {
* It returns 0 on success and -EINVAL on failure.
*/
static int __init parse_crashkernel_suffix(char *cmdline,
- unsigned long long *crash_size,
+ unsigned long long *crash_size,
const char *suffix)
{
char *cur = cmdline;
@@ -268,9 +268,9 @@ static int __init __parse_crashkernel(char *cmdline,
unsigned long long *crash_base,
const char *suffix)
{
- char *first_colon, *first_space;
- char *ck_cmdline;
- char *name = "crashkernel=";
+ char *first_colon, *first_space;
+ char *ck_cmdline;
+ char *name = "crashkernel=";
BUG_ON(!crash_size || !crash_base);
*crash_size = 0;
@@ -440,7 +440,7 @@ retry:
return;
}
- if ((crash_base > CRASH_ADDR_LOW_MAX) &&
+ if ((crash_base >= CRASH_ADDR_LOW_MAX) &&
crash_low_size && reserve_crashkernel_low(crash_low_size)) {
memblock_phys_free(crash_base, crash_size);
return;
diff --git a/kernel/cred.c b/kernel/cred.c
index 3c714cb31660d9..c033a201c808e5 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -43,10 +43,6 @@ static struct group_info init_groups = { .usage = REFCOUNT_INIT(2) };
*/
struct cred init_cred = {
.usage = ATOMIC_INIT(4),
-#ifdef CONFIG_DEBUG_CREDENTIALS
- .subscribers = ATOMIC_INIT(2),
- .magic = CRED_MAGIC,
-#endif
.uid = GLOBAL_ROOT_UID,
.gid = GLOBAL_ROOT_GID,
.suid = GLOBAL_ROOT_UID,
@@ -66,31 +62,6 @@ struct cred init_cred = {
.ucounts = &init_ucounts,
};
-static inline void set_cred_subscribers(struct cred *cred, int n)
-{
-#ifdef CONFIG_DEBUG_CREDENTIALS
- atomic_set(&cred->subscribers, n);
-#endif
-}
-
-static inline int read_cred_subscribers(const struct cred *cred)
-{
-#ifdef CONFIG_DEBUG_CREDENTIALS
- return atomic_read(&cred->subscribers);
-#else
- return 0;
-#endif
-}
-
-static inline void alter_cred_subscribers(const struct cred *_cred, int n)
-{
-#ifdef CONFIG_DEBUG_CREDENTIALS
- struct cred *cred = (struct cred *) _cred;
-
- atomic_add(n, &cred->subscribers);
-#endif
-}
-
/*
* The RCU callback to actually dispose of a set of credentials
*/
@@ -100,20 +71,9 @@ static void put_cred_rcu(struct rcu_head *rcu)
kdebug("put_cred_rcu(%p)", cred);
-#ifdef CONFIG_DEBUG_CREDENTIALS
- if (cred->magic != CRED_MAGIC_DEAD ||
- atomic_read(&cred->usage) != 0 ||
- read_cred_subscribers(cred) != 0)
- panic("CRED: put_cred_rcu() sees %p with"
- " mag %x, put %p, usage %d, subscr %d\n",
- cred, cred->magic, cred->put_addr,
- atomic_read(&cred->usage),
- read_cred_subscribers(cred));
-#else
- if (atomic_read(&cred->usage) != 0)
- panic("CRED: put_cred_rcu() sees %p with usage %d\n",
- cred, atomic_read(&cred->usage));
-#endif
+ if (atomic_long_read(&cred->usage) != 0)
+ panic("CRED: put_cred_rcu() sees %p with usage %ld\n",
+ cred, atomic_long_read(&cred->usage));
security_cred_free(cred);
key_put(cred->session_keyring);
@@ -137,16 +97,10 @@ static void put_cred_rcu(struct rcu_head *rcu)
*/
void __put_cred(struct cred *cred)
{
- kdebug("__put_cred(%p{%d,%d})", cred,
- atomic_read(&cred->usage),
- read_cred_subscribers(cred));
-
- BUG_ON(atomic_read(&cred->usage) != 0);
-#ifdef CONFIG_DEBUG_CREDENTIALS
- BUG_ON(read_cred_subscribers(cred) != 0);
- cred->magic = CRED_MAGIC_DEAD;
- cred->put_addr = __builtin_return_address(0);
-#endif
+ kdebug("__put_cred(%p{%ld})", cred,
+ atomic_long_read(&cred->usage));
+
+ BUG_ON(atomic_long_read(&cred->usage) != 0);
BUG_ON(cred == current->cred);
BUG_ON(cred == current->real_cred);
@@ -164,9 +118,8 @@ void exit_creds(struct task_struct *tsk)
{
struct cred *real_cred, *cred;
- kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
- atomic_read(&tsk->cred->usage),
- read_cred_subscribers(tsk->cred));
+ kdebug("exit_creds(%u,%p,%p,{%ld})", tsk->pid, tsk->real_cred, tsk->cred,
+ atomic_long_read(&tsk->cred->usage));
real_cred = (struct cred *) tsk->real_cred;
tsk->real_cred = NULL;
@@ -174,15 +127,10 @@ void exit_creds(struct task_struct *tsk)
cred = (struct cred *) tsk->cred;
tsk->cred = NULL;
- validate_creds(cred);
if (real_cred == cred) {
- alter_cred_subscribers(cred, -2);
put_cred_many(cred, 2);
} else {
- validate_creds(real_cred);
- alter_cred_subscribers(real_cred, -1);
put_cred(real_cred);
- alter_cred_subscribers(cred, -1);
put_cred(cred);
}
@@ -230,10 +178,7 @@ struct cred *cred_alloc_blank(void)
if (!new)
return NULL;
- atomic_set(&new->usage, 1);
-#ifdef CONFIG_DEBUG_CREDENTIALS
- new->magic = CRED_MAGIC;
-#endif
+ atomic_long_set(&new->usage, 1);
if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
goto error;
@@ -264,8 +209,6 @@ struct cred *prepare_creds(void)
const struct cred *old;
struct cred *new;
- validate_process_creds();
-
new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
if (!new)
return NULL;
@@ -276,8 +219,7 @@ struct cred *prepare_creds(void)
memcpy(new, old, sizeof(struct cred));
new->non_rcu = 0;
- atomic_set(&new->usage, 1);
- set_cred_subscribers(new, 0);
+ atomic_long_set(&new->usage, 1);
get_group_info(new->group_info);
get_uid(new->user);
get_user_ns(new->user_ns);
@@ -300,7 +242,6 @@ struct cred *prepare_creds(void)
if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
goto error;
- validate_creds(new);
return new;
error:
@@ -362,10 +303,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
clone_flags & CLONE_THREAD
) {
p->real_cred = get_cred_many(p->cred, 2);
- alter_cred_subscribers(p->cred, 2);
- kdebug("share_creds(%p{%d,%d})",
- p->cred, atomic_read(&p->cred->usage),
- read_cred_subscribers(p->cred));
+ kdebug("share_creds(%p{%ld})",
+ p->cred, atomic_long_read(&p->cred->usage));
inc_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
return 0;
}
@@ -404,8 +343,6 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
p->cred = p->real_cred = get_cred(new);
inc_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
- alter_cred_subscribers(new, 2);
- validate_creds(new);
return 0;
error_put:
@@ -457,17 +394,11 @@ int commit_creds(struct cred *new)
struct task_struct *task = current;
const struct cred *old = task->real_cred;
- kdebug("commit_creds(%p{%d,%d})", new,
- atomic_read(&new->usage),
- read_cred_subscribers(new));
+ kdebug("commit_creds(%p{%ld})", new,
+ atomic_long_read(&new->usage));
BUG_ON(task->cred != old);
-#ifdef CONFIG_DEBUG_CREDENTIALS
- BUG_ON(read_cred_subscribers(old) < 2);
- validate_creds(old);
- validate_creds(new);
-#endif
- BUG_ON(atomic_read(&new->usage) < 1);
+ BUG_ON(atomic_long_read(&new->usage) < 1);
get_cred(new); /* we will require a ref for the subj creds too */
@@ -502,14 +433,12 @@ int commit_creds(struct cred *new)
* RLIMIT_NPROC limits on user->processes have already been checked
* in set_user().
*/
- alter_cred_subscribers(new, 2);
if (new->user != old->user || new->user_ns != old->user_ns)
inc_rlimit_ucounts(new->ucounts, UCOUNT_RLIMIT_NPROC, 1);
rcu_assign_pointer(task->real_cred, new);
rcu_assign_pointer(task->cred, new);
if (new->user != old->user || new->user_ns != old->user_ns)
dec_rlimit_ucounts(old->ucounts, UCOUNT_RLIMIT_NPROC, 1);
- alter_cred_subscribers(old, -2);
/* send notifications */
if (!uid_eq(new->uid, old->uid) ||
@@ -539,14 +468,10 @@ EXPORT_SYMBOL(commit_creds);
*/
void abort_creds(struct cred *new)
{
- kdebug("abort_creds(%p{%d,%d})", new,
- atomic_read(&new->usage),
- read_cred_subscribers(new));
+ kdebug("abort_creds(%p{%ld})", new,
+ atomic_long_read(&new->usage));
-#ifdef CONFIG_DEBUG_CREDENTIALS
- BUG_ON(read_cred_subscribers(new) != 0);
-#endif
- BUG_ON(atomic_read(&new->usage) < 1);
+ BUG_ON(atomic_long_read(&new->usage) < 1);
put_cred(new);
}
EXPORT_SYMBOL(abort_creds);
@@ -562,12 +487,8 @@ const struct cred *override_creds(const struct cred *new)
{
const struct cred *old = current->cred;
- kdebug("override_creds(%p{%d,%d})", new,
- atomic_read(&new->usage),
- read_cred_subscribers(new));
-
- validate_creds(old);
- validate_creds(new);
+ kdebug("override_creds(%p{%ld})", new,
+ atomic_long_read(&new->usage));
/*
* NOTE! This uses 'get_new_cred()' rather than 'get_cred()'.
@@ -576,18 +497,12 @@ const struct cred *override_creds(const struct cred *new)
* we are only installing the cred into the thread-synchronous
* '->cred' pointer, not the '->real_cred' pointer that is
* visible to other threads under RCU.
- *
- * Also note that we did validate_creds() manually, not depending
- * on the validation in 'get_cred()'.
*/
get_new_cred((struct cred *)new);
- alter_cred_subscribers(new, 1);
rcu_assign_pointer(current->cred, new);
- alter_cred_subscribers(old, -1);
- kdebug("override_creds() = %p{%d,%d}", old,
- atomic_read(&old->usage),
- read_cred_subscribers(old));
+ kdebug("override_creds() = %p{%ld}", old,
+ atomic_long_read(&old->usage));
return old;
}
EXPORT_SYMBOL(override_creds);
@@ -603,15 +518,10 @@ void revert_creds(const struct cred *old)
{
const struct cred *override = current->cred;
- kdebug("revert_creds(%p{%d,%d})", old,
- atomic_read(&old->usage),
- read_cred_subscribers(old));
+ kdebug("revert_creds(%p{%ld})", old,
+ atomic_long_read(&old->usage));
- validate_creds(old);
- validate_creds(override);
- alter_cred_subscribers(old, 1);
rcu_assign_pointer(current->cred, old);
- alter_cred_subscribers(override, -1);
put_cred(override);
}
EXPORT_SYMBOL(revert_creds);
@@ -731,12 +641,10 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
kdebug("prepare_kernel_cred() alloc %p", new);
old = get_task_cred(daemon);
- validate_creds(old);
*new = *old;
new->non_rcu = 0;
- atomic_set(&new->usage, 1);
- set_cred_subscribers(new, 0);
+ atomic_long_set(&new->usage, 1);
get_uid(new->user);
get_user_ns(new->user_ns);
get_group_info(new->group_info);
@@ -760,7 +668,6 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
goto error;
put_cred(old);
- validate_creds(new);
return new;
error:
@@ -825,109 +732,3 @@ int set_create_files_as(struct cred *new, struct inode *inode)
return security_kernel_create_files_as(new, inode);
}
EXPORT_SYMBOL(set_create_files_as);
-
-#ifdef CONFIG_DEBUG_CREDENTIALS
-
-bool creds_are_invalid(const struct cred *cred)
-{
- if (cred->magic != CRED_MAGIC)
- return true;
- return false;
-}
-EXPORT_SYMBOL(creds_are_invalid);
-
-/*
- * dump invalid credentials
- */
-static void dump_invalid_creds(const struct cred *cred, const char *label,
- const struct task_struct *tsk)
-{
- pr_err("%s credentials: %p %s%s%s\n",
- label, cred,
- cred == &init_cred ? "[init]" : "",
- cred == tsk->real_cred ? "[real]" : "",
- cred == tsk->cred ? "[eff]" : "");
- pr_err("->magic=%x, put_addr=%p\n",
- cred->magic, cred->put_addr);
- pr_err("->usage=%d, subscr=%d\n",
- atomic_read(&cred->usage),
- read_cred_subscribers(cred));
- pr_err("->*uid = { %d,%d,%d,%d }\n",
- from_kuid_munged(&init_user_ns, cred->uid),
- from_kuid_munged(&init_user_ns, cred->euid),
- from_kuid_munged(&init_user_ns, cred->suid),
- from_kuid_munged(&init_user_ns, cred->fsuid));
- pr_err("->*gid = { %d,%d,%d,%d }\n",
- from_kgid_munged(&init_user_ns, cred->gid),
- from_kgid_munged(&init_user_ns, cred->egid),
- from_kgid_munged(&init_user_ns, cred->sgid),
- from_kgid_munged(&init_user_ns, cred->fsgid));
-#ifdef CONFIG_SECURITY
- pr_err("->security is %p\n", cred->security);
- if ((unsigned long) cred->security >= PAGE_SIZE &&
- (((unsigned long) cred->security & 0xffffff00) !=
- (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8)))
- pr_err("->security {%x, %x}\n",
- ((u32*)cred->security)[0],
- ((u32*)cred->security)[1]);
-#endif
-}
-
-/*
- * report use of invalid credentials
- */
-void __noreturn __invalid_creds(const struct cred *cred, const char *file, unsigned line)
-{
- pr_err("Invalid credentials\n");
- pr_err("At %s:%u\n", file, line);
- dump_invalid_creds(cred, "Specified", current);
- BUG();
-}
-EXPORT_SYMBOL(__invalid_creds);
-
-/*
- * check the credentials on a process
- */
-void __validate_process_creds(struct task_struct *tsk,
- const char *file, unsigned line)
-{
- if (tsk->cred == tsk->real_cred) {
- if (unlikely(read_cred_subscribers(tsk->cred) < 2 ||
- creds_are_invalid(tsk->cred)))
- goto invalid_creds;
- } else {
- if (unlikely(read_cred_subscribers(tsk->real_cred) < 1 ||
- read_cred_subscribers(tsk->cred) < 1 ||
- creds_are_invalid(tsk->real_cred) ||
- creds_are_invalid(tsk->cred)))
- goto invalid_creds;
- }
- return;
-
-invalid_creds:
- pr_err("Invalid process credentials\n");
- pr_err("At %s:%u\n", file, line);
-
- dump_invalid_creds(tsk->real_cred, "Real", tsk);
- if (tsk->cred != tsk->real_cred)
- dump_invalid_creds(tsk->cred, "Effective", tsk);
- else
- pr_err("Effective creds == Real creds\n");
- BUG();
-}
-EXPORT_SYMBOL(__validate_process_creds);
-
-/*
- * check creds for do_exit()
- */
-void validate_creds_for_do_exit(struct task_struct *tsk)
-{
- kdebug("validate_creds_for_do_exit(%p,%p{%d,%d})",
- tsk->real_cred, tsk->cred,
- atomic_read(&tsk->cred->usage),
- read_cred_subscribers(tsk->cred));
-
- __validate_process_creds(tsk, __FILE__, __LINE__);
-}
-
-#endif /* CONFIG_DEBUG_CREDENTIALS */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c9d123e13b5795..9efd0d7775e7ce 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1947,6 +1947,16 @@ static bool perf_event_validate_size(struct perf_event *event)
group_leader->nr_siblings + 1) > 16*1024)
return false;
+ /*
+ * When creating a new group leader, group_leader->ctx is initialized
+ * after the size has been validated, but we cannot safely use
+ * for_each_sibling_event() until group_leader->ctx is set. A new group
+ * leader cannot have any siblings yet, so we can safely skip checking
+ * the non-existent siblings.
+ */
+ if (event == group_leader)
+ return true;
+
for_each_sibling_event(sibling, group_leader) {
if (__perf_event_read_size(sibling->attr.read_format,
group_leader->nr_siblings + 1) > 16*1024)
diff --git a/kernel/exit.c b/kernel/exit.c
index ee9f43bed49a24..aedc0832c9f4de 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -824,8 +824,6 @@ void __noreturn do_exit(long code)
ptrace_event(PTRACE_EVENT_EXIT, code);
user_events_exit(tsk);
- validate_creds_for_do_exit(tsk);
-
io_uring_files_cancel();
exit_signals(tsk); /* sets PF_EXITING */
@@ -909,7 +907,6 @@ void __noreturn do_exit(long code)
if (tsk->task_frag.page)
put_page(tsk->task_frag.page);
- validate_creds_for_do_exit(tsk);
exit_task_stack_account(tsk);
check_stack_usage();
diff --git a/kernel/resource.c b/kernel/resource.c
index 866ef3663a0b65..91be1bc50b60e9 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1844,8 +1844,8 @@ get_free_mem_region(struct device *dev, struct resource *base,
write_lock(&resource_lock);
for (addr = gfr_start(base, size, align, flags);
- gfr_continue(base, addr, size, flags);
- addr = gfr_next(addr, size, flags)) {
+ gfr_continue(base, addr, align, flags);
+ addr = gfr_next(addr, align, flags)) {
if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
REGION_DISJOINT)
continue;
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index e1a6e3c675c0e8..9a846439b36a0f 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -201,6 +201,20 @@ COND_SYSCALL(recvmmsg_time32);
COND_SYSCALL_COMPAT(recvmmsg_time32);
COND_SYSCALL_COMPAT(recvmmsg_time64);
+/* Posix timer syscalls may be configured out */
+COND_SYSCALL(timer_create);
+COND_SYSCALL(timer_gettime);
+COND_SYSCALL(timer_getoverrun);
+COND_SYSCALL(timer_settime);
+COND_SYSCALL(timer_delete);
+COND_SYSCALL(clock_adjtime);
+COND_SYSCALL(getitimer);
+COND_SYSCALL(setitimer);
+COND_SYSCALL(alarm);
+COND_SYSCALL_COMPAT(timer_create);
+COND_SYSCALL_COMPAT(getitimer);
+COND_SYSCALL_COMPAT(setitimer);
+
/*
* Architecture specific syscalls: see further below
*/
diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
index 828aeecbd1e8a9..9b6fcb8d85e78d 100644
--- a/kernel/time/posix-stubs.c
+++ b/kernel/time/posix-stubs.c
@@ -17,40 +17,6 @@
#include <linux/time_namespace.h>
#include <linux/compat.h>
-#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
-/* Architectures may override SYS_NI and COMPAT_SYS_NI */
-#include <asm/syscall_wrapper.h>
-#endif
-
-asmlinkage long sys_ni_posix_timers(void)
-{
- pr_err_once("process %d (%s) attempted a POSIX timer syscall "
- "while CONFIG_POSIX_TIMERS is not set\n",
- current->pid, current->comm);
- return -ENOSYS;
-}
-
-#ifndef SYS_NI
-#define SYS_NI(name) SYSCALL_ALIAS(sys_##name, sys_ni_posix_timers)
-#endif
-
-#ifndef COMPAT_SYS_NI
-#define COMPAT_SYS_NI(name) SYSCALL_ALIAS(compat_sys_##name, sys_ni_posix_timers)
-#endif
-
-SYS_NI(timer_create);
-SYS_NI(timer_gettime);
-SYS_NI(timer_getoverrun);
-SYS_NI(timer_settime);
-SYS_NI(timer_delete);
-SYS_NI(clock_adjtime);
-SYS_NI(getitimer);
-SYS_NI(setitimer);
-SYS_NI(clock_adjtime32);
-#ifdef __ARCH_WANT_SYS_ALARM
-SYS_NI(alarm);
-#endif
-
/*
* We preserve minimal support for CLOCK_REALTIME and CLOCK_MONOTONIC
* as it is easy to remain compatible with little code. CLOCK_BOOTTIME
@@ -158,18 +124,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
which_clock);
}
-#ifdef CONFIG_COMPAT
-COMPAT_SYS_NI(timer_create);
-#endif
-
-#if defined(CONFIG_COMPAT) || defined(CONFIG_ALPHA)
-COMPAT_SYS_NI(getitimer);
-COMPAT_SYS_NI(setitimer);
-#endif
-
#ifdef CONFIG_COMPAT_32BIT_TIME
-SYS_NI(timer_settime32);
-SYS_NI(timer_gettime32);
SYSCALL_DEFINE2(clock_settime32, const clockid_t, which_clock,
struct old_timespec32 __user *, tp)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 8d2a4f00eca9f0..83eab547f1d1f2 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -644,8 +644,8 @@ static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
*cnt = rb_time_cnt(top);
- /* If top and msb counts don't match, this interrupted a write */
- if (*cnt != rb_time_cnt(msb))
+ /* If top, msb or bottom counts don't match, this interrupted a write */
+ if (*cnt != rb_time_cnt(msb) || *cnt != rb_time_cnt(bottom))
return false;
/* The shift to msb will lose its cnt bits */
@@ -700,44 +700,6 @@ rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
return local_try_cmpxchg(l, &expect, set);
}
-static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
-{
- unsigned long cnt, top, bottom, msb;
- unsigned long cnt2, top2, bottom2, msb2;
- u64 val;
-
- /* The cmpxchg always fails if it interrupted an update */
- if (!__rb_time_read(t, &val, &cnt2))
- return false;
-
- if (val != expect)
- return false;
-
- cnt = local_read(&t->cnt);
- if ((cnt & 3) != cnt2)
- return false;
-
- cnt2 = cnt + 1;
-
- rb_time_split(val, &top, &bottom, &msb);
- top = rb_time_val_cnt(top, cnt);
- bottom = rb_time_val_cnt(bottom, cnt);
-
- rb_time_split(set, &top2, &bottom2, &msb2);
- top2 = rb_time_val_cnt(top2, cnt2);
- bottom2 = rb_time_val_cnt(bottom2, cnt2);
-
- if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2))
- return false;
- if (!rb_time_read_cmpxchg(&t->msb, msb, msb2))
- return false;
- if (!rb_time_read_cmpxchg(&t->top, top, top2))
- return false;
- if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2))
- return false;
- return true;
-}
-
#else /* 64 bits */
/* local64_t always succeeds */
@@ -751,11 +713,6 @@ static void rb_time_set(rb_time_t *t, u64 val)
{
local64_set(&t->time, val);
}
-
-static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
-{
- return local64_try_cmpxchg(&t->time, &expect, set);
-}
#endif
/*
@@ -1787,6 +1744,8 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
free_buffer_page(bpage);
}
+ free_page((unsigned long)cpu_buffer->free_page);
+
kfree(cpu_buffer);
}
@@ -2407,7 +2366,7 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
*/
barrier();
- if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE)
+ if ((iter->head + length) > commit || length > BUF_PAGE_SIZE)
/* Writer corrupted the read? */
goto reset;
@@ -2981,25 +2940,6 @@ static unsigned rb_calculate_event_length(unsigned length)
return length;
}
-static u64 rb_time_delta(struct ring_buffer_event *event)
-{
- switch (event->type_len) {
- case RINGBUF_TYPE_PADDING:
- return 0;
-
- case RINGBUF_TYPE_TIME_EXTEND:
- return rb_event_time_stamp(event);
-
- case RINGBUF_TYPE_TIME_STAMP:
- return 0;
-
- case RINGBUF_TYPE_DATA:
- return event->time_delta;
- default:
- return 0;
- }
-}
-
static inline bool
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
@@ -3007,8 +2947,6 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
unsigned long new_index, old_index;
struct buffer_page *bpage;
unsigned long addr;
- u64 write_stamp;
- u64 delta;
new_index = rb_event_index(event);
old_index = new_index + rb_event_ts_length(event);
@@ -3017,14 +2955,10 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
bpage = READ_ONCE(cpu_buffer->tail_page);
- delta = rb_time_delta(event);
-
- if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp))
- return false;
-
- /* Make sure the write stamp is read before testing the location */
- barrier();
-
+ /*
+ * Make sure the tail_page is still the same and
+ * the next write location is the end of this event
+ */
if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
unsigned long write_mask =
local_read(&bpage->write) & ~RB_WRITE_MASK;
@@ -3035,20 +2969,20 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
* to make sure that the next event adds an absolute
* value and does not rely on the saved write stamp, which
* is now going to be bogus.
+ *
+ * By setting the before_stamp to zero, the next event
+ * is not going to use the write_stamp and will instead
+ * create an absolute timestamp. This means there's no
+ * reason to update the write_stamp!
*/
rb_time_set(&cpu_buffer->before_stamp, 0);
- /* Something came in, can't discard */
- if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
- write_stamp, write_stamp - delta))
- return false;
-
/*
* If an event were to come in now, it would see that the
* write_stamp and the before_stamp are different, and assume
* that this event just added itself before updating
* the write stamp. The interrupting event will fix the
- * write stamp for us, and use the before stamp as its delta.
+ * write stamp for us, and use an absolute timestamp.
*/
/*
@@ -3485,7 +3419,7 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
return;
/*
- * If this interrupted another event,
+ * If this interrupted another event,
*/
if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
goto out;
@@ -3579,7 +3513,10 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
* absolute timestamp.
* Don't bother if this is the start of a new page (w == 0).
*/
- if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) {
+ if (!w) {
+ /* Use the sub-buffer timestamp */
+ info->delta = 0;
+ } else if (unlikely(!a_ok || !b_ok || info->before != info->after)) {
info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
info->length += RB_LEN_TIME_EXTEND;
} else {
@@ -3602,26 +3539,19 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
/* See if we shot pass the end of this buffer page */
if (unlikely(write > BUF_PAGE_SIZE)) {
- /* before and after may now different, fix it up*/
- b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
- a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
- if (a_ok && b_ok && info->before != info->after)
- (void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
- info->before, info->after);
- if (a_ok && b_ok)
- check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
+ check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
return rb_move_tail(cpu_buffer, tail, info);
}
if (likely(tail == w)) {
- u64 save_before;
- bool s_ok;
-
/* Nothing interrupted us between A and C */
/*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts);
- barrier();
- /*E*/ s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before);
- RB_WARN_ON(cpu_buffer, !s_ok);
+ /*
+ * If something came in between C and D, the write stamp
+ * may now not be in sync. But that's fine as the before_stamp
+ * will be different and then next event will just be forced
+ * to use an absolute timestamp.
+ */
if (likely(!(info->add_timestamp &
(RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
/* This did not interrupt any time update */
@@ -3629,41 +3559,40 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
else
/* Just use full timestamp for interrupting event */
info->delta = info->ts;
- barrier();
check_buffer(cpu_buffer, info, tail);
- if (unlikely(info->ts != save_before)) {
- /* SLOW PATH - Interrupted between C and E */
-
- a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
- RB_WARN_ON(cpu_buffer, !a_ok);
-
- /* Write stamp must only go forward */
- if (save_before > info->after) {
- /*
- * We do not care about the result, only that
- * it gets updated atomically.
- */
- (void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
- info->after, save_before);
- }
- }
} else {
u64 ts;
/* SLOW PATH - Interrupted between A and C */
- a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
- /* Was interrupted before here, write_stamp must be valid */
+
+ /* Save the old before_stamp */
+ a_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
RB_WARN_ON(cpu_buffer, !a_ok);
+
+ /*
+ * Read a new timestamp and update the before_stamp to make
+ * the next event after this one force using an absolute
+ * timestamp. This is in case an interrupt were to come in
+ * between E and F.
+ */
ts = rb_time_stamp(cpu_buffer->buffer);
+ rb_time_set(&cpu_buffer->before_stamp, ts);
+
barrier();
- /*E*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
- info->after < ts &&
- rb_time_cmpxchg(&cpu_buffer->write_stamp,
- info->after, ts)) {
- /* Nothing came after this event between C and E */
+ /*E*/ a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
+ /* Was interrupted before here, write_stamp must be valid */
+ RB_WARN_ON(cpu_buffer, !a_ok);
+ barrier();
+ /*F*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
+ info->after == info->before && info->after < ts) {
+ /*
+ * Nothing came after this event between C and F, it is
+ * safe to use info->after for the delta as it
+ * matched info->before and is still valid.
+ */
info->delta = ts - info->after;
} else {
/*
- * Interrupted between C and E:
+ * Interrupted between C and F:
* Lost the previous events time stamp. Just set the
* delta to zero, and this will be the same time as
* the event this event interrupted. And the events that
@@ -3714,6 +3643,12 @@ rb_reserve_next_event(struct trace_buffer *buffer,
int nr_loops = 0;
int add_ts_default;
+ /* ring buffer does cmpxchg, make sure it is safe in NMI context */
+ if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) &&
+ (unlikely(in_nmi()))) {
+ return NULL;
+ }
+
rb_start_commit(cpu_buffer);
/* The commit page can not change after this */
@@ -3737,6 +3672,8 @@ rb_reserve_next_event(struct trace_buffer *buffer,
if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
add_ts_default = RB_ADD_STAMP_ABSOLUTE;
info.length += RB_LEN_TIME_EXTEND;
+ if (info.length > BUF_MAX_DATA_SIZE)
+ goto out_fail;
} else {
add_ts_default = RB_ADD_STAMP_NONE;
}
@@ -5118,7 +5055,8 @@ ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
if (!iter)
return NULL;
- iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags);
+ /* Holds the entire event: data and meta data */
+ iter->event = kmalloc(BUF_PAGE_SIZE, flags);
if (!iter->event) {
kfree(iter);
return NULL;
diff --git a/kernel/trace/synth_event_gen_test.c b/kernel/trace/synth_event_gen_test.c
index 8dfe85499d4a2b..354c2117be43f7 100644
--- a/kernel/trace/synth_event_gen_test.c
+++ b/kernel/trace/synth_event_gen_test.c
@@ -477,6 +477,17 @@ static int __init synth_event_gen_test_init(void)
ret = test_trace_synth_event();
WARN_ON(ret);
+
+ /* Disable when done */
+ trace_array_set_clr_event(gen_synth_test->tr,
+ "synthetic",
+ "gen_synth_test", false);
+ trace_array_set_clr_event(empty_synth_test->tr,
+ "synthetic",
+ "empty_synth_test", false);
+ trace_array_set_clr_event(create_synth_test->tr,
+ "synthetic",
+ "create_synth_test", false);
out:
return ret;
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index fbcd3bafb93e2c..199df497db07eb 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4722,7 +4722,11 @@ static int s_show(struct seq_file *m, void *v)
iter->leftover = ret;
} else {
- print_trace_line(iter);
+ ret = print_trace_line(iter);
+ if (ret == TRACE_TYPE_PARTIAL_LINE) {
+ iter->seq.full = 0;
+ trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
+ }
ret = trace_print_seq(m, &iter->seq);
/*
* If we overflow the seq_file buffer, then it will
@@ -4964,6 +4968,12 @@ int tracing_release_file_tr(struct inode *inode, struct file *filp)
return 0;
}
+int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
+{
+ tracing_release_file_tr(inode, filp);
+ return single_release(inode, filp);
+}
+
static int tracing_mark_open(struct inode *inode, struct file *filp)
{
stream_open(inode, filp);
@@ -6344,7 +6354,7 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
if (!tr->array_buffer.buffer)
return 0;
- /* Do not allow tracing while resizng ring buffer */
+ /* Do not allow tracing while resizing ring buffer */
tracing_stop_tr(tr);
ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
@@ -6352,7 +6362,7 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
goto out_start;
#ifdef CONFIG_TRACER_MAX_TRACE
- if (!tr->current_trace->use_max_tr)
+ if (!tr->allocated_snapshot)
goto out;
ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index b7f4ea25a19431..0489e72c8169c1 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -617,6 +617,7 @@ int tracing_open_generic(struct inode *inode, struct file *filp);
int tracing_open_generic_tr(struct inode *inode, struct file *filp);
int tracing_open_file_tr(struct inode *inode, struct file *filp);
int tracing_release_file_tr(struct inode *inode, struct file *filp);
+int tracing_single_release_file_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 1abc07fba1b91d..5ecf3c8bde205f 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -5623,10 +5623,12 @@ static int event_hist_open(struct inode *inode, struct file *file)
{
int ret;
- ret = security_locked_down(LOCKDOWN_TRACEFS);
+ ret = tracing_open_file_tr(inode, file);
if (ret)
return ret;
+ /* Clear private_data to avoid warning in single_open() */
+ file->private_data = NULL;
return single_open(file, hist_show, file);
}
@@ -5634,7 +5636,7 @@ const struct file_operations event_hist_fops = {
.open = event_hist_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release,
+ .release = tracing_single_release_file_tr,
};
#ifdef CONFIG_HIST_TRIGGERS_DEBUG
@@ -5900,10 +5902,12 @@ static int event_hist_debug_open(struct inode *inode, struct file *file)
{
int ret;
- ret = security_locked_down(LOCKDOWN_TRACEFS);
+ ret = tracing_open_file_tr(inode, file);
if (ret)
return ret;
+ /* Clear private_data to avoid warning in single_open() */
+ file->private_data = NULL;
return single_open(file, hist_debug_show, file);
}
@@ -5911,7 +5915,7 @@ const struct file_operations event_hist_debug_fops = {
.open = event_hist_debug_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release,
+ .release = tracing_single_release_file_tr,
};
#endif
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index 846e02c0fb59a2..e7af286af4f1ad 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -1137,7 +1137,7 @@ EXPORT_SYMBOL_GPL(synth_event_add_fields);
* @cmd: A pointer to the dynevent_cmd struct representing the new event
* @name: The name of the synthetic event
* @mod: The module creating the event, NULL if not created from a module
- * @args: Variable number of arg (pairs), one pair for each field
+ * @...: Variable number of arg (pairs), one pair for each field
*
* NOTE: Users normally won't want to call this function directly, but
* rather use the synth_event_gen_cmd_start() wrapper, which
@@ -1695,7 +1695,7 @@ __synth_event_trace_end(struct synth_event_trace_state *trace_state)
* synth_event_trace - Trace a synthetic event
* @file: The trace_event_file representing the synthetic event
* @n_vals: The number of values in vals
- * @args: Variable number of args containing the event values
+ * @...: Variable number of args containing the event values
*
* Trace a synthetic event using the values passed in the variable
* argument list.
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index d8b302d0108302..3e7fa44dc2b248 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1587,11 +1587,12 @@ static enum print_line_t trace_print_print(struct trace_iterator *iter,
{
struct print_entry *field;
struct trace_seq *s = &iter->seq;
+ int max = iter->ent_size - offsetof(struct print_entry, buf);
trace_assign_type(field, iter->ent);
seq_print_ip_sym(s, field->ip, flags);
- trace_seq_printf(s, ": %s", field->buf);
+ trace_seq_printf(s, ": %.*s", max, field->buf);
return trace_handle_return(s);
}
@@ -1600,10 +1601,11 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
struct trace_event *event)
{
struct print_entry *field;
+ int max = iter->ent_size - offsetof(struct print_entry, buf);
trace_assign_type(field, iter->ent);
- trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);
+ trace_seq_printf(&iter->seq, "# %lx %.*s", field->ip, max, field->buf);
return trace_handle_return(&iter->seq);
}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index cc7d53d9dc0191..4405f81248fbc7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1739,21 +1739,6 @@ config DEBUG_MAPLE_TREE
endmenu
-config DEBUG_CREDENTIALS
- bool "Debug credential management"
- depends on DEBUG_KERNEL
- help
- Enable this to turn on some debug checking for credential
- management. The additional code keeps track of the number of
- pointers from task_structs to any given cred struct, and checks to
- see that this number never exceeds the usage count of the cred
- struct.
-
- Furthermore, if SELinux is enabled, this also checks that the
- security pointer in the cred struct is never seen to be invalid.
-
- If unsure, say N.
-
source "kernel/rcu/Kconfig.debug"
config DEBUG_WQ_FORCE_RR_CPU
diff --git a/lib/idr.c b/lib/idr.c
index 13f2758c237735..da36054c3ca020 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -508,7 +508,7 @@ void ida_free(struct ida *ida, unsigned int id)
goto delete;
xas_store(&xas, xa_mk_value(v));
} else {
- if (!test_bit(bit, bitmap->bitmap))
+ if (!bitmap || !test_bit(bit, bitmap->bitmap))
goto err;
__clear_bit(bit, bitmap->bitmap);
xas_set_mark(&xas, XA_FREE_MARK);
diff --git a/lib/test_ida.c b/lib/test_ida.c
index b0688062596150..55105baa19da9a 100644
--- a/lib/test_ida.c
+++ b/lib/test_ida.c
@@ -150,6 +150,45 @@ static void ida_check_conv(struct ida *ida)
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
+/*
+ * Check various situations where we attempt to free an ID we don't own.
+ */
+static void ida_check_bad_free(struct ida *ida)
+{
+ unsigned long i;
+
+ printk("vvv Ignore \"not allocated\" warnings\n");
+ /* IDA is empty; all of these will fail */
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+
+ /* IDA contains a single value entry */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 3, GFP_KERNEL) != 3);
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+
+ /* IDA contains a single bitmap */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 1023, GFP_KERNEL) != 1023);
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+
+ /* IDA contains a tree */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, (1 << 20) - 1, GFP_KERNEL) != (1 << 20) - 1);
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+ printk("^^^ \"not allocated\" warnings over\n");
+
+ ida_free(ida, 3);
+ ida_free(ida, 1023);
+ ida_free(ida, (1 << 20) - 1);
+
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
static DEFINE_IDA(ida);
static int ida_checks(void)
@@ -162,6 +201,7 @@ static int ida_checks(void)
ida_check_leaf(&ida, 1024 * 64);
ida_check_max(&ida);
ida_check_conv(&ida);
+ ida_check_bad_free(&ida);
printk("IDA: %u of %u tests passed\n", tests_passed, tests_run);
return (tests_run != tests_passed) ? 0 : -EINVAL;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 3e3733a7084fb7..552738f14275a7 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -2111,15 +2111,20 @@ char *fwnode_full_name_string(struct fwnode_handle *fwnode, char *buf,
/* Loop starting from the root node to the current node. */
for (depth = fwnode_count_parents(fwnode); depth >= 0; depth--) {
- struct fwnode_handle *__fwnode =
- fwnode_get_nth_parent(fwnode, depth);
+ /*
+ * Only get a reference for other nodes (i.e. parent nodes).
+ * fwnode refcount may be 0 here.
+ */
+ struct fwnode_handle *__fwnode = depth ?
+ fwnode_get_nth_parent(fwnode, depth) : fwnode;
buf = string(buf, end, fwnode_get_name_prefix(__fwnode),
default_str_spec);
buf = string(buf, end, fwnode_get_name(__fwnode),
default_str_spec);
- fwnode_handle_put(__fwnode);
+ if (depth)
+ fwnode_handle_put(__fwnode);
}
return buf;
diff --git a/mm/damon/core.c b/mm/damon/core.c
index ce1562783e7e5d..3a05e71509b9db 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -445,6 +445,8 @@ struct damon_ctx *damon_new_ctx(void)
if (!ctx)
return NULL;
+ init_completion(&ctx->kdamond_started);
+
ctx->attrs.sample_interval = 5 * 1000;
ctx->attrs.aggr_interval = 100 * 1000;
ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
@@ -668,11 +670,14 @@ static int __damon_start(struct damon_ctx *ctx)
mutex_lock(&ctx->kdamond_lock);
if (!ctx->kdamond) {
err = 0;
+ reinit_completion(&ctx->kdamond_started);
ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
nr_running_ctxs);
if (IS_ERR(ctx->kdamond)) {
err = PTR_ERR(ctx->kdamond);
ctx->kdamond = NULL;
+ } else {
+ wait_for_completion(&ctx->kdamond_started);
}
}
mutex_unlock(&ctx->kdamond_lock);
@@ -1433,6 +1438,7 @@ static int kdamond_fn(void *data)
pr_debug("kdamond (%d) starts\n", current->pid);
+ complete(&ctx->kdamond_started);
kdamond_init_intervals_sis(ctx);
if (ctx->ops.init)
diff --git a/mm/shmem.c b/mm/shmem.c
index 91e2620148b2f6..0d1ce70bce3802 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1080,7 +1080,24 @@ whole_folios:
}
VM_BUG_ON_FOLIO(folio_test_writeback(folio),
folio);
- truncate_inode_folio(mapping, folio);
+
+ if (!folio_test_large(folio)) {
+ truncate_inode_folio(mapping, folio);
+ } else if (truncate_inode_partial_folio(folio, lstart, lend)) {
+ /*
+ * If we split a page, reset the loop so
+ * that we pick up the new sub pages.
+ * Otherwise the THP was entirely
+ * dropped or the target range was
+ * zeroed, so just continue the loop as
+ * is.
+ */
+ if (!folio_test_large(folio)) {
+ folio_unlock(folio);
+ index = start;
+ break;
+ }
+ }
}
folio_unlock(folio);
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 506f8220c5fe53..9dd8977de5a223 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4089,6 +4089,9 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
else
VM_WARN_ON_ONCE(true);
+ WRITE_ONCE(lruvec->lrugen.seg, seg);
+ WRITE_ONCE(lruvec->lrugen.gen, new);
+
hlist_nulls_del_rcu(&lruvec->lrugen.list);
if (op == MEMCG_LRU_HEAD || op == MEMCG_LRU_OLD)
@@ -4099,9 +4102,6 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
pgdat->memcg_lru.nr_memcgs[old]--;
pgdat->memcg_lru.nr_memcgs[new]++;
- lruvec->lrugen.gen = new;
- WRITE_ONCE(lruvec->lrugen.seg, seg);
-
if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq))
WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
@@ -4124,11 +4124,11 @@ void lru_gen_online_memcg(struct mem_cgroup *memcg)
gen = get_memcg_gen(pgdat->memcg_lru.seq);
+ lruvec->lrugen.gen = gen;
+
hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]);
pgdat->memcg_lru.nr_memcgs[gen]++;
- lruvec->lrugen.gen = gen;
-
spin_unlock_irq(&pgdat->memcg_lru.lock);
}
}
@@ -4232,7 +4232,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
}
/* protected */
- if (tier > tier_idx) {
+ if (tier > tier_idx || refs == BIT(LRU_REFS_WIDTH)) {
int hist = lru_hist_from_seq(lrugen->min_seq[type]);
gen = folio_inc_gen(lruvec, folio, false);
@@ -4598,7 +4598,12 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
}
/* try to scrape all its memory if this memcg was deleted */
- *nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
+ if (!mem_cgroup_online(memcg)) {
+ *nr_to_scan = total;
+ return false;
+ }
+
+ *nr_to_scan = total >> sc->priority;
/*
* The aging tries to be lazy to reduce the overhead, while the eviction
@@ -4635,7 +4640,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool
DEFINE_MAX_SEQ(lruvec);
if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
- return 0;
+ return -1;
if (!should_run_aging(lruvec, max_seq, sc, can_swap, &nr_to_scan))
return nr_to_scan;
@@ -4648,20 +4653,41 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool
return try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false) ? -1 : 0;
}
-static unsigned long get_nr_to_reclaim(struct scan_control *sc)
+static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc)
{
+ int i;
+ enum zone_watermarks mark;
+
/* don't abort memcg reclaim to ensure fairness */
if (!root_reclaim(sc))
- return -1;
+ return false;
+
+ if (sc->nr_reclaimed >= max(sc->nr_to_reclaim, compact_gap(sc->order)))
+ return true;
+
+ /* check the order to exclude compaction-induced reclaim */
+ if (!current_is_kswapd() || sc->order)
+ return false;
+
+ mark = sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING ?
+ WMARK_PROMO : WMARK_HIGH;
+
+ for (i = 0; i <= sc->reclaim_idx; i++) {
+ struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
+ unsigned long size = wmark_pages(zone, mark) + MIN_LRU_BATCH;
+
+ if (managed_zone(zone) && !zone_watermark_ok(zone, 0, size, sc->reclaim_idx, 0))
+ return false;
+ }
- return max(sc->nr_to_reclaim, compact_gap(sc->order));
+ /* kswapd should abort if all eligible zones are safe */
+ return true;
}
static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
long nr_to_scan;
unsigned long scanned = 0;
- unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
int swappiness = get_swappiness(lruvec, sc);
/* clean file folios are more likely to exist */
@@ -4683,13 +4709,13 @@ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
if (scanned >= nr_to_scan)
break;
- if (sc->nr_reclaimed >= nr_to_reclaim)
+ if (should_abort_scan(lruvec, sc))
break;
cond_resched();
}
- /* whether try_to_inc_max_seq() was successful */
+ /* whether this lruvec should be rotated */
return nr_to_scan < 0;
}
@@ -4698,14 +4724,9 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
bool success;
unsigned long scanned = sc->nr_scanned;
unsigned long reclaimed = sc->nr_reclaimed;
- int seg = lru_gen_memcg_seg(lruvec);
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
- /* see the comment on MEMCG_NR_GENS */
- if (!lruvec_is_sizable(lruvec, sc))
- return seg != MEMCG_LRU_TAIL ? MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG;
-
mem_cgroup_calculate_protection(NULL, memcg);
if (mem_cgroup_below_min(NULL, memcg))
@@ -4713,7 +4734,7 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
if (mem_cgroup_below_low(NULL, memcg)) {
/* see the comment on MEMCG_NR_GENS */
- if (seg != MEMCG_LRU_TAIL)
+ if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_TAIL)
return MEMCG_LRU_TAIL;
memcg_memory_event(memcg, MEMCG_LOW);
@@ -4729,7 +4750,15 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
flush_reclaim_state(sc);
- return success ? MEMCG_LRU_YOUNG : 0;
+ if (success && mem_cgroup_online(memcg))
+ return MEMCG_LRU_YOUNG;
+
+ if (!success && lruvec_is_sizable(lruvec, sc))
+ return 0;
+
+ /* one retry if offlined or too small */
+ return lru_gen_memcg_seg(lruvec) != MEMCG_LRU_TAIL ?
+ MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG;
}
#ifdef CONFIG_MEMCG
@@ -4743,14 +4772,13 @@ static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
struct lruvec *lruvec;
struct lru_gen_folio *lrugen;
struct mem_cgroup *memcg;
- const struct hlist_nulls_node *pos;
- unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
+ struct hlist_nulls_node *pos;
+ gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));
bin = first_bin = get_random_u32_below(MEMCG_NR_BINS);
restart:
op = 0;
memcg = NULL;
- gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));
rcu_read_lock();
@@ -4761,6 +4789,10 @@ restart:
}
mem_cgroup_put(memcg);
+ memcg = NULL;
+
+ if (gen != READ_ONCE(lrugen->gen))
+ continue;
lruvec = container_of(lrugen, struct lruvec, lrugen);
memcg = lruvec_memcg(lruvec);
@@ -4777,7 +4809,7 @@ restart:
rcu_read_lock();
- if (sc->nr_reclaimed >= nr_to_reclaim)
+ if (should_abort_scan(lruvec, sc))
break;
}
@@ -4788,7 +4820,7 @@ restart:
mem_cgroup_put(memcg);
- if (sc->nr_reclaimed >= nr_to_reclaim)
+ if (!is_a_nulls(pos))
return;
/* restart if raced with lru_gen_rotate_memcg() */
@@ -4845,16 +4877,14 @@ static void set_initial_priority(struct pglist_data *pgdat, struct scan_control
if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH)
return;
/*
- * Determine the initial priority based on ((total / MEMCG_NR_GENS) >>
- * priority) * reclaimed_to_scanned_ratio = nr_to_reclaim, where the
- * estimated reclaimed_to_scanned_ratio = inactive / total.
+ * Determine the initial priority based on
+ * (total >> priority) * reclaimed_to_scanned_ratio = nr_to_reclaim,
+ * where reclaimed_to_scanned_ratio = inactive / total.
*/
reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE);
if (get_swappiness(lruvec, sc))
reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON);
- reclaimable /= MEMCG_NR_GENS;
-
/* round down reclaimable and round up sc->nr_to_reclaim */
priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1);
diff --git a/mm/workingset.c b/mm/workingset.c
index b192e44a0e7ccc..33baad203277d1 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -313,10 +313,10 @@ static void lru_gen_refault(struct folio *folio, void *shadow)
* 1. For pages accessed through page tables, hotter pages pushed out
* hot pages which refaulted immediately.
* 2. For pages accessed multiple times through file descriptors,
- * numbers of accesses might have been out of the range.
+ * they would have been protected by sort_folio().
*/
- if (lru_gen_in_fault() || refs == BIT(LRU_REFS_WIDTH)) {
- folio_set_workingset(folio);
+ if (lru_gen_in_fault() || refs >= BIT(LRU_REFS_WIDTH) - 1) {
+ set_mask_bits(&folio->flags, 0, LRU_REFS_MASK | BIT(PG_workingset));
mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta);
}
unlock:
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 0beb44f2fe1f0d..f001582345052f 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -407,6 +407,8 @@ int vlan_vids_add_by_dev(struct net_device *dev,
return 0;
list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+ if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
+ continue;
err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
if (err)
goto unwind;
@@ -417,6 +419,8 @@ unwind:
list_for_each_entry_continue_reverse(vid_info,
&vlan_info->vid_list,
list) {
+ if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
+ continue;
vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
@@ -436,8 +440,11 @@ void vlan_vids_del_by_dev(struct net_device *dev,
if (!vlan_info)
return;
- list_for_each_entry(vid_info, &vlan_info->vid_list, list)
+ list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+ if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
+ continue;
vlan_vid_del(dev, vid_info->proto, vid_info->vid);
+ }
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 4e3a2a1ffcb3fa..0e6603b1ec906a 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -394,6 +394,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
uint16_t *nwname = va_arg(ap, uint16_t *);
char ***wnames = va_arg(ap, char ***);
+ *wnames = NULL;
+
errcode = p9pdu_readf(pdu, proto_version,
"w", nwname);
if (!errcode) {
@@ -403,6 +405,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
GFP_NOFS);
if (!*wnames)
errcode = -ENOMEM;
+ else
+ (*wnames)[0] = NULL;
}
if (!errcode) {
@@ -414,8 +418,10 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
proto_version,
"s",
&(*wnames)[i]);
- if (errcode)
+ if (errcode) {
+ (*wnames)[i] = NULL;
break;
+ }
}
}
@@ -423,11 +429,14 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
if (*wnames) {
int i;
- for (i = 0; i < *nwname; i++)
+ for (i = 0; i < *nwname; i++) {
+ if (!(*wnames)[i])
+ break;
kfree((*wnames)[i]);
+ }
+ kfree(*wnames);
+ *wnames = NULL;
}
- kfree(*wnames);
- *wnames = NULL;
}
}
break;
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 9ba04a69ec2ae7..a852ec093fa8b9 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1775,15 +1775,14 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
break;
}
case TIOCINQ: {
- /*
- * These two are safe on a single CPU system as only
- * user tasks fiddle here
- */
- struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
+ struct sk_buff *skb;
long amount = 0;
+ spin_lock_irq(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
if (skb)
amount = skb->len - sizeof(struct ddpehdr);
+ spin_unlock_irq(&sk->sk_receive_queue.lock);
rc = put_user(amount, (int __user *)argp);
break;
}
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index 838ebf0cabbfb7..f81f8d56f5c0c5 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -73,14 +73,17 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
case SIOCINQ:
{
struct sk_buff *skb;
+ int amount;
if (sock->state != SS_CONNECTED) {
error = -EINVAL;
goto done;
}
+ spin_lock_irq(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
- error = put_user(skb ? skb->len : 0,
- (int __user *)argp) ? -EFAULT : 0;
+ amount = skb ? skb->len : 0;
+ spin_unlock_irq(&sk->sk_receive_queue.lock);
+ error = put_user(amount, (int __user *)argp) ? -EFAULT : 0;
goto done;
}
case ATM_SETSC:
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 336a7616545468..b93464ac3517f4 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -309,11 +309,14 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (flags & MSG_OOB)
return -EOPNOTSUPP;
+ lock_sock(sk);
+
skb = skb_recv_datagram(sk, flags, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
- return 0;
+ err = 0;
+ release_sock(sk);
return err;
}
@@ -343,6 +346,8 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
skb_free_datagram(sk, skb);
+ release_sock(sk);
+
if (flags & MSG_TRUNC)
copied = skblen;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 0849e0dafa951c..ebf17b51072fc1 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -516,6 +516,9 @@ static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
{
struct hci_rp_read_class_of_dev *rp = data;
+ if (WARN_ON(!hdev))
+ return HCI_ERROR_UNSPECIFIED;
+
bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
@@ -747,9 +750,23 @@ static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
} else {
conn->enc_key_size = rp->key_size;
status = 0;
+
+ if (conn->enc_key_size < hdev->min_enc_key_size) {
+ /* As slave role, the conn->state has been set to
+ * BT_CONNECTED and l2cap conn req might not be received
+ * yet, at this moment the l2cap layer almost does
+ * nothing with the non-zero status.
+ * So we also clear encrypt related bits, and then the
+ * handler of l2cap conn req will get the right secure
+ * state at a later time.
+ */
+ status = HCI_ERROR_AUTH_FAILURE;
+ clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
+ clear_bit(HCI_CONN_AES_CCM, &conn->flags);
+ }
}
- hci_encrypt_cfm(conn, 0);
+ hci_encrypt_cfm(conn, status);
done:
hci_dev_unlock(hdev);
@@ -820,8 +837,6 @@ static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
if (!rp->status)
conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
- hci_encrypt_cfm(conn, 0);
-
unlock:
hci_dev_unlock(hdev);
@@ -2304,7 +2319,8 @@ static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
return;
}
- set_bit(HCI_INQUIRY, &hdev->flags);
+ if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
+ set_bit(HCI_INQUIRY, &hdev->flags);
}
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
@@ -3683,12 +3699,8 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
cp.handle = cpu_to_le16(conn->handle);
cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
- sizeof(cp), &cp)) {
+ sizeof(cp), &cp))
bt_dev_err(hdev, "write auth payload timeout failed");
- goto notify;
- }
-
- goto unlock;
}
notify:
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 17ca13e8c044cb..baeebee41cd9e8 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -6492,6 +6492,14 @@ drop:
kfree_skb(skb);
}
+static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
+{
+ struct l2cap_cmd_rej_unk rej;
+
+ rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
+ l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
+}
+
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
struct sk_buff *skb)
{
@@ -6517,23 +6525,24 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
if (len > skb->len || !cmd->ident) {
BT_DBG("corrupted command");
+ l2cap_sig_send_rej(conn, cmd->ident);
break;
}
err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
if (err) {
- struct l2cap_cmd_rej_unk rej;
-
BT_ERR("Wrong link type (%d)", err);
-
- rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
- l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
- sizeof(rej), &rej);
+ l2cap_sig_send_rej(conn, cmd->ident);
}
skb_pull(skb, len);
}
+ if (skb->len > 0) {
+ BT_DBG("corrupted command");
+ l2cap_sig_send_rej(conn, 0);
+ }
+
drop:
kfree_skb(skb);
}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index ba2e00646e8e82..9dd815b6603feb 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2897,7 +2897,8 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
for (i = 0; i < key_count; i++) {
struct mgmt_link_key_info *key = &cp->keys[i];
- if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
+ /* Considering SMP over BREDR/LE, there is no need to check addr_type */
+ if (key->type > 0x08)
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_LOAD_LINK_KEYS,
MGMT_STATUS_INVALID_PARAMS);
@@ -7130,6 +7131,7 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
for (i = 0; i < irk_count; i++) {
struct mgmt_irk_info *irk = &cp->irks[i];
+ u8 addr_type = le_addr_type(irk->addr.type);
if (hci_is_blocked_key(hdev,
HCI_BLOCKED_KEY_TYPE_IRK,
@@ -7139,8 +7141,12 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
continue;
}
+ /* When using SMP over BR/EDR, the addr type should be set to BREDR */
+ if (irk->addr.type == BDADDR_BREDR)
+ addr_type = BDADDR_BREDR;
+
hci_add_irk(hdev, &irk->addr.bdaddr,
- le_addr_type(irk->addr.type), irk->val,
+ addr_type, irk->val,
BDADDR_ANY);
}
@@ -7221,6 +7227,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
for (i = 0; i < key_count; i++) {
struct mgmt_ltk_info *key = &cp->keys[i];
u8 type, authenticated;
+ u8 addr_type = le_addr_type(key->addr.type);
if (hci_is_blocked_key(hdev,
HCI_BLOCKED_KEY_TYPE_LTK,
@@ -7255,8 +7262,12 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
continue;
}
+ /* When using SMP over BR/EDR, the addr type should be set to BREDR */
+ if (key->addr.type == BDADDR_BREDR)
+ addr_type = BDADDR_BREDR;
+
hci_add_ltk(hdev, &key->addr.bdaddr,
- le_addr_type(key->addr.type), type, authenticated,
+ addr_type, type, authenticated,
key->val, key->enc_size, key->ediv, key->rand);
}
@@ -9523,7 +9534,7 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
- ev.key.addr.type = BDADDR_BREDR;
+ ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
ev.key.type = key->type;
memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
ev.key.pin_len = key->pin_len;
@@ -9574,7 +9585,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
- ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
+ ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
ev.key.type = mgmt_ltk_type(key);
ev.key.enc_size = key->enc_size;
ev.key.ediv = key->ediv;
@@ -9603,7 +9614,7 @@ void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
bacpy(&ev.rpa, &irk->rpa);
bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
- ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
+ ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
memcpy(ev.irk.val, irk->val, sizeof(irk->val));
mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
@@ -9632,7 +9643,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
- ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
+ ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
ev.key.type = csrk->type;
memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 5f2f97de295eb6..1e7ea3a4b7ef32 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -1059,6 +1059,7 @@ static void smp_notify_keys(struct l2cap_conn *conn)
}
if (smp->remote_irk) {
+ smp->remote_irk->link_type = hcon->type;
mgmt_new_irk(hdev, smp->remote_irk, persistent);
/* Now that user space can be considered to know the
@@ -1078,24 +1079,28 @@ static void smp_notify_keys(struct l2cap_conn *conn)
}
if (smp->csrk) {
+ smp->csrk->link_type = hcon->type;
smp->csrk->bdaddr_type = hcon->dst_type;
bacpy(&smp->csrk->bdaddr, &hcon->dst);
mgmt_new_csrk(hdev, smp->csrk, persistent);
}
if (smp->responder_csrk) {
+ smp->responder_csrk->link_type = hcon->type;
smp->responder_csrk->bdaddr_type = hcon->dst_type;
bacpy(&smp->responder_csrk->bdaddr, &hcon->dst);
mgmt_new_csrk(hdev, smp->responder_csrk, persistent);
}
if (smp->ltk) {
+ smp->ltk->link_type = hcon->type;
smp->ltk->bdaddr_type = hcon->dst_type;
bacpy(&smp->ltk->bdaddr, &hcon->dst);
mgmt_new_ltk(hdev, smp->ltk, persistent);
}
if (smp->responder_ltk) {
+ smp->responder_ltk->link_type = hcon->type;
smp->responder_ltk->bdaddr_type = hcon->dst_type;
bacpy(&smp->responder_ltk->bdaddr, &hcon->dst);
mgmt_new_ltk(hdev, smp->responder_ltk, persistent);
@@ -1115,6 +1120,8 @@ static void smp_notify_keys(struct l2cap_conn *conn)
key = hci_add_link_key(hdev, smp->conn->hcon, &hcon->dst,
smp->link_key, type, 0, &persistent);
if (key) {
+ key->link_type = hcon->type;
+ key->bdaddr_type = hcon->dst_type;
mgmt_new_link_key(hdev, key, persistent);
/* Don't keep debug keys around if the relevant
diff --git a/net/core/dev.c b/net/core/dev.c
index c879246be48d82..ad20bebe153fc7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3472,6 +3472,9 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
if (gso_segs > READ_ONCE(dev->gso_max_segs))
return features & ~NETIF_F_GSO_MASK;
+ if (unlikely(skb->len >= READ_ONCE(dev->gso_max_size)))
+ return features & ~NETIF_F_GSO_MASK;
+
if (!skb_shinfo(skb)->gso_type) {
skb_warn_bad_offload(skb);
return features & ~NETIF_F_GSO_MASK;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index df81c1f0a57047..552719c3bbc3d7 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -253,9 +253,11 @@ static int neigh_forced_gc(struct neigh_table *tbl)
{
int max_clean = atomic_read(&tbl->gc_entries) -
READ_ONCE(tbl->gc_thresh2);
+ u64 tmax = ktime_get_ns() + NSEC_PER_MSEC;
unsigned long tref = jiffies - 5 * HZ;
struct neighbour *n, *tmp;
int shrunk = 0;
+ int loop = 0;
NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
@@ -278,11 +280,16 @@ static int neigh_forced_gc(struct neigh_table *tbl)
shrunk++;
if (shrunk >= max_clean)
break;
+ if (++loop == 16) {
+ if (ktime_get_ns() > tmax)
+ goto unlock;
+ loop = 0;
+ }
}
}
WRITE_ONCE(tbl->last_flush, jiffies);
-
+unlock:
write_unlock_bh(&tbl->lock);
return shrunk;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b157efea5dea88..94cc40a6f79756 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4522,8 +4522,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
/* GSO partial only requires that we trim off any excess that
* doesn't fit into an MSS sized block, so take care of that
* now.
+ * Cap len to not accidentally hit GSO_BY_FRAGS.
*/
- partial_segs = len / mss;
+ partial_segs = min(len, GSO_BY_FRAGS - 1) / mss;
if (partial_segs > 1)
mss *= partial_segs;
else
@@ -4824,7 +4825,9 @@ static __always_inline unsigned int skb_ext_total_length(void)
static void skb_extensions_init(void)
{
BUILD_BUG_ON(SKB_EXT_NUM >= 8);
+#if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL)
BUILD_BUG_ON(skb_ext_total_length() > 255);
+#endif
skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 4292c2ed182866..27d733c0f65e16 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -536,6 +536,8 @@ static bool sock_map_sk_state_allowed(const struct sock *sk)
{
if (sk_is_tcp(sk))
return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
+ if (sk_is_stream_unix(sk))
+ return (1 << sk->sk_state) & TCPF_ESTABLISHED;
return true;
}
diff --git a/net/core/stream.c b/net/core/stream.c
index 96fbcb9bbb30a5..b16dfa568a2d5b 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -79,7 +79,7 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
remove_wait_queue(sk_sleep(sk), &wait);
sk->sk_write_pending--;
} while (!done);
- return 0;
+ return done < 0 ? done : 0;
}
EXPORT_SYMBOL(sk_stream_wait_connect);
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 01e54b46ae0b97..2a6d363763a2bc 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -91,6 +91,7 @@ const struct cred *dns_resolver_cache;
static int
dns_resolver_preparse(struct key_preparsed_payload *prep)
{
+ const struct dns_server_list_v1_header *v1;
const struct dns_payload_header *bin;
struct user_key_payload *upayload;
unsigned long derrno;
@@ -122,6 +123,13 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
return -EINVAL;
}
+ v1 = (const struct dns_server_list_v1_header *)bin;
+ if ((v1->status != DNS_LOOKUP_GOOD &&
+ v1->status != DNS_LOOKUP_GOOD_WITH_BAD)) {
+ if (prep->expiry == TIME64_MAX)
+ prep->expiry = ktime_get_real_seconds() + 1;
+ }
+
result_len = datalen;
goto store_result;
}
@@ -314,7 +322,7 @@ static long dns_resolver_read(const struct key *key,
struct key_type key_type_dns_resolver = {
.name = "dns_resolver",
- .flags = KEY_TYPE_NET_DOMAIN,
+ .flags = KEY_TYPE_NET_DOMAIN | KEY_TYPE_INSTANT_REAP,
.preparse = dns_resolver_preparse,
.free_preparse = dns_resolver_free_preparse,
.instantiate = generic_key_instantiate,
diff --git a/net/ife/ife.c b/net/ife/ife.c
index 13bbf8cb6a3961..be05b690b9ef29 100644
--- a/net/ife/ife.c
+++ b/net/ife/ife.c
@@ -82,6 +82,7 @@ void *ife_decode(struct sk_buff *skb, u16 *metalen)
if (unlikely(!pskb_may_pull(skb, total_pull)))
return NULL;
+ ifehdr = (struct ifeheadr *)(skb->data + skb->dev->hard_header_len);
skb_set_mac_header(skb, total_pull);
__skb_pull(skb, total_pull);
*metalen = ifehdrln - IFE_METAHDRLEN;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 90de838a274519..701cb87043f280 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4368,6 +4368,23 @@ EXPORT_SYMBOL(tcp_do_parse_auth_options);
* up to bandwidth of 18Gigabit/sec. 8) ]
*/
+/* Estimates max number of increments of remote peer TSval in
+ * a replay window (based on our current RTO estimation).
+ */
+static u32 tcp_tsval_replay(const struct sock *sk)
+{
+ /* If we use usec TS resolution,
+ * then expect the remote peer to use the same resolution.
+ */
+ if (tcp_sk(sk)->tcp_usec_ts)
+ return inet_csk(sk)->icsk_rto * (USEC_PER_SEC / HZ);
+
+ /* RFC 7323 recommends a TSval clock between 1ms and 1sec.
+ * We know that some OS (including old linux) can use 1200 Hz.
+ */
+ return inet_csk(sk)->icsk_rto * 1200 / HZ;
+}
+
static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
{
const struct tcp_sock *tp = tcp_sk(sk);
@@ -4375,7 +4392,7 @@ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
u32 seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
- return (/* 1. Pure ACK with correct sequence number. */
+ return /* 1. Pure ACK with correct sequence number. */
(th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) &&
/* 2. ... and duplicate ACK. */
@@ -4385,7 +4402,8 @@ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
!tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&
/* 4. ... and sits in replay window. */
- (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
+ (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <=
+ tcp_tsval_replay(sk);
}
static inline bool tcp_paws_discard(const struct sock *sk,
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f5ef15e1d9ac38..e3167ad965676f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3293,7 +3293,13 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
if (skb_still_in_host_queue(sk, skb))
return -EBUSY;
+start:
if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
+ if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
+ TCP_SKB_CB(skb)->seq++;
+ goto start;
+ }
if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
WARN_ON_ONCE(1);
return -EINVAL;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3aaea56b516601..2692a7b24c4097 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -6149,11 +6149,7 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
pmsg->prefix_len = pinfo->prefix_len;
pmsg->prefix_type = pinfo->type;
pmsg->prefix_pad3 = 0;
- pmsg->prefix_flags = 0;
- if (pinfo->onlink)
- pmsg->prefix_flags |= IF_PREFIX_ONLINK;
- if (pinfo->autoconf)
- pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
+ pmsg->prefix_flags = pinfo->flags;
if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
goto nla_put_failure;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 7772f42ff2b940..4fc2cae0d116c5 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -160,8 +160,6 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags, bool with_fib6_nh)
INIT_LIST_HEAD(&f6i->fib6_siblings);
refcount_set(&f6i->fib6_ref, 1);
- INIT_HLIST_NODE(&f6i->gc_link);
-
return f6i;
}
@@ -248,7 +246,6 @@ static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
net->ipv6.fib6_null_entry);
table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
inet_peer_base_init(&table->tb6_peers);
- INIT_HLIST_HEAD(&table->tb6_gc_hlist);
}
return table;
@@ -1060,8 +1057,6 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
lockdep_is_held(&table->tb6_lock));
}
}
-
- fib6_clean_expires_locked(rt);
}
/*
@@ -1123,10 +1118,9 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
if (!(iter->fib6_flags & RTF_EXPIRES))
return -EEXIST;
if (!(rt->fib6_flags & RTF_EXPIRES))
- fib6_clean_expires_locked(iter);
+ fib6_clean_expires(iter);
else
- fib6_set_expires_locked(iter,
- rt->expires);
+ fib6_set_expires(iter, rt->expires);
if (rt->fib6_pmtu)
fib6_metric_set(iter, RTAX_MTU,
@@ -1485,10 +1479,6 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
if (rt->nh)
list_add(&rt->nh_list, &rt->nh->f6i_list);
__fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net));
-
- if (fib6_has_expires(rt))
- hlist_add_head(&rt->gc_link, &table->tb6_gc_hlist);
-
fib6_start_gc(info->nl_net, rt);
}
@@ -2291,8 +2281,9 @@ static void fib6_flush_trees(struct net *net)
* Garbage collection
*/
-static int fib6_age(struct fib6_info *rt, struct fib6_gc_args *gc_args)
+static int fib6_age(struct fib6_info *rt, void *arg)
{
+ struct fib6_gc_args *gc_args = arg;
unsigned long now = jiffies;
/*
@@ -2300,7 +2291,7 @@ static int fib6_age(struct fib6_info *rt, struct fib6_gc_args *gc_args)
* Routes are expired even if they are in use.
*/
- if (fib6_has_expires(rt) && rt->expires) {
+ if (rt->fib6_flags & RTF_EXPIRES && rt->expires) {
if (time_after(now, rt->expires)) {
RT6_TRACE("expiring %p\n", rt);
return -1;
@@ -2317,40 +2308,6 @@ static int fib6_age(struct fib6_info *rt, struct fib6_gc_args *gc_args)
return 0;
}
-static void fib6_gc_table(struct net *net,
- struct fib6_table *tb6,
- struct fib6_gc_args *gc_args)
-{
- struct fib6_info *rt;
- struct hlist_node *n;
- struct nl_info info = {
- .nl_net = net,
- .skip_notify = false,
- };
-
- hlist_for_each_entry_safe(rt, n, &tb6->tb6_gc_hlist, gc_link)
- if (fib6_age(rt, gc_args) == -1)
- fib6_del(rt, &info);
-}
-
-static void fib6_gc_all(struct net *net, struct fib6_gc_args *gc_args)
-{
- struct fib6_table *table;
- struct hlist_head *head;
- unsigned int h;
-
- rcu_read_lock();
- for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
- head = &net->ipv6.fib_table_hash[h];
- hlist_for_each_entry_rcu(table, head, tb6_hlist) {
- spin_lock_bh(&table->tb6_lock);
- fib6_gc_table(net, table, gc_args);
- spin_unlock_bh(&table->tb6_lock);
- }
- }
- rcu_read_unlock();
-}
-
void fib6_run_gc(unsigned long expires, struct net *net, bool force)
{
struct fib6_gc_args gc_args;
@@ -2366,7 +2323,7 @@ void fib6_run_gc(unsigned long expires, struct net *net, bool force)
net->ipv6.sysctl.ip6_rt_gc_interval;
gc_args.more = 0;
- fib6_gc_all(net, &gc_args);
+ fib6_clean_all(net, fib6_age, &gc_args);
now = jiffies;
net->ipv6.ip6_rt_last_gc = now;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index b132feae3393f3..ea1dec8448fce8 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3763,10 +3763,10 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
rt->dst_nocount = true;
if (cfg->fc_flags & RTF_EXPIRES)
- fib6_set_expires_locked(rt, jiffies +
- clock_t_to_jiffies(cfg->fc_expires));
+ fib6_set_expires(rt, jiffies +
+ clock_t_to_jiffies(cfg->fc_expires));
else
- fib6_clean_expires_locked(rt);
+ fib6_clean_expires(rt);
if (cfg->fc_protocol == RTPROT_UNSPEC)
cfg->fc_protocol = RTPROT_BOOT;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 606b1b2e41235e..eb1d3ef843538c 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1788,10 +1788,10 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
lockdep_is_held(&local->hw.wiphy->mtx));
/*
- * If there are no changes, then accept a link that doesn't exist,
+ * If there are no changes, then accept a link that exists,
* unless it's a new link.
*/
- if (params->link_id < 0 && !new_link &&
+ if (params->link_id >= 0 && !new_link &&
!params->link_mac && !params->txpwr_set &&
!params->supported_rates_len &&
!params->ht_capa && !params->vht_capa &&
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index 7938ec87ef25be..d3820333cd59c8 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015 Intel Deutschland GmbH
- * Copyright (C) 2022 Intel Corporation
+ * Copyright (C) 2022-2023 Intel Corporation
*/
#include <net/mac80211.h>
#include "ieee80211_i.h"
@@ -589,6 +589,10 @@ int drv_change_sta_links(struct ieee80211_local *local,
if (ret)
return ret;
+ /* during reconfig don't add it to debugfs again */
+ if (local->in_reconfig)
+ return 0;
+
for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
link_sta = rcu_dereference_protected(info->link[link_id],
lockdep_is_held(&local->hw.wiphy->mtx));
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index dbabeefe451513..28bf794f67f8c4 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -1068,8 +1068,8 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
case WLAN_SP_MESH_PEERING_OPEN:
if (!matches_local)
event = OPN_RJCT;
- if (!mesh_plink_free_count(sdata) ||
- (sta->mesh->plid && sta->mesh->plid != plid))
+ else if (!mesh_plink_free_count(sdata) ||
+ (sta->mesh->plid && sta->mesh->plid != plid))
event = OPN_IGNR;
else
event = OPN_ACPT;
@@ -1077,9 +1077,9 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
case WLAN_SP_MESH_PEERING_CONFIRM:
if (!matches_local)
event = CNF_RJCT;
- if (!mesh_plink_free_count(sdata) ||
- sta->mesh->llid != llid ||
- (sta->mesh->plid && sta->mesh->plid != plid))
+ else if (!mesh_plink_free_count(sdata) ||
+ sta->mesh->llid != llid ||
+ (sta->mesh->plid && sta->mesh->plid != plid))
event = CNF_IGNR;
else
event = CNF_ACPT;
@@ -1247,6 +1247,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
return;
}
elems = ieee802_11_parse_elems(baseaddr, len - baselen, true, NULL);
- mesh_process_plink_frame(sdata, mgmt, elems, rx_status);
- kfree(elems);
+ if (elems) {
+ mesh_process_plink_frame(sdata, mgmt, elems, rx_status);
+ kfree(elems);
+ }
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 887b496f2b81ed..c8998cf01b7a54 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -5782,7 +5782,7 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
{
const struct ieee80211_multi_link_elem *ml;
const struct element *sub;
- size_t ml_len;
+ ssize_t ml_len;
unsigned long removed_links = 0;
u16 link_removal_timeout[IEEE80211_MLD_MAX_NUM_LINKS] = {};
u8 link_id;
@@ -5798,6 +5798,8 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
elems->scratch + elems->scratch_len -
elems->scratch_pos,
WLAN_EID_FRAGMENT);
+ if (ml_len < 0)
+ return;
elems->ml_reconf = (const void *)elems->scratch_pos;
elems->ml_reconf_len = ml_len;
diff --git a/net/mptcp/crypto_test.c b/net/mptcp/crypto_test.c
index 017248dea038c1..220414e5c850f4 100644
--- a/net/mptcp/crypto_test.c
+++ b/net/mptcp/crypto_test.c
@@ -70,3 +70,4 @@ static struct kunit_suite mptcp_crypto_suite = {
kunit_test_suite(mptcp_crypto_suite);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit tests for MPTCP Crypto");
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index bc81ea53a04992..5cd5c3f535a821 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -3402,12 +3402,12 @@ static void mptcp_release_cb(struct sock *sk)
if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
__mptcp_clean_una_wakeup(sk);
if (unlikely(msk->cb_flags)) {
- /* be sure to set the current sk state before taking actions
+ /* be sure to sync the msk state before taking actions
* depending on sk_state (MPTCP_ERROR_REPORT)
* On sk release avoid actions depending on the first subflow
*/
- if (__test_and_clear_bit(MPTCP_CONNECTED, &msk->cb_flags) && msk->first)
- __mptcp_set_connected(sk);
+ if (__test_and_clear_bit(MPTCP_SYNC_STATE, &msk->cb_flags) && msk->first)
+ __mptcp_sync_state(sk, msk->pending_state);
if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
__mptcp_error_report(sk);
if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags))
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index fe6f2d399ee898..aa1a93fe40ffaf 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -124,7 +124,7 @@
#define MPTCP_ERROR_REPORT 3
#define MPTCP_RETRANSMIT 4
#define MPTCP_FLUSH_JOIN_LIST 5
-#define MPTCP_CONNECTED 6
+#define MPTCP_SYNC_STATE 6
#define MPTCP_SYNC_SNDBUF 7
struct mptcp_skb_cb {
@@ -296,6 +296,9 @@ struct mptcp_sock {
bool use_64bit_ack; /* Set when we received a 64-bit DSN */
bool csum_enabled;
bool allow_infinite_fallback;
+ u8 pending_state; /* A subflow asked to set this sk_state,
+ * protected by the msk data lock
+ */
u8 mpc_endpoint_id;
u8 recvmsg_inq:1,
cork:1,
@@ -728,7 +731,7 @@ void mptcp_get_options(const struct sk_buff *skb,
struct mptcp_options_received *mp_opt);
void mptcp_finish_connect(struct sock *sk);
-void __mptcp_set_connected(struct sock *sk);
+void __mptcp_sync_state(struct sock *sk, int state);
void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout);
static inline void mptcp_stop_tout_timer(struct sock *sk)
@@ -1115,7 +1118,7 @@ static inline bool subflow_simultaneous_connect(struct sock *sk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
- return sk->sk_state == TCP_ESTABLISHED &&
+ return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_FIN_WAIT1) &&
is_active_ssk(subflow) &&
!subflow->conn_finished;
}
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index a4f3c27f0309f9..6d7684c35e9331 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -419,22 +419,28 @@ static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct soc
return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
}
-void __mptcp_set_connected(struct sock *sk)
+void __mptcp_sync_state(struct sock *sk, int state)
{
- __mptcp_propagate_sndbuf(sk, mptcp_sk(sk)->first);
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ __mptcp_propagate_sndbuf(sk, msk->first);
if (sk->sk_state == TCP_SYN_SENT) {
- inet_sk_state_store(sk, TCP_ESTABLISHED);
+ inet_sk_state_store(sk, state);
sk->sk_state_change(sk);
}
}
-static void mptcp_set_connected(struct sock *sk)
+static void mptcp_propagate_state(struct sock *sk, struct sock *ssk)
{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
mptcp_data_lock(sk);
- if (!sock_owned_by_user(sk))
- __mptcp_set_connected(sk);
- else
- __set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->cb_flags);
+ if (!sock_owned_by_user(sk)) {
+ __mptcp_sync_state(sk, ssk->sk_state);
+ } else {
+ msk->pending_state = ssk->sk_state;
+ __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
+ }
mptcp_data_unlock(sk);
}
@@ -496,7 +502,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow_set_remote_key(msk, subflow, &mp_opt);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
mptcp_finish_connect(sk);
- mptcp_set_connected(parent);
+ mptcp_propagate_state(parent, sk);
} else if (subflow->request_join) {
u8 hmac[SHA256_DIGEST_SIZE];
@@ -540,7 +546,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
} else if (mptcp_check_fallback(sk)) {
fallback:
mptcp_rcv_space_init(msk, sk);
- mptcp_set_connected(parent);
+ mptcp_propagate_state(parent, sk);
}
return;
@@ -1740,7 +1746,7 @@ static void subflow_state_change(struct sock *sk)
mptcp_rcv_space_init(msk, sk);
pr_fallback(msk);
subflow->conn_finished = 1;
- mptcp_set_connected(parent);
+ mptcp_propagate_state(parent, sk);
}
/* as recvmsg() does not acquire the subflow socket for ssk selection
diff --git a/net/mptcp/token_test.c b/net/mptcp/token_test.c
index 0758865ab658ea..bfff53e668da68 100644
--- a/net/mptcp/token_test.c
+++ b/net/mptcp/token_test.c
@@ -143,3 +143,4 @@ static struct kunit_suite mptcp_token_suite = {
kunit_test_suite(mptcp_token_suite);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit tests for MPTCP Token");
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 5a81505fba9ac4..4e32d659524e0d 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -126,6 +126,14 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
return -EINVAL;
}
+ ret = gpiod_direction_output(rfkill->reset_gpio, true);
+ if (ret)
+ return ret;
+
+ ret = gpiod_direction_output(rfkill->shutdown_gpio, true);
+ if (ret)
+ return ret;
+
rfkill->rfkill_dev = rfkill_alloc(rfkill->name, &pdev->dev,
rfkill->type, &rfkill_gpio_ops,
rfkill);
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 0cc5a4e19900e1..ef81d019b20f48 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -182,21 +182,47 @@ void rose_kill_by_neigh(struct rose_neigh *neigh)
*/
static void rose_kill_by_device(struct net_device *dev)
{
- struct sock *s;
+ struct sock *sk, *array[16];
+ struct rose_sock *rose;
+ bool rescan;
+ int i, cnt;
+start:
+ rescan = false;
+ cnt = 0;
spin_lock_bh(&rose_list_lock);
- sk_for_each(s, &rose_list) {
- struct rose_sock *rose = rose_sk(s);
+ sk_for_each(sk, &rose_list) {
+ rose = rose_sk(sk);
+ if (rose->device == dev) {
+ if (cnt == ARRAY_SIZE(array)) {
+ rescan = true;
+ break;
+ }
+ sock_hold(sk);
+ array[cnt++] = sk;
+ }
+ }
+ spin_unlock_bh(&rose_list_lock);
+ for (i = 0; i < cnt; i++) {
sk = array[i];
+ rose = rose_sk(sk);
+ lock_sock(sk);
+ spin_lock_bh(&rose_list_lock);
if (rose->device == dev) {
- rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
+ rose_disconnect(sk, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
if (rose->neighbour)
rose->neighbour->use--;
netdev_put(rose->device, &rose->dev_tracker);
rose->device = NULL;
}
+ spin_unlock_bh(&rose_list_lock);
+ release_sock(sk);
+ sock_put(sk);
+ cond_resched();
}
- spin_unlock_bh(&rose_list_lock);
+ if (rescan)
+ goto start;
}
/*
@@ -656,7 +682,10 @@ static int rose_release(struct socket *sock)
break;
}
+ spin_lock_bh(&rose_list_lock);
netdev_put(rose->device, &rose->dev_tracker);
+ rose->device = NULL;
+ spin_unlock_bh(&rose_list_lock);
sock->sk = NULL;
release_sock(sk);
sock_put(sk);
@@ -1315,9 +1344,11 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case TIOCINQ: {
struct sk_buff *skb;
long amount = 0L;
- /* These two are safe on a single CPU system as only user tasks fiddle here */
+
+ spin_lock_irq(&sk->sk_receive_queue.lock);
if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
+ spin_unlock_irq(&sk->sk_receive_queue.lock);
return put_user(amount, (unsigned int __user *) argp);
}
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index b3f4a503ee2ba4..f69c47945175b6 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -286,9 +286,31 @@ static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
!test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
}
+static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft);
+
+static void tcf_ct_nf_get(struct nf_flowtable *ft)
+{
+ struct tcf_ct_flow_table *ct_ft =
+ container_of(ft, struct tcf_ct_flow_table, nf_ft);
+
+ tcf_ct_flow_table_get_ref(ct_ft);
+}
+
+static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft);
+
+static void tcf_ct_nf_put(struct nf_flowtable *ft)
+{
+ struct tcf_ct_flow_table *ct_ft =
+ container_of(ft, struct tcf_ct_flow_table, nf_ft);
+
+ tcf_ct_flow_table_put(ct_ft);
+}
+
static struct nf_flowtable_type flowtable_ct = {
.gc = tcf_ct_flow_is_outdated,
.action = tcf_ct_flow_table_fill_actions,
+ .get = tcf_ct_nf_get,
+ .put = tcf_ct_nf_put,
.owner = THIS_MODULE,
};
@@ -337,9 +359,13 @@ err_alloc:
return err;
}
+static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft)
+{
+ refcount_inc(&ct_ft->ref);
+}
+
static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
- struct flow_block_cb *block_cb, *tmp_cb;
struct tcf_ct_flow_table *ct_ft;
struct flow_block *block;
@@ -347,13 +373,9 @@ static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
rwork);
nf_flow_table_free(&ct_ft->nf_ft);
- /* Remove any remaining callbacks before cleanup */
block = &ct_ft->nf_ft.flow_block;
down_write(&ct_ft->nf_ft.flow_block_lock);
- list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) {
- list_del(&block_cb->list);
- flow_block_cb_free(block_cb);
- }
+ WARN_ON(!list_empty(&block->cb_list));
up_write(&ct_ft->nf_ft.flow_block_lock);
kfree(ct_ft);
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 7bfe7d9a32aa60..04534ea537c8fd 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -40,9 +40,6 @@ static unsigned long number_cred_unused;
static struct cred machine_cred = {
.usage = ATOMIC_INIT(1),
-#ifdef CONFIG_DEBUG_CREDENTIALS
- .magic = CRED_MAGIC,
-#endif
};
/*
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index fee83d1024bc8d..1b71055fc391a6 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -654,9 +654,8 @@ static bool svc_alloc_arg(struct svc_rqst *rqstp)
}
for (filled = 0; filled < pages; filled = ret) {
- ret = alloc_pages_bulk_array_node(GFP_KERNEL,
- rqstp->rq_pool->sp_id,
- pages, rqstp->rq_pages);
+ ret = alloc_pages_bulk_array(GFP_KERNEL, pages,
+ rqstp->rq_pages);
if (ret > filled)
/* Made progress, don't sleep yet */
continue;
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index c8e162c9d1df92..6df246b5326063 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -843,7 +843,7 @@ static s64 virtio_transport_has_space(struct vsock_sock *vsk)
struct virtio_vsock_sock *vvs = vsk->trans;
s64 bytes;
- bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
+ bytes = (s64)vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
if (bytes < 0)
bytes = 0;
diff --git a/net/wireless/certs/wens.hex b/net/wireless/certs/wens.hex
new file mode 100644
index 00000000000000..0d50369bede989
--- /dev/null
+++ b/net/wireless/certs/wens.hex
@@ -0,0 +1,87 @@
+/* Chen-Yu Tsai's regdb certificate */
+0x30, 0x82, 0x02, 0xa7, 0x30, 0x82, 0x01, 0x8f,
+0x02, 0x14, 0x61, 0xc0, 0x38, 0x65, 0x1a, 0xab,
+0xdc, 0xf9, 0x4b, 0xd0, 0xac, 0x7f, 0xf0, 0x6c,
+0x72, 0x48, 0xdb, 0x18, 0xc6, 0x00, 0x30, 0x0d,
+0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d,
+0x01, 0x01, 0x0b, 0x05, 0x00, 0x30, 0x0f, 0x31,
+0x0d, 0x30, 0x0b, 0x06, 0x03, 0x55, 0x04, 0x03,
+0x0c, 0x04, 0x77, 0x65, 0x6e, 0x73, 0x30, 0x20,
+0x17, 0x0d, 0x32, 0x33, 0x31, 0x32, 0x30, 0x31,
+0x30, 0x37, 0x34, 0x31, 0x31, 0x34, 0x5a, 0x18,
+0x0f, 0x32, 0x31, 0x32, 0x33, 0x31, 0x31, 0x30,
+0x37, 0x30, 0x37, 0x34, 0x31, 0x31, 0x34, 0x5a,
+0x30, 0x0f, 0x31, 0x0d, 0x30, 0x0b, 0x06, 0x03,
+0x55, 0x04, 0x03, 0x0c, 0x04, 0x77, 0x65, 0x6e,
+0x73, 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06,
+0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01,
+0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f,
+0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01,
+0x01, 0x00, 0xa9, 0x7a, 0x2c, 0x78, 0x4d, 0xa7,
+0x19, 0x2d, 0x32, 0x52, 0xa0, 0x2e, 0x6c, 0xef,
+0x88, 0x7f, 0x15, 0xc5, 0xb6, 0x69, 0x54, 0x16,
+0x43, 0x14, 0x79, 0x53, 0xb7, 0xae, 0x88, 0xfe,
+0xc0, 0xb7, 0x5d, 0x47, 0x8e, 0x1a, 0xe1, 0xef,
+0xb3, 0x90, 0x86, 0xda, 0xd3, 0x64, 0x81, 0x1f,
+0xce, 0x5d, 0x9e, 0x4b, 0x6e, 0x58, 0x02, 0x3e,
+0xb2, 0x6f, 0x5e, 0x42, 0x47, 0x41, 0xf4, 0x2c,
+0xb8, 0xa8, 0xd4, 0xaa, 0xc0, 0x0e, 0xe6, 0x48,
+0xf0, 0xa8, 0xce, 0xcb, 0x08, 0xae, 0x37, 0xaf,
+0xf6, 0x40, 0x39, 0xcb, 0x55, 0x6f, 0x5b, 0x4f,
+0x85, 0x34, 0xe6, 0x69, 0x10, 0x50, 0x72, 0x5e,
+0x4e, 0x9d, 0x4c, 0xba, 0x38, 0x36, 0x0d, 0xce,
+0x73, 0x38, 0xd7, 0x27, 0x02, 0x2a, 0x79, 0x03,
+0xe1, 0xac, 0xcf, 0xb0, 0x27, 0x85, 0x86, 0x93,
+0x17, 0xab, 0xec, 0x42, 0x77, 0x37, 0x65, 0x8a,
+0x44, 0xcb, 0xd6, 0x42, 0x93, 0x92, 0x13, 0xe3,
+0x39, 0x45, 0xc5, 0x6e, 0x00, 0x4a, 0x7f, 0xcb,
+0x42, 0x17, 0x2b, 0x25, 0x8c, 0xb8, 0x17, 0x3b,
+0x15, 0x36, 0x59, 0xde, 0x42, 0xce, 0x21, 0xe6,
+0xb6, 0xc7, 0x6e, 0x5e, 0x26, 0x1f, 0xf7, 0x8a,
+0x57, 0x9e, 0xa5, 0x96, 0x72, 0xb7, 0x02, 0x32,
+0xeb, 0x07, 0x2b, 0x73, 0xe2, 0x4f, 0x66, 0x58,
+0x9a, 0xeb, 0x0f, 0x07, 0xb6, 0xab, 0x50, 0x8b,
+0xc3, 0x8f, 0x17, 0xfa, 0x0a, 0x99, 0xc2, 0x16,
+0x25, 0xbf, 0x2d, 0x6b, 0x1a, 0xaa, 0xe6, 0x3e,
+0x5f, 0xeb, 0x6d, 0x9b, 0x5d, 0x4d, 0x42, 0x83,
+0x2d, 0x39, 0xb8, 0xc9, 0xac, 0xdb, 0x3a, 0x91,
+0x50, 0xdf, 0xbb, 0xb1, 0x76, 0x6d, 0x15, 0x73,
+0xfd, 0xc6, 0xe6, 0x6b, 0x71, 0x9e, 0x67, 0x36,
+0x22, 0x83, 0x79, 0xb1, 0xd6, 0xb8, 0x84, 0x52,
+0xaf, 0x96, 0x5b, 0xc3, 0x63, 0x02, 0x4e, 0x78,
+0x70, 0x57, 0x02, 0x03, 0x01, 0x00, 0x01, 0x30,
+0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7,
+0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x03, 0x82,
+0x01, 0x01, 0x00, 0x24, 0x28, 0xee, 0x22, 0x74,
+0x7f, 0x7c, 0xfa, 0x6c, 0x1f, 0xb3, 0x18, 0xd1,
+0xc2, 0x3d, 0x7d, 0x29, 0x42, 0x88, 0xad, 0x82,
+0xa5, 0xb1, 0x8a, 0x05, 0xd0, 0xec, 0x5c, 0x91,
+0x20, 0xf6, 0x82, 0xfd, 0xd5, 0x67, 0x60, 0x5f,
+0x31, 0xf5, 0xbd, 0x88, 0x91, 0x70, 0xbd, 0xb8,
+0xb9, 0x8c, 0x88, 0xfe, 0x53, 0xc9, 0x54, 0x9b,
+0x43, 0xc4, 0x7a, 0x43, 0x74, 0x6b, 0xdd, 0xb0,
+0xb1, 0x3b, 0x33, 0x45, 0x46, 0x78, 0xa3, 0x1c,
+0xef, 0x54, 0x68, 0xf7, 0x85, 0x9c, 0xe4, 0x51,
+0x6f, 0x06, 0xaf, 0x81, 0xdb, 0x2a, 0x7b, 0x7b,
+0x6f, 0xa8, 0x9c, 0x67, 0xd8, 0xcb, 0xc9, 0x91,
+0x40, 0x00, 0xae, 0xd9, 0xa1, 0x9f, 0xdd, 0xa6,
+0x43, 0x0e, 0x28, 0x7b, 0xaa, 0x1b, 0xe9, 0x84,
+0xdb, 0x76, 0x64, 0x42, 0x70, 0xc9, 0xc0, 0xeb,
+0xae, 0x84, 0x11, 0x16, 0x68, 0x4e, 0x84, 0x9e,
+0x7e, 0x92, 0x36, 0xee, 0x1c, 0x3b, 0x08, 0x63,
+0xeb, 0x79, 0x84, 0x15, 0x08, 0x9d, 0xaf, 0xc8,
+0x9a, 0xc7, 0x34, 0xd3, 0x94, 0x4b, 0xd1, 0x28,
+0x97, 0xbe, 0xd1, 0x45, 0x75, 0xdc, 0x35, 0x62,
+0xac, 0x1d, 0x1f, 0xb7, 0xb7, 0x15, 0x87, 0xc8,
+0x98, 0xc0, 0x24, 0x31, 0x56, 0x8d, 0xed, 0xdb,
+0x06, 0xc6, 0x46, 0xbf, 0x4b, 0x6d, 0xa6, 0xd5,
+0xab, 0xcc, 0x60, 0xfc, 0xe5, 0x37, 0xb6, 0x53,
+0x7d, 0x58, 0x95, 0xa9, 0x56, 0xc7, 0xf7, 0xee,
+0xc3, 0xa0, 0x76, 0xf7, 0x65, 0x4d, 0x53, 0xfa,
+0xff, 0x5f, 0x76, 0x33, 0x5a, 0x08, 0xfa, 0x86,
+0x92, 0x5a, 0x13, 0xfa, 0x1a, 0xfc, 0xf2, 0x1b,
+0x8c, 0x7f, 0x42, 0x6d, 0xb7, 0x7e, 0xb7, 0xb4,
+0xf0, 0xc7, 0x83, 0xbb, 0xa2, 0x81, 0x03, 0x2d,
+0xd4, 0x2a, 0x63, 0x3f, 0xf7, 0x31, 0x2e, 0x40,
+0x33, 0x5c, 0x46, 0xbc, 0x9b, 0xc1, 0x05, 0xa5,
+0x45, 0x4e, 0xc3,
diff --git a/scripts/sign-file.c b/scripts/sign-file.c
index 598ef5465f8256..3edb156ae52c30 100644
--- a/scripts/sign-file.c
+++ b/scripts/sign-file.c
@@ -322,7 +322,7 @@ int main(int argc, char **argv)
CMS_NOSMIMECAP | use_keyid |
use_signed_attrs),
"CMS_add1_signer");
- ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0,
+ ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) != 1,
"CMS_final");
#else
@@ -341,10 +341,10 @@ int main(int argc, char **argv)
b = BIO_new_file(sig_file_name, "wb");
ERR(!b, "%s", sig_file_name);
#ifndef USE_PKCS7
- ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0,
+ ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) != 1,
"%s", sig_file_name);
#else
- ERR(i2d_PKCS7_bio(b, pkcs7) < 0,
+ ERR(i2d_PKCS7_bio(b, pkcs7) != 1,
"%s", sig_file_name);
#endif
BIO_free(b);
@@ -374,9 +374,9 @@ int main(int argc, char **argv)
if (!raw_sig) {
#ifndef USE_PKCS7
- ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name);
+ ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) != 1, "%s", dest_name);
#else
- ERR(i2d_PKCS7_bio(bd, pkcs7) < 0, "%s", dest_name);
+ ERR(i2d_PKCS7_bio(bd, pkcs7) != 1, "%s", dest_name);
#endif
} else {
BIO *b;
@@ -396,7 +396,7 @@ int main(int argc, char **argv)
ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name);
ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name);
- ERR(BIO_free(bd) < 0, "%s", dest_name);
+ ERR(BIO_free(bd) != 1, "%s", dest_name);
/* Finally, if we're signing in place, replace the original. */
if (replace_orig)
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 3c90807476eb0e..eaddaceda14eab 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -67,6 +67,19 @@ void key_schedule_gc(time64_t gc_at)
}
/*
+ * Set the expiration time on a key.
+ */
+void key_set_expiry(struct key *key, time64_t expiry)
+{
+ key->expiry = expiry;
+ if (expiry != TIME64_MAX) {
+ if (!(key->type->flags & KEY_TYPE_INSTANT_REAP))
+ expiry += key_gc_delay;
+ key_schedule_gc(expiry);
+ }
+}
+
+/*
* Schedule a dead links collection run.
*/
void key_schedule_gc_links(void)
@@ -176,7 +189,6 @@ static void key_garbage_collector(struct work_struct *work)
static u8 gc_state; /* Internal persistent state */
#define KEY_GC_REAP_AGAIN 0x01 /* - Need another cycle */
#define KEY_GC_REAPING_LINKS 0x02 /* - We need to reap links */
-#define KEY_GC_SET_TIMER 0x04 /* - We need to restart the timer */
#define KEY_GC_REAPING_DEAD_1 0x10 /* - We need to mark dead keys */
#define KEY_GC_REAPING_DEAD_2 0x20 /* - We need to reap dead key links */
#define KEY_GC_REAPING_DEAD_3 0x40 /* - We need to reap dead keys */
@@ -184,21 +196,17 @@ static void key_garbage_collector(struct work_struct *work)
struct rb_node *cursor;
struct key *key;
- time64_t new_timer, limit;
+ time64_t new_timer, limit, expiry;
kenter("[%lx,%x]", key_gc_flags, gc_state);
limit = ktime_get_real_seconds();
- if (limit > key_gc_delay)
- limit -= key_gc_delay;
- else
- limit = key_gc_delay;
/* Work out what we're going to be doing in this pass */
gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
gc_state <<= 1;
if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
- gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER;
+ gc_state |= KEY_GC_REAPING_LINKS;
if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
gc_state |= KEY_GC_REAPING_DEAD_1;
@@ -233,8 +241,11 @@ continue_scanning:
}
}
- if (gc_state & KEY_GC_SET_TIMER) {
- if (key->expiry > limit && key->expiry < new_timer) {
+ expiry = key->expiry;
+ if (expiry != TIME64_MAX) {
+ if (!(key->type->flags & KEY_TYPE_INSTANT_REAP))
+ expiry += key_gc_delay;
+ if (expiry > limit && expiry < new_timer) {
kdebug("will expire %x in %lld",
key_serial(key), key->expiry - limit);
new_timer = key->expiry;
@@ -276,7 +287,7 @@ maybe_resched:
*/
kdebug("pass complete");
- if (gc_state & KEY_GC_SET_TIMER && new_timer != (time64_t)TIME64_MAX) {
+ if (new_timer != TIME64_MAX) {
new_timer += key_gc_delay;
key_schedule_gc(new_timer);
}
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 471cf36dedc026..2cffa6dc82557b 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -167,6 +167,7 @@ extern unsigned key_gc_delay;
extern void keyring_gc(struct key *keyring, time64_t limit);
extern void keyring_restriction_gc(struct key *keyring,
struct key_type *dead_type);
+void key_set_expiry(struct key *key, time64_t expiry);
extern void key_schedule_gc(time64_t gc_at);
extern void key_schedule_gc_links(void);
extern void key_gc_keytype(struct key_type *ktype);
@@ -215,10 +216,18 @@ extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
*/
static inline bool key_is_dead(const struct key *key, time64_t limit)
{
+ time64_t expiry = key->expiry;
+
+ if (expiry != TIME64_MAX) {
+ if (!(key->type->flags & KEY_TYPE_INSTANT_REAP))
+ expiry += key_gc_delay;
+ if (expiry <= limit)
+ return true;
+ }
+
return
key->flags & ((1 << KEY_FLAG_DEAD) |
(1 << KEY_FLAG_INVALIDATED)) ||
- (key->expiry > 0 && key->expiry <= limit) ||
key->domain_tag->removed;
}
diff --git a/security/keys/key.c b/security/keys/key.c
index 0260a1902922eb..5b10641debd549 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -294,6 +294,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
key->uid = uid;
key->gid = gid;
key->perm = perm;
+ key->expiry = TIME64_MAX;
key->restrict_link = restrict_link;
key->last_used_at = ktime_get_real_seconds();
@@ -463,10 +464,7 @@ static int __key_instantiate_and_link(struct key *key,
if (authkey)
key_invalidate(authkey);
- if (prep->expiry != TIME64_MAX) {
- key->expiry = prep->expiry;
- key_schedule_gc(prep->expiry + key_gc_delay);
- }
+ key_set_expiry(key, prep->expiry);
}
}
@@ -606,8 +604,7 @@ int key_reject_and_link(struct key *key,
atomic_inc(&key->user->nikeys);
mark_key_instantiated(key, -error);
notify_key(key, NOTIFY_KEY_INSTANTIATED, -error);
- key->expiry = ktime_get_real_seconds() + timeout;
- key_schedule_gc(key->expiry + key_gc_delay);
+ key_set_expiry(key, ktime_get_real_seconds() + timeout);
if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
awaken = 1;
@@ -723,16 +720,14 @@ found_kernel_type:
void key_set_timeout(struct key *key, unsigned timeout)
{
- time64_t expiry = 0;
+ time64_t expiry = TIME64_MAX;
/* make the changes with the locks held to prevent races */
down_write(&key->sem);
if (timeout > 0)
expiry = ktime_get_real_seconds() + timeout;
-
- key->expiry = expiry;
- key_schedule_gc(key->expiry + key_gc_delay);
+ key_set_expiry(key, expiry);
up_write(&key->sem);
}
diff --git a/security/keys/proc.c b/security/keys/proc.c
index d0cde6685627f2..4f4e2c1824f18b 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -198,7 +198,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
/* come up with a suitable timeout value */
expiry = READ_ONCE(key->expiry);
- if (expiry == 0) {
+ if (expiry == TIME64_MAX) {
memcpy(xbuf, "perm", 5);
} else if (now >= expiry) {
memcpy(xbuf, "expd", 5);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index feda711c6b7b84..340b2bbbb2dd35 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1660,8 +1660,6 @@ static int inode_has_perm(const struct cred *cred,
struct inode_security_struct *isec;
u32 sid;
- validate_creds(cred);
-
if (unlikely(IS_PRIVATE(inode)))
return 0;
@@ -3056,8 +3054,6 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode,
struct inode_security_struct *isec;
u32 sid;
- validate_creds(cred);
-
ad.type = LSM_AUDIT_DATA_DENTRY;
ad.u.dentry = dentry;
sid = cred_sid(cred);
@@ -3101,8 +3097,6 @@ static int selinux_inode_permission(struct inode *inode, int mask)
if (!mask)
return 0;
- validate_creds(cred);
-
if (unlikely(IS_PRIVATE(inode)))
return 0;
diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c
index cbd7d8badf9130..92ca2b3b6c9240 100644
--- a/sound/pci/hda/cs35l41_hda.c
+++ b/sound/pci/hda/cs35l41_hda.c
@@ -1826,6 +1826,7 @@ err:
if (cs35l41_safe_reset(cs35l41->regmap, cs35l41->hw_cfg.bst_type))
gpiod_set_value_cansleep(cs35l41->reset_gpio, 0);
gpiod_put(cs35l41->reset_gpio);
+ gpiod_put(cs35l41->cs_gpio);
acpi_dev_put(cs35l41->dacpi);
kfree(cs35l41->acpi_subsystem_id);
@@ -1853,6 +1854,7 @@ void cs35l41_hda_remove(struct device *dev)
if (cs35l41_safe_reset(cs35l41->regmap, cs35l41->hw_cfg.bst_type))
gpiod_set_value_cansleep(cs35l41->reset_gpio, 0);
gpiod_put(cs35l41->reset_gpio);
+ gpiod_put(cs35l41->cs_gpio);
kfree(cs35l41->acpi_subsystem_id);
}
EXPORT_SYMBOL_NS_GPL(cs35l41_hda_remove, SND_HDA_SCODEC_CS35L41);
diff --git a/sound/pci/hda/cs35l41_hda.h b/sound/pci/hda/cs35l41_hda.h
index ce3f2bb6ffd08a..3d925d677213d4 100644
--- a/sound/pci/hda/cs35l41_hda.h
+++ b/sound/pci/hda/cs35l41_hda.h
@@ -35,8 +35,8 @@ struct cs35l41_amp_efi_data {
} __packed;
enum cs35l41_hda_spk_pos {
- CS35l41_LEFT,
- CS35l41_RIGHT,
+ CS35L41_LEFT,
+ CS35L41_RIGHT,
};
enum cs35l41_hda_gpio_function {
@@ -50,6 +50,7 @@ struct cs35l41_hda {
struct device *dev;
struct regmap *regmap;
struct gpio_desc *reset_gpio;
+ struct gpio_desc *cs_gpio;
struct cs35l41_hw_cfg hw_cfg;
struct hda_codec *codec;
diff --git a/sound/pci/hda/cs35l41_hda_property.c b/sound/pci/hda/cs35l41_hda_property.c
index c83328971728f4..194e1179a253e2 100644
--- a/sound/pci/hda/cs35l41_hda_property.c
+++ b/sound/pci/hda/cs35l41_hda_property.c
@@ -6,9 +6,303 @@
//
// Author: Stefan Binding <sbinding@opensource.cirrus.com>
+#include <linux/acpi.h>
#include <linux/gpio/consumer.h>
#include <linux/string.h>
#include "cs35l41_hda_property.h"
+#include <linux/spi/spi.h>
+
+#define MAX_AMPS 4
+
+struct cs35l41_config {
+ const char *ssid;
+ enum {
+ SPI,
+ I2C
+ } bus;
+ int num_amps;
+ enum {
+ INTERNAL,
+ EXTERNAL
+ } boost_type;
+ u8 channel[MAX_AMPS];
+ int reset_gpio_index; /* -1 if no reset gpio */
+ int spkid_gpio_index; /* -1 if no spkid gpio */
+ int cs_gpio_index; /* -1 if no cs gpio, or cs-gpios already exists, max num amps == 2 */
+ int boost_ind_nanohenry; /* Required if boost_type == Internal */
+ int boost_peak_milliamp; /* Required if boost_type == Internal */
+ int boost_cap_microfarad; /* Required if boost_type == Internal */
+};
+
+static const struct cs35l41_config cs35l41_config_table[] = {
+/*
+ * Device 103C89C6 does have _DSD, however it is setup to use the wrong boost type.
+ * We can override the _DSD to correct the boost type here.
+ * Since this laptop has valid ACPI, we do not need to handle cs-gpios, since that already exists
+ * in the ACPI. The Reset GPIO is also valid, so we can use the Reset defined in _DSD.
+ */
+ { "103C89C6", SPI, 2, INTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, 0, 0 }, -1, -1, -1, 1000, 4500, 24 },
+ { "104312AF", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
+ { "10431433", I2C, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
+ { "10431463", I2C, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
+ { "10431473", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, -1, 0, 1000, 4500, 24 },
+ { "10431483", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, -1, 0, 1000, 4500, 24 },
+ { "10431493", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
+ { "104314D3", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
+ { "104314E3", I2C, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
+ { "10431503", I2C, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
+ { "10431533", I2C, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
+ { "10431573", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
+ { "10431663", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, -1, 0, 1000, 4500, 24 },
+ { "104316D3", SPI, 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
+ { "104316F3", SPI, 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
+ { "104317F3", I2C, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
+ { "10431863", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
+ { "104318D3", I2C, 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
+ { "10431C9F", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
+ { "10431CAF", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
+ { "10431CCF", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
+ { "10431CDF", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
+ { "10431CEF", SPI, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
+ { "10431D1F", I2C, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
+ { "10431DA2", SPI, 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
+ { "10431E02", SPI, 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
+ { "10431EE2", I2C, 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, -1, -1, 0, 0, 0 },
+ { "10431F12", I2C, 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
+ { "10431F1F", SPI, 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, -1, 0, 0, 0, 0 },
+ { "10431F62", SPI, 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
+ {}
+};
+
+static int cs35l41_add_gpios(struct cs35l41_hda *cs35l41, struct device *physdev, int reset_gpio,
+ int spkid_gpio, int cs_gpio_index, int num_amps)
+{
+ struct acpi_gpio_mapping *gpio_mapping = NULL;
+ struct acpi_gpio_params *reset_gpio_params = NULL;
+ struct acpi_gpio_params *spkid_gpio_params = NULL;
+ struct acpi_gpio_params *cs_gpio_params = NULL;
+ unsigned int num_entries = 0;
+ unsigned int reset_index, spkid_index, csgpio_index;
+ int i;
+
+ /*
+ * GPIO Mapping only needs to be done once, since it would be available for subsequent amps
+ */
+ if (cs35l41->dacpi->driver_gpios)
+ return 0;
+
+ if (reset_gpio >= 0) {
+ reset_index = num_entries;
+ num_entries++;
+ }
+
+ if (spkid_gpio >= 0) {
+ spkid_index = num_entries;
+ num_entries++;
+ }
+
+ if ((cs_gpio_index >= 0) && (num_amps == 2)) {
+ csgpio_index = num_entries;
+ num_entries++;
+ }
+
+ if (!num_entries)
+ return 0;
+
+ /* must include termination entry */
+ num_entries++;
+
+ gpio_mapping = devm_kcalloc(physdev, num_entries, sizeof(struct acpi_gpio_mapping),
+ GFP_KERNEL);
+
+ if (!gpio_mapping)
+ goto err;
+
+ if (reset_gpio >= 0) {
+ gpio_mapping[reset_index].name = "reset-gpios";
+ reset_gpio_params = devm_kcalloc(physdev, num_amps, sizeof(struct acpi_gpio_params),
+ GFP_KERNEL);
+ if (!reset_gpio_params)
+ goto err;
+
+ for (i = 0; i < num_amps; i++)
+ reset_gpio_params[i].crs_entry_index = reset_gpio;
+
+ gpio_mapping[reset_index].data = reset_gpio_params;
+ gpio_mapping[reset_index].size = num_amps;
+ }
+
+ if (spkid_gpio >= 0) {
+ gpio_mapping[spkid_index].name = "spk-id-gpios";
+ spkid_gpio_params = devm_kcalloc(physdev, num_amps, sizeof(struct acpi_gpio_params),
+ GFP_KERNEL);
+ if (!spkid_gpio_params)
+ goto err;
+
+ for (i = 0; i < num_amps; i++)
+ spkid_gpio_params[i].crs_entry_index = spkid_gpio;
+
+ gpio_mapping[spkid_index].data = spkid_gpio_params;
+ gpio_mapping[spkid_index].size = num_amps;
+ }
+
+ if ((cs_gpio_index >= 0) && (num_amps == 2)) {
+ gpio_mapping[csgpio_index].name = "cs-gpios";
+ /* only one GPIO CS is supported without using _DSD, obtained using index 0 */
+ cs_gpio_params = devm_kzalloc(physdev, sizeof(struct acpi_gpio_params), GFP_KERNEL);
+ if (!cs_gpio_params)
+ goto err;
+
+ cs_gpio_params->crs_entry_index = cs_gpio_index;
+
+ gpio_mapping[csgpio_index].data = cs_gpio_params;
+ gpio_mapping[csgpio_index].size = 1;
+ }
+
+ return devm_acpi_dev_add_driver_gpios(physdev, gpio_mapping);
+err:
+ devm_kfree(physdev, gpio_mapping);
+ devm_kfree(physdev, reset_gpio_params);
+ devm_kfree(physdev, spkid_gpio_params);
+ devm_kfree(physdev, cs_gpio_params);
+ return -ENOMEM;
+}
+
+static int generic_dsd_config(struct cs35l41_hda *cs35l41, struct device *physdev, int id,
+ const char *hid)
+{
+ struct cs35l41_hw_cfg *hw_cfg = &cs35l41->hw_cfg;
+ const struct cs35l41_config *cfg;
+ struct gpio_desc *cs_gpiod;
+ struct spi_device *spi;
+ bool dsd_found;
+ int ret;
+
+ for (cfg = cs35l41_config_table; cfg->ssid; cfg++) {
+ if (!strcasecmp(cfg->ssid, cs35l41->acpi_subsystem_id))
+ break;
+ }
+
+ if (!cfg->ssid)
+ return -ENOENT;
+
+ if (!cs35l41->dacpi || cs35l41->dacpi != ACPI_COMPANION(physdev)) {
+ dev_err(cs35l41->dev, "ACPI Device does not match, cannot override _DSD.\n");
+ return -ENODEV;
+ }
+
+ dev_info(cs35l41->dev, "Adding DSD properties for %s\n", cs35l41->acpi_subsystem_id);
+
+ dsd_found = acpi_dev_has_props(cs35l41->dacpi);
+
+ if (!dsd_found) {
+ ret = cs35l41_add_gpios(cs35l41, physdev, cfg->reset_gpio_index,
+ cfg->spkid_gpio_index, cfg->cs_gpio_index,
+ cfg->num_amps);
+ if (ret) {
+ dev_err(cs35l41->dev, "Error adding GPIO mapping: %d\n", ret);
+ return ret;
+ }
+ } else if (cfg->reset_gpio_index >= 0 || cfg->spkid_gpio_index >= 0) {
+ dev_warn(cs35l41->dev, "Cannot add Reset/Speaker ID/SPI CS GPIO Mapping, "
+ "_DSD already exists.\n");
+ }
+
+ if (cfg->bus == SPI) {
+ cs35l41->index = id;
+
+#if IS_ENABLED(CONFIG_SPI)
+ /*
+ * Manually set the Chip Select for the second amp <cs_gpio_index> in the node.
+ * This is only supported for systems with 2 amps, since we cannot expand the
+ * default number of chip selects without using cs-gpios
+ * The CS GPIO must be set high prior to communicating with the first amp (which
+ * uses a native chip select), to ensure the second amp does not clash with the
+ * first.
+ */
+ if (cfg->cs_gpio_index >= 0) {
+ spi = to_spi_device(cs35l41->dev);
+
+ if (cfg->num_amps != 2) {
+ dev_warn(cs35l41->dev,
+ "Cannot update SPI CS, Number of Amps (%d) != 2\n",
+ cfg->num_amps);
+ } else if (dsd_found) {
+ dev_warn(cs35l41->dev,
+ "Cannot update SPI CS, _DSD already exists.\n");
+ } else {
+ /*
+ * This is obtained using driver_gpios, since only one GPIO for CS
+ * exists, this can be obtained using index 0.
+ */
+ cs_gpiod = gpiod_get_index(physdev, "cs", 0, GPIOD_OUT_LOW);
+ if (IS_ERR(cs_gpiod)) {
+ dev_err(cs35l41->dev,
+ "Unable to get Chip Select GPIO descriptor\n");
+ return PTR_ERR(cs_gpiod);
+ }
+ if (id == 1) {
+ spi_set_csgpiod(spi, 0, cs_gpiod);
+ cs35l41->cs_gpio = cs_gpiod;
+ } else {
+ gpiod_set_value_cansleep(cs_gpiod, true);
+ gpiod_put(cs_gpiod);
+ }
+ spi_setup(spi);
+ }
+ }
+#endif
+ } else {
+ if (cfg->num_amps > 2)
+ /*
+ * i2c addresses for 3/4 amps are used in order: 0x40, 0x41, 0x42, 0x43,
+ * subtracting 0x40 would give zero-based index
+ */
+ cs35l41->index = id - 0x40;
+ else
+ /* i2c addr 0x40 for first amp (always), 0x41/0x42 for 2nd amp */
+ cs35l41->index = id == 0x40 ? 0 : 1;
+ }
+
+ if (cfg->num_amps == 3)
+ /* 3 amps means a center channel, so no duplicate channels */
+ cs35l41->channel_index = 0;
+ else
+ /*
+ * if 4 amps, there are duplicate channels, so they need different indexes
+ * if 2 amps, no duplicate channels, channel_index would be 0
+ */
+ cs35l41->channel_index = cs35l41->index / 2;
+
+ cs35l41->reset_gpio = fwnode_gpiod_get_index(acpi_fwnode_handle(cs35l41->dacpi), "reset",
+ cs35l41->index, GPIOD_OUT_LOW,
+ "cs35l41-reset");
+ cs35l41->speaker_id = cs35l41_get_speaker_id(physdev, cs35l41->index, cfg->num_amps, -1);
+
+ hw_cfg->spk_pos = cfg->channel[cs35l41->index];
+
+ if (cfg->boost_type == INTERNAL) {
+ hw_cfg->bst_type = CS35L41_INT_BOOST;
+ hw_cfg->bst_ind = cfg->boost_ind_nanohenry;
+ hw_cfg->bst_ipk = cfg->boost_peak_milliamp;
+ hw_cfg->bst_cap = cfg->boost_cap_microfarad;
+ hw_cfg->gpio1.func = CS35L41_NOT_USED;
+ hw_cfg->gpio1.valid = true;
+ } else {
+ hw_cfg->bst_type = CS35L41_EXT_BOOST;
+ hw_cfg->bst_ind = -1;
+ hw_cfg->bst_ipk = -1;
+ hw_cfg->bst_cap = -1;
+ hw_cfg->gpio1.func = CS35l41_VSPK_SWITCH;
+ hw_cfg->gpio1.valid = true;
+ }
+
+ hw_cfg->gpio2.func = CS35L41_INTERRUPT;
+ hw_cfg->gpio2.valid = true;
+ hw_cfg->valid = true;
+
+ return 0;
+}
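
For reference, a standalone sketch (not part of the patch) of the amp index/channel arithmetic generic_dsd_config() applies above for the I2C case: 3/4-amp systems enumerate devices at 0x40..0x43, so subtracting 0x40 gives the zero-based index, and with four amps the duplicated channels pair up as index / 2. The helper below is hypothetical, ordinary userspace C, not driver code.

#include <stdio.h>

/* Hypothetical helper mirroring the I2C index/channel math above. */
static void map_amp(int i2c_addr, int num_amps, int *index, int *channel_index)
{
	if (num_amps > 2)
		*index = i2c_addr - 0x40;            /* 0x40..0x43 -> 0..3 */
	else
		*index = (i2c_addr == 0x40) ? 0 : 1; /* 0x40 first amp, 0x41/0x42 second */

	if (num_amps == 3)
		*channel_index = 0;                  /* centre channel, no duplicates */
	else
		*channel_index = *index / 2;         /* 4 amps: duplicated channels pair up */
}

int main(void)
{
	const int addrs[] = { 0x40, 0x41, 0x42, 0x43 };

	for (int i = 0; i < 4; i++) {
		int idx, chan;

		map_amp(addrs[i], 4, &idx, &chan);
		printf("addr 0x%02x -> index %d, channel_index %d\n", addrs[i], idx, chan);
	}
	return 0;
}
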
/*
* Device CLSA010(0/1) doesn't have _DSD so a gpiod_get by the label reset won't work.
@@ -43,44 +337,6 @@ static int lenovo_legion_no_acpi(struct cs35l41_hda *cs35l41, struct device *phy
return 0;
}
-/*
- * Device 103C89C6 does have _DSD, however it is setup to use the wrong boost type.
- * We can override the _DSD to correct the boost type here.
- * Since this laptop has valid ACPI, we do not need to handle cs-gpios, since that already exists
- * in the ACPI.
- */
-static int hp_vision_acpi_fix(struct cs35l41_hda *cs35l41, struct device *physdev, int id,
- const char *hid)
-{
- struct cs35l41_hw_cfg *hw_cfg = &cs35l41->hw_cfg;
-
- dev_info(cs35l41->dev, "Adding DSD properties for %s\n", cs35l41->acpi_subsystem_id);
-
- cs35l41->index = id;
- cs35l41->channel_index = 0;
-
- /*
- * This system has _DSD, it just contains an error, so we can still get the reset using
- * the "reset" label.
- */
- cs35l41->reset_gpio = fwnode_gpiod_get_index(acpi_fwnode_handle(cs35l41->dacpi), "reset",
- cs35l41->index, GPIOD_OUT_LOW,
- "cs35l41-reset");
- cs35l41->speaker_id = -ENOENT;
- hw_cfg->spk_pos = cs35l41->index ? 0 : 1; // right:left
- hw_cfg->gpio1.func = CS35L41_NOT_USED;
- hw_cfg->gpio1.valid = true;
- hw_cfg->gpio2.func = CS35L41_INTERRUPT;
- hw_cfg->gpio2.valid = true;
- hw_cfg->bst_type = CS35L41_INT_BOOST;
- hw_cfg->bst_ind = 1000;
- hw_cfg->bst_ipk = 4500;
- hw_cfg->bst_cap = 24;
- hw_cfg->valid = true;
-
- return 0;
-}
-
struct cs35l41_prop_model {
const char *hid;
const char *ssid;
@@ -91,7 +347,36 @@ struct cs35l41_prop_model {
static const struct cs35l41_prop_model cs35l41_prop_model_table[] = {
{ "CLSA0100", NULL, lenovo_legion_no_acpi },
{ "CLSA0101", NULL, lenovo_legion_no_acpi },
- { "CSC3551", "103C89C6", hp_vision_acpi_fix },
+ { "CSC3551", "103C89C6", generic_dsd_config },
+ { "CSC3551", "104312AF", generic_dsd_config },
+ { "CSC3551", "10431433", generic_dsd_config },
+ { "CSC3551", "10431463", generic_dsd_config },
+ { "CSC3551", "10431473", generic_dsd_config },
+ { "CSC3551", "10431483", generic_dsd_config },
+ { "CSC3551", "10431493", generic_dsd_config },
+ { "CSC3551", "104314D3", generic_dsd_config },
+ { "CSC3551", "104314E3", generic_dsd_config },
+ { "CSC3551", "10431503", generic_dsd_config },
+ { "CSC3551", "10431533", generic_dsd_config },
+ { "CSC3551", "10431573", generic_dsd_config },
+ { "CSC3551", "10431663", generic_dsd_config },
+ { "CSC3551", "104316D3", generic_dsd_config },
+ { "CSC3551", "104316F3", generic_dsd_config },
+ { "CSC3551", "104317F3", generic_dsd_config },
+ { "CSC3551", "10431863", generic_dsd_config },
+ { "CSC3551", "104318D3", generic_dsd_config },
+ { "CSC3551", "10431C9F", generic_dsd_config },
+ { "CSC3551", "10431CAF", generic_dsd_config },
+ { "CSC3551", "10431CCF", generic_dsd_config },
+ { "CSC3551", "10431CDF", generic_dsd_config },
+ { "CSC3551", "10431CEF", generic_dsd_config },
+ { "CSC3551", "10431D1F", generic_dsd_config },
+ { "CSC3551", "10431DA2", generic_dsd_config },
+ { "CSC3551", "10431E02", generic_dsd_config },
+ { "CSC3551", "10431EE2", generic_dsd_config },
+ { "CSC3551", "10431F12", generic_dsd_config },
+ { "CSC3551", "10431F1F", generic_dsd_config },
+ { "CSC3551", "10431F62", generic_dsd_config },
{}
};
@@ -104,7 +389,7 @@ int cs35l41_add_dsd_properties(struct cs35l41_hda *cs35l41, struct device *physd
if (!strcmp(model->hid, hid) &&
(!model->ssid ||
(cs35l41->acpi_subsystem_id &&
- !strcmp(model->ssid, cs35l41->acpi_subsystem_id))))
+ !strcasecmp(model->ssid, cs35l41->acpi_subsystem_id))))
return model->add_prop(cs35l41, physdev, id, hid);
}
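
As an aside, a minimal standalone sketch of the case-insensitive SSID match the hunk above switches to (strcmp() -> strcasecmp()); the table contents and helper names are illustrative only.

#include <stdio.h>
#include <string.h>
#include <strings.h>	/* strcasecmp() */

struct prop_model {
	const char *hid;
	const char *ssid;	/* NULL matches any subsystem id */
};

static const struct prop_model table[] = {
	{ "CLSA0100", NULL },
	{ "CSC3551", "104312AF" },
	{ NULL, NULL }
};

/* Return the matching entry, comparing the SSID case-insensitively. */
static const struct prop_model *find_model(const char *hid, const char *ssid)
{
	for (const struct prop_model *m = table; m->hid; m++) {
		if (!strcmp(m->hid, hid) &&
		    (!m->ssid || (ssid && !strcasecmp(m->ssid, ssid))))
			return m;
	}
	return NULL;
}

int main(void)
{
	/* "104312af" still matches thanks to strcasecmp(). */
	printf("match: %s\n", find_model("CSC3551", "104312af") ? "yes" : "no");
	return 0;
}
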
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 1cde2a69bdb4ba..78cee53fee02aa 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1993,7 +1993,10 @@ static const struct snd_pci_quirk force_connect_list[] = {
SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1),
SND_PCI_QUIRK(0x103c, 0x8715, "HP", 1),
+ SND_PCI_QUIRK(0x1043, 0x86ae, "ASUS", 1), /* Z170 PRO */
+ SND_PCI_QUIRK(0x1043, 0x86c7, "ASUS", 1), /* Z170M PLUS */
SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1),
+ SND_PCI_QUIRK(0x8086, 0x2060, "Intel NUC5CPYB", 1),
SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1),
{}
};
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 0377912e926435..c3a75652888677 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -9795,6 +9795,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x84ae, "HP 15-db0403ng", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
@@ -9947,21 +9948,28 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
- SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650P", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
- SND_PCI_QUIRK(0x1043, 0x1463, "Asus GA402X", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
- SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604V", ALC285_FIXUP_ASUS_HEADSET_MIC),
- SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603V", ALC285_FIXUP_ASUS_HEADSET_MIC),
- SND_PCI_QUIRK(0x1043, 0x1493, "ASUS GV601V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650PY/PZ/PV/PU/PYV/PZV/PIV/PVV", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1463, "Asus GA402X/GA402N", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604VI/VC/VE/VG/VJ/VQ/VU/VV/VY/VZ", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603VQ/VU/VV/VJ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1493, "ASUS GV601VV/VU/VJ/VQ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x14d3, "ASUS G614JY/JZ/JG", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x14e3, "ASUS G513PI/PU/PV", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1503, "ASUS G733PY/PZ/PZV/PYV", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
- SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1533, "ASUS GV302XA/XJ/XQ/XU/XV/XI", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301VV/VQ/VU/VJ/VA/VC/VE/VVC/VQC/VUC/VJC/VEC/VCC", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
- SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZV", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZI/ZJ/ZQ/ZU/ZV", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x16d3, "ASUS UX5304VA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x16f3, "ASUS UX7602VI/BZ", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
- SND_PCI_QUIRK(0x1043, 0x17f3, "ROG Ally RC71L_RC71L", ALC294_FIXUP_ASUS_ALLY),
+ SND_PCI_QUIRK(0x1043, 0x17f3, "ROG Ally NR2301L/X", ALC294_FIXUP_ASUS_ALLY),
+ SND_PCI_QUIRK(0x1043, 0x1863, "ASUS UX6404VI/VV", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x18d3, "ASUS UM3504DA", ALC294_FIXUP_CS35L41_I2C_2),
@@ -9986,23 +9994,30 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1c43, "ASUS UX8406MA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
- SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JI", ALC285_FIXUP_ASUS_HEADSET_MIC),
- SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JU/JV/JI", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JY/JZ/JI/JG", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
- SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS ROG Strix G17 2023 (G713PV)", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1ccf, "ASUS G814JU/JV/JI", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1cdf, "ASUS G814JY/JZ/JG", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1cef, "ASUS G834JY/JZ/JI/JG", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS G713PI/PU/PV/PVN", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
+ SND_PCI_QUIRK(0x1043, 0x1da2, "ASUS UP6502ZA/ZD", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
- SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM6702RA/RC", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x1ee2, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1f12, "ASUS UM5302", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1f1f, "ASUS H7604JI/JV/J3D", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC245_FIXUP_CS35L41_SPI_2),
diff --git a/sound/pci/hda/tas2781_hda_i2c.c b/sound/pci/hda/tas2781_hda_i2c.c
index fb802802939e17..2fb1a7037c82cf 100644
--- a/sound/pci/hda/tas2781_hda_i2c.c
+++ b/sound/pci/hda/tas2781_hda_i2c.c
@@ -455,9 +455,9 @@ static int tas2781_save_calibration(struct tasdevice_priv *tas_priv)
status = efi.get_variable(efi_name, &efi_guid, &attr,
&tas_priv->cali_data.total_sz,
tas_priv->cali_data.data);
- if (status != EFI_SUCCESS)
- return -EINVAL;
}
+ if (status != EFI_SUCCESS)
+ return -EINVAL;
tmp_val = (unsigned int *)tas_priv->cali_data.data;
@@ -543,6 +543,10 @@ static void tasdev_fw_ready(const struct firmware *fmw, void *context)
tas_priv->fw_state = TASDEVICE_DSP_FW_ALL_OK;
tasdevice_prmg_load(tas_priv, 0);
+ if (tas_priv->fmw->nr_programs > 0)
+ tas_priv->cur_prog = 0;
+ if (tas_priv->fmw->nr_configurations > 0)
+ tas_priv->cur_conf = 0;
	/* If the calibration data contains an error, the DSP will still work
	 * with the default calibration data inside the algorithm.
@@ -550,11 +554,6 @@ static void tasdev_fw_ready(const struct firmware *fmw, void *context)
tas2781_save_calibration(tas_priv);
out:
- if (tas_priv->fw_state == TASDEVICE_DSP_FW_FAIL) {
- /*If DSP FW fail, kcontrol won't be created */
- tasdevice_config_info_remove(tas_priv);
- tasdevice_dsp_remove(tas_priv);
- }
mutex_unlock(&tas_priv->codec_lock);
if (fmw)
release_firmware(fmw);
@@ -612,9 +611,13 @@ static void tas2781_hda_unbind(struct device *dev,
{
struct tasdevice_priv *tas_priv = dev_get_drvdata(dev);
struct hda_component *comps = master_data;
+ comps = &comps[tas_priv->index];
- if (comps[tas_priv->index].dev == dev)
- memset(&comps[tas_priv->index], 0, sizeof(*comps));
+ if (comps->dev == dev) {
+ comps->dev = NULL;
+ memset(comps->name, 0, sizeof(comps->name));
+ comps->playback_hook = NULL;
+ }
tasdevice_config_info_remove(tas_priv);
tasdevice_dsp_remove(tas_priv);
@@ -675,14 +678,14 @@ static int tas2781_hda_i2c_probe(struct i2c_client *clt)
pm_runtime_put_autosuspend(tas_priv->dev);
+ tas2781_reset(tas_priv);
+
ret = component_add(tas_priv->dev, &tas2781_hda_comp_ops);
if (ret) {
dev_err(tas_priv->dev, "Register component failed: %d\n", ret);
pm_runtime_disable(tas_priv->dev);
- goto err;
}
- tas2781_reset(tas_priv);
err:
if (ret)
tas2781_hda_remove(&clt->dev);
diff --git a/sound/soc/codecs/cs35l45-i2c.c b/sound/soc/codecs/cs35l45-i2c.c
index 77e0f8750f3757..bc2af1ed0fe9bb 100644
--- a/sound/soc/codecs/cs35l45-i2c.c
+++ b/sound/soc/codecs/cs35l45-i2c.c
@@ -62,7 +62,7 @@ static struct i2c_driver cs35l45_i2c_driver = {
.driver = {
.name = "cs35l45",
.of_match_table = cs35l45_of_match,
- .pm = &cs35l45_pm_ops,
+ .pm = pm_ptr(&cs35l45_pm_ops),
},
.id_table = cs35l45_id_i2c,
.probe = cs35l45_i2c_probe,
diff --git a/sound/soc/codecs/cs35l45-spi.c b/sound/soc/codecs/cs35l45-spi.c
index 5efb77530cc33e..39e203a5f060c6 100644
--- a/sound/soc/codecs/cs35l45-spi.c
+++ b/sound/soc/codecs/cs35l45-spi.c
@@ -64,7 +64,7 @@ static struct spi_driver cs35l45_spi_driver = {
.driver = {
.name = "cs35l45",
.of_match_table = cs35l45_of_match,
- .pm = &cs35l45_pm_ops,
+ .pm = pm_ptr(&cs35l45_pm_ops),
},
.id_table = cs35l45_id_spi,
.probe = cs35l45_spi_probe,
diff --git a/sound/soc/codecs/cs35l45.c b/sound/soc/codecs/cs35l45.c
index b68853e42fd1e3..44c221745c3b25 100644
--- a/sound/soc/codecs/cs35l45.c
+++ b/sound/soc/codecs/cs35l45.c
@@ -947,6 +947,8 @@ static int cs35l45_enter_hibernate(struct cs35l45_private *cs35l45)
cs35l45_setup_hibernate(cs35l45);
+ regmap_set_bits(cs35l45->regmap, CS35L45_IRQ1_MASK_2, CS35L45_DSP_VIRT2_MBOX_MASK);
+
// Don't wait for ACK since bus activity would wake the device
regmap_write(cs35l45->regmap, CS35L45_DSP_VIRT1_MBOX_1, CSPL_MBOX_CMD_HIBERNATE);
@@ -967,6 +969,8 @@ static int cs35l45_exit_hibernate(struct cs35l45_private *cs35l45)
CSPL_MBOX_CMD_OUT_OF_HIBERNATE);
if (!ret) {
dev_dbg(cs35l45->dev, "Wake success at cycle: %d\n", j);
+ regmap_clear_bits(cs35l45->regmap, CS35L45_IRQ1_MASK_2,
+ CS35L45_DSP_VIRT2_MBOX_MASK);
return 0;
}
usleep_range(100, 200);
@@ -982,7 +986,7 @@ static int cs35l45_exit_hibernate(struct cs35l45_private *cs35l45)
return -ETIMEDOUT;
}
-static int __maybe_unused cs35l45_runtime_suspend(struct device *dev)
+static int cs35l45_runtime_suspend(struct device *dev)
{
struct cs35l45_private *cs35l45 = dev_get_drvdata(dev);
@@ -999,7 +1003,7 @@ static int __maybe_unused cs35l45_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused cs35l45_runtime_resume(struct device *dev)
+static int cs35l45_runtime_resume(struct device *dev)
{
struct cs35l45_private *cs35l45 = dev_get_drvdata(dev);
int ret;
@@ -1026,6 +1030,46 @@ static int __maybe_unused cs35l45_runtime_resume(struct device *dev)
return ret;
}
+static int cs35l45_sys_suspend(struct device *dev)
+{
+ struct cs35l45_private *cs35l45 = dev_get_drvdata(dev);
+
+ dev_dbg(cs35l45->dev, "System suspend, disabling IRQ\n");
+ disable_irq(cs35l45->irq);
+
+ return 0;
+}
+
+static int cs35l45_sys_suspend_noirq(struct device *dev)
+{
+ struct cs35l45_private *cs35l45 = dev_get_drvdata(dev);
+
+ dev_dbg(cs35l45->dev, "Late system suspend, reenabling IRQ\n");
+ enable_irq(cs35l45->irq);
+
+ return 0;
+}
+
+static int cs35l45_sys_resume_noirq(struct device *dev)
+{
+ struct cs35l45_private *cs35l45 = dev_get_drvdata(dev);
+
+ dev_dbg(cs35l45->dev, "Early system resume, disabling IRQ\n");
+ disable_irq(cs35l45->irq);
+
+ return 0;
+}
+
+static int cs35l45_sys_resume(struct device *dev)
+{
+ struct cs35l45_private *cs35l45 = dev_get_drvdata(dev);
+
+ dev_dbg(cs35l45->dev, "System resume, reenabling IRQ\n");
+ enable_irq(cs35l45->irq);
+
+ return 0;
+}
+
static int cs35l45_apply_property_config(struct cs35l45_private *cs35l45)
{
struct device_node *node = cs35l45->dev->of_node;
@@ -1466,10 +1510,12 @@ void cs35l45_remove(struct cs35l45_private *cs35l45)
}
EXPORT_SYMBOL_NS_GPL(cs35l45_remove, SND_SOC_CS35L45);
-const struct dev_pm_ops cs35l45_pm_ops = {
- SET_RUNTIME_PM_OPS(cs35l45_runtime_suspend, cs35l45_runtime_resume, NULL)
+EXPORT_GPL_DEV_PM_OPS(cs35l45_pm_ops) = {
+ RUNTIME_PM_OPS(cs35l45_runtime_suspend, cs35l45_runtime_resume, NULL)
+
+ SYSTEM_SLEEP_PM_OPS(cs35l45_sys_suspend, cs35l45_sys_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(cs35l45_sys_suspend_noirq, cs35l45_sys_resume_noirq)
};
-EXPORT_SYMBOL_NS_GPL(cs35l45_pm_ops, SND_SOC_CS35L45);
MODULE_DESCRIPTION("ASoC CS35L45 driver");
MODULE_AUTHOR("James Schulman, Cirrus Logic Inc, <james.schulman@cirrus.com>");
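
The four system-sleep handlers added above disable the interrupt for the suspend phase, re-enable it before the noirq phase, disable it again early in resume, and re-enable it once resume completes. Below is a standalone userspace sketch of that ordering, invoking plain functions in the sequence the PM core would use; it is not the kernel's dev_pm_ops API and all names are made up.

#include <stdio.h>
#include <stdbool.h>

struct fake_dev {
	bool irq_enabled;
};

static void report(const char *phase, struct fake_dev *d)
{
	printf("%-15s irq %s\n", phase, d->irq_enabled ? "on" : "off");
}

static void sys_suspend(struct fake_dev *d)       { d->irq_enabled = false; report("suspend:", d); }
static void sys_suspend_noirq(struct fake_dev *d) { d->irq_enabled = true;  report("suspend_noirq:", d); }
static void sys_resume_noirq(struct fake_dev *d)  { d->irq_enabled = false; report("resume_noirq:", d); }
static void sys_resume(struct fake_dev *d)        { d->irq_enabled = true;  report("resume:", d); }

int main(void)
{
	struct fake_dev dev = { .irq_enabled = true };

	/* Order in which the PM core would invoke the callbacks. */
	sys_suspend(&dev);
	sys_suspend_noirq(&dev);
	/* ... system sleeps ... */
	sys_resume_noirq(&dev);
	sys_resume(&dev);
	return 0;
}
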
diff --git a/sound/soc/codecs/cs42l43-jack.c b/sound/soc/codecs/cs42l43-jack.c
index 73454de068cf8c..54a3ea60644384 100644
--- a/sound/soc/codecs/cs42l43-jack.c
+++ b/sound/soc/codecs/cs42l43-jack.c
@@ -237,7 +237,7 @@ error:
return ret;
}
-static void cs42l43_start_hs_bias(struct cs42l43_codec *priv, bool force_high)
+static void cs42l43_start_hs_bias(struct cs42l43_codec *priv, bool type_detect)
{
struct cs42l43 *cs42l43 = priv->core;
unsigned int val = 0x3 << CS42L43_HSBIAS_MODE_SHIFT;
@@ -247,16 +247,17 @@ static void cs42l43_start_hs_bias(struct cs42l43_codec *priv, bool force_high)
regmap_update_bits(cs42l43->regmap, CS42L43_HS2,
CS42L43_HS_CLAMP_DISABLE_MASK, CS42L43_HS_CLAMP_DISABLE_MASK);
- if (!force_high && priv->bias_low)
- val = 0x2 << CS42L43_HSBIAS_MODE_SHIFT;
-
- if (priv->bias_sense_ua) {
- regmap_update_bits(cs42l43->regmap,
- CS42L43_HS_BIAS_SENSE_AND_CLAMP_AUTOCONTROL,
- CS42L43_HSBIAS_SENSE_EN_MASK |
- CS42L43_AUTO_HSBIAS_CLAMP_EN_MASK,
- CS42L43_HSBIAS_SENSE_EN_MASK |
- CS42L43_AUTO_HSBIAS_CLAMP_EN_MASK);
+ if (!type_detect) {
+ if (priv->bias_low)
+ val = 0x2 << CS42L43_HSBIAS_MODE_SHIFT;
+
+ if (priv->bias_sense_ua)
+ regmap_update_bits(cs42l43->regmap,
+ CS42L43_HS_BIAS_SENSE_AND_CLAMP_AUTOCONTROL,
+ CS42L43_HSBIAS_SENSE_EN_MASK |
+ CS42L43_AUTO_HSBIAS_CLAMP_EN_MASK,
+ CS42L43_HSBIAS_SENSE_EN_MASK |
+ CS42L43_AUTO_HSBIAS_CLAMP_EN_MASK);
}
regmap_update_bits(cs42l43->regmap, CS42L43_MIC_DETECT_CONTROL_1,
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index 20da1eaa4f1c7e..0938671700c621 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -850,8 +850,9 @@ static int hdmi_dai_probe(struct snd_soc_dai *dai)
static void hdmi_codec_jack_report(struct hdmi_codec_priv *hcp,
unsigned int jack_status)
{
- if (hcp->jack && jack_status != hcp->jack_status) {
- snd_soc_jack_report(hcp->jack, jack_status, SND_JACK_LINEOUT);
+ if (jack_status != hcp->jack_status) {
+ if (hcp->jack)
+ snd_soc_jack_report(hcp->jack, jack_status, SND_JACK_LINEOUT);
hcp->jack_status = jack_status;
}
}
@@ -880,6 +881,13 @@ static int hdmi_codec_set_jack(struct snd_soc_component *component,
if (hcp->hcd.ops->hook_plugged_cb) {
hcp->jack = jack;
+
+ /*
+ * Report the initial jack status which may have been provided
+ * by the parent hdmi driver while the hpd hook was registered.
+ */
+ snd_soc_jack_report(jack, hcp->jack_status, SND_JACK_LINEOUT);
+
return 0;
}
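
The hdmi-codec hunk above replays the cached jack status as soon as a jack is attached, so a plug event that arrived before the jack was registered is not lost. A minimal standalone sketch of that cache-then-replay pattern follows; the types and names are made up for illustration.

#include <stdio.h>

struct jack { const char *name; };

struct codec {
	struct jack *jack;        /* NULL until set_jack() is called */
	unsigned int jack_status; /* last status seen from the HDMI side */
};

static void jack_report(struct jack *j, unsigned int status)
{
	printf("report to %s: status=%u\n", j->name, status);
}

/* Plug events may arrive before a jack is attached; cache them. */
static void plugged_cb(struct codec *c, unsigned int status)
{
	if (status != c->jack_status) {
		if (c->jack)
			jack_report(c->jack, status);
		c->jack_status = status;
	}
}

/* When the jack is finally attached, replay the cached status. */
static void set_jack(struct codec *c, struct jack *j)
{
	c->jack = j;
	jack_report(j, c->jack_status);
}

int main(void)
{
	struct codec c = { 0 };
	struct jack j = { .name = "HDMI jack" };

	plugged_cb(&c, 1);  /* arrives before set_jack(); only cached */
	set_jack(&c, &j);   /* cached status 1 is reported here */
	return 0;
}
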
diff --git a/sound/soc/codecs/tas2781-fmwlib.c b/sound/soc/codecs/tas2781-fmwlib.c
index 4efe95b60aaa73..5c09e441a93681 100644
--- a/sound/soc/codecs/tas2781-fmwlib.c
+++ b/sound/soc/codecs/tas2781-fmwlib.c
@@ -2189,11 +2189,11 @@ int tasdevice_select_tuningprm_cfg(void *context, int prm_no,
goto out;
}
- conf = &(tas_fmw->configs[cfg_no]);
for (i = 0, prog_status = 0; i < tas_priv->ndev; i++) {
if (cfg_info[rca_conf_no]->active_dev & (1 << i)) {
- if (tas_priv->tasdevice[i].cur_prog != prm_no
- || tas_priv->force_fwload_status) {
+ if (prm_no >= 0
+ && (tas_priv->tasdevice[i].cur_prog != prm_no
+ || tas_priv->force_fwload_status)) {
tas_priv->tasdevice[i].cur_conf = -1;
tas_priv->tasdevice[i].is_loading = true;
prog_status++;
@@ -2228,7 +2228,8 @@ int tasdevice_select_tuningprm_cfg(void *context, int prm_no,
}
for (i = 0, status = 0; i < tas_priv->ndev; i++) {
- if (tas_priv->tasdevice[i].cur_conf != cfg_no
+ if (cfg_no >= 0
+ && tas_priv->tasdevice[i].cur_conf != cfg_no
&& (cfg_info[rca_conf_no]->active_dev & (1 << i))
&& (tas_priv->tasdevice[i].is_loaderr == false)) {
status++;
@@ -2238,6 +2239,7 @@ int tasdevice_select_tuningprm_cfg(void *context, int prm_no,
}
if (status) {
+ conf = &(tas_fmw->configs[cfg_no]);
status = 0;
tasdevice_load_data(tas_priv, &(conf->dev_data));
for (i = 0; i < tas_priv->ndev; i++) {
@@ -2281,7 +2283,7 @@ int tasdevice_prmg_load(void *context, int prm_no)
}
for (i = 0, prog_status = 0; i < tas_priv->ndev; i++) {
- if (tas_priv->tasdevice[i].cur_prog != prm_no) {
+ if (prm_no >= 0 && tas_priv->tasdevice[i].cur_prog != prm_no) {
tas_priv->tasdevice[i].cur_conf = -1;
tas_priv->tasdevice[i].is_loading = true;
prog_status++;
@@ -2326,7 +2328,7 @@ int tasdevice_prmg_calibdata_load(void *context, int prm_no)
}
for (i = 0, prog_status = 0; i < tas_priv->ndev; i++) {
- if (tas_priv->tasdevice[i].cur_prog != prm_no) {
+ if (prm_no >= 0 && tas_priv->tasdevice[i].cur_prog != prm_no) {
tas_priv->tasdevice[i].cur_conf = -1;
tas_priv->tasdevice[i].is_loading = true;
prog_status++;
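
The prm_no >= 0 / cfg_no >= 0 guards added in the tas2781-fmwlib hunks above keep a "nothing selected" sentinel from being treated as a real program/config index. A tiny standalone sketch of that defensive check, with illustrative names:

#include <stdio.h>

#define NO_PROGRAM (-1)

/* Only mark a device for loading when a real (non-negative) program is requested. */
static int needs_load(int cur_prog, int requested, int force)
{
	return requested >= 0 && (cur_prog != requested || force);
}

int main(void)
{
	printf("%d\n", needs_load(0, NO_PROGRAM, 0)); /* 0: sentinel, never load */
	printf("%d\n", needs_load(0, 2, 0));          /* 1: program changed */
	printf("%d\n", needs_load(2, 2, 1));          /* 1: forced reload */
	return 0;
}
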
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 32bbe5056a6352..546bd4e333b5fb 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -714,6 +714,9 @@ static int fsl_sai_hw_free(struct snd_pcm_substream *substream,
bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
unsigned int ofs = sai->soc_data->reg_offset;
+ /* Clear xMR to avoid channel swap with mclk_with_tere enabled case */
+ regmap_write(sai->regmap, FSL_SAI_xMR(tx), 0);
+
regmap_update_bits(sai->regmap, FSL_SAI_xCR3(tx, ofs),
FSL_SAI_CR3_TRCE_MASK, 0);
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index ed14d9e4aa5333..42466b4b1ca45e 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -83,6 +83,7 @@ enum {
#define BYT_RT5640_HSMIC2_ON_IN1 BIT(27)
#define BYT_RT5640_JD_HP_ELITEP_1000G2 BIT(28)
#define BYT_RT5640_USE_AMCR0F28 BIT(29)
+#define BYT_RT5640_SWAPPED_SPEAKERS BIT(30)
#define BYTCR_INPUT_DEFAULTS \
(BYT_RT5640_IN3_MAP | \
@@ -157,6 +158,8 @@ static void log_quirks(struct device *dev)
dev_info(dev, "quirk MONO_SPEAKER enabled\n");
if (byt_rt5640_quirk & BYT_RT5640_NO_SPEAKERS)
dev_info(dev, "quirk NO_SPEAKERS enabled\n");
+ if (byt_rt5640_quirk & BYT_RT5640_SWAPPED_SPEAKERS)
+ dev_info(dev, "quirk SWAPPED_SPEAKERS enabled\n");
if (byt_rt5640_quirk & BYT_RT5640_LINEOUT)
dev_info(dev, "quirk LINEOUT enabled\n");
if (byt_rt5640_quirk & BYT_RT5640_LINEOUT_AS_HP2)
@@ -894,6 +897,19 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_SSP0_AIF1 |
BYT_RT5640_MCLK_EN),
},
+ {
+ /* Medion Lifetab S10346 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* Above strings are much too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "10/22/2015"),
+ },
+ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+ BYT_RT5640_SWAPPED_SPEAKERS |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{ /* Mele PCG03 Mini PC */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Mini PC"),
@@ -1619,11 +1635,11 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
const char *platform_name;
struct acpi_device *adev;
struct device *codec_dev;
+ const char *cfg_spk;
bool sof_parent;
int ret_val = 0;
int dai_index = 0;
- int i, cfg_spk;
- int aif;
+ int i, aif;
is_bytcr = false;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -1783,13 +1799,16 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
}
if (byt_rt5640_quirk & BYT_RT5640_NO_SPEAKERS) {
- cfg_spk = 0;
+ cfg_spk = "0";
spk_type = "none";
} else if (byt_rt5640_quirk & BYT_RT5640_MONO_SPEAKER) {
- cfg_spk = 1;
+ cfg_spk = "1";
spk_type = "mono";
+ } else if (byt_rt5640_quirk & BYT_RT5640_SWAPPED_SPEAKERS) {
+ cfg_spk = "swapped";
+ spk_type = "swapped";
} else {
- cfg_spk = 2;
+ cfg_spk = "2";
spk_type = "stereo";
}
@@ -1804,7 +1823,7 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
headset2_string = " cfg-hs2:in1";
snprintf(byt_rt5640_components, sizeof(byt_rt5640_components),
- "cfg-spk:%d cfg-mic:%s aif:%d%s%s", cfg_spk,
+ "cfg-spk:%s cfg-mic:%s aif:%d%s%s", cfg_spk,
map_name[BYT_RT5640_MAP(byt_rt5640_quirk)], aif,
lineout_string, headset2_string);
byt_rt5640_card.components = byt_rt5640_components;
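
With cfg_spk now a string, the components line above is assembled with matching %s/%d conversions. A standalone sketch of that snprintf() usage; the quirk values and buffer size are illustrative:

#include <stdio.h>

int main(void)
{
	char components[64];
	const char *cfg_spk = "swapped";   /* "0", "1", "2" or "swapped" */
	const char *mic_map = "in3";
	int aif = 1;

	snprintf(components, sizeof(components),
		 "cfg-spk:%s cfg-mic:%s aif:%d", cfg_spk, mic_map, aif);
	printf("%s\n", components);
	return 0;
}
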
diff --git a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
index 301b8142d554cb..9008b67682059c 100644
--- a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
@@ -306,13 +306,13 @@ static const struct snd_soc_acpi_adr_device cs35l56_1_adr[] = {
.adr = 0x00013701FA355601ull,
.num_endpoints = 1,
.endpoints = &spk_r_endpoint,
- .name_prefix = "cs35l56-8"
+ .name_prefix = "AMP8"
},
{
.adr = 0x00013601FA355601ull,
.num_endpoints = 1,
.endpoints = &spk_3_endpoint,
- .name_prefix = "cs35l56-7"
+ .name_prefix = "AMP7"
}
};
@@ -321,13 +321,13 @@ static const struct snd_soc_acpi_adr_device cs35l56_2_adr[] = {
.adr = 0x00023301FA355601ull,
.num_endpoints = 1,
.endpoints = &spk_l_endpoint,
- .name_prefix = "cs35l56-1"
+ .name_prefix = "AMP1"
},
{
.adr = 0x00023201FA355601ull,
.num_endpoints = 1,
.endpoints = &spk_2_endpoint,
- .name_prefix = "cs35l56-2"
+ .name_prefix = "AMP2"
}
};
diff --git a/sound/soc/sof/mediatek/mt8186/mt8186.c b/sound/soc/sof/mediatek/mt8186/mt8186.c
index e0d88e7aa8ca02..b69fa788b16f71 100644
--- a/sound/soc/sof/mediatek/mt8186/mt8186.c
+++ b/sound/soc/sof/mediatek/mt8186/mt8186.c
@@ -597,9 +597,6 @@ static struct snd_sof_dsp_ops sof_mt8186_ops = {
static struct snd_sof_of_mach sof_mt8186_machs[] = {
{
- .compatible = "google,steelix",
- .sof_tplg_filename = "sof-mt8186-google-steelix.tplg"
- }, {
.compatible = "mediatek,mt8186",
.sof_tplg_filename = "sof-mt8186.tplg",
},
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index ab2b938502ebed..07cc6a201579aa 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1387,7 +1387,7 @@ free_buf:
static int snd_usb_motu_m_series_boot_quirk(struct usb_device *dev)
{
- msleep(2000);
+ msleep(4000);
return 0;
}
@@ -1630,7 +1630,7 @@ int snd_usb_apply_boot_quirk_once(struct usb_device *dev,
unsigned int id)
{
switch (id) {
- case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
+ case USB_ID(0x07fd, 0x0008): /* MOTU M Series, 1st hardware version */
return snd_usb_motu_m_series_boot_quirk(dev);
}
diff --git a/tools/objtool/noreturns.h b/tools/objtool/noreturns.h
index 649ebdef9c3fa0..1685d7ea6a9f70 100644
--- a/tools/objtool/noreturns.h
+++ b/tools/objtool/noreturns.h
@@ -6,7 +6,6 @@
*
* Yes, this is unfortunate. A better solution is in the works.
*/
-NORETURN(__invalid_creds)
NORETURN(__kunit_abort)
NORETURN(__module_put_and_kthread_exit)
NORETURN(__reiserfs_panic)
diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
index 90f3c9802ffb80..95dc58b94178bf 100644
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@ -62,5 +62,6 @@ cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o
cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
cxl_core-y += config_check.o
cxl_core-y += cxl_core_test.o
+cxl_core-y += cxl_core_exports.o
obj-m += test/
diff --git a/tools/testing/cxl/cxl_core_exports.c b/tools/testing/cxl/cxl_core_exports.c
new file mode 100644
index 00000000000000..077e6883921df2
--- /dev/null
+++ b/tools/testing/cxl/cxl_core_exports.c
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+
+#include "cxl.h"
+
+/* Exporting of cxl_core symbols that are only used by cxl_test */
+EXPORT_SYMBOL_NS_GPL(cxl_num_decoders_committed, CXL);
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index b8854629990227..f4e517a0c7740f 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -669,10 +669,11 @@ static int mock_decoder_commit(struct cxl_decoder *cxld)
return 0;
dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
- if (port->commit_end + 1 != id) {
+ if (cxl_num_decoders_committed(port) != id) {
dev_dbg(&port->dev,
"%s: out of order commit, expected decoder%d.%d\n",
- dev_name(&cxld->dev), port->id, port->commit_end + 1);
+ dev_name(&cxld->dev), port->id,
+ cxl_num_decoders_committed(port));
return -EBUSY;
}
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 3b2061d1c1a527..8247a7c69c36d7 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -155,12 +155,10 @@ ifneq ($(KBUILD_OUTPUT),)
abs_objtree := $(realpath $(abs_objtree))
BUILD := $(abs_objtree)/kselftest
KHDR_INCLUDES := -isystem ${abs_objtree}/usr/include
- KHDR_DIR := ${abs_objtree}/usr/include
else
BUILD := $(CURDIR)
abs_srctree := $(shell cd $(top_srcdir) && pwd)
KHDR_INCLUDES := -isystem ${abs_srctree}/usr/include
- KHDR_DIR := ${abs_srctree}/usr/include
DEFAULT_INSTALL_HDR_PATH := 1
endif
@@ -174,7 +172,7 @@ export KHDR_INCLUDES
# all isn't the first target in the file.
.DEFAULT_GOAL := all
-all: kernel_header_files
+all:
@ret=1; \
for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
@@ -185,23 +183,6 @@ all: kernel_header_files
ret=$$((ret * $$?)); \
done; exit $$ret;
-kernel_header_files:
- @ls $(KHDR_DIR)/linux/*.h >/dev/null 2>/dev/null; \
- if [ $$? -ne 0 ]; then \
- RED='\033[1;31m'; \
- NOCOLOR='\033[0m'; \
- echo; \
- echo -e "$${RED}error$${NOCOLOR}: missing kernel header files."; \
- echo "Please run this and try again:"; \
- echo; \
- echo " cd $(top_srcdir)"; \
- echo " make headers"; \
- echo; \
- exit 1; \
- fi
-
-.PHONY: kernel_header_files
-
run_tests: all
@for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
diff --git a/tools/testing/selftests/alsa/mixer-test.c b/tools/testing/selftests/alsa/mixer-test.c
index 21e482b23f5028..23df154fcdd77b 100644
--- a/tools/testing/selftests/alsa/mixer-test.c
+++ b/tools/testing/selftests/alsa/mixer-test.c
@@ -138,7 +138,7 @@ static void find_controls(void)
err = snd_ctl_elem_info(card_data->handle,
ctl_data->info);
if (err < 0) {
- ksft_print_msg("%s getting info for %d\n",
+ ksft_print_msg("%s getting info for %s\n",
snd_strerror(err),
ctl_data->name);
}
diff --git a/tools/testing/selftests/bpf/config.x86_64 b/tools/testing/selftests/bpf/config.x86_64
index 2e70a604827845..49a29dbc191072 100644
--- a/tools/testing/selftests/bpf/config.x86_64
+++ b/tools/testing/selftests/bpf/config.x86_64
@@ -50,7 +50,6 @@ CONFIG_CRYPTO_SEQIV=y
CONFIG_CRYPTO_XXHASH=y
CONFIG_DCB=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_DEBUG_CREDENTIALS=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_MEMORY_INIT=y
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
index f75f84d0b3d798..7c2241fae19a6e 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
@@ -524,6 +524,37 @@ out:
test_sockmap_pass_prog__destroy(pass);
}
+static void test_sockmap_unconnected_unix(void)
+{
+ int err, map, stream = 0, dgram = 0, zero = 0;
+ struct test_sockmap_pass_prog *skel;
+
+ skel = test_sockmap_pass_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ map = bpf_map__fd(skel->maps.sock_map_rx);
+
+ stream = xsocket(AF_UNIX, SOCK_STREAM, 0);
+ if (stream < 0)
+ return;
+
+ dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
+ if (dgram < 0) {
+ close(stream);
+ return;
+ }
+
+ err = bpf_map_update_elem(map, &zero, &stream, BPF_ANY);
+ ASSERT_ERR(err, "bpf_map_update_elem(stream)");
+
+ err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY);
+ ASSERT_OK(err, "bpf_map_update_elem(dgram)");
+
+ close(stream);
+ close(dgram);
+}
+
void test_sockmap_basic(void)
{
if (test__start_subtest("sockmap create_update_free"))
@@ -566,4 +597,7 @@ void test_sockmap_basic(void)
test_sockmap_skb_verdict_fionread(false);
if (test__start_subtest("sockmap skb_verdict msg_f_peek"))
test_sockmap_skb_verdict_peek();
+
+ if (test__start_subtest("sockmap unconnected af_unix"))
+ test_sockmap_unconnected_unix();
}
diff --git a/tools/testing/selftests/hid/config.common b/tools/testing/selftests/hid/config.common
index 0617275d93cc70..0f456dbab62f37 100644
--- a/tools/testing/selftests/hid/config.common
+++ b/tools/testing/selftests/hid/config.common
@@ -46,7 +46,6 @@ CONFIG_CRYPTO_SEQIV=y
CONFIG_CRYPTO_XXHASH=y
CONFIG_DCB=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_DEBUG_CREDENTIALS=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEFAULT_FQ_CODEL=y
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 963435959a92f0..3e0c36b8ddd56e 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -17,16 +17,6 @@ else
ARCH_DIR := $(ARCH)
endif
-ifeq ($(ARCH),arm64)
-tools_dir := $(top_srcdir)/tools
-arm64_tools_dir := $(tools_dir)/arch/arm64/tools/
-GEN_HDRS := $(top_srcdir)/tools/arch/arm64/include/generated/
-CFLAGS += -I$(GEN_HDRS)
-
-$(GEN_HDRS): $(wildcard $(arm64_tools_dir)/*)
- $(MAKE) -C $(arm64_tools_dir) O=$(tools_dir)
-endif
-
LIBKVM += lib/assert.c
LIBKVM += lib/elf.c
LIBKVM += lib/guest_modes.c
@@ -234,6 +224,22 @@ CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
ifeq ($(ARCH),s390)
CFLAGS += -march=z10
endif
+ifeq ($(ARCH),arm64)
+tools_dir := $(top_srcdir)/tools
+arm64_tools_dir := $(tools_dir)/arch/arm64/tools/
+
+ifneq ($(abs_objdir),)
+arm64_hdr_outdir := $(abs_objdir)/tools/
+else
+arm64_hdr_outdir := $(tools_dir)/
+endif
+
+GEN_HDRS := $(arm64_hdr_outdir)arch/arm64/include/generated/
+CFLAGS += -I$(GEN_HDRS)
+
+$(GEN_HDRS): $(wildcard $(arm64_tools_dir)/*)
+ $(MAKE) -C $(arm64_tools_dir) OUTPUT=$(arm64_hdr_outdir)
+endif
no-pie-option := $(call try-run, echo 'int main(void) { return 0; }' | \
$(CC) -Werror $(CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
index be7bf522443478..8274ef04301f67 100644
--- a/tools/testing/selftests/kvm/get-reg-list.c
+++ b/tools/testing/selftests/kvm/get-reg-list.c
@@ -71,11 +71,12 @@ static const char *config_name(struct vcpu_reg_list *c)
for_each_sublist(c, s) {
if (!strcmp(s->name, "base"))
continue;
- strcat(c->name + len, s->name);
- len += strlen(s->name) + 1;
- c->name[len - 1] = '+';
+ if (len)
+ c->name[len++] = '+';
+ strcpy(c->name + len, s->name);
+ len += strlen(s->name);
}
- c->name[len - 1] = '\0';
+ c->name[len] = '\0';
return c->name;
}
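
The rewritten config_name() above inserts a '+' only between sublist names and keeps the running length in step with what was actually written. A standalone sketch of the same join logic, with an illustrative sublist array and buffer size:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *sublists[] = { "base", "fp_f", "sve" };
	char name[64];
	size_t len = 0;

	for (size_t i = 0; i < sizeof(sublists) / sizeof(sublists[0]); i++) {
		if (!strcmp(sublists[i], "base"))
			continue;            /* "base" never appears in the name */
		if (len)
			name[len++] = '+';   /* separator only between entries */
		strcpy(name + len, sublists[i]);
		len += strlen(sublists[i]);
	}
	name[len] = '\0';

	printf("%s\n", name);                /* prints "fp_f+sve" */
	return 0;
}
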
diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c
index 6bedaea9539515..25de4b8bc3472e 100644
--- a/tools/testing/selftests/kvm/riscv/get-reg-list.c
+++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c
@@ -458,8 +458,9 @@ void print_reg(const char *prefix, __u64 id)
reg_size = "KVM_REG_SIZE_U128";
break;
default:
- printf("\tKVM_REG_RISCV | (%lld << KVM_REG_SIZE_SHIFT) | 0x%llx /* UNKNOWN */,",
- (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id & REG_MASK);
+ printf("\tKVM_REG_RISCV | (%lld << KVM_REG_SIZE_SHIFT) | 0x%llx /* UNKNOWN */,\n",
+ (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id & ~REG_MASK);
+ return;
}
switch (id & KVM_REG_RISCV_TYPE_MASK) {
@@ -496,8 +497,9 @@ void print_reg(const char *prefix, __u64 id)
reg_size, sbi_ext_id_to_str(prefix, id));
break;
default:
- printf("\tKVM_REG_RISCV | %s | 0x%llx /* UNKNOWN */,",
- reg_size, id & REG_MASK);
+ printf("\tKVM_REG_RISCV | %s | 0x%llx /* UNKNOWN */,\n",
+ reg_size, id & ~REG_MASK);
+ return;
}
}
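
The print_reg() fix above reports the unknown register as id & ~REG_MASK, i.e. the bits not covered by the known arch/size/type fields, and terminates the line. A standalone sketch of that masking; the mask value is illustrative, not the KVM one:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	const uint64_t REG_MASK = 0xffff000000000000ull; /* illustrative field mask */
	uint64_t id = 0xabcd000000000123ull;

	printf("known fields:  0x%" PRIx64 "\n", id & REG_MASK);  /* 0xabcd000000000000 */
	printf("leftover bits: 0x%" PRIx64 "\n", id & ~REG_MASK); /* 0x123 */
	return 0;
}
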
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 118e0964bda946..aa646e0661f36c 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -44,26 +44,10 @@ endif
selfdir = $(realpath $(dir $(filter %/lib.mk,$(MAKEFILE_LIST))))
top_srcdir = $(selfdir)/../../..
-ifeq ("$(origin O)", "command line")
- KBUILD_OUTPUT := $(O)
+ifeq ($(KHDR_INCLUDES),)
+KHDR_INCLUDES := -isystem $(top_srcdir)/usr/include
endif
-ifneq ($(KBUILD_OUTPUT),)
- # Make's built-in functions such as $(abspath ...), $(realpath ...) cannot
- # expand a shell special character '~'. We use a somewhat tedious way here.
- abs_objtree := $(shell cd $(top_srcdir) && mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) && pwd)
- $(if $(abs_objtree),, \
- $(error failed to create output directory "$(KBUILD_OUTPUT)"))
- # $(realpath ...) resolves symlinks
- abs_objtree := $(realpath $(abs_objtree))
- KHDR_DIR := ${abs_objtree}/usr/include
-else
- abs_srctree := $(shell cd $(top_srcdir) && pwd)
- KHDR_DIR := ${abs_srctree}/usr/include
-endif
-
-KHDR_INCLUDES := -isystem $(KHDR_DIR)
-
# The following are built by lib.mk common compile rules.
# TEST_CUSTOM_PROGS should be used by tests that require
# custom build rule and prevent common build rule use.
@@ -74,25 +58,7 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
-all: kernel_header_files $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) \
- $(TEST_GEN_FILES)
-
-kernel_header_files:
- @ls $(KHDR_DIR)/linux/*.h >/dev/null 2>/dev/null; \
- if [ $$? -ne 0 ]; then \
- RED='\033[1;31m'; \
- NOCOLOR='\033[0m'; \
- echo; \
- echo -e "$${RED}error$${NOCOLOR}: missing kernel header files."; \
- echo "Please run this and try again:"; \
- echo; \
- echo " cd $(top_srcdir)"; \
- echo " make headers"; \
- echo; \
- exit 1; \
- fi
-
-.PHONY: kernel_header_files
+all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
define RUN_TESTS
BASE_DIR="$(selfdir)"; \
diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c
index 7324ce5363c0c9..6f2f839904416c 100644
--- a/tools/testing/selftests/mm/cow.c
+++ b/tools/testing/selftests/mm/cow.c
@@ -1680,6 +1680,8 @@ int main(int argc, char **argv)
{
int err;
+ ksft_print_header();
+
pagesize = getpagesize();
thpsize = read_pmd_pagesize();
if (thpsize)
@@ -1689,7 +1691,6 @@ int main(int argc, char **argv)
ARRAY_SIZE(hugetlbsizes));
detect_huge_zeropage();
- ksft_print_header();
ksft_set_plan(ARRAY_SIZE(anon_test_cases) * tests_per_anon_test_case() +
ARRAY_SIZE(anon_thp_test_cases) * tests_per_anon_thp_test_case() +
ARRAY_SIZE(non_anon_test_cases) * tests_per_non_anon_test_case());
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 5b2aca4c5f1007..9e5bf59a20bff4 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -91,6 +91,7 @@ TEST_PROGS += test_bridge_neigh_suppress.sh
TEST_PROGS += test_vxlan_nolocalbypass.sh
TEST_PROGS += test_bridge_backup_port.sh
TEST_PROGS += fdb_flush.sh
+TEST_PROGS += vlan_hw_filter.sh
TEST_FILES := settings
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 3c94f2f194d681..24a57b3ae2155d 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -2776,7 +2776,7 @@ backup_tests()
fi
if reset "mpc backup" &&
- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
+ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
speed=slow \
run_tests $ns1 $ns2 10.0.1.1
@@ -2785,7 +2785,7 @@ backup_tests()
fi
if reset "mpc backup both sides" &&
- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
+ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow,backup
pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
speed=slow \
@@ -2795,7 +2795,7 @@ backup_tests()
fi
if reset "mpc switch to backup" &&
- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
+ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
sflags=backup speed=slow \
run_tests $ns1 $ns2 10.0.1.1
@@ -2804,7 +2804,7 @@ backup_tests()
fi
if reset "mpc switch to backup both sides" &&
- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
+ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow
pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
sflags=backup speed=slow \
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 38be9706c45f18..26827ea4e3e5f2 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -297,7 +297,7 @@ kci_test_addrlft()
done
sleep 5
- run_cmd_grep "10.23.11." ip addr show dev "$devdummy"
+ run_cmd_grep_fail "10.23.11." ip addr show dev "$devdummy"
if [ $? -eq 0 ]; then
check_err 1
end_test "FAIL: preferred_lft addresses remaining"
diff --git a/tools/testing/selftests/net/vlan_hw_filter.sh b/tools/testing/selftests/net/vlan_hw_filter.sh
new file mode 100755
index 00000000000000..7bc804ffaf7c58
--- /dev/null
+++ b/tools/testing/selftests/net/vlan_hw_filter.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+readonly NETNS="ns-$(mktemp -u XXXXXX)"
+
+ret=0
+
+cleanup() {
+ ip netns del $NETNS
+}
+
+trap cleanup EXIT
+
+fail() {
+ echo "ERROR: ${1:-unexpected return code} (ret: $_)" >&2
+ ret=1
+}
+
+ip netns add ${NETNS}
+ip netns exec ${NETNS} ip link add bond0 type bond mode 0
+ip netns exec ${NETNS} ip link add bond_slave_1 type veth peer veth2
+ip netns exec ${NETNS} ip link set bond_slave_1 master bond0
+ip netns exec ${NETNS} ethtool -K bond0 rx-vlan-filter off
+ip netns exec ${NETNS} ip link add link bond_slave_1 name bond_slave_1.0 type vlan id 0
+ip netns exec ${NETNS} ip link add link bond0 name bond0.0 type vlan id 0
+ip netns exec ${NETNS} ip link set bond_slave_1 nomaster
+ip netns exec ${NETNS} ip link del veth2 || fail "Please check vlan HW filter function"
+
+exit $ret
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3b1b9e8dd70c5d..7db96875ac4627 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -5540,7 +5540,6 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
return r < 0 ? r : 0;
}
-/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev)
{
@@ -5548,6 +5547,8 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
struct kvm_io_bus *new_bus, *bus;
struct kvm_io_range range;
+ lockdep_assert_held(&kvm->slots_lock);
+
bus = kvm_get_bus(kvm, bus_idx);
if (!bus)
return -ENOMEM;
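
The final hunk replaces a "caller must hold slots_lock" comment with lockdep_assert_held(), turning the locking contract into a runtime check. A standalone userspace analogue of that idea, using a simple held flag and assert() rather than lockdep; all names are made up:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_lock {
	bool held;
};

static void lock(struct fake_lock *l)   { l->held = true; }
static void unlock(struct fake_lock *l) { l->held = false; }

/* Instead of documenting "caller must hold the lock", assert it. */
static void register_dev(struct fake_lock *slots_lock, const char *name)
{
	assert(slots_lock->held && "caller must hold slots_lock");
	printf("registered %s\n", name);
}

int main(void)
{
	struct fake_lock slots_lock = { .held = false };

	lock(&slots_lock);
	register_dev(&slots_lock, "io-bus-dev");
	unlock(&slots_lock);
	return 0;
}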